Merge branch 'master' of git://1984.lsi.us.es/nf
author David S. Miller <davem@davemloft.net>
Thu, 7 Mar 2013 20:20:02 +0000 (15:20 -0500)
committer David S. Miller <davem@davemloft.net>
Thu, 7 Mar 2013 20:20:02 +0000 (15:20 -0500)
Pablo Neira Ayuso says:

====================
The following patchset contains Netfilter fixes for your net tree,
they are:

* Don't generate audit log message if audit is not enabled, from Gao Feng.

* Fix logging formatting for packets dropped by helpers, by Joe Perches.

* Fix a compilation warning in nfnetlink if CONFIG_PROVE_RCU is not set,
  from Paul Bolle.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
3038 files changed:
Documentation/00-INDEX
Documentation/ABI/testing/sysfs-bus-fcoe
Documentation/ABI/testing/sysfs-platform-msi-laptop [new file with mode: 0644]
Documentation/DMA-API-HOWTO.txt
Documentation/IPMI.txt
Documentation/block/cfq-iosched.txt
Documentation/blockdev/nbd.txt
Documentation/cgroups/blkio-controller.txt
Documentation/coccinelle.txt
Documentation/device-mapper/cache-policies.txt [new file with mode: 0644]
Documentation/device-mapper/cache.txt [new file with mode: 0644]
Documentation/device-mapper/dm-raid.txt
Documentation/devicetree/bindings/arc/interrupts.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt [deleted file]
Documentation/devicetree/bindings/arm/armadeus.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/fsl.txt
Documentation/devicetree/bindings/clock/imx5-clock.txt
Documentation/devicetree/bindings/clock/imx6q-clock.txt
Documentation/devicetree/bindings/dma/snps-dma.txt
Documentation/devicetree/bindings/metag/meta-intc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mips/cpu_irq.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/elm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/mtd-physmap.txt
Documentation/devicetree/bindings/serial/lantiq_asc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/thermal/dove-thermal.txt [new file with mode: 0644]
Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt [new file with mode: 0644]
Documentation/devicetree/bindings/thermal/rcar-thermal.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/w1/fsl-imx-owire.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/atmel-at91rm9200-wdt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
Documentation/devicetree/bindings/watchdog/marvel.txt
Documentation/devicetree/bindings/watchdog/pnx4008-wdt.txt
Documentation/devicetree/bindings/watchdog/qca-ar7130-wdt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
Documentation/dma-buf-sharing.txt
Documentation/filesystems/Locking
Documentation/filesystems/porting
Documentation/filesystems/vfs.txt
Documentation/kbuild/kconfig-language.txt
Documentation/kbuild/kconfig.txt
Documentation/kernel-parameters.txt
Documentation/metag/00-INDEX [new file with mode: 0644]
Documentation/metag/kernel-ABI.txt [new file with mode: 0644]
Documentation/networking/tuntap.txt
Documentation/scsi/ChangeLog.megaraid_sas
Documentation/thermal/exynos_thermal_emulation [new file with mode: 0644]
Documentation/thermal/intel_powerclamp.txt [new file with mode: 0644]
Documentation/thermal/sysfs-api.txt
Documentation/watchdog/watchdog-kernel-api.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/Kconfig
arch/alpha/kernel/srm_env.c
arch/arc/Kbuild [new file with mode: 0644]
arch/arc/Kconfig [new file with mode: 0644]
arch/arc/Kconfig.debug [new file with mode: 0644]
arch/arc/Makefile [new file with mode: 0644]
arch/arc/boot/Makefile [new file with mode: 0644]
arch/arc/boot/dts/Makefile [new file with mode: 0644]
arch/arc/boot/dts/angel4.dts [new file with mode: 0644]
arch/arc/boot/dts/skeleton.dts [new file with mode: 0644]
arch/arc/boot/dts/skeleton.dtsi [new file with mode: 0644]
arch/arc/configs/fpga_defconfig [new file with mode: 0644]
arch/arc/include/asm/Kbuild [new file with mode: 0644]
arch/arc/include/asm/arcregs.h [new file with mode: 0644]
arch/arc/include/asm/asm-offsets.h [new file with mode: 0644]
arch/arc/include/asm/atomic.h [new file with mode: 0644]
arch/arc/include/asm/barrier.h [new file with mode: 0644]
arch/arc/include/asm/bitops.h [new file with mode: 0644]
arch/arc/include/asm/bug.h [new file with mode: 0644]
arch/arc/include/asm/cache.h [new file with mode: 0644]
arch/arc/include/asm/cacheflush.h [new file with mode: 0644]
arch/arc/include/asm/checksum.h [new file with mode: 0644]
arch/arc/include/asm/clk.h [new file with mode: 0644]
arch/arc/include/asm/cmpxchg.h [new file with mode: 0644]
arch/arc/include/asm/current.h [new file with mode: 0644]
arch/arc/include/asm/defines.h [new file with mode: 0644]
arch/arc/include/asm/delay.h [new file with mode: 0644]
arch/arc/include/asm/disasm.h [new file with mode: 0644]
arch/arc/include/asm/dma-mapping.h [new file with mode: 0644]
arch/arc/include/asm/dma.h [new file with mode: 0644]
arch/arc/include/asm/elf.h [new file with mode: 0644]
arch/arc/include/asm/entry.h [new file with mode: 0644]
arch/arc/include/asm/exec.h [new file with mode: 0644]
arch/arc/include/asm/futex.h [new file with mode: 0644]
arch/arc/include/asm/io.h [new file with mode: 0644]
arch/arc/include/asm/irq.h [new file with mode: 0644]
arch/arc/include/asm/irqflags.h [new file with mode: 0644]
arch/arc/include/asm/kdebug.h [new file with mode: 0644]
arch/arc/include/asm/kgdb.h [new file with mode: 0644]
arch/arc/include/asm/kprobes.h [new file with mode: 0644]
arch/arc/include/asm/linkage.h [new file with mode: 0644]
arch/arc/include/asm/mach_desc.h [new file with mode: 0644]
arch/arc/include/asm/mmu.h [new file with mode: 0644]
arch/arc/include/asm/mmu_context.h [new file with mode: 0644]
arch/arc/include/asm/module.h [new file with mode: 0644]
arch/arc/include/asm/mutex.h [new file with mode: 0644]
arch/arc/include/asm/page.h [new file with mode: 0644]
arch/arc/include/asm/perf_event.h [new file with mode: 0644]
arch/arc/include/asm/pgalloc.h [new file with mode: 0644]
arch/arc/include/asm/pgtable.h [new file with mode: 0644]
arch/arc/include/asm/processor.h [new file with mode: 0644]
arch/arc/include/asm/prom.h [new file with mode: 0644]
arch/arc/include/asm/ptrace.h [new file with mode: 0644]
arch/arc/include/asm/sections.h [new file with mode: 0644]
arch/arc/include/asm/segment.h [new file with mode: 0644]
arch/arc/include/asm/serial.h [new file with mode: 0644]
arch/arc/include/asm/setup.h [new file with mode: 0644]
arch/arc/include/asm/smp.h [new file with mode: 0644]
arch/arc/include/asm/spinlock.h [new file with mode: 0644]
arch/arc/include/asm/spinlock_types.h [new file with mode: 0644]
arch/arc/include/asm/string.h [new file with mode: 0644]
arch/arc/include/asm/switch_to.h [new file with mode: 0644]
arch/arc/include/asm/syscall.h [new file with mode: 0644]
arch/arc/include/asm/syscalls.h [new file with mode: 0644]
arch/arc/include/asm/thread_info.h [new file with mode: 0644]
arch/arc/include/asm/timex.h [new file with mode: 0644]
arch/arc/include/asm/tlb-mmu1.h [new file with mode: 0644]
arch/arc/include/asm/tlb.h [new file with mode: 0644]
arch/arc/include/asm/tlbflush.h [new file with mode: 0644]
arch/arc/include/asm/uaccess.h [new file with mode: 0644]
arch/arc/include/asm/unaligned.h [new file with mode: 0644]
arch/arc/include/asm/unwind.h [new file with mode: 0644]
arch/arc/include/uapi/asm/Kbuild [new file with mode: 0644]
arch/arc/include/uapi/asm/byteorder.h [new file with mode: 0644]
arch/arc/include/uapi/asm/cachectl.h [new file with mode: 0644]
arch/arc/include/uapi/asm/elf.h [new file with mode: 0644]
arch/arc/include/uapi/asm/page.h [new file with mode: 0644]
arch/arc/include/uapi/asm/ptrace.h [new file with mode: 0644]
arch/arc/include/uapi/asm/setup.h [new file with mode: 0644]
arch/arc/include/uapi/asm/sigcontext.h [new file with mode: 0644]
arch/arc/include/uapi/asm/signal.h [new file with mode: 0644]
arch/arc/include/uapi/asm/swab.h [new file with mode: 0644]
arch/arc/include/uapi/asm/unistd.h [new file with mode: 0644]
arch/arc/kernel/Makefile [new file with mode: 0644]
arch/arc/kernel/arc_hostlink.c [new file with mode: 0644]
arch/arc/kernel/arcksyms.c [new file with mode: 0644]
arch/arc/kernel/asm-offsets.c [new file with mode: 0644]
arch/arc/kernel/clk.c [new file with mode: 0644]
arch/arc/kernel/ctx_sw.c [new file with mode: 0644]
arch/arc/kernel/ctx_sw_asm.S [new file with mode: 0644]
arch/arc/kernel/devtree.c [new file with mode: 0644]
arch/arc/kernel/disasm.c [new file with mode: 0644]
arch/arc/kernel/entry.S [new file with mode: 0644]
arch/arc/kernel/fpu.c [new file with mode: 0644]
arch/arc/kernel/head.S [new file with mode: 0644]
arch/arc/kernel/irq.c [new file with mode: 0644]
arch/arc/kernel/kgdb.c [new file with mode: 0644]
arch/arc/kernel/kprobes.c [new file with mode: 0644]
arch/arc/kernel/module.c [new file with mode: 0644]
arch/arc/kernel/process.c [new file with mode: 0644]
arch/arc/kernel/ptrace.c [new file with mode: 0644]
arch/arc/kernel/reset.c [new file with mode: 0644]
arch/arc/kernel/setup.c [new file with mode: 0644]
arch/arc/kernel/signal.c [new file with mode: 0644]
arch/arc/kernel/smp.c [new file with mode: 0644]
arch/arc/kernel/stacktrace.c [new file with mode: 0644]
arch/arc/kernel/sys.c [new file with mode: 0644]
arch/arc/kernel/time.c [new file with mode: 0644]
arch/arc/kernel/traps.c [new file with mode: 0644]
arch/arc/kernel/troubleshoot.c [new file with mode: 0644]
arch/arc/kernel/unaligned.c [new file with mode: 0644]
arch/arc/kernel/unwind.c [new file with mode: 0644]
arch/arc/kernel/vmlinux.lds.S [new file with mode: 0644]
arch/arc/lib/Makefile [new file with mode: 0644]
arch/arc/lib/memcmp.S [new file with mode: 0644]
arch/arc/lib/memcpy-700.S [new file with mode: 0644]
arch/arc/lib/memset.S [new file with mode: 0644]
arch/arc/lib/strchr-700.S [new file with mode: 0644]
arch/arc/lib/strcmp.S [new file with mode: 0644]
arch/arc/lib/strcpy-700.S [new file with mode: 0644]
arch/arc/lib/strlen.S [new file with mode: 0644]
arch/arc/mm/Makefile [new file with mode: 0644]
arch/arc/mm/cache_arc700.c [new file with mode: 0644]
arch/arc/mm/dma.c [new file with mode: 0644]
arch/arc/mm/extable.c [new file with mode: 0644]
arch/arc/mm/fault.c [new file with mode: 0644]
arch/arc/mm/init.c [new file with mode: 0644]
arch/arc/mm/ioremap.c [new file with mode: 0644]
arch/arc/mm/tlb.c [new file with mode: 0644]
arch/arc/mm/tlbex.S [new file with mode: 0644]
arch/arc/oprofile/Makefile [new file with mode: 0644]
arch/arc/oprofile/common.c [new file with mode: 0644]
arch/arc/plat-arcfpga/Kconfig [new file with mode: 0644]
arch/arc/plat-arcfpga/Makefile [new file with mode: 0644]
arch/arc/plat-arcfpga/include/plat/irq.h [new file with mode: 0644]
arch/arc/plat-arcfpga/include/plat/memmap.h [new file with mode: 0644]
arch/arc/plat-arcfpga/include/plat/smp.h [new file with mode: 0644]
arch/arc/plat-arcfpga/irq.c [new file with mode: 0644]
arch/arc/plat-arcfpga/platform.c [new file with mode: 0644]
arch/arc/plat-arcfpga/smp.c [new file with mode: 0644]
arch/arm/Kconfig
arch/arm/boot/Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/armada-370-db.dts
arch/arm/boot/dts/armada-370-mirabox.dts
arch/arm/boot/dts/armada-370-rd.dts [new file with mode: 0644]
arch/arm/boot/dts/armada-370-xp.dtsi
arch/arm/boot/dts/armada-370.dtsi
arch/arm/boot/dts/armada-xp-db.dts
arch/arm/boot/dts/armada-xp-gp.dts [new file with mode: 0644]
arch/arm/boot/dts/armada-xp-mv78230.dtsi
arch/arm/boot/dts/armada-xp-mv78260.dtsi
arch/arm/boot/dts/armada-xp-mv78460.dtsi
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/armada-xp.dtsi
arch/arm/boot/dts/dove-cubox.dts
arch/arm/boot/dts/dove.dtsi
arch/arm/boot/dts/imx25-karo-tx25.dts
arch/arm/boot/dts/imx25-pdk.dts [new file with mode: 0644]
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx27-3ds.dts [deleted file]
arch/arm/boot/dts/imx27-apf27.dts
arch/arm/boot/dts/imx27-pdk.dts [new file with mode: 0644]
arch/arm/boot/dts/imx31-bug.dts
arch/arm/boot/dts/imx51-apf51.dts [new file with mode: 0644]
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx51.dtsi
arch/arm/boot/dts/imx53-ard.dts
arch/arm/boot/dts/imx53-evk.dts
arch/arm/boot/dts/imx53-mba53.dts [new file with mode: 0644]
arch/arm/boot/dts/imx53-qsb.dts
arch/arm/boot/dts/imx53-smd.dts
arch/arm/boot/dts/imx53-tqma53.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6dl.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx6q-arm2.dts
arch/arm/boot/dts/imx6q-sabreauto.dts
arch/arm/boot/dts/imx6q-sabrelite.dts
arch/arm/boot/dts/imx6q-sabresd.dts
arch/arm/boot/dts/imx6q.dtsi
arch/arm/boot/dts/imx6qdl.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-6282.dtsi
arch/arm/boot/dts/kirkwood-dreamplug.dts
arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-mplcec4.dts
arch/arm/boot/dts/kirkwood-ns2-common.dtsi
arch/arm/boot/dts/kirkwood-nsa310.dts
arch/arm/boot/dts/kirkwood-openblocks_a6.dts
arch/arm/boot/dts/kirkwood-topkick.dts
arch/arm/boot/dts/kirkwood.dtsi
arch/arm/configs/dove_defconfig
arch/arm/configs/mvebu_defconfig
arch/arm/include/asm/delay.h
arch/arm/include/asm/pgtable.h
arch/arm/kernel/kprobes.c
arch/arm/kernel/smp.c
arch/arm/lib/delay.c
arch/arm/mach-dove/Kconfig
arch/arm/mach-dove/Makefile
arch/arm/mach-dove/board-dt.c [new file with mode: 0644]
arch/arm/mach-dove/common.c
arch/arm/mach-imx/clk-imx51-imx53.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-kirkwood/Kconfig
arch/arm/mach-kirkwood/Makefile
arch/arm/mach-kirkwood/board-dreamplug.c
arch/arm/mach-kirkwood/board-dt.c
arch/arm/mach-kirkwood/board-guruplug.c [new file with mode: 0644]
arch/arm/mach-kirkwood/board-mplcec4.c
arch/arm/mach-kirkwood/board-ns2.c
arch/arm/mach-kirkwood/board-nsa310.c
arch/arm/mach-kirkwood/board-openblocks_a6.c
arch/arm/mach-kirkwood/board-usi_topkick.c
arch/arm/mach-kirkwood/common.h
arch/arm/mach-mvebu/irq-armada-370-xp.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/am33xx-restart.c [new file with mode: 0644]
arch/arm/mach-omap2/am35xx-emac.c
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/cclock33xx_data.c
arch/arm/mach-omap2/cclock3xxx_data.c
arch/arm/mach-omap2/cclock44xx_data.c
arch/arm/mach-omap2/clock.h
arch/arm/mach-omap2/cm33xx.c
arch/arm/mach-omap2/cm33xx.h
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_33xx_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_hwmod_reset.c [new file with mode: 0644]
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/pm24xx.c
arch/arm/mach-omap2/pm44xx.c
arch/arm/mach-omap2/prm33xx.c
arch/arm/mach-omap2/prm33xx.h
arch/arm/mach-omap2/sleep24xx.S
arch/arm/mach-omap2/soc.h
arch/arm/mach-omap2/sr_device.c
arch/arm/mm/alignment.c
arch/arm/plat-omap/Kconfig
arch/arm/plat-omap/include/plat/timex.h
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/sys32.S
arch/avr32/Kconfig
arch/avr32/include/asm/elf.h
arch/blackfin/Kconfig
arch/blackfin/include/asm/elf.h
arch/blackfin/kernel/cplbinfo.c
arch/c6x/include/asm/elf.h
arch/cris/Kconfig
arch/cris/arch-v10/drivers/sync_serial.c
arch/cris/arch-v32/drivers/cryptocop.c
arch/cris/arch-v32/drivers/sync_serial.c
arch/cris/include/asm/elf.h
arch/frv/Kconfig
arch/frv/include/asm/elf.h
arch/frv/mm/elf-fdpic.c
arch/h8300/Kconfig
arch/h8300/include/asm/elf.h
arch/hexagon/include/asm/elf.h
arch/ia64/Kconfig
arch/ia64/include/asm/elf.h
arch/ia64/kernel/kprobes.c
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/salinfo.c
arch/m32r/Kconfig
arch/m32r/include/asm/elf.h
arch/m68k/Kconfig
arch/m68k/include/asm/elf.h
arch/metag/Kconfig [new file with mode: 0644]
arch/metag/Kconfig.debug [new file with mode: 0644]
arch/metag/Kconfig.soc [new file with mode: 0644]
arch/metag/Makefile [new file with mode: 0644]
arch/metag/boot/.gitignore [new file with mode: 0644]
arch/metag/boot/Makefile [new file with mode: 0644]
arch/metag/boot/dts/Makefile [new file with mode: 0644]
arch/metag/boot/dts/skeleton.dts [new file with mode: 0644]
arch/metag/boot/dts/skeleton.dtsi [new file with mode: 0644]
arch/metag/configs/meta1_defconfig [new file with mode: 0644]
arch/metag/configs/meta2_defconfig [new file with mode: 0644]
arch/metag/configs/meta2_smp_defconfig [new file with mode: 0644]
arch/metag/include/asm/Kbuild [new file with mode: 0644]
arch/metag/include/asm/atomic.h [new file with mode: 0644]
arch/metag/include/asm/atomic_lnkget.h [new file with mode: 0644]
arch/metag/include/asm/atomic_lock1.h [new file with mode: 0644]
arch/metag/include/asm/barrier.h [new file with mode: 0644]
arch/metag/include/asm/bitops.h [new file with mode: 0644]
arch/metag/include/asm/bug.h [new file with mode: 0644]
arch/metag/include/asm/cache.h [new file with mode: 0644]
arch/metag/include/asm/cacheflush.h [new file with mode: 0644]
arch/metag/include/asm/cachepart.h [new file with mode: 0644]
arch/metag/include/asm/checksum.h [new file with mode: 0644]
arch/metag/include/asm/clock.h [new file with mode: 0644]
arch/metag/include/asm/cmpxchg.h [new file with mode: 0644]
arch/metag/include/asm/cmpxchg_irq.h [new file with mode: 0644]
arch/metag/include/asm/cmpxchg_lnkget.h [new file with mode: 0644]
arch/metag/include/asm/cmpxchg_lock1.h [new file with mode: 0644]
arch/metag/include/asm/core_reg.h [new file with mode: 0644]
arch/metag/include/asm/cpu.h [new file with mode: 0644]
arch/metag/include/asm/da.h [new file with mode: 0644]
arch/metag/include/asm/delay.h [new file with mode: 0644]
arch/metag/include/asm/div64.h [new file with mode: 0644]
arch/metag/include/asm/dma-mapping.h [new file with mode: 0644]
arch/metag/include/asm/elf.h [new file with mode: 0644]
arch/metag/include/asm/fixmap.h [new file with mode: 0644]
arch/metag/include/asm/ftrace.h [new file with mode: 0644]
arch/metag/include/asm/global_lock.h [new file with mode: 0644]
arch/metag/include/asm/gpio.h [new file with mode: 0644]
arch/metag/include/asm/highmem.h [new file with mode: 0644]
arch/metag/include/asm/hugetlb.h [new file with mode: 0644]
arch/metag/include/asm/hwthread.h [new file with mode: 0644]
arch/metag/include/asm/io.h [new file with mode: 0644]
arch/metag/include/asm/irq.h [new file with mode: 0644]
arch/metag/include/asm/irqflags.h [new file with mode: 0644]
arch/metag/include/asm/l2cache.h [new file with mode: 0644]
arch/metag/include/asm/linkage.h [new file with mode: 0644]
arch/metag/include/asm/mach/arch.h [new file with mode: 0644]
arch/metag/include/asm/metag_isa.h [new file with mode: 0644]
arch/metag/include/asm/metag_mem.h [new file with mode: 0644]
arch/metag/include/asm/metag_regs.h [new file with mode: 0644]
arch/metag/include/asm/mman.h [new file with mode: 0644]
arch/metag/include/asm/mmu.h [new file with mode: 0644]
arch/metag/include/asm/mmu_context.h [new file with mode: 0644]
arch/metag/include/asm/mmzone.h [new file with mode: 0644]
arch/metag/include/asm/module.h [new file with mode: 0644]
arch/metag/include/asm/page.h [new file with mode: 0644]
arch/metag/include/asm/perf_event.h [new file with mode: 0644]
arch/metag/include/asm/pgalloc.h [new file with mode: 0644]
arch/metag/include/asm/pgtable.h [new file with mode: 0644]
arch/metag/include/asm/processor.h [new file with mode: 0644]
arch/metag/include/asm/prom.h [new file with mode: 0644]
arch/metag/include/asm/ptrace.h [new file with mode: 0644]
arch/metag/include/asm/setup.h [new file with mode: 0644]
arch/metag/include/asm/smp.h [new file with mode: 0644]
arch/metag/include/asm/sparsemem.h [new file with mode: 0644]
arch/metag/include/asm/spinlock.h [new file with mode: 0644]
arch/metag/include/asm/spinlock_lnkget.h [new file with mode: 0644]
arch/metag/include/asm/spinlock_lock1.h [new file with mode: 0644]
arch/metag/include/asm/spinlock_types.h [new file with mode: 0644]
arch/metag/include/asm/stacktrace.h [new file with mode: 0644]
arch/metag/include/asm/string.h [new file with mode: 0644]
arch/metag/include/asm/switch.h [new file with mode: 0644]
arch/metag/include/asm/syscall.h [new file with mode: 0644]
arch/metag/include/asm/syscalls.h [new file with mode: 0644]
arch/metag/include/asm/tbx.h [new file with mode: 0644]
arch/metag/include/asm/tcm.h [new file with mode: 0644]
arch/metag/include/asm/thread_info.h [new file with mode: 0644]
arch/metag/include/asm/tlb.h [new file with mode: 0644]
arch/metag/include/asm/tlbflush.h [new file with mode: 0644]
arch/metag/include/asm/topology.h [new file with mode: 0644]
arch/metag/include/asm/traps.h [new file with mode: 0644]
arch/metag/include/asm/uaccess.h [new file with mode: 0644]
arch/metag/include/asm/unistd.h [new file with mode: 0644]
arch/metag/include/asm/user_gateway.h [new file with mode: 0644]
arch/metag/include/uapi/asm/Kbuild [new file with mode: 0644]
arch/metag/include/uapi/asm/byteorder.h [new file with mode: 0644]
arch/metag/include/uapi/asm/ptrace.h [new file with mode: 0644]
arch/metag/include/uapi/asm/resource.h [new file with mode: 0644]
arch/metag/include/uapi/asm/sigcontext.h [new file with mode: 0644]
arch/metag/include/uapi/asm/siginfo.h [new file with mode: 0644]
arch/metag/include/uapi/asm/swab.h [new file with mode: 0644]
arch/metag/include/uapi/asm/unistd.h [new file with mode: 0644]
arch/metag/kernel/.gitignore [new file with mode: 0644]
arch/metag/kernel/Makefile [new file with mode: 0644]
arch/metag/kernel/asm-offsets.c [new file with mode: 0644]
arch/metag/kernel/cachepart.c [new file with mode: 0644]
arch/metag/kernel/clock.c [new file with mode: 0644]
arch/metag/kernel/core_reg.c [new file with mode: 0644]
arch/metag/kernel/da.c [new file with mode: 0644]
arch/metag/kernel/devtree.c [new file with mode: 0644]
arch/metag/kernel/dma.c [new file with mode: 0644]
arch/metag/kernel/ftrace.c [new file with mode: 0644]
arch/metag/kernel/ftrace_stub.S [new file with mode: 0644]
arch/metag/kernel/head.S [new file with mode: 0644]
arch/metag/kernel/irq.c [new file with mode: 0644]
arch/metag/kernel/kick.c [new file with mode: 0644]
arch/metag/kernel/machines.c [new file with mode: 0644]
arch/metag/kernel/metag_ksyms.c [new file with mode: 0644]
arch/metag/kernel/module.c [new file with mode: 0644]
arch/metag/kernel/perf/Makefile [new file with mode: 0644]
arch/metag/kernel/perf/perf_event.c [new file with mode: 0644]
arch/metag/kernel/perf/perf_event.h [new file with mode: 0644]
arch/metag/kernel/perf_callchain.c [new file with mode: 0644]
arch/metag/kernel/process.c [new file with mode: 0644]
arch/metag/kernel/ptrace.c [new file with mode: 0644]
arch/metag/kernel/setup.c [new file with mode: 0644]
arch/metag/kernel/signal.c [new file with mode: 0644]
arch/metag/kernel/smp.c [new file with mode: 0644]
arch/metag/kernel/stacktrace.c [new file with mode: 0644]
arch/metag/kernel/sys_metag.c [new file with mode: 0644]
arch/metag/kernel/tbiunexp.S [new file with mode: 0644]
arch/metag/kernel/tcm.c [new file with mode: 0644]
arch/metag/kernel/time.c [new file with mode: 0644]
arch/metag/kernel/topology.c [new file with mode: 0644]
arch/metag/kernel/traps.c [new file with mode: 0644]
arch/metag/kernel/user_gateway.S [new file with mode: 0644]
arch/metag/kernel/vmlinux.lds.S [new file with mode: 0644]
arch/metag/lib/Makefile [new file with mode: 0644]
arch/metag/lib/ashldi3.S [new file with mode: 0644]
arch/metag/lib/ashrdi3.S [new file with mode: 0644]
arch/metag/lib/checksum.c [new file with mode: 0644]
arch/metag/lib/clear_page.S [new file with mode: 0644]
arch/metag/lib/cmpdi2.S [new file with mode: 0644]
arch/metag/lib/copy_page.S [new file with mode: 0644]
arch/metag/lib/delay.c [new file with mode: 0644]
arch/metag/lib/div64.S [new file with mode: 0644]
arch/metag/lib/divsi3.S [new file with mode: 0644]
arch/metag/lib/ip_fast_csum.S [new file with mode: 0644]
arch/metag/lib/lshrdi3.S [new file with mode: 0644]
arch/metag/lib/memcpy.S [new file with mode: 0644]
arch/metag/lib/memmove.S [new file with mode: 0644]
arch/metag/lib/memset.S [new file with mode: 0644]
arch/metag/lib/modsi3.S [new file with mode: 0644]
arch/metag/lib/muldi3.S [new file with mode: 0644]
arch/metag/lib/ucmpdi2.S [new file with mode: 0644]
arch/metag/lib/usercopy.c [new file with mode: 0644]
arch/metag/mm/Kconfig [new file with mode: 0644]
arch/metag/mm/Makefile [new file with mode: 0644]
arch/metag/mm/cache.c [new file with mode: 0644]
arch/metag/mm/extable.c [new file with mode: 0644]
arch/metag/mm/fault.c [new file with mode: 0644]
arch/metag/mm/highmem.c [new file with mode: 0644]
arch/metag/mm/hugetlbpage.c [new file with mode: 0644]
arch/metag/mm/init.c [new file with mode: 0644]
arch/metag/mm/ioremap.c [new file with mode: 0644]
arch/metag/mm/l2cache.c [new file with mode: 0644]
arch/metag/mm/maccess.c [new file with mode: 0644]
arch/metag/mm/mmu-meta1.c [new file with mode: 0644]
arch/metag/mm/mmu-meta2.c [new file with mode: 0644]
arch/metag/mm/numa.c [new file with mode: 0644]
arch/metag/tbx/Makefile [new file with mode: 0644]
arch/metag/tbx/tbicore.S [new file with mode: 0644]
arch/metag/tbx/tbictx.S [new file with mode: 0644]
arch/metag/tbx/tbictxfpu.S [new file with mode: 0644]
arch/metag/tbx/tbidefr.S [new file with mode: 0644]
arch/metag/tbx/tbidspram.S [new file with mode: 0644]
arch/metag/tbx/tbilogf.S [new file with mode: 0644]
arch/metag/tbx/tbipcx.S [new file with mode: 0644]
arch/metag/tbx/tbiroot.S [new file with mode: 0644]
arch/metag/tbx/tbisoft.S [new file with mode: 0644]
arch/metag/tbx/tbistring.c [new file with mode: 0644]
arch/metag/tbx/tbitimer.S [new file with mode: 0644]
arch/microblaze/Kconfig
arch/microblaze/Makefile
arch/microblaze/boot/.gitignore [new file with mode: 0644]
arch/microblaze/include/asm/io.h
arch/microblaze/kernel/.gitignore [new file with mode: 0644]
arch/microblaze/kernel/cpu/cache.c
arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
arch/microblaze/kernel/cpu/cpuinfo.c
arch/microblaze/kernel/cpu/pvr.c
arch/microblaze/kernel/dma.c
arch/microblaze/kernel/early_printk.c
arch/microblaze/kernel/exceptions.c
arch/microblaze/kernel/ftrace.c
arch/microblaze/kernel/heartbeat.c
arch/microblaze/kernel/intc.c
arch/microblaze/kernel/kgdb.c
arch/microblaze/kernel/microblaze_ksyms.c
arch/microblaze/kernel/module.c
arch/microblaze/kernel/process.c
arch/microblaze/kernel/prom.c
arch/microblaze/kernel/prom_parse.c
arch/microblaze/kernel/ptrace.c
arch/microblaze/kernel/setup.c
arch/microblaze/kernel/signal.c
arch/microblaze/kernel/stacktrace.c
arch/microblaze/kernel/sys_microblaze.c
arch/microblaze/kernel/traps.c
arch/microblaze/kernel/unwind.c
arch/microblaze/lib/ashldi3.c
arch/microblaze/lib/ashrdi3.c
arch/microblaze/lib/cmpdi2.c
arch/microblaze/lib/lshrdi3.c
arch/microblaze/lib/memcpy.c
arch/microblaze/lib/memmove.c
arch/microblaze/lib/memset.c
arch/microblaze/lib/muldi3.c
arch/microblaze/lib/uaccess_old.S
arch/microblaze/lib/ucmpdi2.c
arch/microblaze/mm/consistent.c
arch/microblaze/mm/fault.c
arch/microblaze/mm/highmem.c
arch/microblaze/mm/init.c
arch/microblaze/mm/pgtable.c
arch/microblaze/pci/indirect_pci.c
arch/microblaze/pci/iomap.c
arch/microblaze/pci/pci-common.c
arch/microblaze/pci/xilinx_pci.c
arch/mips/Kbuild.platforms
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/alchemy/Platform
arch/mips/alchemy/board-gpr.c
arch/mips/alchemy/board-mtx1.c
arch/mips/alchemy/common/dbdma.c
arch/mips/alchemy/common/gpiolib.c
arch/mips/alchemy/common/irq.c
arch/mips/alchemy/common/platform.c
arch/mips/alchemy/common/setup.c
arch/mips/alchemy/common/sleeper.S
arch/mips/alchemy/common/time.c
arch/mips/alchemy/common/usb.c
arch/mips/alchemy/devboards/bcsr.c
arch/mips/alchemy/devboards/db1000.c
arch/mips/alchemy/devboards/db1200.c
arch/mips/alchemy/devboards/db1300.c
arch/mips/alchemy/devboards/db1550.c
arch/mips/alchemy/devboards/pm.c
arch/mips/ar7/Platform
arch/mips/ar7/platform.c
arch/mips/ath79/Kconfig
arch/mips/ath79/Makefile
arch/mips/ath79/clock.c
arch/mips/ath79/common.c
arch/mips/ath79/dev-common.c
arch/mips/ath79/dev-usb.c
arch/mips/ath79/dev-wmac.c
arch/mips/ath79/early_printk.c
arch/mips/ath79/gpio.c
arch/mips/ath79/irq.c
arch/mips/ath79/mach-ap121.c
arch/mips/ath79/mach-ap136.c [new file with mode: 0644]
arch/mips/ath79/mach-ap81.c
arch/mips/ath79/mach-db120.c
arch/mips/ath79/mach-pb44.c
arch/mips/ath79/machtypes.h
arch/mips/ath79/pci.c
arch/mips/ath79/pci.h
arch/mips/ath79/setup.c
arch/mips/bcm47xx/Makefile
arch/mips/bcm47xx/nvram.c
arch/mips/bcm47xx/setup.c
arch/mips/bcm47xx/sprom.c
arch/mips/bcm47xx/wgt634u.c
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/early_printk.c
arch/mips/boot/Makefile
arch/mips/boot/compressed/Makefile
arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
arch/mips/boot/compressed/decompress.c
arch/mips/boot/compressed/head.S
arch/mips/boot/ecoff.h
arch/mips/boot/elf2ecoff.c
arch/mips/cavium-octeon/Kconfig
arch/mips/cavium-octeon/Makefile
arch/mips/cavium-octeon/executive/cvmx-bootmem.c
arch/mips/cavium-octeon/executive/cvmx-helper-board.c
arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
arch/mips/cavium-octeon/executive/cvmx-helper-util.c
arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
arch/mips/cavium-octeon/executive/cvmx-helper.c
arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
arch/mips/cavium-octeon/executive/cvmx-l2c.c
arch/mips/cavium-octeon/executive/cvmx-pko.c
arch/mips/cavium-octeon/executive/cvmx-spi.c
arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
arch/mips/cavium-octeon/oct_ilm.c [new file with mode: 0644]
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/octeon-memcpy.S
arch/mips/cavium-octeon/octeon-platform.c
arch/mips/cavium-octeon/octeon_3xxx.dts
arch/mips/cavium-octeon/octeon_68xx.dts
arch/mips/cavium-octeon/octeon_boot.h
arch/mips/cavium-octeon/setup.c
arch/mips/cavium-octeon/smp.c
arch/mips/cobalt/led.c
arch/mips/cobalt/mtd.c
arch/mips/cobalt/rtc.c
arch/mips/configs/ath79_defconfig
arch/mips/configs/pnx8550_jbs_defconfig [deleted file]
arch/mips/configs/pnx8550_stb810_defconfig [deleted file]
arch/mips/configs/rt305x_defconfig [new file with mode: 0644]
arch/mips/dec/int-handler.S
arch/mips/dec/kn02xa-berr.c
arch/mips/dec/prom/call_o32.S
arch/mips/dec/prom/dectypes.h
arch/mips/dec/prom/init.c
arch/mips/dec/prom/memory.c
arch/mips/dec/setup.c
arch/mips/dec/wbflush.c
arch/mips/emma/markeins/irq.c
arch/mips/emma/markeins/platform.c
arch/mips/emma/markeins/setup.c
arch/mips/fw/arc/file.c
arch/mips/fw/arc/identify.c
arch/mips/fw/arc/memory.c
arch/mips/fw/arc/promlib.c
arch/mips/fw/lib/call_o32.S
arch/mips/fw/sni/sniprom.c
arch/mips/include/asm/abi.h
arch/mips/include/asm/addrspace.h
arch/mips/include/asm/asm.h
arch/mips/include/asm/atomic.h
arch/mips/include/asm/barrier.h
arch/mips/include/asm/bcache.h
arch/mips/include/asm/bitops.h
arch/mips/include/asm/bootinfo.h
arch/mips/include/asm/break.h [new file with mode: 0644]
arch/mips/include/asm/cacheops.h
arch/mips/include/asm/checksum.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/compat-signal.h
arch/mips/include/asm/compat.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu-info.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/dec/ioasic_addrs.h
arch/mips/include/asm/dec/kn01.h
arch/mips/include/asm/dec/kn02ca.h
arch/mips/include/asm/dec/prom.h
arch/mips/include/asm/dma-mapping.h
arch/mips/include/asm/dma.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/emma/emma2rh.h
arch/mips/include/asm/emma/markeins.h
arch/mips/include/asm/fixmap.h
arch/mips/include/asm/floppy.h
arch/mips/include/asm/fpregdef.h
arch/mips/include/asm/fpu.h
arch/mips/include/asm/futex.h
arch/mips/include/asm/fw/arc/hinv.h
arch/mips/include/asm/fw/arc/types.h
arch/mips/include/asm/fw/cfe/cfe_api.h
arch/mips/include/asm/fw/cfe/cfe_error.h
arch/mips/include/asm/gcmpregs.h
arch/mips/include/asm/gic.h
arch/mips/include/asm/gio_device.h
arch/mips/include/asm/gt64120.h
arch/mips/include/asm/hazards.h
arch/mips/include/asm/highmem.h
arch/mips/include/asm/inst.h
arch/mips/include/asm/io.h
arch/mips/include/asm/ip32/crime.h
arch/mips/include/asm/ip32/ip32_ints.h
arch/mips/include/asm/ip32/mace.h
arch/mips/include/asm/irq.h
arch/mips/include/asm/irq_cpu.h
arch/mips/include/asm/isadep.h
arch/mips/include/asm/jazz.h
arch/mips/include/asm/jazzdma.h
arch/mips/include/asm/kmap_types.h
arch/mips/include/asm/kprobes.h
arch/mips/include/asm/lasat/eeprom.h
arch/mips/include/asm/lasat/lasat.h
arch/mips/include/asm/lasat/serial.h
arch/mips/include/asm/local.h
arch/mips/include/asm/m48t37.h
arch/mips/include/asm/mach-ar7/ar7.h
arch/mips/include/asm/mach-ar7/irq.h
arch/mips/include/asm/mach-ath79/ar71xx_regs.h
arch/mips/include/asm/mach-ath79/ar933x_uart.h
arch/mips/include/asm/mach-ath79/ath79.h
arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
arch/mips/include/asm/mach-ath79/irq.h
arch/mips/include/asm/mach-ath79/pci.h [deleted file]
arch/mips/include/asm/mach-au1x00/au1000.h
arch/mips/include/asm/mach-au1x00/au1000_dma.h
arch/mips/include/asm/mach-au1x00/au1100_mmc.h
arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
arch/mips/include/asm/mach-au1x00/au1xxx_ide.h
arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
arch/mips/include/asm/mach-au1x00/gpio-au1000.h
arch/mips/include/asm/mach-au1x00/gpio-au1300.h
arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h [new file with mode: 0644]
arch/mips/include/asm/mach-bcm47xx/nvram.h [deleted file]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mach-bcm63xx/irq.h
arch/mips/include/asm/mach-cavium-octeon/irq.h
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
arch/mips/include/asm/mach-cobalt/mach-gt64120.h
arch/mips/include/asm/mach-db1x00/bcsr.h
arch/mips/include/asm/mach-db1x00/db1200.h
arch/mips/include/asm/mach-db1x00/db1300.h
arch/mips/include/asm/mach-emma2rh/irq.h
arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
arch/mips/include/asm/mach-generic/floppy.h
arch/mips/include/asm/mach-generic/ide.h
arch/mips/include/asm/mach-generic/irq.h
arch/mips/include/asm/mach-generic/spaces.h
arch/mips/include/asm/mach-ip27/kernel-entry-init.h
arch/mips/include/asm/mach-ip27/mmzone.h
arch/mips/include/asm/mach-ip27/topology.h
arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
arch/mips/include/asm/mach-ip28/spaces.h
arch/mips/include/asm/mach-ip32/dma-coherence.h
arch/mips/include/asm/mach-ip32/war.h
arch/mips/include/asm/mach-jazz/floppy.h
arch/mips/include/asm/mach-jz4740/clock.h
arch/mips/include/asm/mach-jz4740/dma.h
arch/mips/include/asm/mach-jz4740/gpio.h
arch/mips/include/asm/mach-jz4740/irq.h
arch/mips/include/asm/mach-jz4740/platform.h
arch/mips/include/asm/mach-jz4740/timer.h
arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
arch/mips/include/asm/mach-lantiq/lantiq.h
arch/mips/include/asm/mach-lantiq/war.h
arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
arch/mips/include/asm/mach-lasat/mach-gt64120.h
arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
arch/mips/include/asm/mach-loongson/gpio.h
arch/mips/include/asm/mach-loongson/loongson.h
arch/mips/include/asm/mach-loongson/machine.h
arch/mips/include/asm/mach-loongson/mem.h
arch/mips/include/asm/mach-loongson1/irq.h
arch/mips/include/asm/mach-loongson1/loongson1.h
arch/mips/include/asm/mach-loongson1/platform.h
arch/mips/include/asm/mach-loongson1/prom.h
arch/mips/include/asm/mach-loongson1/regs-clk.h
arch/mips/include/asm/mach-loongson1/regs-wdt.h
arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
arch/mips/include/asm/mach-malta/irq.h
arch/mips/include/asm/mach-malta/mach-gt64120.h
arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h [new file with mode: 0644]
arch/mips/include/asm/mach-pmcs-msp71xx/war.h [new file with mode: 0644]
arch/mips/include/asm/mach-pnx833x/irq-mapping.h
arch/mips/include/asm/mach-pnx833x/pnx833x.h
arch/mips/include/asm/mach-pnx8550/cm.h [deleted file]
arch/mips/include/asm/mach-pnx8550/glb.h [deleted file]
arch/mips/include/asm/mach-pnx8550/int.h [deleted file]
arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h [deleted file]
arch/mips/include/asm/mach-pnx8550/nand.h [deleted file]
arch/mips/include/asm/mach-pnx8550/pci.h [deleted file]
arch/mips/include/asm/mach-pnx8550/uart.h [deleted file]
arch/mips/include/asm/mach-pnx8550/usb.h [deleted file]
arch/mips/include/asm/mach-pnx8550/war.h [deleted file]
arch/mips/include/asm/mach-powertv/asic.h
arch/mips/include/asm/mach-powertv/asic_regs.h
arch/mips/include/asm/mach-powertv/dma-coherence.h
arch/mips/include/asm/mach-powertv/interrupts.h
arch/mips/include/asm/mach-ralink/ralink_regs.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt305x.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/war.h [new file with mode: 0644]
arch/mips/include/asm/mach-rc32434/ddr.h
arch/mips/include/asm/mach-rc32434/dma.h
arch/mips/include/asm/mach-rc32434/dma_v.h
arch/mips/include/asm/mach-rc32434/eth.h
arch/mips/include/asm/mach-rc32434/gpio.h
arch/mips/include/asm/mach-rc32434/irq.h
arch/mips/include/asm/mach-rc32434/pci.h
arch/mips/include/asm/mach-rc32434/rb.h
arch/mips/include/asm/mach-rc32434/rc32434.h
arch/mips/include/asm/mach-rc32434/timer.h
arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
arch/mips/include/asm/mach-sead3/irq.h
arch/mips/include/asm/mach-sibyte/war.h
arch/mips/include/asm/mach-wrppmc/mach-gt64120.h
arch/mips/include/asm/mc146818-time.h
arch/mips/include/asm/mips-boards/bonito64.h
arch/mips/include/asm/mips-boards/generic.h
arch/mips/include/asm/mips-boards/launch.h
arch/mips/include/asm/mips-boards/malta.h
arch/mips/include/asm/mips-boards/maltaint.h
arch/mips/include/asm/mips-boards/piix4.h
arch/mips/include/asm/mips-boards/prom.h
arch/mips/include/asm/mips-boards/sead3int.h
arch/mips/include/asm/mips-boards/sim.h
arch/mips/include/asm/mipsmtregs.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/msc01_ic.h
arch/mips/include/asm/netlogic/common.h
arch/mips/include/asm/netlogic/haldefs.h
arch/mips/include/asm/netlogic/mips-extns.h
arch/mips/include/asm/netlogic/xlp-hal/bridge.h
arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
arch/mips/include/asm/netlogic/xlp-hal/iomap.h
arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
arch/mips/include/asm/netlogic/xlp-hal/pic.h
arch/mips/include/asm/netlogic/xlp-hal/sys.h
arch/mips/include/asm/netlogic/xlp-hal/uart.h
arch/mips/include/asm/netlogic/xlr/fmn.h
arch/mips/include/asm/netlogic/xlr/iomap.h
arch/mips/include/asm/netlogic/xlr/msidef.h
arch/mips/include/asm/netlogic/xlr/pic.h
arch/mips/include/asm/nile4.h
arch/mips/include/asm/octeon/cvmx-address.h
arch/mips/include/asm/octeon/cvmx-bootinfo.h
arch/mips/include/asm/octeon/cvmx-bootmem.h
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
arch/mips/include/asm/octeon/cvmx-config.h
arch/mips/include/asm/octeon/cvmx-fau.h
arch/mips/include/asm/octeon/cvmx-fpa.h
arch/mips/include/asm/octeon/cvmx-helper-board.h
arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
arch/mips/include/asm/octeon/cvmx-helper-util.h
arch/mips/include/asm/octeon/cvmx-helper-xaui.h
arch/mips/include/asm/octeon/cvmx-helper.h
arch/mips/include/asm/octeon/cvmx-ipd.h
arch/mips/include/asm/octeon/cvmx-l2c.h
arch/mips/include/asm/octeon/cvmx-mdio.h
arch/mips/include/asm/octeon/cvmx-pip-defs.h
arch/mips/include/asm/octeon/cvmx-pip.h
arch/mips/include/asm/octeon/cvmx-pko.h
arch/mips/include/asm/octeon/cvmx-pow.h
arch/mips/include/asm/octeon/cvmx-scratch.h
arch/mips/include/asm/octeon/cvmx-spi.h
arch/mips/include/asm/octeon/cvmx-spinlock.h
arch/mips/include/asm/octeon/cvmx-sysinfo.h
arch/mips/include/asm/octeon/cvmx-wqe.h
arch/mips/include/asm/octeon/cvmx.h
arch/mips/include/asm/octeon/octeon-feature.h
arch/mips/include/asm/octeon/octeon-model.h
arch/mips/include/asm/octeon/octeon.h
arch/mips/include/asm/octeon/pci-octeon.h
arch/mips/include/asm/paccess.h
arch/mips/include/asm/page.h
arch/mips/include/asm/pci.h
arch/mips/include/asm/pci/bridge.h
arch/mips/include/asm/pgtable-32.h
arch/mips/include/asm/pgtable-64.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h [deleted file]
arch/mips/include/asm/pmc-sierra/msp71xx/war.h [deleted file]
arch/mips/include/asm/processor.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/regdef.h
arch/mips/include/asm/rtlx.h
arch/mips/include/asm/seccomp.h
arch/mips/include/asm/sgi/gio.h
arch/mips/include/asm/sgi/hpc3.h
arch/mips/include/asm/sgi/ioc.h
arch/mips/include/asm/sgi/ip22.h
arch/mips/include/asm/sgi/mc.h
arch/mips/include/asm/sgi/pi1.h
arch/mips/include/asm/sgialib.h
arch/mips/include/asm/sgiarcs.h
arch/mips/include/asm/shmparam.h
arch/mips/include/asm/sibyte/bcm1480_int.h
arch/mips/include/asm/sibyte/bcm1480_l2c.h
arch/mips/include/asm/sibyte/bcm1480_mc.h
arch/mips/include/asm/sibyte/bcm1480_regs.h
arch/mips/include/asm/sibyte/bcm1480_scd.h
arch/mips/include/asm/sibyte/bigsur.h
arch/mips/include/asm/sibyte/carmel.h
arch/mips/include/asm/sibyte/sb1250.h
arch/mips/include/asm/sibyte/sb1250_defs.h
arch/mips/include/asm/sibyte/sb1250_dma.h
arch/mips/include/asm/sibyte/sb1250_genbus.h
arch/mips/include/asm/sibyte/sb1250_int.h
arch/mips/include/asm/sibyte/sb1250_l2c.h
arch/mips/include/asm/sibyte/sb1250_ldt.h
arch/mips/include/asm/sibyte/sb1250_mac.h
arch/mips/include/asm/sibyte/sb1250_mc.h
arch/mips/include/asm/sibyte/sb1250_regs.h
arch/mips/include/asm/sibyte/sb1250_scd.h
arch/mips/include/asm/sibyte/sb1250_smbus.h
arch/mips/include/asm/sibyte/sb1250_syncser.h
arch/mips/include/asm/sibyte/sb1250_uart.h
arch/mips/include/asm/sibyte/sentosa.h
arch/mips/include/asm/sibyte/swarm.h
arch/mips/include/asm/smp.h
arch/mips/include/asm/smtc.h
arch/mips/include/asm/sn/addrs.h
arch/mips/include/asm/sn/agent.h
arch/mips/include/asm/sn/arch.h
arch/mips/include/asm/sn/fru.h
arch/mips/include/asm/sn/gda.h
arch/mips/include/asm/sn/intr.h
arch/mips/include/asm/sn/io.h
arch/mips/include/asm/sn/ioc3.h
arch/mips/include/asm/sn/klconfig.h
arch/mips/include/asm/sn/kldir.h
arch/mips/include/asm/sn/launch.h
arch/mips/include/asm/sn/mapped_kernel.h
arch/mips/include/asm/sn/nmi.h
arch/mips/include/asm/sn/sn0/addrs.h
arch/mips/include/asm/sn/sn0/arch.h
arch/mips/include/asm/sn/sn0/hub.h
arch/mips/include/asm/sn/sn0/hubio.h
arch/mips/include/asm/sn/sn0/hubmd.h
arch/mips/include/asm/sn/sn0/hubni.h
arch/mips/include/asm/sn/sn0/hubpi.h
arch/mips/include/asm/sn/sn0/ip27.h
arch/mips/include/asm/sn/types.h
arch/mips/include/asm/sni.h
arch/mips/include/asm/sparsemem.h
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/spinlock_types.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/string.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/time.h
arch/mips/include/asm/tlb.h
arch/mips/include/asm/topology.h
arch/mips/include/asm/traps.h
arch/mips/include/asm/txx9/jmr3927.h
arch/mips/include/asm/txx9/rbtx4927.h
arch/mips/include/asm/txx9/rbtx4938.h
arch/mips/include/asm/txx9/rbtx4939.h
arch/mips/include/asm/txx9/smsc_fdc37m81x.h
arch/mips/include/asm/txx9/tx3927.h
arch/mips/include/asm/txx9/tx4927.h
arch/mips/include/asm/txx9/tx4927pcic.h
arch/mips/include/asm/txx9/tx4938.h
arch/mips/include/asm/txx9/tx4939.h
arch/mips/include/asm/txx9tmr.h
arch/mips/include/asm/uaccess.h
arch/mips/include/asm/uasm.h
arch/mips/include/asm/user.h
arch/mips/include/asm/vr41xx/pci.h
arch/mips/include/asm/vr41xx/tb0287.h
arch/mips/include/asm/war.h
arch/mips/include/asm/xtalk/xtalk.h
arch/mips/include/asm/xtalk/xwidget.h
arch/mips/include/uapi/asm/Kbuild
arch/mips/include/uapi/asm/break.h
arch/mips/include/uapi/asm/cachectl.h
arch/mips/include/uapi/asm/errno.h
arch/mips/include/uapi/asm/fcntl.h
arch/mips/include/uapi/asm/inst.h [new file with mode: 0644]
arch/mips/include/uapi/asm/ioctls.h
arch/mips/include/uapi/asm/mman.h
arch/mips/include/uapi/asm/ptrace.h
arch/mips/include/uapi/asm/sembuf.h
arch/mips/include/uapi/asm/siginfo.h
arch/mips/include/uapi/asm/signal.h
arch/mips/include/uapi/asm/socket.h
arch/mips/include/uapi/asm/sockios.h
arch/mips/include/uapi/asm/stat.h
arch/mips/include/uapi/asm/statfs.h
arch/mips/include/uapi/asm/sysmips.h
arch/mips/include/uapi/asm/termbits.h
arch/mips/include/uapi/asm/termios.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/jazz/Makefile
arch/mips/jazz/irq.c
arch/mips/jazz/jazzdma.c
arch/mips/jazz/setup.c
arch/mips/jz4740/board-qi_lb60.c
arch/mips/jz4740/clock-debugfs.c
arch/mips/jz4740/clock.c
arch/mips/jz4740/dma.c
arch/mips/jz4740/gpio.c
arch/mips/jz4740/irq.c
arch/mips/jz4740/irq.h
arch/mips/jz4740/platform.c
arch/mips/jz4740/pm.c
arch/mips/jz4740/prom.c
arch/mips/jz4740/reset.c
arch/mips/jz4740/setup.c
arch/mips/jz4740/time.c
arch/mips/jz4740/timer.c
arch/mips/kernel/Makefile
arch/mips/kernel/binfmt_elfn32.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/bmips_vec.S
arch/mips/kernel/branch.c
arch/mips/kernel/cevt-bcm1480.c
arch/mips/kernel/cevt-ds1287.c
arch/mips/kernel/cevt-gt641xx.c
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cevt-sb1250.c
arch/mips/kernel/cevt-smtc.c
arch/mips/kernel/cevt-txx9.c
arch/mips/kernel/cpu-bugs64.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/cpufreq/loongson2_cpufreq.c
arch/mips/kernel/crash.c
arch/mips/kernel/csrc-bcm1480.c
arch/mips/kernel/csrc-gic.c [new file with mode: 0644]
arch/mips/kernel/csrc-ioasic.c
arch/mips/kernel/csrc-powertv.c
arch/mips/kernel/csrc-sb1250.c
arch/mips/kernel/early_printk.c
arch/mips/kernel/ftrace.c
arch/mips/kernel/genex.S
arch/mips/kernel/head.S
arch/mips/kernel/i8259.c
arch/mips/kernel/irq-gt641xx.c
arch/mips/kernel/irq-msc01.c
arch/mips/kernel/irq-rm7000.c
arch/mips/kernel/irq.c
arch/mips/kernel/irq_cpu.c
arch/mips/kernel/irq_txx9.c
arch/mips/kernel/kgdb.c
arch/mips/kernel/kprobes.c
arch/mips/kernel/linux32.c
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/module-rela.c
arch/mips/kernel/module.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/perf_event_mipsxx.c
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/ptrace32.c
arch/mips/kernel/r2300_fpu.S
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/relocate_kernel.S
arch/mips/kernel/rtlx.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/signal_n32.c
arch/mips/kernel/smp-cmp.c
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smtc-asm.S
arch/mips/kernel/smtc-proc.c
arch/mips/kernel/smtc.c
arch/mips/kernel/sync-r4k.c
arch/mips/kernel/syscall.c
arch/mips/kernel/time.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/kernel/vmlinux.lds.S
arch/mips/kernel/vpe.c
arch/mips/kernel/watch.c
arch/mips/lantiq/clk.c
arch/mips/lantiq/clk.h
arch/mips/lantiq/dts/danube.dtsi
arch/mips/lantiq/dts/easy50712.dts
arch/mips/lantiq/falcon/sysctrl.c
arch/mips/lantiq/irq.c
arch/mips/lantiq/prom.h
arch/mips/lantiq/xway/clk.c
arch/mips/lantiq/xway/reset.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/lasat/Makefile
arch/mips/lasat/ds1603.h
arch/mips/lasat/image/Makefile
arch/mips/lasat/image/head.S
arch/mips/lasat/picvue.c
arch/mips/lasat/picvue.h
arch/mips/lasat/picvue_proc.c
arch/mips/lasat/serial.c
arch/mips/lasat/sysctl.c
arch/mips/lib/bitops.c
arch/mips/lib/csum_partial.S
arch/mips/lib/delay.c
arch/mips/lib/dump_tlb.c
arch/mips/lib/memcpy.S
arch/mips/lib/memset.S
arch/mips/lib/r3k_dump_tlb.c
arch/mips/lib/strncpy_user.S
arch/mips/lib/strnlen_user.S
arch/mips/lib/uncached.c
arch/mips/loongson/Makefile
arch/mips/loongson/common/bonito-irq.c
arch/mips/loongson/common/cmdline.c
arch/mips/loongson/common/cs5536/cs5536_acc.c
arch/mips/loongson/common/cs5536/cs5536_ehci.c
arch/mips/loongson/common/cs5536/cs5536_ide.c
arch/mips/loongson/common/cs5536/cs5536_isa.c
arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
arch/mips/loongson/common/cs5536/cs5536_ohci.c
arch/mips/loongson/common/cs5536/cs5536_pci.c
arch/mips/loongson/common/early_printk.c
arch/mips/loongson/common/env.c
arch/mips/loongson/common/gpio.c
arch/mips/loongson/common/init.c
arch/mips/loongson/common/irq.c
arch/mips/loongson/common/machtype.c
arch/mips/loongson/common/mem.c
arch/mips/loongson/common/pci.c
arch/mips/loongson/common/platform.c
arch/mips/loongson/common/reset.c
arch/mips/loongson/common/serial.c
arch/mips/loongson/common/setup.c
arch/mips/loongson/common/time.c
arch/mips/loongson/common/uart_base.c
arch/mips/loongson/fuloong-2e/irq.c
arch/mips/loongson/fuloong-2e/reset.c
arch/mips/loongson/lemote-2f/ec_kb3310b.h
arch/mips/loongson/lemote-2f/irq.c
arch/mips/loongson/lemote-2f/machtype.c
arch/mips/loongson/lemote-2f/reset.c
arch/mips/loongson1/Platform
arch/mips/loongson1/common/clock.c
arch/mips/loongson1/common/irq.c
arch/mips/loongson1/common/platform.c
arch/mips/loongson1/common/prom.c
arch/mips/loongson1/common/reset.c
arch/mips/loongson1/common/setup.c
arch/mips/loongson1/ls1b/board.c
arch/mips/math-emu/Makefile
arch/mips/math-emu/cp1emu.c
arch/mips/math-emu/dp_add.c
arch/mips/math-emu/dp_sqrt.c
arch/mips/math-emu/dp_sub.c
arch/mips/math-emu/ieee754.c
arch/mips/math-emu/ieee754dp.c
arch/mips/math-emu/ieee754int.h
arch/mips/math-emu/ieee754sp.c
arch/mips/math-emu/ieee754xcpt.c
arch/mips/math-emu/kernel_linkage.c
arch/mips/math-emu/sp_add.c
arch/mips/math-emu/sp_mul.c
arch/mips/math-emu/sp_sub.c
arch/mips/mm/Makefile
arch/mips/mm/c-octeon.c
arch/mips/mm/c-r3k.c
arch/mips/mm/c-r4k.c
arch/mips/mm/c-tx39.c
arch/mips/mm/cerr-sb1.c
arch/mips/mm/cex-gen.S
arch/mips/mm/cex-oct.S
arch/mips/mm/cex-sb1.S
arch/mips/mm/dma-default.c
arch/mips/mm/fault.c
arch/mips/mm/gup.c
arch/mips/mm/init.c
arch/mips/mm/ioremap.c
arch/mips/mm/page.c
arch/mips/mm/pgtable-64.c
arch/mips/mm/sc-ip22.c
arch/mips/mm/sc-r5k.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm.c
arch/mips/mti-malta/malta-amon.c
arch/mips/mti-malta/malta-cmdline.c
arch/mips/mti-malta/malta-display.c
arch/mips/mti-malta/malta-init.c
arch/mips/mti-malta/malta-int.c
arch/mips/mti-malta/malta-memory.c
arch/mips/mti-malta/malta-pci.c
arch/mips/mti-malta/malta-platform.c
arch/mips/mti-malta/malta-setup.c
arch/mips/mti-malta/malta-smtc.c
arch/mips/mti-malta/malta-time.c
arch/mips/mti-sead3/Makefile
arch/mips/mti-sead3/leds-sead3.c
arch/mips/mti-sead3/sead3-console.c
arch/mips/mti-sead3/sead3-display.c
arch/mips/mti-sead3/sead3-i2c-drv.c
arch/mips/mti-sead3/sead3-init.c
arch/mips/mti-sead3/sead3-memory.c [deleted file]
arch/mips/mti-sead3/sead3-net.c
arch/mips/mti-sead3/sead3-pic32-bus.c
arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
arch/mips/mti-sead3/sead3-setup.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/mti-sead3/sead3.dts [new file with mode: 0644]
arch/mips/netlogic/Platform
arch/mips/netlogic/common/irq.c
arch/mips/netlogic/common/smp.c
arch/mips/netlogic/common/smpboot.S
arch/mips/netlogic/common/time.c
arch/mips/netlogic/dts/xlp_evp.dts
arch/mips/netlogic/xlp/nlm_hal.c
arch/mips/netlogic/xlp/usb-init.c
arch/mips/netlogic/xlp/wakeup.c
arch/mips/netlogic/xlr/fmn-config.c
arch/mips/netlogic/xlr/platform-flash.c
arch/mips/netlogic/xlr/platform.c
arch/mips/netlogic/xlr/setup.c
arch/mips/oprofile/common.c
arch/mips/oprofile/op_model_loongson2.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/Makefile
arch/mips/pci/fixup-cobalt.c
arch/mips/pci/fixup-emma2rh.c
arch/mips/pci/fixup-fuloong2e.c
arch/mips/pci/fixup-ip32.c
arch/mips/pci/fixup-lemote2f.c
arch/mips/pci/fixup-malta.c
arch/mips/pci/fixup-pmcmsp.c
arch/mips/pci/fixup-pnx8550.c [deleted file]
arch/mips/pci/fixup-sni.c
arch/mips/pci/fixup-tb0219.c
arch/mips/pci/fixup-tb0287.c
arch/mips/pci/fixup-wrppmc.c
arch/mips/pci/ops-bcm63xx.c
arch/mips/pci/ops-bonito64.c
arch/mips/pci/ops-gt64xxx_pci0.c
arch/mips/pci/ops-lantiq.c
arch/mips/pci/ops-loongson2.c
arch/mips/pci/ops-msc.c
arch/mips/pci/ops-nile4.c
arch/mips/pci/ops-pmcmsp.c
arch/mips/pci/ops-pnx8550.c [deleted file]
arch/mips/pci/ops-rc32434.c
arch/mips/pci/ops-sni.c
arch/mips/pci/ops-tx4927.c
arch/mips/pci/ops-vr41xx.c
arch/mips/pci/pci-alchemy.c
arch/mips/pci/pci-ar71xx.c
arch/mips/pci/pci-ar724x.c
arch/mips/pci/pci-bcm1480.c
arch/mips/pci/pci-bcm1480ht.c
arch/mips/pci/pci-bcm47xx.c
arch/mips/pci/pci-bcm63xx.c
arch/mips/pci/pci-bcm63xx.h
arch/mips/pci/pci-ip27.c
arch/mips/pci/pci-ip32.c
arch/mips/pci/pci-lantiq.c
arch/mips/pci/pci-lasat.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pci-rc32434.c
arch/mips/pci/pci-sb1250.c
arch/mips/pci/pci-vr41xx.c
arch/mips/pci/pci-vr41xx.h
arch/mips/pci/pci-xlp.c
arch/mips/pci/pci-xlr.c
arch/mips/pci/pci.c
arch/mips/pci/pcie-octeon.c
arch/mips/pmc-sierra/Kconfig [deleted file]
arch/mips/pmc-sierra/Platform [deleted file]
arch/mips/pmc-sierra/msp71xx/Makefile [deleted file]
arch/mips/pmc-sierra/msp71xx/gpio.c [deleted file]
arch/mips/pmc-sierra/msp71xx/gpio_extended.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_elb.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_eth.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_irq.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_irq_per.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_pci.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_prom.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_serial.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_setup.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_smp.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_smtc.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_time.c [deleted file]
arch/mips/pmc-sierra/msp71xx/msp_usb.c [deleted file]
arch/mips/pmcs-msp71xx/Kconfig [new file with mode: 0644]
arch/mips/pmcs-msp71xx/Makefile [new file with mode: 0644]
arch/mips/pmcs-msp71xx/Platform [new file with mode: 0644]
arch/mips/pmcs-msp71xx/gpio.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/gpio_extended.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_elb.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_eth.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_hwbutton.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_irq.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_irq_cic.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_irq_per.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_irq_slp.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_pci.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_prom.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_serial.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_setup.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_smp.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_smtc.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_time.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_usb.c [new file with mode: 0644]
arch/mips/pnx833x/Platform
arch/mips/pnx833x/common/interrupts.c
arch/mips/pnx833x/common/platform.c
arch/mips/pnx833x/common/prom.c
arch/mips/pnx833x/common/reset.c
arch/mips/pnx833x/common/setup.c
arch/mips/pnx833x/stb22x/board.c
arch/mips/pnx8550/Makefile [deleted file]
arch/mips/pnx8550/Platform [deleted file]
arch/mips/pnx8550/common/Makefile [deleted file]
arch/mips/pnx8550/common/int.c [deleted file]
arch/mips/pnx8550/common/pci.c [deleted file]
arch/mips/pnx8550/common/platform.c [deleted file]
arch/mips/pnx8550/common/proc.c [deleted file]
arch/mips/pnx8550/common/prom.c [deleted file]
arch/mips/pnx8550/common/reset.c [deleted file]
arch/mips/pnx8550/common/setup.c [deleted file]
arch/mips/pnx8550/common/time.c [deleted file]
arch/mips/pnx8550/jbs/Makefile [deleted file]
arch/mips/pnx8550/jbs/board_setup.c [deleted file]
arch/mips/pnx8550/jbs/init.c [deleted file]
arch/mips/pnx8550/jbs/irqmap.c [deleted file]
arch/mips/pnx8550/stb810/Makefile [deleted file]
arch/mips/pnx8550/stb810/board_setup.c [deleted file]
arch/mips/pnx8550/stb810/irqmap.c [deleted file]
arch/mips/pnx8550/stb810/prom_init.c [deleted file]
arch/mips/power/cpu.c
arch/mips/power/hibernate.S
arch/mips/powertv/asic/asic-calliope.c
arch/mips/powertv/asic/asic-cronus.c
arch/mips/powertv/asic/asic-gaia.c
arch/mips/powertv/asic/asic-zeus.c
arch/mips/powertv/asic/asic_devices.c
arch/mips/powertv/asic/asic_int.c
arch/mips/powertv/asic/irq_asic.c
arch/mips/powertv/asic/prealloc-calliope.c
arch/mips/powertv/asic/prealloc-cronus.c
arch/mips/powertv/asic/prealloc-cronuslite.c
arch/mips/powertv/asic/prealloc-gaia.c
arch/mips/powertv/asic/prealloc-zeus.c
arch/mips/powertv/init.c
arch/mips/powertv/ioremap.c
arch/mips/powertv/memory.c
arch/mips/powertv/powertv-usb.c
arch/mips/ralink/Kconfig [new file with mode: 0644]
arch/mips/ralink/Makefile [new file with mode: 0644]
arch/mips/ralink/Platform [new file with mode: 0644]
arch/mips/ralink/clk.c [new file with mode: 0644]
arch/mips/ralink/common.h [new file with mode: 0644]
arch/mips/ralink/dts/Makefile [new file with mode: 0644]
arch/mips/ralink/dts/rt3050.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/rt3052_eval.dts [new file with mode: 0644]
arch/mips/ralink/early_printk.c [new file with mode: 0644]
arch/mips/ralink/irq.c [new file with mode: 0644]
arch/mips/ralink/of.c [new file with mode: 0644]
arch/mips/ralink/prom.c [new file with mode: 0644]
arch/mips/ralink/reset.c [new file with mode: 0644]
arch/mips/ralink/rt305x.c [new file with mode: 0644]
arch/mips/rb532/devices.c
arch/mips/rb532/gpio.c
arch/mips/rb532/irq.c
arch/mips/sgi-ip22/ip22-eisa.c
arch/mips/sgi-ip22/ip22-gio.c
arch/mips/sgi-ip22/ip22-int.c
arch/mips/sgi-ip22/ip22-mc.c
arch/mips/sgi-ip22/ip22-nvram.c
arch/mips/sgi-ip22/ip22-platform.c
arch/mips/sgi-ip22/ip22-reset.c
arch/mips/sgi-ip22/ip28-berr.c
arch/mips/sgi-ip27/ip27-berr.c
arch/mips/sgi-ip27/ip27-console.c
arch/mips/sgi-ip27/ip27-hubio.c
arch/mips/sgi-ip27/ip27-init.c
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sgi-ip27/ip27-memory.c
arch/mips/sgi-ip27/ip27-nmi.c
arch/mips/sgi-ip27/ip27-reset.c
arch/mips/sgi-ip27/ip27-smp.c
arch/mips/sgi-ip27/ip27-timer.c
arch/mips/sgi-ip27/ip27-xtalk.c
arch/mips/sgi-ip32/ip32-irq.c
arch/mips/sibyte/Platform
arch/mips/sibyte/bcm1480/irq.c
arch/mips/sibyte/common/cfe.c
arch/mips/sibyte/common/sb_tbprof.c
arch/mips/sibyte/sb1250/bus_watcher.c
arch/mips/sibyte/sb1250/irq.c
arch/mips/sibyte/sb1250/setup.c
arch/mips/sibyte/swarm/platform.c
arch/mips/sibyte/swarm/rtc_xicor1241.c
arch/mips/sni/a20r.c
arch/mips/sni/eisa.c
arch/mips/sni/irq.c
arch/mips/sni/pcimt.c
arch/mips/sni/pcit.c
arch/mips/sni/rm200.c
arch/mips/sni/setup.c
arch/mips/sni/time.c
arch/mips/txx9/Platform
arch/mips/txx9/generic/irq_tx4927.c
arch/mips/txx9/generic/irq_tx4939.c
arch/mips/txx9/generic/mem_tx4927.c
arch/mips/txx9/generic/pci.c
arch/mips/txx9/generic/setup.c
arch/mips/txx9/generic/setup_tx3927.c
arch/mips/txx9/generic/setup_tx4927.c
arch/mips/txx9/generic/setup_tx4938.c
arch/mips/txx9/generic/setup_tx4939.c
arch/mips/txx9/generic/smsc_fdc37m81x.c
arch/mips/txx9/rbtx4927/irq.c
arch/mips/txx9/rbtx4927/prom.c
arch/mips/txx9/rbtx4927/setup.c
arch/mips/txx9/rbtx4938/setup.c
arch/mips/txx9/rbtx4939/setup.c
arch/mips/vr41xx/common/bcu.c
arch/mips/vr41xx/common/cmu.c
arch/mips/vr41xx/common/giu.c
arch/mips/vr41xx/common/icu.c
arch/mips/vr41xx/common/pmu.c
arch/mips/vr41xx/common/rtc.c
arch/mips/vr41xx/common/type.c
arch/mips/wrppmc/Platform
arch/mips/wrppmc/irq.c
arch/mips/wrppmc/serial.c
arch/mn10300/Kconfig
arch/mn10300/include/asm/elf.h
arch/openrisc/Kconfig
arch/openrisc/include/asm/bitops.h
arch/openrisc/include/asm/elf.h
arch/openrisc/include/asm/processor.h
arch/openrisc/kernel/entry.S
arch/openrisc/kernel/head.S
arch/openrisc/mm/init.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/atomic.h
arch/parisc/kernel/binfmt_elf32.c
arch/parisc/kernel/signal.c
arch/parisc/kernel/sys_parisc.c
arch/parisc/kernel/sys_parisc32.c
arch/parisc/kernel/syscall_table.S
arch/parisc/lib/memcpy.c
arch/powerpc/Kconfig
arch/powerpc/crypto/sha1-powerpc-asm.S
arch/powerpc/include/asm/bitops.h
arch/powerpc/include/asm/elf.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/proc_powerpc.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/sys_ppc32.c
arch/powerpc/kvm/book3s_mmu_hpte.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/cell/spufs/file.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/cell/spufs/syscalls.c
arch/powerpc/platforms/pseries/hvCall_inst.c
arch/powerpc/platforms/pseries/hvcserver.c
arch/powerpc/platforms/pseries/scanlog.c
arch/s390/Kconfig
arch/s390/hypfs/hypfs_dbfs.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/elf.h
arch/s390/include/asm/futex.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/uaccess.h
arch/s390/kernel/compat_signal.c
arch/s390/kernel/compat_wrapper.S
arch/s390/kernel/debug.c
arch/s390/kernel/dis.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/module.c
arch/s390/kernel/signal.c
arch/s390/kernel/syscalls.S
arch/s390/lib/uaccess_mvcos.c
arch/s390/lib/uaccess_pt.c
arch/s390/lib/uaccess_std.c
arch/s390/mm/dump_pagetables.c
arch/s390/mm/vmem.c
arch/s390/pci/pci_debug.c
arch/s390/pci/pci_msi.c
arch/score/Kconfig
arch/score/include/asm/elf.h
arch/sh/Kconfig
arch/sh/kernel/kprobes.c
arch/sh/mm/alignment.c
arch/sparc/Kconfig
arch/sparc/include/asm/elf_32.h
arch/sparc/kernel/kprobes.c
arch/sparc/kernel/ldc.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/systbls_64.S
arch/tile/Kconfig
arch/unicore32/Kconfig
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/ia32/ia32_aout.c
arch/x86/ia32/sys_ia32.c
arch/x86/include/asm/efi.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/sys_ia32.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpuid.c
arch/x86/kernel/head.c
arch/x86/kernel/head_64.S
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/msr.c
arch/x86/kernel/nmi.c
arch/x86/kernel/pvclock.c
arch/x86/kernel/setup.c
arch/x86/kvm/mmu.c
arch/x86/mm/fault.c
arch/x86/mm/numa.c
arch/x86/mm/pageattr.c
arch/x86/mm/srat.c
arch/x86/pci/xen.c
arch/x86/platform/efi/efi.c
arch/x86/syscalls/syscall_32.tbl
arch/x86/xen/enlighten.c
arch/xtensa/Kconfig
arch/xtensa/Makefile
arch/xtensa/boot/Makefile
arch/xtensa/boot/dts/Makefile [new file with mode: 0644]
arch/xtensa/include/asm/atomic.h
arch/xtensa/include/asm/checksum.h
arch/xtensa/include/asm/elf.h
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/asm/processor.h
arch/xtensa/include/asm/ptrace.h
arch/xtensa/include/asm/regs.h
arch/xtensa/include/asm/string.h
arch/xtensa/include/asm/timex.h
arch/xtensa/include/asm/traps.h
arch/xtensa/include/uapi/asm/signal.h
arch/xtensa/include/uapi/asm/unistd.h
arch/xtensa/kernel/asm-offsets.c
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/head.S
arch/xtensa/kernel/process.c
arch/xtensa/kernel/ptrace.c
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/signal.c
arch/xtensa/kernel/syscall.c
arch/xtensa/kernel/traps.c
arch/xtensa/kernel/vectors.S
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/oprofile/Makefile [new file with mode: 0644]
arch/xtensa/oprofile/backtrace.c [new file with mode: 0644]
arch/xtensa/oprofile/init.c [new file with mode: 0644]
arch/xtensa/platforms/iss/Makefile
arch/xtensa/platforms/iss/simdisk.c [new file with mode: 0644]
arch/xtensa/platforms/xtfpga/setup.c
arch/xtensa/variants/dc233c/include/variant/core.h [new file with mode: 0644]
arch/xtensa/variants/dc233c/include/variant/tie-asm.h [new file with mode: 0644]
arch/xtensa/variants/dc233c/include/variant/tie.h [new file with mode: 0644]
block/Kconfig
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-ioc.c
block/blk-lib.c
block/blk-sysfs.c
block/blk.h
block/bsg.c
block/cfq-iosched.c
block/elevator.c
block/genhd.c
block/partition-generic.c
block/partitions/check.c
block/partitions/check.h
block/partitions/efi.c
block/partitions/mac.c
block/partitions/msdos.c
crypto/algapi.c
crypto/testmgr.h
drivers/acpi/Kconfig
drivers/acpi/apei/ghes.c
drivers/acpi/numa.c
drivers/amba/tegra-ahb.c
drivers/atm/atmtcp.c
drivers/atm/eni.c
drivers/atm/he.c
drivers/atm/nicstar.c
drivers/atm/solos-pci.c
drivers/base/devtmpfs.c
drivers/base/dma-buf.c
drivers/base/firmware_class.c
drivers/bcma/driver_pci_host.c
drivers/block/DAC960.c
drivers/block/Kconfig
drivers/block/Makefile
drivers/block/drbd/drbd_main.c
drivers/block/loop.c
drivers/block/mtip32xx/Kconfig
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/nbd.c
drivers/block/rbd.c
drivers/block/rsxx/Makefile [new file with mode: 0644]
drivers/block/rsxx/config.c [new file with mode: 0644]
drivers/block/rsxx/core.c [new file with mode: 0644]
drivers/block/rsxx/cregs.c [new file with mode: 0644]
drivers/block/rsxx/dev.c [new file with mode: 0644]
drivers/block/rsxx/dma.c [new file with mode: 0644]
drivers/block/rsxx/rsxx.h [new file with mode: 0644]
drivers/block/rsxx/rsxx_cfg.h [new file with mode: 0644]
drivers/block/rsxx/rsxx_priv.h [new file with mode: 0644]
drivers/block/swim3.c
drivers/block/xd.c [deleted file]
drivers/block/xd.h [deleted file]
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/char/dsp56k.c
drivers/char/dtlk.c
drivers/char/hw_random/core.c
drivers/char/hw_random/virtio-rng.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/lp.c
drivers/char/mem.c
drivers/char/misc.c
drivers/char/nsc_gpio.c
drivers/char/pcmcia/cm4000_cs.c
drivers/char/ppdev.c
drivers/char/ps3flash.c
drivers/char/raw.c
drivers/char/sonypi.c
drivers/char/tb0219.c
drivers/char/virtio_console.c
drivers/clk/clk.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/metag_generic.c [new file with mode: 0644]
drivers/clocksource/nomadik-mtu.c
drivers/clocksource/time-armada-370-xp.c
drivers/connector/cn_proc.c
drivers/dca/dca-sysfs.c
drivers/dma/dmaengine.c
drivers/dma/dw_dmac.c
drivers/dma/dw_dmac_regs.h
drivers/edac/Kconfig
drivers/edac/Makefile
drivers/edac/edac_core.h
drivers/edac/edac_mc.c
drivers/edac/edac_mc_sysfs.c
drivers/edac/edac_module.c
drivers/edac/edac_pci_sysfs.c
drivers/edac/ghes_edac.c [new file with mode: 0644]
drivers/edac/i3200_edac.c
drivers/edac/i5100_edac.c
drivers/edac/i7core_edac.c
drivers/edac/sb_edac.c
drivers/firewire/core-cdev.c
drivers/firewire/core-device.c
drivers/firmware/efivars.c
drivers/gpio/gpio-ich.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_hashtab.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/gma500/gtt.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/omapdrm/omap_gem_helpers.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/via/via_map.c
drivers/gpu/drm/via/via_mm.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/hid-roccat.c
drivers/hid/hidraw.c
drivers/hsi/hsi.c
drivers/hv/channel_mgmt.c
drivers/hv/hv.c
drivers/hv/vmbus_drv.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-dev.c
drivers/ide/ide-proc.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/fmr_pool.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/hw/amso1100/c2_qp.c
drivers/infiniband/hw/cxgb3/iwch.h
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/ehca/ehca_cq.c
drivers/infiniband/hw/ehca/ehca_qp.c
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/ipath/ipath_file_ops.c
drivers/infiniband/hw/ipath/ipath_fs.c
drivers/infiniband/hw/mlx4/cm.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_init.c
drivers/input/misc/hp_sdc_rtc.c
drivers/input/serio/Kconfig
drivers/iommu/tegra-smmu.c
drivers/irqchip/Makefile
drivers/irqchip/irq-metag-ext.c [new file with mode: 0644]
drivers/irqchip/irq-metag.c [new file with mode: 0644]
drivers/isdn/hardware/eicon/divasproc.c
drivers/isdn/hysdn/hysdn_proclog.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/mISDN/socket.c
drivers/isdn/mISDN/stack.c
drivers/lguest/lguest_device.c
drivers/md/Kconfig
drivers/md/Makefile
drivers/md/bitmap.c
drivers/md/dm-bio-prison.c
drivers/md/dm-bio-prison.h
drivers/md/dm-bufio.c
drivers/md/dm-cache-block-types.h [new file with mode: 0644]
drivers/md/dm-cache-metadata.c [new file with mode: 0644]
drivers/md/dm-cache-metadata.h [new file with mode: 0644]
drivers/md/dm-cache-policy-cleaner.c [new file with mode: 0644]
drivers/md/dm-cache-policy-internal.h [new file with mode: 0644]
drivers/md/dm-cache-policy-mq.c [new file with mode: 0644]
drivers/md/dm-cache-policy.c [new file with mode: 0644]
drivers/md/dm-cache-policy.h [new file with mode: 0644]
drivers/md/dm-cache-target.c [new file with mode: 0644]
drivers/md/dm-crypt.c
drivers/md/dm-delay.c
drivers/md/dm-flakey.c
drivers/md/dm-ioctl.c
drivers/md/dm-kcopyd.c
drivers/md/dm-linear.c
drivers/md/dm-mpath.c
drivers/md/dm-raid.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm-target.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/md/dm-zero.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/persistent-data/Kconfig
drivers/md/persistent-data/Makefile
drivers/md/persistent-data/dm-array.c [new file with mode: 0644]
drivers/md/persistent-data/dm-array.h [new file with mode: 0644]
drivers/md/persistent-data/dm-bitset.c [new file with mode: 0644]
drivers/md/persistent-data/dm-bitset.h [new file with mode: 0644]
drivers/md/persistent-data/dm-block-manager.c
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/md/persistent-data/dm-btree.h
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid10.h
drivers/md/raid5.c
drivers/media/pci/zoran/zoran_procfs.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/isp.h
drivers/media/rc/lirc_dev.c
drivers/media/v4l2-core/v4l2-dev.c
drivers/memstick/core/memstick.c
drivers/memstick/core/mspro_block.c
drivers/memstick/host/r592.c
drivers/mfd/rtsx_pcr.c
drivers/misc/c2port/core.c
drivers/misc/kgdbts.c
drivers/misc/sgi-gru/grutlbpurge.c
drivers/misc/tifm_core.c
drivers/misc/vmw_vmci/vmci_doorbell.c
drivers/misc/vmw_vmci/vmci_resource.c
drivers/mmc/core/host.c
drivers/mmc/host/dw_mmc.c
drivers/mtd/Kconfig
drivers/mtd/ar7part.c
drivers/mtd/bcm47xxpart.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/cmdlinepart.c
drivers/mtd/devices/Makefile
drivers/mtd/devices/bcm47xxsflash.c
drivers/mtd/devices/bcm47xxsflash.h [new file with mode: 0644]
drivers/mtd/devices/elm.c [new file with mode: 0644]
drivers/mtd/devices/m25p80.c
drivers/mtd/maps/Kconfig
drivers/mtd/maps/physmap_of.c
drivers/mtd/maps/uclinux.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
drivers/mtd/nand/bcm47xxnflash/main.c
drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/fsl_ifc_nand.c
drivers/mtd/nand/gpmi-nand/bch-regs.h
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.h
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_ecc.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/omap2.c
drivers/mtd/ofpart.c
drivers/mtd/tests/mtd_nandecctest.c
drivers/mtd/tests/mtd_oobtest.c
drivers/mtd/tests/mtd_pagetest.c
drivers/mtd/tests/mtd_speedtest.c
drivers/mtd/tests/mtd_stresstest.c
drivers/mtd/tests/mtd_subpagetest.c
drivers/mtd/tests/mtd_torturetest.c
drivers/mtd/ubi/cdev.c
drivers/mtd/ubi/debug.h
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/regs.h
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_hwmon.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/micrel.c
drivers/net/phy/phy_device.c
drivers/net/ppp/ppp_generic.c
drivers/net/tun.c
drivers/net/usb/Kconfig
drivers/net/usb/Makefile
drivers/net/usb/asix_devices.c
drivers/net/usb/ax88179_178a.c [new file with mode: 0644]
drivers/net/usb/cdc_ncm.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/cosa.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/zd1201.c
drivers/oprofile/oprofilefs.c
drivers/parisc/led.c
drivers/pci/pci.c
drivers/pci/proc.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/asus-wmi.h
drivers/platform/x86/chromeos_laptop.c [new file with mode: 0644]
drivers/platform/x86/eeepc-wmi.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/msi-laptop.c
drivers/platform/x86/msi-wmi.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/pnp/isapnp/proc.c
drivers/pnp/pnpbios/proc.c
drivers/power/bq2415x_charger.c
drivers/power/bq27x00_battery.c
drivers/power/ds2782_battery.c
drivers/pps/clients/pps-gpio.c
drivers/pps/kapi.c
drivers/pps/pps.c
drivers/remoteproc/remoteproc_core.c
drivers/remoteproc/remoteproc_virtio.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/rtc-stmp3xxx.c
drivers/s390/block/dasd_eckd.c
drivers/s390/char/fs3270.c
drivers/s390/char/tape_char.c
drivers/s390/char/vmur.c
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_debug.h
drivers/s390/cio/qdio_main.c
drivers/s390/kvm/kvm_virtio.c
drivers/sbus/char/display7seg.c
drivers/scsi/3w-9xxx.c
drivers/scsi/3w-sas.c
drivers/scsi/3w-xxxx.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/comminit.c
drivers/scsi/aacraid/src.c
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/ch.c
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/csiostor/csio_init.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/dc395x.c
drivers/scsi/dpt_i2o.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe.h
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fcoe/fcoe_sysfs.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/fcoe/libfcoe.h
drivers/scsi/fnic/Makefile
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_debugfs.c [new file with mode: 0644]
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_io.h
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/fnic/fnic_trace.c [new file with mode: 0644]
drivers/scsi/fnic/fnic_trace.h [new file with mode: 0644]
drivers/scsi/gdth.c
drivers/scsi/hpsa.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_libfc.h
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/megaraid/megaraid_sas_fusion.h
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/mvsas/mv_sas.h
drivers/scsi/osd/osd_initiator.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_settings.h
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/ql4_83xx.c
drivers/scsi/qla4xxx/ql4_attr.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_isr.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd-pci.c [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h [new file with mode: 0644]
drivers/scsi/ufs/ufshci.h
drivers/ssb/driver_chipcommon_pmu.c
drivers/staging/android/binder.c
drivers/staging/bcm/Misc.c
drivers/staging/ccg/f_mass_storage.c
drivers/staging/ccg/rndis.c
drivers/staging/ccg/storage_common.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/dgrp/dgrp_specproc.c
drivers/staging/usbip/usbip_common.c
drivers/staging/vme/devices/vme_user.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/tcm_fc/tfc_sess.c
drivers/thermal/Kconfig
drivers/thermal/Makefile
drivers/thermal/cpu_cooling.c
drivers/thermal/db8500_cpufreq_cooling.c
drivers/thermal/db8500_thermal.c
drivers/thermal/dove_thermal.c [new file with mode: 0644]
drivers/thermal/exynos_thermal.c
drivers/thermal/intel_powerclamp.c [new file with mode: 0644]
drivers/thermal/kirkwood_thermal.c [new file with mode: 0644]
drivers/thermal/rcar_thermal.c
drivers/thermal/spear_thermal.c
drivers/thermal/step_wise.c
drivers/thermal/thermal_sys.c
drivers/tty/hvc/hvcs.c
drivers/tty/serial/Kconfig
drivers/tty/sysrq.c
drivers/tty/tty_io.c
drivers/tty/vt/vc_screen.c
drivers/uio/uio.c
drivers/usb/core/devices.c
drivers/usb/core/devio.c
drivers/usb/gadget/atmel_usba_udc.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/printer.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/storage_common.c
drivers/usb/host/ehci-timer.c
drivers/vfio/pci/Kconfig
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/pci/vfio_pci_private.h
drivers/vfio/pci/vfio_pci_rdwr.c
drivers/vfio/vfio.c
drivers/video/backlight/Kconfig
drivers/video/backlight/Makefile
drivers/video/backlight/ams369fg06.c
drivers/video/backlight/lp8788_bl.c [new file with mode: 0644]
drivers/video/fb_defio.c
drivers/video/fbmem.c
drivers/video/msm/mdp.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci.c
drivers/w1/masters/mxc_w1.c
drivers/w1/slaves/Kconfig
drivers/w1/slaves/Makefile
drivers/w1/slaves/w1_ds2413.c [new file with mode: 0644]
drivers/w1/w1_family.h
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/at91rm9200_wdt.c
drivers/watchdog/at91sam9_wdt.c
drivers/watchdog/ath79_wdt.c
drivers/watchdog/bcm47xx_wdt.c
drivers/watchdog/booke_wdt.c
drivers/watchdog/cpwd.c
drivers/watchdog/davinci_wdt.c
drivers/watchdog/gef_wdt.c
drivers/watchdog/omap_wdt.c
drivers/watchdog/orion_wdt.c
drivers/watchdog/pnx4008_wdt.c
drivers/watchdog/retu_wdt.c [new file with mode: 0644]
drivers/watchdog/s3c2410_wdt.c
drivers/watchdog/sp5100_tco.c
drivers/watchdog/stmp3xxx_rtc_wdt.c [new file with mode: 0644]
drivers/watchdog/stmp3xxx_wdt.c [deleted file]
drivers/watchdog/watchdog_core.c
drivers/watchdog/watchdog_dev.c
drivers/xen/xen-acpi-cpuhotplug.c
drivers/xen/xen-acpi-memhotplug.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenfs/super.c
drivers/zorro/proc.c
fs/9p/acl.c
fs/9p/acl.h
fs/9p/fid.c
fs/9p/fid.h
fs/9p/vfs_dentry.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/9p/xattr.c
fs/9p/xattr.h
fs/adfs/dir.c
fs/affs/amigaffs.c
fs/affs/dir.c
fs/afs/dir.c
fs/afs/flock.c
fs/afs/write.c
fs/aio.c
fs/anon_inodes.c
fs/autofs4/autofs_i.h
fs/autofs4/dev-ioctl.c
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/befs/linuxvfs.c
fs/bfs/dir.c
fs/binfmt_aout.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/binfmt_flat.c
fs/binfmt_misc.c
fs/bio.c
fs/block_dev.c
fs/btrfs/Kconfig
fs/btrfs/Makefile
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/extent_map.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ioctl.h [deleted file]
fs/btrfs/locking.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/print-tree.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c [new file with mode: 0644]
fs/btrfs/raid56.h [new file with mode: 0644]
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/send.h
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-defrag.c
fs/btrfs/tree-log.c
fs/btrfs/ulist.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/buffer.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/export.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/mdsmap.c
fs/ceph/strings.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/cifsfs.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/readdir.c
fs/coda/dir.c
fs/coda/file.c
fs/coda/inode.c
fs/coda/pioctl.c
fs/compat_ioctl.c
fs/configfs/dir.c
fs/coredump.c
fs/cramfs/inode.c
fs/dcache.c
fs/direct-io.c
fs/dlm/config.c
fs/dlm/lock.c
fs/dlm/lockspace.c
fs/dlm/lowcomms.c
fs/dlm/recover.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/messaging.c
fs/ecryptfs/read_write.c
fs/efs/dir.c
fs/exec.c
fs/exofs/dir.c
fs/exportfs/expfs.c
fs/ext2/balloc.c
fs/ext2/dir.c
fs/ext2/inode.c
fs/ext2/ioctl.c
fs/ext2/super.c
fs/ext2/xattr.c
fs/ext3/dir.c
fs/ext3/inode.c
fs/ext3/ioctl.c
fs/ext3/namei.c
fs/ext3/resize.c
fs/ext3/super.c
fs/ext3/xattr.c
fs/ext4/acl.c
fs/ext4/balloc.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/extents_status.c
fs/ext4/extents_status.h
fs/ext4/file.c
fs/ext4/hash.c
fs/ext4/ialloc.c
fs/ext4/indirect.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h
fs/ext4/migrate.c
fs/ext4/mmp.c
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/f2fs/checkpoint.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/gc.h
fs/f2fs/inode.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/fat/dir.c
fs/fat/fat.h
fs/fat/file.c
fs/fat/inode.c
fs/fat/nfs.c
fs/fcntl.c
fs/file_table.c
fs/freevxfs/vxfs_lookup.c
fs/fs-writeback.c
fs/fs_struct.c
fs/fscache/cookie.c
fs/fuse/control.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/export.c
fs/gfs2/file.c
fs/gfs2/rgrp.c
fs/gfs2/sys.c
fs/hfs/dir.c
fs/hfs/inode.c
fs/hfsplus/Makefile
fs/hfsplus/attributes.c [new file with mode: 0644]
fs/hfsplus/bfind.c
fs/hfsplus/bnode.c
fs/hfsplus/brec.c
fs/hfsplus/btree.c
fs/hfsplus/catalog.c
fs/hfsplus/dir.c
fs/hfsplus/extents.c
fs/hfsplus/hfsplus_fs.h
fs/hfsplus/hfsplus_raw.h
fs/hfsplus/inode.c
fs/hfsplus/ioctl.c
fs/hfsplus/super.c
fs/hfsplus/unicode.c
fs/hfsplus/xattr.c [new file with mode: 0644]
fs/hfsplus/xattr.h [new file with mode: 0644]
fs/hfsplus/xattr_security.c [new file with mode: 0644]
fs/hfsplus/xattr_trusted.c [new file with mode: 0644]
fs/hfsplus/xattr_user.c [new file with mode: 0644]
fs/hostfs/hostfs_kern.c
fs/hpfs/dir.c
fs/hpfs/file.c
fs/hppfs/hppfs.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/internal.h
fs/ioctl.c
fs/isofs/compress.c
fs/isofs/dir.c
fs/isofs/export.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/jffs2/dir.c
fs/jfs/ioctl.c
fs/jfs/jfs_dtree.c
fs/lockd/clntlock.c
fs/lockd/clntproc.c
fs/lockd/host.c
fs/lockd/mon.c
fs/lockd/svclock.c
fs/lockd/svcsubs.c
fs/locks.c
fs/logfs/dir.c
fs/logfs/file.c
fs/minix/dir.c
fs/namei.c
fs/namespace.c
fs/ncpfs/dir.c
fs/ncpfs/inode.c
fs/ncpfs/ioctl.c
fs/ncpfs/mmap.c
fs/nfs/cache_lib.c
fs/nfs/cache_lib.h
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/dns_resolve.c
fs/nfs/file.c
fs/nfs/idmap.c
fs/nfs/inode.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4filelayout.h
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4namespace.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4super.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/pnfs_dev.c
fs/nfs/proc.c
fs/nfs/super.c
fs/nfs/unlink.c
fs/nfsd/cache.h
fs/nfsd/export.c
fs/nfsd/fault_inject.c
fs/nfsd/nfs2acl.c
fs/nfsd/nfs3proc.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4idmap.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfscache.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsproc.c
fs/nfsd/nfssvc.c
fs/nfsd/nfsxdr.c
fs/nfsd/vfs.c
fs/nfsd/vfs.h
fs/nfsd/xdr.h
fs/nfsd/xdr3.h
fs/nfsd/xdr4.h
fs/nilfs2/dir.c
fs/nilfs2/file.c
fs/nilfs2/ioctl.c
fs/nilfs2/namei.c
fs/notify/dnotify/dnotify.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fsnotify.c
fs/notify/inode_mark.c
fs/notify/inotify/inotify_fsnotify.c
fs/notify/inotify/inotify_user.c
fs/notify/vfsmount_mark.c
fs/ntfs/dir.c
fs/ocfs2/aops.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/dcache.c
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlmfs/dlmfs.c
fs/ocfs2/export.c
fs/ocfs2/file.c
fs/ocfs2/ioctl.c
fs/ocfs2/mmap.c
fs/ocfs2/move_extents.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/suballoc.h
fs/ocfs2/symlink.c
fs/ocfs2/xattr.c
fs/omfs/dir.c
fs/open.c
fs/openpromfs/inode.c
fs/pipe.c
fs/proc/base.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/kcore.c
fs/proc/nommu.c
fs/proc/proc_devtree.c
fs/proc/proc_net.c
fs/proc/proc_sysctl.c
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/proc/vmcore.c
fs/qnx4/dir.c
fs/qnx6/dir.c
fs/ramfs/file-nommu.c
fs/read_write.c
fs/readdir.c
fs/reiserfs/file.c
fs/reiserfs/inode.c
fs/reiserfs/ioctl.c
fs/reiserfs/procfs.c
fs/romfs/super.c
fs/seq_file.c
fs/splice.c
fs/squashfs/dir.c
fs/stat.c
fs/super.c
fs/sync.c
fs/sysfs/bin.c
fs/sysv/dir.c
fs/timerfd.c
fs/ubifs/debug.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/ioctl.c
fs/ubifs/lpt_commit.c
fs/ubifs/orphan.c
fs/ubifs/tnc_commit.c
fs/ubifs/ubifs.h
fs/udf/dir.c
fs/udf/file.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/super.c
fs/udf/udf_i.h
fs/udf/udf_sb.h
fs/udf/udfdecl.h
fs/ufs/dir.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_ioctl32.c
fs/xfs/xfs_log_recover.c
include/acpi/ghes.h [new file with mode: 0644]
include/asm-generic/checksum.h
include/asm-generic/cputime_nsecs.h
include/asm-generic/io.h
include/asm-generic/uaccess.h
include/asm-generic/unistd.h
include/clocksource/metag_generic.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/bcm47xx_wdt.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/binfmts.h
include/linux/blkdev.h
include/linux/blktrace_api.h
include/linux/btrfs.h [new file with mode: 0644]
include/linux/buffer_head.h
include/linux/ceph/ceph_features.h
include/linux/ceph/ceph_fs.h
include/linux/ceph/decode.h
include/linux/ceph/libceph.h
include/linux/ceph/mdsmap.h
include/linux/ceph/messenger.h
include/linux/ceph/osd_client.h
include/linux/ceph/osdmap.h
include/linux/ceph/rados.h
include/linux/compat.h
include/linux/completion.h
include/linux/crush/crush.h
include/linux/dcache.h
include/linux/debug_locks.h
include/linux/device-mapper.h
include/linux/dm-kcopyd.h
include/linux/dma-buf.h
include/linux/dmaengine.h
include/linux/dw_dmac.h
include/linux/edac.h
include/linux/elevator.h
include/linux/elf.h
include/linux/eventfd.h
include/linux/freezer.h
include/linux/fs.h
include/linux/fs_struct.h
include/linux/fsnotify.h
include/linux/hardirq.h
include/linux/hashtable.h
include/linux/hsi/hsi.h
include/linux/hugetlb.h
include/linux/idr.h
include/linux/if_team.h
include/linux/ipmi.h
include/linux/irqchip/metag-ext.h [new file with mode: 0644]
include/linux/irqchip/metag.h [new file with mode: 0644]
include/linux/jbd2.h
include/linux/list.h
include/linux/llist.h
include/linux/lockd/lockd.h
include/linux/lzo.h
include/linux/memblock.h
include/linux/mfd/lp8788.h
include/linux/mm.h
include/linux/mod_devicetable.h
include/linux/mtd/map.h
include/linux/nfs_xdr.h
include/linux/path.h
include/linux/pci_ids.h
include/linux/pid.h
include/linux/platform_data/elm.h [new file with mode: 0644]
include/linux/platform_data/exynos_thermal.h
include/linux/quota.h
include/linux/rculist.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/smpboot.h
include/linux/stmp3xxx_rtc_wdt.h [new file with mode: 0644]
include/linux/sunrpc/addr.h [new file with mode: 0644]
include/linux/sunrpc/cache.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/svc.h
include/linux/sunrpc/xdr.h
include/linux/thermal.h
include/linux/virtio.h
include/linux/watchdog.h
include/linux/writeback.h
include/net/9p/client.h
include/net/ax25.h
include/net/inet_hashtables.h
include/net/inet_timewait_sock.h
include/net/netrom.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sock.h
include/net/tcp.h
include/ras/ras_event.h
include/scsi/Kbuild
include/scsi/fc/Kbuild
include/scsi/fc/fc_els.h [deleted file]
include/scsi/fc/fc_fs.h [deleted file]
include/scsi/fc/fc_gs.h [deleted file]
include/scsi/fc/fc_ns.h [deleted file]
include/scsi/fcoe_sysfs.h
include/scsi/libfcoe.h
include/scsi/scsi_bsg_fc.h [deleted file]
include/scsi/scsi_host.h
include/scsi/scsi_netlink.h [deleted file]
include/scsi/scsi_netlink_fc.h [deleted file]
include/sound/aess.h [new file with mode: 0644]
include/trace/events/block.h
include/trace/events/ext4.h
include/trace/events/jbd2.h
include/trace/events/writeback.h
include/uapi/linux/Kbuild
include/uapi/linux/btrfs.h [new file with mode: 0644]
include/uapi/linux/dm-ioctl.h
include/uapi/linux/elf.h
include/uapi/linux/ipmi.h
include/uapi/linux/msdos_fs.h
include/uapi/linux/nbd.h
include/uapi/linux/vfio.h
include/uapi/linux/xattr.h
include/uapi/scsi/Kbuild
include/uapi/scsi/fc/Kbuild
include/uapi/scsi/fc/fc_els.h [new file with mode: 0644]
include/uapi/scsi/fc/fc_fs.h [new file with mode: 0644]
include/uapi/scsi/fc/fc_gs.h [new file with mode: 0644]
include/uapi/scsi/fc/fc_ns.h [new file with mode: 0644]
include/uapi/scsi/scsi_bsg_fc.h [new file with mode: 0644]
include/uapi/scsi/scsi_netlink.h [new file with mode: 0644]
include/uapi/scsi/scsi_netlink_fc.h [new file with mode: 0644]
include/uapi/video/Kbuild
include/uapi/video/edid.h [new file with mode: 0644]
include/uapi/video/sisfb.h [new file with mode: 0644]
include/uapi/video/uvesafb.h [new file with mode: 0644]
include/video/Kbuild
include/video/edid.h
include/video/sisfb.h
include/video/uvesafb.h
init/Kconfig
ipc/mqueue.c
ipc/shm.c
ipc/util.c
kernel/Makefile
kernel/acct.c
kernel/cgroup.c
kernel/debug/debug_core.h
kernel/debug/gdbstub.c
kernel/debug/kdb/kdb_bp.c
kernel/debug/kdb/kdb_debugger.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/irq/proc.c
kernel/kexec.c
kernel/kfifo.c [deleted file]
kernel/kprobes.c
kernel/lockdep.c
kernel/module.c
kernel/nsproxy.c
kernel/pid.c
kernel/posix-timers.c
kernel/relay.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/debug.c
kernel/sched/stats.c
kernel/signal.c
kernel/smpboot.c
kernel/softirq.c
kernel/stop_machine.c
kernel/sys.c
kernel/sysctl.c
kernel/sysctl_binary.c
kernel/time/tick-sched.c
kernel/timeconst.bc [new file with mode: 0644]
kernel/timeconst.pl [deleted file]
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_output.c
kernel/trace/trace_syscalls.c
kernel/tracepoint.c
kernel/user-return-notifier.c
kernel/user.c
kernel/utsname.c
kernel/utsname_sysctl.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Kconfig.kgdb
lib/Makefile
lib/checksum.c
lib/debugobjects.c
lib/decompress_unlzo.c
lib/devres.c
lib/idr.c
lib/kfifo.c [new file with mode: 0644]
lib/lru_cache.c
lib/lzo/Makefile
lib/lzo/lzo1x_compress.c
lib/lzo/lzo1x_decompress.c [deleted file]
lib/lzo/lzo1x_decompress_safe.c [new file with mode: 0644]
lib/lzo/lzodefs.h
lib/scatterlist.c
mm/Kconfig
mm/cleancache.c
mm/fadvise.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/kmemleak.c
mm/ksm.c
mm/memblock.c
mm/mlock.c
mm/mmap.c
mm/mmu_notifier.c
mm/nommu.c
mm/page-writeback.c
mm/page_alloc.c
mm/shmem.c
mm/swapfile.c
net/9p/error.c
net/9p/trans_virtio.c
net/9p/util.c
net/appletalk/ddp.c
net/atm/common.c
net/atm/lec.c
net/atm/proc.c
net/atm/signaling.c
net/ax25/af_ax25.c
net/ax25/ax25_ds_subr.c
net/ax25/ax25_ds_timer.c
net/ax25/ax25_iface.c
net/ax25/ax25_uid.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/gateway_client.c
net/batman-adv/main.c
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/translation-table.c
net/batman-adv/vis.c
net/bluetooth/hci_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_fdb.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/can/af_can.c
net/can/gw.c
net/can/proc.c
net/ceph/ceph_common.c
net/ceph/ceph_strings.c
net/ceph/crush/mapper.c
net/ceph/crypto.c
net/ceph/debugfs.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/ceph/pagevec.c
net/core/dev.c
net/core/flow.c
net/core/net-procfs.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/decnet/af_decnet.c
net/decnet/dn_table.c
net/ieee802154/dgram.c
net/ieee802154/raw.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/raw.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_input.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/xfrm6_tunnel.c
net/ipx/af_ipx.c
net/ipx/ipx_proc.c
net/irda/ircomm/ircomm_tty.c
net/irda/iriap.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/llc/llc_sap.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_nat_core.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/xt_RATEEST.c
net/netfilter/xt_connlimit.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_recent.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/netrom/nr_route.c
net/nfc/llcp/llcp.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/vport.c
net/packet/af_packet.c
net/packet/diag.c
net/phonet/pep.c
net/phonet/socket.c
net/rds/bind.c
net/rds/connection.c
net/rds/message.c
net/rose/af_rose.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_drr.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_qfq.c
net/sctp/associola.c
net/sctp/endpointola.c
net/sctp/input.c
net/sctp/proc.c
net/sctp/socket.c
net/sctp/ssnmap.c
net/sctp/tsnmap.c
net/sctp/ulpqueue.c
net/socket.c
net/sunrpc/addr.c
net/sunrpc/auth.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/sunrpc/rpc_pipe.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth.c
net/sunrpc/svcauth_unix.c
net/sunrpc/xdr.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtsock.c
net/tipc/name_table.c
net/tipc/node.c
net/unix/af_unix.c
net/unix/diag.c
net/unix/garbage.c
net/wireless/core.c
net/wireless/nl80211.c
net/x25/af_x25.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
scripts/Makefile.headersinst
scripts/Makefile.modpost
scripts/checkpatch.pl
scripts/checkstack.pl
scripts/coccicheck
scripts/coccinelle/misc/memcpy-assign.cocci [new file with mode: 0644]
scripts/coccinelle/misc/orplus.cocci [new file with mode: 0644]
scripts/coccinelle/misc/semicolon.cocci [new file with mode: 0644]
scripts/depmod.sh
scripts/genksyms/genksyms.c
scripts/get_maintainer.pl
scripts/kconfig/Makefile
scripts/kconfig/conf.c
scripts/kconfig/expr.c
scripts/kconfig/gconf.c
scripts/kconfig/lkc.h
scripts/kconfig/lxdialog/check-lxdialog.sh
scripts/kconfig/lxdialog/dialog.h
scripts/kconfig/lxdialog/inputbox.c
scripts/kconfig/lxdialog/menubox.c
scripts/kconfig/mconf.c
scripts/kconfig/menu.c
scripts/kconfig/merge_config.sh
scripts/kconfig/nconf.c
scripts/kconfig/nconf.gui.c
scripts/kconfig/qconf.cc
scripts/kconfig/symbol.c
scripts/kconfig/util.c
scripts/kconfig/zconf.l
scripts/kconfig/zconf.lex.c_shipped
scripts/kernel-doc
scripts/link-vmlinux.sh
scripts/mod/.gitignore
scripts/mod/Makefile
scripts/mod/devicetable-offsets.c [new file with mode: 0644]
scripts/mod/file2alias.c
scripts/mod/modpost.c
scripts/package/mkspec
scripts/recordmcount.c
scripts/setlocalversion
scripts/tags.sh
security/apparmor/domain.c
security/apparmor/file.c
security/apparmor/lsm.c
security/commoncap.c
security/integrity/ima/ima_api.c
security/integrity/ima/ima_crypto.c
security/integrity/ima/ima_main.c
security/integrity/ima/ima_queue.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/selinuxfs.c
security/smack/smack_lsm.c
security/tomoyo/securityfs_if.c
sound/core/info.c
sound/core/pcm_native.c
sound/oss/msnd_pinnacle.c
sound/oss/soundcard.c
sound/pci/bt87x.c
sound/pci/emu10k1/emu10k1_main.c
sound/pci/emu10k1/emupcm.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/revo.c
sound/sound_firmware.c
tools/perf/perf.h
tools/perf/util/evlist.c
tools/testing/selftests/Makefile
tools/testing/selftests/README.txt [new file with mode: 0644]
tools/testing/selftests/efivarfs/Makefile [new file with mode: 0644]
tools/testing/selftests/efivarfs/create-read.c [new file with mode: 0644]
tools/testing/selftests/efivarfs/efivarfs.sh [new file with mode: 0644]
tools/testing/selftests/efivarfs/open-unlink.c [new file with mode: 0644]
virt/kvm/eventfd.c
virt/kvm/irq_comm.c

index 0f3e8bb..45b3df9 100644 (file)
@@ -299,6 +299,8 @@ memory-hotplug.txt
        - Hotpluggable memory support, how to use and current status.
 memory.txt
        - info on typical Linux memory problems.
+metag/
+       - directory with info about Linux on Meta architecture.
 mips/
        - directory with info about Linux on MIPS architecture.
 misc-devices/
index 50e2a80..21640ea 100644 (file)
@@ -1,14 +1,53 @@
-What:          /sys/bus/fcoe/ctlr_X
+What:          /sys/bus/fcoe/
+Date:          August 2012
+KernelVersion: TBD
+Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:   The FCoE bus. Attributes in this directory are control interfaces.
+Attributes:
+
+       ctlr_create: 'FCoE Controller' instance creation interface. Writing an
+                    <ifname> to this file will allocate and populate sysfs with a
+                    fcoe_ctlr_device (ctlr_X). The user can then configure any
+                    per-port settings and finally write to the fcoe_ctlr_device's
+                    'start' attribute to begin the kernel's discovery and login
+                    process.
+
+       ctlr_destroy: 'FCoE Controller' instance removal interface. Writing a
+                      fcoe_ctlr_device's sysfs name to this file will log the
+                      fcoe_ctlr_device out of the fabric or otherwise connected
+                      FCoE devices. It will also free all kernel memory allocated
+                      for this fcoe_ctlr_device and any structures associated
+                      with it, this includes the scsi_host.
+
+What:          /sys/bus/fcoe/devices/ctlr_X
 Date:          March 2012
 KernelVersion: TBD
 Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
-Description:   'FCoE Controller' instances on the fcoe bus
+Description:   'FCoE Controller' instances on the fcoe bus.
+               The FCoE Controller now has a three stage creation process.
+               1) Write interface name to ctlr_create 2) Configure the FCoE
+               Controller (ctlr_X) 3) Enable the FCoE Controller to begin
+               discovery and login. The FCoE Controller is destroyed by
+               writing its name, i.e. ctlr_X, to the ctlr_destroy file.
+
 Attributes:
 
        fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
                          this value will change the dev_loss_tmo for all
                          FCFs discovered by this controller.
 
+       mode:             Display or change the FCoE Controller's mode. Possible
+                         modes are 'Fabric' and 'VN2VN'. If a FCoE Controller
+                         is started in 'Fabric' mode then FIP FCF discovery is
+                         initiated and ultimately a fabric login is attempted.
+                         If a FCoE Controller is started in 'VN2VN' mode then
+                         FIP VN2VN discovery and login is performed. A FCoE
+                         Controller only supports one mode at a time.
+
+       enabled:          Whether an FCoE controller is enabled or disabled.
+                         0 if disabled, 1 if enabled. Writing either 0 or 1
+                         to this file will enable or disable the FCoE controller.
+
        lesb/link_fail:   Link Error Status Block (LESB) link failure count.
 
        lesb/vlink_fail:  Link Error Status Block (LESB) virtual link
@@ -26,7 +65,7 @@ Attributes:
 
 Notes: ctlr_X (global increment starting at 0)
 
-What:          /sys/bus/fcoe/fcf_X
+What:          /sys/bus/fcoe/devices/fcf_X
 Date:          March 2012
 KernelVersion: TBD
 Contact:       Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
diff --git a/Documentation/ABI/testing/sysfs-platform-msi-laptop b/Documentation/ABI/testing/sysfs-platform-msi-laptop
new file mode 100644 (file)
index 0000000..307a247
--- /dev/null
@@ -0,0 +1,83 @@
+What:          /sys/devices/platform/msi-laptop-pf/lcd_level
+Date:          Oct 2006
+KernelVersion: 2.6.19
+Contact:       "Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+               Screen brightness: contains a single integer in the range 0..8.
+
+What:          /sys/devices/platform/msi-laptop-pf/auto_brightness
+Date:          Oct 2006
+KernelVersion: 2.6.19
+Contact:       "Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+               Enable automatic brightness control: contains either 0 or 1. If
+               set to 1 the hardware adjusts the screen brightness
+               automatically when the power cord is plugged/unplugged.
+
+What:          /sys/devices/platform/msi-laptop-pf/wlan
+Date:          Oct 2006
+KernelVersion: 2.6.19
+Contact:       "Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+               WLAN subsystem enabled: contains either 0 or 1.
+
+What:          /sys/devices/platform/msi-laptop-pf/bluetooth
+Date:          Oct 2006
+KernelVersion: 2.6.19
+Contact:       "Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+               Bluetooth subsystem enabled: contains either 0 or 1. Please
+               note that this file is constantly 0 if no Bluetooth hardware is
+               available.
+
+What:          /sys/devices/platform/msi-laptop-pf/touchpad
+Date:          Nov 2012
+KernelVersion: 3.8
+Contact:       "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+               Contains either 0 or 1 and indicates if touchpad is turned on.
+               Touchpad state can only be toggled by pressing Fn+F3.
+
+What:          /sys/devices/platform/msi-laptop-pf/turbo_mode
+Date:          Nov 2012
+KernelVersion: 3.8
+Contact:       "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+               Contains either 0 or 1 and indicates if turbo mode is turned
+               on. In turbo mode power LED is orange and processor is
+               overclocked. Turbo mode is available only if charging. It is
+               only possible to toggle turbo mode state by pressing Fn+F10,
+               and there is a cooldown of a few seconds between subsequent
+               toggles. If the user presses Fn+F10 too frequently, turbo mode
+               state is not changed.
+
+What:          /sys/devices/platform/msi-laptop-pf/eco_mode
+Date:          Nov 2012
+KernelVersion: 3.8
+Contact:       "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+               Contains either 0 or 1 and indicates if ECO mode is turned on.
+               In ECO mode power LED is green and userspace should do some
+               powersaving actions. ECO mode is available only on battery
+               power. ECO mode can only be toggled by pressing Fn+F10.
+
+What:          /sys/devices/platform/msi-laptop-pf/turbo_cooldown
+Date:          Nov 2012
+KernelVersion: 3.8
+Contact:       "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+               Contains value in range 0..3:
+                       * 0 -> Turbo mode is off
+                       * 1 -> Turbo mode is on, cannot be turned off yet
+                       * 2 -> Turbo mode is off, cannot be turned on yet
+                       * 3 -> Turbo mode is on
+
+What:          /sys/devices/platform/msi-laptop-pf/auto_fan
+Date:          Nov 2012
+KernelVersion: 3.8
+Contact:       "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+               Contains either 0 or 1 and indicates if fan speed is controlled
+               automatically (1) or fan runs at maximal speed (0). Can be
+               toggled in software.
+
index 4a4fb29..14129f1 100644 (file)
@@ -488,9 +488,10 @@ will invoke the generic mapping error check interface. Doing so will ensure
 that the mapping code will work correctly on all dma implementations without
 any dependency on the specifics of the underlying implementation. Using the
 returned address without checking for errors could result in failures ranging
-from panics to silent data corruption. Couple of example of incorrect ways to
-check for errors that make assumptions about the underlying dma implementation
-are as follows and these are applicable to dma_map_page() as well.
+from panics to silent data corruption. A couple of examples of incorrect ways
+to check for errors that make assumptions about the underlying dma
+implementation are as follows and these are applicable to dma_map_page() as
+well.
 
 Incorrect example 1:
        dma_addr_t dma_handle;
@@ -751,7 +752,7 @@ Example 1:
                dma_unmap_single(dma_handle1);
        map_error_handling1:
 
-Example 2: (if buffers are allocated a loop, unmap all mapped buffers when
+Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when
            mapping error is detected in the middle)
 
        dma_addr_t dma_addr;
index 16eb4c9..f13c913 100644 (file)
@@ -348,34 +348,40 @@ You can change this at module load time (for a module) with:
 
   modprobe ipmi_si.o type=<type1>,<type2>....
        ports=<port1>,<port2>... addrs=<addr1>,<addr2>...
-       irqs=<irq1>,<irq2>... trydefaults=[0|1]
+       irqs=<irq1>,<irq2>...
        regspacings=<sp1>,<sp2>,... regsizes=<size1>,<size2>,...
        regshifts=<shift1>,<shift2>,...
        slave_addrs=<addr1>,<addr2>,...
        force_kipmid=<enable1>,<enable2>,...
        kipmid_max_busy_us=<ustime1>,<ustime2>,...
        unload_when_empty=[0|1]
+       trydefaults=[0|1] trydmi=[0|1] tryacpi=[0|1]
+       tryplatform=[0|1] trypci=[0|1]
 
-Each of these except si_trydefaults is a list, the first item for the
+Each of these except try... items is a list, the first item for the
 first interface, second item for the second interface, etc.
 
 The si_type may be either "kcs", "smic", or "bt".  If you leave it blank, it
 defaults to "kcs".
 
-If you specify si_addrs as non-zero for an interface, the driver will
+If you specify addrs as non-zero for an interface, the driver will
 use the memory address given as the address of the device.  This
 overrides si_ports.
 
-If you specify si_ports as non-zero for an interface, the driver will
+If you specify ports as non-zero for an interface, the driver will
 use the I/O port given as the device address.
 
-If you specify si_irqs as non-zero for an interface, the driver will
+If you specify irqs as non-zero for an interface, the driver will
 attempt to use the given interrupt for the device.
 
-si_trydefaults sets whether the standard IPMI interface at 0xca2 and
+trydefaults sets whether the standard IPMI interface at 0xca2 and
 any interfaces specified by ACPI are tried.  By default, the driver
 tries it, set this value to zero to turn this off.
 
+The other try... items disable discovery by their corresponding
+names.  These are all enabled by default, set them to zero to disable
+them.  The tryplatform disables openfirmware.
+
 The next three parameters have to do with register layout.  The
 registers used by the interfaces may not appear at successive
 locations and they may not be in 8-bit registers.  These parameters
index d89b4fe..a5eb7d1 100644 (file)
@@ -102,6 +102,64 @@ processing of request. Therefore, increasing the value can imporve the
 performance although this can cause the latency of some I/O to increase due
 to a larger number of requests.
 
+CFQ Group scheduling
+====================
+
+CFQ supports blkio cgroup and has "blkio." prefixed files in each
+blkio cgroup directory. It is weight-based and there are four knobs
+for configuration - weight[_device] and leaf_weight[_device].
+Internal cgroup nodes (the ones with children) can also have tasks in
+them, so the former two configure how much proportion the cgroup as a
+whole is entitled to at its parent's level while the latter two
+configure how much proportion the tasks in the cgroup have compared to
+its direct children.
+
+Another way to think about it is assuming that each internal node has
+an implicit leaf child node which hosts all the tasks whose weight is
+configured by leaf_weight[_device]. Let's assume a blkio hierarchy
+composed of five cgroups - root, A, B, AA and AB - with the following
+weights where the names represent the hierarchy.
+
+        weight leaf_weight
+ root :  125    125
+ A    :  500    750
+ B    :  250    500
+ AA   :  500    500
+ AB   : 1000    500
+
+root never has a parent, making its weight meaningless. For backward
+compatibility, weight is always kept in sync with leaf_weight. B, AA
+and AB have no children and thus their tasks have no child cgroups to
+compete with. They always get 100% of what the cgroup won at the
+parent level. Considering only the weights which matter, the hierarchy
+looks like the following.
+
+          root
+       /    |   \
+      A     B    leaf
+     500   250   125
+   /  |  \
+  AA  AB  leaf
+ 500 1000 750
+
+If all cgroups have active IOs and competing with each other, disk
+time will be distributed like the following.
+
+Distribution below root. The total active weight at this level is
+A:500 + B:250 + root-leaf:125 = 875.
+
+ root-leaf :   125 /  875      =~ 14%
+ A         :   500 /  875      =~ 57%
+ B(-leaf)  :   250 /  875      =~ 28%
+
+A has children and further distributes its 57% among the children and
+the implicit leaf node. The total active weight at this level is
+AA:500 + AB:1000 + A-leaf:750 = 2250.
+
+ A-leaf    : ( 750 / 2250) * A =~ 19%
+ AA(-leaf) : ( 500 / 2250) * A =~ 12%
+ AB(-leaf) : (1000 / 2250) * A =~ 25%
+
 CFQ IOPS Mode for group scheduling
 ===================================
 Basic CFQ design is to provide priority based time slices. Higher priority
index aeb93ff..271e607 100644 (file)
@@ -4,43 +4,13 @@
    can use a remote server as one of its block devices. So every time
    the client computer wants to read, e.g., /dev/nb0, it sends a
    request over TCP to the server, which will reply with the data read.
-   This can be used for stations with low disk space (or even diskless -
-   if you boot from floppy) to borrow disk space from another computer.
-   Unlike NFS, it is possible to put any filesystem on it, etc. It should
-   even be possible to use NBD as a root filesystem (I've never tried),
-   but it requires a user-level program to be in the initrd to start.
-   It also allows you to run block-device in user land (making server
-   and client physically the same computer, communicating using loopback).
-   
-   Current state: It currently works. Network block device is stable.
-   I originally thought that it was impossible to swap over TCP. It
-   turned out not to be true - swapping over TCP now works and seems
-   to be deadlock-free, but it requires heavy patches into Linux's
-   network layer.
-   
+   This can be used for stations with low disk space (or even diskless)
+   to borrow disk space from another computer.
+   Unlike NFS, it is possible to put any filesystem on it, etc.
+
    For more information, or to download the nbd-client and nbd-server
    tools, go to http://nbd.sf.net/.
 
-   Howto: To setup nbd, you can simply do the following:
-
-   First, serve a device or file from a remote server:
-
-   nbd-server <port-number> <device-or-file-to-serve-to-client>
-
-   e.g.,
-       root@server1 # nbd-server 1234 /dev/sdb1
-
-       (serves sdb1 partition on TCP port 1234)
-
-   Then, on the local (client) system:
-
-   nbd-client <server-name-or-IP> <server-port-number> /dev/nb[0-n]
-
-   e.g.,
-       root@client1 # nbd-client server1 1234 /dev/nb0
-
-       (creates the nb0 device on client1)
-
    The nbd kernel module need only be installed on the client
    system, as the nbd-server is completely in userspace. In fact,
    the nbd-server has been successfully ported to other operating
index b4b1fb3..da272c8 100644 (file)
@@ -75,7 +75,7 @@ Throttling/Upper Limit policy
         mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Specify a bandwidth rate on particular device for root group. The format
-  for policy is "<major>:<minor>  <byes_per_second>".
+  for policy is "<major>:<minor>  <bytes_per_second>".
 
         echo "8:16  1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
 
@@ -94,13 +94,11 @@ Throttling/Upper Limit policy
 
 Hierarchical Cgroups
 ====================
-- Currently none of the IO control policy supports hierarchical groups. But
-  cgroup interface does allow creation of hierarchical cgroups and internally
-  IO policies treat them as flat hierarchy.
+- Currently only CFQ supports hierarchical groups. For throttling,
+  cgroup interface does allow creation of hierarchical cgroups and
+  internally it treats them as flat hierarchy.
 
-  So this patch will allow creation of cgroup hierarchcy but at the backend
-  everything will be treated as flat. So if somebody created a hierarchy like
-  as follows.
+  If somebody created a hierarchy like as follows.
 
                        root
                        /  \
@@ -108,16 +106,20 @@ Hierarchical Cgroups
                        |
                     test3
 
-  CFQ and throttling will practically treat all groups at same level.
+  CFQ will handle the hierarchy correctly but throttling will
+  practically treat all groups at the same level. For details on CFQ
+  hierarchy support, refer to Documentation/block/cfq-iosched.txt.
+  Throttling will treat the hierarchy as if it looks like the
+  following.
 
                                pivot
                             /  /   \  \
                        root  test1 test2  test3
 
-  Down the line we can implement hierarchical accounting/control support
-  and also introduce a new cgroup file "use_hierarchy" which will control
-  whether cgroup hierarchy is viewed as flat or hierarchical by the policy..
-  This is how memory controller also has implemented the things.
+  Nesting cgroups, while allowed, isn't officially supported and blkio
+  generates a warning when cgroups nest. Once throttling implements
+  hierarchy support, hierarchy will be supported and the warning will
+  be removed.
 
 Various user visible config options
 ===================================
@@ -172,6 +174,12 @@ Proportional weight policy files
          dev     weight
          8:16    300
 
+- blkio.leaf_weight[_device]
+       - Equivalents of blkio.weight[_device] for the purpose of
+          deciding how much weight tasks in the given cgroup have while
+          competing with the cgroup's child cgroups. For details,
+          please refer to Documentation/block/cfq-iosched.txt.
+
 - blkio.time
        - disk time allocated to cgroup per device in milliseconds. First
          two fields specify the major and minor number of the device and
@@ -279,6 +287,11 @@ Proportional weight policy files
          and minor number of the device and third field specifies the number
          of times a group was dequeued from a particular device.
 
+- blkio.*_recursive
+       - Recursive version of various stats. These files show the
+          same information as their non-recursive counterparts but
+          include stats from all the descendant cgroups.
+
 Throttling/Upper limit policy files
 -----------------------------------
 - blkio.throttle.read_bps_device
index cf44eb6..dffa2d6 100644 (file)
@@ -87,6 +87,10 @@ As any static code analyzer, Coccinelle produces false
 positives. Thus, reports must be carefully checked, and patches
 reviewed.
 
+To enable verbose messages set the V= variable, for example:
+
+   make coccicheck MODE=report V=1
+
 
  Using Coccinelle with a single semantic patch
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/device-mapper/cache-policies.txt b/Documentation/device-mapper/cache-policies.txt
new file mode 100644 (file)
index 0000000..d7c440b
--- /dev/null
@@ -0,0 +1,77 @@
+Guidance for writing policies
+=============================
+
+Try to keep transactionality out of it.  The core is careful to
+avoid asking about anything that is migrating.  This is a pain, but
+makes it easier to write the policies.
+
+Mappings are loaded into the policy at construction time.
+
+Every bio that is mapped by the target is referred to the policy.
+The policy can return a simple HIT or MISS or issue a migration.
+
+Currently there's no way for the policy to issue background work,
+e.g. to start writing back dirty blocks that are going to be evicted
+soon.
+
+Because we map bios, rather than requests it's easy for the policy
+to get fooled by many small bios.  For this reason the core target
+issues periodic ticks to the policy.  It's suggested that the policy
+doesn't update states (eg, hit counts) for a block more than once
+for each tick.  The core ticks by watching bios complete, and so
+trying to see when the io scheduler has let the ios run.
+
+
+Overview of supplied cache replacement policies
+===============================================
+
+multiqueue
+----------
+
+This policy is the default.
+
+The multiqueue policy has two sets of 16 queues: one set for entries
+waiting for the cache and another one for those in the cache.
+Cache entries in the queues are aged based on logical time. Entry into
+the cache is based on variable thresholds and queue selection is based
+on hit count on entry. The policy aims to take different cache miss
+costs into account and to adjust to varying load patterns automatically.
+
+Message and constructor argument pairs are:
+       'sequential_threshold <#nr_sequential_ios>' and
+       'random_threshold <#nr_random_ios>'.
+
+The sequential threshold indicates the number of contiguous I/Os
+required before a stream is treated as sequential.  The random threshold
+is the number of intervening non-contiguous I/Os that must be seen
+before the stream is treated as random again.
+
+The sequential and random thresholds default to 512 and 4 respectively.
+
+Large, sequential ios are probably better left on the origin device
+since spindles tend to have good bandwidth. The io_tracker counts
+contiguous I/Os to try to spot when the io is in one of these sequential
+modes.
+
+cleaner
+-------
+
+The cleaner writes back all dirty blocks in a cache to decommission it.
+
+Examples
+========
+
+The syntax for a table is:
+       cache <metadata dev> <cache dev> <origin dev> <block size>
+       <#feature_args> [<feature arg>]*
+       <policy> <#policy_args> [<policy arg>]*
+
+The syntax to send a message using the dmsetup command is:
+       dmsetup message <mapped device> 0 sequential_threshold 1024
+       dmsetup message <mapped device> 0 random_threshold 8
+
+Using dmsetup:
+       dmsetup create blah --table "0 268435456 cache /dev/sdb /dev/sdc \
+           /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8"
+       creates a 128GB large mapped device named 'blah' with the
+       sequential threshold set to 1024 and the random_threshold set to 8.
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
new file mode 100644 (file)
index 0000000..f50470a
--- /dev/null
@@ -0,0 +1,243 @@
+Introduction
+============
+
+dm-cache is a device mapper target written by Joe Thornber, Heinz
+Mauelshagen, and Mike Snitzer.
+
+It aims to improve performance of a block device (eg, a spindle) by
+dynamically migrating some of its data to a faster, smaller device
+(eg, an SSD).
+
+This device-mapper solution allows us to insert this caching at
+different levels of the dm stack, for instance above the data device for
+a thin-provisioning pool.  Caching solutions that are integrated more
+closely with the virtual memory system should give better performance.
+
+The target reuses the metadata library used in the thin-provisioning
+library.
+
+The decision as to what data to migrate and when is left to a plug-in
+policy module.  Several of these have been written as we experiment,
+and we hope other people will contribute others for specific io
+scenarios (eg. a vm image server).
+
+Glossary
+========
+
+  Migration -  Movement of the primary copy of a logical block from one
+              device to the other.
+  Promotion -  Migration from slow device to fast device.
+  Demotion  -  Migration from fast device to slow device.
+
+The origin device always contains a copy of the logical block, which
+may be out of date or kept in sync with the copy on the cache device
+(depending on policy).
+
+Design
+======
+
+Sub-devices
+-----------
+
+The target is constructed by passing three devices to it (along with
+other parameters detailed later):
+
+1. An origin device - the big, slow one.
+
+2. A cache device - the small, fast one.
+
+3. A small metadata device - records which blocks are in the cache,
+   which are dirty, and extra hints for use by the policy object.
+   This information could be put on the cache device, but having it
+   separate allows the volume manager to configure it differently,
+   e.g. as a mirror for extra robustness.
+
+Fixed block size
+----------------
+
+The origin is divided up into blocks of a fixed size.  This block size
+is configurable when you first create the cache.  Typically we've been
+using block sizes of 256k - 1024k.
+
+Having a fixed block size simplifies the target a lot.  But it is
+something of a compromise.  For instance, a small part of a block may be
+getting hit a lot, yet the whole block will be promoted to the cache.
+So large block sizes are bad because they waste cache space.  And small
+block sizes are bad because they increase the amount of metadata (both
+in core and on disk).
+
+Writeback/writethrough
+----------------------
+
+The cache has two modes, writeback and writethrough.
+
+If writeback, the default, is selected then a write to a block that is
+cached will go only to the cache and the block will be marked dirty in
+the metadata.
+
+If writethrough is selected then a write to a cached block will not
+complete until it has hit both the origin and cache devices.  Clean
+blocks should remain clean.
+
+A simple cleaner policy is provided, which will clean (write back) all
+dirty blocks in a cache.  Useful for decommissioning a cache.
+
+Migration throttling
+--------------------
+
+Migrating data between the origin and cache device uses bandwidth.
+The user can set a throttle to prevent more than a certain amount of
+migration occurring at any one time.  Currently we're not taking any
+account of normal io traffic going to the devices.  More work needs
+doing here to avoid migrating during those peak io moments.
+
+For the time being, a message "migration_threshold <#sectors>"
+can be used to set the maximum number of sectors being migrated,
+the default being 204800 sectors (or 100MB).
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
+written.  If no such requests are made then commits will occur every
+second.  This means the cache behaves like a physical disk that has a
+write cache (the same is true of the thin-provisioning target).  If
+power is lost you may lose some recent writes.  The metadata should
+always be consistent in spite of any crash.
+
+The 'dirty' state for a cache block changes far too frequently for us
+to keep updating it on the fly.  So we treat it as a hint.  In normal
+operation it will be written when the dm device is suspended.  If the
+system crashes all cache blocks will be assumed dirty when restarted.
+
+Per-block policy hints
+----------------------
+
+Policy plug-ins can store a chunk of data per cache block.  It's up to
+the policy how big this chunk is, but it should be kept small.  Like the
+dirty flags this data is lost if there's a crash so a safe fallback
+value should always be possible.
+
+For instance, the 'mq' policy, which is currently the default policy,
+uses this facility to store the hit count of the cache blocks.  If
+there's a crash this information will be lost, which means the cache
+may be less efficient until those hit counts are regenerated.
+
+Policy hints affect performance, not correctness.
+
+Policy messaging
+----------------
+
+Policies will have different tunables, specific to each one, so we
+need a generic way of getting and setting these.  Device-mapper
+messages are used.  Refer to cache-policies.txt.
+
+Discard bitset resolution
+-------------------------
+
+We can avoid copying data during migration if we know the block has
+been discarded.  A prime example of this is when mkfs discards the
+whole block device.  We store a bitset tracking the discard state of
+blocks.  However, we allow this bitset to have a different block size
+from the cache blocks.  This is because we need to track the discard
+state for all of the origin device (compare with the dirty bitset
+which is just for the smaller cache device).
+
+Target interface
+================
+
+Constructor
+-----------
+
+ cache <metadata dev> <cache dev> <origin dev> <block size>
+       <#feature args> [<feature arg>]*
+       <policy> <#policy args> [<policy arg>]*
+
+ metadata dev    : fast device holding the persistent metadata
+ cache dev      : fast device holding cached data blocks
+ origin dev     : slow device holding original data blocks
+ block size      : cache unit size in sectors
+
+ #feature args   : number of feature arguments passed
+ feature args    : writethrough.  (The default is writeback.)
+
+ policy          : the replacement policy to use
+ #policy args    : an even number of arguments corresponding to
+                   key/value pairs passed to the policy
+ policy args     : key/value pairs passed to the policy
+                  E.g. 'sequential_threshold 1024'
+                  See cache-policies.txt for details.
+
+Optional feature arguments are:
+   writethrough  : write through caching that prohibits cache block
+                  content from being different from origin block content.
+                  Without this argument, the default behaviour is to write
+                  back cache block contents later for performance reasons,
+                  so they may differ from the corresponding origin blocks.
+
+A policy called 'default' is always registered.  This is an alias for
+the policy we currently think is giving best all round performance.
+
+As the default policy could vary between kernels, if you are relying on
+the characteristics of a specific policy, always request it by name.
+
+Status
+------
+
+<#used metadata blocks>/<#total metadata blocks> <#read hits> <#read misses>
+<#write hits> <#write misses> <#demotions> <#promotions> <#blocks in cache>
+<#dirty> <#features> <features>* <#core args> <core args>* <#policy args>
+<policy args>*
+
+#used metadata blocks    : Number of metadata blocks used
+#total metadata blocks   : Total number of metadata blocks
+#read hits               : Number of times a READ bio has been mapped
+                            to the cache
+#read misses             : Number of times a READ bio has been mapped
+                            to the origin
+#write hits              : Number of times a WRITE bio has been mapped
+                            to the cache
+#write misses            : Number of times a WRITE bio has been
+                            mapped to the origin
+#demotions               : Number of times a block has been removed
+                            from the cache
+#promotions              : Number of times a block has been moved to
+                            the cache
+#blocks in cache         : Number of blocks resident in the cache
+#dirty                   : Number of blocks in the cache that differ
+                            from the origin
+#feature args            : Number of feature args to follow
+feature args             : 'writethrough' (optional)
+#core args               : Number of core arguments (must be even)
+core args                : Key/value pairs for tuning the core
+                            e.g. migration_threshold
+#policy args             : Number of policy arguments to follow (must be even)
+policy args              : Key/value pairs
+                            e.g. 'sequential_threshold 1024'
+
+Messages
+--------
+
+Policies will have different tunables, specific to each one, so we
+need a generic way of getting and setting these.  Device-mapper
+messages are used.  (A sysfs interface would also be possible.)
+
+The message format is:
+
+   <key> <value>
+
+E.g.
+   dmsetup message my_cache 0 sequential_threshold 1024
+
+Examples
+========
+
+The test suite can be found here:
+
+https://github.com/jthornber/thinp-test-suite
+
+dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
+       /dev/mapper/ssd /dev/mapper/origin 512 1 writeback default 0'
+dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
+       /dev/mapper/ssd /dev/mapper/origin 1024 1 writeback \
+       mq 4 sequential_threshold 1024 random_threshold 8'
index 56fb62b..b428556 100644 (file)
@@ -30,6 +30,7 @@ The target is named "raid" and it accepts the following parameters:
   raid10        Various RAID10 inspired algorithms chosen by additional params
                - RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
                - RAID1E: Integrated Adjacent Stripe Mirroring
+               - RAID1E: Integrated Offset Stripe Mirroring
                -  and other similar RAID10 variants
 
   Reference: Chapter 4 of
@@ -64,15 +65,15 @@ The target is named "raid" and it accepts the following parameters:
                synchronisation state for each region.
 
         [raid10_copies   <# copies>]
-        [raid10_format   near]
+        [raid10_format   <near|far|offset>]
                These two options are used to alter the default layout of
                a RAID10 configuration.  The number of copies is can be
-               specified, but the default is 2.  There are other variations
-               to how the copies are laid down - the default and only current
-               option is "near".  Near copies are what most people think of
-               with respect to mirroring.  If these options are left
-               unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
-               are given, then the layouts for 2, 3 and 4 devices are:
+               specified, but the default is 2.  There are also three
+               variations to how the copies are laid down - the default
+               is "near".  Near copies are what most people think of with
+               respect to mirroring.  If these options are left unspecified,
+               or 'raid10_copies 2' and/or 'raid10_format near' are given,
+               then the layouts for 2, 3 and 4 devices are:
                2 drives         3 drives          4 drives
                --------         ----------        --------------
                A1  A1           A1  A1  A2        A1  A1  A2  A2
@@ -85,6 +86,33 @@ The target is named "raid" and it accepts the following parameters:
                3-device layout is what might be called a 'RAID1E - Integrated
                Adjacent Stripe Mirroring'.
 
+               If 'raid10_copies 2' and 'raid10_format far', then the layouts
+               for 2, 3 and 4 devices are:
+               2 drives             3 drives             4 drives
+               --------             --------------       --------------------
+               A1  A2               A1   A2   A3         A1   A2   A3   A4
+               A3  A4               A4   A5   A6         A5   A6   A7   A8
+               A5  A6               A7   A8   A9         A9   A10  A11  A12
+               ..  ..               ..   ..   ..         ..   ..   ..   ..
+               A2  A1               A3   A1   A2         A2   A1   A4   A3
+               A4  A3               A6   A4   A5         A6   A5   A8   A7
+               A6  A5               A9   A7   A8         A10  A9   A12  A11
+               ..  ..               ..   ..   ..         ..   ..   ..   ..
+
+               If 'raid10_copies 2' and 'raid10_format offset', then the
+               layouts for 2, 3 and 4 devices are:
+               2 drives       3 drives           4 drives
+               --------       ------------       -----------------
+               A1  A2         A1  A2  A3         A1  A2  A3  A4
+               A2  A1         A3  A1  A2         A2  A1  A4  A3
+               A3  A4         A4  A5  A6         A5  A6  A7  A8
+               A4  A3         A6  A4  A5         A6  A5  A8  A7
+               A5  A6         A7  A8  A9         A9  A10 A11 A12
+               A6  A5         A9  A7  A8         A10 A9  A12 A11
+               ..  ..         ..  ..  ..         ..  ..  ..  ..
+               Here we see layouts closely akin to 'RAID1E - Integrated
+               Offset Stripe Mirroring'.
+
 <#raid_devs>: The number of devices composing the array.
        Each device consists of two entries.  The first is the device
        containing the metadata (if any); the second is the one containing the
@@ -142,3 +170,5 @@ Version History
 1.3.0  Added support for RAID 10
 1.3.1  Allow device replacement/rebuild for RAID 10
 1.3.2   Fix/improve redundancy checking for RAID10
+1.4.0  Non-functional change.  Removes arg from mapping function.
+1.4.1   Add RAID10 "far" and "offset" algorithm support.
diff --git a/Documentation/devicetree/bindings/arc/interrupts.txt b/Documentation/devicetree/bindings/arc/interrupts.txt
new file mode 100644 (file)
index 0000000..9a5d562
--- /dev/null
@@ -0,0 +1,24 @@
+* ARC700 incore Interrupt Controller
+
+  The core interrupt controller provides 32 prioritised interrupts (2 levels)
+  to ARC700 core.
+
+Properties:
+
+- compatible: "snps,arc700-intc"
+- interrupt-controller: This is an interrupt controller.
+- #interrupt-cells: Must be <1>.
+
+  Single Cell "interrupts" property of a device specifies the IRQ number
+  between 0 and 31.
+
+  The intc is accessed via the special ARC AUX register interface, hence the
+  "reg" property is not specified.
+
+Example:
+
+       intc: interrupt-controller {
+               compatible = "snps,arc700-intc";
+               interrupt-controller;
+               #interrupt-cells = <1>;
+       };
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
deleted file mode 100644 (file)
index 6483011..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-Marvell Armada 370 and Armada XP Global Timers
-----------------------------------------------
-
-Required properties:
-- compatible: Should be "marvell,armada-370-xp-timer"
-- interrupts: Should contain the list of Global Timer interrupts
-- reg: Should contain the base address of the Global Timer registers
-- clocks: clock driving the timer hardware
-
-Optional properties:
-- marvell,timer-25Mhz: Tells whether the Global timer supports the 25
-  Mhz fixed mode (available on Armada XP and not on Armada 370)
diff --git a/Documentation/devicetree/bindings/arm/armadeus.txt b/Documentation/devicetree/bindings/arm/armadeus.txt
new file mode 100644 (file)
index 0000000..9821283
--- /dev/null
@@ -0,0 +1,6 @@
+Armadeus i.MX Platforms Device Tree Bindings
+-----------------------------------------------
+
+APF51: i.MX51 based module.
+Required root node properties:
+    - compatible = "armadeus,imx51-apf51", "fsl,imx51";
index f798187..e935d7d 100644 (file)
@@ -5,6 +5,14 @@ i.MX23 Evaluation Kit
 Required root node properties:
     - compatible = "fsl,imx23-evk", "fsl,imx23";
 
+i.MX25 Product Development Kit
+Required root node properties:
+    - compatible = "fsl,imx25-pdk", "fsl,imx25";
+
+i.MX27 Product Development Kit
+Required root node properties:
+    - compatible = "fsl,imx27-pdk", "fsl,imx27";
+
 i.MX28 Evaluation Kit
 Required root node properties:
     - compatible = "fsl,imx28-evk", "fsl,imx28";
index 04ad478..2a0c904 100644 (file)
@@ -171,6 +171,7 @@ clocks and IDs.
        can_sel                 156
        can1_serial_gate        157
        can1_ipg_gate           158
+       owire_gate              159
 
 Examples (for mx53):
 
index f73fdf5..969b38e 100644 (file)
@@ -203,6 +203,8 @@ clocks and IDs.
        pcie_ref                188
        pcie_ref_125m           189
        enet_ref                190
+       usbphy1_gate            191
+       usbphy2_gate            192
 
 Examples:
 
index 5bb3dfb..d58675e 100644 (file)
@@ -3,59 +3,61 @@
 Required properties:
 - compatible: "snps,dma-spear1340"
 - reg: Address range of the DMAC registers
-- interrupt-parent: Should be the phandle for the interrupt controller
-  that services interrupts for this device
 - interrupt: Should contain the DMAC interrupt number
-- nr_channels: Number of channels supported by hardware
-- is_private: The device channels should be marked as private and not for by the
-  general purpose DMA channel allocator. False if not passed.
+- dma-channels: Number of channels supported by hardware
+- dma-requests: Number of DMA request lines supported, up to 16
+- dma-masters: Number of AHB masters supported by the controller
+- #dma-cells: must be <3>
 - chan_allocation_order: order of allocation of channel, 0 (default): ascending,
   1: descending
 - chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1:
   increase from chan n->0
 - block_size: Maximum block size supported by the controller
-- nr_masters: Number of AHB masters supported by the controller
 - data_width: Maximum data width supported by hardware per AHB master
   (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
-- slave_info:
-       - bus_id: name of this device channel, not just a device name since
-         devices may have more than one channel e.g. "foo_tx". For using the
-         dw_generic_filter(), slave drivers must pass exactly this string as
-         param to filter function.
-       - cfg_hi: Platform-specific initializer for the CFG_HI register
-       - cfg_lo: Platform-specific initializer for the CFG_LO register
-       - src_master: src master for transfers on allocated channel.
-       - dst_master: dest master for transfers on allocated channel.
+
+
+Optional properties:
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- is_private: The device channels should be marked as private and not for by the
+  general purpose DMA channel allocator. False if not passed.
 
 Example:
 
-       dma@fc000000 {
+       dmahost: dma@fc000000 {
                compatible = "snps,dma-spear1340";
                reg = <0xfc000000 0x1000>;
                interrupt-parent = <&vic1>;
                interrupts = <12>;
 
-               nr_channels = <8>;
+               dma-channels = <8>;
+               dma-requests = <16>;
+               dma-masters = <2>;
+               #dma-cells = <3>;
                chan_allocation_order = <1>;
                chan_priority = <1>;
                block_size = <0xfff>;
-               nr_masters = <2>;
                data_width = <3 3 0 0>;
+       };
 
-               slave_info {
-                       uart0-tx {
-                               bus_id = "uart0-tx";
-                               cfg_hi = <0x4000>;      /* 0x8 << 11 */
-                               cfg_lo = <0>;
-                               src_master = <0>;
-                               dst_master = <1>;
-                       };
-                       spi0-tx {
-                               bus_id = "spi0-tx";
-                               cfg_hi = <0x2000>;      /* 0x4 << 11 */
-                               cfg_lo = <0>;
-                               src_master = <0>;
-                               dst_master = <0>;
-                       };
-               };
+DMA clients connected to the Designware DMA controller must use the format
+described in the dma.txt file, using a four-cell specifier for each channel.
+The four cells in order are:
+
+1. A phandle pointing to the DMA controller
+2. The DMA request line number
+3. Source master for transfers on allocated channel
+4. Destination master for transfers on allocated channel
+
+Example:
+       
+       serial@e0000000 {
+               compatible = "arm,pl011", "arm,primecell";
+               reg = <0xe0000000 0x1000>;
+               interrupts = <0 35 0x4>;
+               status = "disabled";
+               dmas = <&dmahost 12 0 1>,
+                       <&dmahost 13 0 1>;
+               dma-names = "rx", "rx";
        };
diff --git a/Documentation/devicetree/bindings/metag/meta-intc.txt b/Documentation/devicetree/bindings/metag/meta-intc.txt
new file mode 100644 (file)
index 0000000..8c47dcb
--- /dev/null
@@ -0,0 +1,82 @@
+* Meta External Trigger Controller Binding
+
+This binding specifies what properties must be available in the device tree
+representation of a Meta external trigger controller.
+
+Required properties:
+
+    - compatible: Specifies the compatibility list for the interrupt controller.
+      The type shall be <string> and the value shall include "img,meta-intc".
+
+    - num-banks: Specifies the number of interrupt banks (each of which can
+      handle 32 interrupt sources).
+
+    - interrupt-controller: The presence of this property identifies the node
+      as an interrupt controller. No property value shall be defined.
+
+    - #interrupt-cells: Specifies the number of cells needed to encode an
+      interrupt source. The type shall be a <u32> and the value shall be 2.
+
+    - #address-cells: Specifies the number of cells needed to encode an
+      address. The type shall be <u32> and the value shall be 0. As such,
+      'interrupt-map' nodes do not have to specify a parent unit address.
+
+Optional properties:
+
+    - no-mask: The controller doesn't have any mask registers.
+
+* Interrupt Specifier Definition
+
+  Interrupt specifiers consists of 2 cells encoded as follows:
+
+    - <1st-cell>: The interrupt-number that identifies the interrupt source.
+
+    - <2nd-cell>: The Linux interrupt flags containing level-sense information,
+                  encoded as follows:
+                    1 = edge triggered
+                    4 = level-sensitive
+
+* Examples
+
+Example 1:
+
+       /*
+        * Meta external trigger block
+        */
+       intc: intc {
+               // This is an interrupt controller node.
+               interrupt-controller;
+
+               // No address cells so that 'interrupt-map' nodes which
+               // reference this interrupt controller node do not need a parent
+               // address specifier.
+               #address-cells = <0>;
+
+               // Two cells to encode interrupt sources.
+               #interrupt-cells = <2>;
+
+               // Number of interrupt banks
+               num-banks = <2>;
+
+               // No HWMASKEXT is available (specify on Chorus2 and Comet ES1)
+               no-mask;
+
+               // Compatible with Meta hardware trigger block.
+               compatible = "img,meta-intc";
+       };
+
+Example 2:
+
+       /*
+        * An interrupt generating device that is wired to a Meta external
+        * trigger block.
+        */
+       uart1: uart@0x02004c00 {
+               // Interrupt source '5' that is level-sensitive.
+               // Note that there are only two cells as specified in the
+               // interrupt parent's '#interrupt-cells' property.
+               interrupts = <5 4 /* level */>;
+
+               // The interrupt controller that this device is wired to.
+               interrupt-parent = <&intc>;
+       };
diff --git a/Documentation/devicetree/bindings/mips/cpu_irq.txt b/Documentation/devicetree/bindings/mips/cpu_irq.txt
new file mode 100644 (file)
index 0000000..13aa4b6
--- /dev/null
@@ -0,0 +1,47 @@
+MIPS CPU interrupt controller
+
+On MIPS the mips_cpu_intc_init() helper can be used to initialize the 8 CPU
+IRQs from a devicetree file and create an irq_domain for the IRQ controller.
+
+With the irq_domain in place we can describe how the 8 IRQs are wired to the
+platform's internal interrupt controller cascade.
+
+Below is an example of a platform describing the cascade inside the devicetree
+and the code used to load it inside arch_init_irq().
+
+Required properties:
+- compatible : Should be "mti,cpu-interrupt-controller"
+
+Example devicetree:
+       cpu-irq: cpu-irq@0 {
+               #address-cells = <0>;
+
+               interrupt-controller;
+               #interrupt-cells = <1>;
+
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       intc: intc@200 {
+               compatible = "ralink,rt2880-intc";
+               reg = <0x200 0x100>;
+
+               interrupt-controller;
+               #interrupt-cells = <1>;
+
+               interrupt-parent = <&cpu-irq>;
+               interrupts = <2>;
+       };
+
+
+Example platform irq.c:
+static struct of_device_id __initdata of_irq_ids[] = {
+       { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+       { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
+       {},
+};
+
+void __init arch_init_irq(void)
+{
+       of_irq_init(of_irq_ids);
+}
diff --git a/Documentation/devicetree/bindings/mtd/elm.txt b/Documentation/devicetree/bindings/mtd/elm.txt
new file mode 100644 (file)
index 0000000..8c1528c
--- /dev/null
@@ -0,0 +1,16 @@
+Error location module
+
+Required properties:
+- compatible: Must be "ti,am33xx-elm"
+- reg: physical base address and size of the registers map.
+- interrupts: Interrupt number for the elm.
+
+Optional properties:
+- ti,hwmods: Name of the hwmod associated to the elm
+
+Example:
+elm: elm@0 {
+       compatible = "ti,am33xx-elm";
+       reg = <0x48080000 0x2000>;
+       interrupts = <4>;
+};
index dab7847..61c5ec8 100644 (file)
@@ -26,6 +26,9 @@ file systems on embedded devices.
  - linux,mtd-name: allow to specify the mtd name for retro capability with
    physmap-flash drivers as boot loader pass the mtd partition via the old
    device name physmap-flash.
+ - use-advanced-sector-protection: boolean to enable support for the
+   advanced sector protection (Spansion: PPB - Persistent Protection
+   Bits) locking.
 
 For JEDEC compatible devices, the following additional properties
 are defined:
diff --git a/Documentation/devicetree/bindings/serial/lantiq_asc.txt b/Documentation/devicetree/bindings/serial/lantiq_asc.txt
new file mode 100644 (file)
index 0000000..5b78591
--- /dev/null
@@ -0,0 +1,16 @@
+Lantiq SoC ASC serial controller
+
+Required properties:
+- compatible : Should be "lantiq,asc"
+- reg : Address and length of the register set for the device
+- interrupts: the 3 (tx rx err) interrupt numbers. The interrupt specifier
+  depends on the interrupt-parent interrupt controller.
+
+Example:
+
+asc1: serial@E100C00 {
+       compatible = "lantiq,asc";
+       reg = <0xE100C00 0x400>;
+       interrupt-parent = <&icu0>;
+       interrupts = <112 113 114>;
+};
diff --git a/Documentation/devicetree/bindings/thermal/dove-thermal.txt b/Documentation/devicetree/bindings/thermal/dove-thermal.txt
new file mode 100644 (file)
index 0000000..6f47467
--- /dev/null
@@ -0,0 +1,18 @@
+* Dove Thermal
+
+This driver is for Dove SoCs which contain a thermal sensor.
+
+Required properties:
+- compatible : "marvell,dove-thermal"
+- reg : Address range of the thermal registers
+
+The reg properties should contain two ranges. The first is for the
+three Thermal Manager registers, while the second range contains the
+Thermal Diode Control Registers.
+
+Example:
+
+       thermal@10078 {
+               compatible = "marvell,dove-thermal";
+               reg = <0xd001c 0x0c>, <0xd005c 0x08>;
+       };
diff --git a/Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt b/Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt
new file mode 100644 (file)
index 0000000..8c0f5eb
--- /dev/null
@@ -0,0 +1,15 @@
+* Kirkwood Thermal
+
+This version is for Kirkwood 88F8262 & 88F6283 SoCs. Other kirkwoods
+don't contain a thermal sensor.
+
+Required properties:
+- compatible : "marvell,kirkwood-thermal"
+- reg : Address range of the thermal registers
+
+Example:
+
+       thermal@10078 {
+               compatible = "marvell,kirkwood-thermal";
+               reg = <0x10078 0x4>;
+       };
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
new file mode 100644 (file)
index 0000000..28ef498
--- /dev/null
@@ -0,0 +1,29 @@
+* Renesas R-Car Thermal
+
+Required properties:
+- compatible           : "renesas,rcar-thermal"
+- reg                  : Address range of the thermal registers.
+                         The 1st reg will be recognized as common register
+                         if it has "interrupts".
+
+Optional properties:
+
+- interrupts           : use interrupt
+
+Example (non interrupt support):
+
+thermal@e61f0100 {
+       compatible = "renesas,rcar-thermal";
+       reg = <0xe61f0100 0x38>;
+};
+
+Example (interrupt support):
+
+thermal@e61f0000 {
+       compatible = "renesas,rcar-thermal";
+       reg = <0xe61f0000 0x14
+               0xe61f0100 0x38
+               0xe61f0200 0x38
+               0xe61f0300 0x38>;
+       interrupts = <0 69 4>;
+};
diff --git a/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt b/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
new file mode 100644 (file)
index 0000000..3638112
--- /dev/null
@@ -0,0 +1,15 @@
+Marvell Armada 370 and Armada XP Timers
+---------------------------------------
+
+Required properties:
+- compatible: Should be "marvell,armada-370-xp-timer"
+- interrupts: Should contain the list of Global Timer interrupts and
+  then local timer interrupts
+- reg: Should contain location and length for timers register. First
+  pair for the Global Timer registers, second pair for the
+  local/private timers.
+- clocks: clock driving the timer hardware
+
+Optional properties:
+- marvell,timer-25Mhz: Tells whether the Global timer supports the 25
+  Mhz fixed mode (available on Armada XP and not on Armada 370)
diff --git a/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt b/Documentation/devicetree/bindings/w1/fsl-imx-owire.txt
new file mode 100644 (file)
index 0000000..ecf42c0
--- /dev/null
@@ -0,0 +1,19 @@
+* Freescale i.MX One wire bus master controller
+
+Required properties:
+- compatible : should be "fsl,imx21-owire"
+- reg : Address and length of the register set for the device
+
+Optional properties:
+- clocks : phandle of clock that supplies the module (required if platform
+               clock bindings use device tree)
+
+Example:
+
+- From imx53.dtsi:
+owire: owire@63fa4000 {
+       compatible = "fsl,imx53-owire", "fsl,imx21-owire";
+       reg = <0x63fa4000 0x4000>;
+       clocks = <&clks 159>;
+       status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-at91rm9200-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-at91rm9200-wdt.txt
new file mode 100644 (file)
index 0000000..d4d86cf
--- /dev/null
@@ -0,0 +1,9 @@
+Atmel AT91RM9200 System Timer Watchdog
+
+Required properties:
+- compatible: must be "atmel,at91rm9200-wdt".
+
+Example:
+       watchdog@fffffd00 {
+               compatible = "atmel,at91rm9200-wdt";
+       };
index 2957ebb..fcdd48f 100644 (file)
@@ -7,9 +7,13 @@ Required properties:
 - reg: physical base address of the controller and length of memory mapped
   region.
 
+Optional properties:
+- timeout-sec: contains the watchdog timeout in seconds.
+
 Example:
 
        watchdog@fffffd40 {
                compatible = "atmel,at91sam9260-wdt";
                reg = <0xfffffd40 0x10>;
+               timeout-sec = <10>;
        };
index 0b2503a..5dc8d30 100644 (file)
@@ -5,10 +5,15 @@ Required Properties:
 - Compatibility : "marvell,orion-wdt"
 - reg          : Address of the timer registers
 
+Optional properties:
+
+- timeout-sec  : Contains the watchdog timeout in seconds
+
 Example:
 
        wdt@20300 {
                compatible = "marvell,orion-wdt";
                reg = <0x20300 0x28>;
+               timeout-sec = <10>;
                status = "okay";
        };
index 7c7f688..556d06c 100644 (file)
@@ -5,9 +5,13 @@ Required properties:
 - reg: physical base address of the controller and length of memory mapped
   region.
 
+Optional properties:
+- timeout-sec: contains the watchdog timeout in seconds.
+
 Example:
 
        watchdog@4003C000 {
                compatible = "nxp,pnx4008-wdt";
                reg = <0x4003C000 0x1000>;
+               timeout-sec = <10>;
        };
diff --git a/Documentation/devicetree/bindings/watchdog/qca-ar7130-wdt.txt b/Documentation/devicetree/bindings/watchdog/qca-ar7130-wdt.txt
new file mode 100644 (file)
index 0000000..7a89e5f
--- /dev/null
@@ -0,0 +1,13 @@
+* Qualcomm Atheros AR7130 Watchdog Timer (WDT) Controller
+
+Required properties:
+- compatible: must be "qca,ar7130-wdt"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+
+Example:
+
+wdt@18060008 {
+       compatible = "qca,ar9330-wdt", "qca,ar7130-wdt";
+       reg = <0x18060008 0x8>;
+};
index ce0d8e7..2aa486c 100644 (file)
@@ -9,3 +9,6 @@ Required properties:
 - reg : base physical address of the controller and length of memory mapped
        region.
 - interrupts : interrupt number to the cpu.
+
+Optional properties:
+- timeout-sec : contains the watchdog timeout in seconds.
index 0188903..4966b1b 100644 (file)
@@ -302,7 +302,11 @@ Access to a dma_buf from the kernel context involves three steps:
       void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 
    The vmap call can fail if there is no vmap support in the exporter, or if it
-   runs out of vmalloc space. Fallback to kmap should be implemented.
+   runs out of vmalloc space. Fallback to kmap should be implemented. Note that
+   the dma-buf layer keeps a reference count for all vmap access and calls down
+   into the exporter's vmap function only when no vmapping exists, and only
+   unmaps it once. Protection against concurrent vmap/vunmap calls is provided
+   by taking the dma_buf->lock mutex.
 
 3. Finish access
 
index f48e0c6..0706d32 100644 (file)
@@ -10,6 +10,7 @@ be able to use diff(1).
 --------------------------- dentry_operations --------------------------
 prototypes:
        int (*d_revalidate)(struct dentry *, unsigned int);
+       int (*d_weak_revalidate)(struct dentry *, unsigned int);
        int (*d_hash)(const struct dentry *, const struct inode *,
                        struct qstr *);
        int (*d_compare)(const struct dentry *, const struct inode *,
@@ -25,6 +26,7 @@ prototypes:
 locking rules:
                rename_lock     ->d_lock        may block       rcu-walk
 d_revalidate:  no              no              yes (ref-walk)  maybe
+d_weak_revalidate:no           no              yes             no
 d_hash         no              no              no              maybe
 d_compare:     yes             no              no              maybe
 d_delete:      no              yes             no              no
index 0472c31..4db22f6 100644 (file)
@@ -441,3 +441,7 @@ d_make_root() drops the reference to inode if dentry allocation fails.
 two, it gets "is it an O_EXCL or equivalent?" boolean argument.  Note that
 local filesystems can ignore that argument - they are guaranteed that the
 object doesn't exist.  It's remote/distributed ones that might care...
+--
+[mandatory]
+       FS_REVAL_DOT is gone; if you used to have it, add ->d_weak_revalidate()
+in your dentry operations instead.
index e386909..bc4b06b 100644 (file)
@@ -900,6 +900,7 @@ defined:
 
 struct dentry_operations {
        int (*d_revalidate)(struct dentry *, unsigned int);
+       int (*d_weak_revalidate)(struct dentry *, unsigned int);
        int (*d_hash)(const struct dentry *, const struct inode *,
                        struct qstr *);
        int (*d_compare)(const struct dentry *, const struct inode *,
@@ -915,8 +916,13 @@ struct dentry_operations {
 
   d_revalidate: called when the VFS needs to revalidate a dentry. This
        is called whenever a name look-up finds a dentry in the
-       dcache. Most filesystems leave this as NULL, because all their
-       dentries in the dcache are valid
+       dcache. Most local filesystems leave this as NULL, because all their
+       dentries in the dcache are valid. Network filesystems are different
+       since things can change on the server without the client necessarily
+       being aware of it.
+
+       This function should return a positive value if the dentry is still
+       valid, and zero or a negative error code if it isn't.
 
        d_revalidate may be called in rcu-walk mode (flags & LOOKUP_RCU).
        If in rcu-walk mode, the filesystem must revalidate the dentry without
@@ -927,6 +933,20 @@ struct dentry_operations {
        If a situation is encountered that rcu-walk cannot handle, return
        -ECHILD and it will be called again in ref-walk mode.
 
+ d_weak_revalidate: called when the VFS needs to revalidate a "jumped" dentry.
+       This is called when a path-walk ends at dentry that was not acquired by
+       doing a lookup in the parent directory. This includes "/", "." and "..",
+       as well as procfs-style symlinks and mountpoint traversal.
+
+       In this case, we are less concerned with whether the dentry is still
+       fully correct, but rather that the inode is still valid. As with
+       d_revalidate, most local filesystems will set this to NULL since their
+       dcache entries are always valid.
+
+       This function has the same return code semantics as d_revalidate.
+
+       d_weak_revalidate is only called after leaving rcu-walk mode.
+
   d_hash: called when the VFS adds a dentry to the hash table. The first
        dentry passed to d_hash is the parent directory that the name is
        to be hashed into. The inode is the dentry's inode.
index a686f9c..c858f84 100644 (file)
@@ -388,26 +388,3 @@ config FOO
        depends on BAR && m
 
 limits FOO to module (=m) or disabled (=n).
-
-Kconfig symbol existence
-~~~~~~~~~~~~~~~~~~~~~~~~
-The following two methods produce the same kconfig symbol dependencies
-but differ greatly in kconfig symbol existence (production) in the
-generated config file.
-
-case 1:
-
-config FOO
-       tristate "about foo"
-       depends on BAR
-
-vs. case 2:
-
-if BAR
-config FOO
-       tristate "about foo"
-endif
-
-In case 1, the symbol FOO will always exist in the config file (given
-no other dependencies).  In case 2, the symbol FOO will only exist in
-the config file if BAR is enabled.
index a09f1a6..b8b77bb 100644 (file)
@@ -46,6 +46,12 @@ KCONFIG_OVERWRITECONFIG
 If you set KCONFIG_OVERWRITECONFIG in the environment, Kconfig will not
 break symlinks when .config is a symlink to somewhere else.
 
+CONFIG_
+--------------------------------------------------
+If you set CONFIG_ in the environment, Kconfig will prefix all symbols
+with its value when saving the configuration, instead of using the default,
+"CONFIG_".
+
 ______________________________________________________________________
 Environment variables for '{allyes/allmod/allno/rand}config'
 
index 1da9465..4609e81 100644 (file)
@@ -564,6 +564,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        UART at the specified I/O port or MMIO address,
                        switching to the matching ttyS device later.  The
                        options are the same as for ttyS, above.
+               hvc<n>  Use the hypervisor console device <n>. This is for
+                       both Xen and PowerPC hypervisors.
 
                 If the device connected to the port is not a TTY but a braille
                 device, prepend "brl," before the device type, for instance
@@ -757,6 +759,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        earlyprintk=    [X86,SH,BLACKFIN]
                        earlyprintk=vga
+                       earlyprintk=xen
                        earlyprintk=serial[,ttySn[,baudrate]]
                        earlyprintk=ttySn[,baudrate]
                        earlyprintk=dbgp[debugController#]
@@ -774,6 +777,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        The VGA output is eventually overwritten by the real
                        console.
 
+                       The xen output can only be used by Xen PV guests.
+
        ekgdboc=        [X86,KGDB] Allow early kernel console debugging
                        ekgdboc=kbd
 
@@ -973,6 +978,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                               If specified, z/VM IUCV HVC accepts connections
                               from listed z/VM user IDs only.
 
+       hwthread_map=   [METAG] Comma-separated list of Linux cpu id to
+                               hardware thread id mappings.
+                               Format: <cpu>:<hwthread>
+
        keep_bootcon    [KNL]
                        Do not unregister boot console at start. This is only
                        useful for debugging when something happens in the window
@@ -1640,42 +1649,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        that the amount of memory usable for all allocations
                        is not too small.
 
-       movablemem_map=acpi
-                       [KNL,X86,IA-64,PPC] This parameter is similar to
-                       memmap except it specifies the memory map of
-                       ZONE_MOVABLE.
-                       This option inform the kernel to use Hot Pluggable bit
-                       in flags from SRAT from ACPI BIOS to determine which
-                       memory devices could be hotplugged. The corresponding
-                       memory ranges will be set as ZONE_MOVABLE.
-                       NOTE: Whatever node the kernel resides in will always
-                             be un-hotpluggable.
-
-       movablemem_map=nn[KMG]@ss[KMG]
-                       [KNL,X86,IA-64,PPC] This parameter is similar to
-                       memmap except it specifies the memory map of
-                       ZONE_MOVABLE.
-                       If user specifies memory ranges, the info in SRAT will
-                       be ingored. And it works like the following:
-                       - If more ranges are all within one node, then from
-                         lowest ss to the end of the node will be ZONE_MOVABLE.
-                       - If a range is within a node, then from ss to the end
-                         of the node will be ZONE_MOVABLE.
-                       - If a range covers two or more nodes, then from ss to
-                         the end of the 1st node will be ZONE_MOVABLE, and all
-                         the rest nodes will only have ZONE_MOVABLE.
-                       If memmap is specified at the same time, the
-                       movablemem_map will be limited within the memmap
-                       areas. If kernelcore or movablecore is also specified,
-                       movablemem_map will have higher priority to be
-                       satisfied. So the administrator should be careful that
-                       the amount of movablemem_map areas are not too large.
-                       Otherwise kernel won't have enough memory to start.
-                       NOTE: We don't stop users specifying the node the
-                             kernel resides in as hotpluggable so that this
-                             option can be used as a workaround of firmware
-                              bugs.
-
        MTD_Partition=  [MTD]
                        Format: <name>,<region-number>,<size>,<offset>
 
diff --git a/Documentation/metag/00-INDEX b/Documentation/metag/00-INDEX
new file mode 100644 (file)
index 0000000..db11c51
--- /dev/null
@@ -0,0 +1,4 @@
+00-INDEX
+       - this file
+kernel-ABI.txt
+       - Documents metag ABI details
diff --git a/Documentation/metag/kernel-ABI.txt b/Documentation/metag/kernel-ABI.txt
new file mode 100644 (file)
index 0000000..7b8dee8
--- /dev/null
@@ -0,0 +1,256 @@
+                       ==========================
+                       KERNEL ABIS FOR METAG ARCH
+                       ==========================
+
+This document describes the Linux ABIs for the metag architecture, and has the
+following sections:
+
+ (*) Outline of registers
+ (*) Userland registers
+ (*) Kernel registers
+ (*) System call ABI
+ (*) Calling conventions
+
+
+====================
+OUTLINE OF REGISTERS
+====================
+
+The main Meta core registers are arranged in units:
+
+       UNIT    Type    DESCRIPTION     GP      EXT     PRIV    GLOBAL
+       ======= ======= =============== ======= ======= ======= =======
+       CT      Special Control unit
+       D0      General Data unit 0     0-7     8-15    16-31   16-31
+       D1      General Data unit 1     0-7     8-15    16-31   16-31
+       A0      General Address unit 0  0-3     4-7      8-15    8-15
+       A1      General Address unit 1  0-3     4-7      8-15    8-15
+       PC      Special PC unit         0                1
+       PORT    Special Ports
+       TR      Special Trigger unit                     0-7
+       TT      Special Trace unit                       0-5
+       FX      General FP unit                 0-15
+
+GP registers form part of the main context.
+
+Extended context registers (EXT) may not be present on all hardware threads and
+can be context switched if support is enabled and the appropriate bits are set
+in e.g. the D0.8 register to indicate what extended state to preserve.
+
+Global registers are shared between threads and are privilege protected.
+
+See arch/metag/include/asm/metag_regs.h for definitions relating to core
+registers and the fields and bits they contain. See the TRMs for further details
+about special registers.
+
+Several special registers are preserved in the main context, these are the
+interesting ones:
+
+       REG     (ALIAS)         PURPOSE
+       ======================= ===============================================
+       CT.1    (TXMODE)        Processor mode bits (particularly for DSP)
+       CT.2    (TXSTATUS)      Condition flags and LSM_STEP (MGET/MSET step)
+       CT.3    (TXRPT)         Branch repeat counter
+       PC.0    (PC)            Program counter
+
+Some of the general registers have special purposes in the ABI and therefore
+have aliases:
+
+       D0 REG  (ALIAS) PURPOSE         D1 REG  (ALIAS) PURPOSE
+       =============== =============== =============== =======================
+       D0.0    (D0Re0) 32bit result    D1.0    (D1Re0) Top half of 64bit result
+       D0.1    (D0Ar6) Argument 6      D1.1    (D1Ar5) Argument 5
+       D0.2    (D0Ar4) Argument 4      D1.2    (D1Ar3) Argument 3
+       D0.3    (D0Ar2) Argument 2      D1.3    (D1Ar1) Argument 1
+       D0.4    (D0FrT) Frame temp      D1.4    (D1RtP) Return pointer
+       D0.5            Call preserved  D1.5            Call preserved
+       D0.6            Call preserved  D1.6            Call preserved
+       D0.7            Call preserved  D1.7            Call preserved
+
+       A0 REG  (ALIAS) PURPOSE         A1 REG  (ALIAS) PURPOSE
+       =============== =============== =============== =======================
+       A0.0    (A0StP) Stack pointer   A1.0    (A1GbP) Global base pointer
+       A0.1    (A0FrP) Frame pointer   A1.1    (A1LbP) Local base pointer
+       A0.2                            A1.2
+       A0.3                            A1.3
+
+
+==================
+USERLAND REGISTERS
+==================
+
+All the general purpose D0, D1, A0, A1 registers are preserved when entering the
+kernel (including asynchronous events such as interrupts and timer ticks) except
+the following which have special purposes in the ABI:
+
+       REGISTERS       WHEN    STATUS          PURPOSE
+       =============== ======= =============== ===============================
+       D0.8            DSP     Preserved       ECH, determines what extended
+                                               DSP state to preserve.
+       A0.0    (A0StP) ALWAYS  Preserved       Stack >= A0StP may be clobbered
+                                               at any time by the creation of a
+                                               signal frame.
+       A1.0    (A1GbP) SMP     Clobbered       Used as temporary for loading
+                                               kernel stack pointer and saving
+                                               core context.
+       A0.15           !SMP    Protected       Stores kernel stack pointer.
+       A1.15           ALWAYS  Protected       Stores kernel base pointer.
+
+On UP A0.15 is used to store the kernel stack pointer for storing the userland
+context. A0.15 is global between hardware threads though which means it cannot
+be used on SMP for this purpose. Since no protected local registers are
+available A1GbP is reserved for use as a temporary to allow a percpu stack
+pointer to be loaded for storing the rest of the context.
+
+
+================
+KERNEL REGISTERS
+================
+
+When in the kernel the following registers have special purposes in the ABI:
+
+       REGISTERS       WHEN    STATUS          PURPOSE
+       =============== ======= =============== ===============================
+       A0.0    (A0StP) ALWAYS  Preserved       Stack >= A0StP may be clobbered
+                                               at any time by the creation of
+                                               an irq signal frame.
+       A1.0    (A1GbP) ALWAYS  Preserved       Reserved (kernel base pointer).
+
+
+===============
+SYSTEM CALL ABI
+===============
+
+When a system call is made, the following registers are effective:
+
+       REGISTERS       CALL                    RETURN
+       =============== ======================= ===============================
+       D0.0    (D0Re0)                         Return value (or -errno)
+       D1.0    (D1Re0) System call number      Clobbered
+       D0.1    (D0Ar6) Syscall arg #6          Preserved
+       D1.1    (D1Ar5) Syscall arg #5          Preserved
+       D0.2    (D0Ar4) Syscall arg #4          Preserved
+       D1.2    (D1Ar3) Syscall arg #3          Preserved
+       D0.3    (D0Ar2) Syscall arg #2          Preserved
+       D1.3    (D1Ar1) Syscall arg #1          Preserved
+
+Due to the limited number of argument registers and some system calls with badly
+aligned 64-bit arguments, 64-bit values are always packed in consecutive
+arguments, even if this is contrary to the normal calling conventions (where the
+two halves would go in a matching pair of data registers).
+
+For example fadvise64_64 usually has the signature:
+
+       long sys_fadvise64_64(i32 fd, i64 offs, i64 len, i32 advice);
+
+But for metag fadvise64_64 is wrapped so that the 64-bit arguments are packed:
+
+       long sys_fadvise64_64_metag(i32 fd,      i32 offs_lo,
+                                   i32 offs_hi, i32 len_lo,
+                                   i32 len_hi,  i32 advice)
+
+So the arguments are packed in the registers like this:
+
+       D0 REG  (ALIAS) VALUE           D1 REG  (ALIAS) VALUE
+       =============== =============== =============== =======================
+       D0.1    (D0Ar6) advice          D1.1    (D1Ar5) hi(len)
+       D0.2    (D0Ar4) lo(len)         D1.2    (D1Ar3) hi(offs)
+       D0.3    (D0Ar2) lo(offs)        D1.3    (D1Ar1) fd
+
+
+===================
+CALLING CONVENTIONS
+===================
+
+These calling conventions apply to both user and kernel code. The stack grows
+from low addresses to high addresses in the metag ABI. The stack pointer (A0StP)
+should always point to the next free address on the stack and should at all
+times be 64-bit aligned. The following registers are effective at the point of a
+call:
+
+       REGISTERS       CALL                    RETURN
+       =============== ======================= ===============================
+       D0.0    (D0Re0)                         32bit return value
+       D1.0    (D1Re0)                         Upper half of 64bit return value
+       D0.1    (D0Ar6) 32bit argument #6       Clobbered
+       D1.1    (D1Ar5) 32bit argument #5       Clobbered
+       D0.2    (D0Ar4) 32bit argument #4       Clobbered
+       D1.2    (D1Ar3) 32bit argument #3       Clobbered
+       D0.3    (D0Ar2) 32bit argument #2       Clobbered
+       D1.3    (D1Ar1) 32bit argument #1       Clobbered
+       D0.4    (D0FrT)                         Clobbered
+       D1.4    (D1RtP) Return pointer          Clobbered
+       D{0-1}.{5-7}                            Preserved
+       A0.0    (A0StP) Stack pointer           Preserved
+       A1.0    (A1GbP)                         Preserved
+       A0.1    (A0FrP) Frame pointer           Preserved
+       A1.1    (A1LbP)                         Preserved
+       A{0-1},{2-3}                            Clobbered
+
+64-bit arguments are placed in matching pairs of registers (i.e. the same
+register number in both D0 and D1 units), with the least significant half in D0
+and the most significant half in D1, leaving a gap where necessary. Further
+arguments are stored on the stack in reverse order (earlier arguments at higher
+addresses):
+
+       ADDRESS         0     1     2     3     4     5     6     7
+       =============== ===== ===== ===== ===== ===== ===== ===== =====
+       A0StP       -->
+       A0StP-0x08      32bit argument #8       32bit argument #7
+       A0StP-0x10      32bit argument #10      32bit argument #9
+
+Function prologues tend to look a bit like this:
+
+       /* If frame pointer in use, move it to frame temp register so it can be
+          easily pushed onto stack */
+       MOV     D0FrT,A0FrP
+
+       /* If frame pointer in use, set it to stack pointer */
+       ADD     A0FrP,A0StP,#0
+
+       /* Preserve D0FrT, D1RtP, D{0-1}.{5-7} on stack, incrementing A0StP */
+       MSETL   [A0StP++],D0FrT,D0.5,D0.6,D0.7
+
+       /* Allocate some stack space for local variables */
+       ADD     A0StP,A0StP,#0x10
+
+At this point the stack would look like this:
+
+       ADDRESS         0     1     2     3     4     5     6     7
+       =============== ===== ===== ===== ===== ===== ===== ===== =====
+       A0StP       -->
+       A0StP-0x08
+       A0StP-0x10
+       A0StP-0x18      Old D0.7                Old D1.7
+       A0StP-0x20      Old D0.6                Old D1.6
+       A0StP-0x28      Old D0.5                Old D1.5
+       A0FrP       --> Old A0FrP (frame ptr)   Old D1RtP (return ptr)
+       A0FrP-0x08      32bit argument #8       32bit argument #7
+       A0FrP-0x10      32bit argument #10      32bit argument #9
+
+Function epilogues tend to differ depending on the use of a frame pointer. An
+example of a frame pointer epilogue:
+
+       /* Restore D0FrT, D1RtP, D{0-1}.{5-7} from stack, incrementing A0FrP */
+       MGETL   D0FrT,D0.5,D0.6,D0.7,[A0FrP++]
+       /* Restore stack pointer to where frame pointer was before increment */
+       SUB     A0StP,A0FrP,#0x20
+       /* Restore frame pointer from frame temp */
+       MOV     A0FrP,D0FrT
+       /* Return to caller via restored return pointer */
+       MOV     PC,D1RtP
+
+If the function hasn't touched the frame pointer, MGETL cannot be safely used
+with A0StP as it always increments and that would expose the stack to clobbering
+by interrupts (kernel) or signals (user). Therefore it's common to see the MGETL
+split into separate GETL instructions:
+
+       /* Restore D0FrT, D1RtP, D{0-1}.{5-7} from stack */
+       GETL    D0FrT,D1RtP,[A0StP+#-0x30]
+       GETL    D0.5,D1.5,[A0StP+#-0x28]
+       GETL    D0.6,D1.6,[A0StP+#-0x20]
+       GETL    D0.7,D1.7,[A0StP+#-0x18]
+       /* Restore stack pointer */
+       SUB     A0StP,A0StP,#0x30
+       /* Return to caller via restored return pointer */
+       MOV     PC,D1RtP
index c0aab98..949d5dc 100644 (file)
@@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
      Proto [2 bytes]
      Raw protocol(IP, IPv6, etc) frame.
 
+  3.3 Multiqueue tuntap interface:
+
+  From version 3.8, Linux supports multiqueue tuntap which can use multiple
+  file descriptors (queues) to parallelize packets sending or receiving. The
+  device allocation is the same as before, and if user wants to create multiple
+  queues, TUNSETIFF with the same device name must be called many times with
+  IFF_MULTI_QUEUE flag.
+
+  char *dev should be the name of the device, queues is the number of queues to
+  be created, fds is used to store and return the file descriptors (queues)
+  created to the caller. Each file descriptor serves as the interface of a
+  queue which can be accessed by userspace.
+
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int tun_alloc_mq(char *dev, int queues, int *fds)
+  {
+      struct ifreq ifr;
+      int fd, err, i;
+
+      if (!dev)
+          return -1;
+
+      memset(&ifr, 0, sizeof(ifr));
+      /* Flags: IFF_TUN   - TUN device (no Ethernet headers)
+       *        IFF_TAP   - TAP device
+       *
+       *        IFF_NO_PI - Do not provide packet information
+       *        IFF_MULTI_QUEUE - Create a queue of multiqueue device
+       */
+      ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
+      strcpy(ifr.ifr_name, dev);
+
+      for (i = 0; i < queues; i++) {
+          if ((fd = open("/dev/net/tun", O_RDWR)) < 0)
+             goto err;
+          err = ioctl(fd, TUNSETIFF, (void *)&ifr);
+          if (err) {
+             close(fd);
+             goto err;
+          }
+          fds[i] = fd;
+      }
+
+      return 0;
+  err:
+      for (--i; i >= 0; i--)
+          close(fds[i]);
+      return err;
+  }
+
+  A new ioctl(TUNSETQUEUE) was introduced to enable or disable a queue. When
+  calling it with the IFF_DETACH_QUEUE flag, the queue is disabled. When
+  calling it with the IFF_ATTACH_QUEUE flag, the queue is enabled. The queue is
+  enabled by default after it is created through TUNSETIFF.
+
+  fd is the file descriptor (queue) that we want to enable or disable, when
+  enable is true we enable it, otherwise we disable it
+
+  #include <linux/if.h>
+  #include <linux/if_tun.h>
+
+  int tun_set_queue(int fd, int enable)
+  {
+      struct ifreq ifr;
+
+      memset(&ifr, 0, sizeof(ifr));
+
+      if (enable)
+         ifr.ifr_flags = IFF_ATTACH_QUEUE;
+      else
+         ifr.ifr_flags = IFF_DETACH_QUEUE;
+
+      return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
+  }
+
 Universal TUN/TAP device driver Frequently Asked Question.
    
 1. What platforms are supported by TUN/TAP driver ?
index da03146..09673c7 100644 (file)
@@ -1,3 +1,12 @@
+Release Date    : Sat. Feb 9, 2013 17:00:00 PST 2013 -
+                       (emaild-id:megaraidlinux@lsi.com)
+                       Adam Radford
+Current Version : 06.506.00.00-rc1
+Old Version     : 06.504.01.00-rc1
+    1. Add 4k FastPath DIF support.
+    2. Dont load DevHandle unless FastPath enabled.
+    3. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Mon. Oct 1, 2012 17:00:00 PST 2012 -
                        (emaild-id:megaraidlinux@lsi.com)
                        Adam Radford
diff --git a/Documentation/thermal/exynos_thermal_emulation b/Documentation/thermal/exynos_thermal_emulation
new file mode 100644 (file)
index 0000000..b73bbfb
--- /dev/null
@@ -0,0 +1,53 @@
+EXYNOS EMULATION MODE
+========================
+
+Copyright (C) 2012 Samsung Electronics
+
+Written by Jonghwa Lee <jonghwa3.lee@samsung.com>
+
+Description
+-----------
+
+Exynos 4x12 (4212, 4412) and 5 series provide emulation mode for thermal management unit.
+Thermal emulation mode supports software debug for TMU's operation. User can set temperature
+manually with software code and TMU will read current temperature from user value not from
+sensor's value.
+
+Enabling the CONFIG_EXYNOS_THERMAL_EMUL option makes this support available.
+When it's enabled, sysfs node will be created under
+/sys/bus/platform/devices/'exynos device name'/ with name of 'emulation'.
+
+The sysfs node, 'emulation', will contain the value 0 for the initial state. When you
+write any temperature you want to the sysfs node, it automatically enables emulation
+mode and the current temperature will be changed to it.
+(Exynos also supports a user-changeable delay time which would be used to delay the
+ temperature change. However, this node only uses the same delay as the real sensing time, 938us.)
+
+Exynos emulation mode requires the value change and the enabling to be synchronized.
+It means when you want to update any value of delay or next temperature, you have to
+enable emulation mode at the same time. (Or you have to keep the mode enabled.) If you
+don't, it fails to change the value to the updated one and just uses the last successful
+value repeatedly. That's why this node gives users the right to change temperature only.
+Just one interface makes it simpler to use.
+
+Disabling emulation mode only requires writing value 0 to sysfs node.
+
+
+TEMP   120 |
+           |
+       100 |
+           |
+        80 |
+           |                            +-----------
+        60 |                            |          |
+           |              +-------------|          |
+        40 |              |             |          |
+           |              |             |          |
+        20 |              |             |          +----------
+           |              |             |          |          |
+         0 |______________|_____________|__________|__________|_________
+                  A             A          A                  A     TIME
+                  |<----->|     |<----->|  |<----->|          |
+                  | 938us |     |       |  |       |          |
+emulation    :  0  50     |     70      |  20      |          0
+current temp :   sensor   50            70         20        sensor
diff --git a/Documentation/thermal/intel_powerclamp.txt b/Documentation/thermal/intel_powerclamp.txt
new file mode 100644 (file)
index 0000000..332de4a
--- /dev/null
@@ -0,0 +1,307 @@
+                        =======================
+                        INTEL POWERCLAMP DRIVER
+                        =======================
+By: Arjan van de Ven <arjan@linux.intel.com>
+    Jacob Pan <jacob.jun.pan@linux.intel.com>
+
+Contents:
+       (*) Introduction
+           - Goals and Objectives
+
+       (*) Theory of Operation
+           - Idle Injection
+           - Calibration
+
+       (*) Performance Analysis
+           - Effectiveness and Limitations
+           - Power vs Performance
+           - Scalability
+           - Calibration
+           - Comparison with Alternative Techniques
+
+       (*) Usage and Interfaces
+           - Generic Thermal Layer (sysfs)
+           - Kernel APIs (TBD)
+
+============
+INTRODUCTION
+============
+
+Consider the situation where a system’s power consumption must be
+reduced at runtime, due to power budget, thermal constraint, or noise
+level, and where active cooling is not preferred. Software managed
+passive power reduction must be performed to prevent the hardware
+actions that are designed for catastrophic scenarios.
+
+Currently, P-states, T-states (clock modulation), and CPU offlining
+are used for CPU throttling.
+
+On Intel CPUs, C-states provide effective power reduction, but so far
+they’re only used opportunistically, based on workload. With the
+development of intel_powerclamp driver, the method of synchronizing
+idle injection across all online CPU threads was introduced. The goal
+is to achieve forced and controllable C-state residency.
+
+Test/Analysis has been made in the areas of power, performance,
+scalability, and user experience. In many cases, clear advantage is
+shown over taking the CPU offline or modulating the CPU clock.
+
+
+===================
+THEORY OF OPERATION
+===================
+
+Idle Injection
+--------------
+
+On modern Intel processors (Nehalem or later), package level C-state
+residency is available in MSRs, thus also available to the kernel.
+
+These MSRs are:
+      #define MSR_PKG_C2_RESIDENCY     0x60D
+      #define MSR_PKG_C3_RESIDENCY     0x3F8
+      #define MSR_PKG_C6_RESIDENCY     0x3F9
+      #define MSR_PKG_C7_RESIDENCY     0x3FA
+
+If the kernel can also inject idle time to the system, then a
+closed-loop control system can be established that manages package
+level C-state. The intel_powerclamp driver is conceived as such a
+control system, where the target set point is a user-selected idle
+ratio (based on power reduction), and the error is the difference
+between the actual package level C-state residency ratio and the target idle
+ratio.
+
+Injection is controlled by high priority kernel threads, spawned for
+each online CPU.
+
+These kernel threads, with SCHED_FIFO class, are created to perform
+clamping actions of controlled duty ratio and duration. Each per-CPU
+thread synchronizes its idle time and duration, based on the rounding
+of jiffies, so accumulated errors can be prevented to avoid a jittery
+effect. Threads are also bound to the CPU such that they cannot be
+migrated, unless the CPU is taken offline. In this case, threads
+belonging to the offlined CPUs will be terminated immediately.
+
+Running as SCHED_FIFO and relatively high priority, also allows such
+scheme to work for both preemptable and non-preemptable kernels.
+Alignment of idle time around jiffies ensures scalability for HZ
+values. This effect can be better visualized using a Perf timechart.
+The following diagram shows the behavior of kernel thread
+kidle_inject/cpu. During idle injection, it runs monitor/mwait idle
+for a given "duration", then relinquishes the CPU to other tasks,
+until the next time interval.
+
+The NOHZ schedule tick is disabled during idle time, but interrupts
+are not masked. Tests show that the extra wakeups from scheduler tick
+have a dramatic impact on the effectiveness of the powerclamp driver
+on large scale systems (Westmere system with 80 processors).
+
+CPU0
+                 ____________          ____________
+kidle_inject/0   |   sleep    |  mwait |  sleep     |
+       _________|            |________|            |_______
+                              duration
+CPU1
+                 ____________          ____________
+kidle_inject/1   |   sleep    |  mwait |  sleep     |
+       _________|            |________|            |_______
+                             ^
+                             |
+                             |
+                             roundup(jiffies, interval)
+
+Only one CPU is allowed to collect statistics and update global
+control parameters. This CPU is referred to as the controlling CPU in
+this document. The controlling CPU is elected at runtime, with a
+policy that favors BSP, taking into account the possibility of a CPU
+hot-plug.
+
+In terms of dynamics of the idle control system, package level idle
+time is considered largely as a non-causal system where its behavior
+cannot be based on the past or current input. Therefore, the
+intel_powerclamp driver attempts to enforce the desired idle time
+instantly as given input (target idle ratio). After injection,
+powerclamp monitors the actual idle for a given time window and adjusts
+the next injection accordingly to avoid over/under correction.
+
+When used in a causal control system, such as a temperature control,
+it is up to the user of this driver to implement algorithms where
+past samples and outputs are included in the feedback. For example, a
+PID-based thermal controller can use the powerclamp driver to
+maintain a desired target temperature, based on integral and
+derivative gains of the past samples.
+
+
+
+Calibration
+-----------
+During scalability testing, it is observed that synchronized actions
+among CPUs become challenging as the number of cores grows. This is
+also true for the ability of a system to enter package level C-states.
+
+To make sure the intel_powerclamp driver scales well, online
+calibration is implemented. The goals for doing such a calibration
+are:
+
+a) determine the effective range of idle injection ratio
+b) determine the amount of compensation needed at each target ratio
+
+Compensation to each target ratio consists of two parts:
+
+        a) steady state error compensation
+       This is to offset the error occurring when the system can
+       enter idle without extra wakeups (such as external interrupts).
+
+       b) dynamic error compensation
+       When an excessive amount of wakeups occurs during idle, an
+       additional idle ratio can be added to quiet interrupts, by
+       slowing down CPU activities.
+
+A debugfs file is provided for the user to examine compensation
+progress and results, such as on a Westmere system.
+[jacob@nex01 ~]$ cat
+/sys/kernel/debug/intel_powerclamp/powerclamp_calib
+controlling cpu: 0
+pct confidence steady dynamic (compensation)
+0      0       0       0
+1      1       0       0
+2      1       1       0
+3      3       1       0
+4      3       1       0
+5      3       1       0
+6      3       1       0
+7      3       1       0
+8      3       1       0
+...
+30     3       2       0
+31     3       2       0
+32     3       1       0
+33     3       2       0
+34     3       1       0
+35     3       2       0
+36     3       1       0
+37     3       2       0
+38     3       1       0
+39     3       2       0
+40     3       3       0
+41     3       1       0
+42     3       2       0
+43     3       1       0
+44     3       1       0
+45     3       2       0
+46     3       3       0
+47     3       0       0
+48     3       2       0
+49     3       3       0
+
+Calibration occurs during runtime. No offline method is available.
+Steady state compensation is used only when confidence levels of all
+adjacent ratios have reached satisfactory level. A confidence level
+is accumulated based on clean data collected at runtime. Data
+collected during a period without extra interrupts is considered
+clean.
+
+To compensate for excessive amounts of wakeup during idle, additional
+idle time is injected when such a condition is detected. Currently,
+we have a simple algorithm to double the injection ratio. A possible
+enhancement might be to throttle the offending IRQ, such as delaying
+EOI for level triggered interrupts. But it is a challenge to be
+non-intrusive to the scheduler or the IRQ core code.
+
+
+CPU Online/Offline
+------------------
+Per-CPU kernel threads are started/stopped upon receiving
+notifications of CPU hotplug activities. The intel_powerclamp driver
+keeps track of clamping kernel threads, even after they are migrated
+to other CPUs, after a CPU offline event.
+
+
+=====================
+Performance Analysis
+=====================
+This section describes the general performance data collected on
+multiple systems, including Westmere (80P) and Ivy Bridge (4P, 8P).
+
+Effectiveness and Limitations
+-----------------------------
+The maximum range that idle injection is allowed is capped at 50
+percent. As mentioned earlier, since interrupts are allowed during
+forced idle time, excessive interrupts could result in less
+effectiveness. The extreme case would be doing a ping -f to generate
+flooded network interrupts without much CPU acknowledgement. In this
+case, little can be done from the idle injection threads. In most
+normal cases, such as scp a large file, applications can be throttled
+by the powerclamp driver, since slowing down the CPU also slows down
+network protocol processing, which in turn reduces interrupts.
+
+When control parameters change at runtime by the controlling CPU, it
+may take an additional period for the rest of the CPUs to catch up
+with the changes. During this time, idle injection is out of sync,
+thus not able to enter package C- states at the expected ratio. But
+this effect is minor, in that in most cases change to the target
+ratio is updated much less frequently than the idle injection
+frequency.
+
+Scalability
+-----------
+Tests also show a minor, but measurable, difference between the 4P/8P
+Ivy Bridge system and the 80P Westmere server under 50% idle ratio.
+More compensation is needed on Westmere for the same amount of
+target idle ratio. The compensation also increases as the idle ratio
+gets larger. The above reason constitutes the need for the
+calibration code.
+
+On the IVB 8P system, compared to an offline CPU, powerclamp can
+achieve up to 40% better performance per watt. (measured by a spin
+counter summed over per CPU counting threads spawned for all running
+CPUs).
+
+====================
+Usage and Interfaces
+====================
+The powerclamp driver is registered to the generic thermal layer as a
+cooling device. Currently, it’s not bound to any thermal zones.
+
+jacob@chromoly:/sys/class/thermal/cooling_device14$ grep . *
+cur_state:0
+max_state:50
+type:intel_powerclamp
+
+Example usage:
+- To inject 25% idle time
+$ sudo sh -c "echo 25 > /sys/class/thermal/cooling_device80/cur_state
+"
+
+If the system is not busy and has more than 25% idle time already,
+then the powerclamp driver will not start idle injection. Using Top
+will not show idle injection kernel threads.
+
+If the system is busy (spin test below) and has less than 25% natural
+idle time, powerclamp kernel threads will do idle injection, which
+appear running to the scheduler. But the overall system idle is still
+reflected. In this example, 24.1% idle is shown. This helps the
+system admin or user determine the cause of slowdown, when a
+powerclamp driver is in action.
+
+
+Tasks: 197 total,   1 running, 196 sleeping,   0 stopped,   0 zombie
+Cpu(s): 71.2%us,  4.7%sy,  0.0%ni, 24.1%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+Mem:   3943228k total,  1689632k used,  2253596k free,    74960k buffers
+Swap:  4087804k total,        0k used,  4087804k free,   945336k cached
+
+  PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+ 3352 jacob     20   0  262m  644  428 S  286  0.0   0:17.16 spin
+ 3341 root     -51   0     0    0    0 D   25  0.0   0:01.62 kidle_inject/0
+ 3344 root     -51   0     0    0    0 D   25  0.0   0:01.60 kidle_inject/3
+ 3342 root     -51   0     0    0    0 D   25  0.0   0:01.61 kidle_inject/1
+ 3343 root     -51   0     0    0    0 D   25  0.0   0:01.60 kidle_inject/2
+ 2935 jacob     20   0  696m 125m  35m S    5  3.3   0:31.11 firefox
+ 1546 root      20   0  158m  20m 6640 S    3  0.5   0:26.97 Xorg
+ 2100 jacob     20   0 1223m  88m  30m S    3  2.3   0:23.68 compiz
+
+Tests have shown that by using the powerclamp driver as a cooling
+device, a PID based userspace thermal controller can manage to
+control CPU temperature effectively, when no other thermal influence
+is added. For example, an UltraBook user can compile the kernel under
+certain temperature (below most active trip points).
index 88c0233..6859661 100644 (file)
@@ -55,6 +55,8 @@ temperature) and throttle appropriate devices.
        .get_trip_type: get the type of certain trip point.
        .get_trip_temp: get the temperature above which the certain trip point
                        will be fired.
+       .set_emul_temp: set the emulation temperature which helps in debugging
+                       different threshold temperature points.
 
 1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
@@ -153,6 +155,7 @@ Thermal zone device sys I/F, created once it's registered:
     |---trip_point_[0-*]_temp: Trip point temperature
     |---trip_point_[0-*]_type: Trip point type
     |---trip_point_[0-*]_hyst: Hysteresis value for this trip point
+    |---emul_temp:             Emulated temperature set node
 
 Thermal cooling device sys I/F, created once it's registered:
 /sys/class/thermal/cooling_device[0-*]:
@@ -252,6 +255,16 @@ passive
        Valid values: 0 (disabled) or greater than 1000
        RW, Optional
 
+emul_temp
+       Interface to set the emulated temperature method in thermal zone
+       (sensor). After setting this temperature, the thermal zone may pass
+       this temperature to platform emulation function if registered or
+       cache it locally. This is useful in debugging different temperature
+       threshold and its associated cooling action. This is write only node
+       and writing 0 on this node should disable emulation.
+       Unit: millidegree Celsius
+       WO, Optional
+
 *****************************
 * Cooling device attributes *
 *****************************
@@ -329,8 +342,9 @@ The framework includes a simple notification mechanism, in the form of a
 netlink event. Netlink socket initialization is done during the _init_
 of the framework. Drivers which intend to use the notification mechanism
 just need to call thermal_generate_netlink_event() with two arguments viz
-(originator, event). Typically the originator will be an integer assigned
-to a thermal_zone_device when it registers itself with the framework. The
+(originator, event). The originator is a pointer to struct thermal_zone_device
+from where the event has been originated. An integer which represents the
+thermal zone device will be used in the message to identify the zone. The
 event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
 THERMAL_DEV_FAULT}. Notification can be sent when the current temperature
 crosses any of the configured thresholds.
index 086638f..a0438f3 100644 (file)
@@ -1,6 +1,6 @@
 The Linux WatchDog Timer Driver Core kernel API.
 ===============================================
-Last reviewed: 22-May-2012
+Last reviewed: 12-Feb-2013
 
 Wim Van Sebroeck <wim@iguana.be>
 
@@ -212,3 +212,15 @@ driver specific data to and a pointer to the data itself.
 The watchdog_get_drvdata function allows you to retrieve driver specific data.
 The argument of this function is the watchdog device where you want to retrieve
 data from. The function returns the pointer to the driver specific data.
+
+To initialize the timeout field, the following function can be used:
+
+extern int watchdog_init_timeout(struct watchdog_device *wdd,
+                                  unsigned int timeout_parm, struct device *dev);
+
+The watchdog_init_timeout function allows you to initialize the timeout field
+using the module timeout parameter or by retrieving the timeout-sec property from
+the device tree (if the module timeout parameter is invalid). Best practice is
+to set the default timeout value as timeout value in the watchdog_device and
+then use this function to set the user "preferred" timeout value.
+This routine returns zero on success and a negative errno code for failure.
index 1e5c3a4..9561658 100644 (file)
@@ -97,12 +97,13 @@ Descriptions of section entries:
           X:   net/ipv6/
           matches all files in and below net excluding net/ipv6/
        K: Keyword perl extended regex pattern to match content in a
-          patch or file.  For instance:
+          patch or file, or an affected filename.  For instance:
           K: of_get_profile
-             matches patches or files that contain "of_get_profile"
+             matches patch or file content, or filenames, that contain
+             "of_get_profile"
           K: \b(printk|pr_(info|err))\b
-             matches patches or files that contain one or more of the words
-             printk, pr_info or pr_err
+             matches patch or file content, or filenames, that contain one or
+             more of the words printk, pr_info or pr_err
           One regex pattern per line.  Multiple K: lines acceptable.
 
 Note: For the hard of thinking, this list is meant to remain in alphabetical
@@ -113,12 +114,6 @@ Maintainers List (try to look for most precise areas first)
 
                -----------------------------------
 
-3C505 NETWORK DRIVER
-M:     Philip Blundell <philb@gnu.org>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/ethernet/i825xx/3c505*
-
 3C59X NETWORK DRIVER
 M:     Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
 L:     netdev@vger.kernel.org
@@ -1799,7 +1794,8 @@ F:        drivers/bcma/
 F:     include/linux/bcma/
 
 BROCADE BFA FC SCSI DRIVER
-M:     Krishna C Gudipati <kgudipat@brocade.com>
+M:     Anil Gurumurthy <agurumur@brocade.com>
+M:     Vijaya Mohan Guvva <vmohan@brocade.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/bfa/
@@ -2073,8 +2069,8 @@ S:        Maintained
 F:     include/linux/clk.h
 
 CISCO FCOE HBA DRIVER
-M:     Abhijeet Joglekar <abjoglek@cisco.com>
-M:     Venkata Siva Vijayendra Bhamidipati <vbhamidi@cisco.com>
+M:     Hiral Patel <hiralpat@cisco.com>
+M:     Suma Ramars <sramars@cisco.com>
 M:     Brian Uchino <buchino@cisco.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
@@ -2359,12 +2355,6 @@ W:       http://www.arm.linux.org.uk/
 S:     Maintained
 F:     drivers/video/cyber2000fb.*
 
-CYCLADES 2X SYNC CARD DRIVER
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-W:     http://oops.ghostprotocols.net:81/blog
-S:     Maintained
-F:     drivers/net/wan/cycx*
-
 CYCLADES ASYNC MUX DRIVER
 W:     http://www.cyclades.com/
 S:     Orphan
@@ -2902,6 +2892,13 @@ W:       bluesmoke.sourceforge.net
 S:     Maintained
 F:     drivers/edac/e7xxx_edac.c
 
+EDAC-GHES
+M:     Mauro Carvalho Chehab <mchehab@redhat.com>
+L:     linux-edac@vger.kernel.org
+W:     bluesmoke.sourceforge.net
+S:     Maintained
+F:     drivers/edac/ghes-edac.c
+
 EDAC-I82443BXGX
 M:     Tim Small <tim@buttersideup.com>
 L:     linux-edac@vger.kernel.org
@@ -3058,12 +3055,6 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F:     drivers/video/s1d13xxxfb.c
 F:     include/video/s1d13xxxfb.h
 
-ETHEREXPRESS-16 NETWORK DRIVER
-M:     Philip Blundell <philb@gnu.org>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/ethernet/i825xx/eexpress.*
-
 ETHERNET BRIDGE
 M:     Stephen Hemminger <stephen@networkplumber.org>
 L:     bridge@lists.linux-foundation.org
@@ -5195,6 +5186,18 @@ F:       drivers/mtd/
 F:     include/linux/mtd/
 F:     include/uapi/mtd/
 
+METAG ARCHITECTURE
+M:     James Hogan <james.hogan@imgtec.com>
+S:     Supported
+F:     arch/metag/
+F:     Documentation/metag/
+F:     Documentation/devicetree/bindings/metag/
+F:     drivers/clocksource/metag_generic.c
+F:     drivers/irqchip/irq-metag.c
+F:     drivers/irqchip/irq-metag-ext.c
+F:     drivers/tty/metag_da.c
+F:     fs/imgdafs/
+
 MICROBLAZE ARCHITECTURE
 M:     Michal Simek <monstr@monstr.eu>
 L:     microblaze-uclinux@itee.uq.edu.au (moderated for non-subscribers)
@@ -5437,6 +5440,7 @@ F:        net/netrom/
 NETWORK BLOCK DEVICE (NBD)
 M:     Paul Clements <Paul.Clements@steeleye.com>
 S:     Maintained
+L:     nbd-general@lists.sourceforge.net
 F:     Documentation/blockdev/nbd.txt
 F:     drivers/block/nbd.c
 F:     include/linux/nbd.h
@@ -6512,6 +6516,12 @@ S:       Maintained
 F:     Documentation/blockdev/ramdisk.txt
 F:     drivers/block/brd.c
 
+RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
+M:     Joshua Morris <josh.h.morris@us.ibm.com>
+M:     Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+S:     Maintained
+F:     drivers/block/rsxx/
+
 RANDOM NUMBER DRIVER
 M:     Theodore Ts'o" <tytso@mit.edu>
 S:     Maintained
@@ -7539,6 +7549,7 @@ STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
 M:     Julian Andres Klode <jak@jak-linux.org>
 M:     Marc Dietrich <marvin24@gmx.de>
 L:     ac100@lists.launchpad.net (moderated for non-subscribers)
+L:     linux-tegra@vger.kernel.org
 S:     Maintained
 F:     drivers/staging/nvec/
 
@@ -7665,6 +7676,12 @@ F:       lib/swiotlb.c
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 
+SYNOPSYS ARC ARCHITECTURE
+M:     Vineet Gupta <vgupta@synopsys.com>
+L:     linux-snps-arc@vger.kernel.org
+S:     Supported
+F:     arch/arc/
+
 SYSV FILESYSTEM
 M:     Christoph Hellwig <hch@infradead.org>
 S:     Maintained
@@ -7831,9 +7848,7 @@ L:        linux-tegra@vger.kernel.org
 Q:     http://patchwork.ozlabs.org/project/linux-tegra/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
 S:     Supported
-F:     arch/arm/mach-tegra
-F:     arch/arm/boot/dts/tegra*
-F:     arch/arm/configs/tegra_defconfig
+K:     (?i)[^a-z]tegra
 
 TEHUTI ETHERNET DRIVER
 M:     Andy Gospodarek <andy@greyhouse.net>
index 2309b43..5bd9f77 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 8
+PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
@@ -192,7 +192,6 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
 # "make" in the configured kernel build directory always uses that.
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-export KBUILD_BUILDHOST := $(SUBARCH)
 ARCH           ?= $(SUBARCH)
 CROSS_COMPILE  ?= $(CONFIG_CROSS_COMPILE:"%"=%)
 
@@ -620,7 +619,8 @@ KBUILD_AFLAGS       += -gdwarf-2
 endif
 
 ifdef CONFIG_DEBUG_INFO_REDUCED
-KBUILD_CFLAGS  += $(call cc-option, -femit-struct-debug-baseonly)
+KBUILD_CFLAGS  += $(call cc-option, -femit-struct-debug-baseonly) \
+                  $(call cc-option,-fno-var-tracking)
 endif
 
 ifdef CONFIG_FUNCTION_TRACER
@@ -1398,7 +1398,7 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN   $(wildcard $(rm-files))
 # Run depmod only if we have System.map and depmod is executable
 quiet_cmd_depmod = DEPMOD  $(KERNELRELEASE)
       cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
-                   $(KERNELRELEASE)
+                   $(KERNELRELEASE) "$(patsubst "%",%,$(CONFIG_SYMBOL_PREFIX))"
 
 # Create temporary dir for module support files
 # clean it up only when building all modules
index 40e2b12..5a1779c 100644 (file)
@@ -103,6 +103,22 @@ config UPROBES
 
          If in doubt, say "N".
 
+config HAVE_64BIT_ALIGNED_ACCESS
+       def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
+       help
+         Some architectures require 64 bit accesses to be 64 bit
+         aligned, which also requires structs containing 64 bit values
+         to be 64 bit aligned too. This includes some 32 bit
+         architectures which can do 64 bit accesses, as well as 64 bit
+         architectures without unaligned access.
+
+         This symbol should be selected by an architecture if 64 bit
+         accesses are required to be 64 bit aligned in this way even
+         though it is not a 64 bit architecture.
+
+         See Documentation/unaligned-memory-access.txt for more
+         information on the topic of unaligned memory accesses.
+
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
        bool
        help
@@ -303,6 +319,13 @@ config ARCH_WANT_OLD_COMPAT_IPC
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        bool
 
+config HAVE_VIRT_TO_BUS
+       bool
+       help
+         An architecture should select this if it implements the
+         deprecated interface virt_to_bus().  All new architectures
+         should probably not select this.
+
 config HAVE_ARCH_SECCOMP_FILTER
        bool
        help
index 1ecbf7a..5833aa4 100644 (file)
@@ -9,6 +9,7 @@ config ALPHA
        select HAVE_PERF_EVENTS
        select HAVE_DMA_ATTRS
        select HAVE_GENERIC_HARDIRQS
+       select HAVE_VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
        select AUTO_IRQ_AFFINITY if SMP
        select GENERIC_IRQ_SHOW
index b9fc6c3..e64559f 100644 (file)
@@ -111,7 +111,7 @@ static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
                                  size_t count, loff_t *pos)
 {
        int res;
-       srm_env_t       *entry = PDE(file->f_path.dentry->d_inode)->data;
+       srm_env_t       *entry = PDE(file_inode(file))->data;
        char            *buf = (char *) __get_free_page(GFP_USER);
        unsigned long   ret1, ret2;
 
diff --git a/arch/arc/Kbuild b/arch/arc/Kbuild
new file mode 100644 (file)
index 0000000..082d329
--- /dev/null
@@ -0,0 +1,2 @@
+obj-y += kernel/
+obj-y += mm/
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
new file mode 100644 (file)
index 0000000..e6f4eca
--- /dev/null
@@ -0,0 +1,453 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+config ARC
+       def_bool y
+       select CLONE_BACKWARDS
+       # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
+       select DEVTMPFS if !INITRAMFS_SOURCE=""
+       select GENERIC_ATOMIC64
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_FIND_FIRST_BIT
+       # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
+       select GENERIC_IRQ_SHOW
+       select GENERIC_KERNEL_EXECVE
+       select GENERIC_KERNEL_THREAD
+       select GENERIC_PENDING_IRQ if SMP
+       select GENERIC_SMP_IDLE_THREAD
+       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_TRACEHOOK
+       select HAVE_GENERIC_HARDIRQS
+       select HAVE_IOREMAP_PROT
+       select HAVE_KPROBES
+       select HAVE_KRETPROBES
+       select HAVE_MEMBLOCK
+       select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+       select HAVE_OPROFILE
+       select HAVE_PERF_EVENTS
+       select IRQ_DOMAIN
+       select MODULES_USE_ELF_RELA
+       select NO_BOOTMEM
+       select OF
+       select OF_EARLY_FLATTREE
+       select PERF_USE_VMALLOC
+
+config SCHED_OMIT_FRAME_POINTER
+       def_bool y
+
+config GENERIC_CSUM
+       def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+       def_bool y
+
+config ARCH_FLATMEM_ENABLE
+       def_bool y
+
+config MMU
+       def_bool y
+
+config NO_IOPORT
+       def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+       def_bool y
+
+config GENERIC_HWEIGHT
+       def_bool y
+
+config BINFMT_ELF
+       def_bool y
+
+config STACKTRACE_SUPPORT
+       def_bool y
+       select STACKTRACE
+
+config HAVE_LATENCYTOP_SUPPORT
+       def_bool y
+
+config NO_DMA
+       def_bool n
+
+source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
+menu "ARC Architecture Configuration"
+
+menu "ARC Platform/SoC/Board"
+
+source "arch/arc/plat-arcfpga/Kconfig"
+#New platform adds here
+
+endmenu
+
+menu "ARC CPU Configuration"
+
+choice
+       prompt "ARC Core"
+       default ARC_CPU_770
+
+config ARC_CPU_750D
+       bool "ARC750D"
+       help
+         Support for ARC750 core
+
+config ARC_CPU_770
+       bool "ARC770"
+       select ARC_CPU_REL_4_10
+       help
+         Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
+         This core has a bunch of cool new features:
+         -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
+                   Shared Address Spaces (for sharing TLB entires in MMU)
+         -Caches: New Prog Model, Region Flush
+         -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
+
+endchoice
+
+config CPU_BIG_ENDIAN
+       bool "Enable Big Endian Mode"
+       default n
+       help
+         Build kernel for Big Endian Mode of ARC CPU
+
+# If a platform can't work with 0x8000_0000 based dma_addr_t
+config ARC_PLAT_NEEDS_CPU_TO_DMA
+       bool
+
+config SMP
+       bool "Symmetric Multi-Processing (Incomplete)"
+       default n
+       select USE_GENERIC_SMP_HELPERS
+       help
+         This enables support for systems with more than one CPU. If you have
+         a system with only one CPU, like most personal computers, say N. If
+         you have a system with more than one CPU, say Y.
+
+if SMP
+
+config ARC_HAS_COH_CACHES
+       def_bool n
+
+config ARC_HAS_COH_LLSC
+       def_bool n
+
+config ARC_HAS_COH_RTSC
+       def_bool n
+
+config ARC_HAS_REENTRANT_IRQ_LV2
+       def_bool n
+
+endif
+
+config NR_CPUS
+       int "Maximum number of CPUs (2-32)"
+       range 2 32
+       depends on SMP
+       default "2"
+
+menuconfig ARC_CACHE
+       bool "Enable Cache Support"
+       default y
+       # if SMP, cache enabled ONLY if ARC implementation has cache coherency
+       depends on !SMP || ARC_HAS_COH_CACHES
+
+if ARC_CACHE
+
+config ARC_CACHE_LINE_SHIFT
+       int "Cache Line Length (as power of 2)"
+       range 5 7
+       default "6"
+       help
+         Starting with ARC700 4.9, Cache line length is configurable,
+         This option specifies "N", with Line-len = 2 power N
+         So line lengths of 32, 64, 128 are specified by 5,6,7, respectively
+         Linux only supports same line lengths for I and D caches.
+
+config ARC_HAS_ICACHE
+       bool "Use Instruction Cache"
+       default y
+
+config ARC_HAS_DCACHE
+       bool "Use Data Cache"
+       default y
+
+config ARC_CACHE_PAGES
+       bool "Per Page Cache Control"
+       default y
+       depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE
+       help
+         This can be used to over-ride the global I/D Cache Enable on a
+         per-page basis (but only for pages accessed via MMU such as
+         Kernel Virtual address or User Virtual Address)
+         TLB entries have a per-page Cache Enable Bit.
+         Note that Global I/D ENABLE + Per Page DISABLE works but corollary
+         Global DISABLE + Per Page ENABLE won't work
+
+endif  #ARC_CACHE
+
+config ARC_HAS_ICCM
+       bool "Use ICCM"
+       help
+         Single Cycle RAMS to store Fast Path Code
+       default n
+
+config ARC_ICCM_SZ
+       int "ICCM Size in KB"
+       default "64"
+       depends on ARC_HAS_ICCM
+
+config ARC_HAS_DCCM
+       bool "Use DCCM"
+       help
+         Single Cycle RAMS to store Fast Path Data
+       default n
+
+config ARC_DCCM_SZ
+       int "DCCM Size in KB"
+       default "64"
+       depends on ARC_HAS_DCCM
+
+config ARC_DCCM_BASE
+       hex "DCCM map address"
+       default "0xA0000000"
+       depends on ARC_HAS_DCCM
+
+config ARC_HAS_HW_MPY
+       bool "Use Hardware Multiplier (Normal or Faster XMAC)"
+       default y
+       help
+         Influences how gcc generates code for MPY operations.
+         If enabled, MPYxx insns are generated, provided by Standard/XMAC
+         Multiplier. Otherwise software multiply lib is used
+
+choice
+       prompt "ARC700 MMU Version"
+       default ARC_MMU_V3 if ARC_CPU_770
+       default ARC_MMU_V2 if ARC_CPU_750D
+
+config ARC_MMU_V1
+       bool "MMU v1"
+       help
+         Orig ARC700 MMU
+
+config ARC_MMU_V2
+       bool "MMU v2"
+       help
+         Fixed the deficiency of v1 - possible thrashing in memcpy scenario
+         when 2 D-TLB and 1 I-TLB entries index into same 2way set.
+
+config ARC_MMU_V3
+       bool "MMU v3"
+       depends on ARC_CPU_770
+       help
+         Introduced with ARC700 4.10: New Features
+         Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
+         Shared Address Spaces (SASID)
+
+endchoice
+
+
+choice
+       prompt "MMU Page Size"
+       default ARC_PAGE_SIZE_8K
+
+config ARC_PAGE_SIZE_8K
+       bool "8KB"
+       help
+         Choose between 8k vs 16k
+
+config ARC_PAGE_SIZE_16K
+       bool "16KB"
+       depends on ARC_MMU_V3
+
+config ARC_PAGE_SIZE_4K
+       bool "4KB"
+       depends on ARC_MMU_V3
+
+endchoice
+
+config ARC_COMPACT_IRQ_LEVELS
+       bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+       default n
+       # Timer HAS to be high priority, for any other high priority config
+       select ARC_IRQ3_LV2
+       # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
+       depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
+
+if ARC_COMPACT_IRQ_LEVELS
+
+config ARC_IRQ3_LV2
+       bool
+
+config ARC_IRQ5_LV2
+       bool
+
+config ARC_IRQ6_LV2
+       bool
+
+endif
+
+config ARC_FPU_SAVE_RESTORE
+       bool "Enable FPU state persistence across context switch"
+       default n
+       help
+         Double Precision Floating Point unit had dedicated regs which
+         need to be saved/restored across context-switch.
+         Note that ARC FPU is overly simplistic, unlike say x86, which has
+         hardware pieces to allow software to conditionally save/restore,
+         based on actual usage of FPU by a task. Thus our implementation does
+         this for all tasks in system.
+
+menuconfig ARC_CPU_REL_4_10
+       bool "Enable support for Rel 4.10 features"
+       default n
+       help
+         -ARC770 (and dependent features) enabled
+         -ARC750 also shares some of the new features with 770
+
+config ARC_HAS_LLSC
+       bool "Insn: LLOCK/SCOND (efficient atomic ops)"
+       default y
+       depends on ARC_CPU_770
+       # if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
+       depends on !SMP || ARC_HAS_COH_LLSC
+
+config ARC_HAS_SWAPE
+       bool "Insn: SWAPE (endian-swap)"
+       default y
+       depends on ARC_CPU_REL_4_10
+
+config ARC_HAS_RTSC
+       bool "Insn: RTSC (64-bit r/o cycle counter)"
+       default y
+       depends on ARC_CPU_REL_4_10
+       # if SMP, enable RTSC only if counter is coherent across cores
+       depends on !SMP || ARC_HAS_COH_RTSC
+
+endmenu   # "ARC CPU Configuration"
+
+config LINUX_LINK_BASE
+       hex "Linux Link Address"
+       default "0x80000000"
+       help
+         ARC700 divides the 32 bit phy address space into two equal halves
+         -Lower 2G (0 - 0x7FFF_FFFF ) is user virtual, translated by MMU
+         -Upper 2G (0x8000_0000 onwards) is untranslated, for kernel
+         Typically Linux kernel is linked at the start of untranslated addr,
+         hence the default value of 0x8000_0000.
+         However some customers have peripherals mapped at this addr, so
+         Linux needs to be scooted a bit.
+         If you don't know what the above means, leave this setting alone.
+
+config ARC_CURR_IN_REG
+       bool "Dedicate Register r25 for current_task pointer"
+       default y
+       help
+         This reserves Register R25 to point to the Current Task in
+         kernel mode. This saves a memory access for each such access
+
+
+config ARC_MISALIGN_ACCESS
+       bool "Emulate unaligned memory access (userspace only)"
+       # "default N" referenced an undefined symbol; Kconfig booleans use lowercase "n"
+       default n
+       select SYSCTL_ARCH_UNALIGN_NO_WARN
+       select SYSCTL_ARCH_UNALIGN_ALLOW
+       help
+         This enables misaligned 16 & 32 bit memory access from user space.
+         Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide
+         potential bugs in code
+
+config ARC_STACK_NONEXEC
+       bool "Make stack non-executable"
+       default n
+       help
+         To disable the execute permissions of stack/heap of processes
+         which are enabled by default.
+
+config HZ
+       int "Timer Frequency"
+       default 100
+
+config ARC_METAWARE_HLINK
+       bool "Support for Metaware debugger assisted Host access"
+       default n
+       help
+         This option allows Linux userland apps to directly access
+         host file system (open/creat/read/write etc) with help from
+         Metaware Debugger. This can come in handy for Linux-host communication
+         when there is no real usable peripheral such as EMAC.
+
+menuconfig ARC_DBG
+       bool "ARC debugging"
+       default y
+
+config ARC_DW2_UNWIND
+       bool "Enable DWARF specific kernel stack unwind"
+       depends on ARC_DBG
+       default y
+       select KALLSYMS
+       help
+         Compiles the kernel with DWARF unwind information and can be used
+         to get stack backtraces.
+
+         If you say Y here the resulting kernel image will be slightly larger
+         but not slower, and it will give very useful debugging information.
+         If you don't debug the kernel, you can say N, but we may not be able
+         to solve problems without frame unwind information
+
+config ARC_DBG_TLB_PARANOIA
+       bool "Paranoia Checks in Low Level TLB Handlers"
+       depends on ARC_DBG
+       default n
+
+config ARC_DBG_TLB_MISS_COUNT
+       bool "Profile TLB Misses"
+       default n
+       select DEBUG_FS
+       depends on ARC_DBG
+       help
+         Counts number of I and D TLB Misses and exports them via Debugfs
+         The counters can be cleared via Debugfs as well
+
+config CMDLINE
+       string "Kernel command line to built-in"
+       default "print-fatal-signals=1"
+       help
+         The default command line which will be appended to the optional
+         u-boot provided command line (see below)
+
+config CMDLINE_UBOOT
+       bool "Support U-boot kernel command line passing"
+       default n
+       help
+         If you are using U-boot (www.denx.de) and wish to pass the kernel
+         command line from the U-boot environment to the Linux kernel then
+         switch this option on.
+         ARC U-boot will setup the cmdline in RAM/flash and set r2 to point
+         to it. kernel startup code will copy the string into cmdline buffer
+         and also append CONFIG_CMDLINE.
+
+config ARC_BUILTIN_DTB_NAME
+       string "Built in DTB"
+       help
+         Set the name of the DTB to embed in the vmlinux binary
+         Leaving it blank selects the minimal "skeleton" dtb
+
+source "kernel/Kconfig.preempt"
+
+endmenu         # "ARC Architecture Configuration"
+
+source "mm/Kconfig"
+source "net/Kconfig"
+source "drivers/Kconfig"
+source "fs/Kconfig"
+source "arch/arc/Kconfig.debug"
+source "security/Kconfig"
+source "crypto/Kconfig"
+source "lib/Kconfig"
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
new file mode 100644 (file)
index 0000000..962c609
--- /dev/null
@@ -0,0 +1,34 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config EARLY_PRINTK
+       bool "Early printk" if EMBEDDED
+       default y
+       help
+         Write kernel log output directly into the VGA buffer or to a serial
+         port.
+
+         This is useful for kernel debugging when your machine crashes very
+         early before the console code is initialized. For normal operation
+         it is not recommended because it looks ugly and doesn't cooperate
+         with klogd/syslogd or the X server. You should normally say N here,
+         unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+       bool "Check for stack overflows"
+       depends on DEBUG_KERNEL
+       help
+         This option will cause messages to be printed if free stack space
+         drops below a certain limit.
+
+config 16KSTACKS
+       bool "Use 16Kb for kernel stacks instead of 8Kb"
+       help
+         If you say Y here the kernel will use a 16Kb stack size for the
+         kernel stack attached to each process/thread. The default is 8K.
+         This increases the resident kernel footprint and will cause fewer
+         threads to run on the system and also increase the pressure
+         on the VM subsystem for higher order allocations.
+
+endmenu
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
new file mode 100644 (file)
index 0000000..92379c7
--- /dev/null
@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+UTS_MACHINE := arc
+
+KBUILD_DEFCONFIG := fpga_defconfig
+
+cflags-y       += -mA7 -fno-common -pipe -fno-builtin -D__linux__
+
+LINUXINCLUDE   +=  -include ${src}/arch/arc/include/asm/defines.h
+
+ifdef CONFIG_ARC_CURR_IN_REG
+# For a global register definition, make sure it gets passed to every file
+# We had a customer reported bug where some code built in kernel was NOT using
+# any kernel headers, and missing the r25 global register
+# Can't do unconditionally (like above) because of recursive include issues
+# due to <linux/thread_info.h>
+LINUXINCLUDE   +=  -include ${src}/arch/arc/include/asm/current.h
+endif
+
+atleast_gcc44 :=  $(call cc-ifversion, -gt, 0402, y)
+cflags-$(atleast_gcc44)                        += -fsection-anchors
+
+cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
+cflags-$(CONFIG_ARC_HAS_RTSC)          += -mrtsc
+cflags-$(CONFIG_ARC_DW2_UNWIND)                += -fasynchronous-unwind-tables
+
+ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+# Generic build system uses -O2, we want -O3
+cflags-y  += -O3
+endif
+
+# small data is default for elf32 tool-chain. If not usable, disable it
+# This also allows repurposing GP as scratch reg to gcc reg allocator
+disable_small_data := y
+cflags-$(disable_small_data)           += -mno-sdata -fcall-used-gp
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -mbig-endian
+ldflags-$(CONFIG_CPU_BIG_ENDIAN)       += -EB
+
+# STAR 9000518362:
+# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
+# --build-id w/o "-marclinux".
+# Default arc-elf32-ld is OK
+ldflags-y                              += -marclinux
+
+ARC_LIBGCC                             := -mA7
+cflags-$(CONFIG_ARC_HAS_HW_MPY)                += -multcost=16
+
+ifndef CONFIG_ARC_HAS_HW_MPY
+       cflags-y        += -mno-mpy
+
+# newlib for ARC700 assumes MPY to be always present, which is generally true
+# However, if someone really doesn't want MPY, we need to use the 600 ver
+# which coupled with -mno-mpy will use mpy emulation
+# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
+# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
+
+       ARC_LIBGCC              := -marc600
+       ifneq ($(atleast_gcc44),y)
+               cflags-y        += -multcost=30
+       endif
+endif
+
+LIBGCC := $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name)
+
+# Modules with short calls might break for calls into builtin-kernel
+KBUILD_CFLAGS_MODULE   += -mlong-calls
+
+# Finally dump everything into kernel build system
+KBUILD_CFLAGS  += $(cflags-y)
+KBUILD_AFLAGS  += $(KBUILD_CFLAGS)
+LDFLAGS                += $(ldflags-y)
+
+head-y         := arch/arc/kernel/head.o
+
+# See arch/arc/Kbuild for content of core part of the kernel
+core-y         += arch/arc/
+
+# w/o this dtb won't embed into kernel binary
+core-y         += arch/arc/boot/dts/
+
+core-$(CONFIG_ARC_PLAT_FPGA_LEGACY)    += arch/arc/plat-arcfpga/
+
+drivers-$(CONFIG_OPROFILE)     += arch/arc/oprofile/
+
+libs-y         += arch/arc/lib/ $(LIBGCC)
+
+# Default target for make without any arguments.
+KBUILD_IMAGE := bootpImage
+
+all:   $(KBUILD_IMAGE)
+boot   := arch/arc/boot
+
+bootpImage: vmlinux
+
+uImage: vmlinux
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+%.dtb %.dtb.S %.dtb.o: scripts
+       $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+       $(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+
+archclean:
+       $(Q)$(MAKE) $(clean)=$(boot)
+
+# Hacks to enable final link due to absence of link-time branch relaxation
+# and gcc choosing optimal(shorter) branches at -O3
+#
+# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
+# However lib/decompress_inflate.o (.init.text) calls
+# zlib_inflate_workspacesize (.text) causing relocation errors.
+# Thus forcing all extern calls in this file to be long calls
+export CFLAGS_decompress_inflate.o = -mmedium-calls
+export CFLAGS_initramfs.o = -mmedium-calls
+ifdef CONFIG_SMP
+export CFLAGS_core.o = -mmedium-calls
+endif
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
new file mode 100644 (file)
index 0000000..7d514c2
--- /dev/null
@@ -0,0 +1,26 @@
+targets := vmlinux.bin vmlinux.bin.gz uImage
+
+# uImage build relies on mkimage being available on your host for ARC target
+# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
+# and make sure it's reachable from your PATH
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+LINUX_START_TEXT = $$(readelf -h vmlinux | \
+                       grep "Entry point address" | grep -o 0x.*)
+
+UIMAGE_LOADADDR    = $(CONFIG_LINUX_LINK_BASE)
+UIMAGE_ENTRYADDR   = $(LINUX_START_TEXT)
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+       $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,gzip)
+
+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
+       $(call if_changed,uimage)
+
+PHONY += FORCE
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
new file mode 100644 (file)
index 0000000..5776835
--- /dev/null
@@ -0,0 +1,13 @@
+# Built-in dtb
+builtindtb-y           := angel4
+
+ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
+       builtindtb-y    := $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
+endif
+
+obj-y   += $(builtindtb-y).dtb.o
+targets += $(builtindtb-y).dtb
+
+dtbs:  $(addprefix  $(obj)/, $(builtindtb-y).dtb)
+
+clean-files := *.dtb
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
new file mode 100644 (file)
index 0000000..bae4f93
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+       compatible = "snps,arc-angel4";
+       clock-frequency = <80000000>;   /* 80 MHZ */
+       #address-cells = <1>;
+       #size-cells = <1>;
+       interrupt-parent = <&intc>;
+
+       chosen {
+               bootargs = "console=ttyARC0,115200n8";
+       };
+
+       aliases {
+               serial0 = &arcuart0;
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x00000000 0x10000000>;  /* 256M */
+       };
+
+       fpga {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               /* child and parent address space 1:1 mapped */
+               ranges;
+
+               intc: interrupt-controller {
+                       compatible = "snps,arc700-intc";
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+
+               arcuart0: serial@c0fc1000 {
+                       compatible = "snps,arc-uart";
+                       reg = <0xc0fc1000 0x100>;
+                       interrupts = <5>;
+                       clock-frequency = <80000000>;
+                       current-speed = <115200>;
+                       status = "okay";
+               };
+       };
+};
diff --git a/arch/arc/boot/dts/skeleton.dts b/arch/arc/boot/dts/skeleton.dts
new file mode 100644 (file)
index 0000000..25a84fb
--- /dev/null
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
new file mode 100644 (file)
index 0000000..a870bdd
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Skeleton device tree; the bare minimum needed to boot; just include and
+ * add a compatible value.
+ */
+
+/ {
+       compatible = "snps,arc";
+       clock-frequency = <80000000>;   /* 80 MHZ */
+       #address-cells = <1>;
+       #size-cells = <1>;
+       chosen { };
+       aliases { };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       device_type = "cpu";
+                       compatible = "snps,arc770d";
+                       reg = <0>;
+               };
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x00000000 0x10000000>;  /* 256M */
+       };
+};
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
new file mode 100644 (file)
index 0000000..b869806
--- /dev/null
@@ -0,0 +1,61 @@
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_FPGA_LEGACY=y
+CONFIG_ARC_BOARD_ML509=y
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644 (file)
index 0000000..48af742
--- /dev/null
@@ -0,0 +1,49 @@
+generic-y += auxvec.h
+generic-y += bugs.h
+generic-y += bitsperlong.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644 (file)
index 0000000..1b907c4
--- /dev/null
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+#ifdef __KERNEL__
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR   0x61    /* DCCM Base Addr */
+#define ARC_REG_CRC_BCR                0x62
+#define ARC_REG_DVFB_BCR       0x64
+#define ARC_REG_EXTARITH_BCR   0x65
+#define ARC_REG_VECBASE_BCR    0x68
+#define ARC_REG_PERIBASE_BCR   0x69
+#define ARC_REG_FP_BCR         0x6B    /* Single-Precision FPU */
+#define ARC_REG_DPFP_BCR       0x6C    /* Dbl Precision FPU */
+#define ARC_REG_MMU_BCR                0x6f
+#define ARC_REG_DCCM_BCR       0x74    /* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR     0x75
+#define ARC_REG_ICCM_BCR       0x78
+#define ARC_REG_XY_MEM_BCR     0x79
+#define ARC_REG_MAC_BCR                0x7a
+#define ARC_REG_MUL_BCR                0x7b
+#define ARC_REG_SWAP_BCR       0x7c
+#define ARC_REG_NORM_BCR       0x7d
+#define ARC_REG_MIXMAX_BCR     0x7e
+#define ARC_REG_BARREL_BCR     0x7f
+#define ARC_REG_D_UNCACH_BCR   0x6A
+
+/* status32 Bits Positions */
+#define STATUS_H_BIT           0       /* CPU Halted */
+#define STATUS_E1_BIT          1       /* Int 1 enable */
+#define STATUS_E2_BIT          2       /* Int 2 enable */
+#define STATUS_A1_BIT          3       /* Int 1 active */
+#define STATUS_A2_BIT          4       /* Int 2 active */
+#define STATUS_AE_BIT          5       /* Exception active */
+#define STATUS_DE_BIT          6       /* PC is in delay slot */
+#define STATUS_U_BIT           7       /* User/Kernel mode */
+#define STATUS_L_BIT           12      /* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_H_MASK          (1<<STATUS_H_BIT)
+#define STATUS_E1_MASK         (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK         (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK         (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK         (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK         (1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK         (1<<STATUS_DE_BIT)
+#define STATUS_U_MASK          (1<<STATUS_U_BIT)
+#define STATUS_L_MASK          (1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_VEC_MASK                   0xff0000
+#define ECR_CODE_MASK                  0x00ff00
+#define ECR_PARAM_MASK                 0x0000ff
+
+/* Exception Cause Vector Values */
+#define ECR_V_INSN_ERR                 0x02
+#define ECR_V_MACH_CHK                 0x20
+#define ECR_V_ITLB_MISS                        0x21
+#define ECR_V_DTLB_MISS                        0x22
+#define ECR_V_PROTV                    0x23
+
+/* Protection Violation Exception Cause Code Values */
+#define ECR_C_PROTV_INST_FETCH         0x00
+#define ECR_C_PROTV_LOAD               0x01
+#define ECR_C_PROTV_STORE              0x02
+#define ECR_C_PROTV_XCHG               0x03
+#define ECR_C_PROTV_MISALIG_DATA       0x04
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS         8
+#define ECR_C_BIT_DTLB_ST_MISS         9
+
+
+/* Auxiliary registers */
+#define AUX_IDENTITY           4
+#define AUX_INTR_VEC_BASE      0x25
+#define AUX_IRQ_LEV            0x200   /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT           0x201   /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12           0x43    /* interrupt level register */
+
+#define AUX_IENABLE            0x40c
+#define AUX_ITRIGGER           0x40d
+#define AUX_IPULSE             0x415
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT   0x23    /* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL    0x22    /* timer 0 control */
+#define ARC_REG_TIMER0_CNT     0x21    /* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT   0x102   /* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL    0x101   /* timer 1 control */
+#define ARC_REG_TIMER1_CNT     0x100   /* timer 1 count */
+
+#define TIMER_CTRL_IE          (1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH          (1 << 1) /* Count only when CPU NOT halted */
+
+/* MMU Management regs */
+#define ARC_REG_TLBPD0         0x405
+#define ARC_REG_TLBPD1         0x406
+#define ARC_REG_TLBINDEX       0x407
+#define ARC_REG_TLBCOMMAND     0x408
+#define ARC_REG_PID            0x409
+#define ARC_REG_SCRATCH_DATA0  0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE             (1 << 31)       /* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR           0x80000000
+
+/* TLB Commands */
+#define TLBWrite    0x1
+#define TLBRead     0x2
+#define TLBGetIndex 0x3
+#define TLBProbe    0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI  0x5                /* write JTLB without inv uTLBs */
+#define TLBIVUTLB   0x6                /* explicitly inv uTLBs */
+#else
+#undef TLBWriteNI              /* These cmds don't exist on older MMU */
+#undef TLBIVUTLB
+#endif
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR         0x77    /* Build Config reg */
+#define ARC_REG_IC_IVIC                0x10
+#define ARC_REG_IC_CTRL                0x11
+#define ARC_REG_IC_IVIL                0x19
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_IC_PTAG                0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE   0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR         0x72
+#define ARC_REG_DC_IVDC                0x47
+#define ARC_REG_DC_CTRL                0x48
+#define ARC_REG_DC_IVDL                0x4A
+#define ARC_REG_DC_FLSH                0x4B
+#define ARC_REG_DC_FLDL                0x4C
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_DC_PTAG                0x5C
+#endif
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH  0x40
+#define DC_CTRL_FLUSH_STATUS    0x100
+
+/* MMU Management regs */
+#define ARC_REG_PID            0x409
+#define ARC_REG_SCRATCH_DATA0  0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE             (1 << 31)       /* Enable MMU for process */
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT         0x300
+#define ARC_AUX_DPFP_1L         0x301
+#define ARC_AUX_DPFP_1H         0x302
+#define ARC_AUX_DPFP_2L         0x303
+#define ARC_AUX_DPFP_2H         0x304
+#define ARC_AUX_DPFP_STAT       0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ *      Inline ASM macros to read/write AUX Regs
+ *      Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg)      __builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val)          \
+               __builtin_arc_sr((unsigned int)val, reg_immed)
+
+#else
+
+#define read_aux_reg(reg)              \
+({                                     \
+       unsigned int __ret;             \
+       __asm__ __volatile__(           \
+       "       lr    %0, [%1]"         \
+       : "=r"(__ret)                   \
+       : "i"(reg));                    \
+       __ret;                          \
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ *    write_aux_reg(0x69, some_val);
+ * This generates tightest code.
+ */
+#define write_aux_reg(reg_imm, val)    \
+({                                     \
+       __asm__ __volatile__(           \
+       "       sr   %0, [%1]   \n"     \
+       :                               \
+       : "ir"(val), "i"(reg_imm));     \
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ *  * e.g.
+ *      reg_num = 0x69
+ *      write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ *  memory to a reg hence not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val)                \
+({                                             \
+       unsigned int tmp;                       \
+                                               \
+       __asm__ __volatile__(                   \
+       "       ld   %0, [%2]   \n\t"           \
+       "       sr   %1, [%0]   \n\t"           \
+       : "=&r"(tmp)                            \
+       : "r"(val), "memory"(&reg_in_var));     \
+})
+
+#endif
+
+#define READ_BCR(reg, into)                            \
+{                                                      \
+       unsigned int tmp;                               \
+       tmp = read_aux_reg(reg);                        \
+       if (sizeof(tmp) == sizeof(into)) {              \
+               into = *((typeof(into) *)&tmp);         \
+       } else {                                        \
+               extern void bogus_undefined(void);      \
+               bogus_undefined();                      \
+       }                                               \
+}
+
+#define WRITE_BCR(reg, into)                           \
+{                                                      \
+       unsigned int tmp;                               \
+       if (sizeof(tmp) == sizeof(into)) {              \
+               tmp = (*(unsigned int *)(into));        \
+               write_aux_reg(reg, tmp);                \
+       } else  {                                       \
+               extern void bogus_undefined(void);      \
+               bogus_undefined();                      \
+       }                                               \
+}
+
+/* Helpers */
+#define TO_KB(bytes)           ((bytes) >> 10)
+#define TO_MB(bytes)           (TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages)   ((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages)   (PAGES_TO_KB(n_pages) >> 10)
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+       struct {
+               unsigned int l, h;
+       } aux_dpfp[2];
+};
+#endif
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+       unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_mmu_1_2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
+#else
+       unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+                    u_itlb:4, u_dtlb:4;
+#else
+       unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+                    ways:4, ver:8;
+#endif
+};
+
+#define EXTN_SWAP_VALID     0x1
+#define EXTN_NORM_VALID     0x2
+#define EXTN_MINMAX_VALID   0x2
+#define EXTN_BARREL_VALID   0x2
+
+struct bcr_extn {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
+                    norm:2, swap:1;
+#else
+       unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
+                    crc:1, pad:20;
+#endif
+};
+
+/* DSP Options Ref Manual */
+struct bcr_extn_mac_mul {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:16, type:8, ver:8;
+#else
+       unsigned int ver:8, type:8, pad:16;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+       unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+       unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int start:8, pad2:8, sz:8, pad:8;
+#else
+       unsigned int pad:8, sz:8, pad2:8, start:8;
+#endif
+};
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+       unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int addr:24, ver:8;
+#else
+       unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int res:21, sz:3, ver:8;
+#else
+       unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* Both SP and DP FPU BCRs have same format */
+struct bcr_fp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int fast:1, ver:8;
+#else
+       unsigned int ver:8, fast:1;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+       unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
+struct cpuinfo_arc_cache {
+       unsigned int has_aliasing, sz, line_len, assoc, ver;
+};
+
+struct cpuinfo_arc_ccm {
+       unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+       struct cpuinfo_arc_cache icache, dcache;
+       struct cpuinfo_arc_mmu mmu;
+       struct bcr_identity core;
+       unsigned int timers;
+       unsigned int vec_base;
+       unsigned int uncached_base;
+       struct cpuinfo_arc_ccm iccm, dccm;
+       struct bcr_extn extn;
+       struct bcr_extn_xymem extn_xymem;
+       struct bcr_extn_mac_mul extn_mac_mul;
+       struct bcr_fp fp, dpfp;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644 (file)
index 0000000..dad1876
--- /dev/null
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644 (file)
index 0000000..83f03ca
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v)  ((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+       unsigned int temp;
+
+       /*
+        * Atomically add @i to @v->counter: LLOCK tags the location,
+        * SCOND's store fails if anything touched it in between, and
+        * BNZ then retries the whole read-modify-write sequence.
+        */
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       add     %0, %0, %2      \n"
+       "       scond   %0, [%1]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(temp)   /* Early clobber, to prevent reg reuse */
+       : "r"(&v->counter), "ir"(i)
+       : "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       unsigned int temp;
+
+       /* Atomically subtract @i from @v->counter (LLOCK/SCOND retry loop) */
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       sub     %0, %0, %2      \n"
+       "       scond   %0, [%1]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(temp)
+       : "r"(&v->counter), "ir"(i)
+       : "cc");
+}
+
+/* add and also return the new value */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       unsigned int temp;
+
+       /*
+        * Atomic add via LLOCK/SCOND retry loop; %0 holds the new value
+        * on loop exit, which is what gets returned.
+        *
+        * NOTE(review): nothing here orders surrounding memory accesses,
+        * yet atomic_*_return() is conventionally expected to be a full
+        * barrier — confirm against this core's memory ordering model.
+        */
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       add     %0, %0, %2      \n"
+       "       scond   %0, [%1]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(temp)
+       : "r"(&v->counter), "ir"(i)
+       : "cc");
+
+       return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       unsigned int temp;
+
+       /*
+        * Atomic subtract returning the new value (mirror of
+        * atomic_add_return with SUB in the LLOCK/SCOND loop).
+        */
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       sub     %0, %0, %2      \n"
+       "       scond   %0, [%1]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(temp)
+       : "r"(&v->counter), "ir"(i)
+       : "cc");
+
+       return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+       unsigned int temp;
+
+       /* Atomically clear the bits in @mask at *@addr (BIC = AND-NOT) */
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       bic     %0, %0, %2      \n"
+       "       scond   %0, [%1]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(temp)
+       : "r"(addr), "ir"(mask)
+       : "cc");
+}
+
+#else  /* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating atomic_xxx API locking protocol in UP for optimization sake */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+       /*
+        * Independent of hardware support, all of the atomic_xxx() APIs need
+        * to follow the same locking rules to make sure that a "hardware"
+        * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+        * sequence
+        *
+        * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+        * requires the locking.
+        */
+       unsigned long flags;
+
+       atomic_ops_lock(flags);
+       v->counter = i;
+       atomic_ops_unlock(flags);
+}
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+       unsigned long flags;
+
+       atomic_ops_lock(flags);
+       v->counter += i;
+       atomic_ops_unlock(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       unsigned long flags;
+
+       atomic_ops_lock(flags);
+       v->counter -= i;
+       atomic_ops_unlock(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       unsigned long flags;
+       unsigned long temp;
+
+       atomic_ops_lock(flags);
+       temp = v->counter;
+       temp += i;
+       v->counter = temp;
+       atomic_ops_unlock(flags);
+
+       return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       unsigned long flags;
+       unsigned long temp;
+
+       atomic_ops_lock(flags);
+       temp = v->counter;
+       temp -= i;
+       v->counter = temp;
+       atomic_ops_unlock(flags);
+
+       return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+       unsigned long flags;
+
+       atomic_ops_lock(flags);
+       *addr &= ~mask;
+       atomic_ops_unlock(flags);
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u)                                   \
+({                                                                     \
+       int c, old;                                                     \
+       c = atomic_read(v);                                             \
+       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+               c = old;                                                \
+       c;                                                              \
+})
+
+#define atomic_inc_not_zero(v)         atomic_add_unless((v), 1, 0)
+
+#define atomic_inc(v)                  atomic_add(1, v)
+#define atomic_dec(v)                  atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)         (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)         (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v)      (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v)      (atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i)                 { (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..f6cb7c4
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define rmb() mb()
+#define wmb() mb()
+#define set_mb(var, value)  do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define read_barrier_depends()  mb()
+
+/* TODO-vineetg verify the correctness of macros here */
+#ifdef CONFIG_SMP
+#define smp_mb()        mb()
+#define smp_rmb()       rmb()
+#define smp_wmb()       wmb()
+#else
+#define smp_mb()        barrier()
+#define smp_rmb()       barrier()
+#define smp_wmb()       barrier()
+#endif
+
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec()     barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc()     barrier()
+
+#define smp_read_barrier_depends()      do { } while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644 (file)
index 0000000..647a83a
--- /dev/null
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
+ * The Kconfig glue ensures that in SMP, this is only set if the container
+ * SoC/platform has cross-core coherent LLOCK/SCOND
+ */
+#if defined(CONFIG_ARC_HAS_LLSC)
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned int temp;
+
+       /* advance to the word containing bit @nr */
+       m += nr >> 5;
+
+       /*
+        * @nr is only masked for compile-time constants; for runtime
+        * values BSET itself ignores all but the bottom 5 bits (see the
+        * rationale comment in the !LLSC half of this file).
+        */
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       /* Atomic set via LLOCK/BSET/SCOND, retrying until SCOND succeeds */
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       bset    %0, %0, %2      \n"
+       "       scond   %0, [%1]        \n"
+       "       bnz     1b      \n"
+       : "=&r"(temp)
+       : "r"(m), "ir"(nr)
+       : "cc");
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned int temp;
+
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       bclr    %0, %0, %2      \n"
+       "       scond   %0, [%1]        \n"
+       "       bnz     1b      \n"
+       : "=&r"(temp)
+       : "r"(m), "ir"(nr)
+       : "cc");
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned int temp;
+
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       bxor    %0, %0, %2      \n"
+       "       scond   %0, [%1]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(temp)
+       : "r"(m), "ir"(nr)
+       : "cc");
+}
+
+/*
+ * Semantically:
+ *    Test the bit
+ *    if clear
+ *        set it and return 0 (old value)
+ *    else
+ *        return 1 (old value).
+ *
+ * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
+ * and the old value of bit is returned
+ */
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long old, temp;
+
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       /* %0 keeps the pre-BSET word so the old bit can be reported */
+       __asm__ __volatile__(
+       "1:     llock   %0, [%2]        \n"
+       "       bset    %1, %0, %3      \n"
+       "       scond   %1, [%2]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(old), "=&r"(temp)
+       : "r"(m), "ir"(nr)
+       : "cc");
+
+       /*
+        * NOTE(review): for a non-constant @nr >= 32, `1 << nr` below is
+        * undefined behavior at the C level even though the hardware BSET
+        * above only uses the bottom 5 bits — verify callers, or mask.
+        */
+       return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned int old, temp;
+
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       __asm__ __volatile__(
+       "1:     llock   %0, [%2]        \n"
+       "       bclr    %1, %0, %3      \n"
+       "       scond   %1, [%2]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(old), "=&r"(temp)
+       : "r"(m), "ir"(nr)
+       : "cc");
+
+       return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned int old, temp;
+
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       __asm__ __volatile__(
+       "1:     llock   %0, [%2]        \n"
+       "       bxor    %1, %0, %3      \n"
+       "       scond   %1, [%2]        \n"
+       "       bnz     1b              \n"
+       : "=&r"(old), "=&r"(temp)
+       : "r"(m), "ir"(nr)
+       : "cc");
+
+       return (old & (1 << nr)) != 0;
+}
+
+#else  /* !CONFIG_ARC_HAS_LLSC */
+
+#include <asm/smp.h>
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ *
+ * There's "significant" micro-optimization in writing our own variants of
+ * bitops (over generic variants)
+ *
+ * (1) The generic APIs have "signed" @nr while we have it "unsigned"
+ *     This avoids extra code being generated for pointer arithmetic, since
+ *     the compiler cannot otherwise assume the index is non-negative
+ * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
+ *     only consider the bottom 5 bits of @nr, so NO need to mask them off.
+ *     (GCC Quirk: however for constant @nr we still need to do the masking
+ *             at compile time)
+ */
+
+/* Atomically set bit @nr in the bitmap at @m (lock-protected RMW) */
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long mask, flags;
+
+       /* word holding the bit, then bit position within that word */
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       mask = 1UL << nr;
+
+       bitops_lock(flags);
+       *m |= mask;
+       bitops_unlock(flags);
+}
+
+/* Atomically clear bit @nr in the bitmap at @m (lock-protected RMW) */
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long mask, flags;
+
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       mask = ~(1UL << nr);
+
+       bitops_lock(flags);
+       *m &= mask;
+       bitops_unlock(flags);
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long temp, flags;
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       bitops_lock(flags);
+
+       temp = *m;
+       *m = temp ^ (1UL << nr);
+
+       bitops_unlock(flags);
+}
+
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long old, flags;
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       bitops_lock(flags);
+
+       old = *m;
+       *m = old | (1 << nr);
+
+       bitops_unlock(flags);
+
+       return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long old, flags;
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       bitops_lock(flags);
+
+       old = *m;
+       *m = old & ~(1 << nr);
+
+       bitops_unlock(flags);
+
+       return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long old, flags;
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       bitops_lock(flags);
+
+       old = *m;
+       *m = old ^ (1 << nr);
+
+       bitops_unlock(flags);
+
+       return (old & (1 << nr)) != 0;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+/***************************************
+ * Non atomic variants
+ **************************************/
+
+/* Non-atomic set_bit: plain read-modify-write, caller ensures exclusivity */
+static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
+{
+       /* advance to the word containing bit @nr */
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       *m |= 1UL << nr;
+}
+
+static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long temp;
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       temp = *m;
+       *m = temp & ~(1UL << nr);
+}
+
+static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long temp;
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       temp = *m;
+       *m = temp ^ (1UL << nr);
+}
+
+/* Non-atomic test_and_set: report old state of bit @nr, then set it */
+static inline int
+__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long old, mask;
+
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       mask = 1 << nr;
+       old = *m;
+       *m = old | mask;
+
+       return !!(old & mask);
+}
+
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long old;
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       old = *m;
+       *m = old & ~(1 << nr);
+
+       return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+       unsigned long old;
+       m += nr >> 5;
+
+       if (__builtin_constant_p(nr))
+               nr &= 0x1f;
+
+       old = *m;
+       *m = old ^ (1 << nr);
+
+       return (old & (1 << nr)) != 0;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+       return ((1UL << (nr & 31)) &
+               (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+/*
+ * Test bit @nr (non-atomic read).
+ *
+ * The ARC700 bit insns only look at the bottom 5 bits of a shift amount,
+ * but in C a shift count >= the type width is undefined behavior, so mask
+ * @nr explicitly (and shift an unsigned long) instead of relying on the
+ * hardware quirk.
+ */
+static inline int
+__test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+       unsigned long mask;
+
+       addr += nr >> 5;
+
+       mask = 1UL << (nr & 0x1f);
+
+       return ((mask & *addr) != 0);
+}
+
+#define test_bit(nr, addr)     (__builtin_constant_p(nr) ? \
+                                       __constant_test_bit((nr), (addr)) : \
+                                       __test_bit((nr), (addr)))
+
+/*
+ * Count the number of zeros, starting from MSB
+ * Helper for fls( ) friends
+ * This is a pure count, so (1-32) or (0-31) doesn't apply
+ * It could be 0 to 32, based on num of 0's in there
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+       unsigned int res;
+
+       __asm__ __volatile__(
+       "       norm.f  %0, %1          \n"
+       "       mov.n   %0, 0           \n"
+       "       add.p   %0, %0, 1       \n"
+       : "=r"(res)
+       : "r"(x)
+       : "cc");
+
+       return res;
+}
+
+/*
+ * fls() for compile-time constants: position of the most significant
+ * set bit, 1-based (returns 0 for x == 0).  Written as a simple shift
+ * loop; for a constant argument the compiler folds it entirely away.
+ */
+static inline int constant_fls(int x)
+{
+       unsigned int v = x;
+       int bits = 0;
+
+       while (v) {
+               v >>= 1;
+               bits++;
+       }
+
+       return bits;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+       if (__builtin_constant_p(x))
+              return constant_fls(x);
+
+       return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+       if (!x)
+               return 0;
+       else
+               return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long word)
+{
+       if (!word)
+               return word;
+
+       return ffs(word) - 1;
+}
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x) __ffs(~(x))
+
+/* TODO does this affect uni-processor code */
+#define smp_mb__before_clear_bit()  barrier()
+#define smp_mb__after_clear_bit()   barrier()
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644 (file)
index 0000000..2ad8f9b
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+                           unsigned long address, unsigned long cause_reg);
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+        unsigned long cause_reg);
+
+#define BUG()  do {                            \
+       dump_stack();                                   \
+       pr_warn("Kernel BUG in %s: %s: %d!\n",  \
+               __FILE__, __func__,  __LINE__); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644 (file)
index 0000000..6632273
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* In case $$ not config, setup a dummy number for rest of kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT         6
+#else
+#define L1_CACHE_SHIFT         CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+
+#define ARC_ICACHE_WAYS        2
+#define ARC_DCACHE_WAYS        4
+
+/* Helpers */
+#define ARC_ICACHE_LINE_LEN    L1_CACHE_BYTES
+#define ARC_DCACHE_LINE_LEN    L1_CACHE_BYTES
+
+#define ICACHE_LINE_MASK       (~(ARC_ICACHE_LINE_LEN - 1))
+#define DCACHE_LINE_MASK       (~(ARC_DCACHE_LINE_LEN - 1))
+
+#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
+#error "Need to fix some code as I/D cache lines not same"
+#else
+#define is_not_cache_aligned(p)        ((unsigned long)p & (~DCACHE_LINE_MASK))
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr)      \
+({                                     \
+       unsigned int __ret;             \
+       __asm__ __volatile__(           \
+       "       ld.di %0, [%1]  \n"     \
+       : "=r"(__ret)                   \
+       : "r"(ptr));                    \
+       __ret;                          \
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({                                     \
+       __asm__ __volatile__(           \
+       "       st.di %0, [%1]  \n"     \
+       :                               \
+       : "r"(data), "r"(ptr));         \
+})
+
+/* used to give SHMLBA a value to avoid Cache Aliasing */
+extern unsigned int ARC_shmlba;
+
+#define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
+
+/*
+ * ARC700 doesn't cache any access in top 256M.
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE        0xc0000000
+
+extern void arc_cache_init(void);
+extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+extern void __init read_decode_cache_bcr(void);
+#endif
+
+#endif /* __ARC_ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644 (file)
index 0000000..97ee96f
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ *   -flush_cache_dup_mm (fork)
+ *   -likewise for flush_cache_mm (exit/execve)
+ *   -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ *  vineetg: April 2008
+ *   -Added a critical CacheLine flush to copy_to_user_page( ) which
+ *     was causing gdbserver to not setup breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+                                    int len);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz);
+void dma_cache_inv(unsigned long start, unsigned long sz);
+void dma_cache_wback(unsigned long start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end)           flush_cache_all()
+#define flush_cache_vunmap(start, end)         flush_cache_all()
+
+/*
+ * VM callbacks when entire/range of user-space V-P mappings are
+ * torn-down/get-invalidated
+ *
+ * Currently we don't support D$ aliasing configs for our VIPT caches
+ * NOPS for VIPT Cache with non-aliasing D$ configurations only
+ */
+#define flush_cache_dup_mm(mm)                 /* called on fork */
+#define flush_cache_mm(mm)                     /* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn)    /* PF handling/COW-break */
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
+do {                                                                   \
+       memcpy(dst, src, len);                                          \
+       if (vma->vm_flags & VM_EXEC)                                    \
+               flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+} while (0)
+
+/*
+ * Reading from a user page needs no I-cache maintenance (contrast with
+ * copy_to_user_page above), so this is a plain copy.  The original had a
+ * trailing ';' plus a stray line-continuation '\': the continuation spilled
+ * the macro onto the following blank line, and the embedded ';' makes
+ * "if (c) copy_from_user_page(...); else ..." ill-formed.  The caller now
+ * supplies the ';'.
+ */
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)           \
+       memcpy(dst, src, len)
+
+#endif
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644 (file)
index 0000000..1095729
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Joern Rennecke  <joern.rennecke@embecosm.com>: Jan 2012
+ *  -Insn Scheduling improvements to csum core routines.
+ *      = csum_fold( ) largely derived from ARM version.
+ *      = ip_fast_csum( ) to have modulo scheduling
+ *  -gcc 4.4.x broke networking. Alias analysis needed to be primed.
+ *   worked around by adding memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ *  -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ *     Fold a partial checksum
+ *
+ *  The 2 swords comprising the 32bit sum are added, any carry to 16th bit
+ *  added back and final sword result inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+       /*
+        * Branchless fold, derived from the ARM version (see header above):
+        * since ~s - r == ~(s + r) in two's complement, this computes
+        * ~(s + ror(s, 16)); the upper halfword of that sum is the two
+        * 16-bit halves added with end-around carry, already inverted,
+        * which the final shift extracts.
+        */
+       unsigned r = s << 16 | s >> 16; /* ror */
+       s = ~s;
+       s -= r;
+       return s >> 16;
+}
+
+/*
+ *     This is a version of ip_compute_csum() optimized for IP headers,
+ *     which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+       const void *ptr = iph;
+       unsigned int tmp, tmp2, sum;
+
+       __asm__(
+       "       ld.ab  %0, [%3, 4]              \n"
+       "       ld.ab  %2, [%3, 4]              \n"
+       "       sub    %1, %4, 2                \n"
+       "       lsr.f  lp_count, %1, 1          \n"
+       "       bcc    0f                       \n"
+       "       add.f  %0, %0, %2               \n"
+       "       ld.ab  %2, [%3, 4]              \n"
+       "0:     lp     1f                       \n"
+       "       ld.ab  %1, [%3, 4]              \n"
+       "       adc.f  %0, %0, %2               \n"
+       "       ld.ab  %2, [%3, 4]              \n"
+       "       adc.f  %0, %0, %1               \n"
+       "1:     adc.f  %0, %0, %2               \n"
+       "       add.cs %0,%0,1                  \n"
+       : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+       : "r"(ihl)
+       : "cc", "lp_count", "memory");
+
+       return csum_fold(sum);
+}
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+                  unsigned short proto, __wsum sum)
+{
+       __asm__ __volatile__(
+       "       add.f %0, %0, %1        \n"
+       "       adc.f %0, %0, %2        \n"
+       "       adc.f %0, %0, %3        \n"
+       "       adc.f %0, %0, %4        \n"
+       "       adc   %0, %0, 0         \n"
+       : "+&r"(sum)
+       : "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+         "r"(len),
+#else
+         "r"(len << 8),
+#endif
+         "r"(htons(proto))
+       : "cc");
+
+       return sum;
+}
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
new file mode 100644 (file)
index 0000000..bf9d29f
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_CLK_H
+#define _ASM_ARC_CLK_H
+
+/* Although we can't really hide core_freq, the accessor is still better way */
+extern unsigned long core_freq;
+
+static inline unsigned long arc_get_core_freq(void)
+{
+       return core_freq;
+}
+
+extern int arc_set_core_freq(unsigned long);
+
+#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644 (file)
index 0000000..03cd689
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/types.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+       unsigned long prev;
+
+       __asm__ __volatile__(
+       "1:     llock   %0, [%1]        \n"
+       "       brne    %0, %2, 2f      \n"
+       "       scond   %3, [%1]        \n"
+       "       bnz     1b              \n"
+       "2:                             \n"
+       : "=&r"(prev)
+       : "r"(ptr), "ir"(expected),
+         "r"(new) /* can't be "ir". scond can't take limm for "b" */
+       : "cc");
+
+       return prev;
+}
+
+#else
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+       unsigned long flags;
+       int prev;
+       volatile unsigned long *p = ptr;
+
+       atomic_ops_lock(flags);
+       prev = *p;
+       if (prev == expected)
+               *p = new;
+       atomic_ops_unlock(flags);
+       return prev;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+                               (unsigned long)(o), (unsigned long)(n)))
+
+/*
+ * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
+ * just to gaurantee semantics.
+ * atomic_cmpxchg() needs to use the same locks as it's other atomic siblings
+ * which also happens to be atomic_ops_lock.
+ *
+ * Thus despite semantically being different, implementation of atomic_cmpxchg()
+ * is same as cmpxchg().
+ */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+
+/*
+ * xchg (reg with memory) based on "Native atomic" EX insn
+ */
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+                                  int size)
+{
+       extern unsigned long __xchg_bad_pointer(void);
+
+       switch (size) {
+       case 4:
+               __asm__ __volatile__(
+               "       ex  %0, [%1]    \n"
+               : "+r"(val)
+               : "r"(ptr)
+               : "memory");
+
+               return val;
+       }
+       return __xchg_bad_pointer();
+}
+
+#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+                                                sizeof(*(ptr))))
+
+/*
+ * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
+ * not require any locking. However there's a quirk.
+ * ARC lacks native CMPXCHG, thus emulated (see above), using external locking -
+ * incidently it "reuses" the same atomic_ops_lock used by atomic APIs.
+ * Now, llist code uses cmpxchg() and xchg() on same data, so xchg() needs to
+ * abide by same serializing rules, thus ends up using atomic_ops_lock as well.
+ *
+ * This however is only relevant if SMP and/or ARC lacks LLSC
+ *   if (UP or LLSC)
+ *      xchg doesn't need serialization
+ *   else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
+ *      xchg needs serialization
+ */
+
+#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
+
+#define xchg(ptr, with)                        \
+({                                     \
+       unsigned long flags;            \
+       typeof(*(ptr)) old_val;         \
+                                       \
+       atomic_ops_lock(flags);         \
+       old_val = _xchg(ptr, with);     \
+       atomic_ops_unlock(flags);       \
+       old_val;                        \
+})
+
+#else
+
+#define xchg(ptr, with)  _xchg(ptr, with)
+
+#endif
+
+/*
+ * "atomic" variant of xchg()
+ * REQ: It needs to follow the same serialization rules as other atomic_xxx()
+ * Since xchg() doesn't always do that, it would seem that following defintion
+ * is incorrect. But here's the rationale:
+ *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
+ *   LLSC: atomic_ops_lock are not relevent at all (even if SMP, since LLSC
+ *         is natively "SMP safe", no serialization required).
+ *   UP  : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
+ *         could clobber them. atomic_xchg() itself would be 1 insn, so it
+ *         can't be clobbered by others. Thus no serialization required when
+ *         atomic_xchg is involved.
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644 (file)
index 0000000..87b9185
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: May 16th, 2008
+ *  - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("r25");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h
new file mode 100644 (file)
index 0000000..6097bb4
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_DEFINES_H__
+#define __ARC_ASM_DEFINES_H__
+
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#endif
+
+#ifdef CONFIG_ARC_HAS_LLSC
+#define __CONFIG_ARC_HAS_LLSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_LLSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_SWAPE
+#define __CONFIG_ARC_HAS_SWAPE_VAL 1
+#else
+#define __CONFIG_ARC_HAS_SWAPE_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_RTSC
+#define __CONFIG_ARC_HAS_RTSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_RTSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_MMU_SASID
+#define __CONFIG_ARC_MMU_SASID_VAL 1
+#else
+#define __CONFIG_ARC_MMU_SASID_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+#define __CONFIG_ARC_HAS_ICACHE        1
+#else
+#define __CONFIG_ARC_HAS_ICACHE        0
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+#define __CONFIG_ARC_HAS_DCACHE        1
+#else
+#define __CONFIG_ARC_HAS_DCACHE        0
+#endif
+
+#endif /* __ARC_ASM_DEFINES_H__ */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644 (file)
index 0000000..442ce5d
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines using pre computed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ *  -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ *  -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm/param.h>         /* HZ */
+
+static inline void __delay(unsigned long loops)
+{
+       __asm__ __volatile__(
+       "1:     sub.f %0, %0, 1 \n"
+       "       jpnz 1b         \n"
+       : "+r"(loops)
+       :
+       : "cc");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ *  -we have precomputed @loops_per_jiffy
+ *  -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ *  -Mathematically if we multiply and divide a number by same value the
+ *   result remains unchanged:  In this case, we use 2^32
+ *  -> (loops_per_N_usec * 2^32 ) / 2^32
+ *  -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ *  -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ *  -Divide by 2^32 is very simply right shift by 32
+ *  -We simply need to ensure that the multiply per above eqn happens in
+ *   64-bit precision (if CPU doesn't support it - gcc can emaulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+       unsigned long loops;
+
+       /* Cast the FIRST operand to 64-bit so usecs * 4295 * HZ is done in
+        * 64-bit MPY (real or emulated) and cannot overflow 32 bits first.
+        */
+       loops = ((unsigned long long) usecs * 4295 * HZ *
+                loops_per_jiffy) >> 32;
+
+       __delay(loops);
+}
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+                               : __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644 (file)
index 0000000..f1cce3d
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+       op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+       op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+       op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+       op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+       op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+       op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+       op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+       noflow,
+       direct_jump,
+       direct_call,
+       indirect_jump,
+       indirect_call,
+       invalid_instr
+};
+
+#define IS_BIT(word, n)                ((word) & (1 << (n)))
+#define BITS(word, s, e)       (((word) >> (s)) & (~((-2) << ((e) - (s)))))
+
+#define MAJOR_OPCODE(word)     (BITS((word), 27, 31))
+#define MINOR_OPCODE(word)     (BITS((word), 16, 21))
+#define FIELD_A(word)          (BITS((word), 0, 5))
+#define FIELD_B(word)          ((BITS((word), 12, 14)<<3) | \
+                               (BITS((word), 24, 26)))
+#define FIELD_C(word)          (BITS((word), 6, 11))
+#define FIELD_u6(word)         FIELD_C(word)
+#define FIELD_s12(word)                sign_extend(((BITS((word), 0, 5) << 6) | \
+                                       BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macro's need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word)         sign_extend(((BITS(word, 15, 15) << 8) | \
+                                       BITS(word, 16, 23)), 9)
+#define FIELD_s21(word)                sign_extend(((BITS(word, 6, 15) << 11) | \
+                                       (BITS(word, 17, 26) << 1)), 12)
+#define FIELD_s25(word)                sign_extend(((BITS(word, 0, 3) << 21) | \
+                                       (BITS(word, 6, 15) << 11) | \
+                                       (BITS(word, 17, 26) << 1)), 12)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word)                ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word)                ((BITS((word), 10, 10)<<3) | \
+                               BITS((word), 8, 10))
+#define FIELD_S_C(word)                ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word)                ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word)       (BITS((word), 0, 4))
+#define FIELD_S_u6(word)       (BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word)       (BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word)      (BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word)       sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word)       sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word)       sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word)      sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word)      sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word)      sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L             0x00000100
+#define REG_LIMM               62
+
+struct disasm_state {
+       /* generic info */
+       unsigned long words[2];
+       int instr_len;
+       int major_opcode;
+       /* info for branch/jump */
+       int is_branch;
+       int target;
+       int delay_slot;
+       enum flow flow;
+       /* info for load/store */
+       int src1, src2, src3, dest, wb_reg;
+       int zz, aa, x, pref, di;
+       int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+       if (IS_BIT(value, (bits - 1)))
+               value |= (0xffffffff << bits);
+
+       return value;
+}
+
+static inline int is_short_instr(unsigned long addr)
+{
+       uint16_t word = *((uint16_t *)addr);
+       int opcode = (word >> 11) & 0x1F;
+       return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+       int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+       *cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+               struct callee_regs *cregs);
+
+#endif /* __ARC_DISASM_H__ */
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644 (file)
index 0000000..31f77ae
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * DMA Mapping glue for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-coherent.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
+/*
+ * dma_map_* API take cpu addresses, which is kernel logical address in the
+ * untranslated address space (0x8000_0000) based. The dma address (bus addr)
+ * ideally needs to be 0x0000_0000 based hence these glue routines.
+ * However given that intermediate bus bridges can ignore the high bit, we can
+ * do with these routines being no-ops.
+ * If a platform/device comes up which sriclty requires 0 based bus addr
+ * (e.g. AHB-PCI bridge on Angel4 board), then it can provide it's own versions
+ */
+#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
+#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
+
+#else
+#include <plat/dma_addr.h>
+#endif
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+                           dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+                         dma_addr_t dma_handle);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                        dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+                      dma_addr_t dma_handle);
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+/*
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to explicitly made
+ * consistent before each use
+ */
+
+static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+                                          enum dma_data_direction dir)
+{
+       switch (dir) {
+       case DMA_FROM_DEVICE:
+               dma_cache_inv(paddr, size);
+               break;
+       case DMA_TO_DEVICE:
+               dma_cache_wback(paddr, size);
+               break;
+       case DMA_BIDIRECTIONAL:
+               dma_cache_wback_inv(paddr, size);
+               break;
+       default:
+               pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+       }
+}
+
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+                         enum dma_data_direction dir);
+
+#define _dma_cache_sync(addr, sz, dir)                 \
+do {                                                   \
+       if (__builtin_constant_p(dir))                  \
+               __inline_dma_cache_sync(addr, sz, dir); \
+       else                                            \
+               __arc_dma_cache_sync(addr, sz, dir);    \
+}                                                      \
+while (0)
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+              enum dma_data_direction dir)
+{
+       _dma_cache_sync((unsigned long)cpu_addr, size, dir);
+       return plat_kernel_addr_to_dma(dev, cpu_addr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+                size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size,
+            enum dma_data_direction dir)
+{
+       unsigned long paddr = page_to_phys(page) + offset;
+       return dma_map_single(dev, (void *)paddr, size, dir);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+              size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+          int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+                                             s->length, dir);
+
+       return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+            int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i)
+               dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+                       size_t size, enum dma_data_direction dir)
+{
+       _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+                       DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+                          size_t size, enum dma_data_direction dir)
+{
+       _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+                       DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+                             unsigned long offset, size_t size,
+                             enum dma_data_direction direction)
+{
+       _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+                       size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+                                unsigned long offset, size_t size,
+                                enum dma_data_direction direction)
+{
+       _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+                       size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+                   enum dma_data_direction dir)
+{
+       int i;
+
+       for (i = 0; i < nelems; i++, sg++)
+               _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+                      enum dma_data_direction dir)
+{
+       int i;
+
+       for (i = 0; i < nelems; i++, sg++)
+               _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+       /* Support 32 bit DMA mask exclusively */
+       return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+       if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+               return -EIO;
+
+       *dev->dma_mask = dma_mask;
+
+       return 0;
+}
+
+#endif
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644 (file)
index 0000000..ca7c451
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644 (file)
index 0000000..f4c8d36
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/* These ELF defines belong to uapi but libc elf.h already defines them */
+#define EM_ARCOMPACT           93
+
+/* ARC Relocations (kernel Modules only) */
+#define  R_ARC_32              0x4
+#define  R_ARC_32_ME           0x1B
+#define  R_ARC_S25H_PCREL      0x10
+#define  R_ARC_S25W_PCREL      0x11
+
+/*to set parameters in the core dumps */
+#define ELF_ARCH               EM_ARCOMPACT
+#define ELF_CLASS              ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA               ELFDATA2MSB
+#else
+#define ELF_DATA               ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ *  -we don't load something for the wrong architecture.
+ *  -The userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE      PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader.  We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE                (2 * TASK_SIZE / 3)
+
+/*
+ * When the program starts, a1 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI.  A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr)   ((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP      (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization.  This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM   (NULL)
+
+#define SET_PERSONALITY(ex) \
+       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+
+#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644 (file)
index 0000000..23daa32
--- /dev/null
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *  Stack switching code can no longer reliably rely on the fact that
+ *  if we are NOT in user mode, stack is switched to kernel mode.
+ *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
+ *  it's prologue including stack switching from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ *  -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ *   Normally CPU does this automatically, however when doing FAKE rtie,
+ *   we also need to explicitly do this. The problem in macros
+ *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *   was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context
+ *
+ * Vineetg: May 5th 2008
+ *  -Modified CALLEE_REG save/restore macros to handle the fact that
+ *      r25 contains the kernel current task ptr
+ *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ *      address Write back load ld.ab instead of seperate ld/add instn
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#ifdef __ASSEMBLY__
+#include <asm/unistd.h>                /* For NR_syscalls defination */
+#include <asm/asm-offsets.h>
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>     /* For VMALLOC_START */
+#include <asm/thread_info.h>   /* For THREAD_SIZE */
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a    reg1, [reg2, x]  => Pre Incr
+ *      Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab   reg1, [reg2, x]  => Post Incr
+ *      Eff Addr for load = [reg2]
+ */
+
+/*--------------------------------------------------------------
+ * Save caller saved registers (scratch registers) ( r0 - r12 )
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ *-------------------------------------------------------------*/
+.macro  SAVE_CALLER_SAVED
+       st.a    r0, [sp, -4]
+       st.a    r1, [sp, -4]
+       st.a    r2, [sp, -4]
+       st.a    r3, [sp, -4]
+       st.a    r4, [sp, -4]
+       st.a    r5, [sp, -4]
+       st.a    r6, [sp, -4]
+       st.a    r7, [sp, -4]
+       st.a    r8, [sp, -4]
+       st.a    r9, [sp, -4]
+       st.a    r10, [sp, -4]
+       st.a    r11, [sp, -4]
+       st.a    r12, [sp, -4]
+.endm
+
+/*--------------------------------------------------------------
+ * Restore caller saved registers (scratch registers)
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLER_SAVED
+       ld.ab   r12, [sp, 4]
+       ld.ab   r11, [sp, 4]
+       ld.ab   r10, [sp, 4]
+       ld.ab   r9, [sp, 4]
+       ld.ab   r8, [sp, 4]
+       ld.ab   r7, [sp, 4]
+       ld.ab   r6, [sp, 4]
+       ld.ab   r5, [sp, 4]
+       ld.ab   r4, [sp, 4]
+       ld.ab   r3, [sp, 4]
+       ld.ab   r2, [sp, 4]
+       ld.ab   r1, [sp, 4]
+       ld.ab   r0, [sp, 4]
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ *  on kernel stack.
+ * User mode callee regs need to be saved in case of
+ *    -fork and friends for replicating from parent to child
+ *    -before going into do_signal( ) for ptrace/core-dump
+ * Special case handling is required for r25 in case it is used by kernel
+ *  for caching task ptr. Low level exception/ISR save user mode r25
+ *  into task->thread.user_r25. So it needs to be retrieved from there and
+ *  saved into kernel stack with rest of callee reg-file
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_USER
+       st.a    r13, [sp, -4]
+       st.a    r14, [sp, -4]
+       st.a    r15, [sp, -4]
+       st.a    r16, [sp, -4]
+       st.a    r17, [sp, -4]
+       st.a    r18, [sp, -4]
+       st.a    r19, [sp, -4]
+       st.a    r20, [sp, -4]
+       st.a    r21, [sp, -4]
+       st.a    r22, [sp, -4]
+       st.a    r23, [sp, -4]
+       st.a    r24, [sp, -4]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       ; Retrieve orig r25 and save it on stack
+       ld      r12, [r25, TASK_THREAD + THREAD_USER_R25]
+       st.a    r12, [sp, -4]
+#else
+       st.a    r25, [sp, -4]
+#endif
+
+       /* move up by 1 word to "create" callee_regs->"stack_place_holder" */
+       sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * kernel mode callee regs needed to be saved in case of context switch
+ * If r25 is used for caching task pointer then that need not be saved
+ * as it can be re-created from current task global
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_KERNEL
+       st.a    r13, [sp, -4]
+       st.a    r14, [sp, -4]
+       st.a    r15, [sp, -4]
+       st.a    r16, [sp, -4]
+       st.a    r17, [sp, -4]
+       st.a    r18, [sp, -4]
+       st.a    r19, [sp, -4]
+       st.a    r20, [sp, -4]
+       st.a    r21, [sp, -4]
+       st.a    r22, [sp, -4]
+       st.a    r23, [sp, -4]
+       st.a    r24, [sp, -4]
+#ifdef CONFIG_ARC_CURR_IN_REG
+       sub     sp, sp, 8
+#else
+       st.a    r25, [sp, -4]
+       sub     sp, sp, 4
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_KERNEL:
+ * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
+ *  This is reverse of SAVE_CALLEE_SAVED,
+ *
+ * NOTE:
+ * Ideally this shd only be called in switch_to for loading
+ *  switched-IN task's CALLEE Reg File.
+ *  For all other cases RESTORE_CALLEE_SAVED_FAST must be used
+ *  which simply pops the stack w/o touching regs.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_KERNEL
+
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       add     sp, sp, 8  /* skip callee_reg gutter and user r25 placeholder */
+#else
+       add     sp, sp, 4   /* skip "callee_regs->stack_place_holder" */
+       ld.ab   r25, [sp, 4]
+#endif
+
+       ld.ab   r24, [sp, 4]
+       ld.ab   r23, [sp, 4]
+       ld.ab   r22, [sp, 4]
+       ld.ab   r21, [sp, 4]
+       ld.ab   r20, [sp, 4]
+       ld.ab   r19, [sp, 4]
+       ld.ab   r18, [sp, 4]
+       ld.ab   r17, [sp, 4]
+       ld.ab   r16, [sp, 4]
+       ld.ab   r15, [sp, 4]
+       ld.ab   r14, [sp, 4]
+       ld.ab   r13, [sp, 4]
+
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_USER:
+ * This is called after do_signal where tracer might have changed callee regs
+ * thus we need to restore the reg file.
+ * Special case handling is required for r25 in case it is used by kernel
+ *  for caching task ptr. Ptrace would have modified on-kernel-stack value of
+ *  r25, which needs to be shoved back into task->thread.user_r25 where from
+ *  Low level exception/ISR return code will retrieve to populate with rest of
+ *  callee reg-file.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_USER
+
+       add     sp, sp, 4   /* skip "callee_regs->stack_place_holder" */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       ld.ab   r12, [sp, 4]
+       st      r12, [r25, TASK_THREAD + THREAD_USER_R25]
+#else
+       ld.ab   r25, [sp, 4]
+#endif
+
+       ld.ab   r24, [sp, 4]
+       ld.ab   r23, [sp, 4]
+       ld.ab   r22, [sp, 4]
+       ld.ab   r21, [sp, 4]
+       ld.ab   r20, [sp, 4]
+       ld.ab   r19, [sp, 4]
+       ld.ab   r18, [sp, 4]
+       ld.ab   r17, [sp, 4]
+       ld.ab   r16, [sp, 4]
+       ld.ab   r15, [sp, 4]
+       ld.ab   r14, [sp, 4]
+       ld.ab   r13, [sp, 4]
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+       add     sp, sp, 14 * 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore User mode r25 saved in task_struct->thread.user_r25
+ *-------------------------------------------------------------*/
+.macro RESTORE_USER_R25
+       ld  r25, [r25, TASK_THREAD + THREAD_USER_R25]
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of it's kernel mode stack
+ * tsk->thread_info is really a PAGE, whose bottom hoists stack
+ * which grows upwards towards thread_info
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+       /* Get task->thread_info (this is essentially start of a PAGE) */
+       ld  \out, [\tsk, TASK_THREAD_INFO]
+
+       /* Go to end of page where stack begins (grows upwards) */
+       add2 \out, \out, (THREAD_SIZE - 4)/4   /* one word GUTTER */
+
+.endm
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry   : r9 contains pre-IRQ/exception/trap status32
+ * Exit    : SP is set to kernel mode stack pointer
+ *           If CURR_IN_REG, r25 set to "current" task pointer
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+       /* User Mode when this happened ? Yes: Proceed to switch stack */
+       bbit1   r9, STATUS_U_BIT, 88f
+
+       /* OK we were already in kernel mode when this event happened, thus can
+        * assume SP is kernel mode SP. _NO_ need to do any stack switching
+        */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+       /* However....
+        * If Level 2 Interrupts enabled, we may end up with a corner case:
+        * 1. User Task executing
+        * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+        * 3. But before it could switch SP from USER to KERNEL stack
+        *      a L2 IRQ "Interrupts" L1
+        * Thay way although L2 IRQ happened in Kernel mode, stack is still
+        * not switched.
+        * To handle this, we may need to switch stack even if in kernel mode
+        * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+        */
+       brlo sp, VMALLOC_START, 88f
+
+       /* TODO: vineetg:
+        * We need to be a bit more cautious here. What if a kernel bug in
+        * L1 ISR, caused SP to go whaco (some small value which looks like
+        * USER stk) and then we take L2 ISR.
+        * Above brlo alone would treat it as a valid L1-L2 sceanrio
+        * instead of shouting alound
+        * The only feasible way is to make sure this L2 happened in
+        * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
+        * L1 ISR before it switches stack
+        */
+
+#endif
+
+       /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
+        * safe-keeping not really needed, but it keeps the epilogue code
+        * (SP restore) simpler/uniform.
+        */
+       b.d     77f
+
+       st.a    sp, [sp, -12]   ; Make room for orig_r0 and orig_r8
+
+88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */
+
+       GET_CURR_TASK_ON_CPU   r9
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+       /* If current task pointer cached in r25, time to
+        *  -safekeep USER r25 in task->thread_struct->user_r25
+        *  -load r25 with current task ptr
+        */
+       st.as   r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
+       mov     r25, r9
+#endif
+
+       /* With current tsk in r9, get it's kernel mode stack base */
+       GET_TSK_STACK_BASE  r9, r9
+
+#ifdef PT_REGS_CANARY
+       st      0xabcdabcd, [r9, 0]
+#endif
+
+       /* Save Pre Intr/Exception User SP on kernel stack */
+       st.a    sp, [r9, -12]   ; Make room for orig_r0 and orig_r8
+
+       /* CAUTION:
+        * SP should be set at the very end when we are done with everything
+        * In case of 2 levels of interrupt we depend on value of SP to assume
+        * that everything else is done (loading r25 etc)
+        */
+
+       /* set SP to point to kernel mode stack */
+       mov sp, r9
+
+77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN  reg
+
+       ld  \reg, [sp, PT_status32]
+       bic  \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
+       bset \reg, \reg, STATUS_L_BIT
+       sr  \reg, [erstatus]
+       mov \reg, 55f
+       sr  \reg, [eret]
+
+       rtie
+55:
+.endm
+
+/*
+ * @reg [OUT] &thread_info of "current"
+ */
+.macro GET_CURR_THR_INFO_FROM_SP  reg
+       and \reg, sp, ~(THREAD_SIZE - 1)
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS  reg
+       GET_CURR_THR_INFO_FROM_SP  \reg
+       ld  \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception Prologue, a core reg is temporarily needed to
+ * code the rest of prolog (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of ptregs.
+ *-------------------------------------------------------------*/
+.macro EXCPN_PROLOG_FREEUP_REG reg
+#ifdef CONFIG_SMP
+       sr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+       st  \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+.macro EXCPN_PROLOG_RESTORE_REG        reg
+#ifdef CONFIG_SMP
+       lr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+       ld  \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
+ * Requires SP to be already switched to kernel mode Stack
+ * sp points to the next free element on the stack at exit of this macro.
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ * Note that syscalls are implemented via TRAP which is also a exception
+ * from CPU's point of view
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_EXCEPTION   marker
+
+       st      \marker, [sp, 8]
+       st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */
+
+       /* Restore r9 used to code the early prologue */
+       EXCPN_PROLOG_RESTORE_REG  r9
+
+       SAVE_CALLER_SAVED
+       st.a    r26, [sp, -4]   /* gp */
+       st.a    fp, [sp, -4]
+       st.a    blink, [sp, -4]
+       lr      r9, [eret]
+       st.a    r9, [sp, -4]
+       lr      r9, [erstatus]
+       st.a    r9, [sp, -4]
+       st.a    lp_count, [sp, -4]
+       lr      r9, [lp_end]
+       st.a    r9, [sp, -4]
+       lr      r9, [lp_start]
+       st.a    r9, [sp, -4]
+       lr      r9, [erbta]
+       st.a    r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+       mov   r9, 0xdeadbeef
+       st    r9, [sp, -4]
+#endif
+
+       /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+       sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for exceptions
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_SYS
+       SAVE_ALL_EXCEPTION  orig_r8_IS_EXCPN
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for sys calls
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_TRAP
+       /*
+        * Setup pt_regs->orig_r8.
+        * Encode syscall number (r8) in upper short word of event type (r9)
+        * N.B. #1: This is already endian safe (see ptrace.h)
+        *      #2: Only r9 can be used as scratch as it is already clobbered
+        *          and it's contents are no longer needed by the latter part
+        *          of exception prologue
+        */
+       lsl  r9, r8, 16
+       or   r9, r9, orig_r8_IS_SCALL
+
+       SAVE_ALL_EXCEPTION  r9
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deffered
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro RESTORE_ALL_SYS
+
+       add sp, sp, 4       /* hop over unused "pt_regs->stack_place_holder" */
+
+       ld.ab   r9, [sp, 4]
+       sr      r9, [erbta]
+       ld.ab   r9, [sp, 4]
+       sr      r9, [lp_start]
+       ld.ab   r9, [sp, 4]
+       sr      r9, [lp_end]
+       ld.ab   r9, [sp, 4]
+       mov     lp_count, r9
+       ld.ab   r9, [sp, 4]
+       sr      r9, [erstatus]
+       ld.ab   r9, [sp, 4]
+       sr      r9, [eret]
+       ld.ab   blink, [sp, 4]
+       ld.ab   fp, [sp, 4]
+       ld.ab   r26, [sp, 4]    /* gp */
+       RESTORE_CALLER_SAVED
+
+       ld  sp, [sp] /* restore original sp */
+       /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save all registers used by interrupt handlers.
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_INT1
+
+       /* restore original r9 , saved in int1_saved_reg
+       * It will be saved on stack in macro: SAVE_CALLER_SAVED
+       */
+#ifdef CONFIG_SMP
+       lr  r9, [ARC_REG_SCRATCH_DATA0]
+#else
+       ld  r9, [@int1_saved_reg]
+#endif
+
+       /* now we are ready to save the remaining context :) */
+       st      orig_r8_IS_IRQ1, [sp, 8]    /* Event Type */
+       st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */
+       SAVE_CALLER_SAVED
+       st.a    r26, [sp, -4]   /* gp */
+       st.a    fp, [sp, -4]
+       st.a    blink, [sp, -4]
+       st.a    ilink1, [sp, -4]
+       lr      r9, [status32_l1]
+       st.a    r9, [sp, -4]
+       st.a    lp_count, [sp, -4]
+       lr      r9, [lp_end]
+       st.a    r9, [sp, -4]
+       lr      r9, [lp_start]
+       st.a    r9, [sp, -4]
+       lr      r9, [bta_l1]
+       st.a    r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+       mov   r9, 0xdeadbee1
+       st    r9, [sp, -4]
+#endif
+       /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+       sub sp, sp, 4
+.endm
+
+.macro SAVE_ALL_INT2
+
+       /* TODO-vineetg: SMP we can't use global nor can we use
+       *   SCRATCH0 as we do for int1 because while int1 is using
+       *   it, int2 can come
+       */
+       /* retsore original r9 , saved in sys_saved_r9 */
+       ld  r9, [@int2_saved_reg]
+
+       /* now we are ready to save the remaining context :) */
+       st      orig_r8_IS_IRQ2, [sp, 8]    /* Event Type */
+       st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */
+       SAVE_CALLER_SAVED
+       st.a    r26, [sp, -4]   /* gp */
+       st.a    fp, [sp, -4]
+       st.a    blink, [sp, -4]
+       st.a    ilink2, [sp, -4]
+       lr      r9, [status32_l2]
+       st.a    r9, [sp, -4]
+       st.a    lp_count, [sp, -4]
+       lr      r9, [lp_end]
+       st.a    r9, [sp, -4]
+       lr      r9, [lp_start]
+       st.a    r9, [sp, -4]
+       lr      r9, [bta_l2]
+       st.a    r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+       mov   r9, 0xdeadbee2
+       st    r9, [sp, -4]
+#endif
+
+       /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+       sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deffered
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+
+.macro RESTORE_ALL_INT1
+       add sp, sp, 4       /* hop over unused "pt_regs->stack_place_holder" */
+
+       ld.ab   r9, [sp, 4] /* Actual reg file */
+       sr      r9, [bta_l1]
+       ld.ab   r9, [sp, 4]
+       sr      r9, [lp_start]
+       ld.ab   r9, [sp, 4]
+       sr      r9, [lp_end]
+       ld.ab   r9, [sp, 4]
+       mov     lp_count, r9
+       ld.ab   r9, [sp, 4]
+       sr      r9, [status32_l1]
+       ld.ab   r9, [sp, 4]
+       mov     ilink1, r9
+       ld.ab   blink, [sp, 4]
+       ld.ab   fp, [sp, 4]
+       ld.ab   r26, [sp, 4]    /* gp */
+       RESTORE_CALLER_SAVED
+
+       ld  sp, [sp] /* restore original sp */
+       /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+.macro RESTORE_ALL_INT2
+       add sp, sp, 4       /* hop over unused "pt_regs->stack_place_holder" */
+
+       ld.ab   r9, [sp, 4]
+       sr      r9, [bta_l2]
+       ld.ab   r9, [sp, 4]
+       sr      r9, [lp_start]
+       ld.ab   r9, [sp, 4]
+       sr      r9, [lp_end]
+       ld.ab   r9, [sp, 4]
+       mov     lp_count, r9
+       ld.ab   r9, [sp, 4]
+       sr      r9, [status32_l2]
+       ld.ab   r9, [sp, 4]
+       mov     ilink2, r9
+       ld.ab   blink, [sp, 4]
+       ld.ab   fp, [sp, 4]
+       ld.ab   r26, [sp, 4]    /* gp */
+       RESTORE_CALLER_SAVED
+
+       ld  sp, [sp] /* restore original sp */
+       /* orig_r0 and orig_r8 skipped automatically */
+
+.endm
+
+
+/* Get CPU-ID of this core */
+.macro  GET_CPU_ID  reg
+       lr  \reg, [identity]
+       lsr \reg, \reg, 8
+       bmsk \reg, \reg, 7
+.endm
+
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro  GET_CURR_TASK_ON_CPU   reg
+       GET_CPU_ID  \reg
+       ld.as  \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while   LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
+       GET_CPU_ID  \tmp
+       add2 \tmp, @_current_task, \tmp
+       st   \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+       mov r25, \tsk
+#endif
+
+.endm
+
+
+#else   /* Uniprocessor implementation of macros */
+
+.macro  GET_CURR_TASK_ON_CPU    reg
+       ld  \reg, [@_current_task]
+.endm
+
+.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
+       st  \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+       mov r25, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/* ------------------------------------------------------------------
+ * Get the ptr to some field of Current Task at @off in task struct
+ *  -Uses r25 for Current task ptr if that is enabled
+ */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR  off,  reg
+       add \reg, r25, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR  off,  reg
+       GET_CURR_TASK_ON_CPU  \reg
+       add \reg, \reg, \off
+.endm
+
+#endif /* CONFIG_ARC_CURR_IN_REG */
+
+#endif  /* __ASSEMBLY__ */
+
+#endif  /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644 (file)
index 0000000..28abc69
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16b */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644 (file)
index 0000000..4dc64dd
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+                                                       \
+       __asm__ __volatile__(                           \
+       "1:     ld  %1, [%2]                    \n"     \
+               insn                            "\n"    \
+       "2:     st  %0, [%2]                    \n"     \
+       "       mov %0, 0                       \n"     \
+       "3:                                     \n"     \
+       "       .section .fixup,\"ax\"          \n"     \
+       "       .align  4                       \n"     \
+       "4:     mov %0, %4                      \n"     \
+       "       b   3b                          \n"     \
+       "       .previous                       \n"     \
+       "       .section __ex_table,\"a\"       \n"     \
+       "       .align  4                       \n"     \
+       "       .word   1b, 4b                  \n"     \
+       "       .word   2b, 4b                  \n"     \
+       "       .previous                       \n"     \
+                                                       \
+       : "=&r" (ret), "=&r" (oldval)                   \
+       : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
+       : "cc", "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+       int op = (encoded_op >> 28) & 7;
+       int cmp = (encoded_op >> 24) & 15;
+       int oparg = (encoded_op << 8) >> 20;
+       int cmparg = (encoded_op << 20) >> 20;
+       int oldval = 0, ret;
+
+       if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+               oparg = 1 << oparg;
+
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+               return -EFAULT;
+
+       pagefault_disable();    /* implies preempt_disable() */
+
+       switch (op) {
+       case FUTEX_OP_SET:
+               __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ADD:
+               __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+               __futex_atomic_op("or  %0, %1, %3", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+               __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_XOR:
+               __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       pagefault_enable();     /* subsumes preempt_enable() */
+
+       if (!ret) {
+               switch (cmp) {
+               case FUTEX_OP_CMP_EQ:
+                       ret = (oldval == cmparg);
+                       break;
+               case FUTEX_OP_CMP_NE:
+                       ret = (oldval != cmparg);
+                       break;
+               case FUTEX_OP_CMP_LT:
+                       ret = (oldval < cmparg);
+                       break;
+               case FUTEX_OP_CMP_GE:
+                       ret = (oldval >= cmparg);
+                       break;
+               case FUTEX_OP_CMP_LE:
+                       ret = (oldval <= cmparg);
+                       break;
+               case FUTEX_OP_CMP_GT:
+                       ret = (oldval > cmparg);
+                       break;
+               default:
+                       ret = -ENOSYS;
+               }
+       }
+       return ret;
+}
+
+/* Compare-xchg with preemption disabled.
+ *  Notes:
+ *      -Best-Effort: Exchg happens only if compare succeeds.
+ *          If compare fails, returns; leaving retry/looping to upper layers
+ *      -successful cmp-xchg: return orig value in @addr (same as cmp val)
+ *      -Compare fails: return orig value in @addr
+ *      -user access r/w fails: return -EFAULT
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+                                       u32 newval)
+{
+       u32 val;
+
+       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+               return -EFAULT;
+
+       pagefault_disable();    /* implies preempt_disable() */
+
+       /* TBD : can use llock/scond */
+       __asm__ __volatile__(
+       "1:     ld    %0, [%3]  \n"
+       "       brne  %0, %1, 3f        \n"
+       "2:     st    %2, [%3]  \n"
+       "3:     \n"
+       "       .section .fixup,\"ax\"  \n"
+       "4:     mov %0, %4      \n"
+       "       b   3b  \n"
+       "       .previous       \n"
+       "       .section __ex_table,\"a\"       \n"
+       "       .align  4       \n"
+       "       .word   1b, 4b  \n"
+       "       .word   2b, 4b  \n"
+       "       .previous\n"
+       : "=&r"(val)
+       : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+       : "cc", "memory");
+
+       pagefault_enable();     /* subsumes preempt_enable() */
+
+       *uval = val;
+       return val;
+}
+
+#endif
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644 (file)
index 0000000..473424d
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+#define PCI_IOBASE ((void __iomem *)0)
+
+extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+                                 unsigned long flags);
+extern void iounmap(const void __iomem *addr);
+
+#define ioremap_nocache(phy, sz)       ioremap(phy, sz)
+#define ioremap_wc(phy, sz)            ioremap(phy, sz)
+
+/* Change struct page to physical address */
+#define page_to_phys(page)             (page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+       u8 b;
+
+       __asm__ __volatile__(
+       "       ldb%U1 %0, %1   \n"
+       : "=r" (b)
+       : "m" (*(volatile u8 __force *)addr)
+       : "memory");
+
+       return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+       u16 s;
+
+       __asm__ __volatile__(
+       "       ldw%U1 %0, %1   \n"
+       : "=r" (s)
+       : "m" (*(volatile u16 __force *)addr)
+       : "memory");
+
+       return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+       u32 w;
+
+       __asm__ __volatile__(
+       "       ld%U1 %0, %1    \n"
+       : "=r" (w)
+       : "m" (*(volatile u32 __force *)addr)
+       : "memory");
+
+       return w;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+       __asm__ __volatile__(
+       "       stb%U1 %0, %1   \n"
+       :
+       : "r" (b), "m" (*(volatile u8 __force *)addr)
+       : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+       __asm__ __volatile__(
+       "       stw%U1 %0, %1   \n"
+       :
+       : "r" (s), "m" (*(volatile u16 __force *)addr)
+       : "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+       __asm__ __volatile__(
+       "       st%U1 %0, %1    \n"
+       :
+       : "r" (w), "m" (*(volatile u32 __force *)addr)
+       : "memory");
+
+}
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_ARC_IO_H */
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644 (file)
index 0000000..4c588f9
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+#define NR_IRQS                32
+
+/* Platform Independent IRQs */
+#define TIMER0_IRQ      3
+#define TIMER1_IRQ      4
+
+#include <asm-generic/irq.h>
+
+extern void __init arc_init_IRQ(void);
+extern int __init get_hw_config_num_irq(void);
+
+void __cpuinit arc_local_timer_setup(unsigned int cpu);
+
+#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644 (file)
index 0000000..ccd8480
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ *  -Remove explicit mov of current status32 into reg, that is not needed
+ *  -Use BIC  insn instead of INVERTED + AND
+ *  -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#ifdef __KERNEL__
+
+#include <asm/arcregs.h>
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+       unsigned long temp, flags;
+
+       __asm__ __volatile__(
+       "       lr  %1, [status32]      \n"
+       "       bic %0, %1, %2          \n"
+       "       and.f 0, %1, %2 \n"
+       "       flag.nz %0              \n"
+       : "=r"(temp), "=r"(flags)
+       : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+       : "cc");
+
+       return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+       __asm__ __volatile__(
+       "       flag %0                 \n"
+       :
+       : "r"(flags));
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+extern void arch_local_irq_enable(void);
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+       unsigned long temp;
+
+       __asm__ __volatile__(
+       "       lr  %0, [status32]      \n"
+       "       and %0, %0, %1          \n"
+       "       flag %0                 \n"
+       : "=&r"(temp)
+       : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+       unsigned long temp;
+
+       __asm__ __volatile__(
+       "       lr  %0, [status32]      \n"
+       : "=&r"(temp));
+
+       return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+       return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+                       | STATUS_E2_MASK
+#endif
+               ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+       return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arch_mask_irq(unsigned int irq)
+{
+       unsigned int ienb;
+
+       ienb = read_aux_reg(AUX_IENABLE);
+       ienb &= ~(1 << irq);
+       write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static inline void arch_unmask_irq(unsigned int irq)
+{
+       unsigned int ienb;
+
+       ienb = read_aux_reg(AUX_IENABLE);
+       ienb |= (1 << irq);
+       write_aux_reg(AUX_IENABLE, ienb);
+}
+
+#else
+
+.macro IRQ_DISABLE  scratch
+       lr      \scratch, [status32]
+       bic     \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+       flag    \scratch
+.endm
+
+.macro IRQ_DISABLE_SAVE  scratch, save
+       lr      \scratch, [status32]
+       mov     \save, \scratch         /* Make a copy */
+       bic     \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+       flag    \scratch
+.endm
+
+.macro IRQ_ENABLE  scratch
+       lr      \scratch, [status32]
+       or      \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+       flag    \scratch
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* KERNEL */
+
+#endif
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644 (file)
index 0000000..3fbe6c4
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+       DIE_UNUSED,
+       DIE_TRAP,
+       DIE_IERR,
+       DIE_OOPS
+};
+
+#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644 (file)
index 0000000..f3c4934
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/user.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS           39
+
+#define BREAK_INSTR_SIZE       2
+#define CACHE_FLUSH_IS_SAFE    1
+#define NUMREGBYTES            (GDB_MAX_REGS * 4)
+#define BUFMAX                 2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+       __asm__ __volatile__ ("trap_s   0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs, int param);
+
+enum arc700_linux_regnums {
+       _R0             = 0,
+       _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+       _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+       _R25, _R26,
+       _BTA            = 27,
+       _LP_START       = 28,
+       _LP_END         = 29,
+       _LP_COUNT       = 30,
+       _STATUS32       = 31,
+       _BLINK          = 32,
+       _FP             = 33,
+       __SP            = 34,
+       _EFA            = 35,
+       _RET            = 36,
+       _ORIG_R8        = 37,
+       _STOP_PC        = 38
+};
+
+#else
+static inline void kgdb_trap(struct pt_regs *regs, int param)
+{
+}
+#endif
+
+#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644 (file)
index 0000000..4d9c211
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE   8
+#define MAX_STACK_SIZE  64
+
+struct arch_specific_insn {
+       int is_short;
+       kprobe_opcode_t *t1_addr, *t2_addr;
+       kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p)  do {  } while (0)
+
+#define kretprobe_blacklist_size    0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+                            unsigned long val, void *data);
+
+struct prev_kprobe {
+       struct kprobe *kp;
+       unsigned long status;
+};
+
+struct kprobe_ctlblk {
+       unsigned int kprobe_status;
+       struct pt_regs jprobe_saved_regs;
+       char jprobes_stack[MAX_STACK_SIZE];
+       struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+                          struct pt_regs *regs);
+#else
+static void trap_is_kprobe(unsigned long cause, unsigned long address,
+                          struct pt_regs *regs)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644 (file)
index 0000000..0283e9e
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+
+/* Can't use the ENTRY macro in linux/linkage.h
+ * gas considers ';' as comment vs. newline
+ */
+.macro ARC_ENTRY name
+       .global \name
+       .align 4
+       \name:
+.endm
+
+.macro ARC_EXIT name
+#define ASM_PREV_SYM_ADDR(name)  .-##name
+       .size \ name, ASM_PREV_SYM_ADDR(\name)
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+       .section .data.arcfp
+#else
+       .section .data
+#endif
+       .global \nm
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+       .section .text.arcfp, "ax",@progbits
+#else
+       .section .text, "ax",@progbits
+#endif
+.endm
+
+#else  /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#else
+#define __arcfp_code __attribute__((__section__(".text")))
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#else
+#define __arcfp_data __attribute__((__section__(".data")))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644 (file)
index 0000000..9998dc8
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ *     Provided by each ARC board using MACHINE_START()/MACHINE_END(), so
+ *     a multi-platform kernel builds with array of such descriptors.
+ *     We extend the early DT scan to also match the DT's "compatible" string
+ *     against the @dt_compat of all such descriptors, and one with highest
+ *     "DT score" is selected as global @machine_desc.
+ *
+ * @name:              Board/SoC name
+ * @dt_compat:         Array of device tree 'compatible' strings
+ *                     (XXX: although only 1st entry is looked at)
+ * @init_early:                Very early callback [called from setup_arch()]
+ * @init_irq:          setup external IRQ controllers [called from init_IRQ()]
+ * @init_smp:          for each CPU (e.g. setup IPI)
+ *                     [(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_time:         platform specific clocksource/clockevent registration
+ *                     [called from time_init()]
+ * @init_machine:      arch initcall level callback (e.g. populate static
+ *                     platform devices or parse Devicetree)
+ * @init_late:         Late initcall level callback
+ *
+ */
+struct machine_desc {
+       const char              *name;
+       const char              **dt_compat;
+
+       void                    (*init_early)(void);
+       void                    (*init_irq)(void);
+#ifdef CONFIG_SMP
+       void                    (*init_smp)(unsigned int);
+#endif
+       void                    (*init_time)(void);
+       void                    (*init_machine)(void);
+       void                    (*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p)                       \
+       for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+static inline struct machine_desc *default_machine_desc(void)
+{
+       /* the default machine is the last one linked in */
+       if (__arch_info_end - 1 < __arch_info_begin)
+               return NULL;
+       return __arch_info_end - 1;
+}
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name)                    \
+static const struct machine_desc __mach_desc_##_type   \
+__used                                                 \
+__attribute__((__section__(".arch.info.init"))) = {    \
+       .name           = _name,
+
+#define MACHINE_END                            \
+};
+
+extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void __init copy_devtree(void);
+
+#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644 (file)
index 0000000..56b0232
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+       unsigned long asid;     /* Pvt Addr-Space ID for mm */
+#ifdef CONFIG_ARC_TLB_DBG
+       struct task_struct *tsk;
+#endif
+} mm_context_t;
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644 (file)
index 0000000..0d71fb1
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Refactored get_new_mmu_context( ) to only handle live-mm.
+ *   retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/*             ARC700 ASID Management
+ *
+ * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
+ * with same vaddr (different tasks) to co-exist. This provides for
+ * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASID is done using software tracker @asid_cache.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping ASID back to zero.
+ *
+ * For book-keeping, Linux uses a couple of data-structures:
+ *  -mm_struct has an @asid field to keep a note of task's ASID (needed at the
+ *   time of say switch_mm( )
+ *  -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
+ *  given an ASID, finding the mm struct associated.
+ *
+ * The round-robin allocation algorithm allows for ASID stealing.
+ * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
+ * already assigned to another (switched-out) task. Obviously the prev owner
+ * is marked with an invalid ASID to make it request for a new ASID when it
+ * gets scheduled next time. However its TLB entries (with ASID "x") could
+ * exist, which must be cleared before the same ASID is used by the new owner.
+ * Flushing them would be a plausible but costly solution. Instead we force an
+ * allocation policy quirk, which ensures that a stolen ASID won't have any
+ * TLB entries associated, alleviating the need to flush.
+ * The quirk essentially is not allowing ASID allocated in prev cycle
+ * to be used past a roll-over in the next cycle.
+ * When this happens (i.e. task ASID > asid tracker), task needs to refresh
+ * its ASID, aligning it to current value of tracker. If the task doesn't get
+ * scheduled past a roll-over, hence its ASID is not yet realigned with
+ * tracker, such ASID is anyways safely reusable because it is
+ * guaranteed that TLB entries with that ASID won't exist.
+ */
+
+#define FIRST_ASID  0
+#define MAX_ASID    255                        /* 8 bit PID field in PID Aux reg */
+#define NO_ASID     (MAX_ASID + 1)     /* ASID Not alloc to mmu ctxt */
+#define NUM_ASID    ((MAX_ASID - FIRST_ASID) + 1)
+
+/* ASID to mm struct mapping */
+extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+extern int asid_cache;
+
+/*
+ * Assign a new ASID to task. If the task already has an ASID, it is
+ * relinquished.
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+       struct mm_struct *prev_owner;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       /*
+        * Relinquish the currently owned ASID (if any).
+        * Doing unconditionally saves a cmp-n-branch; for already unused
+        * ASID slot, the value was/remains NULL
+        */
+       asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+
+       /* move to new ASID */
+       if (++asid_cache > MAX_ASID) {  /* ASID roll-over */
+               asid_cache = FIRST_ASID;
+               flush_tlb_all();
+       }
+
+       /*
+        * Is next ASID already owned by some-one else (we are stealing it).
+        * If so, let the orig owner be aware of this, so when it runs, it
+        * asks for a brand new ASID. This would only happen for a long-lived
+        * task with ASID from prev allocation cycle (before ASID roll-over).
+        *
+        * This might look wrong - if we are re-using some other task's ASID,
+        * won't we use its stale TLB entries too. Actually switch_mm( ) takes
+        * care of such a case: it ensures that task with ASID from prev alloc
+        * cycle, when scheduled will refresh its ASID: see switch_mm( ) below
+        * The stealing scenario described here will only happen if that task
+        * didn't get a chance to refresh its ASID - implying stale entries
+        * won't exist.
+        */
+       prev_owner = asid_mm_map[asid_cache];
+       if (prev_owner)
+               prev_owner->context.asid = NO_ASID;
+
+       /* Assign new ASID to tsk */
+       asid_mm_map[asid_cache] = mm;
+       mm->context.asid = asid_cache;
+
+#ifdef CONFIG_ARC_TLB_DBG
+       pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
+              " pid:%u, assigned asid:%lu\n",
+              (unsigned int)mm, (unsigned int)prev_owner,
+              (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
+              (mm->context.tsk)->pid, mm->context.asid);
+#endif
+
+       write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+       mm->context.asid = NO_ASID;
+#ifdef CONFIG_ARC_TLB_DBG
+       mm->context.tsk = tsk;
+#endif
+       return 0;
+}
+
+/* Prepare the MMU for task: setup PID reg with allocated ASID
+    If task doesn't have an ASID (never alloc or stolen, get a new ASID)
+*/
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+#ifndef CONFIG_SMP
+       /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+       write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+       /*
+        * Get a new ASID if task doesn't have a valid one. Possible when
+        *  -task never had an ASID (fresh after fork)
+        *  -it's ASID was stolen - past an ASID roll-over.
+        *  -There's a third obscure scenario (if this task is running for the
+        *   first time after an ASID rollover), where despite having a valid
+        *   ASID, we force a get for new ASID - see comments at top.
+        *
+        * Both the non-alloc scenario and first-use-after-rollover can be
+        * detected using the single condition below:  NO_ASID = 256
+        * while asid_cache is always a valid ASID value (0-255).
+        */
+       if (next->context.asid > asid_cache) {
+               get_new_mmu_context(next);
+       } else {
+               /*
+                * XXX: This will never happen given the chks above
+                * BUG_ON(next->context.asid > MAX_ASID);
+                */
+               write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
+       }
+
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       asid_mm_map[mm->context.asid] = NULL;
+       mm->context.asid = NO_ASID;
+
+       local_irq_restore(flags);
+}
+
+/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for retiring-mm. However destroy_context( ) still needs to do that because
+ * between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that task gets sched-out/in, making its ASID valid
+ * again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm)   do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifndef CONFIG_SMP
+       write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+       /* Unconditionally get a new ASID */
+       get_new_mmu_context(next);
+
+}
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644 (file)
index 0000000..518222b
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+struct mod_arch_specific {
+       void *unw_info;
+       int unw_sec_idx;
+};
+#endif
+
+#define MODULE_PROC_FAMILY "ARC700"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644 (file)
index 0000000..a2f88ff
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
+#include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644 (file)
index 0000000..bdf5461
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr)           __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr)     free_page(addr)
+
+/* TBD: for now don't worry about VIPT D$ aliasing */
+#define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
+#define copy_page(to, from)            memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(addr, vaddr, pg)       clear_page(addr)
+#define copy_user_page(vto, vfrom, vaddr, pg)  copy_page(vto, vfrom)
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+       unsigned long pte;
+} pte_t;
+typedef struct {
+       unsigned long pgd;
+} pgd_t;
+typedef struct {
+       unsigned long pgprot;
+} pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x)      ((x).pte)
+#define pgd_val(x)      ((x).pgd)
+#define pgprot_val(x)   ((x).pgprot)
+
+#define __pte(x)        ((pte_t) { (x) })
+#define __pgd(x)        ((pgd_t) { (x) })
+#define __pgprot(x)     ((pgprot_t) { (x) })
+
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x)     (x)
+#define pgd_val(x)     (x)
+#define pgprot_val(x)  (x)
+#define __pte(x)       (x)
+#define __pgprot(x)    (x)
+#define pte_pgprot(x)  (x)
+
+#endif
+
+#define ARCH_PFN_OFFSET     (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+
+#define pfn_valid(pfn)      (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed
+ * virt here means link-address/program-address as embedded in object code.
+ * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
+ * 128th page, and virt_to_page( ) will return the struct page corresp to it.
+ * mem_map[ ] is an array of struct page for each page frame in the system
+ *
+ * Independent of where linux is linked at, link-addr = physical address
+ * So the old macro  __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
+ * would have been wrong in case kernel is not at 0x8zs
+ */
+#define __pa(vaddr)  ((unsigned long)vaddr)
+#define __va(paddr)  ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr)    \
+       (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+
+#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Default Permissions for page, used in mmap.c */
+#ifdef CONFIG_ARC_STACK_NONEXEC
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#else
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \
+                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif
+
+#define WANT_PAGE_VIRTUAL   1
+
+#include <asm-generic/memory_model.h>   /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644 (file)
index 0000000..115ad96
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644 (file)
index 0000000..36a9f20
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2011
+ *  -"/proc/meminfo | grep PageTables" kept on increasing
+ *   Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ *  -Variable pg-sz means that Page Tables could be variable sized themselves
+ *    So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ *  -Page Table size capped to max 1 to save memory - hence verified.
+ *  -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ *  -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ *  -Switched pgtable_t from being struct page * to unsigned long
+ *      =Needed so that Page Table allocator (pte_alloc_one) is not forced to
+ *       deal with struct page. That way in future we can make it allocate
+ *       multiple PG Tbls in one Page Frame
+ *      =sweet side effect is avoiding calls to ugly page_address( ) from the
+ *       pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate
+ *
+ *  Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+       pmd_set(pmd, pte);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+{
+       pmd_set(pmd, (pte_t *) ptep);
+}
+
+static inline int __get_order_pgd(void)
+{
+       return get_order(PTRS_PER_PGD * 4);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       int num, num2;
+       pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+
+       if (ret) {
+               num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+               memzero(ret, num * sizeof(pgd_t));
+
+               num2 = VMALLOC_SIZE / PGDIR_SIZE;
+               memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+               memzero(ret + num + num2,
+                              (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+       }
+       return ret;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+       free_pages((unsigned long)pgd, __get_order_pgd());
+}
+
+
+/*
+ * With software-only page-tables, addr-split for traversal is tweakable and
+ * that directly governs how big tables would be at each level.
+ * Further, the MMU page size is configurable.
+ * Thus we need to programmatically assert the size constraint
+ * All of this is const math, allowing gcc to do constant folding/propagation.
+ */
+
+static inline int __get_order_pte(void)
+{
+       return get_order(PTRS_PER_PTE * 4);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+                                       unsigned long address)
+{
+       pte_t *pte;
+
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+                                        __get_order_pte());
+
+       return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+       pgtable_t pte_pg;
+
+       pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+       if (pte_pg) {
+               memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+               pgtable_page_ctor(virt_to_page(pte_pg));
+       }
+
+       return pte_pg;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+       free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+{
+       pgtable_page_dtor(virt_to_page(ptep));
+       free_pages(ptep, __get_order_pte());
+}
+
+#define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
+
+#define check_pgt_cache()   do { } while (0)
+#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+
+#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644 (file)
index 0000000..b7e3668
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
+ *     They are semantically the same although in different contexts
+ *     VALID marks a TLB entry exists and it will only happen if PRESENT
+ *  - Utilise some unused free bits to confine PTE flags to 12 bits
+ *     This is a must for 4k pg-sz
+ *
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
+ *  -TLB Locking never really existed, except for initial specs
+ *  -SILENT_xxx not needed for our port
+ *  -Per my request, MMU V3 changes the layout of some of the bits
+ *     to avoid a few shifts in TLB Miss handlers.
+ *
+ * vineetg: April 2010
+ *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
+ *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
+ *
+ * vineetg: April 2010
+ *  -Switched from 8:11:13 split for page table lookup to 11:8:13
+ *  -this speeds up page table allocation itself as we now have to memset 1K
+ *    instead of 8k per page table.
+ * -TODO: Right now page table alloc is 8K and rest 7K is unused
+ *    need to optimise it
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/**************************************************************************
+ * Page Table Flags
+ *
+ * ARC700 MMU only deals with software managed TLB entries.
+ * Page Tables are purely for Linux VM's consumption and the bits below are
+ * suited to that (uniqueness). Hence some are not implemented in the TLB and
+ * some have different value in TLB.
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
+ *      separate PD0 and PD1, which combined forms a translation entry)
+ *      while for PTE perspective, they are 8 and 9 respectively
+ * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
+ *      (saves some bit shift ops in TLB Miss hdlrs)
+ */
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+
+#define _PAGE_ACCESSED      (1<<1)     /* Page is accessed (S) */
+#define _PAGE_CACHEABLE     (1<<2)     /* Page is cached (H) */
+#define _PAGE_EXECUTE       (1<<3)     /* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<4)     /* Page has user write perm (H) */
+#define _PAGE_READ          (1<<5)     /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE     (1<<6)     /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE       (1<<7)     /* Page has kernel write perm (H) */
+#define _PAGE_K_READ        (1<<8)     /* Page has kernel perm (H) */
+#define _PAGE_GLOBAL        (1<<9)     /* Page is global (H) */
+#define _PAGE_MODIFIED      (1<<10)    /* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<10)    /* page cache/ swap (S) */
+#define _PAGE_PRESENT       (1<<11)    /* TLB entry is valid (H) */
+
+#else
+
+/* PD1 */
+#define _PAGE_CACHEABLE     (1<<0)     /* Page is cached (H) */
+#define _PAGE_EXECUTE       (1<<1)     /* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<2)     /* Page has user write perm (H) */
+#define _PAGE_READ          (1<<3)     /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE     (1<<4)     /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE       (1<<5)     /* Page has kernel write perm (H) */
+#define _PAGE_K_READ        (1<<6)     /* Page has kernel perm (H) */
+#define _PAGE_ACCESSED      (1<<7)     /* Page is accessed (S) */
+
+/* PD0 */
+#define _PAGE_GLOBAL        (1<<8)     /* Page is global (H) */
+#define _PAGE_PRESENT       (1<<9)     /* TLB entry is valid (H) */
+#define _PAGE_SHARED_CODE   (1<<10)    /* Shared Code page with cmn vaddr
+                                          usable for shared TLB entries (H) */
+
+#define _PAGE_MODIFIED      (1<<11)    /* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<12)    /* page cache/ swap (S) */
+
+#define _PAGE_SHARED_CODE_H (1<<31)    /* Hardware counterpart of above */
+#endif
+
+/* Kernel allowed all permissions for all pages */
+#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
+#else
+#define _PAGE_DEF_CACHEABLE (0)
+#endif
+
+/* Helper for every "user" page
+ * -kernel can R/W/X
+ * -by default cached, unless config otherwise
+ * -present in memory
+ */
+#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+
+/* More Abbreviated helpers */
+#define PAGE_U_NONE     __pgprot(___DEF)
+#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
+                                                      _PAGE_EXECUTE)
+
+#define PAGE_SHARED    PAGE_U_W_R
+
+/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
+ * kernel vaddr space - visible in all addr spaces, but kernel mode only
+ * Thus Global, all-kernel-access, no-user-access, cached
+ */
+#define PAGE_KERNEL          __pgprot(___DEF | _PAGE_GLOBAL)
+
+/* ioremap */
+#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
+                                                    _PAGE_GLOBAL)
+
+/**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ *       which directly corresponds to  PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ *  1. Although ARC700 can do exclusive execute/write protection (meaning R
+ *     can be tracked independent of X/W unlike some other CPUs), still to
+ *     keep things consistent with other archs:
+ *      -Write implies Read:   W => R
+ *      -Execute implies Read: X => R
+ *
+ *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ *     This is to enable COW mechanism
+ */
+       /* xwr */
+#define __P000  PAGE_U_NONE
+#define __P001  PAGE_U_R
+#define __P010  PAGE_U_R       /* Pvt-W => !W */
+#define __P011  PAGE_U_R       /* Pvt-W => !W */
+#define __P100  PAGE_U_X_R     /* X => R */
+#define __P101  PAGE_U_X_R
+#define __P110  PAGE_U_X_R     /* Pvt-W => !W and X => R */
+#define __P111  PAGE_U_X_R     /* Pvt-W => !W */
+
+#define __S000  PAGE_U_NONE
+#define __S001  PAGE_U_R
+#define __S010  PAGE_U_W_R     /* W => R */
+#define __S011  PAGE_U_W_R
+#define __S100  PAGE_U_X_R     /* X => R */
+#define __S101  PAGE_U_X_R
+#define __S110  PAGE_U_X_W_R   /* X => R */
+#define __S111  PAGE_U_X_W_R
+
+/****************************************************************
+ * Page Table Lookup split
+ *
+ * We implement 2 tier paging and since this is all software, we are free
+ * to customize the span of a PGD / PTE entry to suit us
+ *
+ *                     32 bit virtual address
+ * -------------------------------------------------------
+ * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
+ * -------------------------------------------------------
+ *       |                  |                |
+ *       |                  |                --> off in page frame
+ *       |                  |
+ *       |                  ---> index into Page Table
+ *       |
+ *       ----> index into Page Directory
+ */
+
+#define BITS_IN_PAGE   PAGE_SHIFT
+
+/* Optimal Sizing of Pg Tbl - based on MMU page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_8K)
+#define BITS_FOR_PTE   8
+#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define BITS_FOR_PTE   8
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define BITS_FOR_PTE   9
+#endif
+
+#define BITS_FOR_PGD   (32 - BITS_FOR_PTE - BITS_IN_PAGE)
+
+#define PGDIR_SHIFT    (BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)    /* vaddr span, not PDG sz */
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+#ifdef __ASSEMBLY__
+#define        PTRS_PER_PTE    (1 << BITS_FOR_PTE)
+#define        PTRS_PER_PGD    (1 << BITS_FOR_PGD)
+#else
+#define        PTRS_PER_PTE    (1UL << BITS_FOR_PTE)
+#define        PTRS_PER_PGD    (1UL << BITS_FOR_PGD)
+#endif
+/*
+ * Number of entries a userland program uses.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define        USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * No special requirements for lowest virtual address we permit any user space
+ * mapping to be mapped at.
+ */
+#define FIRST_USER_ADDRESS      0
+
+
+/****************************************************************
+ * Bucket load of VM Helpers
+ */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+       pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+       pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* the zero page used for uninitialized and anonymous pages */
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr)       (virt_to_page(empty_zero_page))
+
+#define pte_unmap(pte)         do { } while (0)
+#define pte_unmap_nested(pte)          do { } while (0)
+
+#define set_pte(pteptr, pteval)        ((*(pteptr)) = (pteval))
+#define set_pmd(pmdptr, pmdval)        (*(pmdptr) = pmdval)
+
+/* find the page descriptor of the Page Tbl ref by PMD entry */
+#define pmd_page(pmd)          virt_to_page(pmd_val(pmd) & PAGE_MASK)
+
+/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
+#define pmd_page_vaddr(pmd)    (pmd_val(pmd) & PAGE_MASK)
+
+/* In a 2 level sys, setup the PGD entry with PTE value */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+       pmd_val(*pmdp) = (unsigned long)ptep;
+}
+
+#define pte_none(x)                    (!pte_val(x))
+#define pte_present(x)                 (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep)      set_pte_at(mm, addr, ptep, __pte(0))
+
+#define pmd_none(x)                    (!pmd_val(x))
+#define        pmd_bad(x)                      ((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x)                 (pmd_val(x))
+#define pmd_clear(xp)                  do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + \
+               (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
+
+#define mk_pte(page, pgprot)                                           \
+({                                                                     \
+       pte_t pte;                                                      \
+       pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);   \
+       pte;                                                            \
+})
+
+/* TBD: Non linear mapping stuff */
+static inline int pte_file(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_FILE;
+}
+
+#define PTE_FILE_MAX_BITS      30
+#define pgoff_to_pte(x)         __pte(x)
+#define pte_to_pgoff(x)                (pte_val(x) >> 2)
+#define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)     (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pte_index(addr)      (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+ * and returns ptr to PTE entry corresponding to @addr
+ */
+#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
+                                        __pte_index(addr))
+
+/* No mapping of Page Tables in high mem etc, so following same as above */
+#define pte_offset_kernel(dir, addr)           pte_offset(dir, addr)
+#define pte_offset_map(dir, addr)              pte_offset(dir, addr)
+
+/* Zoo of pte_xxx function */
+#define pte_read(pte)          (pte_val(pte) & _PAGE_READ)
+#define pte_write(pte)         (pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte)         (pte_val(pte) & _PAGE_MODIFIED)
+#define pte_young(pte)         (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte)       (0)
+
+#define PTE_BIT_FUNC(fn, op) \
+       static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect,        &= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite,  |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean,  &= ~(_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkdirty,  |= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkold,    &= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung,  |= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(exprotect,        &= ~(_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkexec,   |= (_PAGE_EXECUTE));
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Macro to mark a page protection as uncacheable */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep, pte_t pteval)
+{
+       set_pte(ptep, pteval);
+}
+
+/*
+ * All kernel related VM pages are in init's mm.
+ */
+#define pgd_offset_k(address)  pgd_offset(&init_mm, address)
+#define pgd_index(addr)                ((addr) >> PGDIR_SHIFT)
+#define pgd_offset(mm, addr)   (((mm)->pgd)+pgd_index(addr))
+
+/*
+ * Macro to quickly access the PGD entry, utilising the fact that some
+ * arch may cache the pointer to Page Directory of "current" task
+ * in a MMU register
+ *
+ * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
+ * becomes reading a register
+ *
+ * ********CAUTION*******:
+ * Kernel code might be dealing with some mm_struct of NON "current"
+ * Thus use this macro only when you are certain that "current" is current
+ * e.g. when dealing with signal frame setup code etc
+ */
+#ifndef CONFIG_SMP
+#define pgd_offset_fast(mm, addr)      \
+({                                     \
+       pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
+       pgd_base + pgd_index(addr);     \
+})
+#else
+#define pgd_offset_fast(mm, addr)      pgd_offset(mm, addr)
+#endif
+
+extern void paging_init(void);
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+                     pte_t *ptep);
+
+/* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+ * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
+ */
+#define __swp_entry(type, off) ((swp_entry_t) { \
+                                       ((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier "into constituents */
+#define __swp_type(pte_lookalike)      (((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike)    ((pte_lookalike).val << 13)
+
+/* NOPs, to keep generic kernel happy */
+#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
+
+#define kern_addr_valid(addr)  (1)
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+                       remap_pfn_range(vma, from, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644 (file)
index 0000000..5f26b2c
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: March 2009
+ *  -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/arcregs.h>       /* for STATUS_E1_MASK et al */
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not so important so as to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+       unsigned long ksp;      /* kernel mode stack pointer */
+       unsigned long callee_reg;       /* pointer to callee regs */
+       unsigned long fault_address;    /* dbls as brkpt holder as well */
+       unsigned long cause_code;       /* Exception Cause Code (ECR) */
+#ifdef CONFIG_ARC_CURR_IN_REG
+       unsigned long user_r25;
+#endif
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+       struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD  {                          \
+       .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+}
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *t);
+
+#define task_pt_regs(p) \
+       ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk)    do { } while (0)
+
+/*
+ * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
+ * get optimised away by gcc
+ */
+#ifdef CONFIG_SMP
+#define cpu_relax()    __asm__ __volatile__ ("" : : : "memory")
+#else
+#define cpu_relax()    do { } while (0)
+#endif
+
+#define copy_segments(tsk, mm)      do { } while (0)
+#define release_segments(mm)        do { } while (0)
+
+#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
+
+/*
+ * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * These can't be derived from pt_regs as that would give corresponding user-mode val
+ */
+#define KSTK_ESP(tsk)   (tsk->thread.ksp)
+#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
+#define KSTK_FP(tsk)    (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * E1,E2 so that Interrupts are enabled in user mode
+ * L set, so Loop inhibited to begin with
+ * lp_start and lp_end seeded with bogus non-zero values so to easily catch
+ * the ARC700 sr to lp_start hardware bug
+ */
+#define start_thread(_regs, _pc, _usp)                         \
+do {                                                           \
+       set_fs(USER_DS); /* reads from user space */            \
+       (_regs)->ret = (_pc);                                   \
+       /* Interrupts enabled in User Mode */                   \
+       (_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK       \
+               | STATUS_E1_MASK | STATUS_E2_MASK;              \
+       (_regs)->sp = (_usp);                                   \
+       /* bogus seed values for debugging */                   \
+       (_regs)->lp_start = 0x10;                               \
+       (_regs)->lp_end = 0x80;                                 \
+} while (0)
+
+extern unsigned int get_wchan(struct task_struct *p);
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Should the PC register be read instead ? This macro does not seem to
+ * be used in many places so this wont be all that bad.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#endif /* !__ASSEMBLY__ */
+
+/* Kernels Virtual memory area.
+ * Unlike other architectures(MIPS, sh, cris ) ARC 700 does not have a
+ * "kernel translated" region (like KSEG2 in MIPS). So we use a upper part
+ * of the translated bottom 2GB for kernel virtual memory and protect
+ * these pages from user accesses by disabling Ru, Eu and Wu.
+ */
+#define VMALLOC_SIZE   (0x10000000)    /* 256M */
+#define VMALLOC_START  (PAGE_OFFSET - VMALLOC_SIZE)
+#define VMALLOC_END    (PAGE_OFFSET)
+
+/* Most of the architectures seem to be keeping some kind of padding between
+ * userspace TASK_SIZE and PAGE_OFFSET. i.e TASK_SIZE != PAGE_OFFSET.
+ */
+#define USER_KERNEL_GUTTER    0x10000000
+
+/* User address space:
+ * On ARC700, CPU allows the entire lower half of 32 bit address space to be
+ * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
+ * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
+ * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
+ * Thus total User vaddr space is (0:0x5FFF_FFFF)
+ */
+#define TASK_SIZE      (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
+
+#define STACK_TOP       TASK_SIZE
+#define STACK_TOP_MAX   STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE      (TASK_SIZE / 3)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARC_PROCESSOR_H */
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
new file mode 100644 (file)
index 0000000..692d0d0
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_PROM_H_
+#define _ASM_ARC_PROM_H_
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644 (file)
index 0000000..8ae783d
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+struct pt_regs {
+       /*
+        * 1 word gutter after reg-file has been saved
+        * Technically not needed, since SP always points to a "full" location
+        * (vs. "empty"). But pt_regs is shared with tools....
+        */
+       long res;
+
+       /* Real registers */
+       long bta;       /* bta_l1, bta_l2, erbta */
+       long lp_start;
+       long lp_end;
+       long lp_count;
+       long status32;  /* status32_l1, status32_l2, erstatus */
+       long ret;       /* ilink1, ilink2 or eret */
+       long blink;
+       long fp;
+       long r26;       /* gp */
+       long r12;
+       long r11;
+       long r10;
+       long r9;
+       long r8;
+       long r7;
+       long r6;
+       long r5;
+       long r4;
+       long r3;
+       long r2;
+       long r1;
+       long r0;
+       long sp;        /* user/kernel sp depending on where we came from  */
+       long orig_r0;
+
+       /* to distinguish between exception, syscall, irq */
+       union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               /* so that assembly code is same for LE/BE */
+               unsigned long orig_r8:16, event:16;
+#else
+               unsigned long event:16, orig_r8:16;
+#endif
+               long orig_r8_word;
+       };
+};
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+       long res;       /* Again this is not needed */
+       long r25;
+       long r24;
+       long r23;
+       long r22;
+       long r21;
+       long r20;
+       long r19;
+       long r18;
+       long r17;
+       long r16;
+       long r15;
+       long r14;
+       long r13;
+};
+
+#define instruction_pointer(regs)      ((regs)->ret)
+#define profile_pc(regs)               instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({  unsigned int sp;           \
+       if (user_mode(regs))    \
+               sp = (regs)->sp;\
+       else                    \
+               sp = -1;        \
+       sp;                     \
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs)    (regs->event & orig_r8_IS_SCALL)
+#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
+
+#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->event &  orig_r8_IS_SCALL_RESTARTED)
+
+#define current_pt_regs()                                      \
+({                                                             \
+       /* open-coded current_thread_info() */                  \
+       register unsigned long sp asm ("sp");                   \
+       unsigned long pg_start = (sp & ~(THREAD_SIZE - 1));     \
+       (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1;     \
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->r0;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define orig_r8_IS_SCALL               0x0001
+#define orig_r8_IS_SCALL_RESTARTED     0x0002
+#define orig_r8_IS_BRKPT               0x0004
+#define orig_r8_IS_EXCPN               0x0004
+#define orig_r8_IS_IRQ1                        0x0010
+#define orig_r8_IS_IRQ2                        0x0020
+
+#endif /* __ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644 (file)
index 0000000..6fc1159
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _int_vec_base_lds[];
+extern char __arc_dccm_base[];
+extern char __dtb_start[];
+
+#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644 (file)
index 0000000..da2c459
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASMARC_SEGMENT_H
+#define __ASMARC_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS              MAKE_MM_SEG(0)
+#define USER_DS                        MAKE_MM_SEG(TASK_SIZE)
+
+#define segment_eq(a, b)       ((a) == (b))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644 (file)
index 0000000..4dff5a1
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SERIAL_H
+#define _ASM_ARC_SERIAL_H
+
+/*
+ * early-8250 requires BASE_BAUD to be defined and includes this header.
+ * We put in a typical value:
+ *     (core clk / 16) - i.e. UART samples 16 times per sec.
+ * Although in a multi-platform image this might not work, especially if the
+ * clk driving the UART is different.
+ * We can't use DeviceTree as this is typically for early serial.
+ */
+
+#include <asm/clk.h>
+
+#define BASE_BAUD      (arc_get_core_freq() / 16)
+
+#endif /* _ASM_ARC_SERIAL_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644 (file)
index 0000000..229e506
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASMARC_SETUP_H
+#define __ASMARC_SETUP_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 256
+
+/*
+ * Data structure to map a ID to string
+ * Used a lot for bootup reporting of hardware diversity
+ */
+struct id_to_str {
+       int id;
+       const char *str;
+};
+
+struct cpuinfo_data {
+       struct id_to_str info;
+       int up_range;
+};
+
+extern int root_mountflags, end_mem;
+extern int running_on_hw;
+
+void __init setup_processor(void);
+void __init setup_arch_memory(void);
+
+#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644 (file)
index 0000000..c4fb211
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_SMP_H
+#define __ASM_ARC_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps hence this Forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void __init first_lines_of_secondary(void);
+extern const char *arc_platform_smp_cpuinfo(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ *     Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
+ *
+ * @info:              SoC SMP specific info for /proc/cpuinfo etc
+ * @cpu_kick:          For Master to kickstart a cpu (optionally at a PC)
+ * @ipi_send:          To send IPI to a @cpumask
+ * @ipi_clear:         To clear IPI received by @cpu at @irq
+ */
+struct plat_smp_ops {
+       const char      *info;
+       void            (*cpu_kick)(int cpu, unsigned long pc);
+       void            (*ipi_send)(void *callmap);
+       void            (*ipi_clear)(int cpu, int irq);
+};
+
+/* TBD: stop exporting it for direct population by platform */
+extern struct plat_smp_ops  plat_smp_ops;
+
+#endif  /* CONFIG_SMP */
+
+/*
+ * ARC700 doesn't support atomic Read-Modify-Write ops.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
+ * The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
+ * based on retry-if-irq-in-atomic (with hardware assist).
+ * However despite these, we provide the IRQ disabling variant
+ *
+ * (1) These insns were introduced only in the 4.10 release. So for older
+ *     releases, support is needed.
+ *
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *     guaranteed by the platform (not something which core handles).
+ *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ *     disabling for atomicity.
+ *
+ *     However exported spinlock API is not usable due to cyclic hdr deps
+ *     (even after system.h disintegration upstream)
+ *     asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ *             -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ *     So the workaround is to use the lowest level arch spinlock API.
+ *     The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
+ *     but same is not true for ARCH backend, hence the need for 2 variants
+ */
+#ifndef CONFIG_ARC_HAS_LLSC
+
+#include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags) do {            \
+       local_irq_save(flags);                  \
+       arch_spin_lock(&smp_atomic_ops_lock);   \
+} while (0)
+
+#define atomic_ops_unlock(flags) do {          \
+       arch_spin_unlock(&smp_atomic_ops_lock); \
+       local_irq_restore(flags);               \
+} while (0)
+
+#define bitops_lock(flags)     do {            \
+       local_irq_save(flags);                  \
+       arch_spin_lock(&smp_bitops_lock);       \
+} while (0)
+
+#define bitops_unlock(flags) do {              \
+       arch_spin_unlock(&smp_bitops_lock);     \
+       local_irq_restore(flags);               \
+} while (0)
+
+#else /* !CONFIG_SMP */
+
+#define atomic_ops_lock(flags)         local_irq_save(flags)
+#define atomic_ops_unlock(flags)       local_irq_restore(flags)
+
+#define bitops_lock(flags)             local_irq_save(flags)
+#define bitops_unlock(flags)           local_irq_restore(flags)
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#endif
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644 (file)
index 0000000..f158197
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
+#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
+#define arch_spin_lock_flags(lock, flags)      arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+       do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+/*
+ * Acquire: atomically EXchange LOCKED into the lock word and retry while
+ * the value read back is still LOCKED (i.e. another CPU holds it).
+ * The EX instruction provides the atomicity.
+ * NOTE(review): no explicit smp_mb() on the acquire path here — assumes
+ * the EX loop is a sufficient acquire barrier on ARC; confirm against the
+ * core's memory-ordering model.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+       __asm__ __volatile__(
+       "1:     ex  %0, [%1]            \n"
+       "       breq  %0, %2, 1b        \n"
+       : "+&r" (tmp)
+       : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory");
+}
+
+/*
+ * One-shot acquire attempt: atomically swap LOCKED into the lock word.
+ * Succeeds (returns 1) iff the previous value was UNLOCKED, in which case
+ * the lock is now held.  A failed attempt merely stores LOCKED over
+ * LOCKED, which is harmless.
+ */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+       __asm__ __volatile__(
+       "1:     ex  %0, [%1]            \n"
+       : "+r" (tmp)
+       : "r"(&(lock->slock))
+       : "memory");
+
+       return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+/*
+ * Release: a plain word store of UNLOCKED is enough since the acquire
+ * side uses atomic EX on the same word.
+ * NOTE(review): the barrier is placed *after* the store; confirm that a
+ * barrier before the store isn't needed to keep critical-section accesses
+ * from sinking below the unlock on this core.
+ */
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+       smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
+ *
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+/* Would read_trylock() succeed? */
+#define arch_read_can_lock(x)  ((x)->counter > 0)
+
+/* Would write_trylock() succeed? */
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
+/* 1 - lock taken successfully */
+/*
+ * @counter starts at __ARCH_RW_LOCK_UNLOCKED__ and is decremented once
+ * per reader; 0 means a writer holds the lock.  All counter updates are
+ * serialized via the embedded @lock_mutex spinlock.
+ */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+       int ret = 0;
+
+       arch_spin_lock(&(rw->lock_mutex));
+
+       /*
+        * zero means writer holds the lock exclusively, deny Reader.
+        * Otherwise grant lock to first/subsequent reader
+        */
+       if (rw->counter > 0) {
+               rw->counter--;
+               ret = 1;
+       }
+
+       arch_spin_unlock(&(rw->lock_mutex));
+
+       smp_mb();
+       return ret;
+}
+
+/* 1 - lock taken successfully */
+/*
+ * Writer gets the lock only when no reader holds it, i.e. @counter still
+ * equals __ARCH_RW_LOCK_UNLOCKED__; taking it drops the counter to 0.
+ * NOTE(review): unlike arch_read_trylock() there is no smp_mb() before
+ * returning here — verify whether that asymmetry is intentional.
+ */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+       int ret = 0;
+
+       arch_spin_lock(&(rw->lock_mutex));
+
+       /*
+        * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+        * deny writer. Otherwise if unlocked grant to writer
+        * Hence the claim that Linux rwlocks are unfair to writers.
+        * (can be starved for an indefinite time by readers).
+        */
+       if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+               rw->counter = 0;
+               ret = 1;
+       }
+       arch_spin_unlock(&(rw->lock_mutex));
+
+       return ret;
+}
+
+/* Blocking variants: spin-wait on the trylock, relaxing the CPU between
+ * attempts.  Readers can starve writers indefinitely (see above).
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+       while (!arch_read_trylock(rw))
+               cpu_relax();
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+       while (!arch_write_trylock(rw))
+               cpu_relax();
+}
+
+/* Reader release: give back one reader slot by re-incrementing @counter,
+ * under the same mutex that serializes all counter updates.
+ */
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+       arch_spin_lock(&(rw->lock_mutex));
+       rw->counter++;
+       arch_spin_unlock(&(rw->lock_mutex));
+}
+
+/* Writer release: restore @counter to the fully-unlocked sentinel. */
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+       arch_spin_lock(&(rw->lock_mutex));
+       rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+       arch_spin_unlock(&(rw->lock_mutex));
+}
+
+#define arch_read_lock_flags(lock, flags)      arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags)     arch_write_lock(lock)
+
+#define arch_spin_relax(lock)  cpu_relax()
+#define arch_read_relax(lock)  cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644 (file)
index 0000000..8276bfd
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+       volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED__    0
+#define __ARCH_SPIN_LOCK_LOCKED__      1
+
+#define __ARCH_SPIN_LOCK_UNLOCKED      { __ARCH_SPIN_LOCK_UNLOCKED__ }
+#define __ARCH_SPIN_LOCK_LOCKED                { __ARCH_SPIN_LOCK_LOCKED__ }
+
+/*
+ * Unlocked:     0x01_00_00_00
+ * Read lock(s): 0x00_FF_00_00 to say 0x01
+ * Write lock:   0x0, but only possible if prior value "unlocked" 0x0100_0000
+ */
+typedef struct {
+       volatile unsigned int   counter;
+       arch_spinlock_t         lock_mutex;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED__      0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED                { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
+
+#endif
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644 (file)
index 0000000..87676c8
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -We had half-optimised memset/memcpy, got better versions of those
+ *  -Added memcmp, strchr, strcpy, strcmp, strlen
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_STRING_H
+#define _ASM_ARC_STRING_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644 (file)
index 0000000..1b171ab
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+#define ARC_FPU_PREV(p, n)     fpu_save_restore(p, n)
+#define ARC_FPU_NEXT(t)
+
+#else
+
+#define ARC_FPU_PREV(p, n)
+#define ARC_FPU_NEXT(n)
+
+#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last)    \
+do {                                   \
+       ARC_FPU_PREV(prev, next);       \
+       last = __switch_to(prev, next);\
+       ARC_FPU_NEXT(next);             \
+       mb();                           \
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644 (file)
index 0000000..33ab304
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALL_H
+#define _ASM_ARC_SYSCALL_H  1
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>                /* in_syscall() */
+
+/*
+ * Return the syscall number (saved in orig_r8) for a task stopped in a
+ * syscall, or -1 if the regs don't describe a user-mode syscall entry.
+ */
+static inline long
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+       if (user_mode(regs) && in_syscall(regs))
+               return regs->orig_r8;
+       else
+               return -1;
+}
+
+/*
+ * Restore r8 from the entry-time copy so the syscall can be restarted
+ * as if it had never run.
+ */
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+       /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
+       regs->r8 = regs->orig_r8;
+}
+
+/* Return the negative errno if the syscall (result in r0) failed, else 0. */
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+       /* 0 if syscall succeeded, otherwise -Errorcode */
+       return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
+}
+
+/* Raw syscall return value: ARC returns syscall results in r0. */
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+       return regs->r0;
+}
+
+/*
+ * Store the syscall result in r0: the (negative) @error if one occurred,
+ * otherwise the success value @val.
+ */
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+                        int error, long val)
+{
+       regs->r0 = (long) error ?: val;
+}
+
+/*
+ * @i:      argument index [0,5]
+ * @n:      number of arguments; n+i must be [1,6].
+ *
+ * Copy syscall arguments i..i+n-1 into @args.  Arguments live in
+ * r0..r5; pt_regs lays them out such that walking *downwards* from
+ * &regs->r0 yields successive argument registers, hence the decrementing
+ * pointer below.
+ */
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+                     unsigned int i, unsigned int n, unsigned long *args)
+{
+       unsigned long *inside_ptregs = &(regs->r0);
+       inside_ptregs -= i;
+
+       BUG_ON((i + n) > 6);
+
+       while (n--) {
+               args[i++] = (*inside_ptregs);
+               inside_ptregs--;
+       }
+}
+
+#endif
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644 (file)
index 0000000..e53a534
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALLS_H
+#define _ASM_ARC_SYSCALLS_H  1
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+int sys_clone_wrapper(int, int, int, int, int);
+int sys_fork_wrapper(void);
+int sys_vfork_wrapper(void);
+/* was "uint32_t, uint32_t uint32_t" - comma between the last two
+ * parameter types was missing, which is a syntax error.
+ */
+int sys_cacheflush(uint32_t, uint32_t, uint32_t);
+int sys_arc_settls(void *);
+int sys_arc_gettls(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644 (file)
index 0000000..2d50a4c
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Oct 2009
+ *  No need for ARC specific thread_info allocator (kmalloc/free). This is
+ *  anyways one page allocation, thus slab alloc can be short-circuited and
+ *  the generic version (get_free_page) would be loads better.
+ *
+ * Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+#ifdef CONFIG_16KSTACKS
+#define THREAD_SIZE_ORDER 1
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE     (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+#include <asm/segment.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ *   must also be changed
+ */
+struct thread_info {
+       unsigned long flags;            /* low level flags */
+       int preempt_count;              /* 0 => preemptable, <0 => BUG */
+       struct task_struct *task;       /* main task structure */
+       mm_segment_t addr_limit;        /* thread address space */
+       struct exec_domain *exec_domain;/* execution domain */
+       __u32 cpu;                      /* current CPU */
+       unsigned long thr_ptr;          /* TLS ptr */
+       struct restart_block restart_block;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk)                  \
+{                                              \
+       .task       = &tsk,                     \
+       .exec_domain    = &default_exec_domain, \
+       .flags      = 0,                        \
+       .cpu        = 0,                        \
+       .preempt_count  = INIT_PREEMPT_COUNT,   \
+       .addr_limit = KERNEL_DS,                \
+       .restart_block  = {                     \
+               .fn = do_no_restart_syscall,    \
+       },                                      \
+}
+
+#define init_thread_info    (init_thread_union.thread_info)
+#define init_stack          (init_thread_union.stack)
+
+/*
+ * thread_info lives at the base of the kernel stack, so it is found by
+ * rounding the current stack pointer down to a THREAD_SIZE boundary.
+ */
+static inline __attribute_const__ struct thread_info *current_thread_info(void)
+{
+       register unsigned long sp asm("sp");
+       return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE      0x10000000
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ *   access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_RESTORE_SIGMASK    0       /* restore sig mask in do_signal() */
+#define TIF_NOTIFY_RESUME      1       /* resumption notification requested */
+#define TIF_SIGPENDING         2       /* signal pending */
+#define TIF_NEED_RESCHED       3       /* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT      4       /* syscall auditing active */
+#define TIF_SYSCALL_TRACE      15      /* syscall trace active */
+
+/* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE             16
+
+#define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT     (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_MEMDIE            (1<<TIF_MEMDIE)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+                                _TIF_NOTIFY_RESUME)
+
+/*
+ * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
+ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644 (file)
index 0000000..0a82960
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TIMEX_H
+#define _ASM_ARC_TIMEX_H
+
+#define CLOCK_TICK_RATE        80000000 /* slated to be removed */
+
+#include <asm-generic/timex.h>
+
+/* XXX: get_cycles() to be implemented with RTSC insn */
+
+#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644 (file)
index 0000000..a5ff961
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_TLB_MMU_V1_H__
+#define __ASM_TLB_MMU_V1_H__
+
+/* The 'defined' operator takes a macro name, not an expression; the
+ * original "defined(CONFIG_ARC_MMU_VER == 1)" was malformed.  Compare
+ * the macro's value directly instead.
+ */
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
+
+#include <asm/tlb.h>
+
+.macro TLB_WRITE_HEURISTICS
+
+#define JH_HACK1
+#undef JH_HACK2
+#undef JH_HACK3
+
+#ifdef JH_HACK3
+; Calculate set index for 2-way MMU
+; -avoiding use of GetIndex from MMU
+;   and its unpleasant LFSR pseudo-random sequence
+;
+; r1 = TLBPD0 from TLB_RELOAD above
+;
+; -- jh_ex_way_set not cleared on startup
+;    didn't want to change setup.c
+;    hence extra instruction to clean
+;
+; -- should be in cache since in same line
+;    as r0/r1 saves above
+;
+ld  r0,[jh_ex_way_sel]  ; victim pointer
+and r0,r0,1         ; clean
+xor.f   r0,r0,1         ; flip
+st  r0,[jh_ex_way_sel]  ; store back
+asr r0,r1,12        ; get set # <<1, note bit 12=R=0
+or.nz   r0,r0,1         ; set way bit
+and r0,r0,0xff      ; clean
+sr  r0,[ARC_REG_TLBINDEX]
+#endif
+
+#ifdef JH_HACK2
+; JH hack #2
+;  Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
+;  Slower in thrash case (where it matters) because more code is executed
+;  Inefficient due to two-register paradigm of this miss handler
+;
+/* r1 = data TLBPD0 at this point */
+lr      r0,[eret]               /* instruction address */
+xor     r0,r0,r1                /* compare set #       */
+and.f   r0,r0,0x000fe000        /* 2-way MMU mask      */
+bne     88f                     /* not in same set - no need to probe */
+
+lr      r0,[eret]               /* instruction address */
+and     r0,r0,PAGE_MASK         /* VPN of instruction address */
+; lr  r1,[ARC_REG_TLBPD0]     /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
+and     r1,r1,0xff              /* Data ASID */
+or      r0,r0,r1                /* Instruction address + Data ASID */
+
+lr      r1,[ARC_REG_TLBPD0]     /* save TLBPD0 containing data TLB*/
+sr      r0,[ARC_REG_TLBPD0]     /* write instruction address to TLBPD0 */
+sr      TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr      r0,[ARC_REG_TLBINDEX]   /* r0 = index where instruction is, if at all */
+sr      r1,[ARC_REG_TLBPD0]     /* restore TLBPD0 */
+
+xor     r0,r0,1                 /* flip bottom bit of data index */
+b.d     89f
+sr      r0,[ARC_REG_TLBINDEX]   /* and put it back */
+88:
+sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]
+89:
+#endif
+
+#ifdef JH_HACK1
+;
+; Always checks whether instruction will be kicked out by dtlb miss
+;
+mov_s   r3, r1                  ; save PD0 prepared by TLB_RELOAD in r3
+lr      r0,[eret]               /* instruction address */
+and     r0,r0,PAGE_MASK         /* VPN of instruction address */
+bmsk    r1,r3,7                 /* Data ASID, bits 7-0 */
+or_s    r0,r0,r1                /* Instruction address + Data ASID */
+
+sr      r0,[ARC_REG_TLBPD0]     /* write instruction address to TLBPD0 */
+sr      TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr      r0,[ARC_REG_TLBINDEX]   /* r0 = index where instruction is, if at all */
+sr      r3,[ARC_REG_TLBPD0]     /* restore TLBPD0 */
+
+sr      TLBGetIndex, [ARC_REG_TLBCOMMAND]
+lr      r1,[ARC_REG_TLBINDEX]   /* r1 = index where MMU wants to put data */
+cmp     r0,r1                   /* if no match on indices, go around */
+xor.eq  r1,r1,1                 /* flip bottom bit of data index */
+sr      r1,[ARC_REG_TLBINDEX]   /* and put it back */
+#endif
+
+.endm
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644 (file)
index 0000000..3eb2ce0
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#ifdef __KERNEL__
+
+#include <asm/pgtable.h>
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0        (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE | \
+                        _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+                        _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifndef __ASSEMBLY__
+
+#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
+
+/*
+ * This pair is called at time of munmap/exit to flush cache and TLB entries
+ * for mappings being torn down.
+ * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
+ *    as we don't support aliasing configs in our VIPT D$.
+ * 2) tlb-flush part - implemented via tlb_end_vma( ) can be NOP as well -
+ *    albeit for different reasons - it's better handled by moving to new ASID
+ *
+ * Note, read http://lkml.org/lkml/2004/1/15/6
+ */
+#define tlb_start_vma(tlb, vma)
+#define tlb_end_vma(tlb, vma)
+
+#define __tlb_remove_tlb_entry(tlb, ptep, address)
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
+#endif
+
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void __init read_decode_mmu_bcr(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644 (file)
index 0000000..b2f9bc7
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_TLBFLUSH__
+#define __ASM_ARC_TLBFLUSH__
+
+#include <linux/mm.h>
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+                          unsigned long start, unsigned long end);
+
+/* XXX: Revisit for SMP */
+#define flush_tlb_range(vma, s, e)     local_flush_tlb_range(vma, s, e)
+#define flush_tlb_page(vma, page)      local_flush_tlb_page(vma, page)
+#define flush_tlb_kernel_range(s, e)   local_flush_tlb_kernel_range(s, e)
+#define flush_tlb_all()                        local_flush_tlb_all()
+#define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
+
+#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644 (file)
index 0000000..3242082
--- /dev/null
@@ -0,0 +1,751 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2010
+ *    -__clear_user( ) called multiple times during elf load was byte loop
+ *    converted to do as much word clear as possible.
+ *
+ * vineetg: Dec 2009
+ *    -Hand crafted constant propagation for "constant" copy sizes
+ *    -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ *    -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ *    -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ *    -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <linux/string.h>      /* for generic string functions */
+
+
+#define __kernel_ok            (segment_eq(get_fs(), KERNEL_DS))
+
+/*
+ * Algorithmically, for __user_ok() we want to do:
+ *     (start < TASK_SIZE) && (start+len < TASK_SIZE)
+ * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
+ * emitted directly in code.
+ *
+ * This can however be rewritten as follows:
+ *     (len <= TASK_SIZE) && (start+len < TASK_SIZE)
+ *
+ * Because it essentially checks if buffer end is within limit and @len is
+ * non-negative, which implies that buffer start will be within limit too.
+ *
+ * The reason for rewriting being, for majority of cases, @len is generally
+ * compile time constant, causing first sub-expression to be compile time
+ * subsumed.
+ *
+ * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
+ * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
+ * would already have been done at this call site for __kernel_ok()
+ *
+ */
+#define __user_ok(addr, sz)    (((sz) <= TASK_SIZE) && \
+                                (((addr)+(sz)) <= get_fs()))
+#define __access_ok(addr, sz)  (unlikely(__kernel_ok) || \
+                                likely(__user_ok((addr), (sz))))
+
+/*********** Single byte/hword/word copies ******************/
+
+#define __get_user_fn(sz, u, k)                                        \
+({                                                             \
+       long __ret = 0; /* success by default */        \
+       switch (sz) {                                           \
+       case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;       \
+       case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;       \
+       case 4: __arc_get_user_one(*(k), u, "ld", __ret);  break;       \
+       case 8: __arc_get_user_one_64(*(k), u, __ret);     break;       \
+       }                                                       \
+       __ret;                                                  \
+})
+
+/*
+ * Returns 0 on success, -EFAULT if not.
+ * @ret already contains 0 - given that errors will be less likely
+ * (hence +r asm constraint below).
+ * In case of error, fixup code will make it -EFAULT
+ */
+#define __arc_get_user_one(dst, src, op, ret)  \
+       __asm__ __volatile__(                   \
+       "1:     "op"    %1,[%2]\n"              \
+       "2:     ;nop\n"                         \
+       "       .section .fixup, \"ax\"\n"      \
+       "       .align 4\n"                     \
+       "3:     mov %0, %3\n"                   \
+       "       j   2b\n"                       \
+       "       .previous\n"                    \
+       "       .section __ex_table, \"a\"\n"   \
+       "       .align 4\n"                     \
+       "       .word 1b,3b\n"                  \
+       "       .previous\n"                    \
+                                               \
+       : "+r" (ret), "=r" (dst)                \
+       : "r" (src), "ir" (-EFAULT))
+
+#define __arc_get_user_one_64(dst, src, ret)   \
+       __asm__ __volatile__(                   \
+       "1:     ld   %1,[%2]\n"                 \
+       "4:     ld  %R1,[%2, 4]\n"              \
+       "2:     ;nop\n"                         \
+       "       .section .fixup, \"ax\"\n"      \
+       "       .align 4\n"                     \
+       "3:     mov %0, %3\n"                   \
+       "       j   2b\n"                       \
+       "       .previous\n"                    \
+       "       .section __ex_table, \"a\"\n"   \
+       "       .align 4\n"                     \
+       "       .word 1b,3b\n"                  \
+       "       .word 4b,3b\n"                  \
+       "       .previous\n"                    \
+                                               \
+       : "+r" (ret), "=r" (dst)                \
+       : "r" (src), "ir" (-EFAULT))
+
+#define __put_user_fn(sz, u, k)                                        \
+({                                                             \
+       long __ret = 0; /* success by default */        \
+       switch (sz) {                                           \
+       case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;       \
+       case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;       \
+       case 4: __arc_put_user_one(*(k), u, "st", __ret);  break;       \
+       case 8: __arc_put_user_one_64(*(k), u, __ret);     break;       \
+       }                                                       \
+       __ret;                                                  \
+})
+
+#define __arc_put_user_one(src, dst, op, ret)  \
+       __asm__ __volatile__(                   \
+       "1:     "op"    %1,[%2]\n"              \
+       "2:     ;nop\n"                         \
+       "       .section .fixup, \"ax\"\n"      \
+       "       .align 4\n"                     \
+       "3:     mov %0, %3\n"                   \
+       "       j   2b\n"                       \
+       "       .previous\n"                    \
+       "       .section __ex_table, \"a\"\n"   \
+       "       .align 4\n"                     \
+       "       .word 1b,3b\n"                  \
+       "       .previous\n"                    \
+                                               \
+       : "+r" (ret)                            \
+       : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+#define __arc_put_user_one_64(src, dst, ret)   \
+       __asm__ __volatile__(                   \
+       "1:     st   %1,[%2]\n"                 \
+       "4:     st  %R1,[%2, 4]\n"              \
+       "2:     ;nop\n"                         \
+       "       .section .fixup, \"ax\"\n"      \
+       "       .align 4\n"                     \
+       "3:     mov %0, %3\n"                   \
+       "       j   2b\n"                       \
+       "       .previous\n"                    \
+       "       .section __ex_table, \"a\"\n"   \
+       "       .align 4\n"                     \
+       "       .word 1b,3b\n"                  \
+       "       .word 4b,3b\n"                  \
+       "       .previous\n"                    \
+                                               \
+       : "+r" (ret)                            \
+       : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+
+/*
+ * __arc_copy_from_user - copy @n bytes from user-space @from to kernel @to.
+ *
+ * Returns the number of bytes that could NOT be copied (0 on full success).
+ * The __ex_table entries cover only the user-side loads (labels 1, 11, 12,
+ * 13, 14, ...); the kernel-side stores are not expected to fault. On a
+ * fault, the local fixup jumps past the copy, leaving the not-yet-copied
+ * byte count in the result register.
+ */
+static inline unsigned long
+__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       long res = 0;
+       char val;
+       unsigned long tmp1, tmp2, tmp3, tmp4;
+       unsigned long orig_n = n;
+
+       if (n == 0)
+               return 0;
+
+       /* unaligned: byte-at-a-time copy using a zero-overhead loop */
+       if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+               unsigned char tmp;
+
+               __asm__ __volatile__ (
+               "       mov.f   lp_count, %0            \n"
+               "       lpnz 2f                         \n"
+               "1:     ldb.ab  %1, [%3, 1]             \n"
+               "       stb.ab  %1, [%2, 1]             \n"
+               "       sub     %0,%0,1                 \n"
+               "2:     ;nop                            \n"
+               "       .section .fixup, \"ax\"         \n"
+               "       .align 4                        \n"
+               "3:     j   2b                          \n"
+               "       .previous                       \n"
+               "       .section __ex_table, \"a\"      \n"
+               "       .align 4                        \n"
+               "       .word   1b, 3b                  \n"
+               "       .previous                       \n"
+
+               : "+r" (n),
+               /*
+                * Note the '&' earlyclobber on this operand, to make sure
+                * the temporary register inside the loop is not the same
+                * as FROM or TO.
+                */
+                 "=&r" (tmp), "+r" (to), "+r" (from)
+               :
+               : "lp_count", "lp_start", "lp_end", "memory");
+
+               return n;
+       }
+
+       /*
+        * Hand-crafted constant propagation to reduce code sz of the
+        * laddered copy 16x,8,4,2,1
+        */
+       if (__builtin_constant_p(orig_n)) {
+               res = orig_n;
+
+               if (orig_n / 16) {
+                       orig_n = orig_n % 16;
+
+                       /* 16-byte chunks; %7 ("ir"(n)) is the original n */
+                       __asm__ __volatile__(
+                       "       lsr   lp_count, %7,4            \n"
+                       "       lp    3f                        \n"
+                       "1:     ld.ab   %3, [%2, 4]             \n"
+                       "11:    ld.ab   %4, [%2, 4]             \n"
+                       "12:    ld.ab   %5, [%2, 4]             \n"
+                       "13:    ld.ab   %6, [%2, 4]             \n"
+                       "       st.ab   %3, [%1, 4]             \n"
+                       "       st.ab   %4, [%1, 4]             \n"
+                       "       st.ab   %5, [%1, 4]             \n"
+                       "       st.ab   %6, [%1, 4]             \n"
+                       "       sub     %0,%0,16                \n"
+                       "3:     ;nop                            \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   3b                          \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   1b, 4b                  \n"
+                       "       .word   11b,4b                  \n"
+                       "       .word   12b,4b                  \n"
+                       "       .word   13b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from),
+                         "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+                       : "ir"(n)
+                       : "lp_count", "memory");
+               }
+               if (orig_n / 8) {
+                       orig_n = orig_n % 8;
+
+                       __asm__ __volatile__(
+                       "14:    ld.ab   %3, [%2,4]              \n"
+                       "15:    ld.ab   %4, [%2,4]              \n"
+                       "       st.ab   %3, [%1,4]              \n"
+                       "       st.ab   %4, [%1,4]              \n"
+                       "       sub     %0,%0,8                 \n"
+                       "31:    ;nop                            \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   31b                         \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   14b,4b                  \n"
+                       "       .word   15b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from),
+                         "=r"(tmp1), "=r"(tmp2)
+                       :
+                       : "memory");
+               }
+               if (orig_n / 4) {
+                       orig_n = orig_n % 4;
+
+                       __asm__ __volatile__(
+                       "16:    ld.ab   %3, [%2,4]              \n"
+                       "       st.ab   %3, [%1,4]              \n"
+                       "       sub     %0,%0,4                 \n"
+                       "32:    ;nop                            \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   32b                         \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   16b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+                       :
+                       : "memory");
+               }
+               if (orig_n / 2) {
+                       orig_n = orig_n % 2;
+
+                       __asm__ __volatile__(
+                       "17:    ldw.ab   %3, [%2,2]             \n"
+                       "       stw.ab   %3, [%1,2]             \n"
+                       "       sub      %0,%0,2                \n"
+                       "33:    ;nop                            \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   33b                         \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   17b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+                       :
+                       : "memory");
+               }
+               if (orig_n & 1) {
+                       __asm__ __volatile__(
+                       "18:    ldb.ab   %3, [%2,2]             \n"
+                       "       stb.ab   %3, [%1,2]             \n"
+                       "       sub      %0,%0,1                \n"
+                       "34:    ; nop                           \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   34b                         \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   18b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+                       :
+                       : "memory");
+               }
+       } else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
+
+               __asm__ __volatile__(
+               "       mov %0,%3                       \n"
+               "       lsr.f   lp_count, %3,4          \n"  /* 16x bytes */
+               "       lpnz    3f                      \n"
+               "1:     ld.ab   %5, [%2, 4]             \n"
+               "11:    ld.ab   %6, [%2, 4]             \n"
+               "12:    ld.ab   %7, [%2, 4]             \n"
+               "13:    ld.ab   %8, [%2, 4]             \n"
+               "       st.ab   %5, [%1, 4]             \n"
+               "       st.ab   %6, [%1, 4]             \n"
+               "       st.ab   %7, [%1, 4]             \n"
+               "       st.ab   %8, [%1, 4]             \n"
+               "       sub     %0,%0,16                \n"
+               "3:     and.f   %3,%3,0xf               \n"  /* stragglers */
+               "       bz      34f                     \n"
+               "       bbit0   %3,3,31f                \n"  /* 8 bytes left */
+               "14:    ld.ab   %5, [%2,4]              \n"
+               "15:    ld.ab   %6, [%2,4]              \n"
+               "       st.ab   %5, [%1,4]              \n"
+               "       st.ab   %6, [%1,4]              \n"
+               "       sub.f   %0,%0,8                 \n"
+               "31:    bbit0   %3,2,32f                \n"  /* 4 bytes left */
+               "16:    ld.ab   %5, [%2,4]              \n"
+               "       st.ab   %5, [%1,4]              \n"
+               "       sub.f   %0,%0,4                 \n"
+               "32:    bbit0   %3,1,33f                \n"  /* 2 bytes left */
+               "17:    ldw.ab  %5, [%2,2]              \n"
+               "       stw.ab  %5, [%1,2]              \n"
+               "       sub.f   %0,%0,2                 \n"
+               "33:    bbit0   %3,0,34f                \n"
+               "18:    ldb.ab  %5, [%2,1]              \n"  /* 1 byte left */
+               "       stb.ab  %5, [%1,1]              \n"
+               "       sub.f   %0,%0,1                 \n"
+               "34:    ;nop                            \n"
+               "       .section .fixup, \"ax\"         \n"
+               "       .align 4                        \n"
+               "4:     j   34b                         \n"
+               "       .previous                       \n"
+               "       .section __ex_table, \"a\"      \n"
+               "       .align 4                        \n"
+               "       .word   1b, 4b                  \n"
+               "       .word   11b,4b                  \n"
+               "       .word   12b,4b                  \n"
+               "       .word   13b,4b                  \n"
+               "       .word   14b,4b                  \n"
+               "       .word   15b,4b                  \n"
+               "       .word   16b,4b                  \n"
+               "       .word   17b,4b                  \n"
+               "       .word   18b,4b                  \n"
+               "       .previous                       \n"
+               : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+                 "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+               :
+               : "lp_count", "memory");
+       }
+
+       return res;
+}
+
+extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
+                                          unsigned long n);
+
+/*
+ * __arc_copy_to_user - copy @n bytes from kernel @from to user-space @to.
+ *
+ * Returns the number of bytes that could NOT be copied (0 on full success).
+ * Mirror of __arc_copy_from_user, except the __ex_table entries cover the
+ * user-side STORES (labels 1, 11, 12, 13, 14, ...) rather than the loads,
+ * since here the kernel source is assumed valid and only the destination
+ * can fault.
+ */
+static inline unsigned long
+__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       long res = 0;
+       char val;
+       unsigned long tmp1, tmp2, tmp3, tmp4;
+       unsigned long orig_n = n;
+
+       if (n == 0)
+               return 0;
+
+       /* unaligned: byte-at-a-time copy using a zero-overhead loop */
+       if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+               unsigned char tmp;
+
+               __asm__ __volatile__(
+               "       mov.f   lp_count, %0            \n"
+               "       lpnz 3f                         \n"
+               "       ldb.ab  %1, [%3, 1]             \n"
+               "1:     stb.ab  %1, [%2, 1]             \n"
+               "       sub     %0, %0, 1               \n"
+               "3:     ;nop                            \n"
+               "       .section .fixup, \"ax\"         \n"
+               "       .align 4                        \n"
+               "4:     j   3b                          \n"
+               "       .previous                       \n"
+               "       .section __ex_table, \"a\"      \n"
+               "       .align 4                        \n"
+               "       .word   1b, 4b                  \n"
+               "       .previous                       \n"
+
+               : "+r" (n),
+               /* Note the '&' earlyclobber on this operand, to make sure
+                * the temporary register inside the loop is not the same
+                * as FROM or TO.
+                */
+                 "=&r" (tmp), "+r" (to), "+r" (from)
+               :
+               : "lp_count", "lp_start", "lp_end", "memory");
+
+               return n;
+       }
+
+       /* Constant-n: hand laddered 16x,8,4,2,1 (see copy_from_user above) */
+       if (__builtin_constant_p(orig_n)) {
+               res = orig_n;
+
+               if (orig_n / 16) {
+                       orig_n = orig_n % 16;
+
+                       /* 16-byte chunks; %7 ("ir"(n)) is the original n */
+                       __asm__ __volatile__(
+                       "       lsr lp_count, %7,4              \n"
+                       "       lp  3f                          \n"
+                       "       ld.ab %3, [%2, 4]               \n"
+                       "       ld.ab %4, [%2, 4]               \n"
+                       "       ld.ab %5, [%2, 4]               \n"
+                       "       ld.ab %6, [%2, 4]               \n"
+                       "1:     st.ab %3, [%1, 4]               \n"
+                       "11:    st.ab %4, [%1, 4]               \n"
+                       "12:    st.ab %5, [%1, 4]               \n"
+                       "13:    st.ab %6, [%1, 4]               \n"
+                       "       sub   %0, %0, 16                \n"
+                       "3:;nop                                 \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   3b                          \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   1b, 4b                  \n"
+                       "       .word   11b,4b                  \n"
+                       "       .word   12b,4b                  \n"
+                       "       .word   13b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from),
+                         "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+                       : "ir"(n)
+                       : "lp_count", "memory");
+               }
+               if (orig_n / 8) {
+                       orig_n = orig_n % 8;
+
+                       __asm__ __volatile__(
+                       "       ld.ab   %3, [%2,4]              \n"
+                       "       ld.ab   %4, [%2,4]              \n"
+                       "14:    st.ab   %3, [%1,4]              \n"
+                       "15:    st.ab   %4, [%1,4]              \n"
+                       "       sub     %0, %0, 8               \n"
+                       "31:;nop                                \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   31b                         \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   14b,4b                  \n"
+                       "       .word   15b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from),
+                         "=r"(tmp1), "=r"(tmp2)
+                       :
+                       : "memory");
+               }
+               if (orig_n / 4) {
+                       orig_n = orig_n % 4;
+
+                       __asm__ __volatile__(
+                       "       ld.ab   %3, [%2,4]              \n"
+                       "16:    st.ab   %3, [%1,4]              \n"
+                       "       sub     %0, %0, 4               \n"
+                       "32:;nop                                \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   32b                         \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   16b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+                       :
+                       : "memory");
+               }
+               if (orig_n / 2) {
+                       orig_n = orig_n % 2;
+
+                       __asm__ __volatile__(
+                       "       ldw.ab    %3, [%2,2]            \n"
+                       "17:    stw.ab    %3, [%1,2]            \n"
+                       "       sub       %0, %0, 2             \n"
+                       "33:;nop                                \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   33b                         \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   17b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+                       :
+                       : "memory");
+               }
+               if (orig_n & 1) {
+                       __asm__ __volatile__(
+                       "       ldb.ab  %3, [%2,1]              \n"
+                       "18:    stb.ab  %3, [%1,1]              \n"
+                       "       sub     %0, %0, 1               \n"
+                       "34:    ;nop                            \n"
+                       "       .section .fixup, \"ax\"         \n"
+                       "       .align 4                        \n"
+                       "4:     j   34b                         \n"
+                       "       .previous                       \n"
+                       "       .section __ex_table, \"a\"      \n"
+                       "       .align 4                        \n"
+                       "       .word   18b,4b                  \n"
+                       "       .previous                       \n"
+                       : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+                       :
+                       : "memory");
+               }
+       } else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
+
+               __asm__ __volatile__(
+               "       mov   %0,%3                     \n"
+               "       lsr.f lp_count, %3,4            \n"  /* 16x bytes */
+               "       lpnz  3f                        \n"
+               "       ld.ab %5, [%2, 4]               \n"
+               "       ld.ab %6, [%2, 4]               \n"
+               "       ld.ab %7, [%2, 4]               \n"
+               "       ld.ab %8, [%2, 4]               \n"
+               "1:     st.ab %5, [%1, 4]               \n"
+               "11:    st.ab %6, [%1, 4]               \n"
+               "12:    st.ab %7, [%1, 4]               \n"
+               "13:    st.ab %8, [%1, 4]               \n"
+               "       sub   %0, %0, 16                \n"
+               "3:     and.f %3,%3,0xf                 \n" /* stragglers */
+               "       bz 34f                          \n"
+               "       bbit0   %3,3,31f                \n" /* 8 bytes left */
+               "       ld.ab   %5, [%2,4]              \n"
+               "       ld.ab   %6, [%2,4]              \n"
+               "14:    st.ab   %5, [%1,4]              \n"
+               "15:    st.ab   %6, [%1,4]              \n"
+               "       sub.f   %0, %0, 8               \n"
+               "31:    bbit0   %3,2,32f                \n"  /* 4 bytes left */
+               "       ld.ab   %5, [%2,4]              \n"
+               "16:    st.ab   %5, [%1,4]              \n"
+               "       sub.f   %0, %0, 4               \n"
+               "32:    bbit0 %3,1,33f                  \n"  /* 2 bytes left */
+               "       ldw.ab    %5, [%2,2]            \n"
+               "17:    stw.ab    %5, [%1,2]            \n"
+               "       sub.f %0, %0, 2                 \n"
+               "33:    bbit0 %3,0,34f                  \n"
+               "       ldb.ab    %5, [%2,1]            \n"  /* 1 byte left */
+               "18:    stb.ab  %5, [%1,1]              \n"
+               "       sub.f %0, %0, 1                 \n"
+               "34:    ;nop                            \n"
+               "       .section .fixup, \"ax\"         \n"
+               "       .align 4                        \n"
+               "4:     j   34b                         \n"
+               "       .previous                       \n"
+               "       .section __ex_table, \"a\"      \n"
+               "       .align 4                        \n"
+               "       .word   1b, 4b                  \n"
+               "       .word   11b,4b                  \n"
+               "       .word   12b,4b                  \n"
+               "       .word   13b,4b                  \n"
+               "       .word   14b,4b                  \n"
+               "       .word   15b,4b                  \n"
+               "       .word   16b,4b                  \n"
+               "       .word   17b,4b                  \n"
+               "       .word   18b,4b                  \n"
+               "       .previous                       \n"
+               : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+                 "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+               :
+               : "lp_count", "memory");
+       }
+
+       return res;
+}
+
+/*
+ * __arc_clear_user - zero @n bytes of user memory at @to.
+ *
+ * Returns the number of bytes that could NOT be cleared (0 on success).
+ * Head/tail bytes+halfwords bring the pointer to word alignment, a ZOL
+ * clears word-at-a-time, then stragglers are handled. %2 is the constant 0
+ * ("i"(0)) being stored.
+ *
+ * NOTE(review): assigning 'to' to 'd_char' drops the __user qualifier
+ * (sparse would warn). Also, local label '3' is defined twice: at the ZOL
+ * exit and again in the .fixup section; the '3b' references in __ex_table
+ * appear after the .fixup definition and so resolve to the fixup entry —
+ * confirm this is intended.
+ */
+static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+{
+       long res = n;
+       unsigned char *d_char = to;
+
+       __asm__ __volatile__(
+       "       bbit0   %0, 0, 1f               \n"
+       "75:    stb.ab  %2, [%0,1]              \n"
+       "       sub %1, %1, 1                   \n"
+       "1:     bbit0   %0, 1, 2f               \n"
+       "76:    stw.ab  %2, [%0,2]              \n"
+       "       sub %1, %1, 2                   \n"
+       "2:     asr.f   lp_count, %1, 2         \n"
+       "       lpnz    3f                      \n"
+       "77:    st.ab   %2, [%0,4]              \n"
+       "       sub %1, %1, 4                   \n"
+       "3:     bbit0   %1, 1, 4f               \n"
+       "78:    stw.ab  %2, [%0,2]              \n"
+       "       sub %1, %1, 2                   \n"
+       "4:     bbit0   %1, 0, 5f               \n"
+       "79:    stb.ab  %2, [%0,1]              \n"
+       "       sub %1, %1, 1                   \n"
+       "5:                                     \n"
+       "       .section .fixup, \"ax\"         \n"
+       "       .align 4                        \n"
+       "3:     j   5b                          \n"
+       "       .previous                       \n"
+       "       .section __ex_table, \"a\"      \n"
+       "       .align 4                        \n"
+       "       .word   75b, 3b                 \n"
+       "       .word   76b, 3b                 \n"
+       "       .word   77b, 3b                 \n"
+       "       .word   78b, 3b                 \n"
+       "       .word   79b, 3b                 \n"
+       "       .previous                       \n"
+       : "+r"(d_char), "+r"(res)
+       : "i"(0)
+       : "lp_count", "lp_start", "lp_end", "memory");
+
+       return res;
+}
+
+/*
+ * __arc_strncpy_from_user - copy a NUL-terminated string from user space.
+ *
+ * Copies at most @count bytes from @src, stopping at a NUL byte. Returns
+ * the number of bytes copied ("%6 - lp_count" at label 2), or -EFAULT (%5)
+ * via the fixup if the user load faults.
+ *
+ * The "4"(count) input is a matching constraint tying into the "=l"
+ * (lp_count) output hw_count — it seeds lp_count for the ZOL.
+ */
+static inline long
+__arc_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+       long res = count;
+       char val;
+       unsigned int hw_count;
+
+       if (count == 0)
+               return 0;
+
+       __asm__ __volatile__(
+       "       lp 2f           \n"
+       "1:     ldb.ab  %3, [%2, 1]             \n"
+       "       breq.d  %3, 0, 2f               \n"
+       "       stb.ab  %3, [%1, 1]             \n"
+       "2:     sub %0, %6, %4                  \n"
+       "3:     ;nop                            \n"
+       "       .section .fixup, \"ax\"         \n"
+       "       .align 4                        \n"
+       "4:     mov %0, %5                      \n"
+       "       j   3b                          \n"
+       "       .previous                       \n"
+       "       .section __ex_table, \"a\"      \n"
+       "       .align 4                        \n"
+       "       .word   1b, 4b                  \n"
+       "       .previous                       \n"
+       : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
+       : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
+       : "memory");
+
+       return res;
+}
+
+/*
+ * __arc_strnlen_user - find the length of a user-space string.
+ *
+ * Scans at most @n bytes of @s. Returns "n - cnt": the delay slot of
+ * breq.d decrements cnt even on the taken (NUL found) path, so the result
+ * includes the terminating NUL (e.g. NUL at offset 0 yields 1, the usual
+ * strnlen_user convention). If no NUL is found within n bytes, the extra
+ * post-loop decrement appears to make this return n+1 — confirm against
+ * callers. Returns 0 on fault (fixup sets %0 = 0).
+ */
+static inline long __arc_strnlen_user(const char __user *s, long n)
+{
+       long res, tmp1, cnt;
+       char val;
+
+       __asm__ __volatile__(
+       "       mov %2, %1                      \n"
+       "1:     ldb.ab  %3, [%0, 1]             \n"
+       "       breq.d  %3, 0, 2f               \n"
+       "       sub.f   %2, %2, 1               \n"
+       "       bnz 1b                          \n"
+       "       sub %2, %2, 1                   \n"
+       "2:     sub %0, %1, %2                  \n"
+       "3:     ;nop                            \n"
+       "       .section .fixup, \"ax\"         \n"
+       "       .align 4                        \n"
+       "4:     mov %0, 0                       \n"
+       "       j   3b                          \n"
+       "       .previous                       \n"
+       "       .section __ex_table, \"a\"      \n"
+       "       .align 4                        \n"
+       "       .word 1b, 4b                    \n"
+       "       .previous                       \n"
+       : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
+       : "0"(s), "1"(n)
+       : "memory");
+
+       return res;
+}
+
+/*
+ * Normally the inline versions above are used directly; when the kernel is
+ * built with -Os (CONFIG_CC_OPTIMIZE_FOR_SIZE) the out-of-line (noinline)
+ * wrappers are used instead, trading call overhead for code size.
+ */
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#define __copy_from_user(t, f, n)      __arc_copy_from_user(t, f, n)
+#define __copy_to_user(t, f, n)                __arc_copy_to_user(t, f, n)
+#define __clear_user(d, n)             __arc_clear_user(d, n)
+#define __strncpy_from_user(d, s, n)   __arc_strncpy_from_user(d, s, n)
+#define __strnlen_user(s, n)           __arc_strnlen_user(s, n)
+#else
+extern long arc_copy_from_user_noinline(void *to, const void __user * from,
+               unsigned long n);
+extern long arc_copy_to_user_noinline(void __user *to, const void *from,
+               unsigned long n);
+extern unsigned long arc_clear_user_noinline(void __user *to,
+               unsigned long n);
+extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+               long count);
+extern long arc_strnlen_user_noinline(const char __user *src, long n);
+
+#define __copy_from_user(t, f, n)      arc_copy_from_user_noinline(t, f, n)
+#define __copy_to_user(t, f, n)                arc_copy_to_user_noinline(t, f, n)
+#define __clear_user(d, n)             arc_clear_user_noinline(d, n)
+#define __strncpy_from_user(d, s, n)   arc_strncpy_from_user_noinline(d, s, n)
+#define __strnlen_user(s, n)           arc_strnlen_user_noinline(s, n)
+
+#endif
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644 (file)
index 0000000..5dbe63f
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNALIGNED_H
+#define _ASM_ARC_UNALIGNED_H
+
+/* ARC700 can't handle unaligned Data accesses. */
+
+#include <asm-generic/unaligned.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+                    unsigned long cause, struct callee_regs *cregs);
+#else
+/* Stub when misaligned-access emulation is compiled out; returns 0. */
+static inline int
+misaligned_fixup(unsigned long address, struct pt_regs *regs,
+                unsigned long cause, struct callee_regs *cregs)
+{
+       return 0;
+}
+#endif
+
+#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644 (file)
index 0000000..7ca628b
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNWIND_H
+#define _ASM_ARC_UNWIND_H
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+#include <linux/sched.h>
+
+/* Snapshot of the ARC700 core register file used by the dwarf2 unwinder */
+struct arc700_regs {
+       unsigned long r0;
+       unsigned long r1;
+       unsigned long r2;
+       unsigned long r3;
+       unsigned long r4;
+       unsigned long r5;
+       unsigned long r6;
+       unsigned long r7;
+       unsigned long r8;
+       unsigned long r9;
+       unsigned long r10;
+       unsigned long r11;
+       unsigned long r12;
+       unsigned long r13;
+       unsigned long r14;
+       unsigned long r15;
+       unsigned long r16;
+       unsigned long r17;
+       unsigned long r18;
+       unsigned long r19;
+       unsigned long r20;
+       unsigned long r21;
+       unsigned long r22;
+       unsigned long r23;
+       unsigned long r24;
+       unsigned long r25;
+       unsigned long r26;
+       unsigned long r27;      /* fp */
+       unsigned long r28;      /* sp */
+       unsigned long r29;
+       unsigned long r30;
+       unsigned long r31;      /* blink */
+       unsigned long r63;      /* pc */
+};
+
+/* Per-walk unwinder state: current register snapshot plus owning task */
+struct unwind_frame_info {
+       struct arc700_regs regs;
+       struct task_struct *task;
+       unsigned call_frame:1;
+};
+
+/* Accessors for the ABI-designated registers: pc (r63), sp (r28), blink (r31) */
+#define UNW_PC(frame)          ((frame)->regs.r63)
+#define UNW_SP(frame)          ((frame)->regs.r28)
+#define UNW_BLINK(frame)       ((frame)->regs.r31)
+
+/* Rajesh FIXME */
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame)          ((frame)->regs.r27)
+#define FRAME_RETADDR_OFFSET   4
+#define FRAME_LINK_OFFSET      0
+#define STACK_BOTTOM_UNW(tsk)  STACK_LIMIT((tsk)->thread.ksp)
+#define STACK_TOP_UNW(tsk)     ((tsk)->thread.ksp)
+#else
+#define UNW_FP(frame)          ((void)(frame), 0)
+#endif
+
+/* Round down to the base of the THREAD_SIZE-aligned kernel stack */
+#define STACK_LIMIT(ptr)       (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+/* Register descriptor list; order matches struct arc700_regs field order */
+#define UNW_REGISTER_INFO \
+       PTREGS_INFO(r0), \
+       PTREGS_INFO(r1), \
+       PTREGS_INFO(r2), \
+       PTREGS_INFO(r3), \
+       PTREGS_INFO(r4), \
+       PTREGS_INFO(r5), \
+       PTREGS_INFO(r6), \
+       PTREGS_INFO(r7), \
+       PTREGS_INFO(r8), \
+       PTREGS_INFO(r9), \
+       PTREGS_INFO(r10), \
+       PTREGS_INFO(r11), \
+       PTREGS_INFO(r12), \
+       PTREGS_INFO(r13), \
+       PTREGS_INFO(r14), \
+       PTREGS_INFO(r15), \
+       PTREGS_INFO(r16), \
+       PTREGS_INFO(r17), \
+       PTREGS_INFO(r18), \
+       PTREGS_INFO(r19), \
+       PTREGS_INFO(r20), \
+       PTREGS_INFO(r21), \
+       PTREGS_INFO(r22), \
+       PTREGS_INFO(r23), \
+       PTREGS_INFO(r24), \
+       PTREGS_INFO(r25), \
+       PTREGS_INFO(r26), \
+       PTREGS_INFO(r27), \
+       PTREGS_INFO(r28), \
+       PTREGS_INFO(r29), \
+       PTREGS_INFO(r30), \
+       PTREGS_INFO(r31), \
+       PTREGS_INFO(r63)
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+       ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
+
+extern int arc_unwind(struct unwind_frame_info *frame);
+extern void arc_unwind_init(void);
+extern void arc_unwind_setup(void);
+extern void *unwind_add_table(struct module *module, const void *table_start,
+                             unsigned long table_size);
+extern void unwind_remove_table(void *handle, int init_only);
+
+/* Stub: always returns 0; the callback is never invoked on ARC */
+static inline int
+arch_unwind_init_running(struct unwind_frame_info *info,
+                        int (*callback) (struct unwind_frame_info *info,
+                                         void *arg),
+                        void *arg)
+{
+       return 0;
+}
+
+/* Stub: never reports the frame as being in user mode (always 0) */
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+       return 0;
+}
+
+/* Stub: no arch-specific setup for unwinding a blocked task */
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+       return;
+}
+
+/* Stub: no arch-specific frame-info initialization from pt_regs */
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+                                           struct pt_regs *regs)
+{
+       return;
+}
+
+#else
+
+/* CONFIG_ARC_DW2_UNWIND disabled: everything degenerates to no-ops/zeros */
+#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
+
+static inline void arc_unwind_init(void)
+{
+}
+
+static inline void arc_unwind_setup(void)
+{
+}
+#define unwind_add_table(a, b, c)
+#define unwind_remove_table(a, b)
+
+#endif /* CONFIG_ARC_DW2_UNWIND */
+
+#endif /* _ASM_ARC_UNWIND_H */
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
new file mode 100644 (file)
index 0000000..18fefae
--- /dev/null
@@ -0,0 +1,12 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+header-y += elf.h
+header-y += page.h
+header-y += setup.h
+header-y += byteorder.h
+header-y += cachectl.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += signal.h
+header-y += swab.h
+header-y += unistd.h
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
new file mode 100644 (file)
index 0000000..9da71d4
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_BYTEORDER_H
+#define __ASM_ARC_BYTEORDER_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif /* __ASM_ARC_BYTEORDER_H */
diff --git a/arch/arc/include/uapi/asm/cachectl.h b/arch/arc/include/uapi/asm/cachectl.h
new file mode 100644 (file)
index 0000000..51c73f0
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHECTL_H
+#define __ARC_ASM_CACHECTL_H
+
+/*
+ * ARC ABI flags defined for Android's finegrained cacheflush requirements
+ */
+#define CF_I_INV       0x0002
+#define CF_D_FLUSH     0x0010
+#define CF_D_FLUSH_INV 0x0020
+
+#define CF_DEFAULT     (CF_I_INV | CF_D_FLUSH)
+
+/*
+ * Standard flags expected by cacheflush system call users
+ */
+#define ICACHE CF_I_INV
+#define DCACHE CF_D_FLUSH
+#define BCACHE (CF_I_INV | CF_D_FLUSH)
+
+#endif
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
new file mode 100644 (file)
index 0000000..0f99ac8
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_ELF_H
+#define _UAPI__ASM_ARC_ELF_H
+
+#include <asm/ptrace.h>                /* for user_regs_struct */
+
+/* Machine specific ELF Hdr flags */
+#define EF_ARC_OSABI_MSK       0x00000f00
+#define EF_ARC_OSABI_ORIG      0x00000000   /* MUST be zero for back-compat */
+#define EF_ARC_OSABI_CURRENT   0x00000300   /* v3 (no legacy syscalls) */
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_fpregset_t;
+
+#define ELF_NGREG      (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#endif
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
new file mode 100644 (file)
index 0000000..e5d41e0
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_PAGE_H
+#define _UAPI__ASM_ARC_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define PAGE_SHIFT 14
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define PAGE_SHIFT 12
+#else
+/*
+ * Default 8k
+ * done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because adhoc
+ * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined w/o
+ * using the correct uClibc header and in their build our autoconf.h is
+ * not available
+ */
+#define PAGE_SHIFT 13
+#endif
+
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE      (1 << PAGE_SHIFT)
+#define PAGE_OFFSET    (0x80000000)
+#else
+#define PAGE_SIZE      (1UL << PAGE_SHIFT)     /* Default 8K */
+#define PAGE_OFFSET    (0x80000000UL)  /* Kernel starts at 2G onwards */
+#endif
+
+#define PAGE_MASK      (~(PAGE_SIZE-1))
+
+
+#endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
new file mode 100644 (file)
index 0000000..6afa4f7
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _UAPI__ASM_ARC_PTRACE_H
+#define _UAPI__ASM_ARC_PTRACE_H
+
+
+#ifndef __ASSEMBLY__
+/*
+ * Userspace ABI: Register state needed by
+ *  -ptrace (gdbserver)
+ *  -sigcontext (SA_SIGNINFO signal frame)
+ *
+ * This is to decouple pt_regs from user-space ABI, to be able to change it
+ * w/o affecting the ABI.
+ * Although the layout (initial padding) is similar to pt_regs to have some
+ * optimizations when copying pt_regs to/from user_regs_struct.
+ *
+ * Also, sigcontext only cares about the scratch regs as that is what we really
+ * save/restore for signal handling.
+*/
+struct user_regs_struct {
+
+       struct scratch {
+               long pad;
+               long bta, lp_start, lp_end, lp_count;
+               long status32, ret, blink, fp, gp;
+               long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+               long sp;
+       } scratch;
+       struct callee {
+               long pad;
+               long r25, r24, r23, r22, r21, r20;
+               long r19, r18, r17, r16, r15, r14, r13;
+       } callee;
+       long efa;       /* break pt addr, for break points in delay slots */
+       long stop_pc;   /* give dbg stop_pc directly after checking orig_r8 */
+};
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/uapi/asm/setup.h b/arch/arc/include/uapi/asm/setup.h
new file mode 100644 (file)
index 0000000..a6d4e44
--- /dev/null
@@ -0,0 +1,6 @@
+/*
+ * setup.h is part of userspace header ABI so UAPI scripts have to generate it
+ * even if there's nothing to export - causing empty <uapi/asm/setup.h>
+ * However to prevent "patch" from discarding it we add this placeholder
+ * comment
+ */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
new file mode 100644 (file)
index 0000000..9678a11
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SIGCONTEXT_H
+#define _ASM_ARC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked.
+ */
+struct sigcontext {
+       struct user_regs_struct regs;
+};
+
+#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/include/uapi/asm/signal.h b/arch/arc/include/uapi/asm/signal.h
new file mode 100644 (file)
index 0000000..fad62f7
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_SIGNAL_H
+#define _ASM_ARC_SIGNAL_H
+
+/*
+ * This is much needed for ARC sigreturn optimization.
+ * This allows uClibc to piggyback the addr of a sigreturn stub in sigaction,
+ * which allows sigreturn based re-entry into kernel after handling signal.
+ * W/o this kernel needs to "synthesize" the sigreturn trampoline on user
+ * mode stack which in turn forces the following:
+ * -TLB Flush (after making the stack page executable)
+ * -Cache line Flush (to make I/D Cache lines coherent)
+ */
+#define SA_RESTORER    0x04000000
+
+#include <asm-generic/signal.h>
+
+#endif /* _ASM_ARC_SIGNAL_H */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
new file mode 100644 (file)
index 0000000..095599a
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Support single cycle endian-swap insn in ARC700 4.10
+ *
+ * vineetg: June 2009
+ *  -Better htonl implementation (5 instead of 9 ALU instructions)
+ *  -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
+ */
+
+#ifndef __ASM_ARC_SWAB_H
+#define __ASM_ARC_SWAB_H
+
+#include <linux/types.h>
+
+/* Native single cycle endian swap insn */
+#ifdef CONFIG_ARC_HAS_SWAPE
+
+#define __arch_swab32(x)               \
+({                                     \
+       unsigned int tmp = x;           \
+       __asm__(                        \
+       "       swape   %0, %1  \n"     \
+       : "=r" (tmp)                    \
+       : "r" (tmp));                   \
+       tmp;                            \
+})
+
+#else
+
+/* Several ways of Endian-Swap Emulation for ARC
+ * 0: kernel generic
+ * 1: ARC optimised "C"
+ * 2: ARC Custom instruction
+ */
+#define ARC_BSWAP_TYPE 1
+
+#if (ARC_BSWAP_TYPE == 1)              /******* Software only ********/
+
+/* The kernel default implementation of htonl is
+ *             return  x<<24 | x>>24 |
+ *              (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
+ *
+ * This generates 9 instructions on ARC (excluding the ld/st)
+ *
+ * 8051fd8c:   ld     r3,[r7,20]       ; Mem op : Get the value to be swapped
+ * 8051fd98:   asl    r5,r3,24         ; get  3rd Byte
+ * 8051fd9c:   lsr    r2,r3,24         ; get  0th Byte
+ * 8051fda0:   and    r4,r3,0xff00
+ * 8051fda8:   asl    r4,r4,8          ; get 1st Byte
+ * 8051fdac:   and    r3,r3,0x00ff0000
+ * 8051fdb4:   or     r2,r2,r5         ; combine 0th and 3rd Bytes
+ * 8051fdb8:   lsr    r3,r3,8          ; 2nd Byte at correct place in Dst Reg
+ * 8051fdbc:   or     r2,r2,r4         ; combine 0,3 Bytes with 1st Byte
+ * 8051fdc0:   or     r2,r2,r3         ; combine 0,3,1 Bytes with 2nd Byte
+ * 8051fdc4:   st     r2,[r1,20]       ; Mem op : save result back to mem
+ *
+ * Joern suggested a better "C" algorithm which is great since
+ * (1) It is portable to any architecture
+ * (2) At the same time it takes advantage of ARC ISA (rotate intrns)
+ */
+
+#define __arch_swab32(x)                                       \
+({     unsigned long __in = (x), __tmp;                        \
+       __tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */     \
+       __in = __in << 24 | __in >> 8; /* ror in,in,8 */        \
+       __tmp ^= __in;                                          \
+       __tmp &= 0xff00ff;                                      \
+       __tmp ^ __in;                                           \
+})
+
+#elif (ARC_BSWAP_TYPE == 2)    /* Custom single cycle bswap instruction */
+
+#define __arch_swab32(x)                                               \
+({                                                                     \
+       unsigned int tmp = x;                                           \
+       __asm__(                                                        \
+       "       .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
+       "       bswap  %0, %1                                           \n"\
+       : "=r" (tmp)                                                    \
+       : "r" (tmp));                                                   \
+       tmp;                                                            \
+})
+
+#endif /* ARC_BSWAP_TYPE=zzz */
+
+#endif /* CONFIG_ARC_HAS_SWAPE */
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#define __SWAB_64_THRU_32__
+#endif
+
+#endif
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..6f30484
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI *******/
+
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls    __NR_syscalls
+
+/* ARC specific syscall */
+#define __NR_cacheflush                (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls                (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls                (__NR_arch_specific_syscall + 2)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+
+
+/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
+#define __NR_sysfs             (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_sysfs, sys_sysfs)
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
new file mode 100644 (file)
index 0000000..c242ef0
--- /dev/null
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+# Pass UTS_MACHINE for user_regset definition
+CFLAGS_ptrace.o                += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
+obj-y  := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
+obj-y  += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
+obj-y  += devtree.o
+
+obj-$(CONFIG_MODULES)                  += arcksyms.o module.o
+obj-$(CONFIG_SMP)                      += smp.o
+obj-$(CONFIG_ARC_DW2_UNWIND)           += unwind.o
+obj-$(CONFIG_KPROBES)                  += kprobes.o
+obj-$(CONFIG_ARC_MISALIGN_ACCESS)      += unaligned.o
+obj-$(CONFIG_KGDB)                     += kgdb.o
+obj-$(CONFIG_ARC_METAWARE_HLINK)       += arc_hostlink.o
+
+obj-$(CONFIG_ARC_FPU_SAVE_RESTORE)     += fpu.o
+CFLAGS_fpu.o   += -mdpfp
+
+ifdef CONFIG_ARC_DW2_UNWIND
+CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
+obj-y += ctx_sw.o
+else
+obj-y += ctx_sw_asm.o
+endif
+
+extra-y := vmlinux.lds head.o
diff --git a/arch/arc/kernel/arc_hostlink.c b/arch/arc/kernel/arc_hostlink.c
new file mode 100644 (file)
index 0000000..47b2a17
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
+ *
+ * Allows Linux userland access to host in absence of any peripherals.
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>          /* file_operations */
+#include <linux/miscdevice.h>
+#include <linux/mm.h>          /* VM_IO */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                              vma->vm_end - vma->vm_start,
+                              vma->vm_page_prot)) {
+               pr_warn("Hostlink buffer mmap ERROR\n");
+               return -EAGAIN;
+       }
+       return 0;
+}
+
+static long arc_hl_ioctl(struct file *file, unsigned int cmd,
+                       unsigned long arg)
+{
+       /* we only support, returning the physical addr to mmap in user space */
+       put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
+       return 0;
+}
+
+static const struct file_operations arc_hl_fops = {
+       .unlocked_ioctl = arc_hl_ioctl,
+       .mmap           = arc_hl_mmap,
+};
+
+static struct miscdevice arc_hl_dev = {
+       .minor  = MISC_DYNAMIC_MINOR,
+       .name   = "hostlink",
+       .fops   = &arc_hl_fops
+};
+
+static int __init arc_hl_init(void)
+{
+       pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
+       return misc_register(&arc_hl_dev);
+}
+module_init(arc_hl_init);
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
new file mode 100644 (file)
index 0000000..4d9e777
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * arcksyms.c - Exporting symbols not exportable from their own sources
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+
+/* libgcc functions, not part of kernel sources */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __divsf3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __cmpdi2(void);
+extern void __fixunsdfsi(void);
+extern void __muldf3(void);
+extern void __divdf3(void);
+extern void __floatunsidf(void);
+extern void __floatunsisf(void);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__divsf3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__cmpdi2);
+EXPORT_SYMBOL(__fixunsdfsi);
+EXPORT_SYMBOL(__muldf3);
+EXPORT_SYMBOL(__divdf3);
+EXPORT_SYMBOL(__floatunsidf);
+EXPORT_SYMBOL(__floatunsisf);
+
+/* ARC optimised assembler routines */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strlen);
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
new file mode 100644 (file)
index 0000000..0dc148e
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+#include <asm/hardirq.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+int main(void)
+{
+       DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+       DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+
+       BLANK();
+
+       DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+       DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
+#ifdef CONFIG_ARC_CURR_IN_REG
+       DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
+#endif
+       DEFINE(THREAD_FAULT_ADDR,
+              offsetof(struct thread_struct, fault_address));
+
+       BLANK();
+
+       DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
+       DEFINE(THREAD_INFO_PREEMPT_COUNT,
+              offsetof(struct thread_info, preempt_count));
+
+       BLANK();
+
+       DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
+       DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+
+       DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
+       DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+
+       DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
+
+       BLANK();
+
+       DEFINE(PT_status32, offsetof(struct pt_regs, status32));
+       DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word));
+       DEFINE(PT_sp, offsetof(struct pt_regs, sp));
+       DEFINE(PT_r0, offsetof(struct pt_regs, r0));
+       DEFINE(PT_r1, offsetof(struct pt_regs, r1));
+       DEFINE(PT_r2, offsetof(struct pt_regs, r2));
+       DEFINE(PT_r3, offsetof(struct pt_regs, r3));
+       DEFINE(PT_r4, offsetof(struct pt_regs, r4));
+       DEFINE(PT_r5, offsetof(struct pt_regs, r5));
+       DEFINE(PT_r6, offsetof(struct pt_regs, r6));
+       DEFINE(PT_r7, offsetof(struct pt_regs, r7));
+
+       return 0;
+}
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
new file mode 100644 (file)
index 0000000..66ce0dc
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/clk.h>
+
+unsigned long core_freq = 800000000;
+
+/*
+ * As of now we default to device-tree provided clock
+ * In future we can determine this in early boot
+ */
+int arc_set_core_freq(unsigned long freq)
+{
+       core_freq = freq;
+       return 0;
+}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
new file mode 100644 (file)
index 0000000..60844da
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ *  -"C" version of lowest level context switch asm macro called by scheduler
+ *   gcc doesn't generate the dwarf CFI info for hand written asm, hence can't
+ *   backtrace out of it (e.g. tasks sleeping in kernel).
+ *   So we cheat a bit by writing almost similar code in inline-asm.
+ *  -This is a hacky way of doing things, but there is no other simple way.
+ *   I don't want/intend to extend unwinding code to understand raw asm
+ */
+
+#include <asm/asm-offsets.h>
+#include <linux/sched.h>
+
+struct task_struct *__sched
+__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
+{
+       unsigned int tmp;
+       unsigned int prev = (unsigned int)prev_task;
+       unsigned int next = (unsigned int)next_task;
+       int num_words_to_skip = 1;
+#ifdef CONFIG_ARC_CURR_IN_REG
+       num_words_to_skip++;
+#endif
+
+       __asm__ __volatile__(
+               /* FP/BLINK save generated by gcc (standard function prologue) */
+               "st.a    r13, [sp, -4]   \n\t"
+               "st.a    r14, [sp, -4]   \n\t"
+               "st.a    r15, [sp, -4]   \n\t"
+               "st.a    r16, [sp, -4]   \n\t"
+               "st.a    r17, [sp, -4]   \n\t"
+               "st.a    r18, [sp, -4]   \n\t"
+               "st.a    r19, [sp, -4]   \n\t"
+               "st.a    r20, [sp, -4]   \n\t"
+               "st.a    r21, [sp, -4]   \n\t"
+               "st.a    r22, [sp, -4]   \n\t"
+               "st.a    r23, [sp, -4]   \n\t"
+               "st.a    r24, [sp, -4]   \n\t"
+#ifndef CONFIG_ARC_CURR_IN_REG
+               "st.a    r25, [sp, -4]   \n\t"
+#endif
+               "sub     sp, sp, %4      \n\t"  /* create gutter at top */
+
+               /* set ksp of outgoing task in tsk->thread.ksp */
+               "st.as   sp, [%3, %1]    \n\t"
+
+               "sync   \n\t"
+
+               /*
+                * setup _current_task with incoming tsk.
+                * optionally, set r25 to that as well
+                * For SMP extra work to get to &_current_task[cpu]
+                * (open coded SET_CURR_TASK_ON_CPU)
+                */
+#ifndef CONFIG_SMP
+               "st  %2, [@_current_task]       \n\t"
+#else
+               "lr   r24, [identity]           \n\t"
+               "lsr  r24, r24, 8               \n\t"
+               "bmsk r24, r24, 7               \n\t"
+               "add2 r24, @_current_task, r24  \n\t"
+               "st   %2,  [r24]                \n\t"
+#endif
+#ifdef CONFIG_ARC_CURR_IN_REG
+               "mov r25, %2   \n\t"
+#endif
+
+               /* get ksp of incoming task from tsk->thread.ksp */
+               "ld.as  sp, [%2, %1]   \n\t"
+
+               /* start loading it's CALLEE reg file */
+
+               "add    sp, sp, %4     \n\t"    /* skip gutter at top */
+
+#ifndef CONFIG_ARC_CURR_IN_REG
+               "ld.ab   r25, [sp, 4]   \n\t"
+#endif
+               "ld.ab   r24, [sp, 4]   \n\t"
+               "ld.ab   r23, [sp, 4]   \n\t"
+               "ld.ab   r22, [sp, 4]   \n\t"
+               "ld.ab   r21, [sp, 4]   \n\t"
+               "ld.ab   r20, [sp, 4]   \n\t"
+               "ld.ab   r19, [sp, 4]   \n\t"
+               "ld.ab   r18, [sp, 4]   \n\t"
+               "ld.ab   r17, [sp, 4]   \n\t"
+               "ld.ab   r16, [sp, 4]   \n\t"
+               "ld.ab   r15, [sp, 4]   \n\t"
+               "ld.ab   r14, [sp, 4]   \n\t"
+               "ld.ab   r13, [sp, 4]   \n\t"
+
+               /* last (ret value) = prev : although for ARC it mov r0, r0 */
+               "mov     %0, %3        \n\t"
+
+               /* FP/BLINK restore generated by gcc (standard func epilogue) */
+
+               : "=r"(tmp)
+               : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev),
+                 "n"(num_words_to_skip * 4)
+               : "blink"
+       );
+
+       return (struct task_struct *)tmp;
+}
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
new file mode 100644 (file)
index 0000000..d897234
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ *  -Moved core context switch macro out of entry.S into this file.
+ *  -This is the more "natural" hand written assembler
+ */
+
+#include <asm/entry.h>       /* For the SAVE_* macros */
+#include <asm/asm-offsets.h>
+#include <asm/linkage.h>
+
+;################### Low Level Context Switch ##########################
+
+       .section .sched.text,"ax",@progbits
+       .align 4
+       .global __switch_to
+       .type   __switch_to, @function
+__switch_to:
+
+       /* Save regs on kernel mode stack of task */
+       st.a    blink, [sp, -4]
+       st.a    fp, [sp, -4]
+       SAVE_CALLEE_SAVED_KERNEL
+
+       /* Save the now KSP in task->thread.ksp */
+       st.as  sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
+
+       /*
+       * Return last task in r0 (return reg)
+       * On ARC, Return reg = First Arg reg = r0.
+       * Since we already have last task in r0,
+       * don't need to do anything special to return it
+       */
+
+       /* hardware memory barrier */
+       sync
+
+       /*
+        * switch to new task, contained in r1
+        * Temp reg r3 is required to get the ptr to store val
+        */
+       SET_CURR_TASK_ON_CPU  r1, r3
+
+       /* reload SP with kernel mode stack pointer in task->thread.ksp */
+       ld.as  sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
+
+       /* restore the registers */
+       RESTORE_CALLEE_SAVED_KERNEL
+       ld.ab   fp, [sp, 4]
+       ld.ab   blink, [sp, 4]
+       j       [blink]
+
+ARC_EXIT __switch_to
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
new file mode 100644 (file)
index 0000000..bdee3a8
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Based on reduced version of METAG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <asm/prom.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+/* called from unflatten_device_tree() to bootstrap devicetree itself */
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+       return __va(memblock_alloc(size, align));
+}
+
+/**
+ * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
+ * @dt:                virtual address pointer to dt blob
+ *
+ * If a dtb was passed to the kernel, then use it to choose the correct
+ * machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(void *dt)
+{
+       struct boot_param_header *devtree = dt;
+       struct machine_desc *mdesc = NULL, *mdesc_best = NULL;
+       unsigned int score, mdesc_score = ~1;
+       unsigned long dt_root;
+       const char *model, *compat;
+       void *clk;
+       char manufacturer[16];
+       unsigned long len;
+
+       /* check device tree validity */
+       if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+               return NULL;
+
+       initial_boot_params = devtree;
+       dt_root = of_get_flat_dt_root();
+
+       /*
+        * The kernel could be multi-platform enabled, thus could have many
+        * "baked-in" machine descriptors. Search thru all for the best
+        * "compatible" string match.
+        */
+       for_each_machine_desc(mdesc) {
+               score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+               if (score > 0 && score < mdesc_score) {
+                       mdesc_best = mdesc;
+                       mdesc_score = score;
+               }
+       }
+       if (!mdesc_best) {
+               const char *prop;
+               long size;
+
+               pr_err("\n unrecognized device tree list:\n[ ");
+
+               prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+               if (prop) {
+                       while (size > 0) {
+                               printk("'%s' ", prop);
+                               size -= strlen(prop) + 1;
+                               prop += strlen(prop) + 1;
+                       }
+               }
+               printk("]\n\n");
+
+               machine_halt();
+       }
+
+       /* compat = "<manufacturer>,<model>" */
+       compat =  mdesc_best->dt_compat[0];
+
+       model = strchr(compat, ',');
+       if (model)
+               model++;
+
+       strlcpy(manufacturer, compat, model ? model - compat : strlen(compat));
+
+       pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer);
+
+       /* Retrieve various information from the /chosen node */
+       of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+       /* Initialize {size,address}-cells info */
+       of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+       /* Setup memory, calling early_init_dt_add_memory_arch */
+       of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+       clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
+       if (clk)
+               arc_set_core_freq(of_read_ulong(clk, len/4));
+
+       return mdesc_best;
+}
+
+/*
+ * Copy the flattened DT out of .init since unflattening doesn't copy strings
+ * and the normal DT APIs refs them from orig flat DT
+ */
+void __init copy_devtree(void)
+{
+       void *alloc = early_init_dt_alloc_memory_arch(
+                       be32_to_cpu(initial_boot_params->totalsize), 64);
+       if (alloc) {
+               memcpy(alloc, initial_boot_params,
+                               be32_to_cpu(initial_boot_params->totalsize));
+               initial_boot_params = alloc;
+       }
+}
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
new file mode 100644 (file)
index 0000000..2f39028
--- /dev/null
@@ -0,0 +1,538 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <asm/disasm.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
+       defined(CONFIG_KPROBES)
+
+/* disasm_instr: Analyses instruction at addr, stores
+ * findings in *state
+ */
+void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
+       int userspace, struct pt_regs *regs, struct callee_regs *cregs)
+{
+       int fieldA = 0;
+       int fieldC = 0, fieldCisReg = 0;
+       uint16_t word1 = 0, word0 = 0;
+       int subopcode, is_linked, op_format;
+       uint16_t *ins_ptr;
+       uint16_t ins_buf[4];
+       int bytes_not_copied = 0;
+
+       memset(state, 0, sizeof(struct disasm_state));
+
+       /* This fetches the upper part of the 32 bit instruction
+        * in both the cases of Little Endian or Big Endian configurations. */
+       if (userspace) {
+               bytes_not_copied = copy_from_user(ins_buf,
+                                               (const void __user *) addr, 8);
+               if (bytes_not_copied > 6)
+                       goto fault;
+               ins_ptr = ins_buf;
+       } else {
+               ins_ptr = (uint16_t *) addr;
+       }
+
+       /* All fetches go through ins_ptr (== ins_buf for userspace) so we
+        * only ever read the buffer filled by copy_from_user above and never
+        * dereference a user address directly from kernel mode. For the
+        * kernel path ins_ptr aliases addr, so behavior there is unchanged. */
+       word1 = *ins_ptr;
+
+       state->major_opcode = (word1 >> 11) & 0x1F;
+
+       /* Check if the instruction is 32 bit or 16 bit instruction */
+       if (state->major_opcode < 0x0B) {
+               if (bytes_not_copied > 4)
+                       goto fault;
+               state->instr_len = 4;
+               word0 = *(ins_ptr + 1);
+               state->words[0] = (word1 << 16) | word0;
+       } else {
+               state->instr_len = 2;
+               state->words[0] = word1;
+       }
+
+       /* Read the second word in case of limm (instr_len is 2 or 4 here,
+        * so instr_len/2 is the offset in 16-bit units) */
+       word1 = *(ins_ptr + state->instr_len/2);
+       word0 = *(ins_ptr + state->instr_len/2 + 1);
+       state->words[1] = (word1 << 16) | word0;
+
+       switch (state->major_opcode) {
+       case op_Bcc:
+               state->is_branch = 1;
+
+               /* unconditional branch s25, conditional branch s21 */
+               fieldA = (IS_BIT(state->words[0], 16)) ?
+                       FIELD_s25(state->words[0]) :
+                       FIELD_s21(state->words[0]);
+
+               state->delay_slot = IS_BIT(state->words[0], 5);
+               state->target = fieldA + (addr & ~0x3);
+               state->flow = direct_jump;
+               break;
+
+       case op_BLcc:
+               if (IS_BIT(state->words[0], 16)) {
+                       /* Branch and Link */
+                       /* unconditional branch s25, conditional branch s21 */
+                       fieldA = (IS_BIT(state->words[0], 17)) ?
+                               (FIELD_s25(state->words[0]) & ~0x3) :
+                               FIELD_s21(state->words[0]);
+
+                       state->flow = direct_call;
+               } else {
+                       /* Branch On Compare */
+                       fieldA = FIELD_s9(state->words[0]) & ~0x3;
+                       state->flow = direct_jump;
+               }
+
+               state->delay_slot = IS_BIT(state->words[0], 5);
+               state->target = fieldA + (addr & ~0x3);
+               state->is_branch = 1;
+               break;
+
+       case op_LD:  /* LD<zz> a,[b,s9] */
+               state->write = 0;
+               state->di = BITS(state->words[0], 11, 11);
+               if (state->di)
+                       break;
+               state->x = BITS(state->words[0], 6, 6);
+               state->zz = BITS(state->words[0], 7, 8);
+               state->aa = BITS(state->words[0], 9, 10);
+               state->wb_reg = FIELD_B(state->words[0]);
+               if (state->wb_reg == REG_LIMM) {
+                       state->instr_len += 4;
+                       state->aa = 0;
+                       state->src1 = state->words[1];
+               } else {
+                       state->src1 = get_reg(state->wb_reg, regs, cregs);
+               }
+               state->src2 = FIELD_s9(state->words[0]);
+               state->dest = FIELD_A(state->words[0]);
+               state->pref = (state->dest == REG_LIMM);
+               break;
+
+       case op_ST:
+               state->write = 1;
+               state->di = BITS(state->words[0], 5, 5);
+               if (state->di)
+                       break;
+               state->aa = BITS(state->words[0], 3, 4);
+               state->zz = BITS(state->words[0], 1, 2);
+               state->src1 = FIELD_C(state->words[0]);
+               if (state->src1 == REG_LIMM) {
+                       state->instr_len += 4;
+                       state->src1 = state->words[1];
+               } else {
+                       state->src1 = get_reg(state->src1, regs, cregs);
+               }
+               state->wb_reg = FIELD_B(state->words[0]);
+               if (state->wb_reg == REG_LIMM) {
+                       state->aa = 0;
+                       state->instr_len += 4;
+                       state->src2 = state->words[1];
+               } else {
+                       state->src2 = get_reg(state->wb_reg, regs, cregs);
+               }
+               state->src3 = FIELD_s9(state->words[0]);
+               break;
+
+       case op_MAJOR_4:
+               subopcode = MINOR_OPCODE(state->words[0]);
+               switch (subopcode) {
+               case 32:        /* Jcc */
+               case 33:        /* Jcc.D */
+               case 34:        /* JLcc */
+               case 35:        /* JLcc.D */
+                       is_linked = 0;
+
+                       if (subopcode == 33 || subopcode == 35)
+                               state->delay_slot = 1;
+
+                       if (subopcode == 34 || subopcode == 35)
+                               is_linked = 1;
+
+                       fieldCisReg = 0;
+                       op_format = BITS(state->words[0], 22, 23);
+                       if (op_format == 0 || ((op_format == 3) &&
+                               (!IS_BIT(state->words[0], 5)))) {
+                               fieldC = FIELD_C(state->words[0]);
+
+                               if (fieldC == REG_LIMM) {
+                                       fieldC = state->words[1];
+                                       state->instr_len += 4;
+                               } else {
+                                       fieldCisReg = 1;
+                               }
+                       } else if (op_format == 1 || ((op_format == 3)
+                               && (IS_BIT(state->words[0], 5)))) {
+                               fieldC = FIELD_C(state->words[0]);
+                       } else  {
+                               /* op_format == 2 */
+                               fieldC = FIELD_s12(state->words[0]);
+                       }
+
+                       if (!fieldCisReg) {
+                               state->target = fieldC;
+                               state->flow = is_linked ?
+                                       direct_call : direct_jump;
+                       } else {
+                               state->target = get_reg(fieldC, regs, cregs);
+                               state->flow = is_linked ?
+                                       indirect_call : indirect_jump;
+                       }
+                       state->is_branch = 1;
+                       break;
+
+               case 40:        /* LPcc */
+                       if (BITS(state->words[0], 22, 23) == 3) {
+                               /* Conditional LPcc u7 */
+                               fieldC = FIELD_C(state->words[0]);
+
+                               fieldC = fieldC << 1;
+                               fieldC += (addr & ~0x03);
+                               state->is_branch = 1;
+                               state->flow = direct_jump;
+                               state->target = fieldC;
+                       }
+                       /* For Unconditional lp, next pc is the fall through
+                        * which is updated */
+                       break;
+
+               case 48 ... 55: /* LD a,[b,c] */
+                       state->di = BITS(state->words[0], 15, 15);
+                       if (state->di)
+                               break;
+                       state->x = BITS(state->words[0], 16, 16);
+                       state->zz = BITS(state->words[0], 17, 18);
+                       state->aa = BITS(state->words[0], 22, 23);
+                       state->wb_reg = FIELD_B(state->words[0]);
+                       if (state->wb_reg == REG_LIMM) {
+                               state->instr_len += 4;
+                               state->src1 = state->words[1];
+                       } else {
+                               state->src1 = get_reg(state->wb_reg, regs,
+                                               cregs);
+                       }
+                       state->src2 = FIELD_C(state->words[0]);
+                       if (state->src2 == REG_LIMM) {
+                               state->instr_len += 4;
+                               state->src2 = state->words[1];
+                       } else {
+                               state->src2 = get_reg(state->src2, regs,
+                                       cregs);
+                       }
+                       state->dest = FIELD_A(state->words[0]);
+                       if (state->dest == REG_LIMM)
+                               state->pref = 1;
+                       break;
+
+               case 10:        /* MOV */
+                       /* still need to check for limm to extract instr len */
+                       /* MOV is special case because it only takes 2 args */
+                       switch (BITS(state->words[0], 22, 23)) {
+                       case 0: /* OP a,b,c */
+                               if (FIELD_C(state->words[0]) == REG_LIMM)
+                                       state->instr_len += 4;
+                               break;
+                       case 1: /* OP a,b,u6 */
+                               break;
+                       case 2: /* OP b,b,s12 */
+                               break;
+                       case 3: /* OP.cc b,b,c/u6 */
+                               if ((!IS_BIT(state->words[0], 5)) &&
+                                   (FIELD_C(state->words[0]) == REG_LIMM))
+                                       state->instr_len += 4;
+                               break;
+                       }
+                       break;
+
+
+               default:
+                       /* Not a Load, Jump or Loop instruction */
+                       /* still need to check for limm to extract instr len */
+                       switch (BITS(state->words[0], 22, 23)) {
+                       case 0: /* OP a,b,c */
+                               if ((FIELD_B(state->words[0]) == REG_LIMM) ||
+                                   (FIELD_C(state->words[0]) == REG_LIMM))
+                                       state->instr_len += 4;
+                               break;
+                       case 1: /* OP a,b,u6 */
+                               break;
+                       case 2: /* OP b,b,s12 */
+                               break;
+                       case 3: /* OP.cc b,b,c/u6 */
+                               if ((!IS_BIT(state->words[0], 5)) &&
+                                  ((FIELD_B(state->words[0]) == REG_LIMM) ||
+                                   (FIELD_C(state->words[0]) == REG_LIMM)))
+                                       state->instr_len += 4;
+                               break;
+                       }
+                       break;
+               }
+               break;
+
+       /* 16 Bit Instructions */
+       case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
+               state->zz = BITS(state->words[0], 3, 4);
+               state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+               state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+               state->dest = FIELD_S_A(state->words[0]);
+               break;
+
+       case op_ADD_MOV_CMP:
+               /* check for limm, ignore mov_s h,b (== mov_s 0,b) */
+               if ((BITS(state->words[0], 3, 4) < 3) &&
+                   (FIELD_S_H(state->words[0]) == REG_LIMM))
+                       state->instr_len += 4;
+               break;
+
+       case op_S:
+               subopcode = BITS(state->words[0], 5, 7);
+               switch (subopcode) {
+               case 0: /* j_s */
+               case 1: /* j_s.d */
+               case 2: /* jl_s */
+               case 3: /* jl_s.d */
+                       state->target = get_reg(FIELD_S_B(state->words[0]),
+                                               regs, cregs);
+                       state->delay_slot = subopcode & 1;
+                       state->flow = (subopcode >= 2) ?
+                               direct_call : indirect_jump;
+                       break;
+               case 7:
+                       switch (BITS(state->words[0], 8, 10)) {
+                       case 4: /* jeq_s [blink] */
+                       case 5: /* jne_s [blink] */
+                       case 6: /* j_s [blink] */
+                       case 7: /* j_s.d [blink] */
+                               state->delay_slot = (subopcode == 7);
+                               state->flow = indirect_jump;
+                               state->target = get_reg(31, regs, cregs);
+                               /* fall through */
+                       default:
+                               break;
+                       }
+                       /* fall through */
+               default:
+                       break;
+               }
+               break;
+
+       case op_LD_S:   /* LD_S c, [b, u7] */
+               state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+               state->src2 = FIELD_S_u7(state->words[0]);
+               state->dest = FIELD_S_C(state->words[0]);
+               break;
+
+       case op_LDB_S:
+       case op_STB_S:
+               /* no further handling required as byte accesses should not
+                * cause an unaligned access exception */
+               state->zz = 1;
+               break;
+
+       case op_LDWX_S: /* LDWX_S c, [b, u6] */
+               state->x = 1;
+               /* intentional fall-through */
+
+       case op_LDW_S:  /* LDW_S c, [b, u6] */
+               state->zz = 2;
+               state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+               state->src2 = FIELD_S_u6(state->words[0]);
+               state->dest = FIELD_S_C(state->words[0]);
+               break;
+
+       case op_ST_S:   /* ST_S c, [b, u7] */
+               state->write = 1;
+               state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+               state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+               state->src3 = FIELD_S_u7(state->words[0]);
+               break;
+
+       case op_STW_S:  /* STW_S c,[b,u6] */
+               state->write = 1;
+               state->zz = 2;
+               state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+               state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+               state->src3 = FIELD_S_u6(state->words[0]);
+               break;
+
+       case op_SP:     /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
+               /* note: we are ignoring possibility of:
+                * ADD_S, SUB_S, PUSH_S, POP_S as these should not
+                * cause unaligned exception anyway */
+               state->write = BITS(state->words[0], 6, 6);
+               state->zz = BITS(state->words[0], 5, 5);
+               if (state->zz)
+                       break;  /* byte accesses should not come here */
+               if (!state->write) {
+                       state->src1 = get_reg(28, regs, cregs);
+                       state->src2 = FIELD_S_u7(state->words[0]);
+                       state->dest = FIELD_S_B(state->words[0]);
+               } else {
+                       state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
+                                       cregs);
+                       state->src2 = get_reg(28, regs, cregs);
+                       state->src3 = FIELD_S_u7(state->words[0]);
+               }
+               break;
+
+       case op_GP:     /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
+               /* note: ADD_S r0, gp, s11 is ignored */
+               state->zz = BITS(state->words[0], 9, 10);
+               state->src1 = get_reg(26, regs, cregs);
+               state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
+                       FIELD_S_s11(state->words[0]);
+               state->dest = 0;
+               break;
+
+       case op_Pcl:    /* LD_S b,[pcl,u10] */
+               state->src1 = regs->ret & ~3;
+               state->src2 = FIELD_S_u10(state->words[0]);
+               state->dest = FIELD_S_B(state->words[0]);
+               break;
+
+       case op_BR_S:
+               state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
+               state->flow = direct_jump;
+               state->is_branch = 1;
+               break;
+
+       case op_B_S:
+               fieldA = (BITS(state->words[0], 9, 10) == 3) ?
+                       FIELD_S_s7(state->words[0]) :
+                       FIELD_S_s10(state->words[0]);
+               state->target = fieldA + (addr & ~0x03);
+               state->flow = direct_jump;
+               state->is_branch = 1;
+               break;
+
+       case op_BL_S:
+               state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
+               state->flow = direct_call;
+               state->is_branch = 1;
+               break;
+
+       default:
+               break;
+       }
+
+       /* success only if every byte of the decoded instruction (including
+        * its limm, if any) was actually copied in */
+       if (bytes_not_copied <= (8 - state->instr_len))
+               return;
+
+fault: state->fault = 1;
+}
+
+long __kprobes get_reg(int reg, struct pt_regs *regs,
+                      struct callee_regs *cregs)
+{
+       long *p;
+
+       /* Read core register 'reg' from the saved context; mirrors set_reg */
+       switch (reg) {
+       case 0 ... 12:
+               /* scratch regs live in pt_regs, laid out downwards from r0 */
+               p = &regs->r0;
+               return p[-reg];
+       case 13 ... 25:
+               /* callee-saved regs only available if the caller saved them */
+               if (cregs) {
+                       p = &cregs->r13;
+                       return p[13-reg];
+               }
+               break;
+       case 26:
+               return regs->r26;
+       case 27:
+               return regs->fp;
+       case 28:
+               return regs->sp;
+       case 31:
+               return regs->blink;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
+               struct callee_regs *cregs)
+{
+       long *rp;
+
+       /* Write 'val' into core register 'reg' of the saved context;
+        * mirrors the layout logic in get_reg. Out-of-range regs and
+        * callee-saved regs without cregs are silently ignored. */
+       if (reg >= 0 && reg <= 12) {
+               rp = &regs->r0;
+               rp[-reg] = val;
+       } else if (reg >= 13 && reg <= 25) {
+               if (cregs) {
+                       rp = &cregs->r13;
+                       rp[13-reg] = val;
+               }
+       } else if (reg == 26) {
+               regs->r26 = val;
+       } else if (reg == 27) {
+               regs->fp = val;
+       } else if (reg == 28) {
+               regs->sp = val;
+       } else if (reg == 31) {
+               regs->blink = val;
+       }
+}
+
+/*
+ * Disassembles the insn at @pc and sets @next_pc to next PC (which could be
+ * @pc +2/4/6 (ARCompact ISA allows free intermixing of 16/32 bit insns).
+ *
+ * If @pc is a branch
+ *     -@tgt_if_br is set to branch target.
+ *     -If branch has delay slot, @next_pc updated with actual next PC.
+ */
+int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
+                            struct callee_regs *cregs,
+                            unsigned long *next_pc, unsigned long *tgt_if_br)
+{
+       struct disasm_state instr;
+
+       memset(&instr, 0, sizeof(struct disasm_state));
+       disasm_instr(pc, &instr, 0, regs, cregs);
+
+       *next_pc = pc + instr.instr_len;
+
+       /* Instruction with possible two targets: branch, jump and loop */
+       if (instr.is_branch)
+               *tgt_if_br = instr.target;
+
+       /* With a delay slot, the fall through is the instruction AFTER the
+        * one sitting in the slot */
+       if (instr.delay_slot) {
+               struct disasm_state slot;
+
+               disasm_instr(*next_pc, &slot, 0, regs, cregs);
+               *next_pc += slot.instr_len;
+       }
+
+       /* Zero Overhead Loop: at loop end, wrap back to lp_start */
+       if (!(regs->status32 & STATUS32_L) && *next_pc == regs->lp_end &&
+           regs->lp_count > 1)
+               *next_pc = regs->lp_start;
+
+       return instr.is_branch;
+}
+
+#endif /* CONFIG_KGDB || CONFIG_MISALIGN_ACCESS || CONFIG_KPROBES */
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
new file mode 100644 (file)
index 0000000..ef6800b
--- /dev/null
@@ -0,0 +1,839 @@
+/*
+ * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Userspace unaligned access emulation
+ *
+ * vineetg: Feb 2011 (ptrace low level code fixes)
+ *  -traced syscall return code (r0) was not saved into pt_regs for restoring
+ *   into user reg-file when traced task rets to user space.
+ *  -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
+ *   were not invoking post-syscall trace hook (jumping directly into
+ *   ret_from_system_call)
+ *
+ * vineetg: Nov 2010:
+ *  -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
+ *  -To maintain the slot size of 8 bytes/vector, added nop, which is
+ *   not executed at runtime.
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ *  -do_signal()invoked upon TIF_RESTORE_SIGMASK as well
+ *  -Wrappers for sys_{,rt_}sigsuspend() nolonger needed as they don't
+ *   need ptregs anymore
+ *
+ * Vineetg: Oct 2009
+ *  -In a rare scenario, Process gets a Priv-V exception and gets scheduled
+ *   out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
+ *   active (AE bit enabled).  This causes a double fault for a subseq valid
+ *   exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
+ *   Instr Error could also cause similar scenario, so same there as well.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ *  -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ *   Normally CPU does this automatically, however when doing FAKE rtie,
+ *   we need to explicitly do this. The problem in macros
+ *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *   was being "CLEARED" rather then "SET". Since it is Loop INHIBIT Bit,
+ *   setting it and not clearing it clears ZOL context
+ *
+ * Vineetg: May 16th, 2008
+ *  - r25 now contains the Current Task when in kernel
+ *
+ * Vineetg: Dec 22, 2007
+ *    Minor Surgery of Low Level ISR to make it SMP safe
+ *    - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
+ *    - _current_task is made an array of NR_CPUS
+ *    - Access of _current_task wrapped inside a macro so that if hardware
+ *       team agrees for a dedicated reg, no other code is touched
+ *
+ * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
+ */
+
+/*------------------------------------------------------------------
+ *    Function                            ABI
+ *------------------------------------------------------------------
+ *
+ *  Arguments                           r0 - r7
+ *  Caller Saved Registers              r0 - r12
+ *  Callee Saved Registers              r13- r25
+ *  Global Pointer (gp)                 r26
+ *  Frame Pointer (fp)                  r27
+ *  Stack Pointer (sp)                  r28
+ *  Interrupt link register (ilink1)    r29
+ *  Interrupt link register (ilink2)    r30
+ *  Branch link register (blink)        r31
+ *------------------------------------------------------------------
+ */
+
+       .cpu A7
+
+;############################ Vector Table #################################
+
+; Emit one vector table entry that transfers control to \lbl.
+; Both forms occupy the same slot size; the 'j' form is kept enabled.
+.macro VECTOR  lbl
+#if 1   /* Just in case, build breaks */
+       j   \lbl
+#else
+       b   \lbl
+       nop
+#endif
+.endm
+
+       .section .vector, "ax",@progbits
+       .align 4
+
+/* Each entry in the vector table must occupy 2 words. Since it is a jump
+ * across sections (.vector to .text) we are guaranteed that 'j somewhere'
+ * will use the 'j limm' form of the instruction as long as somewhere is in
+ * a section other than .vector.
+ */
+
+; ********* Critical System Events **********************
+VECTOR   res_service             ; 0x0, Restart Vector  (0x0)
+VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
+VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
+
+; ******************** Device ISRs **********************
+; IRQs 3, 5 and 6 can be built as high-priority (level 2) interrupts;
+; every other device IRQ goes to the level 1 handler.
+#ifdef CONFIG_ARC_IRQ3_LV2
+VECTOR   handle_interrupt_level2
+#else
+VECTOR   handle_interrupt_level1
+#endif
+
+VECTOR   handle_interrupt_level1
+
+#ifdef CONFIG_ARC_IRQ5_LV2
+VECTOR   handle_interrupt_level2
+#else
+VECTOR   handle_interrupt_level1
+#endif
+
+#ifdef CONFIG_ARC_IRQ6_LV2
+VECTOR   handle_interrupt_level2
+#else
+VECTOR   handle_interrupt_level1
+#endif
+
+.rept   25
+VECTOR   handle_interrupt_level1 ; Other devices
+.endr
+
+/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
+
+; ******************** Exceptions **********************
+VECTOR   EV_MachineCheck         ; 0x100, Fatal Machine check   (0x20)
+VECTOR   EV_TLBMissI             ; 0x108, Instruction TLB miss  (0x21)
+VECTOR   EV_TLBMissD             ; 0x110, Data TLB miss         (0x22)
+VECTOR   EV_TLBProtV             ; 0x118, Protection Violation  (0x23)
+                                ;         or Misaligned Access
+VECTOR   EV_PrivilegeV           ; 0x120, Privilege Violation   (0x24)
+VECTOR   EV_Trap                 ; 0x128, Trap exception        (0x25)
+VECTOR   EV_Extension            ; 0x130, Extension Instruction Excp (0x26)
+
+.rept   24
+VECTOR   reserved                ; Reserved Exceptions
+.endr
+
+#include <linux/linkage.h>   /* ARC_{EXTRY,EXIT} */
+#include <asm/entry.h>       /* SAVE_ALL_{INT1,INT2,TRAP...} */
+#include <asm/errno.h>
+#include <asm/arcregs.h>
+#include <asm/irqflags.h>
+
+;##################### Scratch Mem for IRQ stack switching #############
+
+; Scratch word used by the level 1 ISR prologue to stash r9 before the
+; full register file can be saved (see handle_interrupt_level1).
+ARCFP_DATA int1_saved_reg
+       .align 32
+       .type   int1_saved_reg, @object
+       .size   int1_saved_reg, 4
+int1_saved_reg:
+       .zero 4
+
+/* Each Interrupt level needs its own scratch */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ARCFP_DATA int2_saved_reg
+       .type   int2_saved_reg, @object
+       .size   int2_saved_reg, 4
+int2_saved_reg:
+       .zero 4
+
+#endif
+
+; ---------------------------------------------
+       .section .text, "ax",@progbits
+
+; Restart vector: restart is not implemented; 'flag 0x1' presumably halts
+; the core (sets the H bit) -- NOTE(review): confirm against ARC700 ISA docs.
+res_service:           ; processor restart
+       flag    0x1     ; not implemented
+       nop
+       nop
+
+; Shared target for all 24 reserved exception vectors.
+reserved:              ; processor restart
+       rtie            ; jump to processor initializations
+
+;##################### Interrupt Handling ##############################
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+; ---------------------------------------------
+;  Level 2 ISR: Can interrupt a Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level2
+
+       ; TODO-vineetg for SMP this wont work
+       ; free up r9 as scratchpad
+       st  r9, [@int2_saved_reg]
+
+       ;Which mode (user/kernel) was the system in when intr occurred
+       lr  r9, [status32_l2]
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_INT2
+
+       ;------------------------------------------------------
+       ; if L2 IRQ interrupted a L1 ISR, disable preemption
+       ; (A1 bit in the saved status32_l2 tells us an L1 ISR was active)
+       ;------------------------------------------------------
+
+       ld r9, [sp, PT_status32]        ; get status32_l2 (saved in pt_regs)
+       bbit0 r9, STATUS_A1_BIT, 1f     ; L1 not active when L2 IRQ, so normal
+
+       ; A1 is set in status32_l2
+       ; bump thread_info->preempt_count (Disable preemption)
+       GET_CURR_THR_INFO_FROM_SP   r10
+       ld      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+       add     r9, r9, 1
+       st      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+1:
+       ;------------------------------------------------------
+       ; setup params for Linux common ISR and invoke it
+       ; arch_do_IRQ(cause, pt_regs); the mov in the delay slot of the
+       ; branch-and-link sets up the second argument
+       ;------------------------------------------------------
+       lr  r0, [icause2]
+       and r0, r0, 0x1f
+
+       bl.d  @arch_do_IRQ
+       mov r1, sp
+
+       mov r8,0x2
+       sr r8, [AUX_IRQ_LV12]       ; clear bit in Sticky Status Reg
+
+       b   ret_from_exception
+
+ARC_EXIT handle_interrupt_level2
+
+#endif
+
+; ---------------------------------------------
+;  Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level1
+
+       /* free up r9 as scratchpad */
+#ifdef CONFIG_SMP
+       sr  r9, [ARC_REG_SCRATCH_DATA0]
+#else
+       st   r9, [@int1_saved_reg]
+#endif
+
+       ;Which mode (user/kernel) was the system in when intr occurred
+       lr  r9, [status32_l1]
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_INT1
+
+       ; arch_do_IRQ(cause, pt_regs); mov in the delay slot sets up arg2
+       lr  r0, [icause1]
+       and r0, r0, 0x1f
+
+       bl.d  @arch_do_IRQ
+       mov r1, sp
+
+       mov r8,0x1
+       sr r8, [AUX_IRQ_LV12]       ; clear bit in Sticky Status Reg
+
+       b   ret_from_exception
+ARC_EXIT handle_interrupt_level1
+
+;################### Non TLB Exception Handling #############################
+
+; ---------------------------------------------
+; Instruction Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY instr_service
+
+       EXCPN_PROLOG_FREEUP_REG r9
+
+       lr  r9, [erstatus]
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_SYS
+
+       ; args for C handler: cause code, faulting address, pt_regs
+       lr  r0, [ecr]
+       lr  r1, [efa]
+
+       mov r2, sp
+
+       ; leave CPU exception mode before calling into C, so a subsequent
+       ; valid exception cannot double-fault (see file header, Oct 2009 note)
+       FAKE_RET_FROM_EXCPN r9
+
+       bl  do_insterror_or_kprobe
+       b   ret_from_exception
+ARC_EXIT instr_service
+
+; ---------------------------------------------
+; Memory Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY mem_service
+
+       EXCPN_PROLOG_FREEUP_REG r9
+
+       lr  r9, [erstatus]
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_SYS
+
+       ; args for C handler: cause code, faulting address, pt_regs
+       lr  r0, [ecr]
+       lr  r1, [efa]
+       mov r2, sp
+       bl  do_memory_error
+       b   ret_from_exception
+ARC_EXIT mem_service
+
+; ---------------------------------------------
+; Machine Check Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_MachineCheck
+
+       EXCPN_PROLOG_FREEUP_REG r9
+       lr  r9, [erstatus]
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_SYS
+
+       ; args for C handler: cause code, faulting address, pt_regs
+       lr  r0, [ecr]
+       lr  r1, [efa]
+       mov r2, sp
+
+       ; cause code 0x200100 is the (recoverable) duplicate-TLB-entry case,
+       ; handed to do_tlb_overlap_fault; anything else is fatal
+       brne    r0, 0x200100, 1f
+       bl      do_tlb_overlap_fault
+       b       ret_from_exception
+
+1:
+       ; DEAD END: can't do much, display Regs and HALT
+       SAVE_CALLEE_SAVED_USER
+
+       GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
+       st  sp, [r10, THREAD_CALLEE_REG]
+
+       j  do_machine_check_fault
+
+ARC_EXIT EV_MachineCheck
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_TLBProtV
+
+       EXCPN_PROLOG_FREEUP_REG r9
+
+       ;Which mode (user/kernel) was the system in when Exception occurred
+       lr  r9, [erstatus]
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_SYS
+
+       ;---------(3) Save some more regs-----------------
+       ;  vineetg: Mar 6th: Random Seg Fault issue #1
+       ;  ecr and efa were not saved in case an Intr sneaks in
+       ;  after fake rtie
+       ;
+       lr  r3, [ecr]
+       lr  r4, [efa]
+
+       ; --------(4) Return from CPU Exception Mode ---------
+       ;  Fake a rtie, but rtie to next label
+       ;  That way, subsequently, do_page_fault ( ) executes in pure kernel
+       ;  mode with further Exceptions enabled
+
+       FAKE_RET_FROM_EXCPN r9
+
+       ;------ (5) Type of Protection Violation? ----------
+       ;
+       ; ProtV Hardware Exception is triggered for Access Faults of 2 types
+       ;   -Access Violation (WRITE to READ ONLY Page) - for linux COW
+       ;   -Unaligned Access (READ/WRITE on odd boundary)
+       ;
+       cmp r3, 0x230400    ; Misaligned data access ?
+       beq 4f
+
+       ;========= (6a) Access Violation Processing ========
+       cmp r3, 0x230100
+       mov r1, 0x0              ; if LD exception ? write = 0
+       mov.ne r1, 0x1           ; else write = 1
+
+       mov r2, r4              ; faulting address
+       mov r0, sp              ; pt_regs
+       bl  do_page_fault
+       b   ret_from_exception
+
+       ;========== (6b) Non aligned access ============
+4:
+       mov r0, r3              ; cause code
+       mov r1, r4              ; faulting address
+       mov r2, sp              ; pt_regs
+
+#ifdef  CONFIG_ARC_MISALIGN_ACCESS
+       SAVE_CALLEE_SAVED_USER
+       mov r3, sp              ; callee_regs
+#endif
+
+       bl  do_misaligned_access
+
+#ifdef  CONFIG_ARC_MISALIGN_ACCESS
+       DISCARD_CALLEE_SAVED_USER
+#endif
+
+       b   ret_from_exception
+
+ARC_EXIT EV_TLBProtV
+
+; ---------------------------------------------
+; Privilege Violation Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_PrivilegeV
+
+       EXCPN_PROLOG_FREEUP_REG r9
+
+       lr  r9, [erstatus]          ; pre-exception STATUS32 (was CPU in user/kernel mode)
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_SYS
+
+       lr  r0, [ecr]               ; arg0: exception cause code
+       lr  r1, [efa]               ; arg1: faulting address
+       mov r2, sp                  ; arg2: pt_regs
+
+       FAKE_RET_FROM_EXCPN r9      ; leave CPU exception mode before calling "C"
+
+       bl  do_privilege_fault
+       b   ret_from_exception
+ARC_EXIT EV_PrivilegeV
+
+; ---------------------------------------------
+; Extension Instruction Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_Extension
+
+       EXCPN_PROLOG_FREEUP_REG r9
+       lr  r9, [erstatus]          ; pre-exception STATUS32 (was CPU in user/kernel mode)
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_SYS
+
+       lr  r0, [ecr]               ; arg0: exception cause code
+       lr  r1, [efa]               ; arg1: faulting address
+       mov r2, sp                  ; arg2: pt_regs
+       bl  do_extension_fault
+       b   ret_from_exception
+ARC_EXIT EV_Extension
+
+;######################### System Call Tracing #########################
+
+tracesys:
+       ; save EFA in case tracer wants the PC of traced task
+       ; using ERET won't work since next-PC has already committed
+       lr  r12, [efa]
+       GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
+       st  r12, [r11, THREAD_FAULT_ADDR]
+
+       ; PRE Sys Call Ptrace hook
+       mov r0, sp                      ; pt_regs needed
+       bl  @syscall_trace_entry
+
+       ; Tracing code now returns the syscall num (orig or modif)
+       mov r8, r0
+
+       ; Do the Sys Call as we normally would.
+       ; Validate the Sys Call number
+       cmp     r8,  NR_syscalls
+       mov.hi  r0, -ENOSYS
+       bhi     tracesys_exit
+
+       ; Restore the sys-call args. Mere invocation of the hook abv could have
+       ; clobbered them (since they are in scratch regs). The tracer could also
+       ; have deliberately changed the syscall args: r0-r7
+       ld  r0, [sp, PT_r0]
+       ld  r1, [sp, PT_r1]
+       ld  r2, [sp, PT_r2]
+       ld  r3, [sp, PT_r3]
+       ld  r4, [sp, PT_r4]
+       ld  r5, [sp, PT_r5]
+       ld  r6, [sp, PT_r6]
+       ld  r7, [sp, PT_r7]
+       ld.as   r9, [sys_call_table, r8]
+       jl      [r9]        ; Entry into Sys Call Handler
+
+tracesys_exit:
+       st  r0, [sp, PT_r0]     ; sys call return value in pt_regs
+
+       ;POST Sys Call Ptrace Hook
+       bl  @syscall_trace_exit
+       b   ret_from_exception ; NOT ret_from_system_call as it saves r0 which
+       ; we'd done before calling post hook above
+
+;################### Break Point TRAP ##########################
+
+       ; ======= (5b) Trap is due to Break-Point =========
+
+trap_with_param:
+
+       ; gdb's stop_pc computation needs to know this was a breakpoint trap
+       stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+
+       mov r0, r12             ; ECR (loaded by EV_Trap before branching here)
+       lr  r1, [efa]           ; faulting address
+       mov r2, sp              ; pt_regs
+
+       ; Now that we have read EFA, it's safe to do "fake" rtie
+       ;   and get out of CPU exception mode
+       FAKE_RET_FROM_EXCPN r11
+
+       ; Save callee regs in case gdb wants to have a look
+       ; SP will grow up by size of CALLEE Reg-File
+       ; NOTE: clobbers r12
+       SAVE_CALLEE_SAVED_USER
+
+       ; save location of saved Callee Regs @ thread_struct->pc
+       GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
+       st  sp, [r10, THREAD_CALLEE_REG]
+
+       ; Call the trap handler
+       bl  do_non_swi_trap
+
+       ; unwind stack to discard Callee saved Regs
+       DISCARD_CALLEE_SAVED_USER
+
+       b   ret_from_exception
+
+;##################### Trap Handling ##############################
+;
+; EV_Trap caused by TRAP_S and TRAP0 instructions.
+;------------------------------------------------------------------
+;   (1) System Calls
+;       :parameters in r0-r7.
+;       :r8 has the system call number
+;   (2) Break Points
+;------------------------------------------------------------------
+
+ARC_ENTRY EV_Trap
+
+       ; Need at least 1 reg to code the early exception prolog
+       EXCPN_PROLOG_FREEUP_REG r9
+
+       ;Which mode (user/kernel) was the system in when intr occurred
+       lr  r9, [erstatus]
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_TRAP
+
+       ;------- (4) What caused the Trap --------------
+       lr     r12, [ecr]
+       and.f  0, r12, ECR_PARAM_MASK
+       bnz    trap_with_param
+
+       ; ======= (5a) Trap is due to System Call ========
+
+       ; Before doing anything, return from CPU Exception Mode
+       FAKE_RET_FROM_EXCPN r11
+
+       ; If syscall tracing ongoing, invoke pre/post hooks
+       GET_CURR_THR_INFO_FLAGS   r10
+       btst r10, TIF_SYSCALL_TRACE
+       bnz tracesys  ; this never comes back
+
+       ;============ This is normal System Call case ==========
+       ; Sys-call num shd not exceed the total system calls avail
+       cmp     r8,  NR_syscalls
+       mov.hi  r0, -ENOSYS
+       bhi     ret_from_system_call
+
+       ; Offset into the syscall_table and call handler
+       ld.as   r9,[sys_call_table, r8]
+       jl      [r9]        ; Entry into Sys Call Handler
+
+       ; fall through to ret_from_system_call
+ARC_EXIT EV_Trap
+
+ARC_ENTRY ret_from_system_call
+
+       st  r0, [sp, PT_r0]     ; sys call return value in pt_regs
+
+       ; fall through yet again to ret_from_exception
+
+;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
+;
+; If ret to user mode do we need to handle signals, schedule() et al.
+
+ARC_ENTRY ret_from_exception
+
+       ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
+       ld  r8, [sp, PT_status32]   ; returning to User/Kernel Mode
+
+#ifdef CONFIG_PREEMPT
+       bbit0  r8, STATUS_U_BIT, resume_kernel_mode
+#else
+       bbit0  r8, STATUS_U_BIT, restore_regs
+#endif
+
+       ; Before returning to User mode check-for-and-complete any pending work
+       ; such as rescheduling/signal-delivery etc.
+resume_user_mode_begin:
+
+       ; Disable IRQs to ensures that chk for pending work itself is atomic
+       ; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
+       ; interim IRQ).
+       IRQ_DISABLE     r10
+
+       ; Fast Path return to user mode if no pending work
+       GET_CURR_THR_INFO_FLAGS   r9
+       and.f  0,  r9, _TIF_WORK_MASK
+       bz     restore_regs
+
+       ; --- (Slow Path #1) task preemption ---
+       bbit0  r9, TIF_NEED_RESCHED, .Lchk_pend_signals
+       mov    blink, resume_user_mode_begin  ; tail-call to U mode ret chks
+       b      @schedule        ; BTST+Bnz causes relo error in link
+
+.Lchk_pend_signals:
+       IRQ_ENABLE      r10
+
+       ; --- (Slow Path #2) pending signal  ---
+       mov r0, sp      ; pt_regs for arg to do_signal()/do_notify_resume()
+
+       bbit0  r9, TIF_SIGPENDING, .Lchk_notify_resume
+
+       ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
+       ; in pt_reg since the "C" ABI (kernel code) will automatically
+       ; save/restore callee-saved regs.
+       ;
+       ; However, here we need to explicitly save callee regs because
+       ; (i)  If this signal causes coredump - full regfile needed
+       ; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
+       ;      tracer might call PEEKUSR(CALLEE reg)
+       ;
+       ; NOTE: SP will grow up by size of CALLEE Reg-File
+       SAVE_CALLEE_SAVED_USER          ; clobbers r12
+
+       ; save location of saved Callee Regs @ thread_struct->callee
+       GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
+       st  sp, [r10, THREAD_CALLEE_REG]
+
+       bl  @do_signal
+
+       ; Ideally we want to discard the Callee reg above, however if this was
+       ; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
+       RESTORE_CALLEE_SAVED_USER
+
+       b      resume_user_mode_begin   ; loop back to start of U mode ret
+
+       ; --- (Slow Path #3) notify_resume ---
+.Lchk_notify_resume:
+       btst   r9, TIF_NOTIFY_RESUME
+       blnz   @do_notify_resume
+       b      resume_user_mode_begin   ; unconditionally back to U mode ret chks
+                                       ; for single exit point from this block
+
+#ifdef CONFIG_PREEMPT
+
+resume_kernel_mode:
+
+       ; Can't preempt if preemption disabled
+       GET_CURR_THR_INFO_FROM_SP   r10
+       ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
+       brne  r8, 0, restore_regs
+
+       ; check if this task's NEED_RESCHED flag set
+       ld  r9, [r10, THREAD_INFO_FLAGS]
+       bbit0  r9, TIF_NEED_RESCHED, restore_regs
+
+       IRQ_DISABLE     r9
+
+       ; Invoke PREEMPTION
+       bl      preempt_schedule_irq
+
+       ; preempt_schedule_irq() always returns with IRQ disabled
+#endif
+
+       ; fall through
+
+;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; IRQ shd definitely not happen between now and rtie
+
+restore_regs :
+
+       ; Disable Interrupts while restoring reg-file back
+       ; XXX can this be optimised out
+       IRQ_DISABLE_SAVE    r9, r10     ;@r10 has pristine (pre-disable) copy
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       ; Restore User R25
+       ; Earlier this used to be only for returning to user mode
+       ; However with 2 levels of IRQ this can also happen even if
+       ; in kernel mode
+       ld r9, [sp, PT_sp]
+       brhs r9, VMALLOC_START, 8f
+       RESTORE_USER_R25
+8:
+#endif
+
+       ; Restore REG File. In case multiple Events outstanding,
+       ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
+       ; Note that we use realtime STATUS32 (not pt_regs->status32) to
+       ; decide that.
+
+       ; if Returning from Exception
+       bbit0  r10, STATUS_AE_BIT, not_exception
+       RESTORE_ALL_SYS
+       rtie
+
+       ; Not Exception so maybe Interrupts (Level 1 or 2)
+
+not_exception:
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+       bbit0  r10, STATUS_A2_BIT, not_level2_interrupt
+
+       ;------------------------------------------------------------------
+       ; if L2 IRQ interrupted a L1 ISR,  we'd disabled preemption earlier
+       ; so that sched doesn't move to new task, causing L1 to be delayed
+       ; undeterministically. Now that we've achieved that, lets reset
+       ; things to what they were, before returning from L2 context
+       ;----------------------------------------------------------------
+
+       ldw  r9, [sp, PT_orig_r8]      ; get orig_r8 to make sure it is
+       brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path
+
+       ld r9, [sp, PT_status32]       ; get statu32_l2 (saved in pt_regs)
+       bbit0 r9, STATUS_A1_BIT, 149f  ; L1 not active when L2 IRQ, so normal
+
+       ; A1 is set in status32_l2
+       ; decrement thread_info->preempt_count (re-enable preemption)
+       GET_CURR_THR_INFO_FROM_SP   r10
+       ld      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+       ; paranoid check, given A1 was active when A2 happened, preempt count
+       ; must not be 0 because we would have incremented it.
+       ; If this does happen we simply HALT as it means a BUG !!!
+       cmp     r9, 0
+       bnz     2f
+       flag 1
+
+2:
+       sub     r9, r9, 1
+       st      r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+149:
+       ;return from level 2
+       RESTORE_ALL_INT2
+debug_marker_l2:
+       rtie
+
+not_level2_interrupt:
+
+#endif
+
+       bbit0  r10, STATUS_A1_BIT, not_level1_interrupt
+
+       ;return from level 1
+
+       RESTORE_ALL_INT1
+debug_marker_l1:
+       rtie
+
+not_level1_interrupt:
+
+       ;this case is for syscalls or Exceptions (with fake rtie)
+
+       RESTORE_ALL_SYS
+debug_marker_syscall:
+       rtie
+
+ARC_EXIT ret_from_exception
+
+ARC_ENTRY ret_from_fork
+       ; when the forked child comes here from the __switch_to function
+       ; r0 has the last task pointer.
+       ; put last task in scheduler queue
+       bl   @schedule_tail
+
+       ; If kernel thread, jump to its entry-point
+       ld   r9, [sp, PT_status32]
+       brne r9, 0, 1f              ; non-zero saved status32 => returning to userland
+
+       jl.d [r14]                  ; r14/r13 presumably set up at fork time - not visible here
+       mov  r0, r13            ; arg to payload
+
+1:
+       ; special case of kernel_thread entry point returning back due to
+       ; kernel_execve() - pretend return from syscall to ret to userland
+       b    ret_from_exception
+ARC_EXIT ret_from_fork
+
+;################### Special Sys Call Wrappers ##########################
+
+; TBD: call do_fork directly from here
+ARC_ENTRY sys_fork_wrapper
+       SAVE_CALLEE_SAVED_USER      ; save callee regs across the fork call
+       bl  @sys_fork
+       DISCARD_CALLEE_SAVED_USER   ; unwind stack to drop them again
+
+       GET_CURR_THR_INFO_FLAGS   r10
+       btst r10, TIF_SYSCALL_TRACE
+       bnz  tracesys_exit          ; if traced, deliver the post-syscall ptrace hook
+
+       b ret_from_system_call
+ARC_EXIT sys_fork_wrapper
+
+ARC_ENTRY sys_vfork_wrapper
+       SAVE_CALLEE_SAVED_USER      ; save callee regs across the vfork call
+       bl  @sys_vfork
+       DISCARD_CALLEE_SAVED_USER   ; unwind stack to drop them again
+
+       GET_CURR_THR_INFO_FLAGS   r10
+       btst r10, TIF_SYSCALL_TRACE
+       bnz  tracesys_exit          ; if traced, deliver the post-syscall ptrace hook
+
+       b ret_from_system_call
+ARC_EXIT sys_vfork_wrapper
+
+ARC_ENTRY sys_clone_wrapper
+       SAVE_CALLEE_SAVED_USER      ; save callee regs across the clone call
+       bl  @sys_clone
+       DISCARD_CALLEE_SAVED_USER   ; unwind stack to drop them again
+
+       GET_CURR_THR_INFO_FLAGS   r10
+       btst r10, TIF_SYSCALL_TRACE
+       bnz  tracesys_exit          ; if traced, deliver the post-syscall ptrace hook
+
+       b ret_from_system_call
+ARC_EXIT sys_clone_wrapper
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+; Workaround for bug 94179 (STAR ):
+; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
+; section (.debug_frame) as loadable. So we force it here.
+; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
+; would not work after a clean build due to kernel build system dependencies.
+.section .debug_frame, "wa",@progbits
+#endif
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
new file mode 100644 (file)
index 0000000..f352e51
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * fpu.c - save/restore of Floating Point Unit Registers on task switch
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+
+/*
+ * To save/restore FPU regs, simplest scheme would use LR/SR insns.
+ * However since SR serializes the pipeline, an alternate "hack" can be used
+ * which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
+ *
+ * Store to 64bit dpfp1 reg from a pair of core regs:
+ *   dexcl1 0, r1, r0  ; where r1:r0 is the 64 bit val
+ *
+ * Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
+ *   mov_s    r3, 0
+ *   daddh11  r1, r3, r3   ; get "hi" into r1 (dpfp1 unchanged)
+ *   dexcl1   r0, r1, r3   ; get "low" into r0 (dpfp1 low clobbered)
+ *   dexcl1    0, r1, r0   ; restore dpfp1 to orig value
+ *
+ * However we can tweak the read, so that read-out of outgoing task's FPU regs
+ * and write of incoming task's regs happen in one shot. So all the work is
+ * done before context switch
+ */
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+       unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;         /* lo word of prev's dpfp regs */
+       unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;       /* lo word of next's dpfp regs */
+
+       const unsigned int zero = 0;
+
+       __asm__ __volatile__(   /* dpfp1: read out prev's value and write next's in one shot (see above) */
+               "daddh11  %0, %2, %2\n"
+               "dexcl1   %1, %3, %4\n"
+               : "=&r" (*(saveto + 1)), /* early clobber must be here */
+                 "=&r" (*(saveto))
+               : "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
+       );
+
+       __asm__ __volatile__(   /* dpfp2: same dance */
+               "daddh22  %0, %2, %2\n"
+               "dexcl2   %1, %3, %4\n"
+               : "=&r"(*(saveto + 3)), /* early clobber must be here */
+                 "=&r"(*(saveto + 2))
+               : "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
+       );
+}
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
new file mode 100644 (file)
index 0000000..006dec3
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * ARC CPU startup Code
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Dec 2007
+ *  -Check if we are running on Simulator or on real hardware
+ *      to skip certain things during boot on simulator
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+#include <linux/linkage.h>
+#include <asm/arcregs.h>
+
+       .cpu A7
+
+       .section .init.text, "ax",@progbits
+       .type stext, @function
+       .globl stext
+stext:
+       ;-------------------------------------------------------------------
+       ; Don't clobber r0-r4 yet. It might have bootloader provided info
+       ;-------------------------------------------------------------------
+
+#ifdef CONFIG_SMP
+       ; Only Boot (Master) proceeds. Others wait in platform dependent way
+       ;       IDENTITY Reg [ 3  2  1  0 ]
+       ;       (cpu-id)             ^^^        => Zero for UP ARC700
+       ;                                       => #Core-ID if SMP (Master 0)
+       GET_CPU_ID  r5
+       cmp     r5, 0
+       jnz     arc_platform_smp_wait_to_boot
+#endif
+       ; Clear BSS before updating any globals
+       ; XXX: use ZOL here
+       mov     r5, __bss_start
+       mov     r6, __bss_stop
+1:
+       st.ab   0, [r5,4]
+       brlt    r5, r6, 1b
+
+#ifdef CONFIG_CMDLINE_UBOOT
+       ; support for bootloader provided cmdline
+       ;    If cmdline passed by u-boot, then
+       ;    r0 = 1  (because ATAGS parsing, now retired, used to use 0)
+       ;    r1 = magic number (board identity)
+       ;    r2 = addr of cmdline string (somewhere in memory/flash)
+
+       brne    r0, 1, .Lother_bootup_chores    ; u-boot didn't pass cmdline
+       breq    r2, 0, .Lother_bootup_chores    ; or cmdline is NULL
+
+       mov     r5, @command_line
+1:
+       ldb.ab  r6, [r2, 1]
+       breq    r6, 0, .Lother_bootup_chores
+       b.d     1b
+       stb.ab  r6, [r5, 1]
+#endif
+
+.Lother_bootup_chores:
+
+       ; Identify if running on ISS vs Silicon
+       ;       IDENTITY Reg [ 3  2  1  0 ]
+       ;       (chip-id)      ^^^^^            ==> 0xffff for ISS
+       lr      r0, [identity]
+       lsr     r3, r0, 16
+       cmp     r3, 0xffff
+       mov.z   r4, 0
+       mov.nz  r4, 1
+       st      r4, [@running_on_hw]
+
+       ; setup "current" tsk and optionally cache it in dedicated r25
+       mov     r9, @init_task
+       SET_CURR_TASK_ON_CPU  r9, r0    ; r9 = tsk, r0 = scratch
+
+       ; setup stack (fp, sp)
+       mov     fp, 0
+
+       ; tsk->thread_info is really a PAGE, whose bottom hoists stack
+       GET_TSK_STACK_BASE r9, sp       ; r9 = tsk, sp = stack base(output)
+
+       j       start_kernel    ; "C" entry point
+
+#ifdef CONFIG_SMP
+;----------------------------------------------------------------
+;     First lines of code run by secondary before jumping to 'C'
+;----------------------------------------------------------------
+       .section .init.text, "ax",@progbits
+       .type first_lines_of_secondary, @function
+       .globl first_lines_of_secondary
+
+first_lines_of_secondary:
+
+       ; setup per-cpu idle task as "current" on this CPU
+       ld      r0, [@secondary_idle_tsk]
+       SET_CURR_TASK_ON_CPU  r0, r1
+
+       ; setup stack (fp, sp)
+       mov     fp, 0
+
+       ; set its stack base to tsk->thread_info bottom
+       GET_TSK_STACK_BASE r0, sp
+
+       j       start_kernel_secondary
+
+#endif
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
new file mode 100644 (file)
index 0000000..551c10d
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <asm/sections.h>
+#include <asm/irq.h>
+#include <asm/mach_desc.h>
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ * -Platform Independent (must for any ARC700)
+ * -Needed for each CPU (hence not foldable into init_IRQ)
+ *
+ * What it does:
+ * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
+ * -Disable all IRQs (on CPU side)
+ * -Optionally, setup the High priority Interrupts as Level 2 IRQs
+ */
+void __init arc_init_IRQ(void)
+{
+       int level_mask = 0;     /* bitmask of IRQ lines to promote to Level-2 (hi prio) */
+
+       write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);    /* vector table base (see above) */
+
+       /* Disable all IRQs: enable them as devices request */
+       write_aux_reg(AUX_IENABLE, 0);
+
+       /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
+#ifdef CONFIG_ARC_IRQ3_LV2
+       level_mask |= (1 << 3);
+#endif
+#ifdef CONFIG_ARC_IRQ5_LV2
+       level_mask |= (1 << 5);
+#endif
+#ifdef CONFIG_ARC_IRQ6_LV2
+       level_mask |= (1 << 6);
+#endif
+
+       if (level_mask) {
+               pr_info("Level-2 interrupts bitset %x\n", level_mask);
+               write_aux_reg(AUX_IRQ_LEV, level_mask);         /* latch the Level-2 selection */
+       }
+}
+
+/*
+ * ARC700 core includes a simple on-chip intc supporting
+ * -per IRQ enable/disable
+ * -2 levels of interrupts (high/low)
+ * -all interrupts being level triggered
+ *
+ * To reduce platform code, we assume all IRQs directly hooked-up into intc.
+ * Platforms with external intc, hence cascaded IRQs, are free to over-ride
+ * below, per IRQ.
+ */
+
+static void arc_mask_irq(struct irq_data *data)
+{
+       arch_mask_irq(data->irq);       /* irq_chip hook: disable this line at the core intc */
+}
+
+static void arc_unmask_irq(struct irq_data *data)
+{
+       arch_unmask_irq(data->irq);     /* irq_chip hook: re-enable this line at the core intc */
+}
+
+static struct irq_chip onchip_intc = {
+       .name           = "ARC In-core Intc",
+       .irq_mask       = arc_mask_irq,
+       .irq_unmask     = arc_unmask_irq,
+};
+
+static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
+                               irq_hw_number_t hw)
+{
+       if (irq == TIMER0_IRQ)  /* timer gets the per-cpu flow handler */
+               irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
+       else                    /* core intc IRQs are level triggered (see above) */
+               irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops arc_intc_domain_ops = {
+       .xlate = irq_domain_xlate_onecell,
+       .map = arc_intc_domain_map,
+};
+
+static struct irq_domain *root_domain;
+
+void __init init_onchip_IRQ(void)
+{
+       struct device_node *intc = NULL;
+
+       intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
+       if (!intc)      /* checkpatch style: space after "if", matching check below */
+               panic("DeviceTree Missing incore intc\n");
+
+       root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
+                                           &arc_intc_domain_ops, NULL);
+
+       if (!root_domain)
+               panic("root irq domain not avail\n");
+
+       /* with this we don't need to export root_domain */
+       irq_set_default_host(root_domain);
+}
+
+/*
+ * Late Interrupt system init called from start_kernel for Boot CPU only
+ *
+ * Since slab must already be initialized, platforms can start doing any
+ * needed request_irq( )s
+ */
+void __init init_IRQ(void)
+{
+       init_onchip_IRQ();      /* register the core intc irq_domain first */
+
+       /* Any external intc can be setup here */
+       if (machine_desc->init_irq)
+               machine_desc->init_irq();
+
+#ifdef CONFIG_SMP
+       /* Master CPU can initialize its side of IPI */
+       if (machine_desc->init_smp)
+               machine_desc->init_smp(smp_processor_id());
+#endif
+}
+
+/*
+ * "C" Entry point for any ARC ISR, called from low level vector handler
+ * @irq is the vector number read from ICAUSE reg of on-chip intc
+ */
+void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);  /* publish regs for get_irq_regs() users */
+
+       irq_enter();            /* enter hard-IRQ context accounting */
+       generic_handle_irq(irq);
+       irq_exit();
+       set_irq_regs(old_regs); /* restore previous value (nested IRQ safe) */
+}
+
+int __init get_hw_config_num_irq(void)
+{
+       uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
+
+       switch (val & 0x03) {   /* low 2 bits of BCR encode the IRQ count */
+       case 0:
+               return 16;
+       case 1:
+               return 32;
+       case 2:
+               return 8;
+       default:
+               return 0;       /* unrecognized encoding */
+       }
+
+       return 0;       /* not reached: every switch arm above returns */
+}
+
+/*
+ * arch_local_irq_enable - Enable interrupts.
+ *
+ * 1. Explicitly called to re-enable interrupts
+ * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
+ *    which maybe in hard ISR itself
+ *
+ * Semantics of this function change depending on where it is called from:
+ *
+ * -If called from hard-ISR, it must not invert interrupt priorities
+ *  e.g. suppose TIMER is high priority (Level 2) IRQ
+ *    Time hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
+ *    Here local_irq_enable( ) shd not re-enable lower priority interrupts
+ * -If called from soft-ISR, it must re-enable all interrupts
+ *    soft ISR are low priority jobs which can be very slow, thus all IRQs
+ *    must be enabled while they run.
+ *    Now hardware context wise we may still be in L2 ISR (not done rtie)
+ *    still we must re-enable both L1 and L2 IRQs
+ *  Another twist is prev scenario with flow being
+ *     L1 ISR ==> interrupted by L2 ISR  ==> L2 soft ISR
+ *     here we must not re-enable L1 as prev L1 Interrupt's h/w context will get
+ *     over-written (this is deficiency in ARC700 Interrupt mechanism)
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS   /* Complex version for 2 IRQ levels */
+
+void arch_local_irq_enable(void)
+{
+
+       unsigned long flags;
+       flags = arch_local_save_flags();
+
+       /* Allow both L1 and L2 at the onset */
+       flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+
+       /* Called from hard ISR (between irq_enter and irq_exit) */
+       if (in_irq()) {
+
+               /* If in L2 ISR, don't re-enable any further IRQs as this can
+                * cause IRQ priorities to get upside down. e.g. it could allow
+                * L1 be taken while in L2 hard ISR which is wrong not only in
+                * theory, it can also cause the dreaded L1-L2-L1 scenario
+                */
+               if (flags & STATUS_A2_MASK)
+                       flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+
+               /* Even if in L1 ISR, allow Higher prio L2 IRQs */
+               else if (flags & STATUS_A1_MASK)
+                       flags &= ~(STATUS_E1_MASK);
+       }
+
+       /* called from soft IRQ, ideally we want to re-enable all levels */
+
+       else if (in_softirq()) {
+
+               /* However if this is case of L1 interrupted by L2,
+                * re-enabling both may cause whaco L1-L2-L1 scenario
+                * because ARC700 allows level 1 to interrupt an active L2 ISR
+                * Thus we disable both
+                * However some code, executing in soft ISR wants some IRQs
+                * to be enabled so we re-enable L2 only
+                *
+                * How do we determine L1 intr by L2
+                *  -A2 is set (means in L2 ISR)
+                *  -E1 is set in this ISR's pt_regs->status32 which is
+                *      saved copy of status32_l2 when l2 ISR happened
+                */
+               struct pt_regs *pt = get_irq_regs();
+               if ((flags & STATUS_A2_MASK) && pt &&
+                   (pt->status32 & STATUS_A1_MASK)) {
+                       /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
+                       flags &= ~(STATUS_E1_MASK);
+               }
+       }
+
+       arch_local_irq_restore(flags);
+}
+
+#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
+
+/*
+ * Simpler version for only 1 level of interrupt
+ * Here we only Worry about Level 1 Bits
+ */
+void arch_local_irq_enable(void)
+{
+       unsigned long flags;
+
+       /*
+        * ARC IDE driver tries to re-enable interrupts from hard-isr
+        * context which is simply wrong
+        */
+       if (in_irq()) {
+               WARN_ONCE(1, "IRQ enabled from hard-isr");
+               return;
+       }
+
+       flags = arch_local_save_flags();
+       flags |= (STATUS_E1_MASK | STATUS_E2_MASK);     /* enable both L1 and L2 */
+       arch_local_irq_restore(flags);
+}
+#endif
+EXPORT_SYMBOL(arch_local_irq_enable);
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
new file mode 100644 (file)
index 0000000..2888ba5
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kgdb.h>
+#include <asm/disasm.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Pack kernel pt_regs/callee_regs into gdb's flat register array.
+ * r0-r26 come via get_reg(); remaining slots are zeroed first, then the
+ * named special registers are filled in.  _STOP_PC mirrors ->ret.
+ */
+static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+                       struct callee_regs *cregs)
+{
+       int regno;
+
+       for (regno = 0; regno <= 26; regno++)
+               gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
+
+       for (regno = 27; regno < GDB_MAX_REGS; regno++)
+               gdb_regs[regno] = 0;
+
+       gdb_regs[_FP]           = kernel_regs->fp;
+       gdb_regs[__SP]          = kernel_regs->sp;
+       gdb_regs[_BLINK]        = kernel_regs->blink;
+       gdb_regs[_RET]          = kernel_regs->ret;
+       gdb_regs[_STATUS32]     = kernel_regs->status32;
+       gdb_regs[_LP_COUNT]     = kernel_regs->lp_count;
+       gdb_regs[_LP_END]       = kernel_regs->lp_end;
+       gdb_regs[_LP_START]     = kernel_regs->lp_start;
+       gdb_regs[_BTA]          = kernel_regs->bta;
+       gdb_regs[_STOP_PC]      = kernel_regs->ret;
+}
+
+/*
+ * Inverse of to_gdb_regs(): unpack gdb's flat register array back into
+ * kernel pt_regs/callee_regs (r0-r26 via set_reg(), then the specials).
+ */
+static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+                       struct callee_regs *cregs)
+{
+       int regno;
+
+       for (regno = 0; regno <= 26; regno++)
+               set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
+
+       kernel_regs->fp         = gdb_regs[_FP];
+       kernel_regs->sp         = gdb_regs[__SP];
+       kernel_regs->blink      = gdb_regs[_BLINK];
+       kernel_regs->ret        = gdb_regs[_RET];
+       kernel_regs->status32   = gdb_regs[_STATUS32];
+       kernel_regs->lp_count   = gdb_regs[_LP_COUNT];
+       kernel_regs->lp_end     = gdb_regs[_LP_END];
+       kernel_regs->lp_start   = gdb_regs[_LP_START];
+       kernel_regs->bta        = gdb_regs[_BTA];
+}
+
+
+/* kgdb hook: convert current task's regs to gdb layout */
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+       to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+               current->thread.callee_reg);
+}
+
+/* kgdb hook: write gdb-supplied register values back into current task */
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+       from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+               current->thread.callee_reg);
+}
+
+/* kgdb hook: dump regs of a (non-current) sleeping task; no-op if !task */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
+                                struct task_struct *task)
+{
+       if (task)
+               to_gdb_regs(gdb_regs, task_pt_regs(task),
+                       (struct callee_regs *) task->thread.callee_reg);
+}
+
+/* State for kgdb software single-step: the saved opcodes and the one or
+ * two addresses (next insn, plus branch target when is_branch) where trap
+ * instructions were planted; 'armed' means traps are currently in place.
+ */
+struct single_step_data_t {
+       uint16_t opcode[2];
+       unsigned long address[2];
+       int is_branch;
+       int armed;
+} single_step_data;
+
+/*
+ * Restore the original opcode(s) overwritten by do_single_step() and
+ * flush the icache; no-op when no traps are armed.
+ */
+static void undo_single_step(struct pt_regs *regs)
+{
+       if (single_step_data.armed) {
+               int i;
+
+               for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
+                       memcpy((void *) single_step_data.address[i],
+                               &single_step_data.opcode[i],
+                               BREAK_INSTR_SIZE);
+
+                       flush_icache_range(single_step_data.address[i],
+                               single_step_data.address[i] +
+                               BREAK_INSTR_SIZE);
+               }
+               single_step_data.armed = 0;
+       }
+}
+
+/* Save the opcode at @address into @save, overwrite it with the kgdb
+ * breakpoint instruction and flush the icache for that range.
+ */
+static void place_trap(unsigned long address, void *save)
+{
+       memcpy(save, (void *) address, BREAK_INSTR_SIZE);
+       memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
+               BREAK_INSTR_SIZE);
+       flush_icache_range(address, address + BREAK_INSTR_SIZE);
+}
+
+/*
+ * Arm software single-step: let the disassembler compute the next PC
+ * (and the branch target, if the insn is a branch) and plant a trap at
+ * each, recording state for undo_single_step().
+ */
+static void do_single_step(struct pt_regs *regs)
+{
+       single_step_data.is_branch = disasm_next_pc((unsigned long)
+               regs->ret, regs, (struct callee_regs *)
+               current->thread.callee_reg,
+               &single_step_data.address[0],
+               &single_step_data.address[1]);
+
+       place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
+
+       if (single_step_data.is_branch) {
+               place_trap(single_step_data.address[1],
+                       &single_step_data.opcode[1]);
+       }
+
+       single_step_data.armed++;
+}
+
+/*
+ * Handle gdb serial-protocol packets 's' (step), 'c' (continue),
+ * 'D' (detach) and 'k' (kill): optionally resume at a gdb-supplied
+ * address and arm single-step for 's'.  Returns 0 if handled, -1 if not.
+ */
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+                              char *remcomInBuffer, char *remcomOutBuffer,
+                              struct pt_regs *regs)
+{
+       unsigned long addr;
+       char *ptr;
+
+       undo_single_step(regs);
+
+       switch (remcomInBuffer[0]) {
+       case 's':
+       case 'c':
+               ptr = &remcomInBuffer[1];
+               if (kgdb_hex2long(&ptr, &addr))
+                       regs->ret = addr;
+               /* fallthrough - 's'/'c' share the resume handling below */
+
+       case 'D':
+       case 'k':
+               atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+               if (remcomInBuffer[0] == 's') {
+                       do_single_step(regs);
+                       atomic_set(&kgdb_cpu_doing_single_step,
+                                  smp_processor_id());
+               }
+
+               return 0;
+       }
+       return -1;
+}
+
+/* kgdb hook: report the PC at which the exception was taken */
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+       return instruction_pointer(regs);
+}
+
+/* kgdb hook: one-time init - start with single-step traps disarmed */
+int kgdb_arch_init(void)
+{
+       single_step_data.armed = 0;
+       return 0;
+}
+
+/* Entry from the trap handler; @param distinguishes the trap_s flavour */
+void kgdb_trap(struct pt_regs *regs, int param)
+{
+       /* trap_s 3 is used for breakpoints that overwrite existing
+        * instructions, while trap_s 4 is used for compiled breakpoints.
+        *
+        * with trap_s 3 breakpoints the original instruction needs to be
+        * restored and continuation needs to start at the location of the
+        * breakpoint.
+        *
+        * with trap_s 4 (compiled) breakpoints, continuation needs to
+        * start after the breakpoint.
+        */
+       if (param == 3)
+               instruction_pointer(regs) -= BREAK_INSTR_SIZE;
+
+       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+}
+
+/* kgdb hook: nothing to tear down on ARC */
+void kgdb_arch_exit(void)
+{
+}
+
+/* kgdb hook: redirect execution to @ip on resume */
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+       instruction_pointer(regs) = ip;
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+       /* breakpoint instruction: TRAP_S 0x3 (byte order follows CPU
+        * endianness since it is memcpy'd over live code)
+        */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       .gdb_bpt_instr          = {0x78, 0x7e},
+#else
+       .gdb_bpt_instr          = {0x7e, 0x78},
+#endif
+};
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
new file mode 100644 (file)
index 0000000..3bfeacb
--- /dev/null
@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/current.h>
+#include <asm/disasm.h>
+
+#define MIN_STACK_SIZE(addr)   min((unsigned long)MAX_STACK_SIZE, \
+               (unsigned long)current_thread_info() + THREAD_SIZE - (addr))
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+/*
+ * Validate and prepare a probe point: reject unaligned addresses,
+ * record whether the probed insn is 16-bit, and stash the original
+ * opcode for later restore.  Returns 0 or -EINVAL.
+ */
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+       /* Attempt to probe at unaligned address */
+       if ((unsigned long)p->addr & 0x01)
+               return -EINVAL;
+
+       /* Address should not be in exception handling code */
+
+       p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
+       p->opcode = *p->addr;
+
+       return 0;
+}
+
+/* Write the breakpoint (UNIMP_S) over the probed insn and sync icache */
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+       *p->addr = UNIMP_S_INSTRUCTION;
+
+       flush_icache_range((unsigned long)p->addr,
+                          (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+/* Restore the saved original opcode at the probe point and sync icache */
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+       *p->addr = p->opcode;
+
+       flush_icache_range((unsigned long)p->addr,
+                          (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+/*
+ * Fully remove a probe: disarm it and, if single-step traps (t1/t2)
+ * are still planted (probe removed mid-handling), restore those too.
+ */
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+       arch_disarm_kprobe(p);
+
+       /* Can we remove the kprobe in the middle of kprobe handling? */
+       if (p->ainsn.t1_addr) {
+               *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+               flush_icache_range((unsigned long)p->ainsn.t1_addr,
+                                  (unsigned long)p->ainsn.t1_addr +
+                                  sizeof(kprobe_opcode_t));
+
+               p->ainsn.t1_addr = NULL;
+       }
+
+       if (p->ainsn.t2_addr) {
+               *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+               flush_icache_range((unsigned long)p->ainsn.t2_addr,
+                                  (unsigned long)p->ainsn.t2_addr +
+                                  sizeof(kprobe_opcode_t));
+
+               p->ainsn.t2_addr = NULL;
+       }
+}
+
+/* Stash the active kprobe + status so a reentrant probe can nest */
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+       kcb->prev_kprobe.kp = kprobe_running();
+       kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+/* Undo save_previous_kprobe() after the nested probe completes */
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+       kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+/* Mark @p as the kprobe being handled on this CPU */
+static inline void __kprobes set_current_kprobe(struct kprobe *p)
+{
+       __get_cpu_var(current_kprobe) = p;
+}
+
+/*
+ * Remove the single-step trap(s) planted by setup_singlestep() (t1
+ * always, t2 only for a branch) and restore the original opcodes.
+ * @addr and @regs are currently unused; kept for the caller's
+ * established signature.
+ */
+static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
+                                      struct pt_regs *regs)
+{
+       /* Remove the trap instructions inserted for single step and
+        * restore the original instructions
+        */
+       if (p->ainsn.t1_addr) {
+               *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+               flush_icache_range((unsigned long)p->ainsn.t1_addr,
+                                  (unsigned long)p->ainsn.t1_addr +
+                                  sizeof(kprobe_opcode_t));
+
+               p->ainsn.t1_addr = NULL;
+       }
+
+       if (p->ainsn.t2_addr) {
+               *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+               flush_icache_range((unsigned long)p->ainsn.t2_addr,
+                                  (unsigned long)p->ainsn.t2_addr +
+                                  sizeof(kprobe_opcode_t));
+
+               p->ainsn.t2_addr = NULL;
+       }
+}
+
+/*
+ * Restore the probed insn so it can execute natively, then plant
+ * TRAP_S traps at the possible next PC(s): the fallthrough address
+ * (t1) and, for a branch, the branch target (t2).  Delay-slot cases
+ * are special-cased via STATUS32.DE (bit 6, the 0x40 test below).
+ */
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+       unsigned long next_pc;
+       unsigned long tgt_if_br = 0;
+       int is_branch;
+       unsigned long bta;
+
+       /* Copy the opcode back to the kprobe location and execute the
+        * instruction. Because of this we will not be able to get into the
+        * same kprobe until this kprobe is done
+        */
+       *(p->addr) = p->opcode;
+
+       flush_icache_range((unsigned long)p->addr,
+                          (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+
+       /* Now we insert the trap at the next location after this instruction to
+        * single step. If it is a branch we insert the trap at possible branch
+        * targets
+        */
+
+       bta = regs->bta;
+
+       if (regs->status32 & 0x40) {
+               /* We are in a delay slot with the branch taken */
+
+               next_pc = bta & ~0x01;
+
+               if (!p->ainsn.is_short) {
+                       if (bta & 0x01)
+                               regs->blink += 2;
+                       else {
+                               /* Branch not taken */
+                               next_pc += 2;
+
+                               /* next pc is taken from bta after executing the
+                                * delay slot instruction
+                                */
+                               regs->bta += 2;
+                       }
+               }
+
+               is_branch = 0;
+       } else
+               is_branch =
+                   disasm_next_pc((unsigned long)p->addr, regs,
+                       (struct callee_regs *) current->thread.callee_reg,
+                       &next_pc, &tgt_if_br);
+
+       p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
+       p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
+       *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
+
+       flush_icache_range((unsigned long)p->ainsn.t1_addr,
+                          (unsigned long)p->ainsn.t1_addr +
+                          sizeof(kprobe_opcode_t));
+
+       if (is_branch) {
+               p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
+               p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
+               *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
+
+               flush_icache_range((unsigned long)p->ainsn.t2_addr,
+                                  (unsigned long)p->ainsn.t2_addr +
+                                  sizeof(kprobe_opcode_t));
+       }
+}
+
+/*
+ * Breakpoint-hit entry point.  Returns 1 if the trap belonged to a
+ * kprobe (reentrant probe, normal hit, or a jprobe break), 0 otherwise.
+ * On the 0 path preemption is re-enabled before returning; on the 1
+ * path it stays disabled until the post/fault handler finishes.
+ */
+int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+{
+       struct kprobe *p;
+       struct kprobe_ctlblk *kcb;
+
+       preempt_disable();
+
+       kcb = get_kprobe_ctlblk();
+       p = get_kprobe((unsigned long *)addr);
+
+       if (p) {
+               /*
+                * We have reentered the kprobe_handler, since another kprobe
+                * was hit while within the handler, we save the original
+                * kprobes and single step on the instruction of the new probe
+                * without calling any user handlers to avoid recursive
+                * kprobes.
+                */
+               if (kprobe_running()) {
+                       save_previous_kprobe(kcb);
+                       set_current_kprobe(p);
+                       kprobes_inc_nmissed_count(p);
+                       setup_singlestep(p, regs);
+                       kcb->kprobe_status = KPROBE_REENTER;
+                       return 1;
+               }
+
+               set_current_kprobe(p);
+               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+               /* If we have no pre-handler or it returned 0, we continue with
+                * normal processing. If we have a pre-handler and it returned
+                * non-zero - which is expected from setjmp_pre_handler for
+                * jprobe, we return without single stepping and leave that to
+                * the break-handler which is invoked by a kprobe from
+                * jprobe_return
+                */
+               if (!p->pre_handler || !p->pre_handler(p, regs)) {
+                       setup_singlestep(p, regs);
+                       kcb->kprobe_status = KPROBE_HIT_SS;
+               }
+
+               return 1;
+       } else if (kprobe_running()) {
+               p = __get_cpu_var(current_kprobe);
+               if (p->break_handler && p->break_handler(p, regs)) {
+                       setup_singlestep(p, regs);
+                       kcb->kprobe_status = KPROBE_HIT_SS;
+                       return 1;
+               }
+       }
+
+       /* no_kprobe: */
+       preempt_enable_no_resched();
+       return 0;
+}
+
+/*
+ * Runs on the single-step trap after the probed insn executed: remove
+ * the step traps, re-arm the probe, fire the post handler (unless we
+ * are unwinding a reentrant probe) and re-enable preemption.
+ * Returns 1 if a kprobe was active, 0 if not.
+ */
+static int __kprobes arc_post_kprobe_handler(unsigned long addr,
+                                        struct pt_regs *regs)
+{
+       struct kprobe *cur = kprobe_running();
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       if (!cur)
+               return 0;
+
+       resume_execution(cur, addr, regs);
+
+       /* Rearm the kprobe */
+       arch_arm_kprobe(cur);
+
+       /*
+        * When we return from trap instruction we go to the next instruction
+        * We restored the actual instruction in resume_execution() and we
+        * want to return to the same address and execute it
+        */
+       regs->ret = addr;
+
+       if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+               cur->post_handler(cur, regs, 0);
+       }
+
+       if (kcb->kprobe_status == KPROBE_REENTER) {
+               restore_previous_kprobe(kcb);
+               goto out;
+       }
+
+       reset_current_kprobe();
+
+out:
+       preempt_enable_no_resched();
+       return 1;
+}
+
+/*
+ * Fault can be for the instruction being single stepped or for the
+ * pre/post handlers in the module.
+ * This is applicable for applications like user probes, where we have the
+ * probe in user space and the handlers in the kernel
+ */
+
+/*
+ * Called when a fault is taken while a kprobe is in flight.  Returns 1
+ * if the fault was consumed here (user fault handler or exception
+ * fixup), 0 to let the normal fault path proceed.
+ */
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
+{
+       struct kprobe *cur = kprobe_running();
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       switch (kcb->kprobe_status) {
+       case KPROBE_HIT_SS:
+       case KPROBE_REENTER:
+               /*
+                * We are here because the instruction being single stepped
+                * caused the fault. We reset the current kprobe and allow the
+                * exception handler as if it is regular exception. In our
+                * case it doesn't matter because the system will be halted
+                */
+               resume_execution(cur, (unsigned long)cur->addr, regs);
+
+               if (kcb->kprobe_status == KPROBE_REENTER)
+                       restore_previous_kprobe(kcb);
+               else
+                       reset_current_kprobe();
+
+               preempt_enable_no_resched();
+               break;
+
+       case KPROBE_HIT_ACTIVE:
+       case KPROBE_HIT_SSDONE:
+               /*
+                * We are here because the instructions in the pre/post handler
+                * caused the fault.
+                */
+
+               /* We increment the nmissed count for accounting,
+                * we can also use npre/npostfault count for accouting
+                * these specific fault cases.
+                */
+               kprobes_inc_nmissed_count(cur);
+
+               /*
+                * We come here because instructions in the pre/post
+                * handler caused the page_fault, this could happen
+                * if handler tries to access user space by
+                * copy_from_user(), get_user() etc. Let the
+                * user-specified handler try to fix it first.
+                */
+               if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+                       return 1;
+
+               /*
+                * In case the user-specified fault handler returned zero,
+                * try to fix up.
+                */
+               if (fixup_exception(regs))
+                       return 1;
+
+               /*
+                * fixup_exception() could not handle it,
+                * Let do_page_fault() fix it.
+                */
+               break;
+
+       default:
+               break;
+       }
+       return 0;
+}
+
+/*
+ * notifier_block callback wiring die notifications into kprobes:
+ * DIE_IERR -> breakpoint hit, DIE_TRAP -> single-step completion.
+ * NOTE: the faulting address is carried in args->err here.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+                                      unsigned long val, void *data)
+{
+       struct die_args *args = data;
+       unsigned long addr = args->err;
+       int ret = NOTIFY_DONE;
+
+       switch (val) {
+       case DIE_IERR:
+               if (arc_kprobe_handler(addr, args->regs))
+                       return NOTIFY_STOP;
+               break;
+
+       case DIE_TRAP:
+               if (arc_post_kprobe_handler(addr, args->regs))
+                       return NOTIFY_STOP;
+               break;
+
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * jprobe pre-handler: snapshot regs and the live stack, then divert
+ * the return address to the jprobe entry.  Returns 1 so the caller
+ * skips single-stepping (see arc_kprobe_handler()).
+ */
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct jprobe *jp = container_of(p, struct jprobe, kp);
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       unsigned long sp_addr = regs->sp;
+
+       kcb->jprobe_saved_regs = *regs;
+       memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
+       regs->ret = (unsigned long)(jp->entry);
+
+       return 1;
+}
+
+/*
+ * End of a jprobe handler: execute unimp_s so the kprobe machinery
+ * regains control and longjmp_break_handler() restores the saved state.
+ */
+void __kprobes jprobe_return(void)
+{
+       __asm__ __volatile__("unimp_s");
+}
+
+/*
+ * Counterpart of setjmp_pre_handler(): restore the saved regs and
+ * stack so execution resumes at the original probe site.
+ */
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       unsigned long sp_addr;
+
+       *regs = kcb->jprobe_saved_regs;
+       sp_addr = regs->sp;
+       memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
+       preempt_enable_no_resched();
+
+       return 1;
+}
+
+/* Provides the kretprobe_trampoline symbol (a lone nop) that hijacked
+ * return addresses land on; never called directly.
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+       __asm__ __volatile__(".global kretprobe_trampoline\n"
+                            "kretprobe_trampoline:\n" "nop\n");
+}
+
+/* Save the real return address (blink) and redirect it to the trampoline */
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+                                     struct pt_regs *regs)
+{
+
+       ri->ret_addr = (kprobe_opcode_t *) regs->blink;
+
+       /* Replace the return addr with trampoline addr */
+       regs->blink = (unsigned long)&kretprobe_trampoline;
+}
+
+/*
+ * Hit when a hijacked return lands on kretprobe_trampoline: walk this
+ * task's kretprobe instances, run their handlers, recover the real
+ * return address and resume there.  Returns 1 to suppress the normal
+ * post handler.
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+                                             struct pt_regs *regs)
+{
+       struct kretprobe_instance *ri = NULL;
+       struct hlist_head *head, empty_rp;
+       struct hlist_node *tmp;
+       unsigned long flags, orig_ret_address = 0;
+       unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+       INIT_HLIST_HEAD(&empty_rp);
+       kretprobe_hash_lock(current, &head, &flags);
+
+       /*
+        * It is possible to have multiple instances associated with a given
+        * task either because multiple functions in the call path
+        * have a return probe installed on them, and/or more than one
+        * return probe was registered for a target function.
+        *
+        * We can handle this because:
+        *     - instances are always inserted at the head of the list
+        *     - when multiple return probes are registered for the same
+        *       function, the first instance's ret_addr will point to the
+        *       real return address, and all the rest will point to
+        *       kretprobe_trampoline
+        */
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+
+               if (ri->rp && ri->rp->handler)
+                       ri->rp->handler(ri, regs);
+
+               orig_ret_address = (unsigned long)ri->ret_addr;
+               recycle_rp_inst(ri, &empty_rp);
+
+               if (orig_ret_address != trampoline_address) {
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+               }
+       }
+
+       kretprobe_assert(ri, orig_ret_address, trampoline_address);
+       regs->ret = orig_ret_address;
+
+       reset_current_kprobe();
+       kretprobe_hash_unlock(current, &flags);
+       preempt_enable_no_resched();
+
+       /* free the recycled instances outside the hash lock */
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+               hlist_del(&ri->hlist);
+               kfree(ri);
+       }
+
+       /* By returning a non zero value, we are telling the kprobe handler
+        * that we don't want the post_handler to run
+        */
+       return 1;
+}
+
+/* Permanent kprobe on the trampoline itself; its pre-handler does all
+ * the kretprobe return processing.
+ */
+static struct kprobe trampoline_p = {
+       .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+       .pre_handler = trampoline_probe_handler
+};
+
+/* Arch init: install the trampoline probe used by kretprobes */
+int __init arch_init_kprobes(void)
+{
+       /* Registering the trampoline code for the kret probe */
+       return register_kprobe(&trampoline_p);
+}
+
+/* Tell generic code whether @p is our internal trampoline probe */
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+       if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
+               return 1;
+
+       return 0;
+}
+
+/* Trap-handler hook: route a trap to kprobes via the die notifier chain */
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+                   struct pt_regs *regs)
+{
+       notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
+}
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
new file mode 100644 (file)
index 0000000..cdd3593
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <asm/unwind.h>
+
+/* Write a 32-bit value in ARC "middle-endian" order: the two 16-bit
+ * halves are stored high-half first at @addr and @addr+1.
+ */
+static inline void arc_write_me(unsigned short *addr, unsigned long value)
+{
+       *addr = (value & 0xffff0000) >> 16;
+       *(addr + 1) = (value & 0xffff);
+}
+
+/* ARC specific section quirks - before relocation loop in generic loader
+ *
+ * For dwarf unwinding out of modules, this needs to
+ * 1. Ensure the .debug_frame is allocatable (ARC Linker bug: despite
+ *    -fasynchronous-unwind-tables it doesn't).
+ * 2. Since we are iterating thru sec hdr tbl anyways, make a note of
+ *    the exact section index, for later use.
+ */
+/*
+ * Pre-relocation section fixups: force .debug_frame to be SHF_ALLOC so
+ * the unwinder can use it at runtime, and remember its section index
+ * for module_finalize().  Always returns 0.
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+                             char *secstr, struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+       int i;
+
+       mod->arch.unw_sec_idx = 0;
+       mod->arch.unw_info = NULL;
+
+       for (i = 1; i < hdr->e_shnum; i++) {
+               if (strcmp(secstr+sechdrs[i].sh_name, ".debug_frame") == 0) {
+                       sechdrs[i].sh_flags |= SHF_ALLOC;
+                       mod->arch.unw_sec_idx = i;
+                       break;
+               }
+       }
+#endif
+       return 0;
+}
+
+/* On module unload, drop the unwind table registered in module_finalize() */
+void module_arch_cleanup(struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+       if (mod->arch.unw_info)
+               unwind_remove_table(mod->arch.unw_info, 0);
+#endif
+}
+
+/*
+ * Apply RELA relocations for one section.  Only two types are handled:
+ * R_ARC_32_ME (middle-endian 32-bit, the common case with -mlong-calls)
+ * and R_ARC_32 (plain 32-bit); anything else fails with -ENOEXEC.
+ */
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+                      const char *strtab,
+                      unsigned int symindex,   /* sec index for sym tbl */
+                      unsigned int relsec,     /* sec index for relo sec */
+                      struct module *module)
+{
+       int i, n;
+       Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
+       Elf32_Sym *sym_entry, *sym_sec;
+       Elf32_Addr relocation;
+       Elf32_Addr location;
+       Elf32_Addr sec_to_patch;
+       int relo_type;
+
+       sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+       sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
+       n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
+
+       pr_debug("\n========== Module Sym reloc ===========================\n");
+       pr_debug("Section to fixup %x\n", sec_to_patch);
+       pr_debug("=========================================================\n");
+       pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+       pr_debug("=========================================================\n");
+
+       /* Loop thru entries in relocation section */
+       for (i = 0; i < n; i++) {
+
+               /* This is where to make the change */
+               location = sec_to_patch + rel_entry[i].r_offset;
+
+               /* This is the symbol it is referring to.  Note that all
+                  undefined symbols have been resolved.  */
+               sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
+
+               relocation = sym_entry->st_value + rel_entry[i].r_addend;
+
+               pr_debug("\t%x\t\t%x\t\t%x  %x %x [%s]\n",
+                       rel_entry[i].r_offset, rel_entry[i].r_addend,
+                       sym_entry->st_value, location, relocation,
+                       strtab + sym_entry->st_name);
+
+               /* This assumes modules are built with -mlong-calls
+                * so any branches/jumps are absolute 32 bit jmps
+                * global data access again is abs 32 bit.
+                * Both of these are handled by same relocation type
+                */
+               relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
+
+               if (likely(R_ARC_32_ME == relo_type))
+                       arc_write_me((unsigned short *)location, relocation);
+               else if (R_ARC_32 == relo_type)
+                       *((Elf32_Addr *) location) = relocation;
+               else
+                       goto relo_err;
+
+       }
+       return 0;
+
+relo_err:
+       pr_err("%s: unknown relocation: %u\n",
+               module->name, ELF32_R_TYPE(rel_entry[i].r_info));
+       return -ENOEXEC;
+
+}
+
+/* Just before lift off: After sections have been relocated, we add the
+ * dwarf section to unwinder table pool
+ * This couldn't be done in module_frob_arch_sections() because
+ * relocations had not been applied by then
+ */
+/*
+ * Post-relocation: register the module's (now relocated) .debug_frame
+ * with the dwarf unwinder, using the index saved by
+ * module_frob_arch_sections().  Always returns 0.
+ */
+int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+                   struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+       void *unw;
+       int unwsec = mod->arch.unw_sec_idx;
+
+       if (unwsec) {
+               unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
+                                      sechdrs[unwsec].sh_size);
+               mod->arch.unw_info = unw;
+       }
+#endif
+       return 0;
+}
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
new file mode 100644 (file)
index 0000000..0a7531d
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/elf.h>
+#include <linux/tick.h>
+
+/* sys_arc_settls: stash the userland TLS data pointer in thread_info */
+SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
+{
+       task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
+       return 0;
+}
+
+/*
+ * We return the user space TLS data ptr as sys-call return code.
+ * Ideally it should be copied to user.
+ * However we can cheat by the fact that some sys-calls do return
+ * absurdly high values.
+ * Since the TLS data ptr is not going to be in range of 0xFFFF_xxxx
+ * it won't be considered a sys-call error
+ * and it will be loads better than copy-to-user, which is a definite
+ * D-TLB Miss
+ */
+SYSCALL_DEFINE0(arc_gettls)
+{
+       return task_thread_info(current)->thr_ptr;
+}
+
+/* Put the core to sleep until the next interrupt */
+static inline void arch_idle(void)
+{
+       /* sleep, but enable all interrupts before committing */
+       /* NOTE(review): operand 0x3 presumably enables both IRQ priority
+        * levels while asleep - confirm against the ARC ISA manual */
+       __asm__("sleep 0x3");
+}
+
+/* Architecture idle loop: SLEEP until an interrupt signals pending work */
+void cpu_idle(void)
+{
+       /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
+
+       /* endless idle loop with no priority at all */
+       while (1) {
+               tick_nohz_idle_enter();
+               rcu_idle_enter();
+
+doze:
+               /* irqs off before the need_resched() check closes the race
+                * between "no work" and actually going to sleep; arch_idle()
+                * re-enables interrupts as part of the sleep (see above)
+                */
+               local_irq_disable();
+               if (!need_resched()) {
+                       arch_idle();
+                       goto doze;
+               } else {
+                       local_irq_enable();
+               }
+
+               rcu_idle_exit();
+               tick_nohz_idle_exit();
+
+               schedule_preempt_disabled();
+       }
+}
+
+asmlinkage void ret_from_fork(void);
+
+/* Layout of Child kernel mode stack as setup at the end of this function is
+ *
+ * |     ...        |
+ * |     ...        |
+ * |    unused      |
+ * |                |
+ * ------------------  <==== top of Stack (thread.ksp)
+ * |   UNUSED 1 word|
+ * ------------------
+ * |     r25        |
+ * ~                ~
+ * |    --to--      |   (CALLEE Regs of user mode)
+ * |     r13        |
+ * ------------------
+ * |     fp         |
+ * |    blink       |   @ret_from_fork
+ * ------------------
+ * |                |
+ * ~                ~
+ * ~                ~
+ * |                |
+ * ------------------
+ * |     r12        |
+ * ~                ~
+ * |    --to--      |   (scratch Regs of user mode)
+ * |     r0         |
+ * ------------------
+ * |   UNUSED 1 word|
+ * ------------------  <===== END of PAGE
+ */
+int copy_thread(unsigned long clone_flags,
+               unsigned long usp, unsigned long arg,
+               struct task_struct *p)
+{
+       struct pt_regs *c_regs;        /* child's pt_regs */
+       unsigned long *childksp;       /* to unwind out of __switch_to() */
+       struct callee_regs *c_callee;  /* child's callee regs */
+       struct callee_regs *parent_callee;  /* parent's callee regs */
+       struct pt_regs *regs = current_pt_regs();
+
+       /* Mark the specific anchors to begin with (see pic above) */
+       c_regs = task_pt_regs(p);
+       childksp = (unsigned long *)c_regs - 2;  /* 2 words for FP/BLINK */
+       c_callee = ((struct callee_regs *)childksp) - 1;
+
+       /*
+        * __switch_to() uses thread.ksp to start unwinding stack
+        * For kernel threads we don't need to create callee regs, the
+        * stack layout nevertheless needs to remain the same.
+        * Also, since __switch_to anyways unwinds callee regs, we use
+        * this to populate kernel thread entry-pt/args into callee regs,
+        * so that ret_from_kernel_thread() becomes simpler.
+        */
+       p->thread.ksp = (unsigned long)c_callee;        /* THREAD_KSP */
+
+       /*
+        * __switch_to expects FP(0), BLINK(return addr) at top of stack.
+        * (done once here for both kernel and user threads; the original
+        * code redundantly repeated this for the user-task path)
+        */
+       childksp[0] = 0;                            /* for POP fp */
+       childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
+
+       if (unlikely(p->flags & PF_KTHREAD)) {
+               memset(c_regs, 0, sizeof(struct pt_regs));
+
+               c_callee->r13 = arg; /* argument to kernel thread */
+               c_callee->r14 = usp;  /* function */
+
+               return 0;
+       }
+
+       /*--------- User Task Only --------------*/
+
+       /* Copy parents pt regs on child's kernel mode stack */
+       *c_regs = *regs;
+
+       if (usp)
+               c_regs->sp = usp;
+
+       c_regs->r0 = 0;         /* fork returns 0 in child */
+
+       parent_callee = ((struct callee_regs *)regs) - 1;
+       *c_callee = *parent_callee;
+
+       if (unlikely(clone_flags & CLONE_SETTLS)) {
+               /*
+                * set task's userland tls data ptr from 4th arg
+                * clone C-lib call is difft from clone sys-call
+                */
+               task_thread_info(p)->thr_ptr = regs->r3;
+       } else {
+               /* Normal fork case: set parent's TLS ptr in child */
+               task_thread_info(p)->thr_ptr =
+               task_thread_info(current)->thr_ptr;
+       }
+
+       return 0;
+}
+
+/*
+ * Some archs flush debug and FPU info here.
+ * ARC has no per-thread debug/FPU state to flush on exec, so this is empty.
+ */
+void flush_thread(void)
+{
+}
+
+/*
+ * Free any architecture-specific thread data structures, etc.
+ * Nothing arch-specific to free on ARC.
+ */
+void exit_thread(void)
+{
+}
+
+/* Core-dump hook: returns 0 => no FPU register state to dump for ARC */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+       return 0;
+}
+
+/*
+ * API: expected by scheduler Code: If thread is sleeping where is that.
+ * What is this good for? it will be always the scheduler or ret_from_fork.
+ * So we hard code that anyways.
+ */
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+       struct pt_regs *regs = task_pt_regs(t);
+       unsigned long blink = 0;
+
+       /*
+        * If the thread being queried for in not itself calling this, then it
+        * implies it is not executing, which in turn implies it is sleeping,
+        * which in turn implies it got switched OUT by the scheduler.
+        * In that case, its kernel mode blink can reliably be retrieved as per
+        * the picture above (right above pt_regs).
+        */
+       if (t != current && t->state != TASK_RUNNING)
+               blink = *((unsigned int *)regs - 1);
+
+       return blink;
+}
+
+/*
+ * Validate an ELF header for this arch: machine must be ARCompact and the
+ * OSABI encoded in e_flags must be at least the kernel's current ABI.
+ * Returns 1 if loadable, 0 otherwise (an ABI mismatch also SIGSEGVs the
+ * caller so the exec fails visibly rather than silently misbehaving).
+ */
+int elf_check_arch(const struct elf32_hdr *x)
+{
+       unsigned int eflags;
+
+       if (x->e_machine != EM_ARCOMPACT)
+               return 0;
+
+       eflags = x->e_flags;
+       if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+               pr_err("ABI mismatch - you need newer toolchain\n");
+               force_sigsegv(SIGSEGV, current);
+               return 0;
+       }
+
+       return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
new file mode 100644 (file)
index 0000000..c6a81c5
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/regset.h>
+#include <linux/unistd.h>
+#include <linux/elf.h>
+
+/* Callee-saved regs of a switched-out task, as stashed in thread struct */
+static struct callee_regs *task_callee_regs(struct task_struct *tsk)
+{
+       return (struct callee_regs *)tsk->thread.callee_reg;
+}
+
+/*
+ * Regset callback: export the traced task's registers in
+ * user_regs_struct layout - scratch regs from pt_regs, callee-saved regs
+ * from the switched-out callee_regs, the fault address, then a
+ * synthesized stop_pc.  Returns 0 or the first copyout error.
+ */
+static int genregs_get(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      void *kbuf, void __user *ubuf)
+{
+       const struct pt_regs *ptregs = task_pt_regs(target);
+       const struct callee_regs *cregs = task_callee_regs(target);
+       int ret = 0;
+       unsigned int stop_pc_val;
+
+/* Copy out fields [START, END) of user_regs_struct from PTR; becomes a
+ * no-op once an earlier copy has failed (ret != 0)
+ */
+#define REG_O_CHUNK(START, END, PTR)   \
+       if (!ret)       \
+               ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+                       offsetof(struct user_regs_struct, START), \
+                       offsetof(struct user_regs_struct, END));
+
+/* Copy out the single 4-byte field LOC from PTR */
+#define REG_O_ONE(LOC, PTR)    \
+       if (!ret)               \
+               ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+                       offsetof(struct user_regs_struct, LOC), \
+                       offsetof(struct user_regs_struct, LOC) + 4);
+
+       REG_O_CHUNK(scratch, callee, ptregs);
+       REG_O_CHUNK(callee, efa, cregs);
+       REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
+
+       if (!ret) {
+               /* stop_pc: fault address if stopped at a breakpoint trap,
+                * otherwise the return/resume PC from pt_regs
+                */
+               if (in_brkpt_trap(ptregs)) {
+                       stop_pc_val = target->thread.fault_address;
+                       pr_debug("\t\tstop_pc (brk-pt)\n");
+               } else {
+                       stop_pc_val = ptregs->ret;
+                       pr_debug("\t\tstop_pc (others)\n");
+               }
+
+               REG_O_ONE(stop_pc, &stop_pc_val);
+       }
+
+       return ret;
+}
+
+/*
+ * Regset callback: import registers from the tracer into the traced task.
+ * Mirror of genregs_get(); efa is read-only so updates to it are ignored.
+ * Returns 0 or the first copyin error.
+ *
+ * NOTE(review): ptregs/cregs are declared const but the macros cast that
+ * away via (void *) to write through them - works, but the const
+ * qualifiers are misleading here.
+ */
+static int genregs_set(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      const void *kbuf, const void __user *ubuf)
+{
+       const struct pt_regs *ptregs = task_pt_regs(target);
+       const struct callee_regs *cregs = task_callee_regs(target);
+       int ret = 0;
+
+/* Copy in fields [FIRST, NEXT) of user_regs_struct into PTR; no-op after
+ * an earlier failure
+ */
+#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
+       if (!ret)                       \
+               ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+                       (void *)(PTR), \
+                       offsetof(struct user_regs_struct, FIRST), \
+                       offsetof(struct user_regs_struct, NEXT));
+
+/* Copy in the single 4-byte field LOC into PTR */
+#define REG_IN_ONE(LOC, PTR)           \
+       if (!ret)                       \
+               ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+                       (void *)(PTR), \
+                       offsetof(struct user_regs_struct, LOC), \
+                       offsetof(struct user_regs_struct, LOC) + 4);
+
+/* Skip over the 4-byte field LOC without writing anything */
+#define REG_IGNORE_ONE(LOC)            \
+       if (!ret)                       \
+               ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
+                       offsetof(struct user_regs_struct, LOC), \
+                       offsetof(struct user_regs_struct, LOC) + 4);
+
+       /* TBD: disallow updates to STATUS32, orig_r8 etc*/
+       REG_IN_CHUNK(scratch, callee, ptregs);  /* pt_regs[bta..orig_r8] */
+       REG_IN_CHUNK(callee, efa, cregs);       /* callee_regs[r25..r13] */
+       REG_IGNORE_ONE(efa);                    /* efa update invalid */
+       REG_IN_ONE(stop_pc, &ptregs->ret);      /* stop_pc: PC update */
+
+       return ret;
+}
+
+/* Index of each regset exposed via ptrace/core-dumps */
+enum arc_getset {
+       REGSET_GENERAL,
+};
+
+/* Single GP regset, backed by genregs_get/genregs_set above */
+static const struct user_regset arc_regsets[] = {
+       [REGSET_GENERAL] = {
+              .core_note_type = NT_PRSTATUS,
+              .n = ELF_NGREG,
+              .size = sizeof(unsigned long),
+              .align = sizeof(unsigned long),
+              .get = genregs_get,
+              .set = genregs_set,
+       }
+};
+
+/* The regset view handed out by task_user_regset_view() */
+static const struct user_regset_view user_arc_view = {
+       .name           = UTS_MACHINE,
+       .e_machine      = EM_ARCOMPACT,
+       .regsets        = arc_regsets,
+       .n              = ARRAY_SIZE(arc_regsets)
+};
+
+/* Same regset view for every task - ARC has only one ABI flavour here */
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+       return &user_arc_view;
+}
+
+/* Called when tracing ends; no arch-specific single-step state to clear */
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+/*
+ * Arch entry point for ptrace(2); no ARC-specific requests yet, so
+ * everything falls through to the generic ptrace_request().
+ */
+long arch_ptrace(struct task_struct *child, long request,
+                unsigned long addr, unsigned long data)
+{
+       int ret = -EIO;
+
+       pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
+
+       switch (request) {
+       default:
+               ret = ptrace_request(child, request, addr, data);
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Called from entry code before a traced syscall runs; returns the
+ * syscall number (r8), or a sentinel if the tracer aborted the call.
+ *
+ * NOTE(review): ULONG_MAX is truncated to -1 in the int return type -
+ * presumably an intentional "skip syscall" sentinel; confirm the asm
+ * caller's expectation.
+ */
+asmlinkage int syscall_trace_entry(struct pt_regs *regs)
+{
+       if (tracehook_report_syscall_entry(regs))
+               return ULONG_MAX;
+
+       return regs->r8;
+}
+
+/* Called from entry code after a traced syscall completes */
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+       tracehook_report_syscall_exit(regs, 0);
+}
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
new file mode 100644 (file)
index 0000000..e227a2b
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+
+void machine_halt(void)
+{
+       /* Halt the processor: FLAG 1 sets the halt bit in STATUS32 */
+       __asm__ __volatile__("flag  1\n");
+}
+
+/* No generic soft-reset mechanism: platforms must supply one; halt for now */
+void machine_restart(char *__unused)
+{
+       /* Soft reset : jump to reset vector */
+       pr_info("Put your restart handler here\n");
+       machine_halt();
+}
+
+/* No power-off hardware support yet; just halt the core */
+void machine_power_off(void)
+{
+       /* FIXME ::  power off ??? */
+       machine_halt();
+}
+
+/* Platform hook for poweroff; no platform installs one yet */
+void (*pm_power_off) (void) = NULL;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
new file mode 100644 (file)
index 0000000..dc0f968
--- /dev/null
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/of_fdt.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+#include <asm/cache.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/prom.h>
+#include <asm/unwind.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define FIX_PTR(x)  __asm__ __volatile__(";" : "+r"(x))
+
+int running_on_hw = 1; /* vs. on ISS */
+
+char __initdata command_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;
+
+struct task_struct *_current_task[NR_CPUS];    /* For stack switching */
+
+struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+
+
+/*
+ * Populate this CPU's cpuinfo_arc700[] slot by decoding the hardware
+ * Build Configuration Registers (identity, timers, vector base,
+ * uncached window, optional extensions, CCMs, MMU/cache, FPU).
+ */
+void __init read_arc_build_cfg_regs(void)
+{
+       struct bcr_perip uncached_space;
+       struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+       FIX_PTR(cpu);
+
+       READ_BCR(AUX_IDENTITY, cpu->core);
+
+       cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
+
+       /* fall back to the linker-provided vector base if BCR reads 0 */
+       cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
+       if (cpu->vec_base == 0)
+               cpu->vec_base = (unsigned int)_int_vec_base_lds;
+
+       READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
+       cpu->uncached_base = uncached_space.start << 24;
+
+       cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR);
+       cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR);
+       cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR);
+       cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR);
+       cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR);
+       READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul);
+
+       cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR);
+       cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR);
+
+       /* Note that we read the CCM BCRs independent of kernel config
+        * This is to catch the cases where user doesn't know that
+        * CCMs are present in hardware build
+        */
+       {
+               struct bcr_iccm iccm;
+               struct bcr_dccm dccm;
+               struct bcr_dccm_base dccm_base;
+               unsigned int bcr_32bit_val;
+
+               bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
+               if (bcr_32bit_val) {
+                       iccm = *((struct bcr_iccm *)&bcr_32bit_val);
+                       cpu->iccm.base_addr = iccm.base << 16;
+                       cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
+               }
+
+               bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
+               if (bcr_32bit_val) {
+                       dccm = *((struct bcr_dccm *)&bcr_32bit_val);
+                       cpu->dccm.sz = 0x800 << (dccm.sz);
+
+                       /* DCCM base lives in a separate BCR */
+                       READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
+                       cpu->dccm.base_addr = dccm_base.addr << 8;
+               }
+       }
+
+       READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
+
+       read_decode_mmu_bcr();
+       read_decode_cache_bcr();
+
+       READ_BCR(ARC_REG_FP_BCR, cpu->fp);
+       READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp);
+}
+
+/* Map core family id ranges [info.id, up_range] to CPU names; NULL ends it */
+static const struct cpuinfo_data arc_cpu_tbl[] = {
+       { {0x10, "ARCTangent A5"}, 0x1F},
+       { {0x20, "ARC 600"      }, 0x2F},
+       { {0x30, "ARC 700"      }, 0x33},
+       { {0x34, "ARC 700 R4.10"}, 0x34},
+       { {0x00, NULL           } }
+};
+
+/*
+ * Format the core identity block (family/cpu/chip ids, CPU name, clock,
+ * timers, vector base, uncached base) for cpu_id into buf (max len bytes).
+ * Returns buf for direct use as a printk/seq_printf argument.
+ */
+char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+       int n = 0;
+       struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+       struct bcr_identity *core = &cpu->core;
+       const struct cpuinfo_data *tbl;
+       int be = 0;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       be = 1;
+#endif
+       FIX_PTR(cpu);
+
+       n += scnprintf(buf + n, len - n,
+                      "\nARC IDENTITY\t: Family [%#02x]"
+                      " Cpu-id [%#02x] Chip-id [%#4x]\n",
+                      core->family, core->cpu_id,
+                      core->chip_id);
+
+       /* find the name whose [id, up_range] bracket covers this family */
+       for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
+               if ((core->family >= tbl->info.id) &&
+                   (core->family <= tbl->up_range)) {
+                       n += scnprintf(buf + n, len - n,
+                                      "processor\t: %s %s\n",
+                                      tbl->info.str,
+                                      be ? "[Big Endian]" : "");
+                       break;
+               }
+       }
+
+       /* loop ran off the sentinel => family id not in table */
+       if (tbl->info.id == 0)
+               n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+
+       n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
+                      (unsigned int)(arc_get_core_freq() / 1000000),
+                      (unsigned int)(arc_get_core_freq() / 10000) % 100);
+
+       n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n",
+                      (cpu->timers & 0x200) ? "TIMER1" : "",
+                      (cpu->timers & 0x100) ? "TIMER0" : "");
+
+       n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n",
+                      cpu->vec_base);
+
+       n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n",
+                      cpu->uncached_base);
+
+       return buf;
+}
+
+/* Names for the MUL BCR value (indexed directly by cpu->extn.mul) */
+static const struct id_to_str mul_type_nm[] = {
+       { 0x0, "N/A"},
+       { 0x1, "32x32 (spl Result Reg)" },
+       { 0x2, "32x32 (ANY Result Reg)" }
+};
+
+/* Names for the MAC-multiply BCR type field (indexed by extn_mac_mul.type) */
+static const struct id_to_str mac_mul_nm[] = {
+       {0x0, "N/A"},
+       {0x1, "N/A"},
+       {0x2, "Dual 16 x 16"},
+       {0x3, "N/A"},
+       {0x4, "32x16"},
+       {0x5, "N/A"},
+       {0x6, "Dual 16x16 and 32x16"}
+};
+
+/*
+ * Format the optional-extension summary (base extns, multiplier flavours,
+ * CCMs, FPU, OS ABI) for cpu_id into buf (max len bytes); returns buf.
+ *
+ * NOTE(review): mul_type_nm/mac_mul_nm are indexed directly by BCR
+ * values with no bounds check - assumes hardware only reports values
+ * covered by the tables above.
+ */
+char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
+{
+       int n = 0;
+       struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+
+       FIX_PTR(cpu);
+/* helpers: print str if the BCR flag indicates presence (1 or ==2) */
+#define IS_AVAIL1(var, str)    ((var) ? str : "")
+#define IS_AVAIL2(var, str)    ((var == 0x2) ? str : "")
+#define IS_USED(var)           ((var) ? "(in-use)" : "(not used)")
+
+       n += scnprintf(buf + n, len - n,
+                      "Extn [700-Base]\t: %s %s %s %s %s %s\n",
+                      IS_AVAIL2(cpu->extn.norm, "norm,"),
+                      IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"),
+                      IS_AVAIL1(cpu->extn.swap, "swap,"),
+                      IS_AVAIL2(cpu->extn.minmax, "minmax,"),
+                      IS_AVAIL1(cpu->extn.crc, "crc,"),
+                      IS_AVAIL2(cpu->extn.ext_arith, "ext-arith"));
+
+       n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s",
+                      mul_type_nm[cpu->extn.mul].str);
+
+       n += scnprintf(buf + n, len - n, "   MAC MPY: %s\n",
+                      mac_mul_nm[cpu->extn_mac_mul.type].str);
+
+       /* family 0x34 = ARC 700 R4.10: report the new-in-4.10 features */
+       if (cpu->core.family == 0x34) {
+               n += scnprintf(buf + n, len - n,
+               "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
+                              IS_USED(__CONFIG_ARC_HAS_LLSC_VAL),
+                              IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL),
+                              IS_USED(__CONFIG_ARC_HAS_RTSC_VAL));
+       }
+
+       n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
+                      !(cpu->dccm.sz || cpu->iccm.sz) ? "N/A" : "");
+
+       if (cpu->dccm.sz)
+               n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ",
+                              cpu->dccm.base_addr, TO_KB(cpu->dccm.sz));
+
+       if (cpu->iccm.sz)
+               n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB",
+                              cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
+
+       n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s",
+                      !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : "");
+
+       if (cpu->fp.ver)
+               n += scnprintf(buf + n, len - n, "SP [v%d] %s",
+                              cpu->fp.ver, cpu->fp.fast ? "(fast)" : "");
+
+       if (cpu->dpfp.ver)
+               n += scnprintf(buf + n, len - n, "DP [v%d] %s",
+                              cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : "");
+
+       n += scnprintf(buf + n, len - n, "\n");
+
+       /* defined iff the kernel is built against the generic syscall ABI */
+#ifdef _ASM_GENERIC_UNISTD_H
+       n += scnprintf(buf + n, len - n,
+                      "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
+#endif
+
+       return buf;
+}
+
+/*
+ * Sanity-check that the CCM base/size the kernel was built with matches
+ * what the hardware actually reports; panic on mismatch since CCM
+ * accesses would otherwise silently hit the wrong memory.
+ */
+void __init arc_chk_ccms(void)
+{
+#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
+       struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+#ifdef CONFIG_ARC_HAS_DCCM
+       /*
+        * DCCM can be arbit placed in hardware.
+        * Make sure it's placement/sz matches what Linux is built with
+        */
+       if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
+               panic("Linux built with incorrect DCCM Base address\n");
+
+       if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
+               panic("Linux built with incorrect DCCM Size\n");
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICCM
+       if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
+               panic("Linux built with incorrect ICCM Size\n");
+#endif
+#endif
+}
+
+/*
+ * Ensure that FP hardware and kernel config match
+ * -If hardware contains DPFP, kernel needs to save/restore FPU state
+ *  across context switches
+ * -If hardware lacks DPFP, but kernel configured to save FPU state then
+ *  kernel trying to access non-existent DPFP regs will crash
+ *
+ * We only check for Dbl precision Floating Point, because only DPFP
+ * hardware has dedicated regs which need to be saved/restored on ctx-sw
+ * (Single Precision uses core regs), thus kernel is kind of oblivious to it
+ */
+void __init arc_chk_fpu(void)
+{
+       struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+       if (cpu->dpfp.ver) {
+#ifndef CONFIG_ARC_FPU_SAVE_RESTORE
+               /* HW has DPFP but kernel won't preserve it: warn, don't die */
+               pr_warn("DPFP support broken in this kernel...\n");
+#endif
+       } else {
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+               /* kernel would touch non-existent DPFP regs: fatal */
+               panic("H/w lacks DPFP support, apps won't work\n");
+#endif
+       }
+}
+
+/*
+ * Initialize and setup the processor core
+ * This is called by all the CPUs thus should not do special case stuff
+ *    such as only for boot CPU etc
+ */
+
+void __init setup_processor(void)
+{
+       char str[512];
+       int cpu_id = smp_processor_id();
+
+       read_arc_build_cfg_regs();
+       arc_init_IRQ();
+
+       /* "%s" guard: never pass a generated buffer as the format string */
+       printk("%s", arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
+
+       arc_mmu_init();
+       arc_cache_init();
+       arc_chk_ccms();
+
+       printk("%s", arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
+
+#ifdef CONFIG_SMP
+       printk("%s", arc_platform_smp_cpuinfo());
+#endif
+
+       arc_chk_fpu();
+}
+
+/*
+ * Arch-level boot setup: assemble the kernel command line, parse the
+ * flattened device tree, bring up the boot CPU, set up memory, and
+ * initialize the console/unwinder plumbing.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+#ifdef CONFIG_CMDLINE_UBOOT
+       /* Make sure that a whitespace is inserted before */
+       strlcat(command_line, " ", sizeof(command_line));
+#endif
+       /*
+        * Append .config cmdline to base command line, which might already
+        * contain u-boot "bootargs" (handled by head.S, if so configured)
+        */
+       strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+
+       /* Save unparsed command line copy for /proc/cmdline */
+       strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+       *cmdline_p = command_line;
+
+       /* DT is mandatory on ARC - no DT, no boot */
+       machine_desc = setup_machine_fdt(__dtb_start);
+       if (!machine_desc)
+               panic("Embedded DT invalid\n");
+
+       /* To force early parsing of things like mem=xxx */
+       parse_early_param();
+
+       /* Platform/board specific: e.g. early console registration */
+       if (machine_desc->init_early)
+               machine_desc->init_early();
+
+       setup_processor();
+
+#ifdef CONFIG_SMP
+       smp_init_cpus();
+#endif
+
+       setup_arch_memory();
+
+       /* copy flat DT out of .init and then unflatten it */
+       copy_devtree();
+       unflatten_device_tree();
+
+       /* Can be issue if someone passes cmd line arg "ro"
+        * But that is unlikely so keeping it as it is
+        */
+       root_mountflags &= ~MS_RDONLY;
+
+       console_verbose();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+       conswitchp = &dummy_con;
+#endif
+
+       arc_unwind_init();
+       arc_unwind_setup();
+}
+
+/* arch_initcall: let the board/platform register its devices */
+static int __init customize_machine(void)
+{
+       /* Add platform devices */
+       if (machine_desc->init_machine)
+               machine_desc->init_machine();
+
+       return 0;
+}
+arch_initcall(customize_machine);
+
+/* late_initcall: board hook run after all other initcalls */
+static int __init init_late_machine(void)
+{
+       if (machine_desc->init_late)
+               machine_desc->init_late();
+
+       return 0;
+}
+late_initcall(init_late_machine);
+/*
+ *  Get CPU information for use by the procfs.
+ */
+
+#define cpu_to_ptr(c)  ((void *)(0xFFFF0000 | (unsigned int)(c)))
+#define ptr_to_cpu(p)  (~0xFFFF0000UL & (unsigned int)(p))
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+       char *str;
+       int cpu_id = ptr_to_cpu(v);
+
+       str = (char *)__get_free_page(GFP_TEMPORARY);
+       if (!str)
+               goto done;
+
+       seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+       seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n",
+                  loops_per_jiffy / (500000 / HZ),
+                  (loops_per_jiffy / (5000 / HZ)) % 100);
+
+       seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+       seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+       seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+#ifdef CONFIG_SMP
+       seq_printf(m, arc_platform_smp_cpuinfo());
+#endif
+
+       free_page((unsigned long)str);
+done:
+       seq_printf(m, "\n\n");
+
+       return 0;
+}
+
+/* seq_file iterator start: hand an encoded cpu-id to show_cpuinfo() */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+       /*
+        * Callback returns cpu-id to iterator for show routine, NULL to stop.
+        * However since NULL is also a valid cpu-id (0), we use a round-about
+        * way to pass it w/o having to kmalloc/free a 2 byte string.
+        * Encode cpu-id as 0xFFcccc, which is decoded by show routine.
+        */
+       return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+}
+
+/* seq_file iterator next: advance to the following cpu (or NULL at end) */
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       ++*pos;
+       return c_start(m, pos);
+}
+
+/* seq_file iterator stop: nothing to release */
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+/* Iterator ops backing /proc/cpuinfo */
+const struct seq_operations cpuinfo_op = {
+       .start  = c_start,
+       .next   = c_next,
+       .stop   = c_stop,
+       .show   = show_cpuinfo
+};
+
+/* Per-cpu device objects registered with the sysfs cpu subsystem */
+static DEFINE_PER_CPU(struct cpu, cpu_topology);
+
+/* subsys_initcall: expose each present CPU under /sys/devices/system/cpu */
+static int __init topology_init(void)
+{
+       int cpu;
+
+       for_each_present_cpu(cpu)
+           register_cpu(&per_cpu(cpu_topology, cpu), cpu);
+
+       return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
new file mode 100644 (file)
index 0000000..ee6ef2f
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Signal Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2010 (Restarting of timer related syscalls)
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ *  -do_signal() supports TIF_RESTORE_SIGMASK
+ *  -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ *  -sys_rt_sigsuspend() now comes from generic code, so discard arch implemen
+ *  -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
+ *  -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
+ *   the job to do_signal()
+ *
+ * vineetg: July 2009
+ *  -Modified Code to support the uClibc provided userland sigreturn stub
+ *   to avoid kernel synthesizing it on user stack at runtime, costing TLB
+ *   probes and Cache line flushes.
+ *
+ * vineetg: July 2009
+ *  -In stash_usr_regs( ) and restore_usr_regs( ), save/restore of user regs
+ *   is done in block copy rather than one word at a time.
+ *   This saves around 2K of code and improves LMBench lat_sig <catch>
+ *
+ * rajeshwarr: Feb 2009
+ *  - Support for Realtime Signals
+ *
+ * vineetg: Aug 11th 2008: Bug #94183
+ *  -ViXS were still seeing crashes when using insmod to load drivers.
+ *   It turned out that the code to change Execute permissions for TLB entries
+ *   of user was not guarded for interrupts (mod_tlb_permission)
+ *   This was causing TLB entries to be overwritten on unrelated indexes
+ *
+ * Vineetg: July 15th 2008: Bug #94183
+ *  -Exception happens in Delay slot of a JMP, and before user space resumes,
+ *   Signal is delivered (Ctrl + C) => SIGINT.
+ *   setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
+ *   to run, but doesn't clear the Delay slot bit from status32. As a result,
+ *   on resuming user mode, signal handler branches off to BTA of orig JMP
+ *  -FIX: clear the DE bit from status32 in setup_frame( )
+ *
+ * Rahul Trivedi, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <asm/ucontext.h>
+
+struct rt_sigframe {
+       struct siginfo info;
+       struct ucontext uc;
+#define MAGIC_SIGALTSTK                0x07302004
+       unsigned int sigret_magic;
+};
+
+static int
+stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+              sigset_t *set)
+{
+       int err;
+       err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+                            sizeof(sf->uc.uc_mcontext.regs.scratch));
+       err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+       return err;
+}
+
+static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+{
+       sigset_t set;
+       int err;
+
+       err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+       if (!err)
+               set_current_blocked(&set);
+
+       err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+                               sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+       return err;
+}
+
+static inline int is_do_ss_needed(unsigned int magic)
+{
+       if (MAGIC_SIGALTSTK == magic)
+               return 1;
+       else
+               return 0;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+       struct rt_sigframe __user *sf;
+       unsigned int magic;
+       int err;
+       struct pt_regs *regs = current_pt_regs();
+
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+       /* Since we stacked the signal on a word boundary,
+        * then 'sp' should be word aligned here.  If it's
+        * not, then the user is trying to mess with us.
+        */
+       if (regs->sp & 3)
+               goto badframe;
+
+       sf = (struct rt_sigframe __force __user *)(regs->sp);
+
+       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+               goto badframe;
+
+       err = restore_usr_regs(regs, sf);
+       err |= __get_user(magic, &sf->sigret_magic);
+       if (err)
+               goto badframe;
+
+       if (unlikely(is_do_ss_needed(magic)))
+               if (restore_altstack(&sf->uc.uc_stack))
+                       goto badframe;
+
+       /* Don't restart from sigreturn */
+       syscall_wont_restart(regs);
+
+       return regs->r0;
+
+badframe:
+       force_sig(SIGSEGV, current);
+       return 0;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+                                       struct pt_regs *regs,
+                                       unsigned long framesize)
+{
+       unsigned long sp = regs->sp;
+       void __user *frame;
+
+       /* This is the X/Open sanctioned signal stack switching */
+       if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+               sp = current->sas_ss_sp + current->sas_ss_size;
+
+       /* No matter what happens, 'sp' must be word
+        * aligned otherwise nasty things could happen
+        */
+
+       /* ATPCS B01 mandates 8-byte alignment */
+       frame = (void __user *)((sp - framesize) & ~7);
+
+       /* Check that we can actually write to the signal frame */
+       if (!access_ok(VERIFY_WRITE, frame, framesize))
+               frame = NULL;
+
+       return frame;
+}
+
+/*
+ * translate the signal
+ */
+static inline int map_sig(int sig)
+{
+       struct thread_info *thread = current_thread_info();
+       if (thread->exec_domain && thread->exec_domain->signal_invmap
+           && sig < 32)
+               sig = thread->exec_domain->signal_invmap[sig];
+       return sig;
+}
+
+static int
+setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
+              sigset_t *set, struct pt_regs *regs)
+{
+       struct rt_sigframe __user *sf;
+       unsigned int magic = 0;
+       int err = 0;
+
+       sf = get_sigframe(ka, regs, sizeof(struct rt_sigframe));
+       if (!sf)
+               return 1;
+
+       /*
+        * SA_SIGINFO requires 3 args to signal handler:
+        *  #1: sig-no (common to any handler)
+        *  #2: struct siginfo
+        *  #3: struct ucontext (completely populated)
+        */
+       if (unlikely(ka->sa.sa_flags & SA_SIGINFO)) {
+               err |= copy_siginfo_to_user(&sf->info, info);
+               err |= __put_user(0, &sf->uc.uc_flags);
+               err |= __put_user(NULL, &sf->uc.uc_link);
+               err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
+
+               /* setup args 2 and 3 for user mode handler */
+               regs->r1 = (unsigned long)&sf->info;
+               regs->r2 = (unsigned long)&sf->uc;
+
+               /*
+                * small optim to avoid unconditionally calling do_sigaltstack
+                * in sigreturn path, now that we only have rt_sigreturn
+                */
+               magic = MAGIC_SIGALTSTK;
+       }
+
+       /*
+        * w/o SA_SIGINFO, struct ucontext is partially populated (only
+        * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+        * during signal handler execution. This works for SA_SIGINFO as well
+        * although the semantics are now overloaded (the same reg state can be
+        * inspected by userland: but are they allowed to fiddle with it ?
+        */
+       err |= stash_usr_regs(sf, regs, set);
+       err |= __put_user(magic, &sf->sigret_magic);
+       if (err)
+               return err;
+
+       /* #1 arg to the user Signal handler */
+       regs->r0 = map_sig(signo);
+
+       /* setup PC of user space signal handler */
+       regs->ret = (unsigned long)ka->sa.sa_handler;
+
+       /*
+        * handler returns using sigreturn stub provided already by userspace
+        */
+       BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
+       regs->blink = (unsigned long)ka->sa.sa_restorer;
+
+       /* User Stack for signal handler will be above the frame just carved */
+       regs->sp = (unsigned long)sf;
+
+       /*
+        * Bug 94183, Clear the DE bit, so that when signal handler
+        * starts to run, it doesn't use BTA
+        */
+       regs->status32 &= ~STATUS_DE_MASK;
+       regs->status32 |= STATUS_L_MASK;
+
+       return err;
+}
+
+static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
+{
+       switch (regs->r0) {
+       case -ERESTART_RESTARTBLOCK:
+       case -ERESTARTNOHAND:
+               /*
+                * ERESTARTNOHAND means that the syscall should
+                * only be restarted if there was no handler for
+                * the signal, and since we only get here if there
+                * is a handler, we don't restart
+                */
+               regs->r0 = -EINTR;   /* ERESTART_xxx is internal */
+               break;
+
+       case -ERESTARTSYS:
+               /*
+                * ERESTARTSYS means to restart the syscall if
+                * there is no handler or the handler was
+                * registered with SA_RESTART
+                */
+               if (!(ka->sa.sa_flags & SA_RESTART)) {
+                       regs->r0 = -EINTR;
+                       break;
+               }
+               /* fallthrough */
+
+       case -ERESTARTNOINTR:
+               /*
+                * ERESTARTNOINTR means that the syscall should
+                * be called again after the signal handler returns.
+                * Setup reg state just as it was before doing the trap
+                * r0 has been clobbered with sys call ret code thus it
+                * needs to be reloaded with orig first arg to syscall
+                * in orig_r0. Rest of relevant reg-file:
+                * r8 (syscall num) and (r1 - r7) will be reset to
+                * their orig user space value when we ret from kernel
+                */
+               regs->r0 = regs->orig_r0;
+               regs->ret -= 4;
+               break;
+       }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+             struct pt_regs *regs)
+{
+       sigset_t *oldset = sigmask_to_save();
+       int ret;
+
+       /* Set up the stack frame */
+       ret = setup_rt_frame(sig, ka, info, oldset, regs);
+
+       if (ret)
+               force_sigsegv(sig, current);
+       else
+               signal_delivered(sig, info, ka, regs, 0);
+}
+
+void do_signal(struct pt_regs *regs)
+{
+       struct k_sigaction ka;
+       siginfo_t info;
+       int signr;
+       int restart_scall;
+
+       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+       restart_scall = in_syscall(regs) && syscall_restartable(regs);
+
+       if (signr > 0) {
+               if (restart_scall) {
+                       arc_restart_syscall(&ka, regs);
+                       syscall_wont_restart(regs);     /* No more restarts */
+               }
+               handle_signal(signr, &ka, &info, regs);
+               return;
+       }
+
+       if (restart_scall) {
+               /* No handler for syscall: restart it */
+               if (regs->r0 == -ERESTARTNOHAND ||
+                   regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
+                       regs->r0 = regs->orig_r0;
+                       regs->ret -= 4;
+               } else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
+                       regs->r8 = __NR_restart_syscall;
+                       regs->ret -= 4;
+               }
+               syscall_wont_restart(regs);     /* No more restarts */
+       }
+
+       /* If there's no signal to deliver, restore the saved sigmask back */
+       restore_saved_sigmask();
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+       /*
+        * ASM glue guarantees that this is only called when returning to
+        * user mode
+        */
+       if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+               tracehook_notify_resume(regs);
+}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
new file mode 100644 (file)
index 0000000..3af3e06
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RajeshwarR: Dec 11, 2007
+ *   -- Added support for Inter Processor Interrupts
+ *
+ * Vineetg: Nov 1st, 2007
+ *    -- Initial Write (Borrowed heavily from ARM)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock_types.h>
+#include <linux/reboot.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/mach_desc.h>
+
+arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+struct plat_smp_ops  plat_smp_ops;
+
+/* XXX: per cpu ? Only needed once in early secondary boot */
+struct task_struct *secondary_idle_tsk;
+
+/* Called from start_kernel */
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < NR_CPUS; i++)
+               set_cpu_possible(i, true);
+}
+
+/* called from init ( ) =>  process 1 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+       int i;
+
+       /*
+        * Initialise the present map, which describes the set of CPUs
+        * actually populated at the present time.
+        */
+       for (i = 0; i < max_cpus; i++)
+               set_cpu_present(i, true);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+
+}
+
+/*
+ * After power-up, a non Master CPU needs to wait for Master to kick start it
+ *
+ * The default implementation halts
+ *
+ * This relies on platform specific support allowing Master to directly set
+ * this CPU's PC (to be @first_lines_of_secondary()) and kick start it.
+ *
+ * In lack of such h/w assist, platforms can override this function
+ *   - make this function busy-spin on a token, eventually set by Master
+ *     (from arc_platform_smp_wakeup_cpu())
+ *   - Once token is available, jump to @first_lines_of_secondary
+ *     (using inline asm).
+ *
+ * Alert: can NOT use stack here as it has not been determined/setup for CPU.
+ *        If it turns out to be elaborate, it's better to code it in assembly
+ *
+ */
+void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
+{
+       /*
+        * As a hack for debugging - since debugger will single-step over the
+        * FLAG insn - wrap the halt itself in a self loop
+        */
+       __asm__ __volatile__(
+       "1:             \n"
+       "       flag 1  \n"
+       "       b 1b    \n");
+}
+
+const char *arc_platform_smp_cpuinfo(void)
+{
+       return plat_smp_ops.info;
+}
+
+/*
+ * The very first "C" code executed by secondary
+ * Called from asm stub in head.S
+ * "current"/R25 already setup by low level boot code
+ */
+void __cpuinit start_kernel_secondary(void)
+{
+       struct mm_struct *mm = &init_mm;
+       unsigned int cpu = smp_processor_id();
+
+       /* MMU, Caches, Vector Table, Interrupts etc */
+       setup_processor();
+
+       atomic_inc(&mm->mm_users);
+       atomic_inc(&mm->mm_count);
+       current->active_mm = mm;
+
+       notify_cpu_starting(cpu);
+       set_cpu_online(cpu, true);
+
+       pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
+
+       if (machine_desc->init_smp)
+               machine_desc->init_smp(smp_processor_id());
+
+       arc_local_timer_setup(cpu);
+
+       local_irq_enable();
+       preempt_disable();
+       cpu_idle();
+}
+
+/*
+ * Called from kernel_init( ) -> smp_init( ) - for each CPU
+ *
+ * At this point, Secondary Processor  is "HALT"ed:
+ *  -It booted, but was halted in head.S
+ *  -It was configured to halt-on-reset
+ *  So need to wake it up.
+ *
+ * Essential requirements being where to run from (PC) and stack (SP)
+*/
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+       unsigned long wait_till;
+
+       secondary_idle_tsk = idle;
+
+       pr_info("Idle Task [%d] %p", cpu, idle);
+       pr_info("Trying to bring up CPU%u ...\n", cpu);
+
+       if (plat_smp_ops.cpu_kick)
+               plat_smp_ops.cpu_kick(cpu,
+                               (unsigned long)first_lines_of_secondary);
+
+       /* wait for 1 sec after kicking the secondary */
+       wait_till = jiffies + HZ;
+       while (time_before(jiffies, wait_till)) {
+               if (cpu_online(cpu))
+                       break;
+       }
+
+       if (!cpu_online(cpu)) {
+               pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu);
+               return -1;
+       }
+
+       secondary_idle_tsk = NULL;
+
+       return 0;
+}
+
+/*
+ * not supported here
+ */
+int __init setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
+
+/*****************************************************************************/
+/*              Inter Processor Interrupt Handling                           */
+/*****************************************************************************/
+
+/*
+ * structures for inter-processor calls
+ * A Collection of single bit ipi messages
+ *
+ */
+
+/*
+ * TODO_rajesh investigate tlb message types.
+ * IPI Timer not needed because each ARC has an individual Interrupting Timer
+ */
+enum ipi_msg_type {
+       IPI_NOP = 0,
+       IPI_RESCHEDULE = 1,
+       IPI_CALL_FUNC,
+       IPI_CALL_FUNC_SINGLE,
+       IPI_CPU_STOP
+};
+
+struct ipi_data {
+       unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+{
+       unsigned long flags;
+       unsigned int cpu;
+
+       local_irq_save(flags);
+
+       for_each_cpu(cpu, callmap) {
+               struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+               set_bit(msg, &ipi->bits);
+       }
+
+       /* Call the platform specific cross-CPU call function  */
+       if (plat_smp_ops.ipi_send)
+               plat_smp_ops.ipi_send((void *)callmap);
+
+       local_irq_restore(flags);
+}
+
+void smp_send_reschedule(int cpu)
+{
+       ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+       struct cpumask targets;
+       cpumask_copy(&targets, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), &targets);
+       ipi_send_msg(&targets, IPI_CPU_STOP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+       ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       ipi_send_msg(mask, IPI_CALL_FUNC);
+}
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
+{
+       machine_halt();
+}
+
+static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
+{
+       unsigned long msg = 0;
+
+       do {
+               msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
+
+               switch (msg) {
+               case IPI_RESCHEDULE:
+                       scheduler_ipi();
+                       break;
+
+               case IPI_CALL_FUNC:
+                       generic_smp_call_function_interrupt();
+                       break;
+
+               case IPI_CALL_FUNC_SINGLE:
+                       generic_smp_call_function_single_interrupt();
+                       break;
+
+               case IPI_CPU_STOP:
+                       ipi_cpu_stop(cpu);
+                       break;
+               }
+       } while (msg < BITS_PER_LONG);
+
+}
+
+/*
+ * arch-common ISR to handle for inter-processor interrupts
+ * Has hooks for platform specific IPI
+ */
+irqreturn_t do_IPI(int irq, void *dev_id)
+{
+       int cpu = smp_processor_id();
+       struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+       unsigned long ops;
+
+       if (plat_smp_ops.ipi_clear)
+               plat_smp_ops.ipi_clear(cpu, irq);
+
+       /*
+        * XXX: is this loop really needed
+        * And do we need to move ipi_clean inside
+        */
+       while ((ops = xchg(&ipi->bits, 0)) != 0)
+               __do_IPI(&ops, ipi, cpu);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ */
+static DEFINE_PER_CPU(int, ipi_dev);
+int smp_ipi_irq_setup(int cpu, int irq)
+{
+       int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
+       return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
new file mode 100644 (file)
index 0000000..a63ff84
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ *     stacktrace.c : stacktracing APIs needed by rest of kernel
+ *                     (wrappers over ARC dwarf based unwinder)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  vineetg: aug 2009
+ *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
+ *   for displaying task's kernel mode call stack in /proc/<pid>/stack
+ *  -Iterator based approach to have single copy of unwinding core and APIs
+ *   needing unwinding, implement the logic in iterator regarding:
+ *      = which frame onwards to start capture
+ *      = which frame to stop capturing (wchan)
+ *      = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
+ *
+ *  vineetg: March 2009
+ *  -Implemented correct versions of thread_saved_pc() and get_wchan()
+ *
+ *  rajeshwarr: 2008
+ *  -Initial implementation
+ */
+
+#include <linux/ptrace.h>
+#include <linux/export.h>
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <asm/arcregs.h>
+#include <asm/unwind.h>
+#include <asm/switch_to.h>
+
+/*-------------------------------------------------------------------------
+ *              Unwinder Iterator
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+static void seed_unwind_frame_info(struct task_struct *tsk,
+                                  struct pt_regs *regs,
+                                  struct unwind_frame_info *frame_info)
+{
+       if (tsk == NULL && regs == NULL) {
+               unsigned long fp, sp, blink, ret;
+               frame_info->task = current;
+
+               __asm__ __volatile__(
+                       "mov %0,r27\n\t"
+                       "mov %1,r28\n\t"
+                       "mov %2,r31\n\t"
+                       "mov %3,r63\n\t"
+                       : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
+               );
+
+               frame_info->regs.r27 = fp;
+               frame_info->regs.r28 = sp;
+               frame_info->regs.r31 = blink;
+               frame_info->regs.r63 = ret;
+               frame_info->call_frame = 0;
+       } else if (regs == NULL) {
+
+               frame_info->task = tsk;
+
+               frame_info->regs.r27 = KSTK_FP(tsk);
+               frame_info->regs.r28 = KSTK_ESP(tsk);
+               frame_info->regs.r31 = KSTK_BLINK(tsk);
+               frame_info->regs.r63 = (unsigned int)__switch_to;
+
+               /* In the prologue of __switch_to, first FP is saved on stack
+                * and then SP is copied to FP. Dwarf assumes cfa as FP based
+                * but we didn't save FP. The value retrieved above is FP's
+                * state in previous frame.
+                * As a work around for this, we unwind from __switch_to start
+                * and adjust SP accordingly. The other limitation is that
+                * __switch_to macro is that dwarf rules are not generated for inline
+                * assembly code
+                */
+               frame_info->regs.r27 = 0;
+               frame_info->regs.r28 += 64;
+               frame_info->call_frame = 0;
+
+       } else {
+               frame_info->task = tsk;
+
+               frame_info->regs.r27 = regs->fp;
+               frame_info->regs.r28 = regs->sp;
+               frame_info->regs.r31 = regs->blink;
+               frame_info->regs.r63 = regs->ret;
+               frame_info->call_frame = 0;
+       }
+}
+
+#endif
+
+static noinline unsigned int
+arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+               int (*consumer_fn) (unsigned int, void *), void *arg)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+       int ret = 0;
+       unsigned int address;
+       struct unwind_frame_info frame_info;
+
+       seed_unwind_frame_info(tsk, regs, &frame_info);
+
+       while (1) {
+               address = UNW_PC(&frame_info);
+
+               if (address && __kernel_text_address(address)) {
+                       if (consumer_fn(address, arg) == -1)
+                               break;
+               }
+
+               ret = arc_unwind(&frame_info);
+
+               if (ret == 0) {
+                       frame_info.regs.r63 = frame_info.regs.r31;
+                       continue;
+               } else {
+                       break;
+               }
+       }
+
+       return address;         /* return the last address it saw */
+#else
+       /* On ARC, only Dwarf based unwinder works. fp based backtracing is
+        * not possible (-fno-omit-frame-pointer) because of the way function
+        * prologue is setup (callee regs saved and then fp set and not other
+        * way around
+        */
+       pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+       return 0;
+
+#endif
+}
+
+/*-------------------------------------------------------------------------
+ * callbacks called by unwinder iterator to implement kernel APIs
+ *
+ * The callback can return -1 to force the iterator to stop, which by default
+ * keeps going till the bottom-most frame.
+ *-------------------------------------------------------------------------
+ */
+
+/* Call-back which plugs into unwinding core to dump the stack in
+ * case of panic/OOPs/BUG etc
+ */
+static int __print_sym(unsigned int address, void *unused)
+{
+       __print_symbol("  %s\n", address);
+       return 0;
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/* Call-back which plugs into unwinding core to capture the
+ * traces needed by kernel on /proc/<pid>/stack
+ */
+static int __collect_all(unsigned int address, void *arg)
+{
+       struct stack_trace *trace = arg;
+
+       if (trace->skip > 0)
+               trace->skip--;
+       else
+               trace->entries[trace->nr_entries++] = address;
+
+       if (trace->nr_entries >= trace->max_entries)
+               return -1;
+
+       return 0;
+}
+
+static int __collect_all_but_sched(unsigned int address, void *arg)
+{
+       struct stack_trace *trace = arg;
+
+       if (in_sched_functions(address))
+               return 0;
+
+       if (trace->skip > 0)
+               trace->skip--;
+       else
+               trace->entries[trace->nr_entries++] = address;
+
+       if (trace->nr_entries >= trace->max_entries)
+               return -1;
+
+       return 0;
+}
+
+#endif
+
+static int __get_first_nonsched(unsigned int address, void *unused)
+{
+       if (in_sched_functions(address))
+               return 0;
+
+       return -1;
+}
+
+/*-------------------------------------------------------------------------
+ *              APIs expected by various kernel sub-systems
+ *-------------------------------------------------------------------------
+ */
+
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
+{
+       pr_info("\nStack Trace:\n");
+       arc_unwind_core(tsk, regs, __print_sym, NULL);
+}
+EXPORT_SYMBOL(show_stacktrace);
+
+/* Expected by sched Code */
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+       show_stacktrace(tsk, NULL);
+}
+
+/* Expected by Rest of kernel code */
+void dump_stack(void)
+{
+       show_stacktrace(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+/* Another API expected by scheduler, shows up in "ps" as Wait Channel
+ * Of course just returning schedule( ) would be pointless so unwind until
+ * the function is not in scheduler code
+ */
+unsigned int get_wchan(struct task_struct *tsk)
+{
+       return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/*
+ * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
+ * A typical use is when /proc/<pid>/stack is queried by userland
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+       arc_unwind_core(current, NULL, __collect_all, trace);
+}
+#endif
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
new file mode 100644 (file)
index 0000000..f6bdd07
--- /dev/null
@@ -0,0 +1,18 @@
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#define sys_clone      sys_clone_wrapper
+#define sys_fork       sys_fork_wrapper
+#define sys_vfork      sys_vfork_wrapper
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[NR_syscalls] = {
+       [0 ... NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
new file mode 100644 (file)
index 0000000..f13f728
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2011
+ *  -sched_clock( ) no longer jiffies based. Uses the same clocksource
+ *   as gtod
+ *
+ * Rajeshwarr/Vineetg: Mar 2008
+ *  -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
+ *   for arch independent gettimeofday()
+ *  -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
+ *
+ * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
+ */
+
+/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
+ * Each can programmed to go from @count to @limit and optionally
+ * interrupt when that happens.
+ * A write to Control Register clears the Interrupt
+ *
+ * We've designated TIMER0 for events (clockevents)
+ * while TIMER1 for free running (clocksource)
+ *
+ * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
+ */
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define ARC_TIMER_MAX  0xFFFFFFFF
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_HAS_RTSC
+
+/* Returns non-zero if the counter is usable as a clocksource */
+int __cpuinit arc_counter_setup(void)
+{
+       /* RTSC insn taps into cpu clk, needs no setup */
+
+       /* For SMP, only allowed if cross-core-sync, hence usable as cs */
+       return 1;
+}
+
+/*
+ * Read the 64-bit RTSC cycle counter.
+ * NOTE(review): only the low 32 bits are read (rtsc %0, 0); the high word
+ * is explicitly zeroed by the "mov %1, 0" — matches the 32-bit mask on the
+ * clocksource below. IRQs are disabled around the asm, presumably to keep
+ * the two register writes atomic w.r.t. interrupts — TODO confirm.
+ */
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+       unsigned long flags;
+       union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               struct { u32 high, low; };
+#else
+               struct { u32 low, high; };
+#endif
+               cycle_t  full;
+       } stamp;
+
+       flags = arch_local_irq_save();
+
+       __asm__ __volatile(
+       "       .extCoreRegister tsch, 58,  r, cannot_shortcut  \n"
+       "       rtsc %0, 0      \n"
+       "       mov  %1, 0      \n"
+       : "=r" (stamp.low), "=r" (stamp.high));
+
+       arch_local_irq_restore(flags);
+
+       return stamp.full;
+}
+
+/* RTSC-backed clocksource; 32-bit mask matches the read routine above */
+static struct clocksource arc_counter = {
+       .name   = "ARC RTSC",
+       .rating = 300,
+       .read   = arc_counter_read,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#else /* !CONFIG_ARC_HAS_RTSC */
+
+/*
+ * TIMER1 can back the timekeeping clocksource only on UP; on SMP it is
+ * rejected — presumably because per-core TIMER1 counts are not synchronized
+ * across cores (cf. the "cross-core-sync" note for RTSC above).
+ */
+static bool is_usable_as_clocksource(void)
+{
+#ifdef CONFIG_SMP
+       return false;
+#else
+       return true;
+#endif
+}
+
+/*
+ * set 32bit TIMER1 to keep counting monotonically and wraparound
+ * (count 0..ARC_TIMER_MAX, no interrupt, NH = count only when not halted).
+ * Returns whether it may be registered as a clocksource.
+ */
+int __cpuinit arc_counter_setup(void)
+{
+       write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
+       write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+       write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+       return is_usable_as_clocksource();
+}
+
+/* Clocksource read hook: current TIMER1 count */
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+       return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+/* TIMER1-backed clocksource (fallback when RTSC is not available) */
+static struct clocksource arc_counter = {
+       .name   = "ARC Timer1",
+       .rating = 300,
+       .read   = arc_counter_read,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#endif
+
+/********** Clock Event Device *********/
+
+/*
+ * Arm the timer to interrupt after @limit cycles
+ * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
+ * (IE = interrupt enable, NH = count only when CPU not halted)
+ */
+static void arc_timer_event_setup(unsigned int limit)
+{
+       write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
+       write_aux_reg(ARC_REG_TIMER0_CNT, 0);   /* start from 0 */
+
+       write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+/*
+ * Acknowledge the interrupt (oneshot) and optionally re-arm it (periodic)
+ * -Any write to CTRL Reg will ack the intr (NH bit: Count when not halted)
+ * -Rearming is done by setting the IE bit
+ *
+ * Small optimisation: Normal code would have been
+ *   if (irq_reenable)
+ *     CTRL_REG = (IE | NH);
+ *   else
+ *     CTRL_REG = NH;
+ * However since IE is BIT0 we can fold the branch
+ * (caller passes a 0/1 truth value which lands directly on the IE bit)
+ */
+static void arc_timer_event_ack(unsigned int irq_reenable)
+{
+       write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+}
+
+/* clockevents hook: program next oneshot expiry @delta cycles from now */
+static int arc_clkevent_set_next_event(unsigned long delta,
+                                      struct clock_event_device *dev)
+{
+       arc_timer_event_setup(delta);
+       return 0;
+}
+
+/*
+ * clockevents mode-switch hook.
+ * PERIODIC: arm TIMER0 for one tick's worth of core-clock cycles.
+ * ONESHOT and the remaining modes need no programming here; the actual
+ * arming for oneshot happens in arc_clkevent_set_next_event().
+ */
+static void arc_clkevent_set_mode(enum clock_event_mode mode,
+                                 struct clock_event_device *dev)
+{
+       if (mode == CLOCK_EVT_MODE_PERIODIC)
+               arc_timer_event_setup(arc_get_core_freq() / HZ);
+}
+
+/* Per-CPU TIMER0 clockevent device; mult/shift filled in at registration */
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+       .name           = "ARC Timer0",
+       .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+       .mode           = CLOCK_EVT_MODE_UNUSED,
+       .rating         = 300,
+       .irq            = TIMER0_IRQ,   /* hardwired, no need for resources */
+       .set_next_event = arc_clkevent_set_next_event,
+       .set_mode       = arc_clkevent_set_mode,
+};
+
+/* TIMER0 interrupt: ack (re-arming only in periodic mode) and run handler */
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+       struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+
+       arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
+       clk->event_handler(clk);
+       return IRQ_HANDLED;
+}
+
+/* irqaction shared by all CPUs (IRQF_PERCPU) for the TIMER0 interrupt */
+static struct irqaction arc_timer_irq = {
+       .name    = "Timer0 (clock-evt-dev)",
+       .flags   = IRQF_TIMER | IRQF_PERCPU,
+       .handler = timer_irq_handler,
+};
+
+/*
+ * Setup the local event timer for @cpu
+ * N.B. weak so that some exotic ARC SoCs can completely override it
+ */
+void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
+{
+       struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
+
+       /* 5 = minimum acceptable conversion range (see clockevents core) */
+       clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
+
+       clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
+       clk->cpumask = cpumask_of(cpu);
+
+       clockevents_register_device(clk);
+
+       /*
+        * setup the per-cpu timer IRQ handler - for all cpus
+        * For non boot CPU explicitly unmask at intc
+        * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
+        */
+       if (!cpu)
+               setup_irq(TIMER0_IRQ, &arc_timer_irq);
+       else
+               arch_unmask_irq(TIMER0_IRQ);
+}
+
+/*
+ * Called from start_kernel() - boot CPU only
+ *
+ * -Sets up h/w timers as applicable on boot cpu
+ * -Also sets up any global state needed for timer subsystem:
+ *    - for "counting" timer, registers a clocksource, usable across CPUs
+ *      (provided that underlying counter h/w is synchronized across cores)
+ *    - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
+ */
+void __init time_init(void)
+{
+       /*
+        * sets up the timekeeping free-flowing counter which also returns
+        * whether the counter is usable as clocksource
+        */
+       if (arc_counter_setup())
+               /*
+                * CLK upto 4.29 GHz can be safely represented in 32 bits
+                * because Max 32 bit number is 4,294,967,295
+                */
+               clocksource_register_hz(&arc_counter, arc_get_core_freq());
+
+       /* sets up the periodic event timer */
+       arc_local_timer_setup(smp_processor_id());
+
+       /* platform-specific time init hook, if the machine provides one */
+       if (machine_desc->init_time)
+               machine_desc->init_time();
+}
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
new file mode 100644 (file)
index 0000000..7496995
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Traps/Non-MMU Exception handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -user-space unaligned access emulation
+ *
+ * Rahul Trivedi: Codito Technologies 2004
+ */
+
+#include <linux/sched.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+#include <asm/kprobes.h>
+#include <asm/unaligned.h>
+#include <asm/kgdb.h>
+
+/* Nothing to set up for traps on ARC; hook exists because core kernel calls it */
+void __init trap_init(void)
+{
+}
+
+/* Fatal exception: dump diagnostics then halt the core — never returns */
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+        unsigned long cause_reg)
+{
+       show_kernel_fault_diag(str, regs, address, cause_reg);
+
+       /* DEAD END: "flag 1" halts the processor */
+       __asm__("flag 1");
+}
+
+/*
+ * Helper called for bulk of exceptions NOT needing specific handling
+ *  -for user faults enqueues requested signal
+ *  -for kernel, chk if due to copy_(to|from)_user, otherwise die()
+ *
+ * Returns 0 if a kernel fixup handled the fault, 1 otherwise.
+ */
+static noinline int handle_exception(unsigned long cause, char *str,
+                                    struct pt_regs *regs, siginfo_t *info)
+{
+       if (user_mode(regs)) {
+               struct task_struct *tsk = current;
+
+               /* stash fault details for later diagnostics (show_regs etc) */
+               tsk->thread.fault_address = (__force unsigned int)info->si_addr;
+               tsk->thread.cause_code = cause;
+
+               force_sig_info(info->si_signo, info, tsk);
+
+       } else {
+               /* If not due to copy_(to|from)_user, we are doomed */
+               if (fixup_exception(regs))
+                       return 0;
+
+               die(str, regs, (unsigned long)info->si_addr, cause);
+       }
+
+       return 1;
+}
+
+/* Generate a trivial exception entry point: build siginfo for @signr/@sicode
+ * at the faulting @address and defer to handle_exception()
+ */
+#define DO_ERROR_INFO(signr, str, name, sicode) \
+int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \
+{                                              \
+       siginfo_t info = {                      \
+               .si_signo = signr,              \
+               .si_errno = 0,                  \
+               .si_code  = sicode,             \
+               .si_addr = (void __user *)address,      \
+       };                                      \
+       return handle_exception(cause, str, regs, &info);\
+}
+
+/*
+ * Entry points for exceptions NOT needing specific handling
+ */
+DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
+DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
+DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
+DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
+DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+/*
+ * Entry Point for Misaligned Data access Exception, for emulating in software
+ */
+/* Returns 0 if the access was emulated, non-zero after raising SIGBUS */
+int do_misaligned_access(unsigned long cause, unsigned long address,
+                        struct pt_regs *regs, struct callee_regs *cregs)
+{
+       /* try software emulation first; signal the task only if that fails */
+       if (misaligned_fixup(address, regs, cause, cregs) != 0) {
+               siginfo_t info;
+
+               info.si_signo = SIGBUS;
+               info.si_errno = 0;
+               info.si_code = BUS_ADRALN;
+               info.si_addr = (void __user *)address;
+               return handle_exception(cause, "Misaligned Access", regs,
+                                         &info);
+       }
+       return 0;
+}
+
+#else
+DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR)
+#endif
+
+/*
+ * Entry point for miscll errors such as Nested Exceptions
+ *  -Duplicate TLB entry is handled seperately though
+ */
+/* Unrecoverable by definition: always dies */
+void do_machine_check_fault(unsigned long cause, unsigned long address,
+                           struct pt_regs *regs)
+{
+       die("Machine Check Exception", regs, address, cause);
+}
+
+
+/*
+ * Entry point for traps induced by ARCompact TRAP_S <n> insn
+ * This is same family as TRAP0/SWI insn (use the same vector).
+ * The only difference being SWI insn take no operand, while TRAP_S does
+ * which reflects in ECR Reg as 8 bit param.
+ * Thus TRAP_S <n> can be used for specific purpose
+ *  -1 used for software breakpointing (gdb)
+ *  -2 used by kprobes
+ */
+/* Dispatch TRAP_S <n> by its 8-bit operand (low byte of ECR):
+ * 1 = gdb breakpoint, 2 = kprobes, 3/4 = kgdb; others silently ignored
+ */
+void do_non_swi_trap(unsigned long cause, unsigned long address,
+                       struct pt_regs *regs)
+{
+       unsigned int param = cause & 0xff;
+
+       switch (param) {
+       case 1:
+               trap_is_brkpt(cause, address, regs);
+               break;
+
+       case 2:
+               trap_is_kprobe(param, address, regs);
+               break;
+
+       case 3:
+       case 4:
+               kgdb_trap(regs, param);
+               break;
+
+       default:
+               break;
+       }
+}
+
+/*
+ * Entry point for Instruction Error Exception
+ *  -For a corner case, ARC kprobes implementation resorts to using
+ *   this exception, hence the check
+ */
+/* Give the kprobes die-notifier chain first shot; otherwise treat as SIGILL */
+void do_insterror_or_kprobe(unsigned long cause,
+                                      unsigned long address,
+                                      struct pt_regs *regs)
+{
+       /* Check if this exception is caused by kprobes */
+       if (notify_die(DIE_IERR, "kprobe_ierr", regs, address,
+                      cause, SIGILL) == NOTIFY_STOP)
+               return;
+
+       insterror_is_error(cause, address, regs);
+}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
new file mode 100644 (file)
index 0000000..7c10873
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ */
+
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/fs_struct.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
+#include <asm/arcregs.h>
+
+/*
+ * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
+ *   -Prints 3 regs per line and a CR.
+ *   -To continue, callee regs right after scratch, special handling of CR
+ */
+/*
+ * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
+ *   -Prints 3 regs per line and a CR.
+ *   -To continue, callee regs right after scratch, special handling of CR
+ * @reg_rev points at the HIGHEST-numbered reg of the group (layout reversed),
+ * @start_num is the register number to label the first entry with.
+ */
+static noinline void print_reg_file(long *reg_rev, int start_num)
+{
+       unsigned int i;
+       char buf[512];
+       int n = 0, len = sizeof(buf);
+
+       /* weird loop because pt_regs regs rev r12..r0, r25..r13 */
+       for (i = start_num; i < start_num + 13; i++) {
+               n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
+                              i, (unsigned long)*reg_rev);
+
+               if (((i + 1) % 3) == 0)
+                       n += scnprintf(buf + n, len - n, "\n");
+
+               reg_rev--;
+       }
+
+       /* extra blank line after the callee-reg group (start_num != 0) */
+       if (start_num != 0)
+               n += scnprintf(buf + n, len - n, "\n\n");
+
+       pr_info("%s", buf);
+}
+
+/* Print r13-r25 (callee-saved group, stored in reverse like pt_regs) */
+static void show_callee_regs(struct callee_regs *cregs)
+{
+       print_reg_file(&(cregs->r13), 13);
+}
+
+/*
+ * Print "<exe path>, TGID <n>" for @tsk, resolving the path into caller's
+ * @buf (used for up to 255 bytes). Falls back to "?" when the task has no
+ * mm (kernel thread / exiting) or no executable file, instead of handing
+ * a NULL pointer to the "%s" format.
+ */
+void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+{
+       struct path path;
+       char *path_nm = "?";
+       struct mm_struct *mm;
+       struct file *exe_file;
+
+       mm = get_task_mm(tsk);
+       if (!mm)
+               goto done;
+
+       exe_file = get_mm_exe_file(mm);
+       mmput(mm);
+
+       if (exe_file) {
+               /* pin the path across fput() before resolving it */
+               path = exe_file->f_path;
+               path_get(&exe_file->f_path);
+               fput(exe_file);
+               path_nm = d_path(&path, buf, 255);
+               path_put(&path);
+       }
+
+done:
+       pr_info("%s, TGID %u\n", path_nm, tsk->tgid);
+}
+EXPORT_SYMBOL(print_task_path_n_nm);
+
+/* Identify and print the VMA (and backing file, if any) covering @address */
+static void show_faulting_vma(unsigned long address, char *buf)
+{
+       struct vm_area_struct *vma;
+       struct inode *inode;
+       unsigned long ino = 0;
+       dev_t dev = 0;
+       char *nm = buf;
+
+       vma = find_vma(current->active_mm, address);
+
+       /* check against the find_vma( ) behaviour which returns the next VMA
+        * if the container VMA is not found
+        */
+       if (vma && (vma->vm_start <= address)) {
+               struct file *file = vma->vm_file;
+               if (file) {
+                       struct path *path = &file->f_path;
+                       nm = d_path(path, buf, PAGE_SIZE - 1);
+                       inode = vma->vm_file->f_path.dentry->d_inode;
+                       dev = inode->i_sb->s_dev;
+                       ino = inode->i_ino;
+               }
+               pr_info("    @off 0x%lx in [%s]\n"
+                       "    VMA: 0x%08lx to 0x%08lx\n\n",
+                      address - vma->vm_start, nm, vma->vm_start, vma->vm_end);
+       } else
+               pr_info("    @No matching VMA found\n");
+}
+
+/* Decode the saved Exception Cause Register (vector in bits 23:16,
+ * cause code in bits 15:8) into a human-readable one-liner.
+ */
+static void show_ecr_verbose(struct pt_regs *regs)
+{
+       unsigned int vec, cause_code, cause_reg;
+       unsigned long address;
+
+       cause_reg = current->thread.cause_code;
+       pr_info("\n[ECR]: 0x%08x => ", cause_reg);
+
+       /* For Data fault, this is data address not instruction addr */
+       address = current->thread.fault_address;
+
+       vec = cause_reg >> 16;
+       cause_code = (cause_reg >> 8) & 0xFF;
+
+       /* For DTLB Miss or ProtV, display the memory involved too */
+       if (vec == ECR_V_DTLB_MISS) {
+               pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n",
+                      (cause_code == 0x01) ? "Read From" :
+                      ((cause_code == 0x02) ? "Write to" : "EX"),
+                      address, regs->ret);
+       } else if (vec == ECR_V_ITLB_MISS) {
+               pr_cont("Insn could not be fetched\n");
+       } else if (vec == ECR_V_MACH_CHK) {
+               pr_cont("%s\n", (cause_code == 0x0) ?
+                                       "Double Fault" : "Other Fatal Err");
+
+       } else if (vec == ECR_V_PROTV) {
+               if (cause_code == ECR_C_PROTV_INST_FETCH)
+                       pr_cont("Execute from Non-exec Page\n");
+               else if (cause_code == ECR_C_PROTV_LOAD)
+                       pr_cont("Read from Non-readable Page\n");
+               else if (cause_code == ECR_C_PROTV_STORE)
+                       pr_cont("Write to Non-writable Page\n");
+               else if (cause_code == ECR_C_PROTV_XCHG)
+                       pr_cont("Data exchange protection violation\n");
+               else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+                       pr_cont("Misaligned r/w from 0x%08lx\n", address);
+       } else if (vec == ECR_V_INSN_ERR) {
+               pr_cont("Illegal Insn\n");
+       } else {
+               pr_cont("Check Programmer's Manual\n");
+       }
+}
+
+/************************************************************************
+ *  API called by rest of kernel
+ ***********************************************************************/
+
+/* Dump task identity, decoded ECR, special regs and the full register file.
+ * Silently does nothing if a scratch page can't be allocated for d_path().
+ */
+void show_regs(struct pt_regs *regs)
+{
+       struct task_struct *tsk = current;
+       struct callee_regs *cregs;
+       char *buf;
+
+       buf = (char *)__get_free_page(GFP_TEMPORARY);
+       if (!buf)
+               return;
+
+       print_task_path_n_nm(tsk, buf);
+
+       if (current->thread.cause_code)
+               show_ecr_verbose(regs);
+
+       pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address);
+       pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret);
+
+       show_faulting_vma(regs->ret, buf);      /* faulting code, not data */
+
+       /* can't use print_vma_addr() yet as it doesn't check for
+        * non-inclusive vma
+        */
+
+       /* print special regs */
+       pr_info("status32: 0x%08lx\n", regs->status32);
+       pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp);
+       pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n",
+               regs->bta, regs->blink);
+       pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+              regs->lp_start, regs->lp_end, regs->lp_count);
+
+       /* print regs->r0 thru regs->r12
+        * Sequential printing was generating horrible code
+        */
+       print_reg_file(&(regs->r0), 0);
+
+       /* If Callee regs were saved, display them too */
+       cregs = (struct callee_regs *)current->thread.callee_reg;
+       if (cregs)
+               show_callee_regs(cregs);
+
+       free_page((unsigned long)buf);
+}
+
+/* One-stop diagnostics for a fatal fault: regs dump plus (kernel-mode only)
+ * a stack trace. Stashes @address/@cause_reg so show_regs can decode them.
+ */
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+                           unsigned long address, unsigned long cause_reg)
+{
+       current->thread.fault_address = address;
+       current->thread.cause_code = cause_reg;
+
+       /* Caller and Callee regs */
+       show_regs(regs);
+
+       /* Show stack trace if this Fatality happened in kernel mode */
+       if (!user_mode(regs))
+               show_stacktrace(current, regs);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/debugfs.h>
+
+static struct dentry *test_dentry;
+static struct dentry *test_dir;
+static struct dentry *test_u32_dentry;
+
+static u32 clr_on_read = 1;
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+u32 numitlb, numdtlb, num_pte_not_present;
+
+/* Format the TLB miss counters (hex) into @kbuf; returns bytes written.
+ * Counters reset after display when the clr_on_read knob is set.
+ */
+static int fill_display_data(char *kbuf)
+{
+       size_t num = 0;
+       num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
+       num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
+       num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
+
+       if (clr_on_read)
+               numitlb = numdtlb = num_pte_not_present = 0;
+
+       return num;
+}
+
+/* open(): allocate one page as the per-open print buffer.
+ * The original ignored allocation failure, which would make the read path
+ * sprintf() into a NULL buffer; fail the open with -ENOMEM instead.
+ */
+static int tlb_stats_open(struct inode *inode, struct file *file)
+{
+       file->private_data = (void *)__get_free_page(GFP_KERNEL);
+       if (!file->private_data)
+               return -ENOMEM;
+       return 0;
+}
+
+/* called on user read(): display the counters */
+static ssize_t tlb_stats_output(struct file *file,     /* file descriptor */
+                               char __user *user_buf,  /* user buffer */
+                               size_t len,             /* length of buffer */
+                               loff_t *offset)         /* offset in the file */
+{
+       size_t num;
+       char *kbuf = (char *)file->private_data;
+
+       /* All of the data can be shoved in one iteration */
+       if (*offset != 0)
+               return 0;
+
+       num = fill_display_data(kbuf);
+
+       /* simple_read_from_buffer(to, count, ppos, from, available):
+        * @count is the size of the USER buffer (len) and @available the
+        * amount of valid data in the kernel buffer (num) -- the original
+        * call passed these two swapped.
+        */
+       return simple_read_from_buffer(user_buf, len, offset, kbuf, num);
+}
+
+/* called on user write : clears the counters (written data is ignored) */
+static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
+                              size_t length, loff_t *offset)
+{
+       numitlb = numdtlb = num_pte_not_present = 0;
+       return length;
+}
+
+/* release(): free the page allocated at open time */
+static int tlb_stats_close(struct inode *inode, struct file *file)
+{
+       free_page((unsigned long)(file->private_data));
+       return 0;
+}
+
+/* fops for the debugfs "tlb_stats" file */
+static const struct file_operations tlb_stats_file_ops = {
+       .read = tlb_stats_output,
+       .write = tlb_stats_clear,
+       .open = tlb_stats_open,
+       .release = tlb_stats_close
+};
+#endif
+
+/* Create debugfs dir "arc" with the TLB stats file and clr_on_read knob */
+static int __init arc_debugfs_init(void)
+{
+       test_dir = debugfs_create_dir("arc", NULL);
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+       test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
+                                         &tlb_stats_file_ops);
+#endif
+
+       /* NOTE(review): mode 0444 makes clr_on_read read-only from userspace
+        * even though it looks like a tunable — confirm whether 0644 was
+        * intended.
+        */
+       test_u32_dentry =
+           debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
+
+       return 0;
+}
+
+/* Tear down the debugfs entries created above (children before the dir) */
+static void __exit arc_debugfs_exit(void)
+{
+       debugfs_remove(test_u32_dentry);
+       debugfs_remove(test_dentry);
+       debugfs_remove(test_dir);
+}
+module_exit(arc_debugfs_exit);
+
+#endif
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
new file mode 100644 (file)
index 0000000..4cd8163
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg : May 2011
+ *  -Adapted (from .26 to .35)
+ *  -original contribution by Tim.yao@amlogic.com
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/disasm.h>
+
+/* Load one byte from @addr (post-incrementing it) into @val; sets @err=1
+ * via the .fixup/__ex_table machinery if the user access faults.
+ */
+#define __get8_unaligned_check(val, addr, err)         \
+       __asm__(                                        \
+       "1:     ldb.ab  %1, [%2, 1]\n"                  \
+       "2:\n"                                          \
+       "       .section .fixup,\"ax\"\n"               \
+       "       .align  4\n"                            \
+       "3:     mov     %0, 1\n"                        \
+       "       b       2b\n"                           \
+       "       .previous\n"                            \
+       "       .section __ex_table,\"a\"\n"            \
+       "       .align  4\n"                            \
+       "       .long   1b, 3b\n"                       \
+       "       .previous\n"                            \
+       : "=r" (err), "=&r" (val), "=r" (addr)          \
+       : "0" (err), "2" (addr))
+
+/* Assemble a 16-bit value byte-by-byte (little-endian order); jumps to the
+ * caller's local "fault" label on any access error.
+ */
+#define get16_unaligned_check(val, addr)               \
+       do {                                            \
+               unsigned int err = 0, v, a = addr;      \
+               __get8_unaligned_check(v, a, err);      \
+               val =  v ;                              \
+               __get8_unaligned_check(v, a, err);      \
+               val |= v << 8;                          \
+               if (err)                                \
+                       goto fault;                     \
+       } while (0)
+
+/* 32-bit variant of the above */
+#define get32_unaligned_check(val, addr)               \
+       do {                                            \
+               unsigned int err = 0, v, a = addr;      \
+               __get8_unaligned_check(v, a, err);      \
+               val =  v << 0;                          \
+               __get8_unaligned_check(v, a, err);      \
+               val |= v << 8;                          \
+               __get8_unaligned_check(v, a, err);      \
+               val |= v << 16;                         \
+               __get8_unaligned_check(v, a, err);      \
+               val |= v << 24;                         \
+               if (err)                                \
+                       goto fault;                     \
+       } while (0)
+
+/* Store a 16-bit value byte-by-byte; jumps to the caller's "fault" label on
+ * any access error (each stb has an __ex_table fixup entry).
+ */
+#define put16_unaligned_check(val, addr)               \
+       do {                                            \
+               unsigned int err = 0, v = val, a = addr;\
+                                                       \
+               __asm__(                                \
+               "1:     stb.ab  %1, [%2, 1]\n"          \
+               "       lsr %1, %1, 8\n"                \
+               "2:     stb     %1, [%2]\n"             \
+               "3:\n"                                  \
+               "       .section .fixup,\"ax\"\n"       \
+               "       .align  4\n"                    \
+               "4:     mov     %0, 1\n"                \
+               "       b       3b\n"                   \
+               "       .previous\n"                    \
+               "       .section __ex_table,\"a\"\n"    \
+               "       .align  4\n"                    \
+               "       .long   1b, 4b\n"               \
+               "       .long   2b, 4b\n"               \
+               "       .previous\n"                    \
+               : "=r" (err), "=&r" (v), "=&r" (a)      \
+               : "0" (err), "1" (v), "2" (a));         \
+                                                       \
+               if (err)                                \
+                       goto fault;                     \
+       } while (0)
+
+/* 32-bit variant: four byte stores, shifting the value down 8 bits each */
+#define put32_unaligned_check(val, addr)               \
+       do {                                            \
+               unsigned int err = 0, v = val, a = addr;\
+               __asm__(                                \
+                                                       \
+               "1:     stb.ab  %1, [%2, 1]\n"          \
+               "       lsr %1, %1, 8\n"                \
+               "2:     stb.ab  %1, [%2, 1]\n"          \
+               "       lsr %1, %1, 8\n"                \
+               "3:     stb.ab  %1, [%2, 1]\n"          \
+               "       lsr %1, %1, 8\n"                \
+               "4:     stb     %1, [%2]\n"             \
+               "5:\n"                                  \
+               "       .section .fixup,\"ax\"\n"       \
+               "       .align  4\n"                    \
+               "6:     mov     %0, 1\n"                \
+               "       b       5b\n"                   \
+               "       .previous\n"                    \
+               "       .section __ex_table,\"a\"\n"    \
+               "       .align  4\n"                    \
+               "       .long   1b, 6b\n"               \
+               "       .long   2b, 6b\n"               \
+               "       .long   3b, 6b\n"               \
+               "       .long   4b, 6b\n"               \
+               "       .previous\n"                    \
+               : "=r" (err), "=&r" (v), "=&r" (a)      \
+               : "0" (err), "1" (v), "2" (a));         \
+                                                       \
+               if (err)                                \
+                       goto fault;                     \
+       } while (0)
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1;       /* Enabled by default */
+int no_unaligned_warning __read_mostly = 1;    /* Only 1 warning by default */
+
+/* Emulate an unaligned load described by @state; on any access fault the
+ * get*_unaligned_check macros branch to the "fault" label below.
+ */
+static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
+                       struct callee_regs *cregs)
+{
+       int val;
+
+       /* register write back */
+       if ((state->aa == 1) || (state->aa == 2)) {
+               set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
+
+               if (state->aa == 2)
+                       state->src2 = 0;
+       }
+
+       if (state->zz == 0) {
+               get32_unaligned_check(val, state->src1 + state->src2);
+       } else {
+               get16_unaligned_check(val, state->src1 + state->src2);
+
+               /* sign-extend halfword loads when the insn asked for it */
+               if (state->x)
+                       val = (val << 16) >> 16;
+       }
+
+       /* prefetch variants load but don't write a destination reg */
+       if (state->pref == 0)
+               set_reg(state->dest, val, regs, cregs);
+
+       return;
+
+fault: state->fault = 1;
+}
+
+/* Emulate an unaligned store described by @state; access faults land on
+ * the "fault" label via the put*_unaligned_check macros.
+ */
+static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
+                       struct callee_regs *cregs)
+{
+       /* register write back */
+       if ((state->aa == 1) || (state->aa == 2)) {
+               set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
+
+               /* NOTE(review): this branch is only reached when aa is 1 or
+                * 2, so the aa == 3 test below is dead code; fixup_load's
+                * parallel logic tests aa == 2 here — confirm intent against
+                * the ARC ISA addressing-mode semantics before changing.
+                */
+               if (state->aa == 3)
+                       state->src3 = 0;
+       } else if (state->aa == 3) {
+               /* scaled write-back: shift offset by operand size (zz) */
+               if (state->zz == 2) {
+                       set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
+                               regs, cregs);
+               } else if (!state->zz) {
+                       set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
+                               regs, cregs);
+               } else {
+                       goto fault;
+               }
+       }
+
+       /* write fix-up */
+       if (!state->zz)
+               put32_unaligned_check(state->src1, state->src2 + state->src3);
+       else
+               put16_unaligned_check(state->src1, state->src2 + state->src3);
+
+       return;
+
+fault: state->fault = 1;
+}
+
+/*
+ * Handle an unaligned access
+ * Returns 0 if successfully handled, 1 if some error happened
+ */
+/*
+ * Handle an unaligned access
+ * Returns 0 if successfully handled, 1 if some error happened
+ *
+ * Only user-mode accesses are emulated, and only while the sysctl knob
+ * (unaligned_enabled) permits it. The original warning strings had the
+ * '\n' placed before the sentence periods ("assist\n. This ..."), which
+ * garbled the log output; the periods now precede the newlines.
+ */
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+                    unsigned long cause, struct callee_regs *cregs)
+{
+       struct disasm_state state;
+       char buf[TASK_COMM_LEN];
+
+       /* handle user mode only and only if enabled by sysadmin */
+       if (!user_mode(regs) || !unaligned_enabled)
+               return 1;
+
+       if (no_unaligned_warning) {
+               pr_warn_once("%s(%d) made unaligned access which was emulated"
+                            " by kernel assist.\nThis can degrade application"
+                            " performance significantly.\nTo enable further"
+                            " logging of such instances, please\n"
+                            "echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
+                            get_task_comm(buf, current), task_pid_nr(current));
+       } else {
+               /* Add rate limiting if it gets down to it */
+               pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
+                       get_task_comm(buf, current), task_pid_nr(current),
+                       address, regs->ret);
+
+       }
+
+       disasm_instr(regs->ret, &state, 1, regs, cregs);
+
+       if (state.fault)
+               goto fault;
+
+       /* ldb/stb should not have unaligned exception */
+       if ((state.zz == 1) || (state.di))
+               goto fault;
+
+       if (!state.write)
+               fixup_load(&state, regs, cregs);
+       else
+               fixup_store(&state, regs, cregs);
+
+       if (state.fault)
+               goto fault;
+
+       /* if faulting insn was in a delay slot, resume at branch target */
+       if (delay_mode(regs)) {
+               regs->ret = regs->bta;
+               regs->status32 &= ~STATUS_DE_MASK;
+       } else {
+               regs->ret += state.instr_len;
+       }
+
+       return 0;
+
+fault:
+       pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
+               state.words[0], address);
+
+       return 1;
+}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
new file mode 100644 (file)
index 0000000..a8d0222
--- /dev/null
@@ -0,0 +1,1329 @@
+/*
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2002-2006 Novell, Inc.
+ *     Jan Beulich <jbeulich@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * A simple API for unwinding kernel stacks.  This is used for
+ * debugging and error reporting purposes.  The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/unwind.h>
+
+extern char __start_unwind[], __end_unwind[];
+/* extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];*/
+
+/* #define UNWIND_DEBUG */
+
+#ifdef UNWIND_DEBUG
+int dbg_unw;
+#define unw_debug(fmt, ...)                    \
+do {                                           \
+       if (dbg_unw)                            \
+               pr_info(fmt, ##__VA_ARGS__);    \
+} while (0);
+#else
+#define unw_debug(fmt, ...)
+#endif
+
+#define MAX_STACK_DEPTH 8
+
+#define EXTRA_INFO(f) { \
+               BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
+                               % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+                               + offsetof(struct unwind_frame_info, f) \
+                               / FIELD_SIZEOF(struct unwind_frame_info, f), \
+                               FIELD_SIZEOF(struct unwind_frame_info, f) \
+       }
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+static const struct {
+       unsigned offs:BITS_PER_LONG / 2;
+       unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+UNW_REGISTER_INFO};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop                          0x00
+#define DW_CFA_set_loc                      0x01
+#define DW_CFA_advance_loc1                 0x02
+#define DW_CFA_advance_loc2                 0x03
+#define DW_CFA_advance_loc4                 0x04
+#define DW_CFA_offset_extended              0x05
+#define DW_CFA_restore_extended             0x06
+#define DW_CFA_undefined                    0x07
+#define DW_CFA_same_value                   0x08
+#define DW_CFA_register                     0x09
+#define DW_CFA_remember_state               0x0a
+#define DW_CFA_restore_state                0x0b
+#define DW_CFA_def_cfa                      0x0c
+#define DW_CFA_def_cfa_register             0x0d
+#define DW_CFA_def_cfa_offset               0x0e
+#define DW_CFA_def_cfa_expression           0x0f
+#define DW_CFA_expression                   0x10
+#define DW_CFA_offset_extended_sf           0x11
+#define DW_CFA_def_cfa_sf                   0x12
+#define DW_CFA_def_cfa_offset_sf            0x13
+#define DW_CFA_val_offset                   0x14
+#define DW_CFA_val_offset_sf                0x15
+#define DW_CFA_val_expression               0x16
+#define DW_CFA_lo_user                      0x1c
+#define DW_CFA_GNU_window_save              0x2d
+#define DW_CFA_GNU_args_size                0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user                      0x3f
+
+#define DW_EH_PE_FORM     0x07
+#define DW_EH_PE_native   0x00
+#define DW_EH_PE_leb128   0x01
+#define DW_EH_PE_data2    0x02
+#define DW_EH_PE_data4    0x03
+#define DW_EH_PE_data8    0x04
+#define DW_EH_PE_signed   0x08
+#define DW_EH_PE_ADJUST   0x70
+#define DW_EH_PE_abs      0x00
+#define DW_EH_PE_pcrel    0x10
+#define DW_EH_PE_textrel  0x20
+#define DW_EH_PE_datarel  0x30
+#define DW_EH_PE_funcrel  0x40
+#define DW_EH_PE_aligned  0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit     0xff
+
+typedef unsigned long uleb128_t;
+typedef signed long sleb128_t;
+
+static struct unwind_table {
+       struct {
+               unsigned long pc;
+               unsigned long range;
+       } core, init;
+       const void *address;
+       unsigned long size;
+       const unsigned char *header;
+       unsigned long hdrsz;
+       struct unwind_table *link;
+       const char *name;
+} root_table;
+
+struct unwind_item {
+       enum item_location {
+               Nowhere,
+               Memory,
+               Register,
+               Value
+       } where;
+       uleb128_t value;
+};
+
+struct unwind_state {
+       uleb128_t loc, org;
+       const u8 *cieStart, *cieEnd;
+       uleb128_t codeAlign;
+       sleb128_t dataAlign;
+       struct cfa {
+               uleb128_t reg, offs;
+       } cfa;
+       struct unwind_item regs[ARRAY_SIZE(reg_info)];
+       unsigned stackDepth:8;
+       unsigned version:8;
+       const u8 *label;
+       const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+/*
+ * Find the unwind table whose core or init text range covers @pc.
+ * Walks the singly-linked list rooted at the kernel's root_table
+ * (module tables are appended via unwind_add_table).  Returns NULL
+ * when no registered range contains @pc.
+ */
+static struct unwind_table *find_table(unsigned long pc)
+{
+       struct unwind_table *table;
+
+       for (table = &root_table; table; table = table->link)
+               if ((pc >= table->core.pc
+                    && pc < table->core.pc + table->core.range)
+                   || (pc >= table->init.pc
+                       && pc < table->init.pc + table->init.range))
+                       break;
+
+       return table;
+}
+
+static unsigned long read_pointer(const u8 **pLoc,
+                                 const void *end, signed ptrType);
+
+/*
+ * Initialise @table to describe the unwind (.eh_frame) data for one text
+ * region: @core_start/@core_size and @init_start/@init_size delimit the
+ * covered code, @table_start/@table_size the FDE/CIE data itself, and
+ * @header_start/@header_size an optional pre-built .eh_frame_hdr search
+ * header.  If the header fails the sanity checks below it is discarded
+ * (header = NULL) and setup_unwind_table() will build one later.
+ */
+static void init_unwind_table(struct unwind_table *table, const char *name,
+                             const void *core_start, unsigned long core_size,
+                             const void *init_start, unsigned long init_size,
+                             const void *table_start, unsigned long table_size,
+                             const u8 *header_start, unsigned long header_size)
+{
+       /* skip the 4 fixed bytes: version + 3 encoding octets */
+       const u8 *ptr = header_start + 4;
+       const u8 *end = header_start + header_size;
+
+       table->core.pc = (unsigned long)core_start;
+       table->core.range = core_size;
+       table->init.pc = (unsigned long)init_start;
+       table->init.range = init_size;
+       table->address = table_start;
+       table->size = table_size;
+
+       /* See if the linker provided table looks valid. */
+       if (header_size <= 4
+           || header_start[0] != 1
+           || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
+           || header_start[2] == DW_EH_PE_omit
+           || read_pointer(&ptr, end, header_start[2]) <= 0
+           || header_start[3] == DW_EH_PE_omit)
+               header_start = NULL;
+
+       table->hdrsz = header_size;
+       /* order hdrsz store before publishing header (readers use smp_rmb) */
+       smp_wmb();
+       table->header = header_start;
+       table->link = NULL;
+       table->name = name;
+}
+
+/*
+ * Register the kernel image's own .eh_frame section (bounded by the
+ * linker symbols __start_unwind/__end_unwind) as the root unwind table.
+ * The kernel has no separate init range here, and no linker-provided
+ * .eh_frame_hdr is passed (see the commented-out symbols above).
+ */
+void __init arc_unwind_init(void)
+{
+       init_unwind_table(&root_table, "kernel", _text, _end - _text, NULL, 0,
+                         __start_unwind, __end_unwind - __start_unwind,
+                         NULL, 0);
+         /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+}
+
+static const u32 bad_cie, not_fde;
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
+static signed fde_pointer_type(const u32 *cie);
+
+struct eh_frame_hdr_table_entry {
+       unsigned long start, fde;
+};
+
+/*
+ * sort() comparator for the binary-search header: orders entries by
+ * start address.  The (a > b) - (a < b) form yields -1/0/1 without the
+ * overflow risk of a plain subtraction of unsigned longs.
+ */
+static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
+{
+       const struct eh_frame_hdr_table_entry *e1 = p1;
+       const struct eh_frame_hdr_table_entry *e2 = p2;
+
+       return (e1->start > e2->start) - (e1->start < e2->start);
+}
+
+/* sort() swap callback: exchange two header table entries field by field. */
+static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
+{
+       struct eh_frame_hdr_table_entry *e1 = p1;
+       struct eh_frame_hdr_table_entry *e2 = p2;
+       unsigned long v;
+
+       v = e1->start;
+       e1->start = e2->start;
+       e2->start = v;
+       v = e1->fde;
+       e1->fde = e2->fde;
+       e2->fde = v;
+}
+
+/*
+ * Build a sorted binary-search header (equivalent of .eh_frame_hdr) for
+ * @table when the linker did not provide a usable one.  Two passes over
+ * the FDE/CIE data: the first validates every entry and counts FDEs, the
+ * second fills a (start, fde) lookup table which is then sorted by start
+ * address.  @alloc provides the header memory (bootmem at this stage).
+ * Bails out silently on any malformed entry, leaving table->header NULL.
+ */
+static void __init setup_unwind_table(struct unwind_table *table,
+                                     void *(*alloc) (unsigned long))
+{
+       const u8 *ptr;
+       unsigned long tableSize = table->size, hdrSize;
+       unsigned n;
+       const u32 *fde;
+       /* in-memory layout mirrors the on-disk .eh_frame_hdr format */
+       struct {
+               u8 version;
+               u8 eh_frame_ptr_enc;
+               u8 fde_count_enc;
+               u8 table_enc;
+               unsigned long eh_frame_ptr;
+               unsigned int fde_count;
+               struct eh_frame_hdr_table_entry table[];
+       } __attribute__ ((__packed__)) *header;
+
+       if (table->header)
+               return;
+
+       if (table->hdrsz)
+               pr_warn(".eh_frame_hdr for '%s' present but unusable\n",
+                       table->name);
+
+       if (tableSize & (sizeof(*fde) - 1))
+               return;
+
+       /* Pass 1: validate each FDE against its CIE and count them. */
+       for (fde = table->address, n = 0;
+            tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+            tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+               const u32 *cie = cie_for_fde(fde, table);
+               signed ptrType;
+
+               if (cie == &not_fde)
+                       continue;
+               if (cie == NULL || cie == &bad_cie)
+                       return;
+               ptrType = fde_pointer_type(cie);
+               if (ptrType < 0)
+                       return;
+
+               ptr = (const u8 *)(fde + 2);
+               if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
+                                                               ptrType)) {
+                       /* FIXME_Rajesh We have 4 instances of null addresses
+                        * instead of the initial loc addr
+                        * return;
+                        */
+               }
+               ++n;
+       }
+
+       /* whole table must have been consumed and at least one FDE found */
+       if (tableSize || !n)
+               return;
+
+       hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+           + 2 * n * sizeof(unsigned long);
+       header = alloc(hdrSize);
+       if (!header)
+               return;
+       header->version = 1;
+       header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
+       header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
+       header->table_enc = DW_EH_PE_abs | DW_EH_PE_native;
+       put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
+       BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
+                    % __alignof(typeof(header->fde_count)));
+       header->fde_count = n;
+
+       BUILD_BUG_ON(offsetof(typeof(*header), table)
+                    % __alignof(typeof(*header->table)));
+       /* Pass 2: record (initial location, FDE address) for each FDE. */
+       for (fde = table->address, tableSize = table->size, n = 0;
+            tableSize;
+            tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+               /* const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); */
+               const u32 *cie = (const u32 *)(fde[1]);
+
+               if (fde[1] == 0xffffffff)
+                       continue;       /* this is a CIE */
+               ptr = (const u8 *)(fde + 2);
+               header->table[n].start = read_pointer(&ptr,
+                                                     (const u8 *)(fde + 1) +
+                                                     *fde,
+                                                     fde_pointer_type(cie));
+               header->table[n].fde = (unsigned long)fde;
+               ++n;
+       }
+       WARN_ON(n != header->fde_count);
+
+       sort(header->table,
+            n,
+            sizeof(*header->table),
+            cmp_eh_frame_hdr_table_entries, swap_eh_frame_hdr_table_entries);
+
+       table->hdrsz = hdrSize;
+       /* order hdrsz store before publishing header (readers use smp_rmb) */
+       smp_wmb();
+       table->header = (const void *)header;
+}
+
+/*
+ * Boot-time allocator callback for setup_unwind_table(): grabs the search
+ * header from bootmem (no panic on failure; caller handles NULL).
+ */
+static void *__init balloc(unsigned long sz)
+{
+       return __alloc_bootmem_nopanic(sz,
+                                      sizeof(unsigned int),
+                                      __pa(MAX_DMA_ADDRESS));
+}
+
+/* Build the kernel root table's search header during early boot. */
+void __init arc_unwind_setup(void)
+{
+       setup_unwind_table(&root_table, balloc);
+}
+
+#ifdef CONFIG_MODULES
+
+static struct unwind_table *last_table;
+
+/*
+ * Register a module's unwind table covering its core and init text.
+ * Appends a freshly kmalloc'd table to the list behind root_table and
+ * returns it as an opaque handle for unwind_remove_table(), or NULL on
+ * bad size / allocation failure.  Must be called with module_mutex held
+ * (last_table is updated without other locking).
+ */
+void *unwind_add_table(struct module *module, const void *table_start,
+                      unsigned long table_size)
+{
+       struct unwind_table *table;
+
+       if (table_size <= 0)
+               return NULL;
+
+       table = kmalloc(sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return NULL;
+
+       init_unwind_table(table, module->name,
+                         module->module_core, module->core_size,
+                         module->module_init, module->init_size,
+                         table_start, table_size,
+                         NULL, 0);
+
+#ifdef UNWIND_DEBUG
+       unw_debug("Table added for [%s] %lx %lx\n",
+               module->name, table->core.pc, table->core.range);
+#endif
+       if (last_table)
+               last_table->link = table;
+       else
+               root_table.link = table;
+       last_table = table;
+
+       return table;
+}
+
+/* Arguments for unlink_table(); info->table is NULLed when nothing
+ * should be freed by the caller. */
+struct unlink_table_info {
+       struct unwind_table *table;
+       int init_only;
+};
+
+/*
+ * Detach info->table from the table list (or, for init_only, just zero
+ * its init range so the table itself stays registered).  On exit
+ * info->table is NULL whenever the caller must NOT kfree() it: either
+ * the table was not found in the list, or only its init range was
+ * cleared.  void *arg / int return match the stop_machine() callback
+ * signature, though it is currently called directly.
+ */
+static int unlink_table(void *arg)
+{
+       struct unlink_table_info *info = arg;
+       struct unwind_table *table = info->table, *prev;
+
+       for (prev = &root_table; prev->link && prev->link != table;
+            prev = prev->link)
+               ;
+
+       if (prev->link) {
+               if (info->init_only) {
+                       table->init.pc = 0;
+                       table->init.range = 0;
+                       info->table = NULL;
+               } else {
+                       prev->link = table->link;
+                       if (!prev->link)
+                               last_table = prev;
+               }
+       } else
+               info->table = NULL;
+
+       return 0;
+}
+
+/*
+ * Undo unwind_add_table().  @handle is the pointer it returned; with
+ * @init_only set, only the module's init-text range is forgotten and the
+ * table stays registered for the core text.  Must be called with
+ * module_mutex held.  NOTE: kfree(table) runs even when unlink_table()
+ * cleared info.table (init_only path via unlink_table, or table not
+ * found) — see the XXX: SMP marker; callers hit the early-return
+ * fast path for the common init_only case on the most recent table.
+ */
+void unwind_remove_table(void *handle, int init_only)
+{
+       struct unwind_table *table = handle;
+       struct unlink_table_info info;
+
+       if (!table || table == &root_table)
+               return;
+
+       if (init_only && table == last_table) {
+               table->init.pc = 0;
+               table->init.range = 0;
+               return;
+       }
+
+       info.table = table;
+       info.init_only = init_only;
+
+       unlink_table(&info); /* XXX: SMP */
+       kfree(table);
+}
+
+#endif /* CONFIG_MODULES */
+
+/*
+ * Decode an unsigned LEB128 value at *pcur, not reading past @end.
+ * Advances *pcur past the consumed bytes; on a value that would overflow
+ * uleb128_t, *pcur is set to end + 1 so callers can detect the error by
+ * checking cur > end.
+ */
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+       const u8 *cur = *pcur;
+       uleb128_t value;
+       unsigned shift;
+
+       for (shift = 0, value = 0; cur < end; shift += 7) {
+               /* would the next 7 bits shift past the top of uleb128_t? */
+               if (shift + 7 > 8 * sizeof(value)
+                   && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+                       cur = end + 1;
+                       break;
+               }
+               value |= (uleb128_t) (*cur & 0x7f) << shift;
+               if (!(*cur++ & 0x80))
+                       break;
+       }
+       *pcur = cur;
+
+       return value;
+}
+
+/*
+ * Decode a signed LEB128 value at *pcur, not reading past @end.
+ * Same overflow convention as get_uleb128() (*pcur set to end + 1 on
+ * error); the final byte's 0x40 bit triggers sign extension.
+ */
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+       const u8 *cur = *pcur;
+       sleb128_t value;
+       unsigned shift;
+
+       for (shift = 0, value = 0; cur < end; shift += 7) {
+               if (shift + 7 > 8 * sizeof(value)
+                   && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+                       cur = end + 1;
+                       break;
+               }
+               value |= (sleb128_t) (*cur & 0x7f) << shift;
+               if (!(*cur & 0x80)) {
+                       /* sign-extend from bit 6 of the last byte */
+                       value |= -(*cur++ & 0x40) << shift;
+                       break;
+               }
+       }
+       *pcur = cur;
+
+       return value;
+}
+
+/*
+ * Resolve the CIE referenced by @fde and sanity-check both records.
+ * Returns &not_fde when the entry is itself a CIE (id == 0xffffffff),
+ * &bad_cie for a corrupt length word, NULL for an invalid FDE/CIE pair,
+ * otherwise the CIE pointer.  NOTE: here fde[1] is treated as an
+ * absolute CIE address rather than the usual self-relative offset (the
+ * standard computation is left commented out) — presumably matching how
+ * this port emits its unwind data; confirm against the ARC toolchain.
+ */
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
+{
+       const u32 *cie;
+
+       if (!*fde || (*fde & (sizeof(*fde) - 1)))
+               return &bad_cie;
+
+       if (fde[1] == 0xffffffff)
+               return &not_fde;        /* this is a CIE */
+
+       if ((fde[1] & (sizeof(*fde) - 1)))
+/* || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) */
+               return NULL;    /* this is not a valid FDE */
+
+       /* cie = fde + 1 - fde[1] / sizeof(*fde); */
+       cie = (u32 *) fde[1];
+
+       if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
+           || (*cie & (sizeof(*cie) - 1))
+           || (cie[1] != 0xffffffff))
+               return NULL;    /* this is not a (valid) CIE */
+       return cie;
+}
+
+/*
+ * Read one encoded pointer from *pLoc (bounded by @end) according to the
+ * DW_EH_PE_* encoding @ptrType: the low nibble selects the storage form
+ * (native word, 2/4/8-byte, LEB128), DW_EH_PE_ADJUST bits the base
+ * (absolute or pc-relative), and DW_EH_PE_indirect a final dereference.
+ * Advances *pLoc past the consumed bytes on success; returns 0 on any
+ * bounds/encoding error (0 doubles as an error value here).
+ */
+static unsigned long read_pointer(const u8 **pLoc, const void *end,
+                                 signed ptrType)
+{
+       unsigned long value = 0;
+       union {
+               const u8 *p8;
+               const u16 *p16u;
+               const s16 *p16s;
+               const u32 *p32u;
+               const s32 *p32s;
+               const unsigned long *pul;
+       } ptr;
+
+       if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+               return 0;
+       ptr.p8 = *pLoc;
+       switch (ptrType & DW_EH_PE_FORM) {
+       case DW_EH_PE_data2:
+               if (end < (const void *)(ptr.p16u + 1))
+                       return 0;
+               if (ptrType & DW_EH_PE_signed)
+                       value = get_unaligned((u16 *) ptr.p16s++);
+               else
+                       value = get_unaligned((u16 *) ptr.p16u++);
+               break;
+       case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+               if (end < (const void *)(ptr.p32u + 1))
+                       return 0;
+               if (ptrType & DW_EH_PE_signed)
+                       value = get_unaligned(ptr.p32s++);
+               else
+                       value = get_unaligned(ptr.p32u++);
+               break;
+       case DW_EH_PE_data8:
+               BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+               BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
+               /* on 32-bit, data4 falls through here: same width as long */
+       case DW_EH_PE_native:
+               if (end < (const void *)(ptr.pul + 1))
+                       return 0;
+               value = get_unaligned((unsigned long *)ptr.pul++);
+               break;
+       case DW_EH_PE_leb128:
+               BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+               value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end)
+                   : get_uleb128(&ptr.p8, end);
+               if ((const void *)ptr.p8 > end)
+                       return 0;
+               break;
+       default:
+               return 0;
+       }
+       switch (ptrType & DW_EH_PE_ADJUST) {
+       case DW_EH_PE_abs:
+               break;
+       case DW_EH_PE_pcrel:
+               value += (unsigned long)*pLoc;
+               break;
+       default:
+               /* textrel/datarel/funcrel/aligned not supported */
+               return 0;
+       }
+       if ((ptrType & DW_EH_PE_indirect)
+           && __get_user(value, (unsigned long __user *)value))
+               return 0;
+       *pLoc = ptr.p8;
+
+       return value;
+}
+
+/*
+ * Parse @cie's augmentation data to find the DW_EH_PE_* encoding used
+ * for pointers in its FDEs (the 'R' augmentation character).  Returns
+ * that encoding, DW_EH_PE_native|DW_EH_PE_abs when the CIE has no
+ * augmentation string, or -1 on an unsupported/malformed CIE (only
+ * version 1 with a leading 'z' augmentation is accepted).
+ */
+static signed fde_pointer_type(const u32 *cie)
+{
+       const u8 *ptr = (const u8 *)(cie + 2);
+       unsigned version = *ptr;
+
+       if (version != 1)
+               return -1;      /* unsupported */
+
+       if (*++ptr) {
+               const char *aug;
+               const u8 *end = (const u8 *)(cie + 1) + *cie;
+               uleb128_t len;
+
+               /* check if augmentation size is first (and thus present) */
+               if (*ptr != 'z')
+                       return -1;
+
+               /* check if augmentation string is nul-terminated */
+               aug = (const void *)ptr;
+               ptr = memchr(aug, 0, end - ptr);
+               if (ptr == NULL)
+                       return -1;
+
+               ++ptr;          /* skip terminator */
+               get_uleb128(&ptr, end); /* skip code alignment */
+               get_sleb128(&ptr, end); /* skip data alignment */
+               /* skip return address column */
+               version <= 1 ? (void) ++ptr : (void)get_uleb128(&ptr, end);
+               len = get_uleb128(&ptr, end);   /* augmentation length */
+
+               /* ptr + len < ptr also catches wraparound */
+               if (ptr + len < ptr || ptr + len > end)
+                       return -1;
+
+               end = ptr + len;
+               while (*++aug) {
+                       if (ptr >= end)
+                               return -1;
+                       switch (*aug) {
+                       case 'L':
+                               /* LSDA encoding byte: skip */
+                               ++ptr;
+                               break;
+                       case 'P':{
+                                       /* personality routine: decode and
+                                        * discard, just to advance ptr */
+                                       signed ptrType = *ptr++;
+
+                                       if (!read_pointer(&ptr, end, ptrType)
+                                           || ptr > end)
+                                               return -1;
+                               }
+                               break;
+                       case 'R':
+                               /* FDE pointer encoding: what we came for */
+                               return *ptr;
+                       default:
+                               return -1;
+                       }
+               }
+       }
+       return DW_EH_PE_native | DW_EH_PE_abs;
+}
+
+/*
+ * Advance the CFI interpreter's current location by @delta code-alignment
+ * units.  Always reports success (1); see the FIXME about whether
+ * delta > 0 should be required instead.
+ */
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+       state->loc += delta * state->codeAlign;
+
+       /* FIXME_Rajesh: Probably we are defining for the initial range as well;
+          return delta > 0;
+        */
+       unw_debug("delta %3lu => loc 0x%lx: ", delta, state->loc);
+       return 1;
+}
+
+/*
+ * Record a recovery rule for DWARF register @reg: @where says how to
+ * restore it (Nowhere/Memory/Register/Value) and @value is the rule's
+ * operand (CFA offset, source register number, ...).  Out-of-range
+ * register numbers are silently ignored.
+ */
+static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value,
+                    struct unwind_state *state)
+{
+       if (reg < ARRAY_SIZE(state->regs)) {
+               state->regs[reg].where = where;
+               state->regs[reg].value = value;
+
+#ifdef UNWIND_DEBUG
+               unw_debug("r%lu: ", reg);
+               switch (where) {
+               case Nowhere:
+                       unw_debug("s ");
+                       break;
+               case Memory:
+                       unw_debug("c(%lu) ", value);
+                       break;
+               case Register:
+                       unw_debug("r(%lu) ", value);
+                       break;
+               case Value:
+                       unw_debug("v(%lu) ", value);
+                       break;
+               default:
+                       break;
+               }
+#endif
+       }
+}
+
+/*
+ * Interpret the DWARF Call Frame Instructions in [start, end), updating
+ * @state (CFA rule and per-register recovery rules) until the current
+ * location passes @targetLoc (0 means "run to the end", as when replaying
+ * a CIE's initial instructions).  When @start is not the CIE's own
+ * instruction stream, the CIE initial instructions are replayed first via
+ * the recursive call below; state->label is used by the
+ * remember/restore_state opcodes to re-run a prefix of the stream.
+ * Returns 1 on success, 0 on malformed input.  Each top-level case keys
+ * off the 2 high bits of the opcode byte: 0 = extended opcodes, 1 =
+ * advance_loc, 2 = offset, 3 = restore (low 6 bits are the operand).
+ */
+static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
+                     signed ptrType, struct unwind_state *state)
+{
+       union {
+               const u8 *p8;
+               const u16 *p16;
+               const u32 *p32;
+       } ptr;
+       int result = 1;
+       u8 opcode;
+
+       if (start != state->cieStart) {
+               /* replay the CIE's initial instructions first */
+               state->loc = state->org;
+               result =
+                   processCFI(state->cieStart, state->cieEnd, 0, ptrType,
+                              state);
+               if (targetLoc == 0 && state->label == NULL)
+                       return result;
+       }
+       for (ptr.p8 = start; result && ptr.p8 < end;) {
+               switch (*ptr.p8 >> 6) {
+                       uleb128_t value;
+
+               case 0:
+                       opcode = *ptr.p8++;
+
+                       switch (opcode) {
+                       case DW_CFA_nop:
+                               unw_debug("cfa nop ");
+                               break;
+                       case DW_CFA_set_loc:
+                               state->loc = read_pointer(&ptr.p8, end,
+                                                         ptrType);
+                               if (state->loc == 0)
+                                       result = 0;
+                               unw_debug("cfa_set_loc: 0x%lx ", state->loc);
+                               break;
+                       case DW_CFA_advance_loc1:
+                               unw_debug("\ncfa advance loc1:");
+                               result = ptr.p8 < end
+                                   && advance_loc(*ptr.p8++, state);
+                               break;
+                       case DW_CFA_advance_loc2:
+                               /* 2-byte little-endian delta, read bytewise */
+                               value = *ptr.p8++;
+                               value += *ptr.p8++ << 8;
+                               unw_debug("\ncfa advance loc2:");
+                               result = ptr.p8 <= end + 2
+                                   /* && advance_loc(*ptr.p16++, state); */
+                                   && advance_loc(value, state);
+                               break;
+                       case DW_CFA_advance_loc4:
+                               unw_debug("\ncfa advance loc4:");
+                               result = ptr.p8 <= end + 4
+                                   && advance_loc(*ptr.p32++, state);
+                               break;
+                       case DW_CFA_offset_extended:
+                               value = get_uleb128(&ptr.p8, end);
+                               unw_debug("cfa_offset_extended: ");
+                               set_rule(value, Memory,
+                                        get_uleb128(&ptr.p8, end), state);
+                               break;
+                       case DW_CFA_val_offset:
+                               value = get_uleb128(&ptr.p8, end);
+                               set_rule(value, Value,
+                                        get_uleb128(&ptr.p8, end), state);
+                               break;
+                       case DW_CFA_offset_extended_sf:
+                               value = get_uleb128(&ptr.p8, end);
+                               set_rule(value, Memory,
+                                        get_sleb128(&ptr.p8, end), state);
+                               break;
+                       case DW_CFA_val_offset_sf:
+                               value = get_uleb128(&ptr.p8, end);
+                               set_rule(value, Value,
+                                        get_sleb128(&ptr.p8, end), state);
+                               break;
+                       /* next three fall through to the shared Nowhere rule
+                        * (only the debug trace differs) */
+                       case DW_CFA_restore_extended:
+                               unw_debug("cfa_restore_extended: ");
+                       case DW_CFA_undefined:
+                               unw_debug("cfa_undefined: ");
+                       case DW_CFA_same_value:
+                               unw_debug("cfa_same_value: ");
+                               set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,
+                                        state);
+                               break;
+                       case DW_CFA_register:
+                               unw_debug("cfa_register: ");
+                               value = get_uleb128(&ptr.p8, end);
+                               set_rule(value,
+                                        Register,
+                                        get_uleb128(&ptr.p8, end), state);
+                               break;
+                       case DW_CFA_remember_state:
+                               unw_debug("cfa_remember_state: ");
+                               if (ptr.p8 == state->label) {
+                                       /* reached the replay target */
+                                       state->label = NULL;
+                                       return 1;
+                               }
+                               if (state->stackDepth >= MAX_STACK_DEPTH)
+                                       return 0;
+                               state->stack[state->stackDepth++] = ptr.p8;
+                               break;
+                       case DW_CFA_restore_state:
+                               unw_debug("cfa_restore_state: ");
+                               if (state->stackDepth) {
+                                       const uleb128_t loc = state->loc;
+                                       const u8 *label = state->label;
+
+                                       /* re-run the stream from the start up
+                                        * to the remembered label to rebuild
+                                        * the saved state */
+                                       state->label =
+                                           state->stack[state->stackDepth - 1];
+                                       memcpy(&state->cfa, &badCFA,
+                                              sizeof(state->cfa));
+                                       memset(state->regs, 0,
+                                              sizeof(state->regs));
+                                       state->stackDepth = 0;
+                                       result =
+                                           processCFI(start, end, 0, ptrType,
+                                                      state);
+                                       state->loc = loc;
+                                       state->label = label;
+                               } else
+                                       return 0;
+                               break;
+                       case DW_CFA_def_cfa:
+                               state->cfa.reg = get_uleb128(&ptr.p8, end);
+                               unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
+                               /*nobreak*/
+                       case DW_CFA_def_cfa_offset:
+                               state->cfa.offs = get_uleb128(&ptr.p8, end);
+                               unw_debug("cfa_def_cfa_offset: 0x%lx ",
+                                         state->cfa.offs);
+                               break;
+                       case DW_CFA_def_cfa_sf:
+                               state->cfa.reg = get_uleb128(&ptr.p8, end);
+                               /*nobreak */
+                       case DW_CFA_def_cfa_offset_sf:
+                               state->cfa.offs = get_sleb128(&ptr.p8, end)
+                                   * state->dataAlign;
+                               break;
+                       case DW_CFA_def_cfa_register:
+                               unw_debug("cfa_def_cfa_regsiter: ");
+                               state->cfa.reg = get_uleb128(&ptr.p8, end);
+                               break;
+                               /*todo case DW_CFA_def_cfa_expression: */
+                               /*todo case DW_CFA_expression: */
+                               /*todo case DW_CFA_val_expression: */
+                       case DW_CFA_GNU_args_size:
+                               /* operand not needed here: skip */
+                               get_uleb128(&ptr.p8, end);
+                               break;
+                       case DW_CFA_GNU_negative_offset_extended:
+                               value = get_uleb128(&ptr.p8, end);
+                               set_rule(value,
+                                        Memory,
+                                        (uleb128_t) 0 - get_uleb128(&ptr.p8,
+                                                                    end),
+                                        state);
+                               break;
+                       case DW_CFA_GNU_window_save:
+                       default:
+                               unw_debug("UNKNOW OPCODE 0x%x\n", opcode);
+                               result = 0;
+                               break;
+                       }
+                       break;
+               case 1:
+                       unw_debug("\ncfa_adv_loc: ");
+                       result = advance_loc(*ptr.p8++ & 0x3f, state);
+                       break;
+               case 2:
+                       unw_debug("cfa_offset: ");
+                       value = *ptr.p8++ & 0x3f;
+                       set_rule(value, Memory, get_uleb128(&ptr.p8, end),
+                                state);
+                       break;
+               case 3:
+                       unw_debug("cfa_restore: ");
+                       set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+                       break;
+               }
+
+               if (ptr.p8 > end)
+                       result = 0;
+               if (result && targetLoc != 0 && targetLoc < state->loc)
+                       return 1;
+       }
+
+       return result && ptr.p8 == end && (targetLoc == 0 || (
+               /*todo While in theory this should apply, gcc in practice omits
+                 everything past the function prolog, and hence the location
+                 never reaches the end of the function.
+               targetLoc < state->loc && */  state->label == NULL));
+}
+
+/* Unwind to previous to frame.  Returns 0 if successful, negative
+ * number in case of an error. */
+int arc_unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+       const u32 *fde = NULL, *cie = NULL;
+       const u8 *ptr = NULL, *end = NULL;
+       unsigned long pc = UNW_PC(frame) - frame->call_frame;
+       unsigned long startLoc = 0, endLoc = 0, cfa;
+       unsigned i;
+       signed ptrType = -1;
+       uleb128_t retAddrReg = 0;
+       const struct unwind_table *table;
+       struct unwind_state state;
+       unsigned long *fptr;
+       unsigned long addr;
+
+       unw_debug("\n\nUNWIND FRAME:\n");
+       unw_debug("PC: 0x%lx BLINK: 0x%lx, SP: 0x%lx, FP: 0x%x\n",
+                 UNW_PC(frame), UNW_BLINK(frame), UNW_SP(frame),
+                 UNW_FP(frame));
+
+       if (UNW_PC(frame) == 0)
+               return -EINVAL;
+
+#ifdef UNWIND_DEBUG
+       {
+               unsigned long *sptr = (unsigned long *)UNW_SP(frame);
+               unw_debug("\nStack Dump:\n");
+               for (i = 0; i < 20; i++, sptr++)
+                       unw_debug("0x%p:  0x%lx\n", sptr, *sptr);
+               unw_debug("\n");
+       }
+#endif
+
+       table = find_table(pc);
+       if (table != NULL
+           && !(table->size & (sizeof(*fde) - 1))) {
+               const u8 *hdr = table->header;
+               unsigned long tableSize;
+
+               smp_rmb();
+               if (hdr && hdr[0] == 1) {
+                       switch (hdr[3] & DW_EH_PE_FORM) {
+                       case DW_EH_PE_native:
+                               tableSize = sizeof(unsigned long);
+                               break;
+                       case DW_EH_PE_data2:
+                               tableSize = 2;
+                               break;
+                       case DW_EH_PE_data4:
+                               tableSize = 4;
+                               break;
+                       case DW_EH_PE_data8:
+                               tableSize = 8;
+                               break;
+                       default:
+                               tableSize = 0;
+                               break;
+                       }
+                       ptr = hdr + 4;
+                       end = hdr + table->hdrsz;
+                       if (tableSize && read_pointer(&ptr, end, hdr[1])
+                           == (unsigned long)table->address
+                           && (i = read_pointer(&ptr, end, hdr[2])) > 0
+                           && i == (end - ptr) / (2 * tableSize)
+                           && !((end - ptr) % (2 * tableSize))) {
+                               do {
+                                       const u8 *cur =
+                                           ptr + (i / 2) * (2 * tableSize);
+
+                                       startLoc = read_pointer(&cur,
+                                                               cur + tableSize,
+                                                               hdr[3]);
+                                       if (pc < startLoc)
+                                               i /= 2;
+                                       else {
+                                               ptr = cur - tableSize;
+                                               i = (i + 1) / 2;
+                                       }
+                               } while (startLoc && i > 1);
+                               if (i == 1
+                                   && (startLoc = read_pointer(&ptr,
+                                                               ptr + tableSize,
+                                                               hdr[3])) != 0
+                                   && pc >= startLoc)
+                                       fde = (void *)read_pointer(&ptr,
+                                                                  ptr +
+                                                                  tableSize,
+                                                                  hdr[3]);
+                       }
+               }
+
+               if (fde != NULL) {
+                       cie = cie_for_fde(fde, table);
+                       ptr = (const u8 *)(fde + 2);
+                       if (cie != NULL
+                           && cie != &bad_cie
+                           && cie != &not_fde
+                           && (ptrType = fde_pointer_type(cie)) >= 0
+                           && read_pointer(&ptr,
+                                           (const u8 *)(fde + 1) + *fde,
+                                           ptrType) == startLoc) {
+                               if (!(ptrType & DW_EH_PE_indirect))
+                                       ptrType &=
+                                           DW_EH_PE_FORM | DW_EH_PE_signed;
+                               endLoc =
+                                   startLoc + read_pointer(&ptr,
+                                                           (const u8 *)(fde +
+                                                                        1) +
+                                                           *fde, ptrType);
+                               if (pc >= endLoc)
+                                       fde = NULL;
+                       } else
+                               fde = NULL;
+               }
+               if (fde == NULL) {
+                       for (fde = table->address, tableSize = table->size;
+                            cie = NULL, tableSize > sizeof(*fde)
+                            && tableSize - sizeof(*fde) >= *fde;
+                            tableSize -= sizeof(*fde) + *fde,
+                            fde += 1 + *fde / sizeof(*fde)) {
+                               cie = cie_for_fde(fde, table);
+                               if (cie == &bad_cie) {
+                                       cie = NULL;
+                                       break;
+                               }
+                               if (cie == NULL
+                                   || cie == &not_fde
+                                   || (ptrType = fde_pointer_type(cie)) < 0)
+                                       continue;
+                               ptr = (const u8 *)(fde + 2);
+                               startLoc = read_pointer(&ptr,
+                                                       (const u8 *)(fde + 1) +
+                                                       *fde, ptrType);
+                               if (!startLoc)
+                                       continue;
+                               if (!(ptrType & DW_EH_PE_indirect))
+                                       ptrType &=
+                                           DW_EH_PE_FORM | DW_EH_PE_signed;
+                               endLoc =
+                                   startLoc + read_pointer(&ptr,
+                                                           (const u8 *)(fde +
+                                                                        1) +
+                                                           *fde, ptrType);
+                               if (pc >= startLoc && pc < endLoc)
+                                       break;
+                       }
+               }
+       }
+       if (cie != NULL) {
+               memset(&state, 0, sizeof(state));
+               state.cieEnd = ptr;     /* keep here temporarily */
+               ptr = (const u8 *)(cie + 2);
+               end = (const u8 *)(cie + 1) + *cie;
+               frame->call_frame = 1;
+               if ((state.version = *ptr) != 1)
+                       cie = NULL;     /* unsupported version */
+               else if (*++ptr) {
+                       /* check if augmentation size is first (thus present) */
+                       if (*ptr == 'z') {
+                               while (++ptr < end && *ptr) {
+                                       switch (*ptr) {
+                                       /* chk for ignorable or already handled
+                                        * nul-terminated augmentation string */
+                                       case 'L':
+                                       case 'P':
+                                       case 'R':
+                                               continue;
+                                       case 'S':
+                                               frame->call_frame = 0;
+                                               continue;
+                                       default:
+                                               break;
+                                       }
+                                       break;
+                               }
+                       }
+                       if (ptr >= end || *ptr)
+                               cie = NULL;
+               }
+               ++ptr;
+       }
+       if (cie != NULL) {
+               /* get code aligment factor */
+               state.codeAlign = get_uleb128(&ptr, end);
+               /* get data aligment factor */
+               state.dataAlign = get_sleb128(&ptr, end);
+               if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+                       cie = NULL;
+               else {
+                       retAddrReg =
+                           state.version <= 1 ? *ptr++ : get_uleb128(&ptr,
+                                                                     end);
+                       unw_debug("CIE Frame Info:\n");
+                       unw_debug("return Address register 0x%lx\n",
+                                 retAddrReg);
+                       unw_debug("data Align: %ld\n", state.dataAlign);
+                       unw_debug("code Align: %lu\n", state.codeAlign);
+                       /* skip augmentation */
+                       if (((const char *)(cie + 2))[1] == 'z') {
+                               uleb128_t augSize = get_uleb128(&ptr, end);
+
+                               ptr += augSize;
+                       }
+                       if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info)
+                           || REG_INVALID(retAddrReg)
+                           || reg_info[retAddrReg].width !=
+                           sizeof(unsigned long))
+                               cie = NULL;
+               }
+       }
+       if (cie != NULL) {
+               state.cieStart = ptr;
+               ptr = state.cieEnd;
+               state.cieEnd = end;
+               end = (const u8 *)(fde + 1) + *fde;
+               /* skip augmentation */
+               if (((const char *)(cie + 2))[1] == 'z') {
+                       uleb128_t augSize = get_uleb128(&ptr, end);
+
+                       if ((ptr += augSize) > end)
+                               fde = NULL;
+               }
+       }
+       if (cie == NULL || fde == NULL) {
+#ifdef CONFIG_FRAME_POINTER
+               unsigned long top, bottom;
+
+               top = STACK_TOP_UNW(frame->task);
+               bottom = STACK_BOTTOM_UNW(frame->task);
+#if FRAME_RETADDR_OFFSET < 0
+               if (UNW_SP(frame) < top && UNW_FP(frame) <= UNW_SP(frame)
+                   && bottom < UNW_FP(frame)
+#else
+               if (UNW_SP(frame) > top && UNW_FP(frame) >= UNW_SP(frame)
+                   && bottom > UNW_FP(frame)
+#endif
+                   && !((UNW_SP(frame) | UNW_FP(frame))
+                        & (sizeof(unsigned long) - 1))) {
+                       unsigned long link;
+
+                       if (!__get_user(link, (unsigned long *)
+                                       (UNW_FP(frame) + FRAME_LINK_OFFSET))
+#if FRAME_RETADDR_OFFSET < 0
+                           && link > bottom && link < UNW_FP(frame)
+#else
+                           && link > UNW_FP(frame) && link < bottom
+#endif
+                           && !(link & (sizeof(link) - 1))
+                           && !__get_user(UNW_PC(frame),
+                                          (unsigned long *)(UNW_FP(frame)
+                                               + FRAME_RETADDR_OFFSET)))
+                       {
+                               UNW_SP(frame) =
+                                   UNW_FP(frame) + FRAME_RETADDR_OFFSET
+#if FRAME_RETADDR_OFFSET < 0
+                                   -
+#else
+                                   +
+#endif
+                                   sizeof(UNW_PC(frame));
+                               UNW_FP(frame) = link;
+                               return 0;
+                       }
+               }
+#endif
+               return -ENXIO;
+       }
+       state.org = startLoc;
+       memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+
+       unw_debug("\nProcess instructions\n");
+
+       /* process instructions
+        * For ARC, we optimize by having blink(retAddrReg) with
+        * the sameValue in the leaf function, so we should not check
+        * state.regs[retAddrReg].where == Nowhere
+        */
+       if (!processCFI(ptr, end, pc, ptrType, &state)
+           || state.loc > endLoc
+/*        || state.regs[retAddrReg].where == Nowhere */
+           || state.cfa.reg >= ARRAY_SIZE(reg_info)
+           || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+           || state.cfa.offs % sizeof(unsigned long))
+               return -EIO;
+
+#ifdef UNWIND_DEBUG
+       unw_debug("\n");
+
+       unw_debug("\nRegister State Based on the rules parsed from FDE:\n");
+       for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+
+               if (REG_INVALID(i))
+                       continue;
+
+               switch (state.regs[i].where) {
+               case Nowhere:
+                       break;
+               case Memory:
+                       unw_debug(" r%d: c(%lu),", i, state.regs[i].value);
+                       break;
+               case Register:
+                       unw_debug(" r%d: r(%lu),", i, state.regs[i].value);
+                       break;
+               case Value:
+                       unw_debug(" r%d: v(%lu),", i, state.regs[i].value);
+                       break;
+               }
+       }
+
+       unw_debug("\n");
+#endif
+
+       /* update frame */
+#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
+       if (frame->call_frame
+           && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
+               frame->call_frame = 0;
+#endif
+       cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+       startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
+       endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
+       if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+               startLoc = min(STACK_LIMIT(cfa), cfa);
+               endLoc = max(STACK_LIMIT(cfa), cfa);
+       }
+
+       unw_debug("\nCFA reg: 0x%lx, offset: 0x%lx =>  0x%lx\n",
+                 state.cfa.reg, state.cfa.offs, cfa);
+
+       for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+               if (REG_INVALID(i)) {
+                       if (state.regs[i].where == Nowhere)
+                               continue;
+                       return -EIO;
+               }
+               switch (state.regs[i].where) {
+               default:
+                       break;
+               case Register:
+                       if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+                           || REG_INVALID(state.regs[i].value)
+                           || reg_info[i].width >
+                           reg_info[state.regs[i].value].width)
+                               return -EIO;
+                       switch (reg_info[state.regs[i].value].width) {
+                       case sizeof(u8):
+                               state.regs[i].value =
+                               FRAME_REG(state.regs[i].value, const u8);
+                               break;
+                       case sizeof(u16):
+                               state.regs[i].value =
+                               FRAME_REG(state.regs[i].value, const u16);
+                               break;
+                       case sizeof(u32):
+                               state.regs[i].value =
+                               FRAME_REG(state.regs[i].value, const u32);
+                               break;
+#ifdef CONFIG_64BIT
+                       case sizeof(u64):
+                               state.regs[i].value =
+                               FRAME_REG(state.regs[i].value, const u64);
+                               break;
+#endif
+                       default:
+                               return -EIO;
+                       }
+                       break;
+               }
+       }
+
+       unw_debug("\nRegister state after evaluation with realtime Stack:\n");
+       fptr = (unsigned long *)(&frame->regs);
+       for (i = 0; i < ARRAY_SIZE(state.regs); ++i, fptr++) {
+
+               if (REG_INVALID(i))
+                       continue;
+               switch (state.regs[i].where) {
+               case Nowhere:
+                       if (reg_info[i].width != sizeof(UNW_SP(frame))
+                           || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+                           != &UNW_SP(frame))
+                               continue;
+                       UNW_SP(frame) = cfa;
+                       break;
+               case Register:
+                       switch (reg_info[i].width) {
+                       case sizeof(u8):
+                               FRAME_REG(i, u8) = state.regs[i].value;
+                               break;
+                       case sizeof(u16):
+                               FRAME_REG(i, u16) = state.regs[i].value;
+                               break;
+                       case sizeof(u32):
+                               FRAME_REG(i, u32) = state.regs[i].value;
+                               break;
+#ifdef CONFIG_64BIT
+                       case sizeof(u64):
+                               FRAME_REG(i, u64) = state.regs[i].value;
+                               break;
+#endif
+                       default:
+                               return -EIO;
+                       }
+                       break;
+               case Value:
+                       if (reg_info[i].width != sizeof(unsigned long))
+                               return -EIO;
+                       FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+                           * state.dataAlign;
+                       break;
+               case Memory:
+                       addr = cfa + state.regs[i].value * state.dataAlign;
+
+                       if ((state.regs[i].value * state.dataAlign)
+                           % sizeof(unsigned long)
+                           || addr < startLoc
+                           || addr + sizeof(unsigned long) < addr
+                           || addr + sizeof(unsigned long) > endLoc)
+                                       return -EIO;
+
+                       switch (reg_info[i].width) {
+                       case sizeof(u8):
+                               __get_user(FRAME_REG(i, u8),
+                                          (u8 __user *)addr);
+                               break;
+                       case sizeof(u16):
+                               __get_user(FRAME_REG(i, u16),
+                                          (u16 __user *)addr);
+                               break;
+                       case sizeof(u32):
+                               __get_user(FRAME_REG(i, u32),
+                                          (u32 __user *)addr);
+                               break;
+#ifdef CONFIG_64BIT
+                       case sizeof(u64):
+                               __get_user(FRAME_REG(i, u64),
+                                          (u64 __user *)addr);
+                               break;
+#endif
+                       default:
+                               return -EIO;
+                       }
+
+                       break;
+               }
+               unw_debug("r%d: 0x%lx ", i, *fptr);
+       }
+
+       return 0;
+#undef FRAME_REG
+}
+EXPORT_SYMBOL(arc_unwind);
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
new file mode 100644 (file)
index 0000000..d3c92f5
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(arc)
+ENTRY(_stext)
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+SECTIONS
+{
+       /*
+        * ICCM starts at 0x8000_0000. So if kernel is relocated to some other
+        * address, make sure peripheral at 0x8z doesn't clash with ICCM
+        * Essentially vector is also in ICCM.
+        */
+
+       . = CONFIG_LINUX_LINK_BASE;
+
+       _int_vec_base_lds = .;
+       .vector : {
+               *(.vector)
+               . = ALIGN(PAGE_SIZE);
+       }
+
+#ifdef CONFIG_ARC_HAS_ICCM
+       .text.arcfp : {
+               *(.text.arcfp)
+               . = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
+       }
+#endif
+
+       /*
+        * The reason for having a seperate subsection .init.ramfs is to
+        * prevent objump from including it in kernel dumps
+        *
+        * Reason for having .init.ramfs above .init is to make sure that the
+        * binary blob is tucked away to one side, reducing the displacement
+        * between .init.text and .text, avoiding any possible relocation
+        * errors because of calls from .init.text to .text
+        * Yes such calls do exist. e.g.
+        *      decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
+        */
+
+       __init_begin = .;
+
+       .init.ramfs : { INIT_RAM_FS }
+
+       . = ALIGN(PAGE_SIZE);
+       _stext = .;
+
+       HEAD_TEXT_SECTION
+       INIT_TEXT_SECTION(L1_CACHE_BYTES)
+
+       /* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
+       .init.data : {
+               INIT_DATA
+               INIT_SETUP(L1_CACHE_BYTES)
+               INIT_CALLS
+               CON_INITCALL
+               SECURITY_INITCALL
+       }
+
+       .init.arch.info : {
+               __arch_info_begin = .;
+               *(.arch.info.init)
+               __arch_info_end = .;
+       }
+
+       PERCPU_SECTION(L1_CACHE_BYTES)
+
+       /*
+        * .exit.text is discard at runtime, not link time, to deal with
+        * references from .debug_frame
+        * It will be init freed, being inside [__init_start : __init_end]
+        */
+       .exit.text : { EXIT_TEXT }
+       .exit.data : { EXIT_DATA }
+
+       . = ALIGN(PAGE_SIZE);
+       __init_end = .;
+
+       .text : {
+               _text = .;
+               TEXT_TEXT
+               SCHED_TEXT
+               LOCK_TEXT
+               KPROBES_TEXT
+               *(.fixup)
+               *(.gnu.warning)
+       }
+       EXCEPTION_TABLE(L1_CACHE_BYTES)
+       _etext = .;
+
+       _sdata = .;
+       RO_DATA_SECTION(PAGE_SIZE)
+
+       /*
+        * 1. this is .data essentially
+        * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
+        */
+       RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+
+       _edata = .;
+
+       BSS_SECTION(0, 0, 0)
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+       . = ALIGN(PAGE_SIZE);
+       .debug_frame  : {
+               __start_unwind = .;
+               *(.debug_frame)
+               __end_unwind = .;
+       }
+#else
+       /DISCARD/ : {   *(.debug_frame) }
+#endif
+
+       NOTES
+
+       . = ALIGN(PAGE_SIZE);
+       _end = . ;
+
+       STABS_DEBUG
+       DISCARDS
+
+       .arcextmap 0 : {
+               *(.gnu.linkonce.arcextmap.*)
+               *(.arcextmap.*)
+       }
+
+       /* open-coded because we need .debug_frame seperately for unwinding */
+       .debug_aranges 0 : { *(.debug_aranges) }
+       .debug_pubnames 0 : { *(.debug_pubnames) }
+       .debug_info 0 : { *(.debug_info) }
+       .debug_abbrev 0 : { *(.debug_abbrev) }
+       .debug_line 0 : { *(.debug_line) }
+       .debug_str 0 : { *(.debug_str) }
+       .debug_loc 0 : { *(.debug_loc) }
+       .debug_macinfo 0 : { *(.debug_macinfo) }
+
+#ifdef CONFIG_ARC_HAS_DCCM
+       . = CONFIG_ARC_DCCM_BASE;
+       __arc_dccm_base = .;
+       .data.arcfp : {
+               *(.data.arcfp)
+       }
+       . = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
+#endif
+}
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
new file mode 100644 (file)
index 0000000..db46e20
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+lib-y  := strchr-700.o strcmp.o strcpy-700.o strlen.o
+lib-y  += memcmp.o memcpy-700.o memset.o
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
new file mode 100644 (file)
index 0000000..bc813d5
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+#ifdef __LITTLE_ENDIAN__
+#define WORD2 r2
+#define SHIFT r3
+#else /* BIG ENDIAN */
+#define WORD2 r3
+#define SHIFT r2
+#endif
+
+ARC_ENTRY memcmp
+       or      r12,r0,r1
+       asl_s   r12,r12,30
+       sub     r3,r2,1
+       brls    r2,r12,.Lbytewise
+       ld      r4,[r0,0]
+       ld      r5,[r1,0]
+       lsr.f   lp_count,r3,3
+       lpne    .Loop_end
+       ld_s    WORD2,[r0,4]
+       ld_s    r12,[r1,4]
+       brne    r4,r5,.Leven
+       ld.a    r4,[r0,8]
+       ld.a    r5,[r1,8]
+       brne    WORD2,r12,.Lodd
+.Loop_end:
+       asl_s   SHIFT,SHIFT,3
+       bhs_s   .Last_cmp
+       brne    r4,r5,.Leven
+       ld      r4,[r0,4]
+       ld      r5,[r1,4]
+#ifdef __LITTLE_ENDIAN__
+       nop_s
+       ; one more load latency cycle
+.Last_cmp:
+       xor     r0,r4,r5
+       bset    r0,r0,SHIFT
+       sub_s   r1,r0,1
+       bic_s   r1,r1,r0
+       norm    r1,r1
+       b.d     .Leven_cmp
+       and     r1,r1,24
+.Leven:
+       xor     r0,r4,r5
+       sub_s   r1,r0,1
+       bic_s   r1,r1,r0
+       norm    r1,r1
+       ; slow track insn
+       and     r1,r1,24
+.Leven_cmp:
+       asl     r2,r4,r1
+       asl     r12,r5,r1
+       lsr_s   r2,r2,1
+       lsr_s   r12,r12,1
+       j_s.d   [blink]
+       sub     r0,r2,r12
+       .balign 4
+.Lodd:
+       xor     r0,WORD2,r12
+       sub_s   r1,r0,1
+       bic_s   r1,r1,r0
+       norm    r1,r1
+       ; slow track insn
+       and     r1,r1,24
+       asl_s   r2,r2,r1
+       asl_s   r12,r12,r1
+       lsr_s   r2,r2,1
+       lsr_s   r12,r12,1
+       j_s.d   [blink]
+       sub     r0,r2,r12
+#else /* BIG ENDIAN */
+.Last_cmp:
+       neg_s   SHIFT,SHIFT
+       lsr     r4,r4,SHIFT
+       lsr     r5,r5,SHIFT
+       ; slow track insn
+.Leven:
+       sub.f   r0,r4,r5
+       mov.ne  r0,1
+       j_s.d   [blink]
+       bset.cs r0,r0,31
+.Lodd:
+       cmp_s   WORD2,r12
+
+       mov_s   r0,1
+       j_s.d   [blink]
+       bset.cs r0,r0,31
+#endif /* ENDIAN */
+       .balign 4
+.Lbytewise:
+       breq    r2,0,.Lnil
+       ldb     r4,[r0,0]
+       ldb     r5,[r1,0]
+       lsr.f   lp_count,r3
+       lpne    .Lbyte_end
+       ldb_s   r3,[r0,1]
+       ldb     r12,[r1,1]
+       brne    r4,r5,.Lbyte_even
+       ldb.a   r4,[r0,2]
+       ldb.a   r5,[r1,2]
+       brne    r3,r12,.Lbyte_odd
+.Lbyte_end:
+       bcc     .Lbyte_even
+       brne    r4,r5,.Lbyte_even
+       ldb_s   r3,[r0,1]
+       ldb_s   r12,[r1,1]
+.Lbyte_odd:
+       j_s.d   [blink]
+       sub     r0,r3,r12
+.Lbyte_even:
+       j_s.d   [blink]
+       sub     r0,r4,r5
+.Lnil:
+       j_s.d   [blink]
+       mov     r0,0
+ARC_EXIT memcmp
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
new file mode 100644 (file)
index 0000000..b64cc10
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY memcpy
+       or      r3,r0,r1
+       asl_s   r3,r3,30
+       mov_s   r5,r0
+       brls.d  r2,r3,.Lcopy_bytewise
+       sub.f   r3,r2,1
+       ld_s    r12,[r1,0]
+       asr.f   lp_count,r3,3
+       bbit0.d r3,2,.Lnox4
+       bmsk_s  r2,r2,1
+       st.ab   r12,[r5,4]
+       ld.a    r12,[r1,4]
+.Lnox4:
+       lppnz   .Lendloop
+       ld_s    r3,[r1,4]
+       st.ab   r12,[r5,4]
+       ld.a    r12,[r1,8]
+       st.ab   r3,[r5,4]
+.Lendloop:
+       breq    r2,0,.Last_store
+       ld      r3,[r5,0]
+#ifdef __LITTLE_ENDIAN__
+       add3    r2,-1,r2
+       ; uses long immediate
+       xor_s   r12,r12,r3
+       bmsk    r12,r12,r2
+    xor_s      r12,r12,r3
+#else /* BIG ENDIAN */
+       sub3    r2,31,r2
+       ; uses long immediate
+        xor_s  r3,r3,r12
+        bmsk   r3,r3,r2
+        xor_s  r12,r12,r3
+#endif /* ENDIAN */
+.Last_store:
+       j_s.d   [blink]
+       st      r12,[r5,0]
+
+       .balign 4
+.Lcopy_bytewise:
+       jcs     [blink]
+       ldb_s   r12,[r1,0]
+       lsr.f   lp_count,r3
+       bhs_s   .Lnox1
+       stb.ab  r12,[r5,1]
+       ldb.a   r12,[r1,1]
+.Lnox1:
+       lppnz   .Lendbloop
+       ldb_s   r3,[r1,1]
+       stb.ab  r12,[r5,1]
+       ldb.a   r12,[r1,2]
+       stb.ab  r3,[r5,1]
+.Lendbloop:
+       j_s.d   [blink]
+       stb     r12,[r5,0]
+ARC_EXIT memcpy
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
new file mode 100644 (file)
index 0000000..9b2d88d
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+#define SMALL  7 /* Must be at least 6 to deal with alignment/loop issues.  */
+
+ARC_ENTRY memset
+       mov_s   r4,r0
+       or      r12,r0,r2
+       bmsk.f  r12,r12,1
+       extb_s  r1,r1
+       asl     r3,r1,8
+       beq.d   .Laligned
+       or_s    r1,r1,r3
+       brls    r2,SMALL,.Ltiny
+       add     r3,r2,r0
+       stb     r1,[r3,-1]
+       bclr_s  r3,r3,0
+       stw     r1,[r3,-2]
+       bmsk.f  r12,r0,1
+       add_s   r2,r2,r12
+       sub.ne  r2,r2,4
+       stb.ab  r1,[r4,1]
+       and     r4,r4,-2
+       stw.ab  r1,[r4,2]
+       and     r4,r4,-4
+.Laligned:     ; This code address should be aligned for speed.
+       asl     r3,r1,16
+       lsr.f   lp_count,r2,2
+       or_s    r1,r1,r3
+       lpne    .Loop_end
+       st.ab   r1,[r4,4]
+.Loop_end:
+       j_s     [blink]
+
+       .balign 4
+.Ltiny:
+       mov.f   lp_count,r2
+       lpne    .Ltiny_end
+       stb.ab  r1,[r4,1]
+.Ltiny_end:
+       j_s     [blink]
+ARC_EXIT memset
+
+; memzero: @r0 = mem, @r1 = size_t
+; memset:  @r0 = mem, @r1 = char, @r2 = size_t
+
+ARC_ENTRY memzero
+    ; adjust bzero args to memset args
+    mov r2, r1
+    mov r1, 0
+    b  memset    ;tail call so need to tinker with blink
+ARC_EXIT memzero
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
new file mode 100644 (file)
index 0000000..99c1047
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has a relatively long pipeline and branch prediction, so we want
+   to avoid branches that are hard to predict.  On the other hand, the
+   presence of the norm instruction makes it easier to operate on whole
+   words branch-free.  */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strchr
+       extb_s  r1,r1
+       asl     r5,r1,8
+       bmsk    r2,r0,1
+       or      r5,r5,r1
+       mov_s   r3,0x01010101
+       breq.d  r2,r0,.Laligned
+       asl     r4,r5,16
+       sub_s   r0,r0,r2
+       asl     r7,r2,3
+       ld_s    r2,[r0]
+#ifdef __LITTLE_ENDIAN__
+       asl     r7,r3,r7
+#else
+       lsr     r7,r3,r7
+#endif
+       or      r5,r5,r4
+       ror     r4,r3
+       sub     r12,r2,r7
+       bic_s   r12,r12,r2
+       and     r12,r12,r4
+       brne.d  r12,0,.Lfound0_ua
+       xor     r6,r2,r5
+       ld.a    r2,[r0,4]
+       sub     r12,r6,r7
+       bic     r12,r12,r6
+       and     r7,r12,r4
+       breq    r7,0,.Loop ; For speed, we want this branch to be unaligned.
+       b       .Lfound_char ; Likewise this one.
+; /* We require this code address to be unaligned for speed...  */
+.Laligned:
+       ld_s    r2,[r0]
+       or      r5,r5,r4
+       ror     r4,r3
+; /* ... so that this code address is aligned, for itself and ...  */
+.Loop:
+       sub     r12,r2,r3
+       bic_s   r12,r12,r2
+       and     r12,r12,r4
+       brne.d  r12,0,.Lfound0
+       xor     r6,r2,r5
+       ld.a    r2,[r0,4]
+       sub     r12,r6,r3
+       bic     r12,r12,r6
+       and     r7,r12,r4
+       breq    r7,0,.Loop /* ... so that this branch is unaligned.  */
+       ; Found searched-for character.  r0 has already advanced to next word.
+#ifdef __LITTLE_ENDIAN__
+/* We only need the information about the first matching byte
+   (i.e. the least significant matching byte) to be exact,
+   hence there is no problem with carry effects.  */
+.Lfound_char:
+       sub     r3,r7,1
+       bic     r3,r3,r7
+       norm    r2,r3
+       sub_s   r0,r0,1
+       asr_s   r2,r2,3
+       j.d     [blink]
+       sub_s   r0,r0,r2
+
+       .balign 4
+.Lfound0_ua:
+       mov     r3,r7
+.Lfound0:
+       sub     r3,r6,r3
+       bic     r3,r3,r6
+       and     r2,r3,r4
+       or_s    r12,r12,r2
+       sub_s   r3,r12,1
+       bic_s   r3,r3,r12
+       norm    r3,r3
+       add_s   r0,r0,3
+       asr_s   r12,r3,3
+       asl.f   0,r2,r3
+       sub_s   r0,r0,r12
+       j_s.d   [blink]
+       mov.pl  r0,0
+#else /* BIG ENDIAN */
+.Lfound_char:
+       lsr     r7,r7,7
+
+       bic     r2,r7,r6
+       norm    r2,r2
+       sub_s   r0,r0,4
+       asr_s   r2,r2,3
+       j.d     [blink]
+       add_s   r0,r0,r2
+
+.Lfound0_ua:
+       mov_s   r3,r7
+.Lfound0:
+       asl_s   r2,r2,7
+       or      r7,r6,r4
+       bic_s   r12,r12,r2
+       sub     r2,r7,r3
+       or      r2,r2,r6
+       bic     r12,r2,r12
+       bic.f   r3,r4,r12
+       norm    r3,r3
+
+       add.pl  r3,r3,1
+       asr_s   r12,r3,3
+       asl.f   0,r2,r3
+       add_s   r0,r0,r12
+       j_s.d   [blink]
+       mov.mi  r0,0
+#endif /* ENDIAN */
+ARC_EXIT strchr
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
new file mode 100644 (file)
index 0000000..5dc802b
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* This is optimized primarily for the ARC700.
+   It would be possible to speed up the loops by one cycle / word
+   respective one cycle / byte by forcing double source 1 alignment, unrolling
+   by a factor of two, and speculatively loading the second word / byte of
+   source 1; however, that would increase the overhead for loop setup / finish,
+   and strcmp might often terminate early.  */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strcmp
+       or      r2,r0,r1
+       bmsk_s  r2,r2,1
+       brne    r2,0,.Lcharloop
+       mov_s   r12,0x01010101
+       ror     r5,r12
+.Lwordloop:
+       ld.ab   r2,[r0,4]
+       ld.ab   r3,[r1,4]
+       nop_s
+       sub     r4,r2,r12
+       bic     r4,r4,r2
+       and     r4,r4,r5
+       brne    r4,0,.Lfound0
+       breq    r2,r3,.Lwordloop
+#ifdef __LITTLE_ENDIAN__
+       xor     r0,r2,r3        ; mask for difference
+       sub_s   r1,r0,1
+       bic_s   r0,r0,r1        ; mask for least significant difference bit
+       sub     r1,r5,r0
+       xor     r0,r5,r1        ; mask for least significant difference byte
+       and_s   r2,r2,r0
+       and_s   r3,r3,r0
+#endif /* LITTLE ENDIAN */
+       cmp_s   r2,r3
+       mov_s   r0,1
+       j_s.d   [blink]
+       bset.lo r0,r0,31
+
+       .balign 4
+#ifdef __LITTLE_ENDIAN__
+.Lfound0:
+       xor     r0,r2,r3        ; mask for difference
+       or      r0,r0,r4        ; or in zero indicator
+       sub_s   r1,r0,1
+       bic_s   r0,r0,r1        ; mask for least significant difference bit
+       sub     r1,r5,r0
+       xor     r0,r5,r1        ; mask for least significant difference byte
+       and_s   r2,r2,r0
+       and_s   r3,r3,r0
+       sub.f   r0,r2,r3
+       mov.hi  r0,1
+       j_s.d   [blink]
+       bset.lo r0,r0,31
+#else /* BIG ENDIAN */
+       /* The zero-detection above can mis-detect 0x01 bytes as zeroes
+          because of carry-propagation from a lower significant zero byte.
+          We can compensate for this by checking that bit0 is zero.
+          This compensation is not necessary in the step where we
+          get a low estimate for r2, because in any affected bytes
+          we already have 0x00 or 0x01, which will remain unchanged
+          when bit 7 is cleared.  */
+       .balign 4
+.Lfound0:
+       lsr     r0,r4,8
+       lsr_s   r1,r2
+       bic_s   r2,r2,r0        ; get low estimate for r2 and get ...
+       bic_s   r0,r0,r1        ; <this is the adjusted mask for zeros>
+       or_s    r3,r3,r0        ; ... high estimate r3 so that r2 > r3 will ...
+       cmp_s   r3,r2           ; ... be independent of trailing garbage
+       or_s    r2,r2,r0        ; likewise for r3 > r2
+       bic_s   r3,r3,r0
+       rlc     r0,0            ; r0 := r2 > r3 ? 1 : 0
+       cmp_s   r2,r3
+       j_s.d   [blink]
+       bset.lo r0,r0,31
+#endif /* ENDIAN */
+
+       .balign 4
+.Lcharloop:
+       ldb.ab  r2,[r0,1]
+       ldb.ab  r3,[r1,1]
+       nop_s
+       breq    r2,0,.Lcmpend
+       breq    r2,r3,.Lcharloop
+.Lcmpend:
+       j_s.d   [blink]
+       sub     r0,r2,r3
+ARC_EXIT strcmp
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
new file mode 100644 (file)
index 0000000..b7ca4ae
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
+   If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
+   it 8 byte aligned.  Thus, we can do a little read-ahead, without
+   dereferencing a cache line that we should not touch.
+   Note that short and long instructions have been scheduled to avoid
+   branch stalls.
+   The beq_s to r3z could be made unaligned & long to avoid a stall
+   there, but it is not likely to be taken often, and it
+   would also be likely to cost an unaligned mispredict at the next call.  */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strcpy
+       or      r2,r0,r1
+       bmsk_s  r2,r2,1
+       brne.d  r2,0,charloop
+       mov_s   r10,r0
+       ld_s    r3,[r1,0]
+       mov     r8,0x01010101
+       bbit0.d r1,2,loop_start
+       ror     r12,r8
+       sub     r2,r3,r8
+       bic_s   r2,r2,r3
+       tst_s   r2,r12
+       bne     r3z
+       mov_s   r4,r3
+       .balign 4
+loop:
+       ld.a    r3,[r1,4]
+       st.ab   r4,[r10,4]
+loop_start:
+       ld.a    r4,[r1,4]
+       sub     r2,r3,r8
+       bic_s   r2,r2,r3
+       tst_s   r2,r12
+       bne_s   r3z
+       st.ab   r3,[r10,4]
+       sub     r2,r4,r8
+       bic     r2,r2,r4
+       tst     r2,r12
+       beq     loop
+       mov_s   r3,r4
+#ifdef __LITTLE_ENDIAN__
+r3z:   bmsk.f  r1,r3,7
+       lsr_s   r3,r3,8
+#else
+r3z:   lsr.f   r1,r3,24
+       asl_s   r3,r3,8
+#endif
+       bne.d   r3z
+       stb.ab  r1,[r10,1]
+       j_s     [blink]
+
+       .balign 4
+charloop:
+       ldb.ab  r3,[r1,1]
+
+
+       brne.d  r3,0,charloop
+       stb.ab  r3,[r10,1]
+       j       [blink]
+ARC_EXIT strcpy
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
new file mode 100644 (file)
index 0000000..39759e0
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strlen
+       or      r3,r0,7
+       ld      r2,[r3,-7]
+       ld.a    r6,[r3,-3]
+       mov     r4,0x01010101
+       ; uses long immediate
+#ifdef __LITTLE_ENDIAN__
+       asl_s   r1,r0,3
+       btst_s  r0,2
+       asl     r7,r4,r1
+       ror     r5,r4
+       sub     r1,r2,r7
+       bic_s   r1,r1,r2
+       mov.eq  r7,r4
+       sub     r12,r6,r7
+       bic     r12,r12,r6
+       or.eq   r12,r12,r1
+       and     r12,r12,r5
+       brne    r12,0,.Learly_end
+#else /* BIG ENDIAN */
+       ror     r5,r4
+       btst_s  r0,2
+       mov_s   r1,31
+       sub3    r7,r1,r0
+       sub     r1,r2,r4
+       bic_s   r1,r1,r2
+       bmsk    r1,r1,r7
+       sub     r12,r6,r4
+       bic     r12,r12,r6
+       bmsk.ne r12,r12,r7
+       or.eq   r12,r12,r1
+       and     r12,r12,r5
+       brne    r12,0,.Learly_end
+#endif /* ENDIAN */
+
+.Loop:
+       ld_s    r2,[r3,4]
+       ld.a    r6,[r3,8]
+       ; stall for load result
+       sub     r1,r2,r4
+       bic_s   r1,r1,r2
+       sub     r12,r6,r4
+       bic     r12,r12,r6
+       or      r12,r12,r1
+       and     r12,r12,r5
+       breq r12,0,.Loop
+.Lend:
+       and.f   r1,r1,r5
+       sub.ne  r3,r3,4
+       mov.eq  r1,r12
+#ifdef __LITTLE_ENDIAN__
+       sub_s   r2,r1,1
+       bic_s   r2,r2,r1
+       norm    r1,r2
+       sub_s   r0,r0,3
+       lsr_s   r1,r1,3
+       sub         r0,r3,r0
+       j_s.d   [blink]
+       sub         r0,r0,r1
+#else /* BIG ENDIAN */
+       lsr_s   r1,r1,7
+       mov.eq  r2,r6
+       bic_s   r1,r1,r2
+       norm    r1,r1
+       sub         r0,r3,r0
+       lsr_s   r1,r1,3
+       j_s.d   [blink]
+       add         r0,r0,r1
+#endif /* ENDIAN */
+.Learly_end:
+       b.d     .Lend
+       sub_s.ne r1,r1,r1
+ARC_EXIT strlen
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
new file mode 100644 (file)
index 0000000..168dc14
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-y  := extable.o ioremap.o dma.o fault.o init.o
+obj-y  += tlb.o tlbex.o cache_arc700.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
new file mode 100644 (file)
index 0000000..88d617d
--- /dev/null
@@ -0,0 +1,768 @@
+/*
+ * ARC700 VIPT Cache Management
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ *   -flush_cache_dup_mm (fork)
+ *   -likewise for flush_cache_mm (exit/execve)
+ *   -likewise for flush_cache_range,flush_cache_page (munmap, exit, COW-break)
+ *
+ * vineetg: Apr 2011
+ *  -Now that MMU can support larger pg sz (16K), the determination of
+ *   aliasing shd not be based on assumption of 8k pg
+ *
+ * vineetg: Mar 2011
+ *  -optimised version of flush_icache_range( ) for making I/D coherent
+ *   when vaddr is available (agnostic of num of aliases)
+ *
+ * vineetg: Mar 2011
+ *  -Added documentation about I-cache aliasing on ARC700 and the way it
+ *   was handled up until MMU V2.
+ *  -Spotted a three year old bug when killing the 4 aliases, which needs
+ *   bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
+ *                        instead of paddr | {0x00, 0x01, 0x10, 0x11}
+ *   (Rajesh you owe me one now)
+ *
+ * vineetg: Dec 2010
+ *  -Off-by-one error when computing num_of_lines to flush
+ *   This broke signal handling with bionic which uses synthetic sigret stub
+ *
+ * vineetg: Mar 2010
+ *  -GCC can't generate ZOL for core cache flush loops.
+ *   Conv them into iterations based as opposed to while (start < end) types
+ *
+ * Vineetg: July 2009
+ *  -In I-cache flush routine we used to chk for aliasing for every line INV.
+ *   Instead now we setup routines per cache geometry and invoke them
+ *   via function pointers.
+ *
+ * Vineetg: Jan 2009
+ *  -Cache Line flush routines used to flush an extra line beyond end addr
+ *   because check was while (end >= start) instead of (end > start)
+ *     =Some call sites had to work around by doing -1, -4 etc to end param
+ *     =Some callers didnt care. This was spec bad in case of INV routines
+ *      which would discard valid data (cause of the horrible ext2 bug
+ *      in ARC IDE driver)
+ *
+ * vineetg: June 11th 2008: Fixed flush_icache_range( )
+ *  -Since ARC700 caches are not coherent (I$ doesnt snoop D$) both need
+ *   to be flushed, which it was not doing.
+ *  -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
+ *   however ARC cache maintenance OPs require PHY addr. Thus need to do
+ *   vmalloc_to_phy.
+ *  -Also added optimisation there, that for range > PAGE SIZE we flush the
+ *   entire cache in one shot rather than line by line. For e.g. a module
+ *   with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
+ *   while cache is only 16 or 32k.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/mmu_context.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/setup.h>
+
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+static void __ic_line_inv_no_alias(unsigned long, int);
+static void __ic_line_inv_2_alias(unsigned long, int);
+static void __ic_line_inv_4_alias(unsigned long, int);
+
+/* Holds the ptr to flush routine, depending on size due to aliasing issues */
+static void (*___flush_icache_rtn) (unsigned long, int);
+#endif
+
+char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
+{
+       int n = 0;
+       unsigned int c = smp_processor_id();
+
+#define PR_CACHE(p, enb, str)                                          \
+{                                                                      \
+       if (!(p)->ver)                                                  \
+               n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
+       else                                                            \
+               n += scnprintf(buf + n, len - n,                        \
+                       str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
+                       TO_KB((p)->sz), (p)->assoc, (p)->line_len,      \
+                       enb ?  "" : "DISABLED (kernel-build)");         \
+}
+
+       PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
+       PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");
+
+       return buf;
+}
+
+/*
+ * Read the Cache Build Configuration Registers, Decode them and save into
+ * the cpuinfo structure for later use.
+ * No Validation done here, simply read/convert the BCRs
+ */
+void __init read_decode_cache_bcr(void)
+{
+       struct bcr_cache ibcr, dbcr;
+       struct cpuinfo_arc_cache *p_ic, *p_dc;
+       unsigned int cpu = smp_processor_id();
+
+       p_ic = &cpuinfo_arc700[cpu].icache;
+       READ_BCR(ARC_REG_IC_BCR, ibcr);
+
+       if (ibcr.config == 0x3)
+               p_ic->assoc = 2;
+       p_ic->line_len = 8 << ibcr.line_len;
+       p_ic->sz = 0x200 << ibcr.sz;
+       p_ic->ver = ibcr.ver;
+
+       p_dc = &cpuinfo_arc700[cpu].dcache;
+       READ_BCR(ARC_REG_DC_BCR, dbcr);
+
+       if (dbcr.config == 0x2)
+               p_dc->assoc = 4;
+       p_dc->line_len = 16 << dbcr.line_len;
+       p_dc->sz = 0x200 << dbcr.sz;
+       p_dc->ver = dbcr.ver;
+}
+
+/*
+ * 1. Validate the Cache Geometry (compile time config matches hardware)
+ * 2. If I-cache suffers from aliasing, setup work arounds (difft flush rtn)
+ *    (aliasing D-cache configurations are not supported YET)
+ * 3. Enable the Caches, setup default flush mode for D-Cache
+ * 4. Calculate the SHMLBA used by user space
+ */
+void __init arc_cache_init(void)
+{
+       unsigned int temp;
+       unsigned int cpu = smp_processor_id();
+       struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+       struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+       int way_pg_ratio = way_pg_ratio;
+       char str[256];
+
+       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+       if (!ic->ver)
+               goto chk_dc;
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+       /* 1. Confirm some of I-cache params which Linux assumes */
+       if ((ic->assoc != ARC_ICACHE_WAYS) ||
+           (ic->line_len != ARC_ICACHE_LINE_LEN)) {
+               panic("Cache H/W doesn't match kernel Config");
+       }
+#if (CONFIG_ARC_MMU_VER > 2)
+       if (ic->ver != 3) {
+               if (running_on_hw)
+                       panic("Cache ver doesn't match MMU ver\n");
+
+               /* For ISS - suggest the toggles to use */
+               pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
+
+       }
+#endif
+
+       /*
+        * if Cache way size is <= page size then no aliasing exhibited
+        * otherwise ratio determines num of aliases.
+        * e.g. 32K I$, 2 way set assoc, 8k pg size
+        *       way-sz = 32k/2 = 16k
+        *       way-pg-ratio = 16k/8k = 2, so 2 aliases possible
+        *       (meaning 1 line could be in 2 possible locations).
+        */
+       way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE;
+       switch (way_pg_ratio) {
+       case 0:
+       case 1:
+               ___flush_icache_rtn = __ic_line_inv_no_alias;
+               break;
+       case 2:
+               ___flush_icache_rtn = __ic_line_inv_2_alias;
+               break;
+       case 4:
+               ___flush_icache_rtn = __ic_line_inv_4_alias;
+               break;
+       default:
+               panic("Unsupported I-Cache Sz\n");
+       }
+#endif
+
+       /* Enable/disable I-Cache */
+       temp = read_aux_reg(ARC_REG_IC_CTRL);
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+       temp &= ~IC_CTRL_CACHE_DISABLE;
+#else
+       temp |= IC_CTRL_CACHE_DISABLE;
+#endif
+
+       write_aux_reg(ARC_REG_IC_CTRL, temp);
+
+chk_dc:
+       if (!dc->ver)
+               return;
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+       if ((dc->assoc != ARC_DCACHE_WAYS) ||
+           (dc->line_len != ARC_DCACHE_LINE_LEN)) {
+               panic("Cache H/W doesn't match kernel Config");
+       }
+
+       /* check for D-Cache aliasing */
+       if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
+               panic("D$ aliasing not handled right now\n");
+#endif
+
+       /* Set the default Invalidate Mode to "simply discard dirty lines"
+        *  as this is more frequent than flush before invalidate
+        * Of course we toggle this default behaviour when desired
+        */
+       temp = read_aux_reg(ARC_REG_DC_CTRL);
+       temp &= ~DC_CTRL_INV_MODE_FLUSH;
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+       /* Enable D-Cache: Clear Bit 0 */
+       write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
+#else
+       /* Flush D cache */
+       write_aux_reg(ARC_REG_DC_FLSH, 0x1);
+       /* Disable D cache */
+       write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
+#endif
+
+       return;
+}
+
+#define OP_INV         0x1
+#define OP_FLUSH       0x2
+#define OP_FLUSH_N_INV 0x3
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+
+/***************************************************************
+ * Machine specific helpers for Entire D-Cache or Per Line ops
+ */
+
+static inline void wait_for_flush(void)
+{
+       while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
+               ;
+}
+
+/*
+ * Operation on Entire D-Cache
+ * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
+ * Note that constant propagation ensures all the checks are gone
+ * in generated code
+ */
+static inline void __dc_entire_op(const int cacheop)
+{
+       unsigned long flags, tmp = tmp;
+       int aux;
+
+       local_irq_save(flags);
+
+       if (cacheop == OP_FLUSH_N_INV) {
+               /* Dcache provides 2 cmd: FLUSH or INV
+                * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+                * flush-n-inv is achieved by INV cmd but with IM=1
+                * Default INV sub-mode is DISCARD, which needs to be toggled
+                */
+               tmp = read_aux_reg(ARC_REG_DC_CTRL);
+               write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
+       }
+
+       if (cacheop & OP_INV)   /* Inv or flush-n-inv use same cmd reg */
+               aux = ARC_REG_DC_IVDC;
+       else
+               aux = ARC_REG_DC_FLSH;
+
+       write_aux_reg(aux, 0x1);
+
+       if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
+               wait_for_flush();
+
+       /* Switch back the DISCARD ONLY Invalidate mode */
+       if (cacheop == OP_FLUSH_N_INV)
+               write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Per Line Operation on D-Cache
+ * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
+ * It's sole purpose is to help gcc generate ZOL
+ */
+static inline void __dc_line_loop(unsigned long start, unsigned long sz,
+                                         int aux_reg)
+{
+       int num_lines, slack;
+
+       /* Ensure we properly floor/ceil the non-line aligned/sized requests
+        * and have @start - aligned to cache line and integral @num_lines.
+        * This however can be avoided for page sized since:
+        *  -@start will be cache-line aligned already (being page aligned)
+        *  -@sz will be integral multiple of line size (being page sized).
+        */
+       if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+               slack = start & ~DCACHE_LINE_MASK;
+               sz += slack;
+               start -= slack;
+       }
+
+       num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
+
+       while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+               /*
+                * Just as for I$, in MMU v3, D$ ops also require
+                * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
+                * But we pass phy addr for both. This works since Linux
+                * doesn't support aliasing configs for D$, yet.
+                * Thus paddr is enough to provide both tag and index.
+                */
+               write_aux_reg(ARC_REG_DC_PTAG, start);
+#endif
+               write_aux_reg(aux_reg, start);
+               start += ARC_DCACHE_LINE_LEN;
+       }
+}
+
+/*
+ * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
+ */
+static inline void __dc_line_op(unsigned long start, unsigned long sz,
+                                       const int cacheop)
+{
+       unsigned long flags, tmp = tmp;
+       int aux;
+
+       local_irq_save(flags);
+
+       if (cacheop == OP_FLUSH_N_INV) {
+               /*
+                * Dcache provides 2 cmd: FLUSH or INV
+                * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+                * flush-n-inv is achieved by INV cmd but with IM=1
+                * Default INV sub-mode is DISCARD, which needs to be toggled
+                */
+               tmp = read_aux_reg(ARC_REG_DC_CTRL);
+               write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
+       }
+
+       if (cacheop & OP_INV)   /* Inv / flush-n-inv use same cmd reg */
+               aux = ARC_REG_DC_IVDL;
+       else
+               aux = ARC_REG_DC_FLDL;
+
+       __dc_line_loop(start, sz, aux);
+
+       if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
+               wait_for_flush();
+
+       /* Switch back the DISCARD ONLY Invalidate mode */
+       if (cacheop == OP_FLUSH_N_INV)
+               write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+
+       local_irq_restore(flags);
+}
+
+#else
+
+#define __dc_entire_op(cacheop)
+#define __dc_line_op(start, sz, cacheop)
+
+#endif /* CONFIG_ARC_HAS_DCACHE */
+
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+
+/*
+ *             I-Cache Aliasing in ARC700 VIPT caches
+ *
+ * For fetching code from I$, ARC700 uses vaddr (embedded in program code)
+ * to "index" into SET of cache-line and paddr from MMU to match the TAG
+ * in the WAYS of SET.
+ *
+ * However the CDU interface (to flush/inv) lines from software, only takes
+ * paddr (to have simpler hardware interface). For simpler cases, using paddr
+ * alone suffices.
+ * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size):
+ *      way_sz = cache_sz / num_ways = 16k/2 = 8k
+ *      num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits
+ *   Ignoring the bottom 5 bits corresp to the off within a 32b cacheline,
+ *   bits req for calc set-index = bits 12:5 (0 based). Since this range fits
+ *   inside the bottom 13 bits of paddr, which are same for vaddr and paddr
+ *   (with 8k pg sz), paddr alone can be safely used by CDU to unambiguously
+ *   locate a cache-line.
+ *
+ * However for a difft sized cache, say 32k I$, above math yields need
+ * for 14 bits of vaddr to locate a cache line, which can't be provided by
+ * paddr, since the bit 13 (0 based) might differ between the two.
+ *
+ * This lack of extra bits needed for correct line addressing, defines the
+ * classical problem of Cache aliasing with VIPT architectures
+ * num_aliases = 1 << extra_bits
+ * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases
+ *      2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases
+ *      2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases
+ *
+ * ------------------
+ * MMU v1/v2 (Fixed Page Size 8k)
+ * ------------------
+ * The solution was to provide CDU with these additional vaddr bits. These
+ * would be bits [x:13], x would depend on cache-geom.
+ * H/w folks chose [17:13] to be a future safe range, and moreso these 5 bits
+ * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
+ * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
+ * represent the offset within cache-line. The adv of using this "clumsy"
+ * interface for additional info was no new reg was needed in CDU.
+ *
+ * 17:13 represented the max num of bits passable, actual bits needed were
+ * fewer, based on the num-of-aliases possible.
+ * -for 2 alias possibility, only bit 13 needed (32K cache)
+ * -for 4 alias possibility, bits 14:13 needed (64K cache)
+ *
+ * Since vaddr was not available for all instances of I$ flush req by core
+ * kernel, the only safe way (non-optimal though) was to kill all possible
+ * lines which could represent an alias (even if they didnt represent one
+ * in execution).
+ * e.g. for 64K I$, 4 aliases possible, so we did
+ *      flush start
+ *      flush start | 0x01
+ *      flush start | 0x2
+ *      flush start | 0x3
+ *
+ * The penalty was invoking the operation itself, since tag match is anyways
+ * paddr based, a line which didn't represent an alias would not match the
+ * paddr, hence wont be killed
+ *
+ * Note that aliasing concerns are independent of line-sz for a given cache
+ * geometry (size + set_assoc) because the extra bits required by line-sz are
+ * reduced from the set calc.
+ * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above
+ *  32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit
+ *  64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit
+ *
+ * ------------------
+ * MMU v3
+ * ------------------
+ * This ver of MMU supports var page sizes (1k-16k) - Linux will support
+ * 8k (default), 16k and 4k.
+ * However from hardware perspective, smaller page sizes aggravate aliasing
+ * meaning more vaddr bits needed to disambiguate the cache-line-op ;
+ * the existing scheme of piggybacking won't work for certain configurations.
+ * Two new registers IC_PTAG and DC_PTAG introduced.
+ * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
+ */
+
+/***********************************************************
+ * Machine specific helpers for per line I-Cache invalidate.
+ * 3 routines to account for 1, 2, 4 aliases possible
+ */
+
+static void __ic_line_inv_no_alias(unsigned long start, int num_lines)
+{
+       while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+               write_aux_reg(ARC_REG_IC_PTAG, start);
+#endif
+               write_aux_reg(ARC_REG_IC_IVIL, start);
+               start += ARC_ICACHE_LINE_LEN;
+       }
+}
+
+static void __ic_line_inv_2_alias(unsigned long start, int num_lines)
+{
+       while (num_lines-- > 0) {
+
+#if (CONFIG_ARC_MMU_VER > 2)
+               /*
+                *  MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG
+                * reg to pass the "tag" bits and existing IVIL reg only looks
+                * at bits relevant for "index" (details above)
+                * Programming Notes:
+                * -when writing tag to PTAG reg, bit chopping can be avoided,
+                *  CDU ignores non-tag bits.
+                * -Ideally "index" must be computed from vaddr, but it is not
+                *  avail in these rtns. So to be safe, we kill the lines in all
+                *  possible indexes corresp to num of aliases possible for
+                *  given cache config.
+                */
+               write_aux_reg(ARC_REG_IC_PTAG, start);
+               write_aux_reg(ARC_REG_IC_IVIL,
+                                 start & ~(0x1 << PAGE_SHIFT));
+               write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT));
+#else
+               write_aux_reg(ARC_REG_IC_IVIL, start);
+               write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
+#endif
+               start += ARC_ICACHE_LINE_LEN;
+       }
+}
+
+/*
+ * Same as __ic_line_inv_2_alias() but for a cache config with 4 possible
+ * index aliases per page: each line is killed at all 4 candidate indexes.
+ */
+static void __ic_line_inv_4_alias(unsigned long start, int num_lines)
+{
+       while (num_lines-- > 0) {
+
+#if (CONFIG_ARC_MMU_VER > 2)
+               /* tag via PTAG, then invalidate at all 4 possible indexes */
+               write_aux_reg(ARC_REG_IC_PTAG, start);
+
+               write_aux_reg(ARC_REG_IC_IVIL,
+                                 start & ~(0x3 << PAGE_SHIFT));
+               write_aux_reg(ARC_REG_IC_IVIL,
+                                 start & ~(0x2 << PAGE_SHIFT));
+               write_aux_reg(ARC_REG_IC_IVIL,
+                                 start & ~(0x1 << PAGE_SHIFT));
+               write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT));
+#else
+               /* pre-v3 MMU: alias selector goes in addr LSBs */
+               write_aux_reg(ARC_REG_IC_IVIL, start);
+               write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
+               write_aux_reg(ARC_REG_IC_IVIL, start | 0x02);
+               write_aux_reg(ARC_REG_IC_IVIL, start | 0x03);
+#endif
+               start += ARC_ICACHE_LINE_LEN;
+       }
+}
+
+/*
+ * Line-granular I-cache invalidate of [start, start + sz).
+ * Floors @start / ceils @sz to whole cache lines, then dispatches to the
+ * alias-specific worker via ___flush_icache_rtn (presumably installed at
+ * boot from the probed cache geometry -- TODO confirm) with IRQs disabled
+ * so the per-line aux-register sequence is not interleaved.
+ */
+static void __ic_line_inv(unsigned long start, unsigned long sz)
+{
+       unsigned long flags;
+       int num_lines, slack;
+
+       /*
+        * Ensure we properly floor/ceil the non-line aligned/sized requests
+        * and have @start - aligned to cache line, and integral @num_lines
+        * However page sized flushes can be compile time optimised.
+        *  -@start will be cache-line aligned already (being page aligned)
+        *  -@sz will be integral multiple of line size (being page sized).
+        */
+       if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+               slack = start & ~ICACHE_LINE_MASK;
+               sz += slack;
+               start -= slack;
+       }
+
+       num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
+
+       local_irq_save(flags);
+       (*___flush_icache_rtn) (start, num_lines);
+       local_irq_restore(flags);
+}
+
+/* Unlike routines above, having vaddr for flush op (along with paddr),
+ * prevents the need to speculatively kill the lines in multiple sets
+ * based on ratio of way_sz : pg_sz
+ */
+static void __ic_line_inv_vaddr(unsigned long phy_start,
+                                        unsigned long vaddr, unsigned long sz)
+{
+       unsigned long flags;
+       int num_lines, slack;
+       unsigned int addr;
+
+       /* floor/ceil the request to whole cache lines */
+       slack = phy_start & ~ICACHE_LINE_MASK;
+       sz += slack;
+       phy_start -= slack;
+       num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
+
+#if (CONFIG_ARC_MMU_VER > 2)
+       vaddr &= ~ICACHE_LINE_MASK;
+       addr = phy_start;
+#else
+       /* bits 17:13 of vaddr go as bits 4:0 of paddr */
+       addr = phy_start | ((vaddr >> 13) & 0x1F);
+#endif
+
+       local_irq_save(flags);
+       while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+               /* tag comes from phy addr */
+               write_aux_reg(ARC_REG_IC_PTAG, addr);
+
+               /* index bits come from vaddr */
+               write_aux_reg(ARC_REG_IC_IVIL, vaddr);
+               vaddr += ARC_ICACHE_LINE_LEN;
+#else
+               /* this paddr contains vaddrs bits as needed */
+               write_aux_reg(ARC_REG_IC_IVIL, addr);
+#endif
+               addr += ARC_ICACHE_LINE_LEN;
+       }
+       local_irq_restore(flags);
+}
+
+#else
+
+#define __ic_line_inv(start, sz)
+#define __ic_line_inv_vaddr(pstart, vstart, sz)
+
+#endif /* CONFIG_ARC_HAS_ICACHE */
+
+
+/***********************************************************
+ * Exported APIs
+ */
+
+/* TBD: use pg_arch_1 to optimize this */
+/* Writeback the D-cache lines covering @page (via its kernel linear addr) */
+void flush_dcache_page(struct page *page)
+{
+       __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+
+/* Writeback + invalidate D-cache lines for a DMA region */
+void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+{
+       __dc_line_op(start, sz, OP_FLUSH_N_INV);
+}
+EXPORT_SYMBOL(dma_cache_wback_inv);
+
+/* Invalidate (discard) D-cache lines for a DMA region */
+void dma_cache_inv(unsigned long start, unsigned long sz)
+{
+       __dc_line_op(start, sz, OP_INV);
+}
+EXPORT_SYMBOL(dma_cache_inv);
+
+/* Writeback (without invalidate) D-cache lines for a DMA region */
+void dma_cache_wback(unsigned long start, unsigned long sz)
+{
+       __dc_line_op(start, sz, OP_FLUSH);
+}
+EXPORT_SYMBOL(dma_cache_wback);
+
+/*
+ * This is API for making I/D Caches consistent when modifying code
+ * (loadable modules, kprobes,  etc)
+ * This is called on insmod, with kernel virtual address for CODE of
+ * the module. ARC cache maintenance ops require PHY address thus we
+ * need to convert vmalloc addr to PHY addr
+ */
+void flush_icache_range(unsigned long kstart, unsigned long kend)
+{
+       unsigned int tot_sz, off, sz;
+       unsigned long phy, pfn;
+       unsigned long flags;
+
+       /* printk("Kernel Cache Coherency: %lx to %lx\n",kstart, kend); */
+
+       /* This is not the right API for user virtual address */
+       if (kstart < TASK_SIZE) {
+               /*
+                * NOTE(review): BUG_ON() takes a condition; a string literal
+                * is always non-zero, so this BUGs unconditionally and the
+                * return below is unreachable -- confirm intent.
+                */
+               BUG_ON("Flush icache range for user virtual addr space");
+               return;
+       }
+
+       /* Shortcut for bigger flush ranges.
+        * Here we don't care if this was kernel virtual or phy addr
+        */
+       tot_sz = kend - kstart;
+       if (tot_sz > PAGE_SIZE) {
+               flush_cache_all();
+               return;
+       }
+
+       /* Case: Kernel Phy addr (0x8000_0000 onwards) */
+       if (likely(kstart > PAGE_OFFSET)) {
+               __ic_line_inv(kstart, kend - kstart);
+               __dc_line_op(kstart, kend - kstart, OP_FLUSH);
+               return;
+       }
+
+       /*
+        * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
+        * (1) ARC Cache Maintenance ops only take Phy addr, hence special
+        *     handling of kernel vaddr.
+        *
+        * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
+        *     it still needs to handle  a 2 page scenario, where the range
+        *     straddles across 2 virtual pages and hence need for loop
+        */
+       while (tot_sz > 0) {
+               off = kstart % PAGE_SIZE;
+               pfn = vmalloc_to_pfn((void *)kstart);
+               phy = (pfn << PAGE_SHIFT) + off;
+               sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
+               /* D$ flush before I$ inv so fresh code is visible to fetch */
+               local_irq_save(flags);
+               __dc_line_op(phy, sz, OP_FLUSH);
+               __ic_line_inv(phy, sz);
+               local_irq_restore(flags);
+               kstart += sz;
+               tot_sz -= sz;
+       }
+}
+
+/*
+ * Optimised ver of flush_icache_range() with spec callers: ptrace/signals
+ * where vaddr is also available. This allows passing both vaddr and paddr
+ * bits to CDU for cache flush, short-circuiting the current pessimistic algo
+ * which kills all possible aliases.
+ * An added adv of knowing that vaddr is user-vaddr avoids various checks
+ * and handling for k-vaddr, k-paddr as done in orig ver above
+ */
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+                             int len)
+{
+       __ic_line_inv_vaddr(paddr, u_vaddr, len);
+       __dc_line_op(paddr, len, OP_FLUSH);
+}
+
+/*
+ * XXX: This also needs to be optim using pg_arch_1
+ * This is called when a page-cache page is about to be mapped into a
+ * user process' address space.  It offers an opportunity for a
+ * port to ensure d-cache/i-cache coherency if necessary.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+       /* only executable mappings need I-cache maintenance */
+       if (!(vma->vm_flags & VM_EXEC))
+               return;
+
+       /* only paddr known here, so the pessimistic alias-killing path */
+       __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE);
+}
+
+/* Invalidate the entire I-cache in one shot */
+void flush_icache_all(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       write_aux_reg(ARC_REG_IC_IVIC, 1);
+
+       /* lr will not complete till the icache inv operation is over */
+       read_aux_reg(ARC_REG_IC_CTRL);
+       local_irq_restore(flags);
+}
+
+/* Flush+invalidate both caches: whole I-cache, then entire D-cache */
+noinline void flush_cache_all(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       flush_icache_all();
+       __dc_entire_op(OP_FLUSH_N_INV);
+
+       local_irq_restore(flags);
+
+}
+
+/**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+ * Needed for JITs which generate code on the fly
+ */
+SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
+{
+       /* TBD: optimize this -- args are currently ignored; whole-cache flush */
+       flush_cache_all();
+       return 0;
+}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
new file mode 100644 (file)
index 0000000..12cc648
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * DMA Coherent API Notes
+ *
+ * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
+ * implemented by accessing it using a kernel virtual address, with
+ * Cache bit off in the TLB entry.
+ *
+ * The default DMA address == Phy address which is 0x8000_0000 based.
+ * A platform/device can make it zero based, by over-riding
+ * plat_{dma,kernel}_addr_to_{kernel,dma}
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+#include <linux/export.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Helpers for Coherent DMA API.
+ */
+/*
+ * Allocate a non-coherent DMA buffer: plain (cached) pages; caller is
+ * responsible for explicit cache maintenance.  Bus addr via @dma_handle.
+ */
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+                           dma_addr_t *dma_handle, gfp_t gfp)
+{
+       void *paddr;
+
+       /* This is linear addr (0x8000_0000 based) */
+       paddr = alloc_pages_exact(size, gfp);
+       if (!paddr)
+               return NULL;
+
+       /* This is bus address, platform dependent */
+       *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+
+       return paddr;
+}
+EXPORT_SYMBOL(dma_alloc_noncoherent);
+
+/* Free a buffer from dma_alloc_noncoherent(); @vaddr is unused since the
+ * kernel addr is recovered from the bus addr via the platform hook. */
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+                         dma_addr_t dma_handle)
+{
+       free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
+                        size);
+}
+EXPORT_SYMBOL(dma_free_noncoherent);
+
+/*
+ * Allocate a DMA-coherent buffer.
+ *
+ * I/O is non-coherent on ARC (see file header), so coherency is achieved
+ * by returning a kernel virtual mapping with the Cache bit off.  The bus
+ * address is returned via @dma_handle.  Returns NULL on failure.
+ */
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                        dma_addr_t *dma_handle, gfp_t gfp)
+{
+       void *paddr, *kvaddr;
+
+       /* This is linear addr (0x8000_0000 based) */
+       paddr = alloc_pages_exact(size, gfp);
+       if (!paddr)
+               return NULL;
+
+       /* This is kernel Virtual address (0x7000_0000 based) */
+       kvaddr = ioremap_nocache((unsigned long)paddr, size);
+       if (kvaddr == NULL) {
+               /* don't leak the pages if the uncached mapping failed */
+               free_pages_exact(paddr, size);
+               return NULL;
+       }
+       memset(kvaddr, 0, size);
+
+       /* This is bus address, platform dependent */
+       *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+
+       return kvaddr;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+/* Tear down the uncached mapping, then free the underlying pages */
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+                      dma_addr_t dma_handle)
+{
+       iounmap((void __force __iomem *)kvaddr);
+
+       free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
+                        size);
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+/*
+ * Helper for streaming DMA...
+ * Out-of-line wrapper so modules get the (otherwise inline) cache sync.
+ */
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+                         enum dma_data_direction dir)
+{
+       __inline_dma_cache_sync(paddr, size, dir);
+}
+EXPORT_SYMBOL(__arc_dma_cache_sync);
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
new file mode 100644 (file)
index 0000000..014172b
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Borrowed heavily from MIPS
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+/*
+ * Look up the faulting PC in the exception tables; if a fixup exists,
+ * redirect the return address (regs->ret) to it.
+ * Returns 1 if handled, 0 otherwise.
+ */
+int fixup_exception(struct pt_regs *regs)
+{
+       const struct exception_table_entry *fixup;
+
+       fixup = search_exception_tables(instruction_pointer(regs));
+       if (fixup) {
+               regs->ret = fixup->fixup;
+
+               return 1;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+
+/*
+ * With -Os the uaccess routines are not inlined; provide out-of-line,
+ * exported wrappers around the __arc_* primitives for modules/callers.
+ */
+long arc_copy_from_user_noinline(void *to, const void __user * from,
+               unsigned long n)
+{
+       return __arc_copy_from_user(to, from, n);
+}
+EXPORT_SYMBOL(arc_copy_from_user_noinline);
+
+long arc_copy_to_user_noinline(void __user *to, const void *from,
+               unsigned long n)
+{
+       return __arc_copy_to_user(to, from, n);
+}
+EXPORT_SYMBOL(arc_copy_to_user_noinline);
+
+unsigned long arc_clear_user_noinline(void __user *to,
+               unsigned long n)
+{
+       return __arc_clear_user(to, n);
+}
+EXPORT_SYMBOL(arc_clear_user_noinline);
+
+long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+               long count)
+{
+       return __arc_strncpy_from_user(dst, src, count);
+}
+EXPORT_SYMBOL(arc_strncpy_from_user_noinline);
+
+long arc_strnlen_user_noinline(const char __user *src, long n)
+{
+       return __arc_strnlen_user(src, n);
+}
+EXPORT_SYMBOL(arc_strnlen_user_noinline);
+#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
new file mode 100644 (file)
index 0000000..af55aab
--- /dev/null
@@ -0,0 +1,228 @@
+/* Page Fault Handling for ARC (TLB Miss / ProtV)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/signal.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Fault-in of kernel (vmalloc) virtual memory: copy the relevant pmd
+ * from the 'reference' page table (init_mm) into this task's.
+ * Returns 0 on success, 1 if the kernel mapping itself is absent.
+ */
+static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+{
+       /*
+        * Synchronize this task's top level page-table
+        * with the 'reference' page table.
+        */
+       pgd_t *pgd, *pgd_k;
+       pud_t *pud, *pud_k;
+       pmd_t *pmd, *pmd_k;
+
+       pgd = pgd_offset_fast(mm, address);
+       pgd_k = pgd_offset_k(address);
+
+       if (!pgd_present(*pgd_k))
+               goto bad_area;
+
+       pud = pud_offset(pgd, address);
+       pud_k = pud_offset(pgd_k, address);
+       if (!pud_present(*pud_k))
+               goto bad_area;
+
+       pmd = pmd_offset(pud, address);
+       pmd_k = pmd_offset(pud_k, address);
+       if (!pmd_present(*pmd_k))
+               goto bad_area;
+
+       set_pmd(pmd, *pmd_k);
+
+       /* XXX: create the TLB entry here */
+       return 0;
+
+bad_area:
+       return 1;
+}
+
+/*
+ * Main page-fault entry for ARC (TLB miss / protection violation).
+ * @write:      non-zero for a write access
+ * @address:    faulting virtual address
+ * @cause_code: raw exception cause (saved into thread struct on signal)
+ */
+void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
+                  unsigned long cause_code)
+{
+       struct vm_area_struct *vma = NULL;
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = tsk->mm;
+       siginfo_t info;
+       int fault, ret;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                               (write ? FAULT_FLAG_WRITE : 0);
+
+       /*
+        * We fault-in kernel-space virtual memory on-demand. The
+        * 'reference' page table is init_mm.pgd.
+        *
+        * NOTE! We MUST NOT take any locks for this case. We may
+        * be in an interrupt or a critical region, and should
+        * only copy the information from the master page table,
+        * nothing more.
+        */
+       if (address >= VMALLOC_START && address <= VMALLOC_END) {
+               ret = handle_vmalloc_fault(mm, address);
+               if (unlikely(ret))
+                       goto bad_area_nosemaphore;
+               else
+                       return;
+       }
+
+       info.si_code = SEGV_MAPERR;
+
+       /*
+        * If we're in an interrupt or have no user
+        * context, we must not take the fault..
+        */
+       if (in_atomic() || !mm)
+               goto no_context;
+
+retry:
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, address);
+       if (!vma)
+               goto bad_area;
+       if (vma->vm_start <= address)
+               goto good_area;
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               goto bad_area;
+       if (expand_stack(vma, address))
+               goto bad_area;
+
+       /*
+        * Ok, we have a good vm_area for this memory access, so
+        * we can handle it..
+        */
+good_area:
+       info.si_code = SEGV_ACCERR;
+
+       /* Handle protection violation, execute on heap or stack */
+
+       if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))
+               goto bad_area;
+
+       if (write) {
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto bad_area;
+       } else {
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                       goto bad_area;
+       }
+
+survive:
+       /*
+        * If for any reason at all we couldn't handle the fault,
+        * make sure we exit gracefully rather than endlessly redo
+        * the fault.
+        */
+       fault = handle_mm_fault(mm, vma, address, flags);
+
+       /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
+       if (unlikely(fatal_signal_pending(current))) {
+               /*
+                * NOTE(review): for a kernel-mode fault this falls through
+                * below; verify mmap_sem accounting is balanced on every
+                * path in that case (RETRY already dropped it).
+                */
+               if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
+                       up_read(&mm->mmap_sem);
+               if (user_mode(regs))
+                       return;
+       }
+
+       if (likely(!(fault & VM_FAULT_ERROR))) {
+               if (flags & FAULT_FLAG_ALLOW_RETRY) {
+                       /* To avoid updating stats twice for retry case */
+                       if (fault & VM_FAULT_MAJOR)
+                               tsk->maj_flt++;
+                       else
+                               tsk->min_flt++;
+
+                       if (fault & VM_FAULT_RETRY) {
+                               flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                               flags |= FAULT_FLAG_TRIED;
+                               goto retry;
+                       }
+               }
+
+               /* Fault Handled Gracefully */
+               up_read(&mm->mmap_sem);
+               return;
+       }
+
+       /* TBD: switch to pagefault_out_of_memory() */
+       if (fault & VM_FAULT_OOM)
+               goto out_of_memory;
+       else if (fault & VM_FAULT_SIGBUS)
+               goto do_sigbus;
+
+       /* no man's land */
+       BUG();
+
+       /*
+        * Something tried to access memory that isn't in our memory map..
+        * Fix it, but check if it's kernel or user first..
+        */
+bad_area:
+       up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+       /* User mode accesses just cause a SIGSEGV */
+       if (user_mode(regs)) {
+               tsk->thread.fault_address = address;
+               tsk->thread.cause_code = cause_code;
+               info.si_signo = SIGSEGV;
+               info.si_errno = 0;
+               /* info.si_code has been set above */
+               info.si_addr = (void __user *)address;
+               force_sig_info(SIGSEGV, &info, tsk);
+               return;
+       }
+
+no_context:
+       /* Are we prepared to handle this kernel fault?
+        *
+        * (The kernel has valid exception-points in the source
+        *  when it acesses user-memory. When it fails in one
+        *  of those points, we find it in a table and do a jump
+        *  to some fixup code that loads an appropriate error
+        *  code)
+        */
+       if (fixup_exception(regs))
+               return;
+
+       die("Oops", regs, address, cause_code);
+
+out_of_memory:
+       /* init never dies for OOM; back off and retry the fault */
+       if (is_global_init(tsk)) {
+               yield();
+               goto survive;
+       }
+       up_read(&mm->mmap_sem);
+
+       if (user_mode(regs))
+               do_group_exit(SIGKILL); /* This will never return */
+
+       goto no_context;
+
+do_sigbus:
+       up_read(&mm->mmap_sem);
+
+       if (!user_mode(regs))
+               goto no_context;
+
+       tsk->thread.fault_address = address;
+       tsk->thread.cause_code = cause_code;
+       info.si_signo = SIGBUS;
+       info.si_errno = 0;
+       info.si_code = BUS_ADRERR;
+       info.si_addr = (void __user *)address;
+       force_sig_info(SIGBUS, &info, tsk);
+}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
new file mode 100644 (file)
index 0000000..caf797d
--- /dev/null
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#ifdef CONFIG_BLOCK_DEV_RAM
+#include <linux/blk.h>
+#endif
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
+char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+EXPORT_SYMBOL(empty_zero_page);
+
+/* Default tot mem from .config */
+static unsigned long arc_mem_sz = 0x20000000;  /* some default */
+
+/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
+static int __init setup_mem_sz(char *str)
+{
+       /* page-align whatever the user asked for */
+       arc_mem_sz = memparse(str, NULL) & PAGE_MASK;
+
+       /* early console might not be setup yet - it will show up later */
+       pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz));
+
+       return 0;
+}
+early_param("mem", setup_mem_sz);
+
+/* DT memory node callback: sets total memory size (@base is ignored) */
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+       arc_mem_sz = size & PAGE_MASK;
+       pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz));
+}
+
+/*
+ * First memory setup routine called from setup_arch()
+ * 1. setup swapper's mm @init_mm
+ * 2. Count the pages we have and setup bootmem allocator
+ * 3. zone setup
+ */
+void __init setup_arch_memory(void)
+{
+       unsigned long zones_size[MAX_NR_ZONES] = { 0, 0 };
+       unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
+
+       init_mm.start_code = (unsigned long)_text;
+       init_mm.end_code = (unsigned long)_etext;
+       init_mm.end_data = (unsigned long)_edata;
+       init_mm.brk = (unsigned long)_end;
+
+       /*
+        * We do it here, so that memory is correctly instantiated
+        * even if "mem=xxx" cmline over-ride is given and/or
+        * DT has memory node. Each causes an update to @arc_mem_sz
+        * and we finally add memory one here
+        */
+       memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
+
+       /*------------- externs in mm need setting up ---------------*/
+
+       /* first page of system - kernel .vector starts here */
+       min_low_pfn = PFN_DOWN(CONFIG_LINUX_LINK_BASE);
+
+       /* Last usable page of low mem (no HIGHMEM yet for ARC port) */
+       max_low_pfn = max_pfn = PFN_DOWN(end_mem);
+
+       max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
+
+       /*------------- reserve kernel image -----------------------*/
+       memblock_reserve(CONFIG_LINUX_LINK_BASE,
+                        __pa(_end) - CONFIG_LINUX_LINK_BASE);
+
+       memblock_dump_all();
+
+       /*-------------- node setup --------------------------------*/
+       memset(zones_size, 0, sizeof(zones_size));
+       zones_size[ZONE_NORMAL] = num_physpages;
+
+       /*
+        * We can't use the helper free_area_init(zones[]) because it uses
+        * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
+        * when our kernel doesn't start at PAGE_OFFSET, i.e.
+        * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
+        */
+       free_area_init_node(0,                  /* node-id */
+                           zones_size,         /* num pages per zone */
+                           min_low_pfn,        /* first pfn of node */
+                           NULL);              /* NO holes */
+}
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+       int codesize, datasize, initsize, reserved_pages, free_pages;
+       int tmp;
+
+       high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz);
+
+       totalram_pages = free_all_bootmem();
+
+       /* count all reserved pages [kernel code/data/mem_map..] */
+       reserved_pages = 0;
+       for (tmp = 0; tmp < max_mapnr; tmp++)
+               if (PageReserved(mem_map + tmp))
+                       reserved_pages++;
+
+       /* XXX: nr_free_pages() is equivalent */
+       free_pages = max_mapnr - reserved_pages;
+
+       /*
+        * For the purpose of display below, split the "reserve mem"
+        * kernel code/data is already shown explicitly,
+        * Show any other reservations (mem_map[ ] et al)
+        */
+       reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >>
+                                                               PAGE_SHIFT);
+
+       codesize = _etext - _text;
+       datasize = _end - _etext;
+       initsize = __init_end - __init_begin;
+
+       pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n",
+               PAGES_TO_MB(free_pages),
+               TO_MB(arc_mem_sz),
+               TO_KB(codesize), TO_KB(datasize), TO_KB(initsize),
+               PAGES_TO_KB(reserved_pages));
+}
+
+/* Return whole pages of [begin, end) to the page allocator (used for
+ * __init sections and initrd); partial trailing pages are kept. */
+static void __init free_init_pages(const char *what, unsigned long begin,
+                                  unsigned long end)
+{
+       unsigned long addr;
+
+       pr_info("Freeing %s: %ldk [%lx] to [%lx]\n",
+               what, TO_KB(end - begin), begin, end);
+
+       /* need to check that the page we free is not a partial page */
+       for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(addr));
+               init_page_count(virt_to_page(addr));
+               free_page(addr);
+               totalram_pages++;
+       }
+}
+
+/*
+ * free_initmem: Free all the __init memory.
+ */
+void __init_refok free_initmem(void)
+{
+       free_init_pages("unused kernel memory",
+                       (unsigned long)__init_begin,
+                       (unsigned long)__init_end);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/* Release initrd pages once its contents have been consumed */
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+       free_init_pages("initrd memory", start, end);
+}
+#endif
+
+#ifdef CONFIG_OF_FLATTREE
+/* DT initrd callback: currently only logs the range, no setup is done.
+ * NOTE(review): pr_err for a purely informational trace looks odd --
+ * confirm whether pr_info/pr_debug was intended. */
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+                                           unsigned long end)
+{
+       pr_err("%s(%lx, %lx)\n", __func__, start, end);
+}
+#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
new file mode 100644 (file)
index 0000000..3e5c92c
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/cache.h>
+
+/*
+ * Map a physical region for I/O access.
+ * Addresses in the h/w uncached window are returned 1:1 (no MMU mapping);
+ * everything else goes through ioremap_prot() with an uncached pgprot.
+ */
+void __iomem *ioremap(unsigned long paddr, unsigned long size)
+{
+       unsigned long end;
+
+       /* Don't allow wraparound or zero size */
+       end = paddr + size - 1;
+       if (!size || (end < paddr))
+               return NULL;
+
+       /* If the region is h/w uncached, avoid MMU mappings */
+       if (paddr >= ARC_UNCACHED_ADDR_SPACE)
+               return (void __iomem *)paddr;
+
+       return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
+}
+EXPORT_SYMBOL(ioremap);
+
+/*
+ * ioremap with access flags
+ * Cache semantics wise it is same as ioremap - "forced" uncached.
+ * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
+ * ARC hardware uncached region, this one still goes thru the MMU as caller
+ * might need finer access control (R/W/X)
+ */
+void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+                          unsigned long flags)
+{
+       void __iomem *vaddr;
+       struct vm_struct *area;
+       unsigned long off, end;
+       pgprot_t prot = __pgprot(flags);
+
+       /* Don't allow wraparound, zero size */
+       end = paddr + size - 1;
+       if ((!size) || (end < paddr))
+               return NULL;
+
+       /* An early platform driver might end up here */
+       if (!slab_is_available())
+               return NULL;
+
+       /* force uncached */
+       prot = pgprot_noncached(prot);
+
+       /* Mappings have to be page-aligned */
+       off = paddr & ~PAGE_MASK;
+       paddr &= PAGE_MASK;
+       size = PAGE_ALIGN(end + 1) - paddr;
+
+       /*
+        * Ok, go for it..
+        */
+       area = get_vm_area(size, VM_IOREMAP);
+       if (!area)
+               return NULL;
+       area->phys_addr = paddr;
+       vaddr = (void __iomem *)area->addr;
+       if (ioremap_page_range((unsigned long)vaddr,
+                              (unsigned long)vaddr + size, paddr, prot)) {
+               vunmap((void __force *)vaddr);
+               return NULL;
+       }
+       /* re-apply the sub-page offset that was page-masked off above */
+       return (void __iomem *)(off + (char __iomem *)vaddr);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+
+/* Undo ioremap(); 1:1 uncached-window "mappings" need no teardown */
+void iounmap(const void __iomem *addr)
+{
+       if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
+               return;
+
+       vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
new file mode 100644 (file)
index 0000000..9b9ce23
--- /dev/null
@@ -0,0 +1,645 @@
+/*
+ * TLB Management (flush/create/diagnostics) for ARC700
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Aug 2011
+ *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
+ *
+ * vineetg: May 2011
+ *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
+ *   some of the LMBench tests improved amazingly
+ *      = page-fault thrice as fast (75 usec to 28 usec)
+ *      = mmap twice as fast (9.6 msec to 4.6 msec),
+ *      = fork (5.3 msec to 3.7 msec)
+ *
+ * vineetg: April 2011 :
+ *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ *      helps avoid a shift when preparing PD0 from PTE
+ *
+ * vineetg: April 2011 : Preparing for MMU V3
+ *  -MMU v2/v3 BCRs decoded differently
+ *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
+ *  -tlb_entry_erase( ) can be void
+ *  -local_flush_tlb_range( ):
+ *      = need not "ceil" @end
+ *      = walks MMU only if range spans < 32 entries, as opposed to 256
+ *
+ * Vineetg: Sept 10th 2008
+ *  -Changes related to MMU v2 (Rel 4.8)
+ *
+ * Vineetg: Aug 29th 2008
+ *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
+ *    flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
+ *    it fails. Thus need to load it with ANY valid value before invoking
+ *    TLBIVUTLB cmd
+ *
+ * Vineetg: Aug 21th 2008:
+ *  -Reduced the duration of IRQ lockouts in TLB Flush routines
+ *  -Multiple copies of TLB erase code separated into a "single" function
+ *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
+ *       in interrupt-safe region.
+ *
+ * Vineetg: April 23rd Bug #93131
+ *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
+ *              flush is more than the size of TLB itself.
+ *
+ * Rahul Trivedi : Codito Technologies 2004
+ */
+
+#include <linux/module.h>
+#include <asm/arcregs.h>
+#include <asm/setup.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/*                     Need for ARC MMU v2
+ *
+ * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
+ * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
+ * map into same set, there would be contention for the 2 ways causing severe
+ * Thrashing.
+ *
+ * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
+ * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
+ * Given this, the thrashing problem should never happen because once the 3
+ * J-TLB entries are created (even though 3rd will knock out one of the prev
+ * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
+ *
+ * Yet we still see the Thrashing because a J-TLB Write cause flush of u-TLBs.
+ * This is a simple design for keeping them in sync. So what do we do?
+ * The solution which James came up was pretty neat. It utilised the assoc
+ * of uTLBs by not invalidating always but only when absolutely necessary.
+ *
+ * - Existing TLB commands work as before
+ * - New command (TLBWriteNI) for TLB write without clearing uTLBs
+ * - New command (TLBIVUTLB) to invalidate uTLBs.
+ *
+ * The uTLBs need only be invalidated when pages are being removed from the
+ * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
+ * as a result of a miss, the removed entry is still allowed to exist in the
+ * uTLBs as it is still valid and present in the OS page table. This allows the
+ * full associativity of the uTLBs to hide the limited associativity of the main
+ * TLB.
+ *
+ * During a miss handler, the new "TLBWriteNI" command is used to load
+ * entries without clearing the uTLBs.
+ *
+ * When the OS page table is updated, TLB entries that may be associated with a
+ * removed page are removed (flushed) from the TLB using TLBWrite. In this
+ * circumstance, the uTLBs must also be cleared. This is done by using the
+ * existing TLBWrite command. An explicit IVUTLB is also required for those
+ * corner cases when TLBWrite was not executed at all because the corresp
+ * J-TLB entry got evicted/replaced.
+ */
+
+/* A copy of the ASID from the PID reg is kept in asid_cache */
+int asid_cache = FIRST_ASID;
+
+/* ASID to mm struct mapping. We have one extra entry corresponding to
+ * NO_ASID to save us a compare when clearing the mm entry for old asid
+ * see get_new_mmu_context (asm-arc/mmu_context.h)
+ */
+struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+/*
+ * Utility Routine to erase a J-TLB entry
+ * The procedure is to look it up in the MMU. If found, ERASE it by
+ *  issuing a TlbWrite CMD with PD0 = PD1 = 0
+ */
+
+/* Zero out the J-TLB entry currently selected by the INDEX reg */
+static void __tlb_entry_erase(void)
+{
+       write_aux_reg(ARC_REG_TLBPD1, 0);
+       write_aux_reg(ARC_REG_TLBPD0, 0);
+       write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+/*
+ * Erase the J-TLB entry (if any) matching @vaddr_n_asid
+ * @vaddr_n_asid: page-aligned vaddr OR'ed with the 8-bit ASID (PD0 format)
+ */
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+       unsigned int idx;
+
+       /* Locate the TLB entry for this vaddr + ASID */
+       write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+       write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+       idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+       /* No error means entry found, zero it out */
+       if (likely(!(idx & TLB_LKUP_ERR))) {
+               __tlb_entry_erase();
+       } else {                /* Some sort of Error */
+
+               /* Duplicate entry error */
+               if (idx & 0x1) {
+                       /* TODO we need to handle this case too */
+                       pr_emerg("unhandled Duplicate flush for %x\n",
+                              vaddr_n_asid);
+               }
+               /* else entry not found so nothing to do */
+       }
+}
+
+/****************************************************************************
+ * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
+ *
+ * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
+ *
+ * utlb_invalidate ( )
+ *  -For v2 MMU calls Flush uTLB Cmd
+ *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
+ *      This is because in v1 TLBWrite itself invalidate uTLBs
+ ***************************************************************************/
+
+/* Explicitly invalidate the uTLB copies; real work only for MMU v2 onwards
+ * (for v1, TLBWrite itself invalidates uTLBs - see block comment above)
+ */
+static void utlb_invalidate(void)
+{
+#if (CONFIG_ARC_MMU_VER >= 2)
+
+#if (CONFIG_ARC_MMU_VER < 3)
+       /* MMU v2 introduced the uTLB Flush command.
+        * There was however an obscure hardware bug, where uTLB flush would
+        * fail when a prior probe for J-TLB (both totally unrelated) would
+        * return lkup err - because the entry didn't exist in MMU.
+        * The workaround was to set Index reg with some valid value, prior to
+        * flush. This was fixed in MMU v3 hence not needed any more
+        */
+       unsigned int idx;
+
+       /* make sure INDEX Reg is valid */
+       idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+       /* If not write some dummy val */
+       if (unlikely(idx & TLB_LKUP_ERR))
+               write_aux_reg(ARC_REG_TLBINDEX, 0xa);
+#endif
+
+       write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
+#endif
+
+}
+
+/*
+ * Un-conditionally (without lookup) erase the entire MMU contents
+ */
+
+noinline void local_flush_tlb_all(void)
+{
+       unsigned long flags;
+       unsigned int entry;
+       struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+       local_irq_save(flags);
+
+       /* Load PD0 and PD1 with template for a Blank Entry */
+       write_aux_reg(ARC_REG_TLBPD1, 0);
+       write_aux_reg(ARC_REG_TLBPD0, 0);
+
+       for (entry = 0; entry < mmu->num_tlb; entry++) {
+               /* write this entry to the TLB */
+               write_aux_reg(ARC_REG_TLBINDEX, entry);
+               write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+       }
+
+       /* J-TLB entries blanked; also clear the uTLB copies */
+       utlb_invalidate();
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
+ * (all existing entries, tagged with the old ASID, become stale)
+ */
+noinline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+       /*
+        * Small optimisation courtesy IA64
+        * flush_mm called during fork,exit,munmap etc, multiple times as well.
+        * Only for fork( ) do we need to move parent to a new MMU ctxt,
+        * all other cases are NOPs, hence this check.
+        */
+       if (atomic_read(&mm->mm_users) == 0)
+               return;
+
+       /*
+        * Workaround for Android weirdism:
+        * A binder VMA could end up in a task such that vma->mm != tsk->mm
+        * old code would cause h/w - s/w ASID to get out of sync
+        */
+       if (current->mm != mm)
+               destroy_context(mm);
+       else
+               get_new_mmu_context(mm);
+}
+
+/*
+ * Flush a Range of TLB entries for userland.
+ * @start is inclusive, while @end is exclusive
+ * Difference between this and Kernel Range Flush is
+ *  -Here the fastest way (if range is too large) is to move to next ASID
+ *      without doing any explicit Shootdown
+ *  -In case of kernel Flush, entry has to be shot down explicitly
+ */
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                          unsigned long end)
+{
+       unsigned long flags;
+       unsigned int asid;
+
+       /* If range @start to @end is more than 32 TLB entries deep,
+        * its better to move to a new ASID rather than searching for
+        * individual entries and then shooting them down
+        *
+        * The calc above is rough, doesn't account for unaligned parts,
+        * since this is heuristics based anyways
+        */
+       if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+               local_flush_tlb_mm(vma->vm_mm);
+               return;
+       }
+
+       /*
+        * @start moved to page start: this alone suffices for checking
+        * loop end condition below, w/o need for aligning @end to end
+        * e.g. 2000 to 4001 will anyhow loop twice
+        */
+       start &= PAGE_MASK;
+
+       local_irq_save(flags);
+       asid = vma->vm_mm->context.asid;
+
+       /* NO_ASID: presumably no entries tagged for this mm yet - skip */
+       if (asid != NO_ASID) {
+               while (start < end) {
+                       tlb_entry_erase(start | (asid & 0xff));
+                       start += PAGE_SIZE;
+               }
+       }
+
+       utlb_invalidate();
+
+       local_irq_restore(flags);
+}
+
+/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
+ *  @start, @end interpreted as kvaddr
+ * Interestingly, shared TLB entries can also be flushed using just
+ * @start,@end alone (interpreted as user vaddr), although technically SASID
+ * is also needed. However our smart TLbProbe lookup takes care of that.
+ */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       /* exactly same as above, except for TLB entry not taking ASID */
+
+       if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+               local_flush_tlb_all();
+               return;
+       }
+
+       start &= PAGE_MASK;
+
+       local_irq_save(flags);
+       while (start < end) {
+               /* kernel entries are global: no ASID OR'ed into the lookup */
+               tlb_entry_erase(start);
+               start += PAGE_SIZE;
+       }
+
+       utlb_invalidate();
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Delete TLB entry in MMU for a given page (user virtual address)
+ * NOTE One TLB entry contains translation for single PAGE
+ */
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+       unsigned long flags;
+
+       /* Note that it is critical that interrupts are DISABLED between
+        * checking the ASID and using it flush the TLB entry
+        */
+       local_irq_save(flags);
+
+       /* NO_ASID means mm has no tagged entries, hence nothing to erase */
+       if (vma->vm_mm->context.asid != NO_ASID) {
+               tlb_entry_erase((page & PAGE_MASK) |
+                               (vma->vm_mm->context.asid & 0xff));
+               utlb_invalidate();
+       }
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Routine to create a TLB entry
+ * @vma: vma containing @address (must belong to current->active_mm, see below)
+ * @address: virtual address to pre-install a translation for
+ * @ptep: software PTE to encode into the hardware entry (PD0/PD1 pair)
+ */
+void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+       unsigned long flags;
+       unsigned int idx, asid_or_sasid;
+       unsigned long pd0_flags;
+
+       /*
+        * create_tlb() assumes that current->mm == vma->mm, since
+        * -it ASID for TLB entry is fetched from MMU ASID reg (valid for curr)
+        * -completes the lazy write to SASID reg (again valid for curr tsk)
+        *
+        * Removing the assumption involves
+        * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
+        * -Fix the TLB paranoid debug code to not trigger false negatives.
+        * -More importantly it makes this handler inconsistent with fast-path
+        *  TLB Refill handler which always deals with "current"
+        *
+        * Lets see the use cases when current->mm != vma->mm and we land here
+        *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
+        *     Here VM wants to pre-install a TLB entry for user stack while
+        *     current->mm still points to pre-execve mm (hence the condition).
+        *     However the stack vaddr is soon relocated (randomization) and
+        *     move_page_tables() tries to undo that TLB entry.
+        *     Thus not creating TLB entry is not any worse.
+        *
+        *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
+        *     breakpoint in debugged task. Not creating a TLB now is not
+        *     performance critical.
+        *
+        * Both the cases above are not good enough for code churn.
+        */
+       if (current->active_mm != vma->vm_mm)
+               return;
+
+       local_irq_save(flags);
+
+       tlb_paranoid_check(vma->vm_mm->context.asid, address);
+
+       address &= PAGE_MASK;
+
+       /* update this PTE credentials */
+       pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
+
+       /* Create HW TLB entry Flags (in PD0) from PTE Flags */
+#if (CONFIG_ARC_MMU_VER <= 2)
+       /* v1/v2: PTE and PD0 flag layouts overlap, hence the shift */
+       pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
+#else
+       pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
+#endif
+
+       /* ASID for this task */
+       asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
+
+       write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+
+       /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
+       write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+
+       /* First verify if entry for this vaddr+ASID already exists */
+       write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+       idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+       /*
+        * If Not already present get a free slot from MMU.
+        * Otherwise, Probe would have located the entry and set INDEX Reg
+        * with existing location. This will cause Write CMD to over-write
+        * existing entry with new PD0 and PD1
+        */
+       if (likely(idx & TLB_LKUP_ERR))
+               write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+       /*
+        * Commit the Entry to MMU
+        * It doesn't sound safe to use the TLBWriteNI cmd here
+        * which doesn't flush uTLBs. I'd rather be safe than sorry.
+        */
+       write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+
+       local_irq_restore(flags);
+}
+
+/* Arch hook called by core VM at the end of handle_mm_fault( ),
+ * when a new PTE is entered in Page Tables or an existing one
+ * is modified. We aggressively pre-install a TLB entry to avoid
+ * taking a TLB miss on the very next access.
+ */
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
+                     pte_t *ptep)
+{
+
+       create_tlb(vma, vaddress, ptep);
+}
+
+/* Read the MMU Build Configuration Register, Decode it and save into
+ * the cpuinfo structure for later use.
+ * No Validation is done here, simply read/convert the BCRs
+ */
+void __init read_decode_mmu_bcr(void)
+{
+       unsigned int tmp;
+       struct bcr_mmu_1_2 *mmu2;       /* encoded MMU2 attr */
+       struct bcr_mmu_3 *mmu3;         /* encoded MMU3 attr */
+       struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+       tmp = read_aux_reg(ARC_REG_MMU_BCR);
+       /* MMU version lives in the top byte of the BCR */
+       mmu->ver = (tmp >> 24);
+
+       if (mmu->ver <= 2) {
+               mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+               mmu->pg_sz = PAGE_SIZE;
+               mmu->sets = 1 << mmu2->sets;
+               mmu->ways = 1 << mmu2->ways;
+               mmu->u_dtlb = mmu2->u_dtlb;
+               mmu->u_itlb = mmu2->u_itlb;
+       } else {
+               mmu3 = (struct bcr_mmu_3 *)&tmp;
+               /* v3 BCR encodes page size: 512 << n bytes */
+               mmu->pg_sz = 512 << mmu3->pg_sz;
+               mmu->sets = 1 << mmu3->sets;
+               mmu->ways = 1 << mmu3->ways;
+               mmu->u_dtlb = mmu3->u_dtlb;
+               mmu->u_itlb = mmu3->u_itlb;
+       }
+
+       mmu->num_tlb = mmu->sets * mmu->ways;
+}
+
+/*
+ * Render a human readable description of the MMU into @buf (max @len bytes)
+ * Returns @buf for printk convenience
+ * NOTE(review): @cpu_id is unused - info is read for the executing cpu
+ */
+char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+       int n = 0;
+       struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+       n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
+                      p_mmu->ver, TO_KB(p_mmu->pg_sz));
+
+       n += scnprintf(buf + n, len - n,
+                      "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
+                      p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
+                      p_mmu->u_dtlb, p_mmu->u_itlb,
+                      __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : "");
+
+       return buf;
+}
+
+/* Boot-time MMU bring-up: sanity check h/w vs kernel build, then enable it */
+void __init arc_mmu_init(void)
+{
+       char str[256];
+       struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+       /* report the MMU configuration at boot */
+       printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));
+
+       /* For efficiency sake, kernel is compile time built for a MMU ver
+        * This must match the hardware it is running on.
+        * Linux built for MMU V2, if run on MMU V1 will break down because V1
+        *  hardware doesn't understand cmds such as WriteNI, or IVUTLB
+        * On the other hand, Linux built for V1 if run on MMU V2 will do
+        *   un-needed workarounds to prevent memcpy thrashing.
+        * Similarly MMU V3 has new features which won't work on older MMU
+        */
+       if (mmu->ver != CONFIG_ARC_MMU_VER) {
+               panic("MMU ver %d doesn't match kernel built for %d...\n",
+                     mmu->ver, CONFIG_ARC_MMU_VER);
+       }
+
+       if (mmu->pg_sz != PAGE_SIZE)
+               panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+
+       /*
+        * ASID mgmt data structures are compile time init
+        *  asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
+        */
+
+       /* start from a clean slate before enabling */
+       local_flush_tlb_all();
+
+       /* Enable the MMU */
+       write_aux_reg(ARC_REG_PID, MMU_ENABLE);
+
+       /* In smp we use this reg for interrupt 1 scratch */
+#ifndef CONFIG_SMP
+       /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
+       write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
+#endif
+}
+
+/*
+ * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
+ * The mapping is Column-first.
+ *             ---------------------   -----------
+ *             |way0|way1|way2|way3|   |way0|way1|
+ *             ---------------------   -----------
+ * [set0]      |  0 |  1 |  2 |  3 |   |  0 |  1 |
+ * [set1]      |  4 |  5 |  6 |  7 |   |  2 |  3 |
+ *             ~                   ~   ~         ~
+ * [set127]    | 508| 509| 510| 511|   | 254| 255|
+ *             ---------------------   -----------
+ * For normal operations we don't(must not) care how above works since
+ * MMU cmd getIndex(vaddr) abstracts that out.
+ * However for walking WAYS of a SET, we need to know this
+ */
+#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
+
+/* Handling of Duplicate PD (TLB entry) in MMU.
+ * -Could be due to buggy customer tapeouts or obscure kernel bugs
+ * -MMU complaints not at the time of duplicate PD installation, but at the
+ *      time of lookup matching multiple ways.
+ * -Ideally these should never happen - but if they do - workaround by deleting
+ *      the duplicate one.
+ * -Knob to be verbose abt it.(TODO: hook them up to debugfs)
+ */
+volatile int dup_pd_verbose = 1;/* Be silent about it or complain (default) */
+
+/*
+ * Exception handler for duplicate TLB entries (same vaddr in multiple ways)
+ * Walks every set, reads out all its ways, and for any pair of ways whose
+ * PD0 vaddr matches, erases the lower-numbered one (@way), keeping @n
+ * @cause, @address, @regs: standard exception args (unused by this handler)
+ */
+void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+                         struct pt_regs *regs)
+{
+       int set, way, n;
+       unsigned int pd0[4], pd1[4];    /* assume max 4 ways */
+       unsigned long flags, is_valid;
+       struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+       local_irq_save(flags);
+
+       /* re-enable the MMU */
+       write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+
+       /* loop thru all sets of TLB */
+       for (set = 0; set < mmu->sets; set++) {
+
+               /* read out all the ways of current set */
+               for (way = 0, is_valid = 0; way < mmu->ways; way++) {
+                       write_aux_reg(ARC_REG_TLBINDEX,
+                                         SET_WAY_TO_IDX(mmu, set, way));
+                       write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
+                       pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
+                       pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
+                       is_valid |= pd0[way] & _PAGE_PRESENT;
+               }
+
+               /* If all the WAYS in SET are empty, skip to next SET */
+               if (!is_valid)
+                       continue;
+
+               /* Scan the set for duplicate ways: needs a nested loop */
+               for (way = 0; way < mmu->ways; way++) {
+                       if (!pd0[way])
+                               continue;
+
+                       /* compare page-frame part of PD0 against later ways */
+                       for (n = way + 1; n < mmu->ways; n++) {
+                               if ((pd0[way] & PAGE_MASK) ==
+                                   (pd0[n] & PAGE_MASK)) {
+
+                                       if (dup_pd_verbose) {
+                                               pr_info("Duplicate PD's @"
+                                                       "[%d:%d]/[%d:%d]\n",
+                                                    set, way, set, n);
+                                               pr_info("TLBPD0[%u]: %08x\n",
+                                                    way, pd0[way]);
+                                       }
+
+                                       /*
+                                        * clear entry @way and not @n. This is
+                                        * critical to our optimised loop
+                                        */
+                                       pd0[way] = pd1[way] = 0;
+                                       write_aux_reg(ARC_REG_TLBINDEX,
+                                               SET_WAY_TO_IDX(mmu, set, way));
+                                       __tlb_entry_erase();
+                               }
+                       }
+               }
+       }
+
+       local_irq_restore(flags);
+}
+
+/***********************************************************************
+ * Diagnostic Routines
+ *  -Called from Low Level TLB Handlers if things don't look good
+ **********************************************************************/
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+/*
+ * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
+ * don't match
+ * @is_fast_path: 1 if invoked from the fast-path refill, 0 from slow path
+ */
+void print_asid_mismatch(int is_fast_path)
+{
+       int pid_sw, pid_hw;
+       pid_sw = current->active_mm->context.asid;
+       pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+       pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
+              is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
+
+       /* NOTE(review): "flag 1" presumably halts the core - confirm vs ISA */
+       __asm__ __volatile__("flag 1");
+}
+
+/*
+ * Verify that the s/w ASID (@pid_sw) matches the h/w one in the MMU PID reg
+ * for the access at @addr; complain via print_asid_mismatch() on mismatch
+ * NOTE(review): 0x70000000 appears to delimit user addresses - kernel addrs
+ * are exempt from the check; confirm against the platform memory map
+ */
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
+{
+       unsigned int pid_hw;
+
+       pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+       if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
+               print_asid_mismatch(0);
+}
+#endif
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
new file mode 100644 (file)
index 0000000..9df765d
--- /dev/null
@@ -0,0 +1,408 @@
+/*
+ * TLB Exception Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: April 2011 :
+ *  -MMU v1: moved out legacy code into a separate file
+ *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ *      helps avoid a shift when preparing PD0 from PTE
+ *
+ * Vineetg: July 2009
+ *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ *   entry, so that it doesn't knock out it's I-TLB entry
+ *  -Some more fine tuning:
+ *   bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
+ *
+ * Vineetg: July 2009
+ *  -Practically rewrote the I/D TLB Miss handlers
+ *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
+ *   Hence Leaner by 1.5 K
+ *   Used Conditional arithmetic to replace excessive branching
+ *   Also used short instructions wherever possible
+ *
+ * Vineetg: Aug 13th 2008
+ *  -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
+ *   more information in case of a Fatality
+ *
+ * Vineetg: March 25th Bug #92690
+ *  -Added Debug Code to check if sw-ASID == hw-ASID
+
+ * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
+ */
+
+       .cpu A7
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/tlb.h>
+#include <asm/pgtable.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+#include <asm/processor.h>
+#if (CONFIG_ARC_MMU_VER == 1)
+#include <asm/tlb-mmu1.h>
+#endif
+
+;--------------------------------------------------------------------------
+; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
+; For details refer to comments before TLBMISS_FREEUP_REGS below
+;--------------------------------------------------------------------------
+
+ARCFP_DATA ex_saved_reg1
+       .align 1 << L1_CACHE_SHIFT      ; IMP: Must be Cache Line aligned
+       .type   ex_saved_reg1, @object
+#ifdef CONFIG_SMP
+       .size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+ex_saved_reg1:
+       .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+#else
+       .size   ex_saved_reg1, 16
+ex_saved_reg1:
+       .zero 16
+#endif
+
+;============================================================================
+;  Troubleshooting Stuff
+;============================================================================
+
+; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
+; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
+; we use the MMU PID Reg to get current ASID.
+; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble.
+; So we try to detect this in TLB Miss handler
+
+
+; Debug-only (CONFIG_ARC_DBG_TLB_PARANOIA) check that h/w ASID in MMU PID reg
+; matches s/w ASID in current task's mm. On a user-mode mismatch, switches to
+; the kernel stack and jumps to print_asid_mismatch.
+; Clobbers r0, r1, r3 (already saved by TLBMISS_FREEUP_REGS before invoking)
+.macro DBG_ASID_MISMATCH
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+       ; make sure h/w ASID is same as s/w ASID
+
+       GET_CURR_TASK_ON_CPU  r3
+       ld r0, [r3, TASK_ACT_MM]
+       ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
+
+       lr r1, [ARC_REG_PID]
+       and r1, r1, 0xFF
+       breq r1, r0, 5f
+
+       ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
+       lr  r0, [erstatus]
+       bbit0 r0, STATUS_U_BIT, 5f
+
+       ; We sure are in troubled waters, Flag the error, but to do so
+       ; need to switch to kernel mode stack to call error routine
+       GET_TSK_STACK_BASE   r3, sp
+
+       ; Call printk to shoutout aloud
+       mov r0, 1       ; r0 = 1 -> "is_fast_path" arg of print_asid_mismatch
+       j print_asid_mismatch
+
+5:   ; ASIDs match so proceed normally
+       nop
+
+#endif
+
+.endm
+
+;============================================================================
+;TLB Miss handling Code
+;============================================================================
+
+;-----------------------------------------------------------------------------
+; This macro does the page-table lookup for the faulting address.
+; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
+.macro LOAD_FAULT_PTE
+
+       ; r2 <- faulting vaddr (EFA aux reg)
+       lr  r2, [efa]
+
+#ifndef CONFIG_SMP
+       lr  r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
+#else
+       ; SMP: scratch reg not usable for pgd cache; walk task struct instead
+       GET_CURR_TASK_ON_CPU  r1
+       ld  r1, [r1, TASK_ACT_MM]
+       ld  r1, [r1, MM_PGD]
+#endif
+
+       lsr     r0, r2, PGDIR_SHIFT     ; Bits for indexing into PGD
+       ld.as   r1, [r1, r0]            ; PGD entry corresp to faulting addr
+       and.f   r1, r1, PAGE_MASK       ; Ignoring protection and other flags
+       ;   contains Ptr to Page Table
+       bz.d    do_slow_path_pf         ; if no Page Table, do page fault
+       ; (.d: the "lsr" below executes in the branch delay slot)
+
+       ; Get the PTE entry: The idea is
+       ; (1) x = addr >> PAGE_SHIFT    -> masks page-off bits from @fault-addr
+       ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
+       ; (3) z = pgtbl[y]
+       ; To avoid the multiply by in end, we do the -2, <<2 below
+
+       lsr     r0, r2, (PAGE_SHIFT - 2)
+       and     r0, r0, ( (PTRS_PER_PTE - 1) << 2)
+       ld.aw   r0, [r1, r0]            ; get PTE and PTE ptr for fault addr
+       ; (.aw address writeback: r1 now points at the PTE itself)
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+       and.f 0, r0, _PAGE_PRESENT
+       bz   1f
+       ld   r2, [num_pte_not_present]
+       add  r2, r2, 1
+       st   r2, [num_pte_not_present]
+1:
+#endif
+
+.endm
+
+;-----------------------------------------------------------------
+; Convert Linux PTE entry into TLB entry
+; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; IN: r0 = PTE, r1 = ptr to PTE
+; Clobbers r2, r3
+
+.macro CONV_PTE_TO_TLB
+       and r3, r0, PTE_BITS_IN_PD1 ; Extract permission flags+PFN from PTE
+       sr  r3, [ARC_REG_TLBPD1]    ; these go in PD1
+
+       and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
+#if (CONFIG_ARC_MMU_VER <= 2)   /* Need not be done with v3 onwards */
+       lsr r2, r2                  ; shift PTE flags to match layout in PD0
+#endif
+
+       lr  r3,[ARC_REG_TLBPD0]     ; MMU prepares PD0 with vaddr and asid
+
+       or  r3, r3, r2              ; S | vaddr | {sasid|asid}
+       sr  r3,[ARC_REG_TLBPD0]     ; rewrite PD0
+.endm
+
+;-----------------------------------------------------------------
+; Commit the TLB entry into MMU
+
+.macro COMMIT_ENTRY_TO_MMU
+
+       /* Get free TLB slot: Set = computed from vaddr, way = random */
+       sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]
+
+       /* Commit the Write */
+#if (CONFIG_ARC_MMU_VER >= 2)   /* introduced in v2 */
+       /* NI = no uTLB invalidate; safe during refill - see top of tlb.c */
+       sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
+#else
+       sr TLBWrite, [ARC_REG_TLBCOMMAND]
+#endif
+.endm
+
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free-up FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+;      ".size   ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; only need to save only a handful of regs, as opposed to complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus scratch AUX reg is used (and no longer used to cache task PGD).
+; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+;      ".size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
+
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+       sr  r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
+       GET_CPU_ID  r0                  ; get to per cpu scratch mem,
+       lsl r0, r0, L1_CACHE_SHIFT      ; cache line wide per cpu
+       add r0, @ex_saved_reg1, r0
+#else
+       st    r0, [@ex_saved_reg1]
+       mov_s r0, @ex_saved_reg1
+#endif
+       st_s  r1, [r0, 4]
+       st_s  r2, [r0, 8]
+       st_s  r3, [r0, 12]
+
+       ; VERIFY if the ASID in MMU-PID Reg is same as
+       ; one in Linux data structures
+
+       DBG_ASID_MISMATCH
+.endm
+
+;-----------------------------------------------------------------
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+       GET_CPU_ID  r0                  ; get to per cpu scratch mem
+       lsl r0, r0, L1_CACHE_SHIFT      ; each is cache line wide
+       add r0, @ex_saved_reg1, r0
+       ld_s  r3, [r0,12]
+       ld_s  r2, [r0, 8]
+       ld_s  r1, [r0, 4]
+       lr    r0, [ARC_REG_SCRATCH_DATA0]
+#else
+       mov_s r0, @ex_saved_reg1
+       ld_s  r3, [r0,12]
+       ld_s  r2, [r0, 8]
+       ld_s  r1, [r0, 4]
+       ld_s  r0, [r0]
+#endif
+.endm
+
+ARCFP_CODE     ;Fast Path Code, candidate for ICCM
+
+;-----------------------------------------------------------------------------
+; I-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissI
+
+       TLBMISS_FREEUP_REGS
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+       ld  r0, [@numitlb]
+       add r0, r0, 1
+       st  r0, [@numitlb]
+#endif
+
+       ;----------------------------------------------------------------
+       ; Get the PTE corresponding to V-addr accessed
+       LOAD_FAULT_PTE
+
+       ;----------------------------------------------------------------
+       ; VERIFY_PTE: Check if PTE permissions approp for executing code
+       cmp_s   r2, VMALLOC_START
+       mov.lo  r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+       mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
+
+       and     r3, r0, r2  ; Mask out NON Flag bits from PTE
+       xor.f   r3, r3, r2  ; check ( ( pte & flags_test ) == flags_test )
+       bnz     do_slow_path_pf
+
+       ; Let Linux VM know that the page was accessed
+       or      r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED)  ; set Accessed Bit
+       st_s    r0, [r1]                                  ; Write back PTE
+
+       CONV_PTE_TO_TLB
+       COMMIT_ENTRY_TO_MMU
+       TLBMISS_RESTORE_REGS
+       rtie
+
+ARC_EXIT EV_TLBMissI
+
+;-----------------------------------------------------------------------------
+; D-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissD
+
+       TLBMISS_FREEUP_REGS
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+       ld  r0, [@numdtlb]
+       add r0, r0, 1
+       st  r0, [@numdtlb]
+#endif
+
+       ;----------------------------------------------------------------
+       ; Get the PTE corresponding to V-addr accessed
+       ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE
+       LOAD_FAULT_PTE
+
+       ;----------------------------------------------------------------
+       ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
+
+       mov_s   r2, 0
+       lr      r3, [ecr]
+       btst_s  r3, ECR_C_BIT_DTLB_LD_MISS      ; Read Access
+       or.nz   r2, r2, _PAGE_READ              ; chk for Read flag in PTE
+       btst_s  r3, ECR_C_BIT_DTLB_ST_MISS      ; Write Access
+       or.nz   r2, r2, _PAGE_WRITE             ; chk for Write flag in PTE
+       ; Above laddering takes care of XCHG access
+       ;   which is both Read and Write
+
+       ; If kernel mode access, ; make _PAGE_xx flags as _PAGE_K_xx
+       ; For copy_(to|from)_user, despite exception taken in kernel mode,
+       ; this code is not hit, because EFA would still be the user mode
+       ; address (EFA < 0x6000_0000).
+       ; This code is for legit kernel mode faults, vmalloc specifically
+       ; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
+
+       lr      r3, [efa]
+       cmp     r3, VMALLOC_START - 1   ; If kernel mode access
+       asl.hi  r2, r2, 3               ; make _PAGE_xx flags as _PAGE_K_xx
+       or      r2, r2, _PAGE_PRESENT   ; Common flag for K/U mode
+
+       ; By now, r2 setup with all the Flags we need to check in PTE
+       and     r3, r0, r2              ; Mask out NON Flag bits from PTE
+       brne.d  r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)
+
+       ;----------------------------------------------------------------
+       ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
+       lr      r3, [ecr]
+       or      r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always
+       btst_s  r3,  ECR_C_BIT_DTLB_ST_MISS   ; See if it was a Write Access ?
+       or.nz   r0, r0, _PAGE_MODIFIED        ; if Write, set Dirty bit as well
+       st_s    r0, [r1]                      ; Write back PTE
+
+       CONV_PTE_TO_TLB
+
+#if (CONFIG_ARC_MMU_VER == 1)
+       ; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
+       ; memcpy where 3 parties contend for 2 ways, ensuing a livelock.
+       ; But only for old MMU or one with Metal Fix
+       TLB_WRITE_HEURISTICS
+#endif
+
+       COMMIT_ENTRY_TO_MMU
+       TLBMISS_RESTORE_REGS
+       rtie
+
+;-------- Common routine to call Linux Page Fault Handler -----------
+do_slow_path_pf:
+
+       ; Restore the 4-scratch regs saved by fast path miss handler
+       TLBMISS_RESTORE_REGS
+
+       ; Slow path TLB Miss handled as a regular ARC Exception
+       ; (stack switching / save the complete reg-file).
+       ; That requires freeing up r9
+       EXCPN_PROLOG_FREEUP_REG r9
+
+       lr  r9, [erstatus]
+
+       SWITCH_TO_KERNEL_STK
+       SAVE_ALL_SYS
+
+       ; ------- setup args for Linux Page fault Handler ---------
+       mov_s r0, sp
+       lr  r2, [efa]
+       lr  r3, [ecr]
+
+       ; Both st and ex imply WRITE access of some sort, hence do_page_fault( )
+       ; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or
+       ; DTLB-ld Miss
+       ; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03
+       ; Following code uses that fact that st/ex have one bit in common
+
+       btst_s r3,  ECR_C_BIT_DTLB_ST_MISS
+       mov.z  r1, 0
+       mov.nz r1, 1
+
+       ; We don't want exceptions to be disabled while the fault is handled.
+       ; Now that we have saved the context we return from exception hence
+       ; exceptions get re-enabled
+
+       FAKE_RET_FROM_EXCPN  r9
+
+       bl  do_page_fault
+       b   ret_from_exception
+
+ARC_EXIT EV_TLBMissD
+
+ARC_ENTRY EV_TLBMissB   ; Bogus entry to measure sz of DTLBMiss hdlr
diff --git a/arch/arc/oprofile/Makefile b/arch/arc/oprofile/Makefile
new file mode 100644 (file)
index 0000000..ce417a6
--- /dev/null
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+               oprof.o cpu_buffer.o buffer_sync.o \
+               event_buffer.o oprofile_files.o \
+               oprofilefs.o oprofile_stats.o \
+               timer_int.o )
+
+oprofile-y     := $(DRIVER_OBJS) common.o
diff --git a/arch/arc/oprofile/common.c b/arch/arc/oprofile/common.c
new file mode 100644 (file)
index 0000000..c80fcad
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on orig code from @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+       /*
+        * A failure here, forces oprofile core to switch to Timer based PC
+        * sampling, which will happen if say perf is not enabled/available
+        */
+       return oprofile_perf_init(ops);
+}
+
+void oprofile_arch_exit(void)
+{
+       oprofile_perf_exit();
+}
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
new file mode 100644 (file)
index 0000000..b41e786
--- /dev/null
@@ -0,0 +1,84 @@
+#
+# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+menuconfig ARC_PLAT_FPGA_LEGACY
+       bool "\"Legacy\" ARC FPGA dev Boards"
+       select ISS_SMP_EXTN if SMP
+       help
+         Support for ARC development boards, provided by Synopsys.
+         These are based on FPGA or ISS. e.g.
+         - ARCAngel4
+         - ML509
+         - MetaWare ISS
+
+if ARC_PLAT_FPGA_LEGACY
+
+config ARC_BOARD_ANGEL4
+       bool "ARC Angel4"
+       default y
+       help
+         ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based)
+
+config ARC_BOARD_ML509
+       bool "ML509"
+       help
+         ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based)
+
+config ISS_SMP_EXTN
+       bool "ARC SMP Extensions (ISS Models only)"
+       default n
+       depends on SMP
+       select ARC_HAS_COH_RTSC
+       help
+         SMP Extensions to ARC700, in a "simulation only" Model, supported in
+         ARC ISS (Instruction Set Simulator).
+         The SMP extensions include:
+         -IDU (Interrupt Distribution Unit)
+         -XTL (To enable CPU start/stop/set-PC for another CPU)
+         It doesn't provide coherent Caches and/or Atomic Ops (LLOCK/SCOND)
+
+config ARC_SERIAL_BAUD
+       int "UART Baud rate"
+       default "115200"
+       depends on SERIAL_ARC || SERIAL_ARC_CONSOLE
+       help
+         Baud rate for the ARC UART
+
+menuconfig ARC_HAS_BVCI_LAT_UNIT
+       bool "BVCI Bus Latency Unit"
+       depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4
+       help
+         IP to add artificial latency to BVCI Bus Based FPGA builds.
+         The default latency (even worst case) for FPGA is non-realistic
+         (~10 SDRAM, ~5 SSRAM).
+
+config BVCI_LAT_UNITS
+       hex "Latency Unit(s) Bitmap"
+       default "0x0"
+       depends on ARC_HAS_BVCI_LAT_UNIT
+       help
+         There are multiple Latency Units corresponding to the many
+         interfaces of the system bus arbiter (both CPU side as well as
+         the peripheral side).
+         To add latency to ALL memory transaction, choose Unit 0, otherwise
+         for finer grained - interface wise latency, specify a bitmap (1 bit
+         per unit) of all units. e.g. 1,2,12 will be 0x1003
+
+         Unit  0 - System Arb and Mem Controller
+         Unit  1 - I$ and System Bus
+         Unit  2 - D$ and System Bus
+         ..
+         Unit 12 - IDE Disk controller and System Bus
+
+config BVCI_LAT_CYCLES
+       int "Latency Value in cycles"
+       range 0 63
+       default "30"
+       depends on ARC_HAS_BVCI_LAT_UNIT
+
+endif
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile
new file mode 100644 (file)
index 0000000..a44e22e
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+KBUILD_CFLAGS  += -Iarch/arc/plat-arcfpga/include
+
+obj-y := platform.o irq.o
+obj-$(CONFIG_SMP)              += smp.o
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h
new file mode 100644 (file)
index 0000000..41e3356
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Feb 2009
+ *  -For AA4 board, IRQ assignments to peripherals
+ */
+
+#ifndef __PLAT_IRQ_H
+#define __PLAT_IRQ_H
+
+#define UART0_IRQ      5
+#define UART1_IRQ      10
+#define UART2_IRQ      11
+
+#define VMAC_IRQ       6
+
+#define IDE_IRQ                13
+#define PCI_IRQ                14
+#define PS2_IRQ                15
+
+#ifdef CONFIG_SMP
+#define IDU_INTERRUPT_0 16
+#endif
+
+extern void __init plat_fpga_init_IRQ(void);
+
+#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/memmap.h b/arch/arc/plat-arcfpga/include/plat/memmap.h
new file mode 100644 (file)
index 0000000..1663f33
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Feb 2009
+ *  -For AA4 board, System Memory Map for Peripherals etc
+ */
+
+#ifndef __PLAT_MEMMAP_H
+#define __PLAT_MEMMAP_H
+
+#define UART0_BASE              0xC0FC1000
+#define UART1_BASE              0xC0FC1100
+
+#define VMAC_REG_BASEADDR       0xC0FC2000
+
+#define IDE_CONTROLLER_BASE     0xC0FC9000
+
+#define AHB_PCI_HOST_BRG_BASE   0xC0FD0000
+
+#define PGU_BASEADDR            0xC0FC8000
+#define VLCK_ADDR               0xC0FCF028
+
+#define BVCI_LAT_UNIT_BASE      0xC0FED000
+
+#define PS2_BASE_ADDR          0xC0FCC000
+
+#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/smp.h b/arch/arc/plat-arcfpga/include/plat/smp.h
new file mode 100644 (file)
index 0000000..c09eb4c
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Rajeshwar Ranga: Interrupt Distribution Unit API's
+ */
+
+#ifndef __PLAT_ARCFPGA_SMP_H
+#define __PLAT_ARCFPGA_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <asm/arcregs.h>
+
+#define ARC_AUX_IDU_REG_CMD            0x2000
+#define ARC_AUX_IDU_REG_PARAM          0x2001
+
+#define ARC_AUX_XTL_REG_CMD            0x2002
+#define ARC_AUX_XTL_REG_PARAM          0x2003
+
+#define ARC_REG_MP_BCR                 0x2021
+
+#define ARC_XTL_CMD_WRITE_PC           0x04
+#define ARC_XTL_CMD_CLEAR_HALT         0x02
+
+/*
+ * Build Configuration Register which identifies the sub-components
+ */
+struct bcr_mp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int mp_arch:16, pad:5, sdu:1, idu:1, scu:1, ver:8;
+#else
+       unsigned int ver:8, scu:1, idu:1, sdu:1, pad:5, mp_arch:16;
+#endif
+};
+
+/* IDU supports 256 common interrupts */
+#define NR_IDU_IRQS                    256
+
+/*
+ * The Aux Regs layout is same bit-by-bit in both BE/LE modes.
+ * However when casted as a bitfield encoded "C" struct, gcc treats it as
+ * memory, generating different code for BE/LE, requiring structure adj (see
+ * include/asm/arcregs.h)
+ *
+ * However when manually "carving" the value for a Aux, no special handling
+ * of BE is needed because of the property described above
+ */
+#define IDU_SET_COMMAND(irq, cmd)                      \
+do {                                                   \
+       uint32_t __val;                                 \
+       __val = (((irq & 0xFF) << 8) | (cmd & 0xFF));   \
+       write_aux_reg(ARC_AUX_IDU_REG_CMD, __val);      \
+} while (0)
+
+#define IDU_SET_PARAM(par)  write_aux_reg(ARC_AUX_IDU_REG_PARAM, par)
+#define IDU_GET_PARAM()     read_aux_reg(ARC_AUX_IDU_REG_PARAM)
+
+/* IDU Commands */
+#define IDU_DISABLE                    0x00
+#define IDU_ENABLE                     0x01
+#define IDU_IRQ_CLEAR                  0x02
+#define IDU_IRQ_ASSERT                 0x03
+#define IDU_IRQ_WMODE                  0x04
+#define IDU_IRQ_STATUS                 0x05
+#define IDU_IRQ_ACK                    0x06
+#define IDU_IRQ_PEND                   0x07
+#define IDU_IRQ_RMODE                  0x08
+#define IDU_IRQ_WBITMASK               0x09
+#define IDU_IRQ_RBITMASK               0x0A
+
+#define idu_enable()           IDU_SET_COMMAND(0, IDU_ENABLE)
+#define idu_disable()          IDU_SET_COMMAND(0, IDU_DISABLE)
+
+#define idu_irq_assert(irq)    IDU_SET_COMMAND((irq), IDU_IRQ_ASSERT)
+#define idu_irq_clear(irq)     IDU_SET_COMMAND((irq), IDU_IRQ_CLEAR)
+
+/* IDU Interrupt Mode - Destination Encoding */
+#define IDU_IRQ_MOD_DISABLE            0x00
+#define IDU_IRQ_MOD_ROUND_RECP         0x01
+#define IDU_IRQ_MOD_TCPU_FIRSTRECP     0x02
+#define IDU_IRQ_MOD_TCPU_ALLRECP       0x03
+
+/* IDU Interrupt Mode  - Triggering Mode */
+#define IDU_IRQ_MODE_LEVEL_TRIG                0x00
+#define IDU_IRQ_MODE_PULSE_TRIG                0x01
+
+#define IDU_IRQ_MODE_PARAM(dest_mode, trig_mode)   \
+       (((trig_mode & 0x01) << 15) | (dest_mode & 0xFF))
+
+struct idu_irq_config {
+       uint8_t irq;
+       uint8_t dest_mode;
+       uint8_t trig_mode;
+};
+
+struct idu_irq_status {
+       uint8_t irq;
+       bool enabled;
+       bool status;
+       bool ack;
+       bool pend;
+       uint8_t next_rr;
+};
+
+extern void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask);
+extern void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode);
+
+extern void iss_model_init_smp(unsigned int cpu);
+extern void iss_model_init_early_smp(void);
+
+#endif /* CONFIG_SMP */
+
+#endif
diff --git a/arch/arc/plat-arcfpga/irq.c b/arch/arc/plat-arcfpga/irq.c
new file mode 100644 (file)
index 0000000..d2215fd
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * ARC FPGA Platform IRQ hookups
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <plat/irq.h>
+
+void __init plat_fpga_init_IRQ(void)
+{
+       /*
+        * SMP Hack because UART IRQ hardwired to cpu0 (boot-cpu) but if the
+        * request_irq() comes from any other CPU, the low level IRQ unmasking
+        * essential for getting Interrupts won't be enabled on cpu0, locking
+        * up the UART state machine.
+        */
+#ifdef CONFIG_SMP
+       arch_unmask_irq(UART0_IRQ);
+#endif
+}
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
new file mode 100644 (file)
index 0000000..4e20a1a
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+ * ARC FPGA Platform support code
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/of_platform.h>
+#include <asm/setup.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+#include <plat/memmap.h>
+#include <plat/smp.h>
+#include <plat/irq.h>
+
+/*-----------------------BVCI Latency Unit -----------------------------*/
+
+#ifdef CONFIG_ARC_HAS_BVCI_LAT_UNIT
+
+int lat_cycles = CONFIG_BVCI_LAT_CYCLES;
+
+/* BVCI Bus Profiler: Latency Unit */
+static void __init setup_bvci_lat_unit(void)
+{
+#define MAX_BVCI_UNITS 12
+
+       unsigned int i;
+       unsigned int *base = (unsigned int *)BVCI_LAT_UNIT_BASE;
+       const unsigned long units_req = CONFIG_BVCI_LAT_UNITS;
+       const unsigned int REG_UNIT = 21;
+       const unsigned int REG_VAL = 22;
+
+       /*
+        * There are multiple Latency Units corresponding to the many
+        * interfaces of the system bus arbiter (both CPU side as well as
+        * the peripheral side).
+        *
+        * Unit  0 - System Arb and Mem Controller - adds latency to all
+        *          memory transactions
+        * Unit  1 - I$ and System Bus
+        * Unit  2 - D$ and System Bus
+        * ..
+        * Unit 12 - IDE Disk controller and System Bus
+        *
+        * The programmers model requires writing to lat_unit reg first
+        * and then the latency value (cycles) to lat_value reg
+        */
+
+       if (CONFIG_BVCI_LAT_UNITS == 0) {
+               writel(0, base + REG_UNIT);
+               writel(lat_cycles, base + REG_VAL);
+               pr_info("BVCI Latency for all Memory Transactions %d cycles\n",
+                       lat_cycles);
+       } else {
+               for_each_set_bit(i, &units_req, MAX_BVCI_UNITS) {
+                       writel(i + 1, base + REG_UNIT); /* loop is 0 based */
+                       writel(lat_cycles, base + REG_VAL);
+                       pr_info("BVCI Latency for Unit[%d] = %d cycles\n",
+                               (i + 1), lat_cycles);
+               }
+       }
+}
+#else
+static void __init setup_bvci_lat_unit(void)
+{
+}
+#endif
+
+/*----------------------- Platform Devices -----------------------------*/
+
+static unsigned long arc_uart_info[] = {
+       0,      /* uart->is_emulated (runtime @running_on_hw) */
+       0,      /* uart->port.uartclk */
+       0,      /* uart->baud */
+       0
+};
+
+#if defined(CONFIG_SERIAL_ARC_CONSOLE)
+/*
+ * static platform data - but only for early serial
+ * TBD: derive this from a special DT node
+ */
+static struct resource arc_uart0_res[] = {
+       {
+               .start = UART0_BASE,
+               .end   = UART0_BASE + 0xFF,
+               .flags = IORESOURCE_MEM,
+       },
+       {
+               .start = UART0_IRQ,
+               .end   = UART0_IRQ,
+               .flags = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device arc_uart0_dev = {
+       .name = "arc-uart",
+       .id = 0,
+       .num_resources = ARRAY_SIZE(arc_uart0_res),
+       .resource = arc_uart0_res,
+       .dev = {
+               .platform_data = &arc_uart_info,
+       },
+};
+
+static struct platform_device *fpga_early_devs[] __initdata = {
+       &arc_uart0_dev,
+};
+#endif
+
+static void arc_fpga_serial_init(void)
+{
+       /* To let driver workaround ISS bug: baudh Reg can't be set to 0 */
+       arc_uart_info[0] = !running_on_hw;
+
+       arc_uart_info[1] = arc_get_core_freq();
+
+       arc_uart_info[2] = CONFIG_ARC_SERIAL_BAUD;
+
+#if defined(CONFIG_SERIAL_ARC_CONSOLE)
+       early_platform_add_devices(fpga_early_devs,
+                                  ARRAY_SIZE(fpga_early_devs));
+
+       /*
+        * ARC console driver registers itself as an early platform driver
+        * of class "earlyprintk".
+        * Install it here, followed by probe of devices.
+        * The installation here doesn't require earlyprintk in command line
+        * To do so however, replace the lines below with
+        *      parse_early_param();
+        *      early_platform_driver_probe("earlyprintk", 1, 1);
+        *                                                    ^^
+        */
+       early_platform_driver_register_all("earlyprintk");
+       early_platform_driver_probe("earlyprintk", 1, 0);
+
+       /*
+        * This is to make sure that arc uart would be preferred console
+        * despite one/more of following:
+        *   -command line lacked "console=ttyARC0" or
+        *   -CONFIG_VT_CONSOLE was enabled (for no reason whatsoever)
+        * Note that this needs to be done after above early console is reg,
+        * otherwise the early console never gets a chance to run.
+        */
+       add_preferred_console("ttyARC", 0, "115200");
+#endif
+}
+
+static void __init plat_fpga_early_init(void)
+{
+       pr_info("[plat-arcfpga]: registering early dev resources\n");
+
+       setup_bvci_lat_unit();
+
+       arc_fpga_serial_init();
+
+#ifdef CONFIG_SMP
+       iss_model_init_early_smp();
+#endif
+}
+
+static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = {
+#if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE)
+       OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info),
+#endif
+       {}
+};
+
+static void __init plat_fpga_populate_dev(void)
+{
+       pr_info("[plat-arcfpga]: registering device resources\n");
+
+       /*
+        * Traverses flattened DeviceTree - registering platform devices
+        * complete with their resources
+        */
+       of_platform_populate(NULL, of_default_bus_match_table,
+                            plat_auxdata_lookup, NULL);
+}
+
+/*----------------------- Machine Descriptions ------------------------------
+ *
+ * Machine description is simply a set of platform/board specific callbacks
+ * This is not directly related to DeviceTree based dynamic device creation,
+ * however as part of early device tree scan, we also select the right
+ * callback set, by matching the DT compatible name.
+ */
+
+static const char *aa4_compat[] __initdata = {
+       "snps,arc-angel4",
+       NULL,
+};
+
+MACHINE_START(ANGEL4, "angel4")
+       .dt_compat      = aa4_compat,
+       .init_early     = plat_fpga_early_init,
+       .init_machine   = plat_fpga_populate_dev,
+       .init_irq       = plat_fpga_init_IRQ,
+#ifdef CONFIG_SMP
+       .init_smp       = iss_model_init_smp,
+#endif
+MACHINE_END
+
+static const char *ml509_compat[] __initdata = {
+       "snps,arc-ml509",
+       NULL,
+};
+
+MACHINE_START(ML509, "ml509")
+       .dt_compat      = ml509_compat,
+       .init_early     = plat_fpga_early_init,
+       .init_machine   = plat_fpga_populate_dev,
+       .init_irq       = plat_fpga_init_IRQ,
+#ifdef CONFIG_SMP
+       .init_smp       = iss_model_init_smp,
+#endif
+MACHINE_END
diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c
new file mode 100644 (file)
index 0000000..91b5534
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * ARC700 Simulation-only Extensions for SMP
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Vineet Gupta    - 2012 : split off arch common and plat specific SMP
+ *  Rajeshwar Ranga - 2007 : Interrupt Distribution Unit API's
+ */
+
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <plat/irq.h>
+#include <plat/smp.h>
+
+static char smp_cpuinfo_buf[128];
+
+/*
+ *-------------------------------------------------------------------
+ * Platform specific callbacks expected by arch SMP code
+ *-------------------------------------------------------------------
+ */
+
+/*
+ * Master kick starting another CPU
+ */
+static void iss_model_smp_wakeup_cpu(int cpu, unsigned long pc)
+{
+       /* setup the start PC */
+       write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc);
+
+       /* Trigger WRITE_PC cmd for this cpu */
+       write_aux_reg(ARC_AUX_XTL_REG_CMD,
+                       (ARC_XTL_CMD_WRITE_PC | (cpu << 8)));
+
+       /* Take the cpu out of Halt */
+       write_aux_reg(ARC_AUX_XTL_REG_CMD,
+                       (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8)));
+
+}
+
+/*
+ * Any SMP specific init any CPU does when it comes up.
+ * Here we setup the CPU to enable Inter-Processor-Interrupts
+ * Called for each CPU
+ * -Master      : init_IRQ()
+ * -Other(s)    : start_kernel_secondary()
+ */
+void iss_model_init_smp(unsigned int cpu)
+{
+       /* Check if CPU is configured for more than 16 interrupts */
+       if (NR_IRQS <= 16 || get_hw_config_num_irq() <= 16)
+               panic("[arcfpga] IRQ system can't support IDU IPI\n");
+
+       idu_disable();
+
+       /****************************************************************
+        * IDU provides a set of Common IRQs, each of which can be dynamically
+        * attached to (1|many|all) CPUs.
+        * The Common IRQs [0-15] are mapped as CPU pvt [16-31]
+        *
+        * Here we use a simple 1:1 mapping:
+        * A CPU 'x' is wired to Common IRQ 'x'.
+        * So an IDU ASSERT on IRQ 'x' will trigger Interrupt on CPU 'x', which
+        * makes up for our simple IPI plumbing.
+        *
+        * TBD: Have a dedicated multicast IRQ for sending IPIs to all CPUs
+        *      w/o having to do one-at-a-time
+        ******************************************************************/
+
+       /*
+        * Claim an IRQ which would trigger IPI on this CPU.
+        * In IDU parlance it involves setting up a cpu bitmask for the IRQ
+        * The bitmap here contains only 1 CPU (self).
+        */
+       idu_irq_set_tgtcpu(cpu, 0x1 << cpu);
+
+       /* Set the IRQ destination to use the bitmask above */
+       idu_irq_set_mode(cpu, 7, /* XXX: IDU_IRQ_MOD_TCPU_ALLRECP: ISS bug */
+                        IDU_IRQ_MODE_PULSE_TRIG);
+
+       idu_enable();
+
+       /* Attach the arch-common IPI ISR to our IDU IRQ */
+       smp_ipi_irq_setup(cpu, IDU_INTERRUPT_0 + cpu);
+}
+
+static void iss_model_ipi_send(void *arg)
+{
+       struct cpumask *callmap = arg;
+       unsigned int cpu;
+
+       for_each_cpu(cpu, callmap)
+               idu_irq_assert(cpu);
+}
+
+static void iss_model_ipi_clear(int cpu, int irq)
+{
+       idu_irq_clear(IDU_INTERRUPT_0 + cpu);
+}
+
+void iss_model_init_early_smp(void)
+{
+#define IS_AVAIL1(var, str)    ((var) ? str : "")
+
+       struct bcr_mp mp;
+
+       READ_BCR(ARC_REG_MP_BCR, mp);
+
+       sprintf(smp_cpuinfo_buf, "Extn [ISS-SMP]: v%d, arch(%d) %s %s %s\n",
+               mp.ver, mp.mp_arch, IS_AVAIL1(mp.scu, "SCU"),
+               IS_AVAIL1(mp.idu, "IDU"), IS_AVAIL1(mp.sdu, "SDU"));
+
+       plat_smp_ops.info = smp_cpuinfo_buf;
+
+       plat_smp_ops.cpu_kick = iss_model_smp_wakeup_cpu;
+       plat_smp_ops.ipi_send = iss_model_ipi_send;
+       plat_smp_ops.ipi_clear = iss_model_ipi_clear;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Low level Platform IPI Providers
+ *-------------------------------------------------------------------
+ */
+
+/* Set the Mode for the Common IRQ */
+void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode)
+{
+       uint32_t par = IDU_IRQ_MODE_PARAM(dest_mode, trig_mode);
+
+       IDU_SET_PARAM(par);
+       IDU_SET_COMMAND(irq, IDU_IRQ_WMODE);
+}
+
+/* Set the target cpu Bitmask for Common IRQ */
+void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask)
+{
+       IDU_SET_PARAM(mask);
+       IDU_SET_COMMAND(irq, IDU_IRQ_WBITMASK);
+}
+
+/* Get the Interrupt Acknowledged status for IRQ (as CPU Bitmask) */
+bool idu_irq_get_ack(uint8_t irq)
+{
+       uint32_t val;
+
+       IDU_SET_COMMAND(irq, IDU_IRQ_ACK);
+       val = IDU_GET_PARAM();
+
+       return val & (1 << irq);
+}
+
+/*
+ * Get the Interrupt Pending status for IRQ (as CPU Bitmask)
+ * -Pending means CPU has not yet noticed the IRQ (e.g. disabled)
+ * -After Interrupt has been taken, the IPI explicitly needs to be
+ *  cleared, to be acknowledged.
+ */
+bool idu_irq_get_pend(uint8_t irq)
+{
+       uint32_t val;
+
+       IDU_SET_COMMAND(irq, IDU_IRQ_PEND);
+       val = IDU_GET_PARAM();
+
+       return val & (1 << irq);
+}
index 6ec8eb3..5b71469 100644 (file)
@@ -49,6 +49,7 @@ config ARM
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16
+       select HAVE_VIRT_TO_BUS
        select KTIME_SCALAR
        select PERF_USE_VMALLOC
        select RTC_LIB
@@ -1675,7 +1676,6 @@ config HZ
        int
        default 200 if ARCH_EBSA110 || ARCH_S3C24XX || ARCH_S5P64X0 || \
                ARCH_S5PV210 || ARCH_EXYNOS4
-       default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER
        default AT91_TIMER_HZ if ARCH_AT91
        default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
        default 100
index abfce28..71768b8 100644 (file)
@@ -68,8 +68,8 @@ else
 endif
 
 check_for_multiple_loadaddr = \
-if [ $(words $(UIMAGE_LOADADDR)) -gt 1 ]; then \
-       echo 'multiple load addresses: $(UIMAGE_LOADADDR)'; \
+if [ $(words $(UIMAGE_LOADADDR)) -ne 1 ]; then \
+       echo 'multiple (or no) load addresses: $(UIMAGE_LOADADDR)'; \
        echo 'This is incompatible with uImages'; \
        echo 'Specify LOADADDR on the commandline to build an uImage'; \
        false; \
index 411ab16..9c62558 100644 (file)
@@ -56,6 +56,7 @@ dtb-$(CONFIG_ARCH_KIRKWOOD) += kirkwood-dns320.dtb \
        kirkwood-dockstar.dtb \
        kirkwood-dreamplug.dtb \
        kirkwood-goflexnet.dtb \
+       kirkwood-guruplug-server-plus.dtb \
        kirkwood-ib62x0.dtb \
        kirkwood-iconnect.dtb \
        kirkwood-iomega_ix2_200.dtb \
@@ -78,11 +79,21 @@ dtb-$(CONFIG_ARCH_MSM) += msm8660-surf.dtb \
        msm8960-cdp.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += armada-370-db.dtb \
        armada-370-mirabox.dtb \
+       armada-370-rd.dtb \
        armada-xp-db.dtb \
+       armada-xp-gp.dtb \
        armada-xp-openblocks-ax3-4.dtb
-dtb-$(CONFIG_ARCH_MXC) += imx51-babbage.dtb \
+dtb-$(CONFIG_ARCH_MXC) += \
+       imx25-karo-tx25.dtb \
+       imx25-pdk.dtb \
+       imx27-apf27.dtb \
+       imx27-pdk.dtb \
+       imx31-bug.dtb \
+       imx51-apf51.dtb \
+       imx51-babbage.dtb \
        imx53-ard.dtb \
        imx53-evk.dtb \
+       imx53-mba53.dtb \
        imx53-qsb.dtb \
        imx53-smd.dtb \
        imx6q-arm2.dtb \
index c2f14e8..0957645 100644 (file)
                                mac-address = [ 00 00 00 00 00 00 ];
                        };
                };
+
+               ocmcram: ocmcram@40300000 {
+                       compatible = "ti,am3352-ocmcram";
+                       reg = <0x40300000 0x10000>;
+                       ti,hwmods = "ocmcram";
+                       ti,no_idle_on_suspend;
+               };
+
+               wkup_m3: wkup_m3@44d00000 {
+                       compatible = "ti,am3353-wkup-m3";
+                       reg = <0x44d00000 0x4000        /* M3 UMEM */
+                              0x44d80000 0x2000>;      /* M3 DMEM */
+                       ti,hwmods = "wkup_m3";
+               };
        };
 };
index 9b82fac..e34b280 100644 (file)
                        phy = <&phy1>;
                        phy-mode = "rgmii-id";
                };
+
+               mvsdio@d00d4000 {
+                       pinctrl-0 = <&sdio_pins1>;
+                       pinctrl-names = "default";
+                       /*
+                        * This device is disabled by default, because
+                        * using the SD card connector requires
+                        * changing the default CON40 connector
+                        * "DB-88F6710_MPP_2xRGMII_DEVICE_Jumper" to a
+                        * different connector
+                        * "DB-88F6710_MPP_RGMII_SD_Jumper".
+                        */
+                       status = "disabled";
+                       /* No CD or WP GPIOs */
+               };
+
+               usb@d0050000 {
+                       status = "okay";
+               };
+
+               usb@d0051000 {
+                       status = "okay";
+               };
+
+               spi0: spi@d0010600 {
+                       status = "okay";
+
+                       spi-flash@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               compatible = "mx25l25635e";
+                               reg = <0>; /* Chip select 0 */
+                               spi-max-frequency = <50000000>;
+                       };
+               };
        };
 };
index 3b40713..dd0c57d 100644 (file)
                        phy = <&phy1>;
                        phy-mode = "rgmii-id";
                };
+
+               mvsdio@d00d4000 {
+                       pinctrl-0 = <&sdio_pins2>;
+                       pinctrl-names = "default";
+                       status = "okay";
+                       /*
+                        * No CD or WP GPIOs: SDIO interface used for
+                        * Wifi/Bluetooth chip
+                        */
+               };
+
+               usb@d0050000 {
+                       status = "okay";
+               };
+
+               usb@d0051000 {
+                       status = "okay";
+               };
        };
 };
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
new file mode 100644 (file)
index 0000000..f8e4855
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Device Tree file for Marvell Armada 370 Reference Design board
+ * (RD-88F6710-A1)
+ *
+ *  Copied from arch/arm/boot/dts/armada-370-db.dts
+ *
+ *  Copyright (C) 2013 Florian Fainelli <florian@openwrt.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-370.dtsi"
+
+/ {
+       model = "Marvell Armada 370 Reference Design";
+       compatible = "marvell,a370-rd", "marvell,armada370", "marvell,armada-370-xp";
+
+       chosen {
+               bootargs = "console=ttyS0,115200 earlyprintk";
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x00000000 0x20000000>; /* 512 MB */
+       };
+
+       soc {
+               serial@d0012000 {
+                       clock-frequency = <200000000>;
+                       status = "okay";
+               };
+               sata@d00a0000 {
+                       nr-ports = <2>;
+                       status = "okay";
+               };
+
+               mdio {
+                       phy0: ethernet-phy@0 {
+                               reg = <0>;
+                       };
+
+                       phy1: ethernet-phy@1 {
+                               reg = <1>;
+                       };
+               };
+
+               ethernet@d0070000 {
+                       status = "okay";
+                       phy = <&phy0>;
+                       phy-mode = "sgmii";
+               };
+               ethernet@d0074000 {
+                       status = "okay";
+                       phy = <&phy1>;
+                       phy-mode = "rgmii-id";
+               };
+
+               mvsdio@d00d4000 {
+                       pinctrl-0 = <&sdio_pins1>;
+                       pinctrl-names = "default";
+                       status = "okay";
+                       /* No CD or WP GPIOs */
+               };
+       };
+};
index 5b29225..6f1acc7 100644 (file)
@@ -68,8 +68,9 @@
 
                timer@d0020300 {
                               compatible = "marvell,armada-370-xp-timer";
-                              reg = <0xd0020300 0x30>;
-                              interrupts = <37>, <38>, <39>, <40>;
+                              reg = <0xd0020300 0x30>,
+                              <0xd0021040 0x30>;
+                              interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
                               clocks = <&coreclk 2>;
                };
 
                        reg = <0xd0010300 0x20>;
                        interrupts = <50>;
                };
+
+               mvsdio@d00d4000 {
+                       compatible = "marvell,orion-sdio";
+                       reg = <0xd00d4000 0x200>;
+                       interrupts = <54>;
+                       clocks = <&gateclk 17>;
+                       status = "disabled";
+               };
+
+               usb@d0050000 {
+                       compatible = "marvell,orion-ehci";
+                       reg = <0xd0050000 0x500>;
+                       interrupts = <45>;
+                       status = "disabled";
+               };
+
+               usb@d0051000 {
+                       compatible = "marvell,orion-ehci";
+                       reg = <0xd0051000 0x500>;
+                       interrupts = <46>;
+                       status = "disabled";
+               };
+
+               spi0: spi@d0010600 {
+                       compatible = "marvell,orion-spi";
+                       reg = <0xd0010600 0x28>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       cell-index = <0>;
+                       interrupts = <30>;
+                       clocks = <&coreclk 0>;
+                       status = "disabled";
+               };
+
+               spi1: spi@d0010680 {
+                       compatible = "marvell,orion-spi";
+                       reg = <0xd0010680 0x28>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       cell-index = <1>;
+                       interrupts = <92>;
+                       clocks = <&coreclk 0>;
+                       status = "disabled";
+               };
        };
 };
 
index 636cf7d..8188d13 100644 (file)
                pinctrl {
                        compatible = "marvell,mv88f6710-pinctrl";
                        reg = <0xd0018000 0x38>;
+
+                       sdio_pins1: sdio-pins1 {
+                             marvell,pins = "mpp9",  "mpp11", "mpp12",
+                                            "mpp13", "mpp14", "mpp15";
+                             marvell,function = "sd0";
+                       };
+
+                       sdio_pins2: sdio-pins2 {
+                             marvell,pins = "mpp47", "mpp48", "mpp49",
+                                            "mpp50", "mpp51", "mpp52";
+                             marvell,function = "sd0";
+                       };
                };
 
                gpio0: gpio@d0018100 {
                                dmacap,memset;
                        };
                };
+
+               usb@d0050000 {
+                       clocks = <&coreclk 0>;
+               };
+
+               usb@d0051000 {
+                       clocks = <&coreclk 0>;
+               };
+
        };
 };
index 8e53b25..e83505e 100644 (file)
                        phy = <&phy3>;
                        phy-mode = "sgmii";
                };
+
+               mvsdio@d00d4000 {
+                       pinctrl-0 = <&sdio_pins>;
+                       pinctrl-names = "default";
+                       status = "okay";
+                       /* No CD or WP GPIOs */
+               };
+
+               usb@d0050000 {
+                       status = "okay";
+               };
+
+               usb@d0051000 {
+                       status = "okay";
+               };
+
+               usb@d0052000 {
+                       status = "okay";
+               };
+
+               spi0: spi@d0010600 {
+                       status = "okay";
+
+                       spi-flash@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               compatible = "m25p64";
+                               reg = <0>; /* Chip select 0 */
+                               spi-max-frequency = <20000000>;
+                       };
+               };
        };
 };
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
new file mode 100644 (file)
index 0000000..1c8afe2
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Device Tree file for Marvell Armada XP development board
+ * (DB-MV784MP-GP)
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-xp-mv78460.dtsi"
+
+/ {
+       model = "Marvell Armada XP Development Board DB-MV784MP-GP";
+       compatible = "marvell,axp-gp", "marvell,armadaxp-mv78460", "marvell,armadaxp", "marvell,armada-370-xp";
+
+       chosen {
+               bootargs = "console=ttyS0,115200 earlyprintk";
+       };
+
+       memory {
+               device_type = "memory";
+
+               /*
+                * 4 GB of plug-in RAM modules by default but only 3GB
+                * are visible, the amount of memory available can be
+                * changed by the bootloader according to the size of the
+                * module actually plugged
+                */
+               reg = <0x00000000 0xC0000000>;
+       };
+
+       soc {
+               serial@d0012000 {
+                       clock-frequency = <250000000>;
+                       status = "okay";
+               };
+               serial@d0012100 {
+                       clock-frequency = <250000000>;
+                       status = "okay";
+               };
+               serial@d0012200 {
+                       clock-frequency = <250000000>;
+                       status = "okay";
+               };
+               serial@d0012300 {
+                       clock-frequency = <250000000>;
+                       status = "okay";
+               };
+
+               sata@d00a0000 {
+                       nr-ports = <2>;
+                       status = "okay";
+               };
+
+               mdio {
+                       phy0: ethernet-phy@0 {
+                               reg = <16>;
+                       };
+
+                       phy1: ethernet-phy@1 {
+                               reg = <17>;
+                       };
+
+                       phy2: ethernet-phy@2 {
+                               reg = <18>;
+                       };
+
+                       phy3: ethernet-phy@3 {
+                               reg = <19>;
+                       };
+               };
+
+               ethernet@d0070000 {
+                       status = "okay";
+                       phy = <&phy0>;
+                       phy-mode = "rgmii-id";
+               };
+               ethernet@d0074000 {
+                       status = "okay";
+                       phy = <&phy1>;
+                       phy-mode = "rgmii-id";
+               };
+               ethernet@d0030000 {
+                       status = "okay";
+                       phy = <&phy2>;
+                       phy-mode = "rgmii-id";
+               };
+               ethernet@d0034000 {
+                       status = "okay";
+                       phy = <&phy3>;
+                       phy-mode = "rgmii-id";
+               };
+
+               spi0: spi@d0010600 {
+                       status = "okay";
+
+                       spi-flash@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               compatible = "n25q128a13";
+                               reg = <0>; /* Chip select 0 */
+                               spi-max-frequency = <108000000>;
+                       };
+               };
+       };
+};
index e041f42..f56c405 100644 (file)
                pinctrl {
                        compatible = "marvell,mv78230-pinctrl";
                        reg = <0xd0018000 0x38>;
+
+                       sdio_pins: sdio-pins {
+                               marvell,pins = "mpp30", "mpp31", "mpp32",
+                                              "mpp33", "mpp34", "mpp35";
+                               marvell,function = "sd0";
+                       };
                };
 
                gpio0: gpio@d0018100 {
index 9e23bd8..f8f2b78 100644 (file)
                pinctrl {
                        compatible = "marvell,mv78260-pinctrl";
                        reg = <0xd0018000 0x38>;
+
+                       sdio_pins: sdio-pins {
+                               marvell,pins = "mpp30", "mpp31", "mpp32",
+                                              "mpp33", "mpp34", "mpp35";
+                               marvell,function = "sd0";
+                       };
                };
 
                gpio0: gpio@d0018100 {
index 9659661..936c25d 100644 (file)
                pinctrl {
                        compatible = "marvell,mv78460-pinctrl";
                        reg = <0xd0018000 0x38>;
+
+                       sdio_pins: sdio-pins {
+                               marvell,pins = "mpp30", "mpp31", "mpp32",
+                                              "mpp33", "mpp34", "mpp35";
+                               marvell,function = "sd0";
+                       };
                };
 
                gpio0: gpio@d0018100 {
index b42652f..3818a82 100644 (file)
                        };
                };
 
+               gpio_keys {
+                       compatible = "gpio-keys";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       button@1 {
+                               label = "Init Button";
+                               linux,code = <116>;
+                               gpios = <&gpio1 28 0>;
+                       };
+               };
+
                mdio {
                        phy0: ethernet-phy@0 {
                                reg = <0>;
                        nr-ports = <2>;
                        status = "okay";
                };
+               usb@d0050000 {
+                       status = "okay";
+               };
+               usb@d0051000 {
+                       status = "okay";
+               };
        };
 };
index 2e37ef1..1443949 100644 (file)
@@ -30,7 +30,7 @@
        };
 
        mpic: interrupt-controller@d0020000 {
-             reg = <0xd0020a00 0x1d0>,
+             reg = <0xd0020a00 0x2d0>,
                    <0xd0021070 0x58>;
        };
 
                                dmacap,memset;
                        };
                };
+
+               usb@d0050000 {
+                       clocks = <&gateclk 18>;
+               };
+
+               usb@d0051000 {
+                       clocks = <&gateclk 19>;
+               };
+
+               usb@d0052000 {
+                       compatible = "marvell,orion-ehci";
+                       reg = <0xd0052000 0x500>;
+                       interrupts = <47>;
+                       clocks = <&gateclk 20>;
+                       status = "disabled";
+               };
+
        };
 };
index cdee96f..7e3065a 100644 (file)
 
        leds {
                compatible = "gpio-leds";
+               pinctrl-0 = <&pmx_gpio_18>;
+               pinctrl-names = "default";
+
                power {
                        label = "Power";
                        gpios = <&gpio0 18 1>;
                        linux,default-trigger = "default-on";
                };
        };
+
+       regulators {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               usb_power: regulator@1 {
+                       compatible = "regulator-fixed";
+                       reg = <1>;
+                       regulator-name = "USB Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       gpio = <&gpio0 1 0>;
+               };
+       };
 };
 
 &uart0 { status = "okay"; };
 };
 
 &pinctrl {
-       pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>;
+       pinctrl-0 = <&pmx_gpio_1 &pmx_gpio_12>;
        pinctrl-names = "default";
 
+       pmx_gpio_1: pmx-gpio-1 {
+               marvell,pins = "mpp1";
+               marvell,function = "gpio";
+       };
+
        pmx_gpio_12: pmx-gpio-12 {
                marvell,pins = "mpp12";
                marvell,function = "gpio";
index 740630f..67dbe20 100644 (file)
@@ -55,7 +55,7 @@
                        reg = <0x12000 0x100>;
                        reg-shift = <2>;
                        interrupts = <7>;
-                       clock-frequency = <166666667>;
+                       clocks = <&core_clk 0>;
                        status = "disabled";
                };
 
@@ -64,7 +64,7 @@
                        reg = <0x12100 0x100>;
                        reg-shift = <2>;
                        interrupts = <8>;
-                       clock-frequency = <166666667>;
+                       clocks = <&core_clk 0>;
                        status = "disabled";
                };
 
@@ -73,7 +73,7 @@
                        reg = <0x12000 0x100>;
                        reg-shift = <2>;
                        interrupts = <9>;
-                       clock-frequency = <166666667>;
+                       clocks = <&core_clk 0>;
                        status = "disabled";
                };
 
@@ -82,7 +82,7 @@
                        reg = <0x12100 0x100>;
                        reg-shift = <2>;
                        interrupts = <10>;
-                       clock-frequency = <166666667>;
+                       clocks = <&core_clk 0>;
                        status = "disabled";
                };
 
                        status = "disabled";
                };
 
+               ehci0: usb-host@50000 {
+                       compatible = "marvell,orion-ehci";
+                       reg = <0x50000 0x1000>;
+                       interrupts = <24>;
+                       clocks = <&gate_clk 0>;
+                       status = "okay";
+               };
+
+               ehci1: usb-host@51000 {
+                       compatible = "marvell,orion-ehci";
+                       reg = <0x51000 0x1000>;
+                       interrupts = <25>;
+                       clocks = <&gate_clk 1>;
+                       status = "okay";
+               };
+
                sdio0: sdio@92000 {
                        compatible = "marvell,dove-sdhci";
                        reg = <0x92000 0x100>;
index d81f8a0..1a9d049 100644 (file)
        memory {
                reg = <0x80000000 0x02000000 0x90000000 0x02000000>;
        };
+};
 
-       soc {
-               aips@43f00000 {
-                       uart1: serial@43f90000 {
-                               status = "okay";
-                       };
-               };
+&uart1 {
+       status = "okay";
+};
 
-               spba@50000000 {
-                       fec: ethernet@50038000 {
-                               status = "okay";
-                               phy-mode = "rmii";
-                       };
-               };
+&fec {
+       phy-mode = "rmii";
+       status = "okay";
+};
 
-               emi@80000000 {
-                       nand@bb000000 {
-                               nand-on-flash-bbt;
-                               status = "okay";
-                       };
-               };
-       };
+&nfc {
+       nand-on-flash-bbt;
+       status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
new file mode 100644 (file)
index 0000000..a02a860
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx25.dtsi"
+
+/ {
+       model = "Freescale i.MX25 Product Development Kit";
+       compatible = "fsl,imx25-pdk", "fsl,imx25";
+
+       memory {
+               reg = <0x80000000 0x4000000>;
+       };
+};
+
+&uart1 {
+       status = "okay";
+};
+
+&fec {
+       phy-mode = "rmii";
+       status = "okay";
+};
+
+&nfc {
+       nand-on-flash-bbt;
+       status = "okay";
+};
index e1b13eb..94f3305 100644 (file)
                        reg = <0x80000000 0x3b002000>;
                        ranges;
 
-                       nand@bb000000 {
+                       nfc: nand@bb000000 {
                                #address-cells = <1>;
                                #size-cells = <1>;
 
diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-3ds.dts
deleted file mode 100644 (file)
index fa04c7b..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2012 Sascha Hauer, Pengutronix
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-/dts-v1/;
-/include/ "imx27.dtsi"
-
-/ {
-       model = "mx27_3ds";
-       compatible = "freescale,imx27-3ds", "fsl,imx27";
-
-       memory {
-               reg = <0x0 0x0>;
-       };
-
-       soc {
-               aipi@10000000 { /* aipi1 */
-                       uart1: serial@1000a000 {
-                               fsl,uart-has-rtscts;
-                               status = "okay";
-                       };
-               };
-
-               aipi@10020000 { /* aipi2 */
-                       ethernet@1002b000 {
-                               status = "okay";
-                       };
-               };
-       };
-};
index c0327c0..b464c80 100644 (file)
                        clock-frequency = <0>;
                };
        };
+};
 
-       soc {
-               aipi@10000000 {
-                       serial@1000a000 {
-                               status = "okay";
-                       };
+&uart1 {
+       status = "okay";
+};
 
-                       ethernet@1002b000 {
-                               status = "okay";
-                       };
-               };
+&fec {
+       status = "okay";
+};
 
-               nand@d8000000 {
-                       status = "okay";
-                       nand-bus-width = <16>;
-                       nand-ecc-mode = "hw";
-                       nand-on-flash-bbt;
+&nfc {
+       status = "okay";
+       nand-bus-width = <16>;
+       nand-ecc-mode = "hw";
+       nand-on-flash-bbt;
 
-                       partition@0 {
-                               label = "u-boot";
-                               reg = <0x0 0x100000>;
-                       };
+       partition@0 {
+               label = "u-boot";
+               reg = <0x0 0x100000>;
+       };
 
-                       partition@100000 {
-                               label = "env";
-                               reg = <0x100000 0x80000>;
-                       };
+       partition@100000 {
+               label = "env";
+               reg = <0x100000 0x80000>;
+       };
 
-                       partition@180000 {
-                               label = "env2";
-                               reg = <0x180000 0x80000>;
-                       };
+       partition@180000 {
+               label = "env2";
+               reg = <0x180000 0x80000>;
+       };
 
-                       partition@200000 {
-                               label = "firmware";
-                               reg = <0x200000 0x80000>;
-                       };
+       partition@200000 {
+               label = "firmware";
+               reg = <0x200000 0x80000>;
+       };
 
-                       partition@280000 {
-                               label = "dtb";
-                               reg = <0x280000 0x80000>;
-                       };
+       partition@280000 {
+               label = "dtb";
+               reg = <0x280000 0x80000>;
+       };
 
-                       partition@300000 {
-                               label = "kernel";
-                               reg = <0x300000 0x500000>;
-                       };
+       partition@300000 {
+               label = "kernel";
+               reg = <0x300000 0x500000>;
+       };
 
-                       partition@800000 {
-                               label = "rootfs";
-                               reg = <0x800000 0xf800000>;
-                       };
-               };
+       partition@800000 {
+               label = "rootfs";
+               reg = <0x800000 0xf800000>;
        };
 };
diff --git a/arch/arm/boot/dts/imx27-pdk.dts b/arch/arm/boot/dts/imx27-pdk.dts
new file mode 100644 (file)
index 0000000..41cd110
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012 Sascha Hauer, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx27.dtsi"
+
+/ {
+       model = "Freescale i.MX27 Product Development Kit";
+       compatible = "fsl,imx27-pdk", "fsl,imx27";
+
+       memory {
+               reg = <0x0 0x0>;
+       };
+};
+
+&uart1 {
+       fsl,uart-has-rtscts;
+       status = "okay";
+};
+
+&fec {
+       status = "okay";
+};
index 7f67402..9ac6f6b 100644 (file)
        memory {
                reg = <0x80000000 0x8000000>; /* 128M */
        };
+};
 
-       soc {
-               aips@43f00000 { /* AIPS1 */
-                       uart5: serial@43fb4000 {
-                               fsl,uart-has-rtscts;
-                               status = "okay";
-                       };
-               };
-       };
+&uart5 {
+       fsl,uart-has-rtscts;
+       status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx51-apf51.dts b/arch/arm/boot/dts/imx51-apf51.dts
new file mode 100644 (file)
index 0000000..92d3a66
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Armadeus Systems - <support@armadeus.com>
+ * Copyright 2012 Laurent Cans <laurent.cans@gmail.com>
+ *
+ * Based on mx51-babbage.dts
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx51.dtsi"
+
+/ {
+       model = "Armadeus Systems APF51 module";
+       compatible = "armadeus,imx51-apf51", "fsl,imx51";
+
+       memory {
+               reg = <0x90000000 0x20000000>;
+       };
+
+       clocks {
+               ckih1 {
+                       clock-frequency = <0>;
+               };
+
+               osc {
+                       clock-frequency = <33554432>;
+               };
+       };
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_fec_2>;
+       phy-mode = "mii";
+       phy-reset-gpios = <&gpio3 0 0>;
+       phy-reset-duration = <1>;
+       status = "okay";
+};
+
+&uart3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart3_2>;
+       status = "okay";
+};
index 567e7ee..aab6e43 100644 (file)
                reg = <0x90000000 0x20000000>;
        };
 
-       soc {
-               display@di0 {
-                       compatible = "fsl,imx-parallel-display";
-                       crtcs = <&ipu 0>;
-                       interface-pix-fmt = "rgb24";
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_ipu_disp1_1>;
-               };
-
-               display@di1 {
-                       compatible = "fsl,imx-parallel-display";
-                       crtcs = <&ipu 1>;
-                       interface-pix-fmt = "rgb565";
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_ipu_disp2_1>;
-               };
-
-               aips@70000000 { /* aips-1 */
-                       spba@70000000 {
-                               esdhc@70004000 { /* ESDHC1 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc1_1>;
-                                       fsl,cd-controller;
-                                       fsl,wp-controller;
-                                       status = "okay";
-                               };
-
-                               esdhc@70008000 { /* ESDHC2 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc2_1>;
-                                       cd-gpios = <&gpio1 6 0>;
-                                       wp-gpios = <&gpio1 5 0>;
-                                       status = "okay";
-                               };
-
-                               uart3: serial@7000c000 {
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_uart3_1>;
-                                       fsl,uart-has-rtscts;
-                                       status = "okay";
-                               };
-
-                               ecspi@70010000 { /* ECSPI1 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_ecspi1_1>;
-                                       fsl,spi-num-chipselects = <2>;
-                                       cs-gpios = <&gpio4 24 0>, <&gpio4 25 0>;
-                                       status = "okay";
-
-                                       pmic: mc13892@0 {
-                                               #address-cells = <1>;
-                                               #size-cells = <0>;
-                                               compatible = "fsl,mc13892";
-                                               spi-max-frequency = <6000000>;
-                                               reg = <0>;
-                                               interrupt-parent = <&gpio1>;
-                                               interrupts = <8 0x4>;
-
-                                               regulators {
-                                                       sw1_reg: sw1 {
-                                                               regulator-min-microvolt = <600000>;
-                                                               regulator-max-microvolt = <1375000>;
-                                                               regulator-boot-on;
-                                                               regulator-always-on;
-                                                       };
-
-                                                       sw2_reg: sw2 {
-                                                               regulator-min-microvolt = <900000>;
-                                                               regulator-max-microvolt = <1850000>;
-                                                               regulator-boot-on;
-                                                               regulator-always-on;
-                                                       };
-
-                                                       sw3_reg: sw3 {
-                                                               regulator-min-microvolt = <1100000>;
-                                                               regulator-max-microvolt = <1850000>;
-                                                               regulator-boot-on;
-                                                               regulator-always-on;
-                                                       };
-
-                                                       sw4_reg: sw4 {
-                                                               regulator-min-microvolt = <1100000>;
-                                                               regulator-max-microvolt = <1850000>;
-                                                               regulator-boot-on;
-                                                               regulator-always-on;
-                                                       };
-
-                                                       vpll_reg: vpll {
-                                                               regulator-min-microvolt = <1050000>;
-                                                               regulator-max-microvolt = <1800000>;
-                                                               regulator-boot-on;
-                                                               regulator-always-on;
-                                                       };
-
-                                                       vdig_reg: vdig {
-                                                               regulator-min-microvolt = <1650000>;
-                                                               regulator-max-microvolt = <1650000>;
-                                                               regulator-boot-on;
-                                                       };
-
-                                                       vsd_reg: vsd {
-                                                               regulator-min-microvolt = <1800000>;
-                                                               regulator-max-microvolt = <3150000>;
-                                                       };
-
-                                                       vusb2_reg: vusb2 {
-                                                               regulator-min-microvolt = <2400000>;
-                                                               regulator-max-microvolt = <2775000>;
-                                                               regulator-boot-on;
-                                                               regulator-always-on;
-                                                       };
-
-                                                       vvideo_reg: vvideo {
-                                                               regulator-min-microvolt = <2775000>;
-                                                               regulator-max-microvolt = <2775000>;
-                                                       };
-
-                                                       vaudio_reg: vaudio {
-                                                               regulator-min-microvolt = <2300000>;
-                                                               regulator-max-microvolt = <3000000>;
-                                                       };
-
-                                                       vcam_reg: vcam {
-                                                               regulator-min-microvolt = <2500000>;
-                                                               regulator-max-microvolt = <3000000>;
-                                                       };
-
-                                                       vgen1_reg: vgen1 {
-                                                               regulator-min-microvolt = <1200000>;
-                                                               regulator-max-microvolt = <1200000>;
-                                                       };
-
-                                                       vgen2_reg: vgen2 {
-                                                               regulator-min-microvolt = <1200000>;
-                                                               regulator-max-microvolt = <3150000>;
-                                                               regulator-always-on;
-                                                       };
-
-                                                       vgen3_reg: vgen3 {
-                                                               regulator-min-microvolt = <1800000>;
-                                                               regulator-max-microvolt = <2900000>;
-                                                               regulator-always-on;
-                                                       };
-                                               };
-                                       };
-
-                                       flash: at45db321d@1 {
-                                               #address-cells = <1>;
-                                               #size-cells = <1>;
-                                               compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
-                                               spi-max-frequency = <25000000>;
-                                               reg = <1>;
-
-                                               partition@0 {
-                                                       label = "U-Boot";
-                                                       reg = <0x0 0x40000>;
-                                                       read-only;
-                                               };
-
-                                               partition@40000 {
-                                                       label = "Kernel";
-                                                       reg = <0x40000 0x3c0000>;
-                                               };
-                                       };
-                               };
-
-                               ssi2: ssi@70014000 {
-                                       fsl,mode = "i2s-slave";
-                                       status = "okay";
-                               };
-                       };
-
-                       iomuxc@73fa8000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
-
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       694  0x20d5     /* MX51_PAD_GPIO1_0__SD1_CD */
-                                                       697  0x20d5     /* MX51_PAD_GPIO1_1__SD1_WP */
-                                                       737  0x100      /* MX51_PAD_GPIO1_5__GPIO1_5 */
-                                                       740  0x100      /* MX51_PAD_GPIO1_6__GPIO1_6 */
-                                                       121  0x5        /* MX51_PAD_EIM_A27__GPIO2_21 */
-                                                       402  0x85       /* MX51_PAD_CSPI1_SS0__GPIO4_24 */
-                                                       405  0x85       /* MX51_PAD_CSPI1_SS1__GPIO4_25 */
-                                               >;
-                                       };
-                               };
-                       };
-
-                       uart1: serial@73fbc000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart1_1>;
-                               fsl,uart-has-rtscts;
-                               status = "okay";
-                       };
-
-                       uart2: serial@73fc0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart2_1>;
-                               status = "okay";
-                       };
-               };
-
-               aips@80000000 { /* aips-2 */
-                       i2c@83fc4000 { /* I2C2 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_i2c2_1>;
-                               status = "okay";
-
-                               sgtl5000: codec@0a {
-                                       compatible = "fsl,sgtl5000";
-                                       reg = <0x0a>;
-                                       clock-frequency = <26000000>;
-                                       VDDA-supply = <&vdig_reg>;
-                                       VDDIO-supply = <&vvideo_reg>;
-                               };
-                       };
-
-                       audmux@83fd0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_audmux_1>;
-                               status = "okay";
-                       };
+       display@di0 {
+               compatible = "fsl,imx-parallel-display";
+               crtcs = <&ipu 0>;
+               interface-pix-fmt = "rgb24";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_ipu_disp1_1>;
+       };
 
-                       ethernet@83fec000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_fec_1>;
-                               phy-mode = "mii";
-                               status = "okay";
-                       };
-               };
+       display@di1 {
+               compatible = "fsl,imx-parallel-display";
+               crtcs = <&ipu 1>;
+               interface-pix-fmt = "rgb565";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_ipu_disp2_1>;
        };
 
        gpio-keys {
                mux-ext-port = <3>;
        };
 };
+
+&esdhc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc1_1>;
+       fsl,cd-controller;
+       fsl,wp-controller;
+       status = "okay";
+};
+
+&esdhc2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc2_1>;
+       cd-gpios = <&gpio1 6 0>;
+       wp-gpios = <&gpio1 5 0>;
+       status = "okay";
+};
+
+&uart3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart3_1>;
+       fsl,uart-has-rtscts;
+       status = "okay";
+};
+
+&ecspi1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_ecspi1_1>;
+       fsl,spi-num-chipselects = <2>;
+       cs-gpios = <&gpio4 24 0>, <&gpio4 25 0>;
+       status = "okay";
+
+       pmic: mc13892@0 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "fsl,mc13892";
+               spi-max-frequency = <6000000>;
+               reg = <0>;
+               interrupt-parent = <&gpio1>;
+               interrupts = <8 0x4>;
+
+               regulators {
+                       sw1_reg: sw1 {
+                               regulator-min-microvolt = <600000>;
+                               regulator-max-microvolt = <1375000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw2_reg: sw2 {
+                               regulator-min-microvolt = <900000>;
+                               regulator-max-microvolt = <1850000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw3_reg: sw3 {
+                               regulator-min-microvolt = <1100000>;
+                               regulator-max-microvolt = <1850000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw4_reg: sw4 {
+                               regulator-min-microvolt = <1100000>;
+                               regulator-max-microvolt = <1850000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vpll_reg: vpll {
+                               regulator-min-microvolt = <1050000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vdig_reg: vdig {
+                               regulator-min-microvolt = <1650000>;
+                               regulator-max-microvolt = <1650000>;
+                               regulator-boot-on;
+                       };
+
+                       vsd_reg: vsd {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3150000>;
+                       };
+
+                       vusb2_reg: vusb2 {
+                               regulator-min-microvolt = <2400000>;
+                               regulator-max-microvolt = <2775000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vvideo_reg: vvideo {
+                               regulator-min-microvolt = <2775000>;
+                               regulator-max-microvolt = <2775000>;
+                       };
+
+                       vaudio_reg: vaudio {
+                               regulator-min-microvolt = <2300000>;
+                               regulator-max-microvolt = <3000000>;
+                       };
+
+                       vcam_reg: vcam {
+                               regulator-min-microvolt = <2500000>;
+                               regulator-max-microvolt = <3000000>;
+                       };
+
+                       vgen1_reg: vgen1 {
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <1200000>;
+                       };
+
+                       vgen2_reg: vgen2 {
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3150000>;
+                               regulator-always-on;
+                       };
+
+                       vgen3_reg: vgen3 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <2900000>;
+                               regulator-always-on;
+                       };
+               };
+       };
+
+       flash: at45db321d@1 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
+               spi-max-frequency = <25000000>;
+               reg = <1>;
+
+               partition@0 {
+                       label = "U-Boot";
+                       reg = <0x0 0x40000>;
+                       read-only;
+               };
+
+               partition@40000 {
+                       label = "Kernel";
+                       reg = <0x40000 0x3c0000>;
+               };
+       };
+};
+
+&ssi2 {
+       fsl,mode = "i2s-slave";
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               694  0x20d5     /* MX51_PAD_GPIO1_0__SD1_CD */
+                               697  0x20d5     /* MX51_PAD_GPIO1_1__SD1_WP */
+                               737  0x100      /* MX51_PAD_GPIO1_5__GPIO1_5 */
+                               740  0x100      /* MX51_PAD_GPIO1_6__GPIO1_6 */
+                               121  0x5        /* MX51_PAD_EIM_A27__GPIO2_21 */
+                               402  0x85       /* MX51_PAD_CSPI1_SS0__GPIO4_24 */
+                               405  0x85       /* MX51_PAD_CSPI1_SS1__GPIO4_25 */
+                       >;
+               };
+       };
+};
+
+&uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1_1>;
+       fsl,uart-has-rtscts;
+       status = "okay";
+};
+
+&uart2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart2_1>;
+       status = "okay";
+};
+
+&i2c2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c2_1>;
+       status = "okay";
+
+       sgtl5000: codec@0a {
+               compatible = "fsl,sgtl5000";
+               reg = <0x0a>;
+               clock-frequency = <26000000>;
+               VDDA-supply = <&vdig_reg>;
+               VDDIO-supply = <&vvideo_reg>;
+       };
+};
+
+&audmux {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_audmux_1>;
+       status = "okay";
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_fec_1>;
+       phy-mode = "mii";
+       status = "okay";
+};
+
+&kpp {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_kpp_1>;
+       linux,keymap = <0x00000067      /* KEY_UP */
+                       0x0001006c      /* KEY_DOWN */
+                       0x00020072      /* KEY_VOLUMEDOWN */
+                       0x00030066      /* KEY_HOME */
+                       0x0100006a      /* KEY_RIGHT */
+                       0x01010069      /* KEY_LEFT */
+                       0x0102001c      /* KEY_ENTER */
+                       0x01030073      /* KEY_VOLUMEUP */
+                       0x02000040      /* KEY_F6 */
+                       0x02010042      /* KEY_F8 */
+                       0x02020043      /* KEY_F9 */
+                       0x02030044      /* KEY_F10 */
+                       0x0300003b      /* KEY_F1 */
+                       0x0301003c      /* KEY_F2 */
+                       0x0302003d      /* KEY_F3 */
+                       0x03030074>;    /* KEY_POWER */
+       status = "okay";
+};
index 1f5d45e..fcf035b 100644 (file)
                                #interrupt-cells = <2>;
                        };
 
+                       kpp: kpp@73f94000 {
+                               compatible = "fsl,imx51-kpp", "fsl,imx21-kpp";
+                               reg = <0x73f94000 0x4000>;
+                               interrupts = <60>;
+                               clocks = <&clks 0>;
+                               status = "disabled";
+                       };
+
                        wdog1: wdog@73f98000 {
                                compatible = "fsl,imx51-wdt", "fsl,imx21-wdt";
                                reg = <0x73f98000 0x4000>;
                                                        260 0x80000000  /* MX51_PAD_NANDF_RDY_INT__FEC_TX_CLK */
                                                >;
                                        };
+
+                                       pinctrl_fec_2: fecgrp-2 {
+                                               fsl,pins = <
+                                                       589 0x80000000 /* MX51_PAD_DI_GP3__FEC_TX_ER */
+                                                       592 0x80000000 /* MX51_PAD_DI2_PIN4__FEC_CRS */
+                                                       594 0x80000000 /* MX51_PAD_DI2_PIN2__FEC_MDC */
+                                                       596 0x80000000 /* MX51_PAD_DI2_PIN3__FEC_MDIO */
+                                                       598 0x80000000 /* MX51_PAD_DI2_DISP_CLK__FEC_RDATA1 */
+                                                       602 0x80000000 /* MX51_PAD_DI_GP4__FEC_RDATA2 */
+                                                       604 0x80000000 /* MX51_PAD_DISP2_DAT0__FEC_RDATA3 */
+                                                       609 0x80000000 /* MX51_PAD_DISP2_DAT1__FEC_RX_ER */
+                                                       618 0x80000000 /* MX51_PAD_DISP2_DAT6__FEC_TDATA1 */
+                                                       623 0x80000000 /* MX51_PAD_DISP2_DAT7__FEC_TDATA2 */
+                                                       628 0x80000000 /* MX51_PAD_DISP2_DAT8__FEC_TDATA3 */
+                                                       634 0x80000000 /* MX51_PAD_DISP2_DAT9__FEC_TX_EN */
+                                                       639 0x80000000 /* MX51_PAD_DISP2_DAT10__FEC_COL */
+                                                       644 0x80000000 /* MX51_PAD_DISP2_DAT11__FEC_RX_CLK */
+                                                       649 0x80000000 /* MX51_PAD_DISP2_DAT12__FEC_RX_DV */
+                                                       653 0x80000000 /* MX51_PAD_DISP2_DAT13__FEC_TX_CLK */
+                                                       657 0x80000000 /* MX51_PAD_DISP2_DAT14__FEC_RDATA0 */
+                                                       662 0x80000000 /* MX51_PAD_DISP2_DAT15__FEC_TDATA0 */
+                                               >;
+                                       };
                                };
 
                                ecspi1 {
                                                        49 0x1c5        /* MX51_PAD_EIM_D24__UART3_CTS */
                                                >;
                                        };
+
+                                       pinctrl_uart3_2: uart3grp-2 {
+                                               fsl,pins = <
+                                                       434 0x1c5       /* MX51_PAD_UART3_RXD__UART3_RXD */
+                                                       430 0x1c5       /* MX51_PAD_UART3_TXD__UART3_TXD */
+                                               >;
+                                       };
+                               };
+
+                               kpp {
+                                       pinctrl_kpp_1: kppgrp-1 {
+                                               fsl,pins = <
+                                                       438 0xe0        /* MX51_PAD_KEY_ROW0__KEY_ROW0 */
+                                                       439 0xe0        /* MX51_PAD_KEY_ROW1__KEY_ROW1 */
+                                                       440 0xe0        /* MX51_PAD_KEY_ROW2__KEY_ROW2 */
+                                                       441 0xe0        /* MX51_PAD_KEY_ROW3__KEY_ROW3 */
+                                                       442 0xe8        /* MX51_PAD_KEY_COL0__KEY_COL0 */
+                                                       444 0xe8        /* MX51_PAD_KEY_COL1__KEY_COL1 */
+                                                       446 0xe8        /* MX51_PAD_KEY_COL2__KEY_COL2 */
+                                                       448 0xe8        /* MX51_PAD_KEY_COL3__KEY_COL3 */
+                                               >;
+                                       };
                                };
                        };
 
index 4be76f2..e049fd0 100644 (file)
                reg = <0x70000000 0x40000000>;
        };
 
-       soc {
-               aips@50000000 { /* AIPS1 */
-                       spba@50000000 {
-                               esdhc@50004000 { /* ESDHC1 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc1_2>;
-                                       cd-gpios = <&gpio1 1 0>;
-                                       wp-gpios = <&gpio1 9 0>;
-                                       status = "okay";
-                               };
-                       };
-
-                       iomuxc@53fa8000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
-
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       1077 0x80000000 /* MX53_PAD_GPIO_1__GPIO1_1 */
-                                                       1085 0x80000000 /* MX53_PAD_GPIO_9__GPIO1_9 */
-                                                       486  0x80000000 /* MX53_PAD_EIM_EB3__GPIO2_31 */
-                                                       739  0x80000000 /* MX53_PAD_GPIO_10__GPIO4_0 */
-                                                       218  0x80000000 /* MX53_PAD_DISP0_DAT16__GPIO5_10 */
-                                                       226  0x80000000 /* MX53_PAD_DISP0_DAT17__GPIO5_11 */
-                                                       233  0x80000000 /* MX53_PAD_DISP0_DAT18__GPIO5_12 */
-                                                       241  0x80000000 /* MX53_PAD_DISP0_DAT19__GPIO5_13 */
-                                                       429  0x80000000 /* MX53_PAD_EIM_D16__EMI_WEIM_D_16 */
-                                                       435  0x80000000 /* MX53_PAD_EIM_D17__EMI_WEIM_D_17 */
-                                                       441  0x80000000 /* MX53_PAD_EIM_D18__EMI_WEIM_D_18 */
-                                                       448  0x80000000 /* MX53_PAD_EIM_D19__EMI_WEIM_D_19 */
-                                                       456  0x80000000 /* MX53_PAD_EIM_D20__EMI_WEIM_D_20 */
-                                                       464  0x80000000 /* MX53_PAD_EIM_D21__EMI_WEIM_D_21 */
-                                                       471  0x80000000 /* MX53_PAD_EIM_D22__EMI_WEIM_D_22 */
-                                                       477  0x80000000 /* MX53_PAD_EIM_D23__EMI_WEIM_D_23 */
-                                                       492  0x80000000 /* MX53_PAD_EIM_D24__EMI_WEIM_D_24 */
-                                                       500  0x80000000 /* MX53_PAD_EIM_D25__EMI_WEIM_D_25 */
-                                                       508  0x80000000 /* MX53_PAD_EIM_D26__EMI_WEIM_D_26 */
-                                                       516  0x80000000 /* MX53_PAD_EIM_D27__EMI_WEIM_D_27 */
-                                                       524  0x80000000 /* MX53_PAD_EIM_D28__EMI_WEIM_D_28 */
-                                                       532  0x80000000 /* MX53_PAD_EIM_D29__EMI_WEIM_D_29 */
-                                                       540  0x80000000 /* MX53_PAD_EIM_D30__EMI_WEIM_D_30 */
-                                                       548  0x80000000 /* MX53_PAD_EIM_D31__EMI_WEIM_D_31 */
-                                                       637  0x80000000 /* MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 */
-                                                       642  0x80000000 /* MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 */
-                                                       647  0x80000000 /* MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 */
-                                                       652  0x80000000 /* MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 */
-                                                       657  0x80000000 /* MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 */
-                                                       662  0x80000000 /* MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 */
-                                                       667  0x80000000 /* MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 */
-                                                       611  0x80000000 /* MX53_PAD_EIM_OE__EMI_WEIM_OE */
-                                                       616  0x80000000 /* MX53_PAD_EIM_RW__EMI_WEIM_RW */
-                                                       607  0x80000000 /* MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 */
-                                               >;
-                                       };
-                               };
-                       };
-
-                       uart1: serial@53fbc000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart1_2>;
-                               status = "okay";
-                       };
-               };
-       };
-
        eim-cs1@f4000000 {
                #address-cells = <1>;
                #size-cells = <1>;
                };
        };
 };
+
+&esdhc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc1_2>;
+       cd-gpios = <&gpio1 1 0>;
+       wp-gpios = <&gpio1 9 0>;
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               1077 0x80000000 /* MX53_PAD_GPIO_1__GPIO1_1 */
+                               1085 0x80000000 /* MX53_PAD_GPIO_9__GPIO1_9 */
+                               486  0x80000000 /* MX53_PAD_EIM_EB3__GPIO2_31 */
+                               739  0x80000000 /* MX53_PAD_GPIO_10__GPIO4_0 */
+                               218  0x80000000 /* MX53_PAD_DISP0_DAT16__GPIO5_10 */
+                               226  0x80000000 /* MX53_PAD_DISP0_DAT17__GPIO5_11 */
+                               233  0x80000000 /* MX53_PAD_DISP0_DAT18__GPIO5_12 */
+                               241  0x80000000 /* MX53_PAD_DISP0_DAT19__GPIO5_13 */
+                               429  0x80000000 /* MX53_PAD_EIM_D16__EMI_WEIM_D_16 */
+                               435  0x80000000 /* MX53_PAD_EIM_D17__EMI_WEIM_D_17 */
+                               441  0x80000000 /* MX53_PAD_EIM_D18__EMI_WEIM_D_18 */
+                               448  0x80000000 /* MX53_PAD_EIM_D19__EMI_WEIM_D_19 */
+                               456  0x80000000 /* MX53_PAD_EIM_D20__EMI_WEIM_D_20 */
+                               464  0x80000000 /* MX53_PAD_EIM_D21__EMI_WEIM_D_21 */
+                               471  0x80000000 /* MX53_PAD_EIM_D22__EMI_WEIM_D_22 */
+                               477  0x80000000 /* MX53_PAD_EIM_D23__EMI_WEIM_D_23 */
+                               492  0x80000000 /* MX53_PAD_EIM_D24__EMI_WEIM_D_24 */
+                               500  0x80000000 /* MX53_PAD_EIM_D25__EMI_WEIM_D_25 */
+                               508  0x80000000 /* MX53_PAD_EIM_D26__EMI_WEIM_D_26 */
+                               516  0x80000000 /* MX53_PAD_EIM_D27__EMI_WEIM_D_27 */
+                               524  0x80000000 /* MX53_PAD_EIM_D28__EMI_WEIM_D_28 */
+                               532  0x80000000 /* MX53_PAD_EIM_D29__EMI_WEIM_D_29 */
+                               540  0x80000000 /* MX53_PAD_EIM_D30__EMI_WEIM_D_30 */
+                               548  0x80000000 /* MX53_PAD_EIM_D31__EMI_WEIM_D_31 */
+                               637  0x80000000 /* MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 */
+                               642  0x80000000 /* MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 */
+                               647  0x80000000 /* MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 */
+                               652  0x80000000 /* MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 */
+                               657  0x80000000 /* MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 */
+                               662  0x80000000 /* MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 */
+                               667  0x80000000 /* MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 */
+                               611  0x80000000 /* MX53_PAD_EIM_OE__EMI_WEIM_OE */
+                               616  0x80000000 /* MX53_PAD_EIM_RW__EMI_WEIM_RW */
+                               607  0x80000000 /* MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 */
+                       >;
+               };
+       };
+};
+
+&uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1_2>;
+       status = "okay";
+};
index a124d1e..85a89b5 100644 (file)
                reg = <0x70000000 0x80000000>;
        };
 
-       soc {
-               aips@50000000 { /* AIPS1 */
-                       spba@50000000 {
-                               esdhc@50004000 { /* ESDHC1 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc1_1>;
-                                       cd-gpios = <&gpio3 13 0>;
-                                       wp-gpios = <&gpio3 14 0>;
-                                       status = "okay";
-                               };
-
-                               ecspi@50010000 { /* ECSPI1 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_ecspi1_1>;
-                                       fsl,spi-num-chipselects = <2>;
-                                       cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
-                                       status = "okay";
-
-                                       flash: at45db321d@1 {
-                                               #address-cells = <1>;
-                                               #size-cells = <1>;
-                                               compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
-                                               spi-max-frequency = <25000000>;
-                                               reg = <1>;
-
-                                               partition@0 {
-                                                       label = "U-Boot";
-                                                       reg = <0x0 0x40000>;
-                                                       read-only;
-                                               };
-
-                                               partition@40000 {
-                                                       label = "Kernel";
-                                                       reg = <0x40000 0x3c0000>;
-                                               };
-                                       };
-                               };
-
-                               esdhc@50020000 { /* ESDHC3 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc3_1>;
-                                       cd-gpios = <&gpio3 11 0>;
-                                       wp-gpios = <&gpio3 12 0>;
-                                       status = "okay";
-                               };
-                       };
-
-                       iomuxc@53fa8000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
-
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       424  0x80000000 /* MX53_PAD_EIM_EB2__GPIO2_30 */
-                                                       449  0x80000000 /* MX53_PAD_EIM_D19__GPIO3_19 */
-                                                       693  0x80000000 /* MX53_PAD_EIM_DA11__GPIO3_11 */
-                                                       697  0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
-                                                       701  0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
-                                                       705  0x80000000 /* MX53_PAD_EIM_DA14__GPIO3_14 */
-                                                       868  0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
-                                                       873  0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
-                                               >;
-                                       };
-                               };
-                       };
-
-                       uart1: serial@53fbc000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart1_1>;
-                               status = "okay";
-                       };
-               };
-
-               aips@60000000 { /* AIPS2 */
-                       i2c@63fc4000 { /* I2C2 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_i2c2_1>;
-                               status = "okay";
-
-                               pmic: mc13892@08 {
-                                       compatible = "fsl,mc13892", "fsl,mc13xxx";
-                                       reg = <0x08>;
-                               };
-
-                               codec: sgtl5000@0a {
-                                       compatible = "fsl,sgtl5000";
-                                       reg = <0x0a>;
-                               };
-                       };
-
-                       ethernet@63fec000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_fec_1>;
-                               phy-mode = "rmii";
-                               phy-reset-gpios = <&gpio7 6 0>;
-                               status = "okay";
-                       };
-               };
-       };
-
        leds {
                compatible = "gpio-leds";
 
                };
        };
 };
+
+&esdhc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc1_1>;
+       cd-gpios = <&gpio3 13 0>;
+       wp-gpios = <&gpio3 14 0>;
+       status = "okay";
+};
+
+&ecspi1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_ecspi1_1>;
+       fsl,spi-num-chipselects = <2>;
+       cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
+       status = "okay";
+
+       flash: at45db321d@1 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
+               spi-max-frequency = <25000000>;
+               reg = <1>;
+
+               partition@0 {
+                       label = "U-Boot";
+                       reg = <0x0 0x40000>;
+                       read-only;
+               };
+
+               partition@40000 {
+                       label = "Kernel";
+                       reg = <0x40000 0x3c0000>;
+               };
+       };
+};
+
+&esdhc3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc3_1>;
+       cd-gpios = <&gpio3 11 0>;
+       wp-gpios = <&gpio3 12 0>;
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               424  0x80000000 /* MX53_PAD_EIM_EB2__GPIO2_30 */
+                               449  0x80000000 /* MX53_PAD_EIM_D19__GPIO3_19 */
+                               693  0x80000000 /* MX53_PAD_EIM_DA11__GPIO3_11 */
+                               697  0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
+                               701  0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
+                               705  0x80000000 /* MX53_PAD_EIM_DA14__GPIO3_14 */
+                               868  0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
+                               873  0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
+                       >;
+               };
+       };
+};
+
+&uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1_1>;
+       status = "okay";
+};
+
+&i2c2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c2_1>;
+       status = "okay";
+
+       pmic: mc13892@08 {
+               compatible = "fsl,mc13892", "fsl,mc13xxx";
+               reg = <0x08>;
+       };
+
+       codec: sgtl5000@0a {
+               compatible = "fsl,sgtl5000";
+               reg = <0x0a>;
+       };
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_fec_1>;
+       phy-mode = "rmii";
+       phy-reset-gpios = <&gpio7 6 0>;
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
new file mode 100644 (file)
index 0000000..e54fffd
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2012 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx53-tqma53.dtsi"
+
+/ {
+       model = "TQ MBa53 starter kit";
+       compatible = "tq,mba53", "tq,tqma53", "fsl,imx53";
+};
+
+&iomuxc {
+       lvds1 {
+               pinctrl_lvds1_1: lvds1-grp1 {
+                       fsl,pins = <730 0x10000         /* LVDS0_TX3 */
+                                   732 0x10000         /* LVDS0_CLK */
+                                   734 0x10000         /* LVDS0_TX2 */
+                                   736 0x10000         /* LVDS0_TX1 */
+                                   738 0x10000>;       /* LVDS0_TX0 */
+               };
+
+               pinctrl_lvds1_2: lvds1-grp2 {
+                       fsl,pins = <720 0x10000         /* LVDS1_TX3 */
+                                   722 0x10000         /* LVDS1_TX2 */
+                                   724 0x10000         /* LVDS1_CLK */
+                                   726 0x10000         /* LVDS1_TX1 */
+                                   728 0x10000>;       /* LVDS1_TX0 */
+               };
+       };
+
+       disp1 {
+               pinctrl_disp1_1: disp1-grp1 {
+                       fsl,pins = <689 0x10000         /* DISP1_DRDY   */
+                                   482 0x10000         /* DISP1_HSYNC  */
+                                   489 0x10000         /* DISP1_VSYNC  */
+                                   684 0x10000         /* DISP1_DAT_0  */
+                                   515 0x10000         /* DISP1_DAT_22 */
+                                   523 0x10000         /* DISP1_DAT_23 */
+                                   543 0x10000         /* DISP1_DAT_21 */
+                                   553 0x10000         /* DISP1_DAT_20 */
+                                   558 0x10000         /* DISP1_DAT_19 */
+                                   564 0x10000         /* DISP1_DAT_18 */
+                                   570 0x10000         /* DISP1_DAT_17 */
+                                   575 0x10000         /* DISP1_DAT_16 */
+                                   580 0x10000         /* DISP1_DAT_15 */
+                                   585 0x10000         /* DISP1_DAT_14 */
+                                   590 0x10000         /* DISP1_DAT_13 */
+                                   595 0x10000         /* DISP1_DAT_12 */
+                                   628 0x10000         /* DISP1_DAT_11 */
+                                   634 0x10000         /* DISP1_DAT_10 */
+                                   639 0x10000         /* DISP1_DAT_9  */
+                                   644 0x10000         /* DISP1_DAT_8  */
+                                   649 0x10000         /* DISP1_DAT_7  */
+                                   654 0x10000         /* DISP1_DAT_6  */
+                                   659 0x10000         /* DISP1_DAT_5  */
+                                   664 0x10000         /* DISP1_DAT_4  */
+                                   669 0x10000         /* DISP1_DAT_3  */
+                                   674 0x10000         /* DISP1_DAT_2  */
+                                   679 0x10000         /* DISP1_DAT_1  */
+                                   684 0x10000>;       /* DISP1_DAT_0  */
+               };
+       };
+};
+
+&cspi {
+       status = "okay";
+};
+
+&i2c2 {
+       codec: sgtl5000@a {
+               compatible = "fsl,sgtl5000";
+               reg = <0x0a>;
+       };
+
+       expander: pca9554@20 {
+               compatible = "pca9554";
+               reg = <0x20>;
+               interrupts = <109>;
+       };
+
+       sensor2: lm75@49 {
+               compatible = "lm75";
+               reg = <0x49>;
+       };
+};
+
+&fec {
+       status = "okay";
+};
+
+&esdhc2 {
+       status = "okay";
+};
+
+&uart3 {
+       status = "okay";
+};
+
+&ecspi1 {
+       status = "okay";
+};
+
+&uart1 {
+       status = "okay";
+};
+
+&uart2 {
+       status = "okay";
+};
+
+&can1 {
+       status = "okay";
+};
+
+&can2 {
+       status = "okay";
+};
+
+&i2c3 {
+       status = "okay";
+};
index b007553..05cc562 100644 (file)
                reg = <0x70000000 0x40000000>;
        };
 
-       soc {
-               aips@50000000 { /* AIPS1 */
-                       spba@50000000 {
-                               esdhc@50004000 { /* ESDHC1 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc1_1>;
-                                       cd-gpios = <&gpio3 13 0>;
-                                       status = "okay";
-                               };
-
-                               ssi2: ssi@50014000 {
-                                       fsl,mode = "i2s-slave";
-                                       status = "okay";
-                               };
-
-                               esdhc@50020000 { /* ESDHC3 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc3_1>;
-                                       cd-gpios = <&gpio3 11 0>;
-                                       wp-gpios = <&gpio3 12 0>;
-                                       status = "okay";
-                               };
-                       };
-
-                       iomuxc@53fa8000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
-
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       1071 0x80000000 /* MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK */
-                                                       1141 0x80000000 /* MX53_PAD_GPIO_8__GPIO1_8 */
-                                                       982  0x80000000 /* MX53_PAD_PATA_DATA14__GPIO2_14 */
-                                                       989  0x80000000 /* MX53_PAD_PATA_DATA15__GPIO2_15 */
-                                                       693  0x80000000 /* MX53_PAD_EIM_DA11__GPIO3_11 */
-                                                       697  0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
-                                                       701  0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
-                                                       868  0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
-                                                       1149 0x80000000 /* MX53_PAD_GPIO_16__GPIO7_11 */
-                                               >;
-                                       };
-
-                                       led_pin_gpio7_7: led_gpio7_7@0 {
-                                               fsl,pins = <
-                                                       873  0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
-                                               >;
-                                       };
-                               };
-
-                       };
-
-                       uart1: serial@53fbc000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart1_1>;
-                               status = "okay";
-                       };
-               };
-
-               aips@60000000 { /* AIPS2 */
-                       i2c@63fc4000 { /* I2C2 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_i2c2_1>;
-                               status = "okay";
-
-                               sgtl5000: codec@0a {
-                                       compatible = "fsl,sgtl5000";
-                                       reg = <0x0a>;
-                                       VDDA-supply = <&reg_3p2v>;
-                                       VDDIO-supply = <&reg_3p2v>;
-                               };
-                       };
-
-                       i2c@63fc8000 { /* I2C1 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_i2c1_1>;
-                               status = "okay";
-
-                               accelerometer: mma8450@1c {
-                                       compatible = "fsl,mma8450";
-                                       reg = <0x1c>;
-                               };
-
-                               pmic: dialog@48 {
-                                       compatible = "dlg,da9053-aa", "dlg,da9052";
-                                       reg = <0x48>;
-                                       interrupt-parent = <&gpio7>;
-                                       interrupts = <11 0x8>; /* low-level active IRQ at GPIO7_11 */
-
-                                       regulators {
-                                               buck1_reg: buck1 {
-                                                       regulator-min-microvolt = <500000>;
-                                                       regulator-max-microvolt = <2075000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               buck2_reg: buck2 {
-                                                       regulator-min-microvolt = <500000>;
-                                                       regulator-max-microvolt = <2075000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               buck3_reg: buck3 {
-                                                       regulator-min-microvolt = <925000>;
-                                                       regulator-max-microvolt = <2500000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               buck4_reg: buck4 {
-                                                       regulator-min-microvolt = <925000>;
-                                                       regulator-max-microvolt = <2500000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo1_reg: ldo1 {
-                                                       regulator-min-microvolt = <600000>;
-                                                       regulator-max-microvolt = <1800000>;
-                                                       regulator-boot-on;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo2_reg: ldo2 {
-                                                       regulator-min-microvolt = <600000>;
-                                                       regulator-max-microvolt = <1800000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo3_reg: ldo3 {
-                                                       regulator-min-microvolt = <600000>;
-                                                       regulator-max-microvolt = <1800000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo4_reg: ldo4 {
-                                                       regulator-min-microvolt = <1725000>;
-                                                       regulator-max-microvolt = <3300000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo5_reg: ldo5 {
-                                                       regulator-min-microvolt = <1725000>;
-                                                       regulator-max-microvolt = <3300000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo6_reg: ldo6 {
-                                                       regulator-min-microvolt = <1200000>;
-                                                       regulator-max-microvolt = <3600000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo7_reg: ldo7 {
-                                                       regulator-min-microvolt = <1200000>;
-                                                       regulator-max-microvolt = <3600000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo8_reg: ldo8 {
-                                                       regulator-min-microvolt = <1200000>;
-                                                       regulator-max-microvolt = <3600000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo9_reg: ldo9 {
-                                                       regulator-min-microvolt = <1200000>;
-                                                       regulator-max-microvolt = <3600000>;
-                                                       regulator-always-on;
-                                               };
-
-                                               ldo10_reg: ldo10 {
-                                                       regulator-min-microvolt = <1250000>;
-                                                       regulator-max-microvolt = <3650000>;
-                                                       regulator-always-on;
-                                               };
-                                       };
-                               };
-                       };
-
-                       audmux@63fd0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_audmux_1>;
-                               status = "okay";
-                       };
-
-                       ethernet@63fec000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_fec_1>;
-                               phy-mode = "rmii";
-                               phy-reset-gpios = <&gpio7 6 0>;
-                               status = "okay";
-                       };
-               };
-       };
-
        gpio-keys {
                compatible = "gpio-keys";
 
                mux-ext-port = <5>;
        };
 };
+
+&esdhc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc1_1>;
+       cd-gpios = <&gpio3 13 0>;
+       status = "okay";
+};
+
+&ssi2 {
+       fsl,mode = "i2s-slave";
+       status = "okay";
+};
+
+&esdhc3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc3_1>;
+       cd-gpios = <&gpio3 11 0>;
+       wp-gpios = <&gpio3 12 0>;
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               1071 0x80000000 /* MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK */
+                               1141 0x80000000 /* MX53_PAD_GPIO_8__GPIO1_8 */
+                               982  0x80000000 /* MX53_PAD_PATA_DATA14__GPIO2_14 */
+                               989  0x80000000 /* MX53_PAD_PATA_DATA15__GPIO2_15 */
+                               693  0x80000000 /* MX53_PAD_EIM_DA11__GPIO3_11 */
+                               697  0x80000000 /* MX53_PAD_EIM_DA12__GPIO3_12 */
+                               701  0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
+                               868  0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
+                               1149 0x80000000 /* MX53_PAD_GPIO_16__GPIO7_11 */
+                       >;
+               };
+
+               led_pin_gpio7_7: led_gpio7_7@0 {
+                       fsl,pins = <
+                               873  0x80000000 /* MX53_PAD_PATA_DA_1__GPIO7_7 */
+                       >;
+               };
+       };
+
+};
+
+&uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1_1>;
+       status = "okay";
+};
+
+&i2c2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c2_1>;
+       status = "okay";
+
+       sgtl5000: codec@0a {
+               compatible = "fsl,sgtl5000";
+               reg = <0x0a>;
+               VDDA-supply = <&reg_3p2v>;
+               VDDIO-supply = <&reg_3p2v>;
+       };
+};
+
+&i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c1_1>;
+       status = "okay";
+
+       accelerometer: mma8450@1c {
+               compatible = "fsl,mma8450";
+               reg = <0x1c>;
+       };
+
+       pmic: dialog@48 {
+               compatible = "dlg,da9053-aa", "dlg,da9052";
+               reg = <0x48>;
+               interrupt-parent = <&gpio7>;
+               interrupts = <11 0x8>; /* low-level active IRQ at GPIO7_11 */
+
+               regulators {
+                       buck1_reg: buck1 {
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <2075000>;
+                               regulator-always-on;
+                       };
+
+                       buck2_reg: buck2 {
+                               regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <2075000>;
+                               regulator-always-on;
+                       };
+
+                       buck3_reg: buck3 {
+                               regulator-min-microvolt = <925000>;
+                               regulator-max-microvolt = <2500000>;
+                               regulator-always-on;
+                       };
+
+                       buck4_reg: buck4 {
+                               regulator-min-microvolt = <925000>;
+                               regulator-max-microvolt = <2500000>;
+                               regulator-always-on;
+                       };
+
+                       ldo1_reg: ldo1 {
+                               regulator-min-microvolt = <600000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       ldo2_reg: ldo2 {
+                               regulator-min-microvolt = <600000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
+                       };
+
+                       ldo3_reg: ldo3 {
+                               regulator-min-microvolt = <600000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
+                       };
+
+                       ldo4_reg: ldo4 {
+                               regulator-min-microvolt = <1725000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       ldo5_reg: ldo5 {
+                               regulator-min-microvolt = <1725000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       ldo6_reg: ldo6 {
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3600000>;
+                               regulator-always-on;
+                       };
+
+                       ldo7_reg: ldo7 {
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3600000>;
+                               regulator-always-on;
+                       };
+
+                       ldo8_reg: ldo8 {
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3600000>;
+                               regulator-always-on;
+                       };
+
+                       ldo9_reg: ldo9 {
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3600000>;
+                               regulator-always-on;
+                       };
+
+                       ldo10_reg: ldo10 {
+                               regulator-min-microvolt = <1250000>;
+                               regulator-max-microvolt = <3650000>;
+                               regulator-always-on;
+                       };
+               };
+       };
+};
+
+&audmux {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_audmux_1>;
+       status = "okay";
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_fec_1>;
+       phy-mode = "rmii";
+       phy-reset-gpios = <&gpio7 6 0>;
+       status = "okay";
+};
index 06c6858..995554c 100644 (file)
                reg = <0x70000000 0x40000000>;
        };
 
-       soc {
-               aips@50000000 { /* AIPS1 */
-                       spba@50000000 {
-                               esdhc@50004000 { /* ESDHC1 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc1_1>;
-                                       cd-gpios = <&gpio3 13 0>;
-                                       wp-gpios = <&gpio4 11 0>;
-                                       status = "okay";
-                               };
-
-                               esdhc@50008000 { /* ESDHC2 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc2_1>;
-                                       non-removable;
-                                       status = "okay";
-                               };
-
-                               uart3: serial@5000c000 {
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_uart3_1>;
-                                       fsl,uart-has-rtscts;
-                                       status = "okay";
-                               };
-
-                               ecspi@50010000 { /* ECSPI1 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_ecspi1_1>;
-                                       fsl,spi-num-chipselects = <2>;
-                                       cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
-                                       status = "okay";
-
-                                       zigbee: mc1323@0 {
-                                               compatible = "fsl,mc1323";
-                                               spi-max-frequency = <8000000>;
-                                               reg = <0>;
-                                       };
-
-                                       flash: m25p32@1 {
-                                               #address-cells = <1>;
-                                               #size-cells = <1>;
-                                               compatible = "st,m25p32", "st,m25p";
-                                               spi-max-frequency = <20000000>;
-                                               reg = <1>;
-
-                                               partition@0 {
-                                                       label = "U-Boot";
-                                                       reg = <0x0 0x40000>;
-                                                       read-only;
-                                               };
-
-                                               partition@40000 {
-                                                       label = "Kernel";
-                                                       reg = <0x40000 0x3c0000>;
-                                               };
-                                       };
-                               };
-
-                               esdhc@50020000 { /* ESDHC3 */
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_esdhc3_1>;
-                                       non-removable;
-                                       status = "okay";
-                               };
-                       };
-
-                       iomuxc@53fa8000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
-
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       982  0x80000000 /* MX53_PAD_PATA_DATA14__GPIO2_14 */
-                                                       989  0x80000000 /* MX53_PAD_PATA_DATA15__GPIO2_15 */
-                                                       424  0x80000000 /* MX53_PAD_EIM_EB2__GPIO2_30 */
-                                                       701  0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
-                                                       449  0x80000000 /* MX53_PAD_EIM_D19__GPIO3_19 */
-                                                       43   0x80000000 /* MX53_PAD_KEY_ROW2__GPIO4_11 */
-                                                       868  0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
-                                               >;
-                                       };
-                               };
-                       };
-
-                       uart1: serial@53fbc000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart1_1>;
-                               status = "okay";
-                       };
-
-                       uart2: serial@53fc0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart2_1>;
-                               status = "okay";
-                       };
-               };
-
-               aips@60000000 { /* AIPS2 */
-                       i2c@63fc4000 { /* I2C2 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_i2c2_1>;
-                               status = "okay";
-
-                               codec: sgtl5000@0a {
-                                       compatible = "fsl,sgtl5000";
-                                       reg = <0x0a>;
-                               };
-
-                               magnetometer: mag3110@0e {
-                                       compatible = "fsl,mag3110";
-                                       reg = <0x0e>;
-                               };
-
-                               touchkey: mpr121@5a {
-                                       compatible = "fsl,mpr121";
-                                       reg = <0x5a>;
-                               };
-                       };
-
-                       i2c@63fc8000 { /* I2C1 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_i2c1_1>;
-                               status = "okay";
-
-                               accelerometer: mma8450@1c {
-                                       compatible = "fsl,mma8450";
-                                       reg = <0x1c>;
-                               };
-
-                               camera: ov5642@3c {
-                                       compatible = "ovti,ov5642";
-                                       reg = <0x3c>;
-                               };
-
-                               pmic: dialog@48 {
-                                       compatible = "dialog,da9053", "dialog,da9052";
-                                       reg = <0x48>;
-                               };
-                       };
-
-                       ethernet@63fec000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_fec_1>;
-                               phy-mode = "rmii";
-                               phy-reset-gpios = <&gpio7 6 0>;
-                               status = "okay";
-                       };
-               };
-       };
-
        gpio-keys {
                compatible = "gpio-keys";
 
                };
        };
 };
+
+&esdhc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc1_1>;
+       cd-gpios = <&gpio3 13 0>;
+       wp-gpios = <&gpio4 11 0>;
+       status = "okay";
+};
+
+&esdhc2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc2_1>;
+       non-removable;
+       status = "okay";
+};
+
+&uart3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart3_1>;
+       fsl,uart-has-rtscts;
+       status = "okay";
+};
+
+&ecspi1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_ecspi1_1>;
+       fsl,spi-num-chipselects = <2>;
+       cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>;
+       status = "okay";
+
+       zigbee: mc1323@0 {
+               compatible = "fsl,mc1323";
+               spi-max-frequency = <8000000>;
+               reg = <0>;
+       };
+
+       flash: m25p32@1 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "st,m25p32", "st,m25p";
+               spi-max-frequency = <20000000>;
+               reg = <1>;
+
+               partition@0 {
+                       label = "U-Boot";
+                       reg = <0x0 0x40000>;
+                       read-only;
+               };
+
+               partition@40000 {
+                       label = "Kernel";
+                       reg = <0x40000 0x3c0000>;
+               };
+       };
+};
+
+&esdhc3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc3_1>;
+       non-removable;
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               982  0x80000000 /* MX53_PAD_PATA_DATA14__GPIO2_14 */
+                               989  0x80000000 /* MX53_PAD_PATA_DATA15__GPIO2_15 */
+                               424  0x80000000 /* MX53_PAD_EIM_EB2__GPIO2_30 */
+                               701  0x80000000 /* MX53_PAD_EIM_DA13__GPIO3_13 */
+                               449  0x80000000 /* MX53_PAD_EIM_D19__GPIO3_19 */
+                               43   0x80000000 /* MX53_PAD_KEY_ROW2__GPIO4_11 */
+                               868  0x80000000 /* MX53_PAD_PATA_DA_0__GPIO7_6 */
+                       >;
+               };
+       };
+};
+
+&uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1_1>;
+       status = "okay";
+};
+
+&uart2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart2_1>;
+       status = "okay";
+};
+
+&i2c2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c2_1>;
+       status = "okay";
+
+       codec: sgtl5000@0a {
+               compatible = "fsl,sgtl5000";
+               reg = <0x0a>;
+       };
+
+       magnetometer: mag3110@0e {
+               compatible = "fsl,mag3110";
+               reg = <0x0e>;
+       };
+
+       touchkey: mpr121@5a {
+               compatible = "fsl,mpr121";
+               reg = <0x5a>;
+       };
+};
+
+&i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c1_1>;
+       status = "okay";
+
+       accelerometer: mma8450@1c {
+               compatible = "fsl,mma8450";
+               reg = <0x1c>;
+       };
+
+       camera: ov5642@3c {
+               compatible = "ovti,ov5642";
+               reg = <0x3c>;
+       };
+
+       pmic: dialog@48 {
+               compatible = "dialog,da9053", "dialog,da9052";
+               reg = <0x48>;
+       };
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_fec_1>;
+       phy-mode = "rmii";
+       phy-reset-gpios = <&gpio7 6 0>;
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
new file mode 100644 (file)
index 0000000..8278ec5
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2012 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "imx53.dtsi"
+
+/ {
+       model = "TQ TQMa53";
+       compatible = "tq,tqma53", "fsl,imx53";
+
+       memory {
+               reg = <0x70000000 0x40000000>; /* Up to 1GiB */
+       };
+
+       regulators {
+               compatible = "simple-bus";
+
+               reg_3p3v: 3p3v {
+                       compatible = "regulator-fixed";
+                       regulator-name = "3P3V";
+                       regulator-min-microvolt = <3300000>;
+                       regulator-max-microvolt = <3300000>;
+                       regulator-always-on;
+               };
+       };
+};
+
+&esdhc2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc2_1>;
+       wp-gpios = <&gpio1 2 0>;
+       cd-gpios = <&gpio1 4 0>;
+       status = "disabled";
+};
+
+&uart3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart3_2>;
+       status = "disabled";
+};
+
+&ecspi1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_ecspi1_1>;
+       fsl,spi-num-chipselects = <4>;
+       cs-gpios = <&gpio2 30 0>, <&gpio3 19 0>,
+                  <&gpio3 24 0>, <&gpio3 25 0>;
+       status = "disabled";
+};
+
+&esdhc3 { /* EMMC */
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_esdhc3_1>;
+       vmmc-supply = <&reg_3p3v>;
+       non-removable;
+       bus-width = <8>;
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       i2s {
+               pinctrl_i2s_1: i2s-grp1 {
+                       fsl,pins = <
+                                1   0x10000    /* I2S_MCLK */
+                                10  0x10000    /* I2S_SCLK */
+                                17  0x10000    /* I2S_DOUT */
+                                23  0x10000    /* I2S_LRCLK */
+                                30  0x10000    /* I2S_DIN  */
+                       >;
+               };
+       };
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                                610  0x10000   /* MX53_PAD_EIM_CS1__IPU_DI1_PIN6 (VSYNC)*/
+                                711  0x10000   /* MX53_PAD_EIM_DA15__IPU_DI1_PIN4 (HSYNC)*/
+                                873  0x10000   /* MX53_PAD_PATA_DA_1__GPIO7_7 (LCD_BLT_EN)*/
+                                878  0x10000   /* MX53_PAD_PATA_DA_2__GPIO7_8 (LCD_RESET)*/
+                                922  0x10000   /* MX53_PAD_PATA_DATA5__GPIO2_5 (LCD_POWER)*/
+                                928  0x10000   /* MX53_PAD_PATA_DATA6__GPIO2_6 (PMIC_INT)*/
+                                982  0x10000   /* MX53_PAD_PATA_DATA14__GPIO2_14 (CSI_RST)*/
+                                989  0x10000   /* MX53_PAD_PATA_DATA15__GPIO2_15 (CSI_PWDN)*/
+                                1069 0x10000   /* MX53_PAD_GPIO_0__GPIO1_0 (SYSTEM_DOWN)*/
+                                1093 0x10000   /* MX53_PAD_GPIO_3__GPIO1_3 */
+                       >;
+               };
+       };
+};
+
+&uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1_2>;
+       fsl,uart-has-rtscts;
+       status = "disabled";
+};
+
+&uart2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart2_1>;
+       status = "disabled";
+};
+
+&can1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_can1_2>;
+       status = "disabled";
+};
+
+&can2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_can2_1>;
+       status = "disabled";
+};
+
+&i2c3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c3_1>;
+       status = "disabled";
+};
+
+&cspi {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_cspi_1>;
+       fsl,spi-num-chipselects = <3>;
+       cs-gpios = <&gpio1 18 0>, <&gpio1 19 0>,
+                  <&gpio1 21 0>;
+       status = "disabled";
+};
+
+&i2c2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c2_1>;
+       status = "okay";
+
+       pmic: mc34708@8 {
+               compatible = "fsl,mc34708";
+               reg = <0x8>;
+               fsl,mc13xxx-uses-rtc;
+               interrupt-parent = <&gpio2>;
+               interrupts = <6 8>; /* PATA_DATA6, low active */
+       };
+
+       sensor1: lm75@48 {
+               compatible = "lm75";
+               reg = <0x48>;
+       };
+
+       eeprom: 24c64@50 {
+               compatible = "at,24c64";
+               pagesize = <32>;
+               reg = <0x50>;
+       };
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_fec_1>;
+       phy-mode = "rmii";
+       status = "disabled";
+};
index edc3f1e..d05aa21 100644 (file)
                                        };
                                };
 
+                               csi {
+                                       pinctrl_csi_1: csigrp-1 {
+                                               fsl,pins = <
+                                                       286 0x1d5       /* MX53_PAD_CSI0_DATA_EN__IPU_CSI0_DATA_EN */
+                                                       291 0x1d5       /* MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC */
+                                                       280 0x1d5       /* MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC */
+                                                       276 0x1d5       /* MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK */
+                                                       409 0x1d5       /* MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 */
+                                                       402 0x1d5       /* MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 */
+                                                       395 0x1d5       /* MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 */
+                                                       388 0x1d5       /* MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 */
+                                                       381 0x1d5       /* MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 */
+                                                       374 0x1d5       /* MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 */
+                                                       367 0x1d5       /* MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 */
+                                                       360 0x1d5       /* MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 */
+                                                       352 0x1d5       /* MX53_PAD_CSI0_DAT11__IPU_CSI0_D_11 */
+                                                       344 0x1d5       /* MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 */
+                                                       336 0x1d5       /* MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 */
+                                                       328 0x1d5       /* MX53_PAD_CSI0_DAT8__IPU_CSI0_D_8 */
+                                                       320 0x1d5       /* MX53_PAD_CSI0_DAT7__IPU_CSI0_D_7 */
+                                                       312 0x1d5       /* MX53_PAD_CSI0_DAT6__IPU_CSI0_D_6 */
+                                                       304 0x1d5       /* MX53_PAD_CSI0_DAT5__IPU_CSI0_D_5 */
+                                                       296 0x1d5       /* MX53_PAD_CSI0_DAT4__IPU_CSI0_D_4 */
+                                                       276 0x1d5       /* MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK */
+                                               >;
+                                       };
+                               };
+
+                               cspi {
+                                       pinctrl_cspi_1: cspigrp-1 {
+                                               fsl,pins = <
+                                                       998  0x1d5      /* MX53_PAD_SD1_DATA0__CSPI_MISO */
+                                                       1008 0x1d5      /* MX53_PAD_SD1_CMD__CSPI_MOSI */
+                                                       1022 0x1d5      /* MX53_PAD_SD1_CLK__CSPI_SCLK */
+                                               >;
+                                       };
+                               };
+
                                ecspi1 {
                                        pinctrl_ecspi1_1: ecspi1grp-1 {
                                                fsl,pins = <
                                                        853 0x80000000  /* MX53_PAD_PATA_DIOR__CAN1_RXCAN */
                                                >;
                                        };
+
+                                       pinctrl_can1_2: can1grp-2 {
+                                               fsl,pins = <
+                                                       37  0x80000000  /* MX53_PAD_KEY_COL2__CAN1_TXCAN */
+                                                       44  0x80000000  /* MX53_PAD_KEY_ROW2__CAN1_RXCAN */
+                                               >;
+                                       };
                                };
 
                                can2 {
                                        };
                                };
 
+                               owire {
+                                       pinctrl_owire_1: owiregrp-1 {
+                                               fsl,pins = <
+                                                               1166 0x80000000 /* MX53_PAD_GPIO_18__OWIRE_LINE */
+                                               >;
+                                       };
+                               };
+
                                uart1 {
                                        pinctrl_uart1_1: uart1grp-1 {
                                                fsl,pins = <
                                                        880 0x1c5       /* MX53_PAD_PATA_DA_2__UART3_RTS */
                                                >;
                                        };
+
+                                       pinctrl_uart3_2: uart3grp-2 {
+                                               fsl,pins = <
+                                                       884 0x1c5       /* MX53_PAD_PATA_CS_0__UART3_TXD_MUX */
+                                                       888 0x1c5       /* MX53_PAD_PATA_CS_1__UART3_RXD_MUX */
+                                               >;
+                                       };
+
                                };
 
                                uart4 {
                                status = "disabled";
                        };
 
+                       owire: owire@63fa4000 {
+                               compatible = "fsl,imx53-owire", "fsl,imx21-owire";
+                               reg = <0x63fa4000 0x4000>;
+                               clocks = <&clks 159>;
+                               status = "disabled";
+                       };
+
                        ecspi2: ecspi@63fac000 {
                                #address-cells = <1>;
                                #size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
new file mode 100644 (file)
index 0000000..63fafe2
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/include/ "imx6qdl.dtsi"
+
+/ {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a9";
+                       reg = <0>;
+                       next-level-cache = <&L2>;
+               };
+
+               cpu@1 {
+                       compatible = "arm,cortex-a9";
+                       reg = <1>;
+                       next-level-cache = <&L2>;
+               };
+       };
+
+       soc {
+               aips1: aips-bus@02000000 {
+                       pxp: pxp@020f0000 {
+                               reg = <0x020f0000 0x4000>;
+                               interrupts = <0 98 0x04>;
+                       };
+
+                       epdc: epdc@020f4000 {
+                               reg = <0x020f4000 0x4000>;
+                               interrupts = <0 97 0x04>;
+                       };
+
+                       lcdif: lcdif@020f8000 {
+                               reg = <0x020f8000 0x4000>;
+                               interrupts = <0 39 0x04>;
+                       };
+               };
+
+               aips2: aips-bus@02100000 {
+                       i2c4: i2c@021f8000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "fsl,imx1-i2c";
+                               reg = <0x021f8000 0x4000>;
+                               interrupts = <0 35 0x04>;
+                               status = "disabled";
+                       };
+               };
+       };
+};
index 5bfa02a..53eb241 100644 (file)
                reg = <0x10000000 0x80000000>;
        };
 
-       soc {
-               gpmi-nand@00112000 {
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_gpmi_nand_1>;
-                       status = "disabled"; /* gpmi nand conflicts with SD */
-               };
-
-               aips-bus@02000000 { /* AIPS1 */
-                       iomuxc@020e0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
-
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       176  0x80000000 /* MX6Q_PAD_EIM_D25__GPIO_3_25 */
-                                               >;
-                                       };
-                               };
-
-                               arm2 {
-                                       pinctrl_usdhc3_arm2: usdhc3grp-arm2 {
-                                               fsl,pins = <
-                                                       1363 0x80000000 /* MX6Q_PAD_NANDF_CS0__GPIO_6_11 */
-                                                       1369 0x80000000 /* MX6Q_PAD_NANDF_CS1__GPIO_6_14 */
-                                               >;
-                                       };
-                               };
-                       };
-               };
-
-               aips-bus@02100000 { /* AIPS2 */
-                       ethernet@02188000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_enet_2>;
-                               phy-mode = "rgmii";
-                               status = "okay";
-                       };
-
-                       usdhc@02198000 { /* uSDHC3 */
-                               cd-gpios = <&gpio6 11 0>;
-                               wp-gpios = <&gpio6 14 0>;
-                               vmmc-supply = <&reg_3p3v>;
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_usdhc3_1
-                                            &pinctrl_usdhc3_arm2>;
-                               status = "okay";
-                       };
-
-                       usdhc@0219c000 { /* uSDHC4 */
-                               non-removable;
-                               vmmc-supply = <&reg_3p3v>;
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_usdhc4_1>;
-                               status = "okay";
-                       };
-
-                       uart4: serial@021f0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart4_1>;
-                               status = "okay";
-                       };
-               };
-       };
-
        regulators {
                compatible = "simple-bus";
 
                };
        };
 };
+
+&gpmi {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_gpmi_nand_1>;
+       status = "disabled"; /* gpmi nand conflicts with SD */
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               176  0x80000000 /* MX6Q_PAD_EIM_D25__GPIO_3_25 */
+                       >;
+               };
+       };
+
+       arm2 {
+               pinctrl_usdhc3_arm2: usdhc3grp-arm2 {
+                       fsl,pins = <
+                               1363 0x80000000 /* MX6Q_PAD_NANDF_CS0__GPIO_6_11 */
+                               1369 0x80000000 /* MX6Q_PAD_NANDF_CS1__GPIO_6_14 */
+                       >;
+               };
+       };
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_enet_2>;
+       phy-mode = "rgmii";
+       status = "okay";
+};
+
+&usdhc3 {
+       cd-gpios = <&gpio6 11 0>;
+       wp-gpios = <&gpio6 14 0>;
+       vmmc-supply = <&reg_3p3v>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc3_1
+                    &pinctrl_usdhc3_arm2>;
+       status = "okay";
+};
+
+&usdhc4 {
+       non-removable;
+       vmmc-supply = <&reg_3p3v>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc4_1>;
+       status = "okay";
+};
+
+&uart4 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart4_1>;
+       status = "okay";
+};
index 826e4ad..656d489 100644 (file)
        memory {
                reg = <0x10000000 0x80000000>;
        };
+};
 
-       soc {
-               aips-bus@02000000 { /* AIPS1 */
-                       iomuxc@020e0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
 
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       1376 0x80000000 /* MX6Q_PAD_NANDF_CS2__GPIO_6_15 */
-                                                       13   0x80000000 /* MX6Q_PAD_SD2_DAT2__GPIO_1_13 */
-                                               >;
-                                       };
-                               };
-                       };
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               1376 0x80000000 /* MX6Q_PAD_NANDF_CS2__GPIO_6_15 */
+                               13   0x80000000 /* MX6Q_PAD_SD2_DAT2__GPIO_1_13 */
+                       >;
                };
+       };
+};
 
-               aips-bus@02100000 { /* AIPS2 */
-                       uart4: serial@021f0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart4_1>;
-                               status = "okay";
-                       };
+&uart4 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart4_1>;
+       status = "okay";
+};
 
-                       ethernet@02188000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_enet_2>;
-                               phy-mode = "rgmii";
-                               status = "okay";
-                       };
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_enet_2>;
+       phy-mode = "rgmii";
+       status = "okay";
+};
 
-                       usdhc@02198000 { /* uSDHC3 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_usdhc3_1>;
-                               cd-gpios = <&gpio6 15 0>;
-                               wp-gpios = <&gpio1 13 0>;
-                               status = "okay";
-                       };
-               };
-       };
+&usdhc3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc3_1>;
+       cd-gpios = <&gpio6 15 0>;
+       wp-gpios = <&gpio1 13 0>;
+       status = "okay";
 };
index d152328..2ce355c 100644 (file)
                reg = <0x10000000 0x40000000>;
        };
 
-       soc {
-               aips-bus@02000000 { /* AIPS1 */
-                       spba-bus@02000000 {
-                               ecspi@02008000 { /* eCSPI1 */
-                                       fsl,spi-num-chipselects = <1>;
-                                       cs-gpios = <&gpio3 19 0>;
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_ecspi1_1>;
-                                       status = "okay";
-
-                                       flash: m25p80@0 {
-                                               compatible = "sst,sst25vf016b";
-                                               spi-max-frequency = <20000000>;
-                                               reg = <0>;
-                                       };
-                               };
-
-                               ssi1: ssi@02028000 {
-                                       fsl,mode = "i2s-slave";
-                                       status = "okay";
-                               };
-                       };
-
-                       iomuxc@020e0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
-
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       1450 0x80000000 /* MX6Q_PAD_NANDF_D6__GPIO_2_6 */
-                                                       1458 0x80000000 /* MX6Q_PAD_NANDF_D7__GPIO_2_7 */
-                                                       121  0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
-                                                       144  0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
-                                                       152  0x80000000 /* MX6Q_PAD_EIM_D23__GPIO_3_23 */
-                                                       1262 0x80000000 /* MX6Q_PAD_SD3_DAT5__GPIO_7_0 */
-                                                       1270 0x1f0b0    /* MX6Q_PAD_SD3_DAT4__GPIO_7_1 */
-                                                       953  0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */
-                                               >;
-                                       };
-                               };
-                       };
-               };
-
-               aips-bus@02100000 { /* AIPS2 */
-                       usb@02184000 { /* USB OTG */
-                               vbus-supply = <&reg_usb_otg_vbus>;
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_usbotg_1>;
-                               disable-over-current;
-                               status = "okay";
-                       };
-
-                       usb@02184200 { /* USB1 */
-                               status = "okay";
-                       };
-
-                       ethernet@02188000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_enet_1>;
-                               phy-mode = "rgmii";
-                               phy-reset-gpios = <&gpio3 23 0>;
-                               status = "okay";
-                       };
-
-                       usdhc@02198000 { /* uSDHC3 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_usdhc3_2>;
-                               cd-gpios = <&gpio7 0 0>;
-                               wp-gpios = <&gpio7 1 0>;
-                               vmmc-supply = <&reg_3p3v>;
-                               status = "okay";
-                       };
-
-                       usdhc@0219c000 { /* uSDHC4 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_usdhc4_2>;
-                               cd-gpios = <&gpio2 6 0>;
-                               wp-gpios = <&gpio2 7 0>;
-                               vmmc-supply = <&reg_3p3v>;
-                               status = "okay";
-                       };
-
-                       audmux@021d8000 {
-                               status = "okay";
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_audmux_1>;
-                       };
-
-                       uart2: serial@021e8000 {
-                               status = "okay";
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_uart2_1>;
-                       };
-
-                       i2c@021a0000 { /* I2C1 */
-                               status = "okay";
-                               clock-frequency = <100000>;
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_i2c1_1>;
-
-                               codec: sgtl5000@0a {
-                                       compatible = "fsl,sgtl5000";
-                                       reg = <0x0a>;
-                                       clocks = <&clks 169>;
-                                       VDDA-supply = <&reg_2p5v>;
-                                       VDDIO-supply = <&reg_3p3v>;
-                               };
-                       };
-               };
-       };
-
        regulators {
                compatible = "simple-bus";
 
                mux-ext-port = <4>;
        };
 };
+
+&ecspi1 {
+       fsl,spi-num-chipselects = <1>;
+       cs-gpios = <&gpio3 19 0>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_ecspi1_1>;
+       status = "okay";
+
+       flash: m25p80@0 {
+               compatible = "sst,sst25vf016b";
+               spi-max-frequency = <20000000>;
+               reg = <0>;
+       };
+};
+
+&ssi1 {
+       fsl,mode = "i2s-slave";
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               1450 0x80000000 /* MX6Q_PAD_NANDF_D6__GPIO_2_6 */
+                               1458 0x80000000 /* MX6Q_PAD_NANDF_D7__GPIO_2_7 */
+                               121  0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
+                               144  0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
+                               152  0x80000000 /* MX6Q_PAD_EIM_D23__GPIO_3_23 */
+                               1262 0x80000000 /* MX6Q_PAD_SD3_DAT5__GPIO_7_0 */
+                               1270 0x1f0b0    /* MX6Q_PAD_SD3_DAT4__GPIO_7_1 */
+                               953  0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */
+                       >;
+               };
+       };
+};
+
+&usbotg {
+       vbus-supply = <&reg_usb_otg_vbus>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usbotg_1>;
+       disable-over-current;
+       status = "okay";
+};
+
+&usbh1 {
+       status = "okay";
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_enet_1>;
+       phy-mode = "rgmii";
+       phy-reset-gpios = <&gpio3 23 0>;
+       status = "okay";
+};
+
+&usdhc3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc3_2>;
+       cd-gpios = <&gpio7 0 0>;
+       wp-gpios = <&gpio7 1 0>;
+       vmmc-supply = <&reg_3p3v>;
+       status = "okay";
+};
+
+&usdhc4 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc4_2>;
+       cd-gpios = <&gpio2 6 0>;
+       wp-gpios = <&gpio2 7 0>;
+       vmmc-supply = <&reg_3p3v>;
+       status = "okay";
+};
+
+&audmux {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_audmux_1>;
+};
+
+&uart2 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart2_1>;
+};
+
+&i2c1 {
+       status = "okay";
+       clock-frequency = <100000>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c1_1>;
+
+       codec: sgtl5000@0a {
+               compatible = "fsl,sgtl5000";
+               reg = <0x0a>;
+               clocks = <&clks 169>;
+               VDDA-supply = <&reg_2p5v>;
+               VDDIO-supply = <&reg_3p3v>;
+       };
+};
index a424025..2dea304 100644 (file)
                reg = <0x10000000 0x40000000>;
        };
 
-       soc {
-               aips-bus@02000000 { /* AIPS1 */
-                       spba-bus@02000000 {
-                               uart1: serial@02020000 {
-                                       pinctrl-names = "default";
-                                       pinctrl-0 = <&pinctrl_uart1_1>;
-                                       status = "okay";
-                               };
-                       };
-
-                       iomuxc@020e0000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_hog>;
-
-                               hog {
-                                       pinctrl_hog: hoggrp {
-                                               fsl,pins = <
-                                                       1004 0x80000000 /* MX6Q_PAD_GPIO_4__GPIO_1_4 */
-                                                       1012 0x80000000 /* MX6Q_PAD_GPIO_5__GPIO_1_5 */
-                                                       1402 0x80000000 /* MX6Q_PAD_NANDF_D0__GPIO_2_0 */
-                                                       1410 0x80000000 /* MX6Q_PAD_NANDF_D1__GPIO_2_1 */
-                                                       1418 0x80000000 /* MX6Q_PAD_NANDF_D2__GPIO_2_2 */
-                                                       1426 0x80000000 /* MX6Q_PAD_NANDF_D3__GPIO_2_3 */
-                                               >;
-                                       };
-                               };
-                       };
-               };
-
-               aips-bus@02100000 { /* AIPS2 */
-                       ethernet@02188000 {
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_enet_1>;
-                               phy-mode = "rgmii";
-                               status = "okay";
-                       };
-
-                       usdhc@02194000 { /* uSDHC2 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_usdhc2_1>;
-                               cd-gpios = <&gpio2 2 0>;
-                               wp-gpios = <&gpio2 3 0>;
-                               status = "okay";
-                       };
-
-                       usdhc@02198000 { /* uSDHC3 */
-                               pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_usdhc3_1>;
-                               cd-gpios = <&gpio2 0 0>;
-                               wp-gpios = <&gpio2 1 0>;
-                               status = "okay";
-                       };
-               };
-       };
-
        gpio-keys {
                compatible = "gpio-keys";
 
                };
        };
 };
+
+&uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1_1>;
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hog>;
+
+       hog {
+               pinctrl_hog: hoggrp {
+                       fsl,pins = <
+                               1004 0x80000000 /* MX6Q_PAD_GPIO_4__GPIO_1_4 */
+                               1012 0x80000000 /* MX6Q_PAD_GPIO_5__GPIO_1_5 */
+                               1402 0x80000000 /* MX6Q_PAD_NANDF_D0__GPIO_2_0 */
+                               1410 0x80000000 /* MX6Q_PAD_NANDF_D1__GPIO_2_1 */
+                               1418 0x80000000 /* MX6Q_PAD_NANDF_D2__GPIO_2_2 */
+                               1426 0x80000000 /* MX6Q_PAD_NANDF_D3__GPIO_2_3 */
+                       >;
+               };
+       };
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_enet_1>;
+       phy-mode = "rgmii";
+       status = "okay";
+};
+
+&usdhc2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc2_1>;
+       cd-gpios = <&gpio2 2 0>;
+       wp-gpios = <&gpio2 3 0>;
+       status = "okay";
+};
+
+&usdhc3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc3_1>;
+       cd-gpios = <&gpio2 0 0>;
+       wp-gpios = <&gpio2 1 0>;
+       status = "okay";
+};
index ff1205e..cba021e 100644 (file)
@@ -1,33 +1,16 @@
+
 /*
- * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2011 Linaro Ltd.
+ * Copyright 2013 Freescale Semiconductor, Inc.
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
  */
 
-/include/ "skeleton.dtsi"
+/include/ "imx6qdl.dtsi"
 
 / {
-       aliases {
-               serial0 = &uart1;
-               serial1 = &uart2;
-               serial2 = &uart3;
-               serial3 = &uart4;
-               serial4 = &uart5;
-               gpio0 = &gpio1;
-               gpio1 = &gpio2;
-               gpio2 = &gpio3;
-               gpio3 = &gpio4;
-               gpio4 = &gpio5;
-               gpio5 = &gpio6;
-               gpio6 = &gpio7;
-       };
-
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
                        next-level-cache = <&L2>;
                        operating-points = <
                                /* kHz    uV */
-                               792000  1100000
+                               1200000 1275000
+                               996000  1250000
+                               792000  1150000
                                396000  950000
-                               198000  850000
                        >;
                        clock-latency = <61036>; /* two CLK32 periods */
-                       cpu0-supply = <&reg_cpu>;
+                       clocks = <&clks 104>, <&clks 6>, <&clks 16>,
+                                <&clks 17>, <&clks 170>;
+                       clock-names = "arm", "pll2_pfd2_396m", "step",
+                                     "pll1_sw", "pll1_sys";
+                       arm-supply = <&reg_arm>;
+                       pu-supply = <&reg_pu>;
+                       soc-supply = <&reg_soc>;
                };
 
                cpu@1 {
                };
        };
 
-       intc: interrupt-controller@00a01000 {
-               compatible = "arm,cortex-a9-gic";
-               #interrupt-cells = <3>;
-               #address-cells = <1>;
-               #size-cells = <1>;
-               interrupt-controller;
-               reg = <0x00a01000 0x1000>,
-                     <0x00a00100 0x100>;
-       };
-
-       clocks {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               ckil {
-                       compatible = "fsl,imx-ckil", "fixed-clock";
-                       clock-frequency = <32768>;
-               };
-
-               ckih1 {
-                       compatible = "fsl,imx-ckih1", "fixed-clock";
-                       clock-frequency = <0>;
-               };
-
-               osc {
-                       compatible = "fsl,imx-osc", "fixed-clock";
-                       clock-frequency = <24000000>;
-               };
-       };
-
        soc {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "simple-bus";
-               interrupt-parent = <&intc>;
-               ranges;
-
-               dma-apbh@00110000 {
-                       compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
-                       reg = <0x00110000 0x2000>;
-                       clocks = <&clks 106>;
-               };
-
-               nfc: gpmi-nand@00112000 {
-                       compatible = "fsl,imx6q-gpmi-nand";
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-                       reg = <0x00112000 0x2000>, <0x00114000 0x2000>;
-                       reg-names = "gpmi-nand", "bch";
-                       interrupts = <0 13 0x04>, <0 15 0x04>;
-                       interrupt-names = "gpmi-dma", "bch";
-                       clocks = <&clks 152>, <&clks 153>, <&clks 151>,
-                                <&clks 150>, <&clks 149>;
-                       clock-names = "gpmi_io", "gpmi_apb", "gpmi_bch",
-                                     "gpmi_bch_apb", "per1_bch";
-                       fsl,gpmi-dma-channel = <0>;
-                       status = "disabled";
-               };
-
-               timer@00a00600 {
-                       compatible = "arm,cortex-a9-twd-timer";
-                       reg = <0x00a00600 0x20>;
-                       interrupts = <1 13 0xf01>;
-               };
-
-               L2: l2-cache@00a02000 {
-                       compatible = "arm,pl310-cache";
-                       reg = <0x00a02000 0x1000>;
-                       interrupts = <0 92 0x04>;
-                       cache-unified;
-                       cache-level = <2>;
-               };
-
                aips-bus@02000000 { /* AIPS1 */
-                       compatible = "fsl,aips-bus", "simple-bus";
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-                       reg = <0x02000000 0x100000>;
-                       ranges;
-
                        spba-bus@02000000 {
-                               compatible = "fsl,spba-bus", "simple-bus";
-                               #address-cells = <1>;
-                               #size-cells = <1>;
-                               reg = <0x02000000 0x40000>;
-                               ranges;
-
-                               spdif: spdif@02004000 {
-                                       reg = <0x02004000 0x4000>;
-                                       interrupts = <0 52 0x04>;
-                               };
-
-                               ecspi1: ecspi@02008000 {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
-                                       compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
-                                       reg = <0x02008000 0x4000>;
-                                       interrupts = <0 31 0x04>;
-                                       clocks = <&clks 112>, <&clks 112>;
-                                       clock-names = "ipg", "per";
-                                       status = "disabled";
-                               };
-
-                               ecspi2: ecspi@0200c000 {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
-                                       compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
-                                       reg = <0x0200c000 0x4000>;
-                                       interrupts = <0 32 0x04>;
-                                       clocks = <&clks 113>, <&clks 113>;
-                                       clock-names = "ipg", "per";
-                                       status = "disabled";
-                               };
-
-                               ecspi3: ecspi@02010000 {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
-                                       compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
-                                       reg = <0x02010000 0x4000>;
-                                       interrupts = <0 33 0x04>;
-                                       clocks = <&clks 114>, <&clks 114>;
-                                       clock-names = "ipg", "per";
-                                       status = "disabled";
-                               };
-
-                               ecspi4: ecspi@02014000 {
-                                       #address-cells = <1>;
-                                       #size-cells = <0>;
-                                       compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
-                                       reg = <0x02014000 0x4000>;
-                                       interrupts = <0 34 0x04>;
-                                       clocks = <&clks 115>, <&clks 115>;
-                                       clock-names = "ipg", "per";
-                                       status = "disabled";
-                               };
-
                                ecspi5: ecspi@02018000 {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                        clock-names = "ipg", "per";
                                        status = "disabled";
                                };
-
-                               uart1: serial@02020000 {
-                                       compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
-                                       reg = <0x02020000 0x4000>;
-                                       interrupts = <0 26 0x04>;
-                                       clocks = <&clks 160>, <&clks 161>;
-                                       clock-names = "ipg", "per";
-                                       status = "disabled";
-                               };
-
-                               esai: esai@02024000 {
-                                       reg = <0x02024000 0x4000>;
-                                       interrupts = <0 51 0x04>;
-                               };
-
-                               ssi1: ssi@02028000 {
-                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
-                                       reg = <0x02028000 0x4000>;
-                                       interrupts = <0 46 0x04>;
-                                       clocks = <&clks 178>;
-                                       fsl,fifo-depth = <15>;
-                                       fsl,ssi-dma-events = <38 37>;
-                                       status = "disabled";
-                               };
-
-                               ssi2: ssi@0202c000 {
-                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
-                                       reg = <0x0202c000 0x4000>;
-                                       interrupts = <0 47 0x04>;
-                                       clocks = <&clks 179>;
-                                       fsl,fifo-depth = <15>;
-                                       fsl,ssi-dma-events = <42 41>;
-                                       status = "disabled";
-                               };
-
-                               ssi3: ssi@02030000 {
-                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
-                                       reg = <0x02030000 0x4000>;
-                                       interrupts = <0 48 0x04>;
-                                       clocks = <&clks 180>;
-                                       fsl,fifo-depth = <15>;
-                                       fsl,ssi-dma-events = <46 45>;
-                                       status = "disabled";
-                               };
-
-                               asrc: asrc@02034000 {
-                                       reg = <0x02034000 0x4000>;
-                                       interrupts = <0 50 0x04>;
-                               };
-
-                               spba@0203c000 {
-                                       reg = <0x0203c000 0x4000>;
-                               };
-                       };
-
-                       vpu: vpu@02040000 {
-                               reg = <0x02040000 0x3c000>;
-                               interrupts = <0 3 0x04 0 12 0x04>;
-                       };
-
-                       aipstz@0207c000 { /* AIPSTZ1 */
-                               reg = <0x0207c000 0x4000>;
-                       };
-
-                       pwm1: pwm@02080000 {
-                               #pwm-cells = <2>;
-                               compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
-                               reg = <0x02080000 0x4000>;
-                               interrupts = <0 83 0x04>;
-                               clocks = <&clks 62>, <&clks 145>;
-                               clock-names = "ipg", "per";
-                       };
-
-                       pwm2: pwm@02084000 {
-                               #pwm-cells = <2>;
-                               compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
-                               reg = <0x02084000 0x4000>;
-                               interrupts = <0 84 0x04>;
-                               clocks = <&clks 62>, <&clks 146>;
-                               clock-names = "ipg", "per";
-                       };
-
-                       pwm3: pwm@02088000 {
-                               #pwm-cells = <2>;
-                               compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
-                               reg = <0x02088000 0x4000>;
-                               interrupts = <0 85 0x04>;
-                               clocks = <&clks 62>, <&clks 147>;
-                               clock-names = "ipg", "per";
-                       };
-
-                       pwm4: pwm@0208c000 {
-                               #pwm-cells = <2>;
-                               compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
-                               reg = <0x0208c000 0x4000>;
-                               interrupts = <0 86 0x04>;
-                               clocks = <&clks 62>, <&clks 148>;
-                               clock-names = "ipg", "per";
-                       };
-
-                       can1: flexcan@02090000 {
-                               reg = <0x02090000 0x4000>;
-                               interrupts = <0 110 0x04>;
-                       };
-
-                       can2: flexcan@02094000 {
-                               reg = <0x02094000 0x4000>;
-                               interrupts = <0 111 0x04>;
-                       };
-
-                       gpt: gpt@02098000 {
-                               compatible = "fsl,imx6q-gpt";
-                               reg = <0x02098000 0x4000>;
-                               interrupts = <0 55 0x04>;
-                       };
-
-                       gpio1: gpio@0209c000 {
-                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
-                               reg = <0x0209c000 0x4000>;
-                               interrupts = <0 66 0x04 0 67 0x04>;
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                       };
-
-                       gpio2: gpio@020a0000 {
-                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
-                               reg = <0x020a0000 0x4000>;
-                               interrupts = <0 68 0x04 0 69 0x04>;
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                       };
-
-                       gpio3: gpio@020a4000 {
-                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
-                               reg = <0x020a4000 0x4000>;
-                               interrupts = <0 70 0x04 0 71 0x04>;
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                       };
-
-                       gpio4: gpio@020a8000 {
-                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
-                               reg = <0x020a8000 0x4000>;
-                               interrupts = <0 72 0x04 0 73 0x04>;
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                       };
-
-                       gpio5: gpio@020ac000 {
-                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
-                               reg = <0x020ac000 0x4000>;
-                               interrupts = <0 74 0x04 0 75 0x04>;
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                       };
-
-                       gpio6: gpio@020b0000 {
-                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
-                               reg = <0x020b0000 0x4000>;
-                               interrupts = <0 76 0x04 0 77 0x04>;
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                       };
-
-                       gpio7: gpio@020b4000 {
-                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
-                               reg = <0x020b4000 0x4000>;
-                               interrupts = <0 78 0x04 0 79 0x04>;
-                               gpio-controller;
-                               #gpio-cells = <2>;
-                               interrupt-controller;
-                               #interrupt-cells = <2>;
-                       };
-
-                       kpp: kpp@020b8000 {
-                               reg = <0x020b8000 0x4000>;
-                               interrupts = <0 82 0x04>;
-                       };
-
-                       wdog1: wdog@020bc000 {
-                               compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
-                               reg = <0x020bc000 0x4000>;
-                               interrupts = <0 80 0x04>;
-                               clocks = <&clks 0>;
-                       };
-
-                       wdog2: wdog@020c0000 {
-                               compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
-                               reg = <0x020c0000 0x4000>;
-                               interrupts = <0 81 0x04>;
-                               clocks = <&clks 0>;
-                               status = "disabled";
-                       };
-
-                       clks: ccm@020c4000 {
-                               compatible = "fsl,imx6q-ccm";
-                               reg = <0x020c4000 0x4000>;
-                               interrupts = <0 87 0x04 0 88 0x04>;
-                               #clock-cells = <1>;
-                       };
-
-                       anatop: anatop@020c8000 {
-                               compatible = "fsl,imx6q-anatop", "syscon", "simple-bus";
-                               reg = <0x020c8000 0x1000>;
-                               interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
-
-                               regulator-1p1@110 {
-                                       compatible = "fsl,anatop-regulator";
-                                       regulator-name = "vdd1p1";
-                                       regulator-min-microvolt = <800000>;
-                                       regulator-max-microvolt = <1375000>;
-                                       regulator-always-on;
-                                       anatop-reg-offset = <0x110>;
-                                       anatop-vol-bit-shift = <8>;
-                                       anatop-vol-bit-width = <5>;
-                                       anatop-min-bit-val = <4>;
-                                       anatop-min-voltage = <800000>;
-                                       anatop-max-voltage = <1375000>;
-                               };
-
-                               regulator-3p0@120 {
-                                       compatible = "fsl,anatop-regulator";
-                                       regulator-name = "vdd3p0";
-                                       regulator-min-microvolt = <2800000>;
-                                       regulator-max-microvolt = <3150000>;
-                                       regulator-always-on;
-                                       anatop-reg-offset = <0x120>;
-                                       anatop-vol-bit-shift = <8>;
-                                       anatop-vol-bit-width = <5>;
-                                       anatop-min-bit-val = <0>;
-                                       anatop-min-voltage = <2625000>;
-                                       anatop-max-voltage = <3400000>;
-                               };
-
-                               regulator-2p5@130 {
-                                       compatible = "fsl,anatop-regulator";
-                                       regulator-name = "vdd2p5";
-                                       regulator-min-microvolt = <2000000>;
-                                       regulator-max-microvolt = <2750000>;
-                                       regulator-always-on;
-                                       anatop-reg-offset = <0x130>;
-                                       anatop-vol-bit-shift = <8>;
-                                       anatop-vol-bit-width = <5>;
-                                       anatop-min-bit-val = <0>;
-                                       anatop-min-voltage = <2000000>;
-                                       anatop-max-voltage = <2750000>;
-                               };
-
-                               reg_cpu: regulator-vddcore@140 {
-                                       compatible = "fsl,anatop-regulator";
-                                       regulator-name = "cpu";
-                                       regulator-min-microvolt = <725000>;
-                                       regulator-max-microvolt = <1450000>;
-                                       regulator-always-on;
-                                       anatop-reg-offset = <0x140>;
-                                       anatop-vol-bit-shift = <0>;
-                                       anatop-vol-bit-width = <5>;
-                                       anatop-min-bit-val = <1>;
-                                       anatop-min-voltage = <725000>;
-                                       anatop-max-voltage = <1450000>;
-                               };
-
-                               regulator-vddpu@140 {
-                                       compatible = "fsl,anatop-regulator";
-                                       regulator-name = "vddpu";
-                                       regulator-min-microvolt = <725000>;
-                                       regulator-max-microvolt = <1450000>;
-                                       regulator-always-on;
-                                       anatop-reg-offset = <0x140>;
-                                       anatop-vol-bit-shift = <9>;
-                                       anatop-vol-bit-width = <5>;
-                                       anatop-min-bit-val = <1>;
-                                       anatop-min-voltage = <725000>;
-                                       anatop-max-voltage = <1450000>;
-                               };
-
-                               regulator-vddsoc@140 {
-                                       compatible = "fsl,anatop-regulator";
-                                       regulator-name = "vddsoc";
-                                       regulator-min-microvolt = <725000>;
-                                       regulator-max-microvolt = <1450000>;
-                                       regulator-always-on;
-                                       anatop-reg-offset = <0x140>;
-                                       anatop-vol-bit-shift = <18>;
-                                       anatop-vol-bit-width = <5>;
-                                       anatop-min-bit-val = <1>;
-                                       anatop-min-voltage = <725000>;
-                                       anatop-max-voltage = <1450000>;
-                               };
-                       };
-
-                       usbphy1: usbphy@020c9000 {
-                               compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
-                               reg = <0x020c9000 0x1000>;
-                               interrupts = <0 44 0x04>;
-                               clocks = <&clks 182>;
-                       };
-
-                       usbphy2: usbphy@020ca000 {
-                               compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
-                               reg = <0x020ca000 0x1000>;
-                               interrupts = <0 45 0x04>;
-                               clocks = <&clks 183>;
-                       };
-
-                       snvs@020cc000 {
-                               compatible = "fsl,sec-v4.0-mon", "simple-bus";
-                               #address-cells = <1>;
-                               #size-cells = <1>;
-                               ranges = <0 0x020cc000 0x4000>;
-
-                               snvs-rtc-lp@34 {
-                                       compatible = "fsl,sec-v4.0-mon-rtc-lp";
-                                       reg = <0x34 0x58>;
-                                       interrupts = <0 19 0x04 0 20 0x04>;
-                               };
-                       };
-
-                       epit1: epit@020d0000 { /* EPIT1 */
-                               reg = <0x020d0000 0x4000>;
-                               interrupts = <0 56 0x04>;
-                       };
-
-                       epit2: epit@020d4000 { /* EPIT2 */
-                               reg = <0x020d4000 0x4000>;
-                               interrupts = <0 57 0x04>;
-                       };
-
-                       src: src@020d8000 {
-                               compatible = "fsl,imx6q-src";
-                               reg = <0x020d8000 0x4000>;
-                               interrupts = <0 91 0x04 0 96 0x04>;
-                       };
-
-                       gpc: gpc@020dc000 {
-                               compatible = "fsl,imx6q-gpc";
-                               reg = <0x020dc000 0x4000>;
-                               interrupts = <0 89 0x04 0 90 0x04>;
-                       };
-
-                       gpr: iomuxc-gpr@020e0000 {
-                               compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
-                               reg = <0x020e0000 0x38>;
                        };
 
                        iomuxc: iomuxc@020e0000 {
                                        };
                                };
                        };
-
-                       dcic1: dcic@020e4000 {
-                               reg = <0x020e4000 0x4000>;
-                               interrupts = <0 124 0x04>;
-                       };
-
-                       dcic2: dcic@020e8000 {
-                               reg = <0x020e8000 0x4000>;
-                               interrupts = <0 125 0x04>;
-                       };
-
-                       sdma: sdma@020ec000 {
-                               compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
-                               reg = <0x020ec000 0x4000>;
-                               interrupts = <0 2 0x04>;
-                               clocks = <&clks 155>, <&clks 155>;
-                               clock-names = "ipg", "ahb";
-                               fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q-to1.bin";
-                       };
-               };
-
-               aips-bus@02100000 { /* AIPS2 */
-                       compatible = "fsl,aips-bus", "simple-bus";
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-                       reg = <0x02100000 0x100000>;
-                       ranges;
-
-                       caam@02100000 {
-                               reg = <0x02100000 0x40000>;
-                               interrupts = <0 105 0x04 0 106 0x04>;
-                       };
-
-                       aipstz@0217c000 { /* AIPSTZ2 */
-                               reg = <0x0217c000 0x4000>;
-                       };
-
-                       usbotg: usb@02184000 {
-                               compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
-                               reg = <0x02184000 0x200>;
-                               interrupts = <0 43 0x04>;
-                               clocks = <&clks 162>;
-                               fsl,usbphy = <&usbphy1>;
-                               fsl,usbmisc = <&usbmisc 0>;
-                               status = "disabled";
-                       };
-
-                       usbh1: usb@02184200 {
-                               compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
-                               reg = <0x02184200 0x200>;
-                               interrupts = <0 40 0x04>;
-                               clocks = <&clks 162>;
-                               fsl,usbphy = <&usbphy2>;
-                               fsl,usbmisc = <&usbmisc 1>;
-                               status = "disabled";
-                       };
-
-                       usbh2: usb@02184400 {
-                               compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
-                               reg = <0x02184400 0x200>;
-                               interrupts = <0 41 0x04>;
-                               clocks = <&clks 162>;
-                               fsl,usbmisc = <&usbmisc 2>;
-                               status = "disabled";
-                       };
-
-                       usbh3: usb@02184600 {
-                               compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
-                               reg = <0x02184600 0x200>;
-                               interrupts = <0 42 0x04>;
-                               clocks = <&clks 162>;
-                               fsl,usbmisc = <&usbmisc 3>;
-                               status = "disabled";
-                       };
-
-                       usbmisc: usbmisc: usbmisc@02184800 {
-                               #index-cells = <1>;
-                               compatible = "fsl,imx6q-usbmisc";
-                               reg = <0x02184800 0x200>;
-                               clocks = <&clks 162>;
-                       };
-
-                       fec: ethernet@02188000 {
-                               compatible = "fsl,imx6q-fec";
-                               reg = <0x02188000 0x4000>;
-                               interrupts = <0 118 0x04 0 119 0x04>;
-                               clocks = <&clks 117>, <&clks 117>, <&clks 190>;
-                               clock-names = "ipg", "ahb", "ptp";
-                               status = "disabled";
-                       };
-
-                       mlb@0218c000 {
-                               reg = <0x0218c000 0x4000>;
-                               interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
-                       };
-
-                       usdhc1: usdhc@02190000 {
-                               compatible = "fsl,imx6q-usdhc";
-                               reg = <0x02190000 0x4000>;
-                               interrupts = <0 22 0x04>;
-                               clocks = <&clks 163>, <&clks 163>, <&clks 163>;
-                               clock-names = "ipg", "ahb", "per";
-                               bus-width = <4>;
-                               status = "disabled";
-                       };
-
-                       usdhc2: usdhc@02194000 {
-                               compatible = "fsl,imx6q-usdhc";
-                               reg = <0x02194000 0x4000>;
-                               interrupts = <0 23 0x04>;
-                               clocks = <&clks 164>, <&clks 164>, <&clks 164>;
-                               clock-names = "ipg", "ahb", "per";
-                               bus-width = <4>;
-                               status = "disabled";
-                       };
-
-                       usdhc3: usdhc@02198000 {
-                               compatible = "fsl,imx6q-usdhc";
-                               reg = <0x02198000 0x4000>;
-                               interrupts = <0 24 0x04>;
-                               clocks = <&clks 165>, <&clks 165>, <&clks 165>;
-                               clock-names = "ipg", "ahb", "per";
-                               bus-width = <4>;
-                               status = "disabled";
-                       };
-
-                       usdhc4: usdhc@0219c000 {
-                               compatible = "fsl,imx6q-usdhc";
-                               reg = <0x0219c000 0x4000>;
-                               interrupts = <0 25 0x04>;
-                               clocks = <&clks 166>, <&clks 166>, <&clks 166>;
-                               clock-names = "ipg", "ahb", "per";
-                               bus-width = <4>;
-                               status = "disabled";
-                       };
-
-                       i2c1: i2c@021a0000 {
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-                               compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
-                               reg = <0x021a0000 0x4000>;
-                               interrupts = <0 36 0x04>;
-                               clocks = <&clks 125>;
-                               status = "disabled";
-                       };
-
-                       i2c2: i2c@021a4000 {
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-                               compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
-                               reg = <0x021a4000 0x4000>;
-                               interrupts = <0 37 0x04>;
-                               clocks = <&clks 126>;
-                               status = "disabled";
-                       };
-
-                       i2c3: i2c@021a8000 {
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-                               compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
-                               reg = <0x021a8000 0x4000>;
-                               interrupts = <0 38 0x04>;
-                               clocks = <&clks 127>;
-                               status = "disabled";
-                       };
-
-                       romcp@021ac000 {
-                               reg = <0x021ac000 0x4000>;
-                       };
-
-                       mmdc0: mmdc@021b0000 { /* MMDC0 */
-                               compatible = "fsl,imx6q-mmdc";
-                               reg = <0x021b0000 0x4000>;
-                       };
-
-                       mmdc1: mmdc@021b4000 { /* MMDC1 */
-                               reg = <0x021b4000 0x4000>;
-                       };
-
-                       weim@021b8000 {
-                               reg = <0x021b8000 0x4000>;
-                               interrupts = <0 14 0x04>;
-                       };
-
-                       ocotp@021bc000 {
-                               reg = <0x021bc000 0x4000>;
-                       };
-
-                       ocotp@021c0000 {
-                               reg = <0x021c0000 0x4000>;
-                               interrupts = <0 21 0x04>;
-                       };
-
-                       tzasc@021d0000 { /* TZASC1 */
-                               reg = <0x021d0000 0x4000>;
-                               interrupts = <0 108 0x04>;
-                       };
-
-                       tzasc@021d4000 { /* TZASC2 */
-                               reg = <0x021d4000 0x4000>;
-                               interrupts = <0 109 0x04>;
-                       };
-
-                       audmux: audmux@021d8000 {
-                               compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
-                               reg = <0x021d8000 0x4000>;
-                               status = "disabled";
-                       };
-
-                       mipi@021dc000 { /* MIPI-CSI */
-                               reg = <0x021dc000 0x4000>;
-                       };
-
-                       mipi@021e0000 { /* MIPI-DSI */
-                               reg = <0x021e0000 0x4000>;
-                       };
-
-                       vdoa@021e4000 {
-                               reg = <0x021e4000 0x4000>;
-                               interrupts = <0 18 0x04>;
-                       };
-
-                       uart2: serial@021e8000 {
-                               compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
-                               reg = <0x021e8000 0x4000>;
-                               interrupts = <0 27 0x04>;
-                               clocks = <&clks 160>, <&clks 161>;
-                               clock-names = "ipg", "per";
-                               status = "disabled";
-                       };
-
-                       uart3: serial@021ec000 {
-                               compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
-                               reg = <0x021ec000 0x4000>;
-                               interrupts = <0 28 0x04>;
-                               clocks = <&clks 160>, <&clks 161>;
-                               clock-names = "ipg", "per";
-                               status = "disabled";
-                       };
-
-                       uart4: serial@021f0000 {
-                               compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
-                               reg = <0x021f0000 0x4000>;
-                               interrupts = <0 29 0x04>;
-                               clocks = <&clks 160>, <&clks 161>;
-                               clock-names = "ipg", "per";
-                               status = "disabled";
-                       };
-
-                       uart5: serial@021f4000 {
-                               compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
-                               reg = <0x021f4000 0x4000>;
-                               interrupts = <0 30 0x04>;
-                               clocks = <&clks 160>, <&clks 161>;
-                               clock-names = "ipg", "per";
-                               status = "disabled";
-                       };
-               };
-
-               ipu1: ipu@02400000 {
-                       #crtc-cells = <1>;
-                       compatible = "fsl,imx6q-ipu";
-                       reg = <0x02400000 0x400000>;
-                       interrupts = <0 6 0x4 0 5 0x4>;
-                       clocks = <&clks 130>, <&clks 131>, <&clks 132>;
-                       clock-names = "bus", "di0", "di1";
                };
 
                ipu2: ipu@02800000 {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
new file mode 100644 (file)
index 0000000..06ec460
--- /dev/null
@@ -0,0 +1,800 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+       aliases {
+               serial0 = &uart1;
+               serial1 = &uart2;
+               serial2 = &uart3;
+               serial3 = &uart4;
+               serial4 = &uart5;
+               gpio0 = &gpio1;
+               gpio1 = &gpio2;
+               gpio2 = &gpio3;
+               gpio3 = &gpio4;
+               gpio4 = &gpio5;
+               gpio5 = &gpio6;
+               gpio6 = &gpio7;
+       };
+
+       intc: interrupt-controller@00a01000 {
+               compatible = "arm,cortex-a9-gic";
+               #interrupt-cells = <3>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+               interrupt-controller;
+               reg = <0x00a01000 0x1000>,
+                     <0x00a00100 0x100>;
+       };
+
+       clocks {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ckil {
+                       compatible = "fsl,imx-ckil", "fixed-clock";
+                       clock-frequency = <32768>;
+               };
+
+               ckih1 {
+                       compatible = "fsl,imx-ckih1", "fixed-clock";
+                       clock-frequency = <0>;
+               };
+
+               osc {
+                       compatible = "fsl,imx-osc", "fixed-clock";
+                       clock-frequency = <24000000>;
+               };
+       };
+
+       soc {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               interrupt-parent = <&intc>;
+               ranges;
+
+               dma-apbh@00110000 {
+                       compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+                       reg = <0x00110000 0x2000>;
+                       clocks = <&clks 106>;
+               };
+
+               gpmi: gpmi-nand@00112000 {
+                       compatible = "fsl,imx6q-gpmi-nand";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x00112000 0x2000>, <0x00114000 0x2000>;
+                       reg-names = "gpmi-nand", "bch";
+                       interrupts = <0 13 0x04>, <0 15 0x04>;
+                       interrupt-names = "gpmi-dma", "bch";
+                       clocks = <&clks 152>, <&clks 153>, <&clks 151>,
+                                <&clks 150>, <&clks 149>;
+                       clock-names = "gpmi_io", "gpmi_apb", "gpmi_bch",
+                                     "gpmi_bch_apb", "per1_bch";
+                       fsl,gpmi-dma-channel = <0>;
+                       status = "disabled";
+               };
+
+               timer@00a00600 {
+                       compatible = "arm,cortex-a9-twd-timer";
+                       reg = <0x00a00600 0x20>;
+                       interrupts = <1 13 0xf01>;
+               };
+
+               L2: l2-cache@00a02000 {
+                       compatible = "arm,pl310-cache";
+                       reg = <0x00a02000 0x1000>;
+                       interrupts = <0 92 0x04>;
+                       cache-unified;
+                       cache-level = <2>;
+               };
+
+               aips-bus@02000000 { /* AIPS1 */
+                       compatible = "fsl,aips-bus", "simple-bus";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x02000000 0x100000>;
+                       ranges;
+
+                       spba-bus@02000000 {
+                               compatible = "fsl,spba-bus", "simple-bus";
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               reg = <0x02000000 0x40000>;
+                               ranges;
+
+                               spdif: spdif@02004000 {
+                                       reg = <0x02004000 0x4000>;
+                                       interrupts = <0 52 0x04>;
+                               };
+
+                               ecspi1: ecspi@02008000 {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+                                       reg = <0x02008000 0x4000>;
+                                       interrupts = <0 31 0x04>;
+                                       clocks = <&clks 112>, <&clks 112>;
+                                       clock-names = "ipg", "per";
+                                       status = "disabled";
+                               };
+
+                               ecspi2: ecspi@0200c000 {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+                                       reg = <0x0200c000 0x4000>;
+                                       interrupts = <0 32 0x04>;
+                                       clocks = <&clks 113>, <&clks 113>;
+                                       clock-names = "ipg", "per";
+                                       status = "disabled";
+                               };
+
+                               ecspi3: ecspi@02010000 {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+                                       reg = <0x02010000 0x4000>;
+                                       interrupts = <0 33 0x04>;
+                                       clocks = <&clks 114>, <&clks 114>;
+                                       clock-names = "ipg", "per";
+                                       status = "disabled";
+                               };
+
+                               ecspi4: ecspi@02014000 {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       compatible = "fsl,imx6q-ecspi", "fsl,imx51-ecspi";
+                                       reg = <0x02014000 0x4000>;
+                                       interrupts = <0 34 0x04>;
+                                       clocks = <&clks 115>, <&clks 115>;
+                                       clock-names = "ipg", "per";
+                                       status = "disabled";
+                               };
+
+                               uart1: serial@02020000 {
+                                       compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+                                       reg = <0x02020000 0x4000>;
+                                       interrupts = <0 26 0x04>;
+                                       clocks = <&clks 160>, <&clks 161>;
+                                       clock-names = "ipg", "per";
+                                       status = "disabled";
+                               };
+
+                               esai: esai@02024000 {
+                                       reg = <0x02024000 0x4000>;
+                                       interrupts = <0 51 0x04>;
+                               };
+
+                               ssi1: ssi@02028000 {
+                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
+                                       reg = <0x02028000 0x4000>;
+                                       interrupts = <0 46 0x04>;
+                                       clocks = <&clks 178>;
+                                       fsl,fifo-depth = <15>;
+                                       fsl,ssi-dma-events = <38 37>;
+                                       status = "disabled";
+                               };
+
+                               ssi2: ssi@0202c000 {
+                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
+                                       reg = <0x0202c000 0x4000>;
+                                       interrupts = <0 47 0x04>;
+                                       clocks = <&clks 179>;
+                                       fsl,fifo-depth = <15>;
+                                       fsl,ssi-dma-events = <42 41>;
+                                       status = "disabled";
+                               };
+
+                               ssi3: ssi@02030000 {
+                                       compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
+                                       reg = <0x02030000 0x4000>;
+                                       interrupts = <0 48 0x04>;
+                                       clocks = <&clks 180>;
+                                       fsl,fifo-depth = <15>;
+                                       fsl,ssi-dma-events = <46 45>;
+                                       status = "disabled";
+                               };
+
+                               asrc: asrc@02034000 {
+                                       reg = <0x02034000 0x4000>;
+                                       interrupts = <0 50 0x04>;
+                               };
+
+                               spba@0203c000 {
+                                       reg = <0x0203c000 0x4000>;
+                               };
+                       };
+
+                       vpu: vpu@02040000 {
+                               reg = <0x02040000 0x3c000>;
+                               interrupts = <0 3 0x04 0 12 0x04>;
+                       };
+
+                       aipstz@0207c000 { /* AIPSTZ1 */
+                               reg = <0x0207c000 0x4000>;
+                       };
+
+                       pwm1: pwm@02080000 {
+                               #pwm-cells = <2>;
+                               compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+                               reg = <0x02080000 0x4000>;
+                               interrupts = <0 83 0x04>;
+                               clocks = <&clks 62>, <&clks 145>;
+                               clock-names = "ipg", "per";
+                       };
+
+                       pwm2: pwm@02084000 {
+                               #pwm-cells = <2>;
+                               compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+                               reg = <0x02084000 0x4000>;
+                               interrupts = <0 84 0x04>;
+                               clocks = <&clks 62>, <&clks 146>;
+                               clock-names = "ipg", "per";
+                       };
+
+                       pwm3: pwm@02088000 {
+                               #pwm-cells = <2>;
+                               compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+                               reg = <0x02088000 0x4000>;
+                               interrupts = <0 85 0x04>;
+                               clocks = <&clks 62>, <&clks 147>;
+                               clock-names = "ipg", "per";
+                       };
+
+                       pwm4: pwm@0208c000 {
+                               #pwm-cells = <2>;
+                               compatible = "fsl,imx6q-pwm", "fsl,imx27-pwm";
+                               reg = <0x0208c000 0x4000>;
+                               interrupts = <0 86 0x04>;
+                               clocks = <&clks 62>, <&clks 148>;
+                               clock-names = "ipg", "per";
+                       };
+
+                       can1: flexcan@02090000 {
+                               reg = <0x02090000 0x4000>;
+                               interrupts = <0 110 0x04>;
+                       };
+
+                       can2: flexcan@02094000 {
+                               reg = <0x02094000 0x4000>;
+                               interrupts = <0 111 0x04>;
+                       };
+
+                       gpt: gpt@02098000 {
+                               compatible = "fsl,imx6q-gpt";
+                               reg = <0x02098000 0x4000>;
+                               interrupts = <0 55 0x04>;
+                       };
+
+                       gpio1: gpio@0209c000 {
+                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+                               reg = <0x0209c000 0x4000>;
+                               interrupts = <0 66 0x04 0 67 0x04>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                       };
+
+                       gpio2: gpio@020a0000 {
+                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+                               reg = <0x020a0000 0x4000>;
+                               interrupts = <0 68 0x04 0 69 0x04>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                       };
+
+                       gpio3: gpio@020a4000 {
+                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+                               reg = <0x020a4000 0x4000>;
+                               interrupts = <0 70 0x04 0 71 0x04>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                       };
+
+                       gpio4: gpio@020a8000 {
+                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+                               reg = <0x020a8000 0x4000>;
+                               interrupts = <0 72 0x04 0 73 0x04>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                       };
+
+                       gpio5: gpio@020ac000 {
+                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+                               reg = <0x020ac000 0x4000>;
+                               interrupts = <0 74 0x04 0 75 0x04>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                       };
+
+                       gpio6: gpio@020b0000 {
+                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+                               reg = <0x020b0000 0x4000>;
+                               interrupts = <0 76 0x04 0 77 0x04>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                       };
+
+                       gpio7: gpio@020b4000 {
+                               compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
+                               reg = <0x020b4000 0x4000>;
+                               interrupts = <0 78 0x04 0 79 0x04>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               interrupt-controller;
+                               #interrupt-cells = <2>;
+                       };
+
+                       kpp: kpp@020b8000 {
+                               reg = <0x020b8000 0x4000>;
+                               interrupts = <0 82 0x04>;
+                       };
+
+                       wdog1: wdog@020bc000 {
+                               compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
+                               reg = <0x020bc000 0x4000>;
+                               interrupts = <0 80 0x04>;
+                               clocks = <&clks 0>;
+                       };
+
+                       wdog2: wdog@020c0000 {
+                               compatible = "fsl,imx6q-wdt", "fsl,imx21-wdt";
+                               reg = <0x020c0000 0x4000>;
+                               interrupts = <0 81 0x04>;
+                               clocks = <&clks 0>;
+                               status = "disabled";
+                       };
+
+                       clks: ccm@020c4000 {
+                               compatible = "fsl,imx6q-ccm";
+                               reg = <0x020c4000 0x4000>;
+                               interrupts = <0 87 0x04 0 88 0x04>;
+                               #clock-cells = <1>;
+                       };
+
+                       anatop: anatop@020c8000 {
+                               compatible = "fsl,imx6q-anatop", "syscon", "simple-bus";
+                               reg = <0x020c8000 0x1000>;
+                               interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
+
+                               regulator-1p1@110 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vdd1p1";
+                                       regulator-min-microvolt = <800000>;
+                                       regulator-max-microvolt = <1375000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x110>;
+                                       anatop-vol-bit-shift = <8>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <4>;
+                                       anatop-min-voltage = <800000>;
+                                       anatop-max-voltage = <1375000>;
+                               };
+
+                               regulator-3p0@120 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vdd3p0";
+                                       regulator-min-microvolt = <2800000>;
+                                       regulator-max-microvolt = <3150000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x120>;
+                                       anatop-vol-bit-shift = <8>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <0>;
+                                       anatop-min-voltage = <2625000>;
+                                       anatop-max-voltage = <3400000>;
+                               };
+
+                               regulator-2p5@130 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vdd2p5";
+                                       regulator-min-microvolt = <2000000>;
+                                       regulator-max-microvolt = <2750000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x130>;
+                                       anatop-vol-bit-shift = <8>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-min-bit-val = <0>;
+                                       anatop-min-voltage = <2000000>;
+                                       anatop-max-voltage = <2750000>;
+                               };
+
+                               reg_arm: regulator-vddcore@140 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "cpu";
+                                       regulator-min-microvolt = <725000>;
+                                       regulator-max-microvolt = <1450000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x140>;
+                                       anatop-vol-bit-shift = <0>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-delay-reg-offset = <0x170>;
+                                       anatop-delay-bit-shift = <24>;
+                                       anatop-delay-bit-width = <2>;
+                                       anatop-min-bit-val = <1>;
+                                       anatop-min-voltage = <725000>;
+                                       anatop-max-voltage = <1450000>;
+                               };
+
+                               reg_pu: regulator-vddpu@140 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vddpu";
+                                       regulator-min-microvolt = <725000>;
+                                       regulator-max-microvolt = <1450000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x140>;
+                                       anatop-vol-bit-shift = <9>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-delay-reg-offset = <0x170>;
+                                       anatop-delay-bit-shift = <26>;
+                                       anatop-delay-bit-width = <2>;
+                                       anatop-min-bit-val = <1>;
+                                       anatop-min-voltage = <725000>;
+                                       anatop-max-voltage = <1450000>;
+                               };
+
+                               reg_soc: regulator-vddsoc@140 {
+                                       compatible = "fsl,anatop-regulator";
+                                       regulator-name = "vddsoc";
+                                       regulator-min-microvolt = <725000>;
+                                       regulator-max-microvolt = <1450000>;
+                                       regulator-always-on;
+                                       anatop-reg-offset = <0x140>;
+                                       anatop-vol-bit-shift = <18>;
+                                       anatop-vol-bit-width = <5>;
+                                       anatop-delay-reg-offset = <0x170>;
+                                       anatop-delay-bit-shift = <28>;
+                                       anatop-delay-bit-width = <2>;
+                                       anatop-min-bit-val = <1>;
+                                       anatop-min-voltage = <725000>;
+                                       anatop-max-voltage = <1450000>;
+                               };
+                       };
+
+                       usbphy1: usbphy@020c9000 {
+                               compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+                               reg = <0x020c9000 0x1000>;
+                               interrupts = <0 44 0x04>;
+                               clocks = <&clks 182>;
+                       };
+
+                       usbphy2: usbphy@020ca000 {
+                               compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
+                               reg = <0x020ca000 0x1000>;
+                               interrupts = <0 45 0x04>;
+                               clocks = <&clks 183>;
+                       };
+
+                       snvs@020cc000 {
+                               compatible = "fsl,sec-v4.0-mon", "simple-bus";
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               ranges = <0 0x020cc000 0x4000>;
+
+                               snvs-rtc-lp@34 {
+                                       compatible = "fsl,sec-v4.0-mon-rtc-lp";
+                                       reg = <0x34 0x58>;
+                                       interrupts = <0 19 0x04 0 20 0x04>;
+                               };
+                       };
+
+                       epit1: epit@020d0000 { /* EPIT1 */
+                               reg = <0x020d0000 0x4000>;
+                               interrupts = <0 56 0x04>;
+                       };
+
+                       epit2: epit@020d4000 { /* EPIT2 */
+                               reg = <0x020d4000 0x4000>;
+                               interrupts = <0 57 0x04>;
+                       };
+
+                       src: src@020d8000 {
+                               compatible = "fsl,imx6q-src";
+                               reg = <0x020d8000 0x4000>;
+                               interrupts = <0 91 0x04 0 96 0x04>;
+                       };
+
+                       gpc: gpc@020dc000 {
+                               compatible = "fsl,imx6q-gpc";
+                               reg = <0x020dc000 0x4000>;
+                               interrupts = <0 89 0x04 0 90 0x04>;
+                       };
+
+                       gpr: iomuxc-gpr@020e0000 {
+                               compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
+                               reg = <0x020e0000 0x38>;
+                       };
+
+                       dcic1: dcic@020e4000 {
+                               reg = <0x020e4000 0x4000>;
+                               interrupts = <0 124 0x04>;
+                       };
+
+                       dcic2: dcic@020e8000 {
+                               reg = <0x020e8000 0x4000>;
+                               interrupts = <0 125 0x04>;
+                       };
+
+                       sdma: sdma@020ec000 {
+                               compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
+                               reg = <0x020ec000 0x4000>;
+                               interrupts = <0 2 0x04>;
+                               clocks = <&clks 155>, <&clks 155>;
+                               clock-names = "ipg", "ahb";
+                               fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q.bin";
+                       };
+               };
+
+               aips-bus@02100000 { /* AIPS2 */
+                       compatible = "fsl,aips-bus", "simple-bus";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       reg = <0x02100000 0x100000>;
+                       ranges;
+
+                       caam@02100000 {
+                               reg = <0x02100000 0x40000>;
+                               interrupts = <0 105 0x04 0 106 0x04>;
+                       };
+
+                       aipstz@0217c000 { /* AIPSTZ2 */
+                               reg = <0x0217c000 0x4000>;
+                       };
+
+                       usbotg: usb@02184000 {
+                               compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+                               reg = <0x02184000 0x200>;
+                               interrupts = <0 43 0x04>;
+                               clocks = <&clks 162>;
+                               fsl,usbphy = <&usbphy1>;
+                               fsl,usbmisc = <&usbmisc 0>;
+                               status = "disabled";
+                       };
+
+                       usbh1: usb@02184200 {
+                               compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+                               reg = <0x02184200 0x200>;
+                               interrupts = <0 40 0x04>;
+                               clocks = <&clks 162>;
+                               fsl,usbphy = <&usbphy2>;
+                               fsl,usbmisc = <&usbmisc 1>;
+                               status = "disabled";
+                       };
+
+                       usbh2: usb@02184400 {
+                               compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+                               reg = <0x02184400 0x200>;
+                               interrupts = <0 41 0x04>;
+                               clocks = <&clks 162>;
+                               fsl,usbmisc = <&usbmisc 2>;
+                               status = "disabled";
+                       };
+
+                       usbh3: usb@02184600 {
+                               compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+                               reg = <0x02184600 0x200>;
+                               interrupts = <0 42 0x04>;
+                               clocks = <&clks 162>;
+                               fsl,usbmisc = <&usbmisc 3>;
+                               status = "disabled";
+                       };
+
+                       usbmisc: usbmisc@02184800 {
+                               #index-cells = <1>;
+                               compatible = "fsl,imx6q-usbmisc";
+                               reg = <0x02184800 0x200>;
+                               clocks = <&clks 162>;
+                       };
+
+                       fec: ethernet@02188000 {
+                               compatible = "fsl,imx6q-fec";
+                               reg = <0x02188000 0x4000>;
+                               interrupts = <0 118 0x04 0 119 0x04>;
+                               clocks = <&clks 117>, <&clks 117>, <&clks 190>;
+                               clock-names = "ipg", "ahb", "ptp";
+                               status = "disabled";
+                       };
+
+                       mlb@0218c000 {
+                               reg = <0x0218c000 0x4000>;
+                               interrupts = <0 53 0x04 0 117 0x04 0 126 0x04>;
+                       };
+
+                       usdhc1: usdhc@02190000 {
+                               compatible = "fsl,imx6q-usdhc";
+                               reg = <0x02190000 0x4000>;
+                               interrupts = <0 22 0x04>;
+                               clocks = <&clks 163>, <&clks 163>, <&clks 163>;
+                               clock-names = "ipg", "ahb", "per";
+                               bus-width = <4>;
+                               status = "disabled";
+                       };
+
+                       usdhc2: usdhc@02194000 {
+                               compatible = "fsl,imx6q-usdhc";
+                               reg = <0x02194000 0x4000>;
+                               interrupts = <0 23 0x04>;
+                               clocks = <&clks 164>, <&clks 164>, <&clks 164>;
+                               clock-names = "ipg", "ahb", "per";
+                               bus-width = <4>;
+                               status = "disabled";
+                       };
+
+                       usdhc3: usdhc@02198000 {
+                               compatible = "fsl,imx6q-usdhc";
+                               reg = <0x02198000 0x4000>;
+                               interrupts = <0 24 0x04>;
+                               clocks = <&clks 165>, <&clks 165>, <&clks 165>;
+                               clock-names = "ipg", "ahb", "per";
+                               bus-width = <4>;
+                               status = "disabled";
+                       };
+
+                       usdhc4: usdhc@0219c000 {
+                               compatible = "fsl,imx6q-usdhc";
+                               reg = <0x0219c000 0x4000>;
+                               interrupts = <0 25 0x04>;
+                               clocks = <&clks 166>, <&clks 166>, <&clks 166>;
+                               clock-names = "ipg", "ahb", "per";
+                               bus-width = <4>;
+                               status = "disabled";
+                       };
+
+                       i2c1: i2c@021a0000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+                               reg = <0x021a0000 0x4000>;
+                               interrupts = <0 36 0x04>;
+                               clocks = <&clks 125>;
+                               status = "disabled";
+                       };
+
+                       i2c2: i2c@021a4000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+                               reg = <0x021a4000 0x4000>;
+                               interrupts = <0 37 0x04>;
+                               clocks = <&clks 126>;
+                               status = "disabled";
+                       };
+
+                       i2c3: i2c@021a8000 {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               compatible = "fsl,imx6q-i2c", "fsl,imx21-i2c";
+                               reg = <0x021a8000 0x4000>;
+                               interrupts = <0 38 0x04>;
+                               clocks = <&clks 127>;
+                               status = "disabled";
+                       };
+
+                       romcp@021ac000 {
+                               reg = <0x021ac000 0x4000>;
+                       };
+
+                       mmdc0: mmdc@021b0000 { /* MMDC0 */
+                               compatible = "fsl,imx6q-mmdc";
+                               reg = <0x021b0000 0x4000>;
+                       };
+
+                       mmdc1: mmdc@021b4000 { /* MMDC1 */
+                               reg = <0x021b4000 0x4000>;
+                       };
+
+                       weim@021b8000 {
+                               reg = <0x021b8000 0x4000>;
+                               interrupts = <0 14 0x04>;
+                       };
+
+                       ocotp@021bc000 {
+                               compatible = "fsl,imx6q-ocotp";
+                               reg = <0x021bc000 0x4000>;
+                       };
+
+                       ocotp@021c0000 {
+                               reg = <0x021c0000 0x4000>;
+                               interrupts = <0 21 0x04>;
+                       };
+
+                       tzasc@021d0000 { /* TZASC1 */
+                               reg = <0x021d0000 0x4000>;
+                               interrupts = <0 108 0x04>;
+                       };
+
+                       tzasc@021d4000 { /* TZASC2 */
+                               reg = <0x021d4000 0x4000>;
+                               interrupts = <0 109 0x04>;
+                       };
+
+                       audmux: audmux@021d8000 {
+                               compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
+                               reg = <0x021d8000 0x4000>;
+                               status = "disabled";
+                       };
+
+                       mipi@021dc000 { /* MIPI-CSI */
+                               reg = <0x021dc000 0x4000>;
+                       };
+
+                       mipi@021e0000 { /* MIPI-DSI */
+                               reg = <0x021e0000 0x4000>;
+                       };
+
+                       vdoa@021e4000 {
+                               reg = <0x021e4000 0x4000>;
+                               interrupts = <0 18 0x04>;
+                       };
+
+                       uart2: serial@021e8000 {
+                               compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+                               reg = <0x021e8000 0x4000>;
+                               interrupts = <0 27 0x04>;
+                               clocks = <&clks 160>, <&clks 161>;
+                               clock-names = "ipg", "per";
+                               status = "disabled";
+                       };
+
+                       uart3: serial@021ec000 {
+                               compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+                               reg = <0x021ec000 0x4000>;
+                               interrupts = <0 28 0x04>;
+                               clocks = <&clks 160>, <&clks 161>;
+                               clock-names = "ipg", "per";
+                               status = "disabled";
+                       };
+
+                       uart4: serial@021f0000 {
+                               compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+                               reg = <0x021f0000 0x4000>;
+                               interrupts = <0 29 0x04>;
+                               clocks = <&clks 160>, <&clks 161>;
+                               clock-names = "ipg", "per";
+                               status = "disabled";
+                       };
+
+                       uart5: serial@021f4000 {
+                               compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
+                               reg = <0x021f4000 0x4000>;
+                               interrupts = <0 30 0x04>;
+                               clocks = <&clks 160>, <&clks 161>;
+                               clock-names = "ipg", "per";
+                               status = "disabled";
+                       };
+               };
+
+               ipu1: ipu@02400000 {
+                       #crtc-cells = <1>;
+                       compatible = "fsl,imx6q-ipu";
+                       reg = <0x02400000 0x400000>;
+                       interrupts = <0 6 0x4 0 5 0x4>;
+                       clocks = <&clks 130>, <&clks 131>, <&clks 132>;
+                       clock-names = "bus", "di0", "di1";
+               };
+       };
+};
index 4ccea21..192cf76 100644 (file)
@@ -5,6 +5,12 @@
                        compatible = "marvell,88f6282-pinctrl";
                        reg = <0x10000 0x20>;
 
+                       pmx_nand: pmx-nand {
+                               marvell,pins = "mpp0", "mpp1", "mpp2", "mpp3",
+                                                       "mpp4", "mpp5", "mpp18", "mpp19";
+                               marvell,function = "nand";
+                       };
+
                        pmx_sata0: pmx-sata0 {
                                marvell,pins = "mpp5", "mpp21", "mpp23";
                                marvell,function = "sata0";
                                marvell,pins = "mpp8", "mpp9";
                                marvell,function = "twsi0";
                        };
+
+                       pmx_twsi1: pmx-twsi1 {
+                               marvell,pins = "mpp36", "mpp37";
+                               marvell,function = "twsi1";
+                       };
+
                        pmx_uart0: pmx-uart0 {
                                marvell,pins = "mpp10", "mpp11";
                                marvell,function = "uart0";
                                marvell,pins = "mpp13", "mpp14";
                                marvell,function = "uart1";
                        };
+                       pmx_sdio: pmx-sdio {
+                               marvell,pins = "mpp12", "mpp13", "mpp14",
+                                              "mpp15", "mpp16", "mpp17";
+                               marvell,function = "sdio";
+                       };
                };
 
                i2c@11100 {
index f2d386c..ef2d8c7 100644 (file)
                        status = "okay";
                        nr-ports = <1>;
                };
+
+               mvsdio@90000 {
+                       pinctrl-0 = <&pmx_sdio>;
+                       pinctrl-names = "default";
+                       status = "okay";
+                       /* No CD or WP GPIOs */
+               };
        };
 
        gpio-leds {
diff --git a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
new file mode 100644 (file)
index 0000000..9555a86
--- /dev/null
@@ -0,0 +1,94 @@
+/dts-v1/;
+
+/include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
+
+/ {
+       model = "Globalscale Technologies Guruplug Server Plus";
+       compatible = "globalscale,guruplug-server-plus", "globalscale,guruplug", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+       memory {
+               device_type = "memory";
+               reg = <0x00000000 0x20000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,115200n8 earlyprintk";
+       };
+
+       ocp@f1000000 {
+               pinctrl: pinctrl@10000 {
+
+                       pinctrl-0 = < &pmx_led_health_r &pmx_led_health_g
+                                     &pmx_led_wmode_r &pmx_led_wmode_g >;
+                       pinctrl-names = "default";
+
+                       pmx_led_health_r: pmx-led-health-r {
+                               marvell,pins = "mpp46";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_health_g: pmx-led-health-g {
+                               marvell,pins = "mpp47";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_wmode_r: pmx-led-wmode-r {
+                               marvell,pins = "mpp48";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_wmode_g: pmx-led-wmode-g {
+                               marvell,pins = "mpp49";
+                               marvell,function = "gpio";
+                       };
+               };
+               serial@12000 {
+                       clock-frequency = <200000000>;
+                       status = "ok";
+               };
+
+               nand@3000000 {
+                       status = "okay";
+
+                       partition@0 {
+                               label = "u-boot";
+                               reg = <0x00000000 0x00100000>;
+                               read-only;
+                       };
+
+                       partition@100000 {
+                               label = "uImage";
+                               reg = <0x00100000 0x00400000>;
+                       };
+
+                       partition@500000 {
+                               label = "data";
+                               reg = <0x00500000 0x1fb00000>;
+                       };
+               };
+
+               sata@80000 {
+                       status = "okay";
+                       nr-ports = <1>;
+               };
+       };
+
+       gpio-leds {
+               compatible = "gpio-leds";
+
+               health-r {
+                       label = "guruplug:red:health";
+                       gpios = <&gpio1 14 1>;
+               };
+               health-g {
+                       label = "guruplug:green:health";
+                       gpios = <&gpio1 15 1>;
+               };
+               wmode-r {
+                       label = "guruplug:red:wmode";
+                       gpios = <&gpio1 16 1>;
+               };
+               wmode-g {
+                       label = "guruplug:green:wmode";
+                       gpios = <&gpio1 17 1>;
+               };
+       };
+};
index 262c654..662dfd8 100644 (file)
                pinctrl: pinctrl@10000 {
 
                        pinctrl-0 = < &pmx_nand &pmx_uart0
-                                     &pmx_led_health &pmx_sdio
+                                     &pmx_led_health
                                      &pmx_sata0 &pmx_sata1
                                      &pmx_led_user1o
                                      &pmx_led_user1g &pmx_led_user0o
                                      &pmx_led_user0g &pmx_led_misc
-                                     &pmx_sdio_cd
                                    >;
                        pinctrl-names = "default";
 
                        status = "okay";
 
                };
+
+               mvsdio@90000 {
+                       pinctrl-0 = <&pmx_sdio &pmx_sdio_cd>;
+                       pinctrl-names = "default";
+                       status = "okay";
+                       cd-gpios = <&gpio1 15 0>;
+                       /* No WP GPIO */
+               };
        };
 
        gpio-leds {
index 77d21ab..e8e7ece 100644 (file)
                        gpios = <&gpio0 12 0>;
                };
        };
+
+       gpio_poweroff {
+               compatible = "gpio-poweroff";
+               gpios = <&gpio0 31 0>;
+       };
+
 };
index 5509f96..3a178cf 100644 (file)
        };
 
        ocp@f1000000 {
+               pinctrl: pinctrl@10000 {
+                       pinctrl-0 = < &pmx_led_esata_green
+                                     &pmx_led_esata_red
+                                     &pmx_led_usb_green
+                                     &pmx_led_usb_red
+                                     &pmx_usb_power_off
+                                     &pmx_led_sys_green
+                                     &pmx_led_sys_red
+                                     &pmx_btn_reset
+                                     &pmx_btn_copy
+                                     &pmx_led_copy_green
+                                     &pmx_led_copy_red
+                                     &pmx_led_hdd_green
+                                     &pmx_led_hdd_red
+                                     &pmx_unknown
+                                     &pmx_btn_power
+                                     &pmx_pwr_off >;
+                       pinctrl-names = "default";
+
+                       pmx_led_esata_green: pmx-led-esata-green {
+                               marvell,pins = "mpp12";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_esata_red: pmx-led-esata-red {
+                               marvell,pins = "mpp13";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_usb_green: pmx-led-usb-green {
+                               marvell,pins = "mpp15";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_usb_red: pmx-led-usb-red {
+                               marvell,pins = "mpp16";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_usb_power_off: pmx-usb-power-off {
+                               marvell,pins = "mpp21";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_sys_green: pmx-led-sys-green {
+                               marvell,pins = "mpp28";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_sys_red: pmx-led-sys-red {
+                               marvell,pins = "mpp29";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_btn_reset: pmx-btn-reset {
+                               marvell,pins = "mpp36";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_btn_copy: pmx-btn-copy {
+                               marvell,pins = "mpp37";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_copy_green: pmx-led-copy-green {
+                               marvell,pins = "mpp39";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_copy_red: pmx-led-copy-red {
+                               marvell,pins = "mpp40";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_hdd_green: pmx-led-hdd-green {
+                               marvell,pins = "mpp41";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_hdd_red: pmx-led-hdd-red {
+                               marvell,pins = "mpp42";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_unknown: pmx-unknown {
+                               marvell,pins = "mpp44";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_btn_power: pmx-btn-power {
+                               marvell,pins = "mpp46";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_pwr_off: pmx-pwr-off {
+                               marvell,pins = "mpp48";
+                               marvell,function = "gpio";
+                       };
+               };
 
                serial@12000 {
                        clock-frequency = <200000000>;
 
                i2c@11000 {
                        status = "okay";
+
+                       adt7476: adt7476a@2e {
+                               compatible = "adt7476";
+                               reg = <0x2e>;
+                       };
                };
 
                nand@3000000 {
                        gpios = <&gpio1 8 0>;
                };
        };
+
+       gpio_poweroff {
+               compatible = "gpio-poweroff";
+               gpios = <&gpio1 16 0>;
+       };
+
+       regulators {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               usb0_power_off: regulator@1 {
+                       compatible = "regulator-fixed";
+                       reg = <1>;
+                       regulator-name = "USB Power Off";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       gpio = <&gpio0 21 0>;
+               };
+       };
 };
index 49d3d74..ede7fe0 100644 (file)
                                reg = <0x30>;
                        };
                };
+
+               pinctrl: pinctrl@10000 {
+                       pinctrl-0 = < &pmx_nand &pmx_uart0
+                               &pmx_uart1 &pmx_twsi1
+                               &pmx_dip_sw0 &pmx_dip_sw1
+                               &pmx_dip_sw2 &pmx_dip_sw3
+                               &pmx_gpio_0 &pmx_gpio_1
+                               &pmx_gpio_2 &pmx_gpio_3
+                               &pmx_gpio_4 &pmx_gpio_5
+                               &pmx_gpio_6 &pmx_gpio_7
+                               &pmx_led_red &pmx_led_green
+                               &pmx_led_yellow >;
+                       pinctrl-names = "default";
+
+                       pmx_uart0: pmx-uart0 {
+                               marvell,pins = "mpp10", "mpp11", "mpp15",
+                                       "mpp16";
+                               marvell,function = "uart0";
+                       };
+
+                       pmx_uart1: pmx-uart1 {
+                               marvell,pins = "mpp13", "mpp14", "mpp8",
+                                       "mpp9";
+                               marvell,function = "uart1";
+                       };
+
+                       pmx_sysrst: pmx-sysrst {
+                               marvell,pins = "mpp6";
+                               marvell,function = "sysrst";
+                       };
+
+                       pmx_dip_sw0: pmx-dip-sw0 {
+                               marvell,pins = "mpp20";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_dip_sw1: pmx-dip-sw1 {
+                               marvell,pins = "mpp21";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_dip_sw2: pmx-dip-sw2 {
+                               marvell,pins = "mpp22";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_dip_sw3: pmx-dip-sw3 {
+                               marvell,pins = "mpp23";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_0: pmx-gpio-0 {
+                               marvell,pins = "mpp24";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_1: pmx-gpio-1 {
+                               marvell,pins = "mpp25";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_2: pmx-gpio-2 {
+                               marvell,pins = "mpp26";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_3: pmx-gpio-3 {
+                               marvell,pins = "mpp27";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_4: pmx-gpio-4 {
+                               marvell,pins = "mpp28";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_5: pmx-gpio-5 {
+                               marvell,pins = "mpp29";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_6: pmx-gpio-6 {
+                               marvell,pins = "mpp30";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_7: pmx-gpio-7 {
+                               marvell,pins = "mpp31";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_gpio_init: pmx-init {
+                               marvell,pins = "mpp38";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_usb_oc: pmx-usb-oc {
+                               marvell,pins = "mpp39";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_red: pmx-led-red {
+                               marvell,pins = "mpp41";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_green: pmx-led-green {
+                               marvell,pins = "mpp42";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_yellow: pmx-led-yellow {
+                               marvell,pins = "mpp43";
+                               marvell,function = "gpio";
+                       };
+               };
        };
 
        gpio-leds {
index cd15452..842ff95 100644 (file)
@@ -1,6 +1,7 @@
 /dts-v1/;
 
 /include/ "kirkwood.dtsi"
+/include/ "kirkwood-6282.dtsi"
 
 / {
        model = "Univeral Scientific Industrial Co. Topkick-1281P2";
        };
 
        ocp@f1000000 {
+               pinctrl: pinctrl@10000 {
+                       /*
+                        * GPIO LED layout
+                        *
+                        *       /-SYS_LED(2)
+                        *       |
+                        *       |   /-DISK_LED
+                        *       |   |
+                        *       |   |   /-WLAN_LED(2)
+                        *       |   |   |
+                        * [SW] [*] [*] [*]
+                        */
+
+                       /*
+                        * Switch positions
+                        *
+                        *     /-SW_LEFT(2)
+                        *     |
+                        *     |   /-SW_IDLE
+                        *     |   |
+                        *     |   |   /-SW_RIGHT
+                        *     |   |   |
+                        * PS [L] [I] [R] LEDS
+                        */
+                       pinctrl-0 = < &pmx_led_disk_yellow
+                                     &pmx_sata0_pwr_enable
+                                     &pmx_led_sys_red
+                                     &pmx_led_sys_blue
+                                     &pmx_led_wifi_green
+                                     &pmx_sw_left
+                                     &pmx_sw_right
+                                     &pmx_sw_idle
+                                     &pmx_sw_left2
+                                     &pmx_led_wifi_yellow
+                                     &pmx_uart0
+                                     &pmx_nand
+                                     &pmx_twsi0 >;
+                       pinctrl-names = "default";
+
+                       pmx_led_disk_yellow: pmx-led-disk-yellow {
+                               marvell,pins = "mpp21";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_sata0_pwr_enable: pmx-sata0-pwr-enable {
+                               marvell,pins = "mpp36";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_sys_red: pmx-led-sys-red {
+                               marvell,pins = "mpp37";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_sys_blue: pmx-led-sys-blue {
+                               marvell,pins = "mpp38";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_wifi_green: pmx-led-wifi-green {
+                               marvell,pins = "mpp39";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_sw_left: pmx-sw-left {
+                               marvell,pins = "mpp43";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_sw_right: pmx-sw-right {
+                               marvell,pins = "mpp44";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_sw_idle: pmx-sw-idle {
+                               marvell,pins = "mpp45";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_sw_left2: pmx-sw-left2 {
+                               marvell,pins = "mpp46";
+                               marvell,function = "gpio";
+                       };
+
+                       pmx_led_wifi_yellow: pmx-led-wifi-yellow {
+                               marvell,pins = "mpp48";
+                               marvell,function = "gpio";
+                       };
+               };
+
                serial@12000 {
                        clock-frequency = <200000000>;
                        status = "ok";
                        status = "okay";
                        nr-ports = <1>;
                };
+
+               i2c@11000 {
+                       status = "ok";
+               };
+
+               mvsdio@90000 {
+                       pinctrl-0 = <&pmx_sdio>;
+                       pinctrl-names = "default";
+                       status = "okay";
+                       /* No CD or WP GPIOs */
+               };
        };
 
        gpio-leds {
index d6ab442..2c738d9 100644 (file)
                        clocks = <&gate_clk 17>;
                        status = "okay";
                };
+
+               mvsdio@90000 {
+                       compatible = "marvell,orion-sdio";
+                       reg = <0x90000 0x200>;
+                       interrupts = <28>;
+                       clocks = <&gate_clk 4>;
+                       status = "disabled";
+               };
        };
 };
index 0b7ee92..3fe8dae 100644 (file)
@@ -1,26 +1,24 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_DOVE=y
 CONFIG_MACH_DOVE_DB=y
 CONFIG_MACH_CM_A510=y
 CONFIG_MACH_DOVE_DT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_HIGHMEM=y
-CONFIG_USE_OF=y
-CONFIG_ATAGS=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
-CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y
 CONFIG_VFP=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -32,8 +30,9 @@ CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
@@ -57,7 +56,6 @@ CONFIG_ATA=y
 CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
 CONFIG_MV643XX_ETH=y
-# CONFIG_NETDEV_10000 is not set
 CONFIG_INPUT_POLLDEV=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
@@ -68,10 +66,7 @@ CONFIG_LEGACY_PTY_COUNT=16
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_SERIAL_8250_PCI is not set
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
@@ -81,13 +76,11 @@ CONFIG_SPI=y
 CONFIG_SPI_ORION=y
 # CONFIG_HWMON is not set
 CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_IO_ACCESSORS=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_DOVE=y
 CONFIG_NEW_LEDS=y
@@ -104,6 +97,7 @@ CONFIG_MV_XOR=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_UDF_FS=m
@@ -112,24 +106,20 @@ CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_850=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_ISO8859_2=y
 CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_CRYPTO_NULL=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
@@ -138,7 +128,6 @@ CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_SHA1=y
 CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_BLOWFISH=y
 CONFIG_CRYPTO_TEA=y
 CONFIG_CRYPTO_TWOFISH=y
@@ -147,5 +136,4 @@ CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_DEV_MV_CESA=y
 CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
 CONFIG_LIBCRC32C=y
index cbd91bc..2ec8119 100644 (file)
@@ -14,16 +14,20 @@ CONFIG_MACH_ARMADA_XP=y
 # CONFIG_CACHE_L2X0 is not set
 # CONFIG_SWP_EMULATE is not set
 CONFIG_SMP=y
-# CONFIG_LOCAL_TIMERS is not set
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
 # CONFIG_COMPACTION is not set
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_VFP=y
 CONFIG_NET=y
 CONFIG_INET=y
+CONFIG_BT=y
+CONFIG_BT_MRVL=y
+CONFIG_BT_MRVL_SDIO=y
+CONFIG_CFG80211=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=y
@@ -31,16 +35,34 @@ CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
 CONFIG_MVNETA=y
 CONFIG_MARVELL_PHY=y
+CONFIG_MWIFIEX=y
+CONFIG_MWIFIEX_SDIO=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_I2C=y
+CONFIG_SPI=y
+CONFIG_SPI_ORION=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_M25P80=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_MMC=y
+CONFIG_MMC_MVSDIO=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_S35390A=y
+CONFIG_RTC_DRV_MV=y
 CONFIG_DMADEVICES=y
 CONFIG_MV_XOR=y
 # CONFIG_IOMMU_SUPPORT is not set
index ab98fdd..720799f 100644 (file)
@@ -24,6 +24,7 @@ extern struct arm_delay_ops {
        void (*delay)(unsigned long);
        void (*const_udelay)(unsigned long);
        void (*udelay)(unsigned long);
+       bool const_clock;
 } arm_delay_ops;
 
 #define __delay(n)             arm_delay_ops.delay(n)
index f30ac3b..80d6fc4 100644 (file)
@@ -247,7 +247,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
+       const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+               L_PTE_NONE | L_PTE_VALID;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
 }
index 4dd41fc..170e9f3 100644 (file)
@@ -395,7 +395,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -415,7 +415,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -442,7 +442,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        kretprobe_hash_unlock(current, &flags);
 
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
index 5f73f70..1bdfd87 100644 (file)
@@ -466,8 +466,6 @@ void tick_broadcast(const struct cpumask *mask)
 {
        smp_cross_call(mask, IPI_TIMER);
 }
-#else
-#define smp_timer_broadcast    NULL
 #endif
 
 static void broadcast_timer_set_mode(enum clock_event_mode mode,
@@ -674,6 +672,9 @@ static int cpufreq_callback(struct notifier_block *nb,
        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;
 
+       if (arm_delay_ops.const_clock)
+               return NOTIFY_OK;
+
        if (!per_cpu(l_p_j_ref, cpu)) {
                per_cpu(l_p_j_ref, cpu) =
                        per_cpu(cpu_data, cpu).loops_per_jiffy;
index 0dc5385..6b93f6a 100644 (file)
@@ -77,6 +77,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
                arm_delay_ops.delay             = __timer_delay;
                arm_delay_ops.const_udelay      = __timer_const_udelay;
                arm_delay_ops.udelay            = __timer_udelay;
+               arm_delay_ops.const_clock       = true;
                delay_calibrated                = true;
        } else {
                pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
index 603c5fd..36469d8 100644 (file)
@@ -2,8 +2,12 @@ if ARCH_DOVE
 
 menu "Marvell Dove Implementations"
 
+config DOVE_LEGACY
+       bool
+
 config MACH_DOVE_DB
        bool "Marvell DB-MV88AP510 Development Board"
+       select DOVE_LEGACY
        select I2C_BOARDINFO
        help
          Say 'Y' here if you want your kernel to support the
@@ -11,6 +15,7 @@ config MACH_DOVE_DB
 
 config MACH_CM_A510
        bool "CompuLab CM-A510 Board"
+       select DOVE_LEGACY
        help
          Say 'Y' here if you want your kernel to support the
          CompuLab CM-A510 Board.
@@ -19,6 +24,8 @@ config MACH_DOVE_DT
        bool "Marvell Dove Flattened Device Tree"
        select MVEBU_CLK_CORE
        select MVEBU_CLK_GATING
+       select REGULATOR
+       select REGULATOR_FIXED_VOLTAGE
        select USE_OF
        help
          Say 'Y' here if you want your kernel to support the
index 5e683ba..3f0a858 100644 (file)
@@ -1,4 +1,6 @@
-obj-y                          += common.o addr-map.o irq.o mpp.o
+obj-y                          += common.o addr-map.o irq.o
+obj-$(CONFIG_DOVE_LEGACY)      += mpp.o
 obj-$(CONFIG_PCI)              += pcie.o
 obj-$(CONFIG_MACH_DOVE_DB)     += dove-db-setup.o
+obj-$(CONFIG_MACH_DOVE_DT)     += board-dt.o
 obj-$(CONFIG_MACH_CM_A510)     += cm-a510.o
diff --git a/arch/arm/mach-dove/board-dt.c b/arch/arm/mach-dove/board-dt.c
new file mode 100644 (file)
index 0000000..fbde1dd
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * arch/arm/mach-dove/board-dt.c
+ *
+ * Marvell Dove 88AP510 System On Chip FDT Board
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/mvebu.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_data/usb-ehci-orion.h>
+#include <asm/hardware/cache-tauros2.h>
+#include <asm/mach/arch.h>
+#include <mach/pm.h>
+#include <plat/common.h>
+#include <plat/irq.h>
+#include "common.h"
+
+/*
+ * There are still devices that doesn't even know about DT,
+ * get clock gates here and add a clock lookup.
+ */
+static void __init dove_legacy_clk_init(void)
+{
+       struct device_node *np = of_find_compatible_node(NULL, NULL,
+                                        "marvell,dove-gating-clock");
+       struct of_phandle_args clkspec;
+
+       clkspec.np = np;
+       clkspec.args_count = 1;
+
+       clkspec.args[0] = CLOCK_GATING_BIT_GBE;
+       orion_clkdev_add(NULL, "mv643xx_eth_port.0",
+                        of_clk_get_from_provider(&clkspec));
+
+       clkspec.args[0] = CLOCK_GATING_BIT_PCIE0;
+       orion_clkdev_add("0", "pcie",
+                        of_clk_get_from_provider(&clkspec));
+
+       clkspec.args[0] = CLOCK_GATING_BIT_PCIE1;
+       orion_clkdev_add("1", "pcie",
+                        of_clk_get_from_provider(&clkspec));
+}
+
+static void __init dove_of_clk_init(void)
+{
+       mvebu_clocks_init();
+       dove_legacy_clk_init();
+}
+
+static struct mv643xx_eth_platform_data dove_dt_ge00_data = {
+       .phy_addr = MV643XX_ETH_PHY_ADDR_DEFAULT,
+};
+
+static void __init dove_dt_init(void)
+{
+       pr_info("Dove 88AP510 SoC\n");
+
+#ifdef CONFIG_CACHE_TAUROS2
+       tauros2_init(0);
+#endif
+       dove_setup_cpu_mbus();
+
+       /* Setup root of clk tree */
+       dove_of_clk_init();
+
+       /* Internal devices not ported to DT yet */
+       dove_ge00_init(&dove_dt_ge00_data);
+       dove_pcie_init(1, 1);
+
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static const char * const dove_dt_board_compat[] = {
+       "marvell,dove",
+       NULL
+};
+
+DT_MACHINE_START(DOVE_DT, "Marvell Dove (Flattened Device Tree)")
+       .map_io         = dove_map_io,
+       .init_early     = dove_init_early,
+       .init_irq       = orion_dt_init_irq,
+       .init_time      = dove_timer_init,
+       .init_machine   = dove_dt_init,
+       .restart        = dove_restart,
+       .dt_compat      = dove_dt_board_compat,
+MACHINE_END
index ea84c53..c6b3b2b 100644 (file)
@@ -360,88 +360,3 @@ void dove_restart(char mode, const char *cmd)
        while (1)
                ;
 }
-
-#if defined(CONFIG_MACH_DOVE_DT)
-/*
- * There are still devices that doesn't even know about DT,
- * get clock gates here and add a clock lookup.
- */
-static void __init dove_legacy_clk_init(void)
-{
-       struct device_node *np = of_find_compatible_node(NULL, NULL,
-                                        "marvell,dove-gating-clock");
-       struct of_phandle_args clkspec;
-
-       clkspec.np = np;
-       clkspec.args_count = 1;
-
-       clkspec.args[0] = CLOCK_GATING_BIT_USB0;
-       orion_clkdev_add(NULL, "orion-ehci.0",
-                        of_clk_get_from_provider(&clkspec));
-
-       clkspec.args[0] = CLOCK_GATING_BIT_USB1;
-       orion_clkdev_add(NULL, "orion-ehci.1",
-                        of_clk_get_from_provider(&clkspec));
-
-       clkspec.args[0] = CLOCK_GATING_BIT_GBE;
-       orion_clkdev_add(NULL, "mv643xx_eth_port.0",
-                        of_clk_get_from_provider(&clkspec));
-
-       clkspec.args[0] = CLOCK_GATING_BIT_PCIE0;
-       orion_clkdev_add("0", "pcie",
-                        of_clk_get_from_provider(&clkspec));
-
-       clkspec.args[0] = CLOCK_GATING_BIT_PCIE1;
-       orion_clkdev_add("1", "pcie",
-                        of_clk_get_from_provider(&clkspec));
-}
-
-static void __init dove_of_clk_init(void)
-{
-       mvebu_clocks_init();
-       dove_legacy_clk_init();
-}
-
-static struct mv643xx_eth_platform_data dove_dt_ge00_data = {
-       .phy_addr = MV643XX_ETH_PHY_ADDR_DEFAULT,
-};
-
-static void __init dove_dt_init(void)
-{
-       pr_info("Dove 88AP510 SoC, TCLK = %d MHz.\n",
-               (dove_tclk + 499999) / 1000000);
-
-#ifdef CONFIG_CACHE_TAUROS2
-       tauros2_init(0);
-#endif
-       dove_setup_cpu_mbus();
-
-       /* Setup root of clk tree */
-       dove_of_clk_init();
-
-       /* Internal devices not ported to DT yet */
-       dove_rtc_init();
-
-       dove_ge00_init(&dove_dt_ge00_data);
-       dove_ehci0_init();
-       dove_ehci1_init();
-       dove_pcie_init(1, 1);
-
-       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
-static const char * const dove_dt_board_compat[] = {
-       "marvell,dove",
-       NULL
-};
-
-DT_MACHINE_START(DOVE_DT, "Marvell Dove (Flattened Device Tree)")
-       .map_io         = dove_map_io,
-       .init_early     = dove_init_early,
-       .init_irq       = orion_dt_init_irq,
-       .init_time      = dove_timer_init,
-       .init_machine   = dove_dt_init,
-       .restart        = dove_restart,
-       .dt_compat      = dove_dt_board_compat,
-MACHINE_END
-#endif
index fb7cb84..0f39f8c 100644 (file)
@@ -83,6 +83,7 @@ enum imx5_clks {
        ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
        epit1_ipg_gate, epit1_hf_gate, epit2_ipg_gate, epit2_hf_gate,
        can_sel, can1_serial_gate, can1_ipg_gate,
+       owire_gate,
        clk_max
 };
 
@@ -233,12 +234,13 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
        clk[epit1_hf_gate] = imx_clk_gate2("epit1_hf_gate", "per_root", MXC_CCM_CCGR2, 4);
        clk[epit2_ipg_gate] = imx_clk_gate2("epit2_ipg_gate", "ipg", MXC_CCM_CCGR2, 6);
        clk[epit2_hf_gate] = imx_clk_gate2("epit2_hf_gate", "per_root", MXC_CCM_CCGR2, 8);
+       clk[owire_gate] = imx_clk_gate2("owire_gate", "per_root", MXC_CCM_CCGR2, 22);
 
        for (i = 0; i < ARRAY_SIZE(clk); i++)
                if (IS_ERR(clk[i]))
                        pr_err("i.MX5 clk %d: register failed with %ld\n",
                                i, PTR_ERR(clk[i]));
-       
+
        clk_register_clkdev(clk[gpt_hf_gate], "per", "imx-gpt.0");
        clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
        clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
index 540138c..7b025ee 100644 (file)
@@ -164,8 +164,8 @@ enum mx6q_clks {
        usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
        pll4_audio, pll5_video, pll8_mlb, pll7_usb_host, pll6_enet, ssi1_ipg,
        ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
-       sata_ref, sata_ref_100m, pcie_ref, pcie_ref_125m, enet_ref,
-       clk_max
+       sata_ref, sata_ref_100m, pcie_ref, pcie_ref_125m, enet_ref, usbphy1_gate,
+       usbphy2_gate, clk_max
 };
 
 static struct clk *clk[clk_max];
@@ -218,8 +218,21 @@ int __init mx6q_clocks_init(void)
        clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB,       "pll7_usb_host","osc", base + 0x20, 0x3);
        clk[pll8_mlb]      = imx_clk_pllv3(IMX_PLLV3_MLB,       "pll8_mlb",     "osc", base + 0xd0, 0x0);
 
-       clk[usbphy1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 6);
-       clk[usbphy2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 6);
+       /*
+        * Bit 20 is the reserved and read-only bit, we do this only for:
+        * - Do nothing for usbphy clk_enable/disable
+        * - Keep refcount when do usbphy clk_enable/disable, in that case,
+        * the clk framework may need to enable/disable usbphy's parent
+        */
+       clk[usbphy1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20);
+       clk[usbphy2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
+
+       /*
+        * usbphy*_gate needs to be on after system boots up, and software
+        * never needs to control it anymore.
+        */
+       clk[usbphy1_gate] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6);
+       clk[usbphy2_gate] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6);
 
        clk[sata_ref] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5);
        clk[pcie_ref] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4);
@@ -446,6 +459,11 @@ int __init mx6q_clocks_init(void)
        for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
                clk_prepare_enable(clk[clks_init_on[i]]);
 
+       if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+               clk_prepare_enable(clk[usbphy1_gate]);
+               clk_prepare_enable(clk[usbphy2_gate]);
+       }
+
        /* Set initial power mode */
        imx6q_set_lpm(WAIT_CLOCKED);
 
index 1786b2d..9ffd103 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/init.h>
@@ -22,6 +23,7 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
+#include <linux/opp.h>
 #include <linux/phy.h>
 #include <linux/regmap.h>
 #include <linux/micrel_phy.h>
@@ -200,6 +202,64 @@ static void __init imx6q_init_machine(void)
        imx6q_1588_init();
 }
 
+#define OCOTP_CFG3                     0x440
+#define OCOTP_CFG3_SPEED_SHIFT         16
+#define OCOTP_CFG3_SPEED_1P2GHZ                0x3
+
+static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev)
+{
+       struct device_node *np;
+       void __iomem *base;
+       u32 val;
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+       if (!np) {
+               pr_warn("failed to find ocotp node\n");
+               return;
+       }
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_warn("failed to map ocotp\n");
+               goto put_node;
+       }
+
+       val = readl_relaxed(base + OCOTP_CFG3);
+       val >>= OCOTP_CFG3_SPEED_SHIFT;
+       if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
+               if (opp_disable(cpu_dev, 1200000000))
+                       pr_warn("failed to disable 1.2 GHz OPP\n");
+
+put_node:
+       of_node_put(np);
+}
+
+static void __init imx6q_opp_init(struct device *cpu_dev)
+{
+       struct device_node *np;
+
+       np = of_find_node_by_path("/cpus/cpu@0");
+       if (!np) {
+               pr_warn("failed to find cpu0 node\n");
+               return;
+       }
+
+       cpu_dev->of_node = np;
+       if (of_init_opp_table(cpu_dev)) {
+               pr_warn("failed to init OPP table\n");
+               goto put_node;
+       }
+
+       imx6q_opp_check_1p2ghz(cpu_dev);
+
+put_node:
+       of_node_put(np);
+}
+
+struct platform_device imx6q_cpufreq_pdev = {
+       .name = "imx6q-cpufreq",
+};
+
 static void __init imx6q_init_late(void)
 {
        /*
@@ -208,6 +268,11 @@ static void __init imx6q_init_late(void)
         */
        if (imx6q_revision() > IMX_CHIP_REVISION_1_1)
                imx6q_cpuidle_init();
+
+       if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
+               imx6q_opp_init(&imx6q_cpufreq_pdev.dev);
+               platform_device_register(&imx6q_cpufreq_pdev);
+       }
 }
 
 static void __init imx6q_map_io(void)
index f91cdff..7b6a64b 100644 (file)
@@ -58,6 +58,13 @@ config ARCH_KIRKWOOD_DT
          Say 'Y' here if you want your kernel to support the
          Marvell Kirkwood using flattened device tree.
 
+config MACH_GURUPLUG_DT
+       bool "Marvell GuruPlug Reference Board (Flattened Device Tree)"
+       select ARCH_KIRKWOOD_DT
+       help
+         Say 'Y' here if you want your kernel to support the
+         Marvell GuruPlug Reference Board (Flattened Device Tree).
+
 config MACH_DREAMPLUG_DT
        bool "Marvell DreamPlug (Flattened Device Tree)"
        select ARCH_KIRKWOOD_DT
index d665309..4cc4bee 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_MACH_T5325)              += t5325-setup.o
 
 obj-$(CONFIG_ARCH_KIRKWOOD_DT)         += board-dt.o
 obj-$(CONFIG_MACH_DREAMPLUG_DT)                += board-dreamplug.o
+obj-$(CONFIG_MACH_GURUPLUG_DT)         += board-guruplug.o
 obj-$(CONFIG_MACH_ICONNECT_DT)         += board-iconnect.o
 obj-$(CONFIG_MACH_DLINK_KIRKWOOD_DT)   += board-dnskw.o
 obj-$(CONFIG_MACH_IB62X0_DT)           += board-ib62x0.o
index 08248e2..0903242 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/gpio.h>
-#include <linux/platform_data/mmc-mvsdio.h>
 #include "common.h"
 
 static struct mv643xx_eth_platform_data dreamplug_ge00_data = {
@@ -26,10 +25,6 @@ static struct mv643xx_eth_platform_data dreamplug_ge01_data = {
        .phy_addr       = MV643XX_ETH_PHY_ADDR(1),
 };
 
-static struct mvsdio_platform_data dreamplug_mvsdio_data = {
-       /* unfortunately the CD signal has not been connected */
-};
-
 void __init dreamplug_init(void)
 {
        /*
@@ -37,5 +32,4 @@ void __init dreamplug_init(void)
         */
        kirkwood_ge00_init(&dreamplug_ge00_data);
        kirkwood_ge01_init(&dreamplug_ge01_data);
-       kirkwood_sdio_init(&dreamplug_mvsdio_data);
 }
index 95cc04d..2e73e9d 100644 (file)
@@ -55,10 +55,6 @@ static void __init kirkwood_legacy_clk_init(void)
        orion_clkdev_add("0", "pcie",
                         of_clk_get_from_provider(&clkspec));
 
-       clkspec.args[0] = CGC_BIT_USB0;
-       orion_clkdev_add(NULL, "orion-ehci.0",
-                        of_clk_get_from_provider(&clkspec));
-
        clkspec.args[0] = CGC_BIT_PEX1;
        orion_clkdev_add("1", "pcie",
                         of_clk_get_from_provider(&clkspec));
@@ -66,11 +62,6 @@ static void __init kirkwood_legacy_clk_init(void)
        clkspec.args[0] = CGC_BIT_GE1;
        orion_clkdev_add(NULL, "mv643xx_eth_port.1",
                         of_clk_get_from_provider(&clkspec));
-
-       clkspec.args[0] = CGC_BIT_SDIO;
-       orion_clkdev_add(NULL, "mvsdio",
-                        of_clk_get_from_provider(&clkspec));
-
 }
 
 static void __init kirkwood_of_clk_init(void)
@@ -107,6 +98,9 @@ static void __init kirkwood_dt_init(void)
        if (of_machine_is_compatible("globalscale,dreamplug"))
                dreamplug_init();
 
+       if (of_machine_is_compatible("globalscale,guruplug"))
+               guruplug_dt_init();
+
        if (of_machine_is_compatible("dlink,dns-kirkwood"))
                dnskw_init();
 
@@ -150,14 +144,12 @@ static void __init kirkwood_dt_init(void)
        if (of_machine_is_compatible("usi,topkick"))
                usi_topkick_init();
 
-       if (of_machine_is_compatible("zyxel,nsa310"))
-               nsa310_init();
-
        of_platform_populate(NULL, kirkwood_dt_match_table, NULL, NULL);
 }
 
 static const char * const kirkwood_dt_board_compat[] = {
        "globalscale,dreamplug",
+       "globalscale,guruplug",
        "dlink,dns-320",
        "dlink,dns-325",
        "iom,iconnect",
diff --git a/arch/arm/mach-kirkwood/board-guruplug.c b/arch/arm/mach-kirkwood/board-guruplug.c
new file mode 100644 (file)
index 0000000..0a0df45
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * arch/arm/mach-kirkwood/board-guruplug.c
+ *
+ * Marvell Guruplug Reference Board Init for drivers not converted to
+ * flattened device tree yet.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/mmc-mvsdio.h>
+#include "common.h"
+
+static struct mv643xx_eth_platform_data guruplug_ge00_data = {
+       .phy_addr       = MV643XX_ETH_PHY_ADDR(0),
+};
+
+static struct mv643xx_eth_platform_data guruplug_ge01_data = {
+       .phy_addr       = MV643XX_ETH_PHY_ADDR(1),
+};
+
+static struct mvsdio_platform_data guruplug_mvsdio_data = {
+       /* unfortunately the CD signal has not been connected */
+};
+
+void __init guruplug_dt_init(void)
+{
+       /*
+        * Basic setup. Needs to be called early.
+        */
+       kirkwood_ge00_init(&guruplug_ge00_data);
+       kirkwood_ge01_init(&guruplug_ge01_data);
+       kirkwood_sdio_init(&guruplug_mvsdio_data);
+}
index 3264925..7d6dc66 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/mv643xx_eth.h>
-#include <linux/platform_data/mmc-mvsdio.h>
 #include "common.h"
 
 static struct mv643xx_eth_platform_data mplcec4_ge00_data = {
@@ -23,11 +22,6 @@ static struct mv643xx_eth_platform_data mplcec4_ge01_data = {
        .phy_addr       = MV643XX_ETH_PHY_ADDR(2),
 };
 
-static struct mvsdio_platform_data mplcec4_mvsdio_data = {
-       .gpio_card_detect = 47, /* MPP47 used as SD card detect */
-};
-
-
 void __init mplcec4_init(void)
 {
        /*
@@ -35,7 +29,6 @@ void __init mplcec4_init(void)
         */
        kirkwood_ge00_init(&mplcec4_ge00_data);
        kirkwood_ge01_init(&mplcec4_ge01_data);
-       kirkwood_sdio_init(&mplcec4_mvsdio_data);
        kirkwood_pcie_init(KW_PCIE0);
 }
 
index f4632a8..f2ea3b7 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/mv643xx_eth.h>
-#include <linux/gpio.h>
 #include <linux/of.h>
 #include "common.h"
 
@@ -23,13 +22,6 @@ static struct mv643xx_eth_platform_data ns2_ge00_data = {
        .phy_addr       = MV643XX_ETH_PHY_ADDR(8),
 };
 
-#define NS2_GPIO_POWER_OFF     31
-
-static void ns2_power_off(void)
-{
-       gpio_set_value(NS2_GPIO_POWER_OFF, 1);
-}
-
 void __init ns2_init(void)
 {
        /*
@@ -39,10 +31,4 @@ void __init ns2_init(void)
            of_machine_is_compatible("lacie,netspace_mini_v2"))
                ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
        kirkwood_ge00_init(&ns2_ge00_data);
-
-       if (gpio_request(NS2_GPIO_POWER_OFF, "power-off") == 0 &&
-           gpio_direction_output(NS2_GPIO_POWER_OFF, 0) == 0)
-               pm_power_off = ns2_power_off;
-       else
-               pr_err("ns2: failed to configure power-off GPIO\n");
 }
index 970174a..55ade93 100644 (file)
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
 #include <mach/kirkwood.h>
 #include <linux/of.h>
 #include "common.h"
-#include "mpp.h"
-
-#define NSA310_GPIO_USB_POWER_OFF      21
-#define NSA310_GPIO_POWER_OFF          48
-
-static unsigned int nsa310_mpp_config[] __initdata = {
-       MPP12_GPIO, /* led esata green */
-       MPP13_GPIO, /* led esata red */
-       MPP15_GPIO, /* led usb green */
-       MPP16_GPIO, /* led usb red */
-       MPP21_GPIO, /* control usb power off */
-       MPP28_GPIO, /* led sys green */
-       MPP29_GPIO, /* led sys red */
-       MPP36_GPIO, /* key reset */
-       MPP37_GPIO, /* key copy */
-       MPP39_GPIO, /* led copy green */
-       MPP40_GPIO, /* led copy red */
-       MPP41_GPIO, /* led hdd green */
-       MPP42_GPIO, /* led hdd red */
-       MPP44_GPIO, /* ?? */
-       MPP46_GPIO, /* key power */
-       MPP48_GPIO, /* control power off */
-       0
-};
-
-static struct i2c_board_info __initdata nsa310_i2c_info[] = {
-       { I2C_BOARD_INFO("adt7476", 0x2e) },
-};
-
-static void nsa310_power_off(void)
-{
-       gpio_set_value(NSA310_GPIO_POWER_OFF, 1);
-}
-
-static int __init nsa310_gpio_request(unsigned int gpio, unsigned long flags,
-                                      const char *label)
-{
-       int err;
-
-       err = gpio_request_one(gpio, flags, label);
-       if (err)
-               pr_err("NSA-310: can't setup GPIO%u (%s), err=%d\n",
-                       gpio, label, err);
-
-       return err;
-}
-
-static void __init nsa310_gpio_init(void)
-{
-       int err;
-
-       err = nsa310_gpio_request(NSA310_GPIO_POWER_OFF, GPIOF_OUT_INIT_LOW,
-                                 "Power Off");
-       if (!err)
-               pm_power_off = nsa310_power_off;
-
-       nsa310_gpio_request(NSA310_GPIO_USB_POWER_OFF, GPIOF_OUT_INIT_LOW,
-                           "USB Power Off");
-}
-
-void __init nsa310_init(void)
-{
-       kirkwood_mpp_conf(nsa310_mpp_config);
-
-       nsa310_gpio_init();
-
-       i2c_register_board_info(0, ARRAY_AND_SIZE(nsa310_i2c_info));
-}
 
 static int __init nsa310_pci_init(void)
 {
index 815fc64..b11d8fd 100644 (file)
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/mv643xx_eth.h>
-#include <linux/clk.h>
-#include <linux/clk-private.h>
 #include "common.h"
-#include "mpp.h"
 
 static struct mv643xx_eth_platform_data openblocks_ge00_data = {
        .phy_addr       = MV643XX_ETH_PHY_ADDR(0),
 };
 
-static unsigned int openblocks_a6_mpp_config[] __initdata = {
-       MPP0_NF_IO2,
-       MPP1_NF_IO3,
-       MPP2_NF_IO4,
-       MPP3_NF_IO5,
-       MPP4_NF_IO6,
-       MPP5_NF_IO7,
-       MPP6_SYSRST_OUTn,
-       MPP8_UART1_RTS,
-       MPP9_UART1_CTS,
-       MPP10_UART0_TXD,
-       MPP11_UART0_RXD,
-       MPP13_UART1_TXD,
-       MPP14_UART1_RXD,
-       MPP15_UART0_RTS,
-       MPP16_UART0_CTS,
-       MPP18_NF_IO0,
-       MPP19_NF_IO1,
-       MPP20_GPIO, /* DIP SW0 */
-       MPP21_GPIO, /* DIP SW1 */
-       MPP22_GPIO, /* DIP SW2 */
-       MPP23_GPIO, /* DIP SW3 */
-       MPP24_GPIO, /* GPIO 0 */
-       MPP25_GPIO, /* GPIO 1 */
-       MPP26_GPIO, /* GPIO 2 */
-       MPP27_GPIO, /* GPIO 3 */
-       MPP28_GPIO, /* GPIO 4 */
-       MPP29_GPIO, /* GPIO 5 */
-       MPP30_GPIO, /* GPIO 6 */
-       MPP31_GPIO, /* GPIO 7 */
-       MPP36_TW1_SDA,
-       MPP37_TW1_SCK,
-       MPP38_GPIO, /* INIT */
-       MPP39_GPIO, /* USB OC */
-       MPP41_GPIO, /* LED: Red */
-       MPP42_GPIO, /* LED: Green */
-       MPP43_GPIO, /* LED: Yellow */
-       0,
-};
-
 void __init openblocks_a6_init(void)
 {
        /*
         * Basic setup. Needs to be called early.
         */
-       kirkwood_mpp_conf(openblocks_a6_mpp_config);
        kirkwood_ge00_init(&openblocks_ge00_data);
 }
index 23d2dd1..1cc04ec 100644 (file)
 #include <linux/init.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/gpio.h>
-#include <linux/platform_data/mmc-mvsdio.h>
 #include "common.h"
-#include "mpp.h"
 
 static struct mv643xx_eth_platform_data topkick_ge00_data = {
        .phy_addr       = MV643XX_ETH_PHY_ADDR(0),
 };
 
-static struct mvsdio_platform_data topkick_mvsdio_data = {
-       /* unfortunately the CD signal has not been connected */
-};
-
-/*
- * GPIO LED layout
- *
- *       /-SYS_LED(2)
- *       |
- *       |   /-DISK_LED
- *       |   |
- *       |   |   /-WLAN_LED(2)
- *       |   |   |
- * [SW] [*] [*] [*]
- */
-
-/*
- * Switch positions
- *
- *     /-SW_LEFT
- *     |
- *     |   /-SW_IDLE
- *     |   |
- *     |   |   /-SW_RIGHT
- *     |   |   |
- * PS [L] [I] [R] LEDS
- */
-
-static unsigned int topkick_mpp_config[] __initdata = {
-       MPP21_GPIO,     /* DISK_LED           (low active) - yellow */
-       MPP36_GPIO,     /* SATA0 power enable (high active) */
-       MPP37_GPIO,     /* SYS_LED2           (low active) - red */
-       MPP38_GPIO,     /* SYS_LED            (low active) - blue */
-       MPP39_GPIO,     /* WLAN_LED           (low active) - green */
-       MPP43_GPIO,     /* SW_LEFT            (low active) */
-       MPP44_GPIO,     /* SW_RIGHT           (low active) */
-       MPP45_GPIO,     /* SW_IDLE            (low active) */
-       MPP46_GPIO,     /* SW_LEFT            (low active) */
-       MPP48_GPIO,     /* WLAN_LED2          (low active) - yellow */
-       0
-};
-
 void __init usi_topkick_init(void)
 {
        /*
         * Basic setup. Needs to be called early.
         */
-       kirkwood_mpp_conf(topkick_mpp_config);
-
-
        kirkwood_ge00_init(&topkick_ge00_data);
-       kirkwood_sdio_init(&topkick_mvsdio_data);
 }
index e956d02..5ed7056 100644 (file)
@@ -60,6 +60,11 @@ void dreamplug_init(void);
 #else
 static inline void dreamplug_init(void) {};
 #endif
+#ifdef CONFIG_MACH_GURUPLUG_DT
+void guruplug_dt_init(void);
+#else
+static inline void guruplug_dt_init(void) {};
+#endif
 #ifdef CONFIG_MACH_TS219_DT
 void qnap_dt_ts219_init(void);
 #else
@@ -130,12 +135,6 @@ void ns2_init(void);
 static inline void ns2_init(void) {};
 #endif
 
-#ifdef CONFIG_MACH_NSA310_DT
-void nsa310_init(void);
-#else
-static inline void nsa310_init(void) {};
-#endif
-
 #ifdef CONFIG_MACH_OPENBLOCKS_A6_DT
 void openblocks_a6_init(void);
 #else
index 8e3fb08..274ff58 100644 (file)
@@ -34,6 +34,7 @@
 #define ARMADA_370_XP_INT_CONTROL              (0x00)
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS      (0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS    (0x34)
+#define ARMADA_370_XP_INT_SOURCE_CTL(irq)      (0x100 + irq*4)
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS          (0x44)
 
 #define ARMADA_370_XP_IN_DRBEL_MSK_OFFS          (0xc)
 #define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS        (0x8)
 
+#define ARMADA_370_XP_MAX_PER_CPU_IRQS         (28)
+
 #define ACTIVE_DOORBELLS                       (8)
 
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
 static void __iomem *per_cpu_int_base;
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
 
+/*
+ * In SMP mode:
+ * For shared global interrupts, mask/unmask global enable bit
+ * For CPU interrtups, mask/unmask the calling CPU's bit
+ */
 static void armada_370_xp_irq_mask(struct irq_data *d)
 {
+#ifdef CONFIG_SMP
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+       if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+               writel(hwirq, main_int_base +
+                               ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+       else
+               writel(hwirq, per_cpu_int_base +
+                               ARMADA_370_XP_INT_SET_MASK_OFFS);
+#else
        writel(irqd_to_hwirq(d),
               per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+#endif
 }
 
 static void armada_370_xp_irq_unmask(struct irq_data *d)
 {
+#ifdef CONFIG_SMP
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+       if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+               writel(hwirq, main_int_base +
+                               ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+       else
+               writel(hwirq, per_cpu_int_base +
+                               ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#else
        writel(irqd_to_hwirq(d),
               per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#endif
 }
 
 #ifdef CONFIG_SMP
 static int armada_xp_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val, bool force)
 {
+       unsigned long reg;
+       unsigned long new_mask = 0;
+       unsigned long online_mask = 0;
+       unsigned long count = 0;
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
+       int cpu;
+
+       for_each_cpu(cpu, mask_val) {
+               new_mask |= 1 << cpu_logical_map(cpu);
+               count++;
+       }
+
+       /*
+        * Forbid mutlicore interrupt affinity
+        * This is required since the MPIC HW doesn't limit
+        * several CPUs from acknowledging the same interrupt.
+        */
+       if (count > 1)
+               return -EINVAL;
+
+       for_each_cpu(cpu, cpu_online_mask)
+               online_mask |= 1 << cpu_logical_map(cpu);
+
+       raw_spin_lock(&irq_controller_lock);
+
+       reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+       reg = (reg & (~online_mask)) | new_mask;
+       writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
+
+       raw_spin_unlock(&irq_controller_lock);
+
        return 0;
 }
 #endif
@@ -82,10 +145,17 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 {
        armada_370_xp_irq_mask(irq_get_irq_data(virq));
        writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
-
-       irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
-                                handle_level_irq);
        irq_set_status_flags(virq, IRQ_LEVEL);
+
+       if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
+               irq_set_percpu_devid(virq);
+               irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+                                       handle_percpu_devid_irq);
+
+       } else {
+               irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+                                       handle_level_irq);
+       }
        set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
 
        return 0;
@@ -155,6 +225,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 
 #ifdef CONFIG_SMP
        armada_xp_mpic_smp_cpu_init();
+
+       /*
+        * Set the default affinity from all CPUs to the boot cpu.
+        * This is required since the MPIC doesn't limit several CPUs
+        * from acknowledging the same interrupt.
+        */
+       cpumask_clear(irq_default_affinity);
+       cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+
 #endif
 
        return 0;
@@ -173,7 +252,7 @@ asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
                if (irqnr > 1022)
                        break;
 
-               if (irqnr >= 8) {
+               if (irqnr > 0) {
                        irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
                                        irqnr);
                        handle_IRQ(irqnr, regs);
index ff528df..b068b7f 100644 (file)
@@ -11,7 +11,7 @@ obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o gpmc.o timer.o pm.o \
         omap_device.o sram.o
 
 omap-2-3-common                                = irq.o
-hwmod-common                           = omap_hwmod.o \
+hwmod-common                           = omap_hwmod.o omap_hwmod_reset.o \
                                          omap_hwmod_common_data.o
 clock-common                           = clock.o clock_common_data.o \
                                          clkt_dpll.o clkt_clksel.o
@@ -56,6 +56,7 @@ AFLAGS_sram34xx.o                     :=-Wa,-march=armv7-a
 # Restart code (OMAP4/5 currently in omap4-common.c)
 obj-$(CONFIG_SOC_OMAP2420)             += omap2-restart.o
 obj-$(CONFIG_SOC_OMAP2430)             += omap2-restart.o
+obj-$(CONFIG_SOC_AM33XX)               += am33xx-restart.o
 obj-$(CONFIG_ARCH_OMAP3)               += omap3-restart.o
 
 # Pin multiplexing
diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c
new file mode 100644 (file)
index 0000000..88e4fa8
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * am33xx-restart.c - Code common to all AM33xx machines.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+
+#include "common.h"
+#include "prm-regbits-33xx.h"
+#include "prm33xx.h"
+
+/**
+ * am3xx_restart - trigger a software restart of the SoC
+ * @mode: the "reboot mode", see arch/arm/kernel/{setup,process}.c
+ * @cmd: passed from the userspace program rebooting the system (if provided)
+ *
+ * Resets the SoC.  For @cmd, see the 'reboot' syscall in
+ * kernel/sys.c.  No return value.
+ */
+void am33xx_restart(char mode, const char *cmd)
+{
+       /* TODO: Handle mode and cmd if necessary */
+
+       am33xx_prm_rmw_reg_bits(AM33XX_GLOBAL_WARM_SW_RST_MASK,
+                               AM33XX_GLOBAL_WARM_SW_RST_MASK,
+                               AM33XX_PRM_DEVICE_MOD,
+                               AM33XX_PRM_RSTCTRL_OFFSET);
+
+       /* OCP barrier */
+       (void)am33xx_prm_read_reg(AM33XX_PRM_DEVICE_MOD,
+                                 AM33XX_PRM_RSTCTRL_OFFSET);
+}
index a00d391..25b79a2 100644 (file)
@@ -62,8 +62,7 @@ static int __init omap_davinci_emac_dev_init(struct omap_hwmod *oh,
 {
        struct platform_device *pdev;
 
-       pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len,
-                                false);
+       pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len);
        if (IS_ERR(pdev)) {
                WARN(1, "Can't build omap_device for %s:%s.\n",
                     oh->class->name, oh->name);
index 2590463..0274ff7 100644 (file)
@@ -140,6 +140,7 @@ DT_MACHINE_START(AM33XX_DT, "Generic AM33XX (Flattened Device Tree)")
        .init_machine   = omap_generic_init,
        .init_time      = omap3_am33xx_gptimer_timer_init,
        .dt_compat      = am33xx_boards_compat,
+       .restart        = am33xx_restart,
 MACHINE_END
 #endif
 
index ea64ad6..476b820 100644 (file)
@@ -284,9 +284,10 @@ DEFINE_STRUCT_CLK(dpll_disp_ck, dpll_core_ck_parents, dpll_ddr_ck_ops);
  * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
  * and ALT_CLK1/2)
  */
-DEFINE_CLK_DIVIDER(dpll_disp_m2_ck, "dpll_disp_ck", &dpll_disp_ck, 0x0,
-                  AM33XX_CM_DIV_M2_DPLL_DISP, AM33XX_DPLL_CLKOUT_DIV_SHIFT,
-                  AM33XX_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
+DEFINE_CLK_DIVIDER(dpll_disp_m2_ck, "dpll_disp_ck", &dpll_disp_ck,
+                  CLK_SET_RATE_PARENT, AM33XX_CM_DIV_M2_DPLL_DISP,
+                  AM33XX_DPLL_CLKOUT_DIV_SHIFT, AM33XX_DPLL_CLKOUT_DIV_WIDTH,
+                  CLK_DIVIDER_ONE_BASED, NULL);
 
 /* DPLL_PER */
 static struct dpll_data dpll_per_dd = {
@@ -723,7 +724,8 @@ static struct clk_hw_omap lcd_gclk_hw = {
        .clksel_mask    = AM33XX_CLKSEL_0_1_MASK,
 };
 
-DEFINE_STRUCT_CLK(lcd_gclk, lcd_ck_parents, gpio_fck_ops);
+DEFINE_STRUCT_CLK_FLAGS(lcd_gclk, lcd_ck_parents,
+                       gpio_fck_ops, CLK_SET_RATE_PARENT);
 
 DEFINE_CLK_FIXED_FACTOR(mmc_clk, "dpll_per_m2_ck", &dpll_per_m2_ck, 0x0, 1, 2);
 
index 6ef8758..4579c3c 100644 (file)
@@ -426,6 +426,7 @@ static struct clk dpll4_m5x2_ck_3630 = {
        .parent_names   = dpll4_m5x2_ck_parent_names,
        .num_parents    = ARRAY_SIZE(dpll4_m5x2_ck_parent_names),
        .ops            = &dpll4_m5x2_ck_3630_ops,
+       .flags          = CLK_SET_RATE_PARENT,
 };
 
 static struct clk cam_mclk;
@@ -443,7 +444,14 @@ static struct clk_hw_omap cam_mclk_hw = {
        .clkdm_name     = "cam_clkdm",
 };
 
-DEFINE_STRUCT_CLK(cam_mclk, cam_mclk_parent_names, aes2_ick_ops);
+static struct clk cam_mclk = {
+       .name           = "cam_mclk",
+       .hw             = &cam_mclk_hw.hw,
+       .parent_names   = cam_mclk_parent_names,
+       .num_parents    = ARRAY_SIZE(cam_mclk_parent_names),
+       .ops            = &aes2_ick_ops,
+       .flags          = CLK_SET_RATE_PARENT,
+};
 
 static const struct clksel_rate clkout2_src_core_rates[] = {
        { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
index cebe2b3..3d58f33 100644 (file)
@@ -605,15 +605,26 @@ static const char *dpll_usb_ck_parents[] = {
 
 static struct clk dpll_usb_ck;
 
+static const struct clk_ops dpll_usb_ck_ops = {
+       .enable         = &omap3_noncore_dpll_enable,
+       .disable        = &omap3_noncore_dpll_disable,
+       .recalc_rate    = &omap3_dpll_recalc,
+       .round_rate     = &omap2_dpll_round_rate,
+       .set_rate       = &omap3_noncore_dpll_set_rate,
+       .get_parent     = &omap2_init_dpll_parent,
+       .init           = &omap2_init_clk_clkdm,
+};
+
 static struct clk_hw_omap dpll_usb_ck_hw = {
        .hw = {
                .clk = &dpll_usb_ck,
        },
        .dpll_data      = &dpll_usb_dd,
+       .clkdm_name     = "l3_init_clkdm",
        .ops            = &clkhwops_omap3_dpll,
 };
 
-DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_ck_ops);
+DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_usb_ck_ops);
 
 static const char *dpll_usb_clkdcoldo_ck_parents[] = {
        "dpll_usb_ck",
index b402048..60ddd86 100644 (file)
@@ -65,6 +65,17 @@ struct clockdomain;
                .ops = &_clkops_name,                           \
        };
 
+#define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name,     \
+                               _clkops_name, _flags)           \
+       static struct clk _name = {                             \
+               .name = #_name,                                 \
+               .hw = &_name##_hw.hw,                           \
+               .parent_names = _parent_array_name,             \
+               .num_parents = ARRAY_SIZE(_parent_array_name),  \
+               .ops = &_clkops_name,                           \
+               .flags = _flags,                                \
+       };
+
 #define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name)          \
        static struct clk_hw_omap _name##_hw = {                \
                .hw = {                                         \
index 058ce3c..325a515 100644 (file)
@@ -241,9 +241,6 @@ int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout(_is_module_ready(inst, cdoffs, clkctrl_offs),
                          MAX_MODULE_READY_TIME, i);
 
index 5fa0b62..64f4baf 100644 (file)
 #ifndef __ARCH_ARM_MACH_OMAP2_CM_33XX_H
 #define __ARCH_ARM_MACH_OMAP2_CM_33XX_H
 
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
 #include "common.h"
 
 #include "cm.h"
 #include "cm-regbits-33xx.h"
-#include "cm33xx.h"
+#include "iomap.h"
 
 /* CM base address */
 #define AM33XX_CM_BASE         0x44e00000
 #define AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL                        AM33XX_CM_REGADDR(AM33XX_CM_CEFUSE_MOD, 0x0020)
 
 
+#ifndef __ASSEMBLER__
 extern bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs);
 extern void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs);
 extern void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs);
@@ -417,4 +413,5 @@ static inline int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs,
 }
 #endif
 
+#endif /* ASSEMBLER */
 #endif
index b435027..0a6b9c7 100644 (file)
@@ -119,6 +119,14 @@ static inline void omap2xxx_restart(char mode, const char *cmd)
 }
 #endif
 
+#ifdef CONFIG_SOC_AM33XX
+void am33xx_restart(char mode, const char *cmd);
+#else
+static inline void am33xx_restart(char mode, const char *cmd)
+{
+}
+#endif
+
 #ifdef CONFIG_ARCH_OMAP3
 void omap3xxx_restart(char mode, const char *cmd);
 #else
index 142d9c6..1ec7f05 100644 (file)
@@ -426,7 +426,7 @@ static void __init omap_init_hdmi_audio(void)
                return;
        }
 
-       pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0, 0);
+       pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);
        WARN(IS_ERR(pdev),
             "Can't build omap_device for omap-hdmi-audio-dai.\n");
 
index 0a02aab..3aed4b0 100644 (file)
@@ -500,8 +500,9 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
                if (dd->last_rounded_rate == 0)
                        return -EINVAL;
 
-               /* No freqsel on OMAP4 and OMAP3630 */
-               if (!cpu_is_omap44xx() && !cpu_is_omap3630()) {
+               /* No freqsel on AM335x, OMAP4 and OMAP3630 */
+               if (!soc_is_am33xx() && !cpu_is_omap44xx() &&
+                   !cpu_is_omap3630()) {
                        freqsel = _omap3_dpll_compute_freqsel(clk,
                                                dd->last_rounded_n);
                        WARN_ON(!freqsel);
index 45cc7ed..8a68f1e 100644 (file)
@@ -399,8 +399,18 @@ void __init omap3xxx_check_revision(void)
                }
                break;
        case 0xb944:
-               omap_revision = AM335X_REV_ES1_0;
-               cpu_rev = "1.0";
+               switch (rev) {
+               case 0:
+                       omap_revision = AM335X_REV_ES1_0;
+                       cpu_rev = "1.0";
+                       break;
+               case 1:
+               /* FALLTHROUGH */
+               default:
+                       omap_revision = AM335X_REV_ES2_0;
+                       cpu_rev = "2.0";
+                       break;
+               }
                break;
        case 0xb8f2:
                switch (rev) {
index a898498..c2c798c 100644 (file)
@@ -2054,6 +2054,23 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh)
        return oh->prcm.omap4.context_lost_counter;
 }
 
+/**
+ * _enable_preprogram - Pre-program an IP block during the _enable() process
+ * @oh: struct omap_hwmod *
+ *
+ * Some IP blocks (such as AESS) require some additional programming
+ * after enable before they can enter idle.  If a function pointer to
+ * do so is present in the hwmod data, then call it and pass along the
+ * return value; otherwise, return 0.
+ */
+static int __init _enable_preprogram(struct omap_hwmod *oh)
+{
+       if (!oh->class->enable_preprogram)
+               return 0;
+
+       return oh->class->enable_preprogram(oh);
+}
+
 /**
  * _enable - enable an omap_hwmod
  * @oh: struct omap_hwmod *
@@ -2160,6 +2177,7 @@ static int _enable(struct omap_hwmod *oh)
                                _update_sysc_cache(oh);
                        _enable_sysc(oh);
                }
+               r = _enable_preprogram(oh);
        } else {
                if (soc_ops.disable_module)
                        soc_ops.disable_module(oh);
@@ -3049,11 +3067,8 @@ static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
 static int _am33xx_deassert_hardreset(struct omap_hwmod *oh,
                                     struct omap_hwmod_rst_info *ohri)
 {
-       if (ohri->st_shift)
-               pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
-                      oh->name, ohri->name);
-
        return am33xx_prm_deassert_hardreset(ohri->rst_shift,
+                               ohri->st_shift,
                                oh->clkdm->pwrdm.ptr->prcm_offs,
                                oh->prcm.omap4.rstctrl_offs,
                                oh->prcm.omap4.rstst_offs);
index 80c00e7..d43d9b6 100644 (file)
@@ -510,6 +510,7 @@ struct omap_hwmod_omap4_prcm {
  * @rev: revision of the IP class
  * @pre_shutdown: ptr to fn to be executed immediately prior to device shutdown
  * @reset: ptr to fn to be executed in place of the standard hwmod reset fn
+ * @enable_preprogram:  ptr to fn to be executed during device enable
  *
  * Represent the class of a OMAP hardware "modules" (e.g. timer,
  * smartreflex, gpio, uart...)
@@ -533,6 +534,7 @@ struct omap_hwmod_class {
        u32                                     rev;
        int                                     (*pre_shutdown)(struct omap_hwmod *oh);
        int                                     (*reset)(struct omap_hwmod *oh);
+       int                                     (*enable_preprogram)(struct omap_hwmod *oh);
 };
 
 /**
@@ -679,6 +681,12 @@ extern void __init omap_hwmod_init(void);
 
 const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh);
 
+/*
+ *
+ */
+
+extern int omap_hwmod_aess_preprogram(struct omap_hwmod *oh);
+
 /*
  * Chip variant-specific hwmod init routines - XXX should be converted
  * to use initcalls once the initial boot ordering is straightened out
index 646c14d..26eee4a 100644 (file)
@@ -262,13 +262,15 @@ static struct omap_hwmod am33xx_wkup_m3_hwmod = {
        .name           = "wkup_m3",
        .class          = &am33xx_wkup_m3_hwmod_class,
        .clkdm_name     = "l4_wkup_aon_clkdm",
-       .flags          = HWMOD_INIT_NO_RESET,  /* Keep hardreset asserted */
+       /* Keep hardreset asserted */
+       .flags          = HWMOD_INIT_NO_RESET | HWMOD_NO_IDLEST,
        .mpu_irqs       = am33xx_wkup_m3_irqs,
        .main_clk       = "dpll_core_m4_div2_ck",
        .prcm           = {
                .omap4  = {
                        .clkctrl_offs   = AM33XX_CM_WKUP_WKUP_M3_CLKCTRL_OFFSET,
                        .rstctrl_offs   = AM33XX_RM_WKUP_RSTCTRL_OFFSET,
+                       .rstst_offs     = AM33XX_RM_WKUP_RSTST_OFFSET,
                        .modulemode     = MODULEMODE_SWCTRL,
                },
        },
@@ -414,7 +416,6 @@ static struct omap_hwmod am33xx_adc_tsc_hwmod = {
  *    - cEFUSE (doesn't fall under any ocp_if)
  *    - clkdiv32k
  *    - debugss
- *    - ocmc ram
  *    - ocp watch point
  *    - aes0
  *    - sha0
@@ -481,25 +482,6 @@ static struct omap_hwmod am33xx_debugss_hwmod = {
        },
 };
 
-/* ocmcram */
-static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
-       .name = "ocmcram",
-};
-
-static struct omap_hwmod am33xx_ocmcram_hwmod = {
-       .name           = "ocmcram",
-       .class          = &am33xx_ocmcram_hwmod_class,
-       .clkdm_name     = "l3_clkdm",
-       .flags          = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
-       .main_clk       = "l3_gclk",
-       .prcm           = {
-               .omap4  = {
-                       .clkctrl_offs   = AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET,
-                       .modulemode     = MODULEMODE_SWCTRL,
-               },
-       },
-};
-
 /* ocpwp */
 static struct omap_hwmod_class am33xx_ocpwp_hwmod_class = {
        .name           = "ocpwp",
@@ -570,6 +552,25 @@ static struct omap_hwmod am33xx_sha0_hwmod = {
 
 #endif
 
+/* ocmcram */
+static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
+       .name = "ocmcram",
+};
+
+static struct omap_hwmod am33xx_ocmcram_hwmod = {
+       .name           = "ocmcram",
+       .class          = &am33xx_ocmcram_hwmod_class,
+       .clkdm_name     = "l3_clkdm",
+       .flags          = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+       .main_clk       = "l3_gclk",
+       .prcm           = {
+               .omap4  = {
+                       .clkctrl_offs   = AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET,
+                       .modulemode     = MODULEMODE_SWCTRL,
+               },
+       },
+};
+
 /* 'smartreflex' class */
 static struct omap_hwmod_class am33xx_smartreflex_hwmod_class = {
        .name           = "smartreflex",
@@ -783,9 +784,7 @@ static struct omap_hwmod am33xx_elm_hwmod = {
        },
 };
 
-/*
- * 'epwmss' class: ecap0,1,2,  ehrpwm0,1,2
- */
+/* pwmss  */
 static struct omap_hwmod_class_sysconfig am33xx_epwmss_sysc = {
        .rev_offs       = 0x0,
        .sysc_offs      = 0x4,
@@ -801,18 +800,23 @@ static struct omap_hwmod_class am33xx_epwmss_hwmod_class = {
        .sysc           = &am33xx_epwmss_sysc,
 };
 
-/* ehrpwm0 */
-static struct omap_hwmod_irq_info am33xx_ehrpwm0_irqs[] = {
-       { .name = "int", .irq = 86 + OMAP_INTC_START, },
-       { .name = "tzint", .irq = 58 + OMAP_INTC_START, },
-       { .irq = -1 },
+static struct omap_hwmod_class am33xx_ecap_hwmod_class = {
+       .name           = "ecap",
 };
 
-static struct omap_hwmod am33xx_ehrpwm0_hwmod = {
-       .name           = "ehrpwm0",
+static struct omap_hwmod_class am33xx_eqep_hwmod_class = {
+       .name           = "eqep",
+};
+
+static struct omap_hwmod_class am33xx_ehrpwm_hwmod_class = {
+       .name           = "ehrpwm",
+};
+
+/* epwmss0 */
+static struct omap_hwmod am33xx_epwmss0_hwmod = {
+       .name           = "epwmss0",
        .class          = &am33xx_epwmss_hwmod_class,
        .clkdm_name     = "l4ls_clkdm",
-       .mpu_irqs       = am33xx_ehrpwm0_irqs,
        .main_clk       = "l4ls_gclk",
        .prcm           = {
                .omap4  = {
@@ -822,63 +826,58 @@ static struct omap_hwmod am33xx_ehrpwm0_hwmod = {
        },
 };
 
-/* ehrpwm1 */
-static struct omap_hwmod_irq_info am33xx_ehrpwm1_irqs[] = {
-       { .name = "int", .irq = 87 + OMAP_INTC_START, },
-       { .name = "tzint", .irq = 59 + OMAP_INTC_START, },
+/* ecap0 */
+static struct omap_hwmod_irq_info am33xx_ecap0_irqs[] = {
+       { .irq = 31 + OMAP_INTC_START, },
        { .irq = -1 },
 };
 
-static struct omap_hwmod am33xx_ehrpwm1_hwmod = {
-       .name           = "ehrpwm1",
-       .class          = &am33xx_epwmss_hwmod_class,
+static struct omap_hwmod am33xx_ecap0_hwmod = {
+       .name           = "ecap0",
+       .class          = &am33xx_ecap_hwmod_class,
        .clkdm_name     = "l4ls_clkdm",
-       .mpu_irqs       = am33xx_ehrpwm1_irqs,
+       .mpu_irqs       = am33xx_ecap0_irqs,
        .main_clk       = "l4ls_gclk",
-       .prcm           = {
-               .omap4  = {
-                       .clkctrl_offs   = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
-                       .modulemode     = MODULEMODE_SWCTRL,
-               },
-       },
 };
 
-/* ehrpwm2 */
-static struct omap_hwmod_irq_info am33xx_ehrpwm2_irqs[] = {
-       { .name = "int", .irq = 39 + OMAP_INTC_START, },
-       { .name = "tzint", .irq = 60 + OMAP_INTC_START, },
+/* eqep0 */
+static struct omap_hwmod_irq_info am33xx_eqep0_irqs[] = {
+       { .irq = 79 + OMAP_INTC_START, },
        { .irq = -1 },
 };
 
-static struct omap_hwmod am33xx_ehrpwm2_hwmod = {
-       .name           = "ehrpwm2",
-       .class          = &am33xx_epwmss_hwmod_class,
+static struct omap_hwmod am33xx_eqep0_hwmod = {
+       .name           = "eqep0",
+       .class          = &am33xx_eqep_hwmod_class,
        .clkdm_name     = "l4ls_clkdm",
-       .mpu_irqs       = am33xx_ehrpwm2_irqs,
+       .mpu_irqs       = am33xx_eqep0_irqs,
        .main_clk       = "l4ls_gclk",
-       .prcm           = {
-               .omap4  = {
-                       .clkctrl_offs   = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
-                       .modulemode     = MODULEMODE_SWCTRL,
-               },
-       },
 };
 
-/* ecap0 */
-static struct omap_hwmod_irq_info am33xx_ecap0_irqs[] = {
-       { .irq = 31 + OMAP_INTC_START, },
+/* ehrpwm0 */
+static struct omap_hwmod_irq_info am33xx_ehrpwm0_irqs[] = {
+       { .name = "int", .irq = 86 + OMAP_INTC_START, },
+       { .name = "tzint", .irq = 58 + OMAP_INTC_START, },
        { .irq = -1 },
 };
 
-static struct omap_hwmod am33xx_ecap0_hwmod = {
-       .name           = "ecap0",
+static struct omap_hwmod am33xx_ehrpwm0_hwmod = {
+       .name           = "ehrpwm0",
+       .class          = &am33xx_ehrpwm_hwmod_class,
+       .clkdm_name     = "l4ls_clkdm",
+       .mpu_irqs       = am33xx_ehrpwm0_irqs,
+       .main_clk       = "l4ls_gclk",
+};
+
+/* epwmss1 */
+static struct omap_hwmod am33xx_epwmss1_hwmod = {
+       .name           = "epwmss1",
        .class          = &am33xx_epwmss_hwmod_class,
        .clkdm_name     = "l4ls_clkdm",
-       .mpu_irqs       = am33xx_ecap0_irqs,
        .main_clk       = "l4ls_gclk",
        .prcm           = {
                .omap4  = {
-                       .clkctrl_offs   = AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET,
+                       .clkctrl_offs   = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
                        .modulemode     = MODULEMODE_SWCTRL,
                },
        },
@@ -892,13 +891,50 @@ static struct omap_hwmod_irq_info am33xx_ecap1_irqs[] = {
 
 static struct omap_hwmod am33xx_ecap1_hwmod = {
        .name           = "ecap1",
-       .class          = &am33xx_epwmss_hwmod_class,
+       .class          = &am33xx_ecap_hwmod_class,
        .clkdm_name     = "l4ls_clkdm",
        .mpu_irqs       = am33xx_ecap1_irqs,
        .main_clk       = "l4ls_gclk",
+};
+
+/* eqep1 */
+static struct omap_hwmod_irq_info am33xx_eqep1_irqs[] = {
+       { .irq = 88 + OMAP_INTC_START, },
+       { .irq = -1 },
+};
+
+static struct omap_hwmod am33xx_eqep1_hwmod = {
+       .name           = "eqep1",
+       .class          = &am33xx_eqep_hwmod_class,
+       .clkdm_name     = "l4ls_clkdm",
+       .mpu_irqs       = am33xx_eqep1_irqs,
+       .main_clk       = "l4ls_gclk",
+};
+
+/* ehrpwm1 */
+static struct omap_hwmod_irq_info am33xx_ehrpwm1_irqs[] = {
+       { .name = "int", .irq = 87 + OMAP_INTC_START, },
+       { .name = "tzint", .irq = 59 + OMAP_INTC_START, },
+       { .irq = -1 },
+};
+
+static struct omap_hwmod am33xx_ehrpwm1_hwmod = {
+       .name           = "ehrpwm1",
+       .class          = &am33xx_ehrpwm_hwmod_class,
+       .clkdm_name     = "l4ls_clkdm",
+       .mpu_irqs       = am33xx_ehrpwm1_irqs,
+       .main_clk       = "l4ls_gclk",
+};
+
+/* epwmss2 */
+static struct omap_hwmod am33xx_epwmss2_hwmod = {
+       .name           = "epwmss2",
+       .class          = &am33xx_epwmss_hwmod_class,
+       .clkdm_name     = "l4ls_clkdm",
+       .main_clk       = "l4ls_gclk",
        .prcm           = {
                .omap4  = {
-                       .clkctrl_offs   = AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET,
+                       .clkctrl_offs   = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
                        .modulemode     = MODULEMODE_SWCTRL,
                },
        },
@@ -912,16 +948,39 @@ static struct omap_hwmod_irq_info am33xx_ecap2_irqs[] = {
 
 static struct omap_hwmod am33xx_ecap2_hwmod = {
        .name           = "ecap2",
+       .class          = &am33xx_ecap_hwmod_class,
+       .clkdm_name     = "l4ls_clkdm",
        .mpu_irqs       = am33xx_ecap2_irqs,
-       .class          = &am33xx_epwmss_hwmod_class,
+       .main_clk       = "l4ls_gclk",
+};
+
+/* eqep2 */
+static struct omap_hwmod_irq_info am33xx_eqep2_irqs[] = {
+       { .irq = 89 + OMAP_INTC_START, },
+       { .irq = -1 },
+};
+
+static struct omap_hwmod am33xx_eqep2_hwmod = {
+       .name           = "eqep2",
+       .class          = &am33xx_eqep_hwmod_class,
        .clkdm_name     = "l4ls_clkdm",
+       .mpu_irqs       = am33xx_eqep2_irqs,
+       .main_clk       = "l4ls_gclk",
+};
+
+/* ehrpwm2 */
+static struct omap_hwmod_irq_info am33xx_ehrpwm2_irqs[] = {
+       { .name = "int", .irq = 39 + OMAP_INTC_START, },
+       { .name = "tzint", .irq = 60 + OMAP_INTC_START, },
+       { .irq = -1 },
+};
+
+static struct omap_hwmod am33xx_ehrpwm2_hwmod = {
+       .name           = "ehrpwm2",
+       .class          = &am33xx_ehrpwm_hwmod_class,
+       .clkdm_name     = "l4ls_clkdm",
+       .mpu_irqs       = am33xx_ehrpwm2_irqs,
        .main_clk       = "l4ls_gclk",
-       .prcm           = {
-               .omap4  = {
-                       .clkctrl_offs   = AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET,
-                       .modulemode     = MODULEMODE_SWCTRL,
-               },
-       },
 };
 
 /*
@@ -1824,6 +1883,7 @@ static struct omap_hwmod am33xx_tptc0_hwmod = {
        .class          = &am33xx_tptc_hwmod_class,
        .clkdm_name     = "l3_clkdm",
        .mpu_irqs       = am33xx_tptc0_irqs,
+       .flags          = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
        .main_clk       = "l3_gclk",
        .prcm           = {
                .omap4  = {
@@ -2496,7 +2556,6 @@ static struct omap_hwmod_addr_space am33xx_cpgmac0_addr_space[] = {
        {
                .pa_start       = 0x4a100000,
                .pa_end         = 0x4a100000 + SZ_2K - 1,
-               .flags          = ADDR_TYPE_RT,
        },
        /* cpsw wr */
        {
@@ -2547,162 +2606,202 @@ static struct omap_hwmod_ocp_if am33xx_l4_ls__elm = {
        .user           = OCP_USER_MPU,
 };
 
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ehrpwm0_addr_space[] = {
+static struct omap_hwmod_addr_space am33xx_epwmss0_addr_space[] = {
        {
                .pa_start       = 0x48300000,
                .pa_end         = 0x48300000 + SZ_16 - 1,
                .flags          = ADDR_TYPE_RT
        },
-       {
-               .pa_start       = 0x48300200,
-               .pa_end         = 0x48300200 + SZ_256 - 1,
-               .flags          = ADDR_TYPE_RT
-       },
        { }
 };
 
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm0 = {
+static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0 = {
        .master         = &am33xx_l4_ls_hwmod,
-       .slave          = &am33xx_ehrpwm0_hwmod,
+       .slave          = &am33xx_epwmss0_hwmod,
        .clk            = "l4ls_gclk",
-       .addr           = am33xx_ehrpwm0_addr_space,
+       .addr           = am33xx_epwmss0_addr_space,
        .user           = OCP_USER_MPU,
 };
 
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ehrpwm1_addr_space[] = {
-       {
-               .pa_start       = 0x48302000,
-               .pa_end         = 0x48302000 + SZ_16 - 1,
-               .flags          = ADDR_TYPE_RT
-       },
+static struct omap_hwmod_addr_space am33xx_ecap0_addr_space[] = {
        {
-               .pa_start       = 0x48302200,
-               .pa_end         = 0x48302200 + SZ_256 - 1,
-               .flags          = ADDR_TYPE_RT
+               .pa_start       = 0x48300100,
+               .pa_end         = 0x48300100 + SZ_128 - 1,
        },
        { }
 };
 
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm1 = {
-       .master         = &am33xx_l4_ls_hwmod,
-       .slave          = &am33xx_ehrpwm1_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss0__ecap0 = {
+       .master         = &am33xx_epwmss0_hwmod,
+       .slave          = &am33xx_ecap0_hwmod,
        .clk            = "l4ls_gclk",
-       .addr           = am33xx_ehrpwm1_addr_space,
+       .addr           = am33xx_ecap0_addr_space,
        .user           = OCP_USER_MPU,
 };
 
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ehrpwm2_addr_space[] = {
+static struct omap_hwmod_addr_space am33xx_eqep0_addr_space[] = {
        {
-               .pa_start       = 0x48304000,
-               .pa_end         = 0x48304000 + SZ_16 - 1,
-               .flags          = ADDR_TYPE_RT
-       },
-       {
-               .pa_start       = 0x48304200,
-               .pa_end         = 0x48304200 + SZ_256 - 1,
-               .flags          = ADDR_TYPE_RT
+               .pa_start       = 0x48300180,
+               .pa_end         = 0x48300180 + SZ_128 - 1,
        },
        { }
 };
 
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ehrpwm2 = {
-       .master         = &am33xx_l4_ls_hwmod,
-       .slave          = &am33xx_ehrpwm2_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss0__eqep0 = {
+       .master         = &am33xx_epwmss0_hwmod,
+       .slave          = &am33xx_eqep0_hwmod,
        .clk            = "l4ls_gclk",
-       .addr           = am33xx_ehrpwm2_addr_space,
+       .addr           = am33xx_eqep0_addr_space,
        .user           = OCP_USER_MPU,
 };
 
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ecap0_addr_space[] = {
-       {
-               .pa_start       = 0x48300000,
-               .pa_end         = 0x48300000 + SZ_16 - 1,
-               .flags          = ADDR_TYPE_RT
-       },
+static struct omap_hwmod_addr_space am33xx_ehrpwm0_addr_space[] = {
        {
-               .pa_start       = 0x48300100,
-               .pa_end         = 0x48300100 + SZ_256 - 1,
-               .flags          = ADDR_TYPE_RT
+               .pa_start       = 0x48300200,
+               .pa_end         = 0x48300200 + SZ_128 - 1,
        },
        { }
 };
 
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap0 = {
-       .master         = &am33xx_l4_ls_hwmod,
-       .slave          = &am33xx_ecap0_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss0__ehrpwm0 = {
+       .master         = &am33xx_epwmss0_hwmod,
+       .slave          = &am33xx_ehrpwm0_hwmod,
        .clk            = "l4ls_gclk",
-       .addr           = am33xx_ecap0_addr_space,
+       .addr           = am33xx_ehrpwm0_addr_space,
        .user           = OCP_USER_MPU,
 };
 
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ecap1_addr_space[] = {
+
+static struct omap_hwmod_addr_space am33xx_epwmss1_addr_space[] = {
        {
                .pa_start       = 0x48302000,
                .pa_end         = 0x48302000 + SZ_16 - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1 = {
+       .master         = &am33xx_l4_ls_hwmod,
+       .slave          = &am33xx_epwmss1_hwmod,
+       .clk            = "l4ls_gclk",
+       .addr           = am33xx_epwmss1_addr_space,
+       .user           = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_ecap1_addr_space[] = {
        {
                .pa_start       = 0x48302100,
-               .pa_end         = 0x48302100 + SZ_256 - 1,
-               .flags          = ADDR_TYPE_RT
+               .pa_end         = 0x48302100 + SZ_128 - 1,
        },
        { }
 };
 
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap1 = {
-       .master         = &am33xx_l4_ls_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss1__ecap1 = {
+       .master         = &am33xx_epwmss1_hwmod,
        .slave          = &am33xx_ecap1_hwmod,
        .clk            = "l4ls_gclk",
        .addr           = am33xx_ecap1_addr_space,
        .user           = OCP_USER_MPU,
 };
 
-/*
- * Splitting the resources to handle access of PWMSS config space
- * and module specific part independently
- */
-static struct omap_hwmod_addr_space am33xx_ecap2_addr_space[] = {
+static struct omap_hwmod_addr_space am33xx_eqep1_addr_space[] = {
+       {
+               .pa_start       = 0x48302180,
+               .pa_end         = 0x48302180 + SZ_128 - 1,
+       },
+       { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_epwmss1__eqep1 = {
+       .master         = &am33xx_epwmss1_hwmod,
+       .slave          = &am33xx_eqep1_hwmod,
+       .clk            = "l4ls_gclk",
+       .addr           = am33xx_eqep1_addr_space,
+       .user           = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_ehrpwm1_addr_space[] = {
+       {
+               .pa_start       = 0x48302200,
+               .pa_end         = 0x48302200 + SZ_128 - 1,
+       },
+       { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_epwmss1__ehrpwm1 = {
+       .master         = &am33xx_epwmss1_hwmod,
+       .slave          = &am33xx_ehrpwm1_hwmod,
+       .clk            = "l4ls_gclk",
+       .addr           = am33xx_ehrpwm1_addr_space,
+       .user           = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_epwmss2_addr_space[] = {
        {
                .pa_start       = 0x48304000,
                .pa_end         = 0x48304000 + SZ_16 - 1,
                .flags          = ADDR_TYPE_RT
        },
+       { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2 = {
+       .master         = &am33xx_l4_ls_hwmod,
+       .slave          = &am33xx_epwmss2_hwmod,
+       .clk            = "l4ls_gclk",
+       .addr           = am33xx_epwmss2_addr_space,
+       .user           = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_ecap2_addr_space[] = {
        {
                .pa_start       = 0x48304100,
-               .pa_end         = 0x48304100 + SZ_256 - 1,
-               .flags          = ADDR_TYPE_RT
+               .pa_end         = 0x48304100 + SZ_128 - 1,
        },
        { }
 };
 
-static struct omap_hwmod_ocp_if am33xx_l4_ls__ecap2 = {
-       .master         = &am33xx_l4_ls_hwmod,
+static struct omap_hwmod_ocp_if am33xx_epwmss2__ecap2 = {
+       .master         = &am33xx_epwmss2_hwmod,
        .slave          = &am33xx_ecap2_hwmod,
        .clk            = "l4ls_gclk",
        .addr           = am33xx_ecap2_addr_space,
        .user           = OCP_USER_MPU,
 };
 
+static struct omap_hwmod_addr_space am33xx_eqep2_addr_space[] = {
+       {
+               .pa_start       = 0x48304180,
+               .pa_end         = 0x48304180 + SZ_128 - 1,
+       },
+       { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_epwmss2__eqep2 = {
+       .master         = &am33xx_epwmss2_hwmod,
+       .slave          = &am33xx_eqep2_hwmod,
+       .clk            = "l4ls_gclk",
+       .addr           = am33xx_eqep2_addr_space,
+       .user           = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am33xx_ehrpwm2_addr_space[] = {
+       {
+               .pa_start       = 0x48304200,
+               .pa_end         = 0x48304200 + SZ_128 - 1,
+       },
+       { }
+};
+
+static struct omap_hwmod_ocp_if am33xx_epwmss2__ehrpwm2 = {
+       .master         = &am33xx_epwmss2_hwmod,
+       .slave          = &am33xx_ehrpwm2_hwmod,
+       .clk            = "l4ls_gclk",
+       .addr           = am33xx_ehrpwm2_addr_space,
+       .user           = OCP_USER_MPU,
+};
+
 /* l3s cfg -> gpmc */
 static struct omap_hwmod_addr_space am33xx_gpmc_addr_space[] = {
        {
@@ -3328,6 +3427,13 @@ static struct omap_hwmod_ocp_if am33xx_l3_s__usbss = {
        .flags          = OCPIF_SWSUP_IDLE,
 };
 
+/* l3 main -> ocmc */
+static struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = {
+       .master         = &am33xx_l3_main_hwmod,
+       .slave          = &am33xx_ocmcram_hwmod,
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
        &am33xx_l4_fw__emif_fw,
        &am33xx_l3_main__emif,
@@ -3385,12 +3491,18 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
        &am33xx_l4_ls__uart6,
        &am33xx_l4_ls__spinlock,
        &am33xx_l4_ls__elm,
-       &am33xx_l4_ls__ehrpwm0,
-       &am33xx_l4_ls__ehrpwm1,
-       &am33xx_l4_ls__ehrpwm2,
-       &am33xx_l4_ls__ecap0,
-       &am33xx_l4_ls__ecap1,
-       &am33xx_l4_ls__ecap2,
+       &am33xx_l4_ls__epwmss0,
+       &am33xx_epwmss0__ecap0,
+       &am33xx_epwmss0__eqep0,
+       &am33xx_epwmss0__ehrpwm0,
+       &am33xx_l4_ls__epwmss1,
+       &am33xx_epwmss1__ecap1,
+       &am33xx_epwmss1__eqep1,
+       &am33xx_epwmss1__ehrpwm1,
+       &am33xx_l4_ls__epwmss2,
+       &am33xx_epwmss2__ecap2,
+       &am33xx_epwmss2__eqep2,
+       &am33xx_epwmss2__ehrpwm2,
        &am33xx_l3_s__gpmc,
        &am33xx_l3_main__lcdc,
        &am33xx_l4_ls__mcspi0,
@@ -3398,6 +3510,7 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
        &am33xx_l3_main__tptc0,
        &am33xx_l3_main__tptc1,
        &am33xx_l3_main__tptc2,
+       &am33xx_l3_main__ocmc,
        &am33xx_l3_s__usbss,
        &am33xx_l4_hs__cpgmac0,
        &am33xx_cpgmac0__mdio,
index 8bb2628..ac7e03e 100644 (file)
@@ -3493,7 +3493,12 @@ static struct omap_hwmod am35xx_emac_hwmod = {
        .name           = "davinci_emac",
        .mpu_irqs       = am35xx_emac_mpu_irqs,
        .class          = &am35xx_emac_class,
-       .flags          = HWMOD_NO_IDLEST,
+       /*
+        * According to Mark Greer, the MPU will not return from WFI
+        * when the EMAC signals an interrupt.
+        * http://www.spinics.net/lists/arm-kernel/msg174734.html
+        */
+       .flags          = (HWMOD_NO_IDLEST | HWMOD_BLOCK_WFI),
 };
 
 /* l3_core -> davinci emac interface */
index 7ec1083..0e47d2e 100644 (file)
@@ -322,6 +322,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = {
 static struct omap_hwmod_class omap44xx_aess_hwmod_class = {
        .name   = "aess",
        .sysc   = &omap44xx_aess_sysc,
+       .enable_preprogram = omap_hwmod_aess_preprogram,
 };
 
 /* aess */
@@ -348,7 +349,7 @@ static struct omap_hwmod omap44xx_aess_hwmod = {
        .clkdm_name     = "abe_clkdm",
        .mpu_irqs       = omap44xx_aess_irqs,
        .sdma_reqs      = omap44xx_aess_sdma_reqs,
-       .main_clk       = "aess_fck",
+       .main_clk       = "aess_fclk",
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET,
@@ -4241,6 +4242,27 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ocp_wp_noc = {
 
 static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
        {
+               .name           = "dmem",
+               .pa_start       = 0x40180000,
+               .pa_end         = 0x4018ffff
+       },
+       {
+               .name           = "cmem",
+               .pa_start       = 0x401a0000,
+               .pa_end         = 0x401a1fff
+       },
+       {
+               .name           = "smem",
+               .pa_start       = 0x401c0000,
+               .pa_end         = 0x401c5fff
+       },
+       {
+               .name           = "pmem",
+               .pa_start       = 0x401e0000,
+               .pa_end         = 0x401e1fff
+       },
+       {
+               .name           = "mpu",
                .pa_start       = 0x401f1000,
                .pa_end         = 0x401f13ff,
                .flags          = ADDR_TYPE_RT
@@ -4259,6 +4281,27 @@ static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
 
 static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
        {
+               .name           = "dmem_dma",
+               .pa_start       = 0x49080000,
+               .pa_end         = 0x4908ffff
+       },
+       {
+               .name           = "cmem_dma",
+               .pa_start       = 0x490a0000,
+               .pa_end         = 0x490a1fff
+       },
+       {
+               .name           = "smem_dma",
+               .pa_start       = 0x490c0000,
+               .pa_end         = 0x490c5fff
+       },
+       {
+               .name           = "pmem_dma",
+               .pa_start       = 0x490e0000,
+               .pa_end         = 0x490e1fff
+       },
+       {
+               .name           = "dma",
                .pa_start       = 0x490f1000,
                .pa_end         = 0x490f13ff,
                .flags          = ADDR_TYPE_RT
@@ -6268,7 +6311,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l3_main_1__l3_main_3,
        &omap44xx_l3_main_2__l3_main_3,
        &omap44xx_l4_cfg__l3_main_3,
-       /* &omap44xx_aess__l4_abe, */
+       &omap44xx_aess__l4_abe,
        &omap44xx_dsp__l4_abe,
        &omap44xx_l3_main_1__l4_abe,
        &omap44xx_mpu__l4_abe,
@@ -6277,8 +6320,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_cfg__l4_wkup,
        &omap44xx_mpu__mpu_private,
        &omap44xx_l4_cfg__ocp_wp_noc,
-       /* &omap44xx_l4_abe__aess, */
-       /* &omap44xx_l4_abe__aess_dma, */
+       &omap44xx_l4_abe__aess,
+       &omap44xx_l4_abe__aess_dma,
        &omap44xx_l3_main_2__c2c,
        &omap44xx_l4_wkup__counter_32k,
        &omap44xx_l4_cfg__ctrl_module_core,
diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c
new file mode 100644 (file)
index 0000000..65e186c
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * OMAP IP block custom reset and preprogramming stubs
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Paul Walmsley
+ *
+ * A small number of IP blocks need custom reset and preprogramming
+ * functions.  The stubs in this file provide a standard way for the
+ * hwmod code to call these functions, which are to be located under
+ * drivers/.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include <sound/aess.h>
+
+#include "omap_hwmod.h"
+
+/**
+ * omap_hwmod_aess_preprogram - enable AESS internal autogating
+ * @oh: struct omap_hwmod *
+ *
+ * The AESS will not IdleAck to the PRCM until its internal autogating
+ * is enabled.  Since internal autogating is disabled by default after
+ * AESS reset, we must enable autogating after the hwmod code resets
+ * the AESS.  Returns 0.
+ */
+int omap_hwmod_aess_preprogram(struct omap_hwmod *oh)
+{
+       void __iomem *va;
+
+       va = omap_hwmod_get_mpu_rt_va(oh);
+       if (!va)
+               return -EINVAL;
+
+       aess_enable_autogating(va);
+
+       return 0;
+}
index cd6682d..673a4c1 100644 (file)
@@ -282,19 +282,19 @@ int __init omap2_common_pm_late_init(void)
         * a completely different mechanism.
         * Disable this part if a DT blob is available.
         */
-       if (of_have_populated_dt())
-               return 0;
+       if (!of_have_populated_dt()) {
 
-       /* Init the voltage layer */
-       omap_pmic_late_init();
-       omap_voltage_late_init();
+               /* Init the voltage layer */
+               omap_pmic_late_init();
+               omap_voltage_late_init();
 
-       /* Initialize the voltages */
-       omap3_init_voltages();
-       omap4_init_voltages();
+               /* Initialize the voltages */
+               omap3_init_voltages();
+               omap4_init_voltages();
 
-       /* Smartreflex device init */
-       omap_devinit_smartreflex();
+               /* Smartreflex device init */
+               omap_devinit_smartreflex();
+       }
 
 #ifdef CONFIG_SUSPEND
        suspend_set_ops(&omap_pm_ops);
index b2a4df6..b59d939 100644 (file)
@@ -54,7 +54,6 @@
 #include "powerdomain.h"
 #include "clockdomain.h"
 
-static void (*omap2_sram_idle)(void);
 static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl,
                                  void __iomem *sdrc_power);
 
@@ -163,6 +162,8 @@ static int omap2_allow_mpu_retention(void)
 
 static void omap2_enter_mpu_retention(void)
 {
+       const int zero = 0;
+
        /* The peripherals seem not to be able to wake up the MPU when
         * it is in retention mode. */
        if (omap2_allow_mpu_retention()) {
@@ -179,7 +180,8 @@ static void omap2_enter_mpu_retention(void)
                pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
        }
 
-       omap2_sram_idle();
+       /* WFI */
+       asm("mcr p15, 0, %0, c7, c0, 4" : : "r" (zero) : "memory", "cc");
 
        pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
 }
@@ -333,11 +335,9 @@ int __init omap2_pm_init(void)
        /*
         * We copy the assembler sleep/wakeup routines to SRAM.
         * These routines need to be in SRAM as that's the only
-        * memory the MPU can see when it wakes up.
+        * memory the MPU can see when it wakes up after the entire
+        * chip enters idle.
         */
-       omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
-                                        omap24xx_idle_loop_suspend_sz);
-
        omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
                                            omap24xx_cpu_suspend_sz);
 
index aa6fd98..ea62e75 100644 (file)
@@ -77,10 +77,20 @@ static int omap4_pm_suspend(void)
                omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
                pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
        }
-       if (ret)
+       if (ret) {
                pr_crit("Could not enter target state in pm_suspend\n");
-       else
+               /*
+                * OMAP4 chip PM currently works only with certain (newer)
+                * versions of bootloaders. This is due to missing code in the
+                * kernel to properly reset and initialize some devices.
+                * Warn the user about the bootloader version being one of the
+                * possible causes.
+                * http://www.spinics.net/lists/arm-kernel/msg218641.html
+                */
+               pr_warn("A possible cause could be an old bootloader - try u-boot >= v2012.07\n");
+       } else {
                pr_info("Successfully put all powerdomains to target state\n");
+       }
 
        return 0;
 }
@@ -146,6 +156,13 @@ int __init omap4_pm_init(void)
        }
 
        pr_err("Power Management for TI OMAP4.\n");
+       /*
+        * OMAP4 chip PM currently works only with certain (newer)
+        * versions of bootloaders. This is due to missing code in the
+        * kernel to properly reset and initialize some devices.
+        * http://www.spinics.net/lists/arm-kernel/msg218641.html
+        */
+       pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
 
        ret = pwrdm_for_each(pwrdms_setup, NULL);
        if (ret) {
index 1ac7388..44c0d72 100644 (file)
@@ -110,11 +110,11 @@ int am33xx_prm_assert_hardreset(u8 shift, s16 inst, u16 rstctrl_offs)
  * -EINVAL upon an argument error, -EEXIST if the submodule was already out
  * of reset, or -EBUSY if the submodule did not exit reset promptly.
  */
-int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,
+int am33xx_prm_deassert_hardreset(u8 shift, u8 st_shift, s16 inst,
                u16 rstctrl_offs, u16 rstst_offs)
 {
        int c;
-       u32 mask = 1 << shift;
+       u32 mask = 1 << st_shift;
 
        /* Check the current status to avoid  de-asserting the line twice */
        if (am33xx_prm_is_hardreset_asserted(shift, inst, rstctrl_offs) == 0)
@@ -122,11 +122,14 @@ int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,
 
        /* Clear the reset status by writing 1 to the status bit */
        am33xx_prm_rmw_reg_bits(0xffffffff, mask, inst, rstst_offs);
+
        /* de-assert the reset control line */
+       mask = 1 << shift;
+
        am33xx_prm_rmw_reg_bits(mask, 0, inst, rstctrl_offs);
-       /* wait the status to be set */
 
-       omap_test_timeout(am33xx_prm_is_hardreset_asserted(shift, inst,
+       /* wait the status to be set */
+       omap_test_timeout(am33xx_prm_is_hardreset_asserted(st_shift, inst,
                                                           rstst_offs),
                          MAX_MODULE_HARDRESET_WAIT, c);
 
index 3f25c56..9b9918d 100644 (file)
 #define AM33XX_PM_CEFUSE_PWRSTST_OFFSET                0x0004
 #define AM33XX_PM_CEFUSE_PWRSTST               AM33XX_PRM_REGADDR(AM33XX_PRM_CEFUSE_MOD, 0x0004)
 
+#ifndef __ASSEMBLER__
 extern u32 am33xx_prm_read_reg(s16 inst, u16 idx);
 extern void am33xx_prm_write_reg(u32 val, s16 inst, u16 idx);
 extern u32 am33xx_prm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx);
@@ -124,6 +125,7 @@ extern void am33xx_prm_global_warm_sw_reset(void);
 extern int am33xx_prm_is_hardreset_asserted(u8 shift, s16 inst,
                u16 rstctrl_offs);
 extern int am33xx_prm_assert_hardreset(u8 shift, s16 inst, u16 rstctrl_offs);
-extern int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,
+extern int am33xx_prm_deassert_hardreset(u8 shift, u8 st_shift, s16 inst,
                u16 rstctrl_offs, u16 rstst_offs);
+#endif /* ASSEMBLER */
 #endif
index ce0ccd2..1d3cb25 100644 (file)
 
        .text
 
-/*
- * Forces OMAP into idle state
- *
- * omap24xx_idle_loop_suspend() - This bit of code just executes the WFI
- * for normal idles.
- *
- * Note: This code get's copied to internal SRAM at boot. When the OMAP
- *      wakes up it continues execution at the point it went to sleep.
- */
-       .align  3
-ENTRY(omap24xx_idle_loop_suspend)
-       stmfd   sp!, {r0, lr}           @ save registers on stack
-       mov     r0, #0                  @ clear for mcr setup
-       mcr     p15, 0, r0, c7, c0, 4   @ wait for interrupt
-       ldmfd   sp!, {r0, pc}           @ restore regs and return
-
-ENTRY(omap24xx_idle_loop_suspend_sz)
-       .word   . - omap24xx_idle_loop_suspend
-
 /*
  * omap24xx_cpu_suspend() - Forces OMAP into deep sleep state by completing
  * SDRC shutdown then ARM shutdown.  Upon wake MPU is back on so just restore
index 092aedd..c62116b 100644 (file)
@@ -395,6 +395,7 @@ IS_OMAP_TYPE(3430, 0x3430)
 
 #define AM335X_CLASS           0x33500033
 #define AM335X_REV_ES1_0       AM335X_CLASS
+#define AM335X_REV_ES2_0       (AM335X_CLASS | (0x1 << 8))
 
 #define OMAP443X_CLASS         0x44300044
 #define OMAP4430_REV_ES1_0     (OMAP443X_CLASS | (0x10 << 8))
index bb829e0..d7bc33f 100644 (file)
@@ -152,7 +152,7 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
 
        sr_data->enable_on_init = sr_enable_on_init;
 
-       pdev = omap_device_build(name, i, oh, sr_data, sizeof(*sr_data), 0);
+       pdev = omap_device_build(name, i, oh, sr_data, sizeof(*sr_data));
        if (IS_ERR(pdev))
                pr_warning("%s: Could not build omap_device for %s: %s.\n\n",
                        __func__, name, oh->name);
index b820eda..db26e2e 100644 (file)
@@ -749,7 +749,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        unsigned long instr = 0, instrptr;
        int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
        unsigned int type;
-       mm_segment_t fs;
        unsigned int fault;
        u16 tinstr = 0;
        int isize = 4;
@@ -760,16 +759,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        instrptr = instruction_pointer(regs);
 
-       fs = get_fs();
-       set_fs(KERNEL_DS);
        if (thumb_mode(regs)) {
-               fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
+               u16 *ptr = (u16 *)(instrptr & ~1);
+               fault = probe_kernel_address(ptr, tinstr);
                if (!fault) {
                        if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
                            IS_T32(tinstr)) {
                                /* Thumb-2 32-bit */
                                u16 tinst2 = 0;
-                               fault = __get_user(tinst2, (u16 *)(instrptr+2));
+                               fault = probe_kernel_address(ptr + 1, tinst2);
                                instr = (tinstr << 16) | tinst2;
                                thumb2_32b = 1;
                        } else {
@@ -778,8 +776,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                        }
                }
        } else
-               fault = __get_user(instr, (u32 *)instrptr);
-       set_fs(fs);
+               fault = probe_kernel_address(instrptr, instr);
 
        if (fault) {
                type = TYPE_FAULT;
index 67c859c..ce66eb9 100644 (file)
@@ -147,15 +147,6 @@ config OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
        help
          PPA routine service ID for setting L2 auxiliary control register.
 
-config OMAP_32K_TIMER_HZ
-       int "Kernel internal timer frequency for 32KHz timer"
-       range 32 1024
-       depends on OMAP_32K_TIMER
-       default "128"
-       help
-         Kernel internal timer frequency should be a divisor of 32768,
-         such as 64 or 128.
-
 config OMAP_DM_TIMER
        bool "Use dual-mode timer"
        depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
index 6d35767..e27d2da 100644 (file)
 #if !defined(__ASM_ARCH_OMAP_TIMEX_H)
 #define __ASM_ARCH_OMAP_TIMEX_H
 
-/*
- * OMAP 32KHz timer updates time one jiffie at a time from a secondary timer,
- * and that's why the CLOCK_TICK_RATE is not 32768.
- */
-#ifdef CONFIG_OMAP_32K_TIMER
-#define CLOCK_TICK_RATE                (CONFIG_OMAP_32K_TIMER_HZ)
-#else
 #define CLOCK_TICK_RATE                (HZ * 100000UL)
-#endif
 
 #endif /* __ASM_ARCH_OMAP_TIMEX_H */
index dd5e56f..8d10dc8 100644 (file)
        .macro  DBGSTR, str
 #ifdef DEBUG
        stmfd   sp!, {r0-r3, ip, lr}
-       add     r0, pc, #4
+       ldr     r0, =1f
        bl      printk
-       b       1f
-       .asciz  KERN_DEBUG "VFP: \str\n"
-       .balign 4
-1:     ldmfd   sp!, {r0-r3, ip, lr}
+       ldmfd   sp!, {r0-r3, ip, lr}
+
+       .pushsection .rodata, "a"
+1:     .ascii  KERN_DEBUG "VFP: \str\n"
+       .byte   0
+       .previous
 #endif
        .endm
 
 #ifdef DEBUG
        stmfd   sp!, {r0-r3, ip, lr}
        mov     r1, \arg
-       add     r0, pc, #4
+       ldr     r0, =1f
        bl      printk
-       b       1f
-       .asciz  KERN_DEBUG "VFP: \str\n"
-       .balign 4
-1:     ldmfd   sp!, {r0-r3, ip, lr}
+       ldmfd   sp!, {r0-r3, ip, lr}
+
+       .pushsection .rodata, "a"
+1:     .ascii  KERN_DEBUG "VFP: \str\n"
+       .byte   0
+       .previous
 #endif
        .endm
 
        mov     r3, \arg3
        mov     r2, \arg2
        mov     r1, \arg1
-       add     r0, pc, #4
+       ldr     r0, =1f
        bl      printk
-       b       1f
-       .asciz  KERN_DEBUG "VFP: \str\n"
-       .balign 4
-1:     ldmfd   sp!, {r0-r3, ip, lr}
+       ldmfd   sp!, {r0-r3, ip, lr}
+
+       .pushsection .rodata, "a"
+1:     .ascii  KERN_DEBUG "VFP: \str\n"
+       .byte   0
+       .previous
 #endif
        .endm
 
index 3b44e0d..5dfbb0b 100644 (file)
@@ -413,7 +413,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
         * If there isn't a second FP instruction, exit now. Note that
         * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
         */
-       if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
+       if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
                goto exit;
 
        /*
index e60e386..12f2249 100644 (file)
@@ -40,7 +40,7 @@ __SYSCALL(15,  sys_chmod)
 __SYSCALL(16,  sys_lchown16)
 __SYSCALL(17,  sys_ni_syscall)                 /* 17 was sys_break */
 __SYSCALL(18,  sys_ni_syscall)                 /* 18 was sys_stat */
-__SYSCALL(19,  compat_sys_lseek_wrapper)
+__SYSCALL(19,  compat_sys_lseek)
 __SYSCALL(20,  sys_getpid)
 __SYSCALL(21,  compat_sys_mount)
 __SYSCALL(22,  sys_ni_syscall)                 /* 22 was sys_umount */
@@ -113,8 +113,8 @@ __SYSCALL(88,  sys_reboot)
 __SYSCALL(89,  sys_ni_syscall)                 /* 89 was sys_readdir */
 __SYSCALL(90,  sys_ni_syscall)                 /* 90 was sys_mmap */
 __SYSCALL(91,  sys_munmap)
-__SYSCALL(92,  sys_truncate)
-__SYSCALL(93,  sys_ftruncate)
+__SYSCALL(92,  compat_sys_truncate)
+__SYSCALL(93,  compat_sys_ftruncate)
 __SYSCALL(94,  sys_fchmod)
 __SYSCALL(95,  sys_fchown16)
 __SYSCALL(96,  sys_getpriority)
index 6abb057..9416d04 100644 (file)
@@ -58,11 +58,6 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
  * in registers or that take 32-bit parameters which require sign
  * extension.
  */
-compat_sys_lseek_wrapper:
-       sxtw    x1, w1
-       b       sys_lseek
-ENDPROC(compat_sys_lseek_wrapper)
-
 compat_sys_pread64_wrapper:
        orr     x3, x4, x5, lsl #32
        b       sys_pread64
index 2ae6591..9b89257 100644 (file)
@@ -7,6 +7,7 @@ config AVR32
        select HAVE_OPROFILE
        select HAVE_KPROBES
        select HAVE_GENERIC_HARDIRQS
+       select HAVE_VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
        select GENERIC_ATOMIC64
        select HARDIRQS_SW_RESEND
index e2c3287..d232888 100644 (file)
@@ -102,7 +102,4 @@ typedef struct user_fpu_struct elf_fpregset_t;
 
 #define ELF_PLATFORM  (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
-
 #endif /* __ASM_AVR32_ELF_H */
index e98f324..600494c 100644 (file)
@@ -33,6 +33,7 @@ config BLACKFIN
        select ARCH_HAVE_CUSTOM_GPIO_H
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select HAVE_UID16
+       select HAVE_VIRT_TO_BUS
        select ARCH_WANT_IPC_PARSE_VERSION
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_ATOMIC64
index 14bc98f..d15cb9b 100644 (file)
@@ -132,7 +132,4 @@ do {                                                                                        \
 
 #define ELF_PLATFORM  (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
index 0bdaa51..e1d0b24 100644 (file)
@@ -116,7 +116,7 @@ static const struct seq_operations cplbinfo_sops = {
 
 static int cplbinfo_open(struct inode *inode, struct file *file)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        char cplb_type;
        unsigned int cpu;
        int ret;
index 32b9971..9a4dfc5 100644 (file)
@@ -77,9 +77,6 @@ do {                                                          \
 
 #define ELF_PLATFORM  (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 /* C6X specific section types */
 #define SHT_C6000_UNWIND       0x70000001
 #define SHT_C6000_PREEMPTMAP   0x70000002
index 0e5c187..bb0ac66 100644 (file)
@@ -43,6 +43,7 @@ config CRIS
        select GENERIC_ATOMIC64
        select HAVE_GENERIC_HARDIRQS
        select HAVE_UID16
+       select HAVE_VIRT_TO_BUS
        select ARCH_WANT_IPC_PARSE_VERSION
        select GENERIC_IRQ_SHOW
        select GENERIC_IOMAP
index c4b7171..a1c498d 100644 (file)
@@ -654,7 +654,7 @@ static int sync_serial_release(struct inode *inode, struct file *file)
 
 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
 {
-       int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+       int dev = MINOR(file_inode(file)->i_rdev);
        unsigned int mask = 0;
        struct sync_port *port;
        DEBUGPOLL(static unsigned int prev_mask = 0);
@@ -685,7 +685,7 @@ static int sync_serial_ioctl_unlocked(struct file *file,
        int return_val = 0;
        unsigned long flags;
 
-       int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+       int dev = MINOR(file_inode(file)->i_rdev);
        struct sync_port *port;
 
        if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) {
@@ -973,7 +973,7 @@ static long sync_serial_ioctl(struct file *file,
 static ssize_t sync_serial_write(struct file *file, const char *buf,
        size_t count, loff_t *ppos)
 {
-       int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+       int dev = MINOR(file_inode(file)->i_rdev);
        DECLARE_WAITQUEUE(wait, current);
        struct sync_port *port;
        unsigned long flags;
@@ -1097,7 +1097,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
 static ssize_t sync_serial_read(struct file *file, char *buf,
                                size_t count, loff_t *ppos)
 {
-       int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+       int dev = MINOR(file_inode(file)->i_rdev);
        int avail;
        struct sync_port *port;
        unsigned char *start;
index f8476d9..877da19 100644 (file)
@@ -3135,11 +3135,10 @@ static long cryptocop_ioctl_unlocked(struct inode *inode,
 static long
 cryptocop_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
        long ret;
 
        mutex_lock(&cryptocop_mutex);
-       ret = cryptocop_ioctl_unlocked(inode, filp, cmd, arg);
+       ret = cryptocop_ioctl_unlocked(file_inode(filp), filp, cmd, arg);
        mutex_unlock(&cryptocop_mutex);
 
        return ret;
index a6a180b..219f704 100644 (file)
@@ -609,7 +609,7 @@ static int sync_serial_release(struct inode *inode, struct file *file)
 
 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
 {
-       int dev = iminor(file->f_path.dentry->d_inode);
+       int dev = iminor(file_inode(file));
        unsigned int mask = 0;
        sync_port *port;
        DEBUGPOLL( static unsigned int prev_mask = 0; );
@@ -657,7 +657,7 @@ static int sync_serial_ioctl(struct file *file,
 {
        int return_val = 0;
        int dma_w_size = regk_dma_set_w_size1;
-       int dev = iminor(file->f_path.dentry->d_inode);
+       int dev = iminor(file_inode(file));
        sync_port *port;
        reg_sser_rw_tr_cfg tr_cfg;
        reg_sser_rw_rec_cfg rec_cfg;
@@ -979,7 +979,7 @@ static long sync_serial_ioctl(struct file *file,
 static ssize_t sync_serial_write(struct file *file, const char *buf,
                                 size_t count, loff_t *ppos)
 {
-       int dev = iminor(file->f_path.dentry->d_inode);
+       int dev = iminor(file_inode(file));
        DECLARE_WAITQUEUE(wait, current);
        struct sync_port *port;
        int trunc_count;
@@ -1102,7 +1102,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
 static ssize_t sync_serial_read(struct file * file, char * buf,
                                size_t count, loff_t *ppos)
 {
-       int dev = iminor(file->f_path.dentry->d_inode);
+       int dev = iminor(file_inode(file));
        int avail;
        sync_port *port;
        unsigned char* start;
index 8182f2d..30ded8f 100644 (file)
@@ -86,7 +86,4 @@ typedef unsigned long elf_fpregset_t;
 
 #define ELF_PLATFORM  (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
index 2d0509d..12369b1 100644 (file)
@@ -6,6 +6,7 @@ config FRV
        select HAVE_PERF_EVENTS
        select HAVE_UID16
        select HAVE_GENERIC_HARDIRQS
+       select HAVE_VIRT_TO_BUS
        select GENERIC_IRQ_SHOW
        select HAVE_DEBUG_BUGVERBOSE
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
index 9ccbc80..2bac644 100644 (file)
@@ -137,7 +137,4 @@ do {                                                                                        \
 
 #define ELF_PLATFORM  (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
index 385fd30..836f147 100644 (file)
@@ -60,7 +60,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                                     unsigned long pgoff, unsigned long flags)
 {
        struct vm_area_struct *vma;
-       unsigned long limit;
+       struct vm_unmapped_area_info info;
 
        if (len > TASK_SIZE)
                return -ENOMEM;
@@ -79,39 +79,24 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
        }
 
        /* search between the bottom of user VM and the stack grow area */
-       addr = PAGE_SIZE;
-       limit = (current->mm->start_stack - 0x00200000);
-       if (addr + len <= limit) {
-               limit -= len;
-
-               if (addr <= limit) {
-                       vma = find_vma(current->mm, PAGE_SIZE);
-                       for (; vma; vma = vma->vm_next) {
-                               if (addr > limit)
-                                       break;
-                               if (addr + len <= vma->vm_start)
-                                       goto success;
-                               addr = vma->vm_end;
-                       }
-               }
-       }
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = PAGE_SIZE;
+       info.high_limit = (current->mm->start_stack - 0x00200000);
+       info.align_mask = 0;
+       info.align_offset = 0;
+       addr = vm_unmapped_area(&info);
+       if (!(addr & ~PAGE_MASK))
+               goto success;
+       VM_BUG_ON(addr != -ENOMEM);
 
        /* search from just above the WorkRAM area to the top of memory */
-       addr = PAGE_ALIGN(0x80000000);
-       limit = TASK_SIZE - len;
-       if (addr <= limit) {
-               vma = find_vma(current->mm, addr);
-               for (; vma; vma = vma->vm_next) {
-                       if (addr > limit)
-                               break;
-                       if (addr + len <= vma->vm_start)
-                               goto success;
-                       addr = vma->vm_end;
-               }
-
-               if (!vma && addr <= limit)
-                       goto success;
-       }
+       info.low_limit = PAGE_ALIGN(0x80000000);
+       info.high_limit = TASK_SIZE;
+       addr = vm_unmapped_area(&info);
+       if (!(addr & ~PAGE_MASK))
+               goto success;
+       VM_BUG_ON(addr != -ENOMEM);
 
 #if 0
        printk("[area] l=%lx (ENOMEM) f='%s'\n",
index 05b613a..ae8551e 100644 (file)
@@ -5,6 +5,7 @@ config H8300
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_ATOMIC64
        select HAVE_UID16
+       select HAVE_VIRT_TO_BUS
        select ARCH_WANT_IPC_PARSE_VERSION
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
index 41193c3..6db7124 100644 (file)
@@ -54,9 +54,6 @@ typedef unsigned long elf_fpregset_t;
 
 #define ELF_PLATFORM  (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #define R_H8_NONE       0
 #define R_H8_DIR32      1
 #define R_H8_DIR32_28   2
index 1ba4b3b..1f14e08 100644 (file)
@@ -216,11 +216,6 @@ do {                                       \
  */
 #define ELF_PLATFORM  (NULL)
 
-#ifdef __KERNEL__
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-#endif
-
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
index c90366e..33f3fdc 100644 (file)
@@ -26,6 +26,7 @@ config IA64
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_VIRT_CPU_ACCOUNTING
+       select HAVE_VIRT_TO_BUS
        select ARCH_DISCARD_MEMBLOCK
        select GENERIC_IRQ_PROBE
        select GENERIC_PENDING_IRQ if SMP
index b5298eb..5a83c5c 100644 (file)
@@ -201,9 +201,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
    relevant until we have real hardware to play with... */
 #define ELF_PLATFORM   NULL
 
-#define SET_PERSONALITY(ex)    \
-       set_personality((current->personality & ~PER_MASK) | PER_LINUX)
-
 #define elf_read_implies_exec(ex, executable_stack)                                    \
        ((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
 
index 7026b29..f8280a7 100644 (file)
@@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address =
                ((struct fnptr *)kretprobe_trampoline)->ip;
@@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
        regs->cr_iip = orig_ret_address;
 
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();
 
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
index ea39eba..433f5e8 100644 (file)
@@ -2221,9 +2221,9 @@ pfm_alloc_file(pfm_context_t *ctx)
        d_add(path.dentry, inode);
 
        file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
-       if (!file) {
+       if (IS_ERR(file)) {
                path_put(&path);
-               return ERR_PTR(-ENFILE);
+               return file;
        }
 
        file->f_flags = O_RDONLY;
index 79802e5..aa527d7 100644 (file)
@@ -301,7 +301,7 @@ salinfo_event_open(struct inode *inode, struct file *file)
 static ssize_t
 salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct proc_dir_entry *entry = PDE(inode);
        struct salinfo_data *data = entry->data;
        char cmd[32];
@@ -463,7 +463,7 @@ retry:
 static ssize_t
 salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct proc_dir_entry *entry = PDE(inode);
        struct salinfo_data *data = entry->data;
        u8 *buf;
@@ -524,7 +524,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
 static ssize_t
 salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct proc_dir_entry *entry = PDE(inode);
        struct salinfo_data *data = entry->data;
        char cmd[32];
index f807721..9262381 100644 (file)
@@ -10,6 +10,7 @@ config M32R
        select ARCH_WANT_IPC_PARSE_VERSION
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_GENERIC_HARDIRQS
+       select HAVE_VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select GENERIC_ATOMIC64
index 7089616..8acc9da 100644 (file)
@@ -128,7 +128,4 @@ typedef elf_fpreg_t elf_fpregset_t;
    intent than poking at uname or /proc/cpuinfo.  */
 #define ELF_PLATFORM   (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif  /* _ASM_M32R__ELF_H */
index efb1ce1..0e708c7 100644 (file)
@@ -8,6 +8,7 @@ config M68K
        select GENERIC_IRQ_SHOW
        select GENERIC_ATOMIC64
        select HAVE_UID16
+       select HAVE_VIRT_TO_BUS
        select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
        select GENERIC_CPU_DEVICES
        select GENERIC_STRNCPY_FROM_USER if MMU
index f83c1d0..b1c26de 100644 (file)
@@ -113,7 +113,4 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
 
 #define ELF_PLATFORM  (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
new file mode 100644 (file)
index 0000000..afc8973
--- /dev/null
@@ -0,0 +1,290 @@
+config SYMBOL_PREFIX
+       string
+       default "_"
+
+config METAG
+       def_bool y
+       select EMBEDDED
+       select GENERIC_ATOMIC64
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_SHOW
+       select GENERIC_SMP_IDLE_THREAD
+       select HAVE_64BIT_ALIGNED_ACCESS
+       select HAVE_ARCH_TRACEHOOK
+       select HAVE_C_RECORDMCOUNT
+       select HAVE_DEBUG_KMEMLEAK
+       select HAVE_DYNAMIC_FTRACE
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+       select HAVE_GENERIC_HARDIRQS
+       select HAVE_KERNEL_BZIP2
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZO
+       select HAVE_KERNEL_XZ
+       select HAVE_MEMBLOCK
+       select HAVE_MEMBLOCK_NODE_MAP
+       select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_PERF_EVENTS
+       select HAVE_SYSCALL_TRACEPOINTS
+       select IRQ_DOMAIN
+       select MODULES_USE_ELF_RELA
+       select OF
+       select OF_EARLY_FLATTREE
+       select SPARSE_IRQ
+
+config STACKTRACE_SUPPORT
+       def_bool y
+
+config LOCKDEP_SUPPORT
+       def_bool y
+
+config HAVE_LATENCYTOP_SUPPORT
+       def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+       def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+       bool
+
+config GENERIC_HWEIGHT
+       def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+       def_bool y
+
+config GENERIC_GPIO
+       def_bool n
+
+config NO_IOPORT
+       def_bool y
+
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+menu "Processor type and features"
+
+config MMU
+       def_bool y
+
+config STACK_GROWSUP
+       def_bool y
+
+config HOTPLUG_CPU
+       bool "Enable CPU hotplug support"
+       depends on SMP
+       help
+         Say Y here to allow turning CPUs off and on. CPUs can be
+         controlled through /sys/devices/system/cpu.
+
+         Say N if you want to disable CPU hotplug.
+
+config HIGHMEM
+       bool "High Memory Support"
+       help
+         The address space of Meta processors is only 4 Gigabytes large
+         and it has to accommodate user address space, kernel address
+         space as well as some memory mapped IO. That means that, if you
+         have a large amount of physical memory and/or IO, not all of the
+         memory can be "permanently mapped" by the kernel. The physical
+         memory that is not permanently mapped is called "high memory".
+
+         Depending on the selected kernel/user memory split, minimum
+         vmalloc space and actual amount of RAM, you may not need this
+         option which should result in a slightly faster kernel.
+
+         If unsure, say n.
+
+source "arch/metag/mm/Kconfig"
+
+source "arch/metag/Kconfig.soc"
+
+config METAG_META12
+       bool
+       help
+         Select this from the SoC config symbol to indicate that it contains a
+         Meta 1.2 core.
+
+config METAG_META21
+       bool
+       help
+         Select this from the SoC config symbol to indicate that it contains a
+         Meta 2.1 core.
+
+config SMP
+       bool "Symmetric multi-processing support"
+       depends on METAG_META21 && METAG_META21_MMU
+       select USE_GENERIC_SMP_HELPERS
+       help
+         This enables support for systems with more than one thread running
+         Linux. If you have a system with only one thread running Linux,
+         say N. Otherwise, say Y.
+
+config NR_CPUS
+       int "Maximum number of CPUs (2-4)" if SMP
+       range 2 4 if SMP
+       default "1" if !SMP
+       default "4" if SMP
+
+config METAG_SMP_WRITE_REORDERING
+       bool
+       help
+         This attempts to prevent cache-memory incoherence due to external
+         reordering of writes from different hardware threads when SMP is
+         enabled. It adds fences (system event 0) to smp_mb and smp_rmb in an
+         attempt to catch some of the cases, and also before writes to shared
+         memory in LOCK1 protected atomics and spinlocks.
+         This will not completely prevent cache incoherency on affected cores.
+
+config METAG_LNKGET_AROUND_CACHE
+       bool
+       depends on METAG_META21
+       help
+         This indicates that the LNKGET/LNKSET instructions go around the
+         cache, which requires some extra cache flushes when the memory needs
+         to be accessed by normal GET/SET instructions too.
+
+choice
+       prompt "Atomicity primitive"
+       default METAG_ATOMICITY_LNKGET
+       help
+         This option selects the mechanism for performing atomic operations.
+
+config METAG_ATOMICITY_IRQSOFF
+       depends on !SMP
+       bool "irqsoff"
+       help
+         This option disables interrupts to achieve atomicity. This mechanism
+         is not SMP-safe.
+
+config METAG_ATOMICITY_LNKGET
+       depends on METAG_META21
+       bool "lnkget/lnkset"
+       help
+         This option uses the LNKGET and LNKSET instructions to achieve
+         atomicity. LNKGET/LNKSET are load-link/store-conditional instructions.
+         Choose this option if your system requires low latency.
+
+config METAG_ATOMICITY_LOCK1
+       depends on SMP
+       bool "lock1"
+       help
+         This option uses the LOCK1 instruction for atomicity. This is mainly
+         provided as a debugging aid if the lnkget/lnkset atomicity primitive
+         isn't working properly.
+
+endchoice
+
+config METAG_FPU
+       bool "FPU Support"
+       depends on METAG_META21
+       default y
+       help
+         This option allows processes to use FPU hardware available with this
+         CPU. If this option is not enabled FPU registers will not be saved
+         and restored on context-switch.
+
+         If you plan on running programs which are compiled to use hard floats
+         say Y here.
+
+config METAG_DSP
+       bool "DSP Support"
+       help
+         This option allows processes to use DSP hardware available
+         with this CPU. If this option is not enabled DSP registers
+         will not be saved and restored on context-switch.
+
+         If you plan on running DSP programs say Y here.
+
+config METAG_PERFCOUNTER_IRQS
+       bool "PerfCounters interrupt support"
+       depends on METAG_META21
+       help
+         This option enables using interrupts to collect information from
+         Performance Counters. This option is supported in new META21
+         (starting from HTP265).
+
+         When disabled, Performance Counters information will be collected
+         based on Timer Interrupt.
+
+config METAG_DA
+       bool "DA support"
+       help
+         Say Y if you plan to use a DA debug adapter with Linux. The presence
+         of the DA will be detected automatically at boot, so it is safe to say
+         Y to this option even when booting without a DA.
+
+         This enables support for services provided by DA JTAG debug adapters,
+         such as:
+         - communication over DA channels (such as the console driver).
+         - use of the DA filesystem.
+
+menu "Boot options"
+
+config METAG_BUILTIN_DTB
+       bool "Embed DTB in kernel image"
+       default y
+       help
+         Embeds a device tree binary in the kernel image.
+
+config METAG_BUILTIN_DTB_NAME
+       string "Built in DTB"
+       depends on METAG_BUILTIN_DTB
+       help
+         Set the name of the DTB to embed (leave blank to pick one
+         automatically based on kernel configuration).
+
+config CMDLINE_BOOL
+       bool "Default bootloader kernel arguments"
+
+config CMDLINE
+       string "Kernel command line"
+       depends on CMDLINE_BOOL
+       help
+         On some architectures there is currently no way for the boot loader
+         to pass arguments to the kernel. For these architectures, you should
+         supply some command-line options at build time by entering them
+         here.
+
+config CMDLINE_FORCE
+       bool "Force default kernel command string"
+       depends on CMDLINE_BOOL
+       help
+         Set this to have arguments from the default kernel command string
+         override those passed by the boot loader.
+
+endmenu
+
+source "kernel/Kconfig.preempt"
+
+source kernel/Kconfig.hz
+
+endmenu
+
+menu "Power management options"
+
+source kernel/power/Kconfig
+
+endmenu
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/metag/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/metag/Kconfig.debug b/arch/metag/Kconfig.debug
new file mode 100644 (file)
index 0000000..e45bbf6
--- /dev/null
@@ -0,0 +1,40 @@
+menu "Kernel hacking"
+
+config TRACE_IRQFLAGS_SUPPORT
+       bool
+       default y
+
+source "lib/Kconfig.debug"
+
+config DEBUG_STACKOVERFLOW
+       bool "Check for stack overflows"
+       depends on DEBUG_KERNEL
+       help
+         This option will cause messages to be printed if free stack space
+         drops below a certain limit.
+
+config 4KSTACKS
+       bool "Use 4Kb for kernel stacks instead of 8Kb"
+       depends on DEBUG_KERNEL
+       help
+         If you say Y here the kernel will use a 4Kb stacksize for the
+         kernel stack attached to each process/thread. This facilitates
+         running more threads on a system and also reduces the pressure
+         on the VM subsystem for higher order allocations. This option
+         will also use IRQ stacks to compensate for the reduced stackspace.
+
+config METAG_FUNCTION_TRACE
+       bool "Output Meta real-time trace data for function entry/exit"
+       help
+         If you say Y here the kernel will use the Meta hardware trace
+         unit to output information about function entry and exit that
+         can be used by a debugger for profiling and call-graphs.
+
+config METAG_POISON_CATCH_BUFFERS
+       bool "Poison catch buffer contents on kernel entry"
+       help
+         If you say Y here the kernel will write poison data to the
+         catch buffer registers on kernel entry. This will make any
+         problem with catch buffer handling much more apparent.
+
+endmenu
diff --git a/arch/metag/Kconfig.soc b/arch/metag/Kconfig.soc
new file mode 100644 (file)
index 0000000..ec079cf
--- /dev/null
@@ -0,0 +1,55 @@
+choice
+       prompt "SoC Type"
+       default META21_FPGA
+
+config META12_FPGA
+       bool "Meta 1.2 FPGA"
+       select METAG_META12
+       help
+         This is a Meta 1.2 FPGA bitstream, just a bare CPU.
+
+config META21_FPGA
+       bool "Meta 2.1 FPGA"
+       select METAG_META21
+       help
+         This is a Meta 2.1 FPGA bitstream, just a bare CPU.
+
+endchoice
+
+menu "SoC configuration"
+
+if METAG_META21
+
+# Meta 2.x specific options
+
+config METAG_META21_MMU
+       bool "Meta 2.x MMU mode"
+       default y
+       help
+         Use the Meta 2.x MMU in extended mode.
+
+config METAG_UNALIGNED
+       bool "Meta 2.x unaligned access checking"
+       default y
+       help
+         All memory accesses will be checked for alignment and an exception
+         raised on unaligned accesses. This feature does cost performance
+         but without it there will be no notification of this type of error.
+
+config METAG_USER_TCM
+       bool "Meta on-chip memory support for userland"
+       select GENERIC_ALLOCATOR
+       default y
+       help
+         Allow the on-chip memories of Meta SoCs to be used by user
+         applications.
+
+endif
+
+config METAG_HALT_ON_PANIC
+       bool "Halt the core on panic"
+       help
+         Halt the core when a panic occurs. This is useful when running
+         pre-production silicon or in an FPGA environment.
+
+endmenu
diff --git a/arch/metag/Makefile b/arch/metag/Makefile
new file mode 100644 (file)
index 0000000..81bd6a1
--- /dev/null
@@ -0,0 +1,87 @@
+#
+# metag/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to do have actions
+# for "archclean" cleaning up for this architecture.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#               2007,2008,2012 by Imagination Technologies Ltd.
+#
+
+LDFLAGS                                        :=
+OBJCOPYFLAGS                           := -O binary -R .note -R .comment -S
+
+checkflags-$(CONFIG_METAG_META12)      += -DMETAC_1_2
+checkflags-$(CONFIG_METAG_META21)      += -DMETAC_2_1
+CHECKFLAGS                             += -D__metag__ $(checkflags-y)
+
+KBUILD_DEFCONFIG                       := meta2_defconfig
+
+sflags-$(CONFIG_METAG_META12)          += -mmetac=1.2
+ifeq ($(CONFIG_METAG_META12),y)
+# Only use TBI API 1.4 if DSP is enabled for META12 cores
+sflags-$(CONFIG_METAG_DSP)             += -DTBI_1_4
+endif
+sflags-$(CONFIG_METAG_META21)          += -mmetac=2.1 -DTBI_1_4
+
+cflags-$(CONFIG_METAG_FUNCTION_TRACE)  += -mhwtrace-leaf -mhwtrace-retpc
+cflags-$(CONFIG_METAG_META21)          += -mextensions=bex
+
+KBUILD_CFLAGS                          += -pipe
+KBUILD_CFLAGS                          += -ffunction-sections
+
+KBUILD_CFLAGS                          += $(sflags-y) $(cflags-y)
+KBUILD_AFLAGS                          += $(sflags-y)
+
+LDFLAGS_vmlinux                                := $(ldflags-y)
+
+head-y                                 := arch/metag/kernel/head.o
+
+core-y                                 += arch/metag/boot/dts/
+core-y                                 += arch/metag/kernel/
+core-y                                 += arch/metag/mm/
+
+libs-y                                 += arch/metag/lib/
+libs-y                                 += arch/metag/tbx/
+
+boot                                   := arch/metag/boot
+
+boot_targets                           += uImage
+boot_targets                           += uImage.gz
+boot_targets                           += uImage.bz2
+boot_targets                           += uImage.xz
+boot_targets                           += uImage.lzo
+boot_targets                           += uImage.bin
+boot_targets                           += vmlinux.bin
+
+PHONY                                  += $(boot_targets)
+
+all: vmlinux.bin
+
+$(boot_targets): vmlinux
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+%.dtb %.dtb.S %.dtb.o: scripts
+       $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+       $(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+
+archclean:
+       $(Q)$(MAKE) $(clean)=$(boot)
+
+define archhelp
+  echo  '* vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)'
+  @echo '  uImage      - Alias to bootable U-Boot image'
+  @echo '  uImage.bin  - Kernel-only image for U-Boot (bin)'
+  @echo '  uImage.gz   - Kernel-only image for U-Boot (gzip)'
+  @echo '  uImage.bz2  - Kernel-only image for U-Boot (bzip2)'
+  @echo '  uImage.xz   - Kernel-only image for U-Boot (xz)'
+  @echo '  uImage.lzo  - Kernel-only image for U-Boot (lzo)'
+  @echo '  dtbs                - Build device tree blobs for enabled boards'
+endef
diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore
new file mode 100644 (file)
index 0000000..a021da2
--- /dev/null
@@ -0,0 +1,4 @@
+vmlinux*
+uImage*
+ramdisk.*
+*.dtb
diff --git a/arch/metag/boot/Makefile b/arch/metag/boot/Makefile
new file mode 100644 (file)
index 0000000..5a1f88c
--- /dev/null
@@ -0,0 +1,68 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2007,2012  Imagination Technologies Ltd.
+#
+
+suffix-y := bin
+suffix-$(CONFIG_KERNEL_GZIP)   := gz
+suffix-$(CONFIG_KERNEL_BZIP2)  := bz2
+suffix-$(CONFIG_KERNEL_XZ)     := xz
+suffix-$(CONFIG_KERNEL_LZO)    := lzo
+
+targets += vmlinux.bin
+targets += uImage
+targets += uImage.gz
+targets += uImage.bz2
+targets += uImage.xz
+targets += uImage.lzo
+targets += uImage.bin
+
+extra-y += vmlinux.bin
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.bz2
+extra-y += vmlinux.bin.xz
+extra-y += vmlinux.bin.lzo
+
+UIMAGE_LOADADDR = $(CONFIG_PAGE_OFFSET)
+
+ifeq ($(CONFIG_FUNCTION_TRACER),y)
+orig_cflags := $(KBUILD_CFLAGS)
+KBUILD_CFLAGS = $(subst -pg, , $(orig_cflags))
+endif
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+       $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,gzip)
+
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,bzip2)
+
+$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,xzkern)
+
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzo)
+
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
+       $(call if_changed,uimage,gzip)
+
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
+       $(call if_changed,uimage,bzip2)
+
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
+       $(call if_changed,uimage,xz)
+
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
+       $(call if_changed,uimage,lzo)
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,uimage,none)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
+       @ln -sf $(notdir $<) $@
+       @echo '  Image $@ is ready'
diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile
new file mode 100644 (file)
index 0000000..e0b5afd
--- /dev/null
@@ -0,0 +1,16 @@
+dtb-y  += skeleton.dtb
+
+# Built-in dtb
+builtindtb-y                           := skeleton
+
+ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"")
+       builtindtb-y                    := $(CONFIG_METAG_BUILTIN_DTB_NAME)
+endif
+obj-$(CONFIG_METAG_BUILTIN_DTB)        += $(patsubst "%",%,$(builtindtb-y)).dtb.o
+
+targets        += dtbs
+targets        += $(dtb-y)
+
+dtbs: $(addprefix $(obj)/, $(dtb-y))
+
+clean-files += *.dtb
diff --git a/arch/metag/boot/dts/skeleton.dts b/arch/metag/boot/dts/skeleton.dts
new file mode 100644 (file)
index 0000000..7244d1f
--- /dev/null
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
diff --git a/arch/metag/boot/dts/skeleton.dtsi b/arch/metag/boot/dts/skeleton.dtsi
new file mode 100644 (file)
index 0000000..78229ea
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Skeleton device tree; the bare minimum needed to boot; just include and
+ * add a compatible value.  The bootloader will typically populate the memory
+ * node.
+ */
+
+/ {
+       compatible = "img,meta";
+       #address-cells = <1>;
+       #size-cells = <1>;
+       chosen { };
+       aliases { };
+       memory { device_type = "memory"; reg = <0 0>; };
+};
diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig
new file mode 100644 (file)
index 0000000..c35a75e
--- /dev/null
@@ -0,0 +1,40 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_LOG_BUF_SHIFT=13
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_ELF_CORE is not set
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_META12_FPGA=y
+CONFIG_METAG_DA=y
+CONFIG_HZ_100=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+CONFIG_DA_CONSOLE=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig
new file mode 100644 (file)
index 0000000..fb31484
--- /dev/null
@@ -0,0 +1,41 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=13
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_ELF_CORE is not set
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_METAG_L2C=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_METAG_HALT_ON_PANIC=y
+CONFIG_METAG_DA=y
+CONFIG_HZ_100=y
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+CONFIG_DA_CONSOLE=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig
new file mode 100644 (file)
index 0000000..6c7b777
--- /dev/null
@@ -0,0 +1,42 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=13
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_ELF_CORE is not set
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_METAG_L2C=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_METAG_HALT_ON_PANIC=y
+CONFIG_SMP=y
+CONFIG_METAG_DA=y
+CONFIG_HZ_100=y
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_DA_TTY=y
+CONFIG_DA_CONSOLE=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
new file mode 100644 (file)
index 0000000..6ae0ccb
--- /dev/null
@@ -0,0 +1,54 @@
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bugs.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += exec.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += msgbuf.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += switch_to.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += timex.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
new file mode 100644 (file)
index 0000000..307ecd2
--- /dev/null
@@ -0,0 +1,53 @@
+#ifndef __ASM_METAG_ATOMIC_H
+#define __ASM_METAG_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+
+#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
+/* The simple UP case. */
+#include <asm-generic/atomic.h>
+#else
+
+#if defined(CONFIG_METAG_ATOMICITY_LOCK1)
+#include <asm/atomic_lock1.h>
+#else
+#include <asm/atomic_lnkget.h>
+#endif
+
+#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_dec(v) atomic_sub(1, (v))
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec()     barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc()     barrier()
+
+#endif
+
+#define atomic_dec_if_positive(v)       atomic_sub_if_positive(1, v)
+
+#include <asm-generic/atomic64.h>
+
+#endif /* __ASM_METAG_ATOMIC_H */
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
new file mode 100644 (file)
index 0000000..d2e60a1
--- /dev/null
@@ -0,0 +1,234 @@
+#ifndef __ASM_METAG_ATOMIC_LNKGET_H
+#define __ASM_METAG_ATOMIC_LNKGET_H
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_set(v, i)               ((v)->counter = (i))
+
+#include <linux/compiler.h>
+
+#include <asm/barrier.h>
+
+/*
+ * None of these asm statements clobber memory as LNKSET writes around
+ * the cache so the memory it modifies cannot safely be read by any means
+ * other than these accessors.
+ */
+
+static inline int atomic_read(const atomic_t *v)
+{
+       int temp;
+
+       asm volatile (
+               "LNKGETD %0, [%1]\n"
+               : "=da" (temp)
+               : "da" (&v->counter));
+
+       return temp;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+       int temp;
+
+       asm volatile (
+               "1:     LNKGETD %0, [%1]\n"
+               "       ADD     %0, %0, %2\n"
+               "       LNKSETD [%1], %0\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               : "=&d" (temp)
+               : "da" (&v->counter), "bd" (i)
+               : "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       int temp;
+
+       asm volatile (
+               "1:     LNKGETD %0, [%1]\n"
+               "       SUB     %0, %0, %2\n"
+               "       LNKSETD [%1], %0\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ 1b\n"
+               : "=&d" (temp)
+               : "da" (&v->counter), "bd" (i)
+               : "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       int result, temp;
+
+       smp_mb();
+
+       asm volatile (
+               "1:     LNKGETD %1, [%2]\n"
+               "       ADD     %1, %1, %3\n"
+               "       LNKSETD [%2], %1\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ 1b\n"
+               : "=&d" (temp), "=&da" (result)
+               : "da" (&v->counter), "bd" (i)
+               : "cc");
+
+       smp_mb();
+
+       return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       int result, temp;
+
+       smp_mb();
+
+       asm volatile (
+               "1:     LNKGETD %1, [%2]\n"
+               "       SUB     %1, %1, %3\n"
+               "       LNKSETD [%2], %1\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               : "=&d" (temp), "=&da" (result)
+               : "da" (&v->counter), "bd" (i)
+               : "cc");
+
+       smp_mb();
+
+       return result;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+       int temp;
+
+       asm volatile (
+               "1:     LNKGETD %0, [%1]\n"
+               "       AND     %0, %0, %2\n"
+               "       LNKSETD [%1] %0\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               : "=&d" (temp)
+               : "da" (&v->counter), "bd" (~mask)
+               : "cc");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+       int temp;
+
+       asm volatile (
+               "1:     LNKGETD %0, [%1]\n"
+               "       OR      %0, %0, %2\n"
+               "       LNKSETD [%1], %0\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               : "=&d" (temp)
+               : "da" (&v->counter), "bd" (mask)
+               : "cc");
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       int result, temp;
+
+       smp_mb();
+
+       asm volatile (
+               "1:     LNKGETD %1, [%2]\n"
+               "       CMP     %1, %3\n"
+               "       LNKSETDEQ [%2], %4\n"
+               "       BNE     2f\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               "2:\n"
+               : "=&d" (temp), "=&d" (result)
+               : "da" (&v->counter), "bd" (old), "da" (new)
+               : "cc");
+
+       smp_mb();
+
+       return result;
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+       int temp, old;
+
+       asm volatile (
+               "1:     LNKGETD %1, [%2]\n"
+               "       LNKSETD [%2], %3\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               : "=&d" (temp), "=&d" (old)
+               : "da" (&v->counter), "da" (new)
+               : "cc");
+
+       return old;
+}
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int result, temp;
+
+       smp_mb();
+
+       asm volatile (
+               "1:     LNKGETD %1, [%2]\n"
+               "       CMP     %1, %3\n"
+               "       ADD     %0, %1, %4\n"
+               "       LNKSETDNE [%2], %0\n"
+               "       BEQ     2f\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               "2:\n"
+               : "=&d" (temp), "=&d" (result)
+               : "da" (&v->counter), "bd" (u), "bd" (a)
+               : "cc");
+
+       smp_mb();
+
+       return result;
+}
+
+static inline int atomic_sub_if_positive(int i, atomic_t *v)
+{
+       int result, temp;
+
+       asm volatile (
+               "1:     LNKGETD %1, [%2]\n"
+               "       SUBS    %1, %1, %3\n"
+               "       LNKSETDGE [%2], %1\n"
+               "       BLT     2f\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               "2:\n"
+               : "=&d" (temp), "=&da" (result)
+               : "da" (&v->counter), "bd" (i)
+               : "cc");
+
+       return result;
+}
+
+#endif /* __ASM_METAG_ATOMIC_LNKGET_H */
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
new file mode 100644 (file)
index 0000000..e578955
--- /dev/null
@@ -0,0 +1,160 @@
+#ifndef __ASM_METAG_ATOMIC_LOCK1_H
+#define __ASM_METAG_ATOMIC_LOCK1_H
+
+#define ATOMIC_INIT(i) { (i) }
+
+#include <linux/compiler.h>
+
+#include <asm/barrier.h>
+#include <asm/global_lock.h>
+
+static inline int atomic_read(const atomic_t *v)
+{
+       return (v)->counter;
+}
+
+/*
+ * atomic_set needs to be take the lock to protect atomic_add_unless from a
+ * possible race, as it reads the counter twice:
+ *
+ *  CPU0                               CPU1
+ *  atomic_add_unless(1, 0)
+ *    ret = v->counter (non-zero)
+ *    if (ret != u)                    v->counter = 0
+ *      v->counter += 1 (counter set to 1)
+ *
+ * Making atomic_set take the lock ensures that ordering and logical
+ * consistency is preserved.
+ */
+static inline int atomic_set(atomic_t *v, int i)
+{
+       unsigned long flags;
+
+       __global_lock1(flags);
+       fence();
+       v->counter = i;
+       __global_unlock1(flags);
+       return i;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+       unsigned long flags;
+
+       __global_lock1(flags);
+       fence();
+       v->counter += i;
+       __global_unlock1(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       unsigned long flags;
+
+       __global_lock1(flags);
+       fence();
+       v->counter -= i;
+       __global_unlock1(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       unsigned long result;
+       unsigned long flags;
+
+       __global_lock1(flags);
+       result = v->counter;
+       result += i;
+       fence();
+       v->counter = result;
+       __global_unlock1(flags);
+
+       return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       unsigned long result;
+       unsigned long flags;
+
+       __global_lock1(flags);
+       result = v->counter;
+       result -= i;
+       fence();
+       v->counter = result;
+       __global_unlock1(flags);
+
+       return result;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+       unsigned long flags;
+
+       __global_lock1(flags);
+       fence();
+       v->counter &= ~mask;
+       __global_unlock1(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+       unsigned long flags;
+
+       __global_lock1(flags);
+       fence();
+       v->counter |= mask;
+       __global_unlock1(flags);
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       int ret;
+       unsigned long flags;
+
+       __global_lock1(flags);
+       ret = v->counter;
+       if (ret == old) {
+               fence();
+               v->counter = new;
+       }
+       __global_unlock1(flags);
+
+       return ret;
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int ret;
+       unsigned long flags;
+
+       __global_lock1(flags);
+       ret = v->counter;
+       if (ret != u) {
+               fence();
+               v->counter += a;
+       }
+       __global_unlock1(flags);
+
+       return ret;
+}
+
+static inline int atomic_sub_if_positive(int i, atomic_t *v)
+{
+       int ret;
+       unsigned long flags;
+
+       __global_lock1(flags);
+       ret = v->counter - 1;
+       if (ret >= 0) {
+               fence();
+               v->counter = ret;
+       }
+       __global_unlock1(flags);
+
+       return ret;
+}
+
+#endif /* __ASM_METAG_ATOMIC_LOCK1_H */
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..c90bfc6
--- /dev/null
@@ -0,0 +1,85 @@
+#ifndef _ASM_METAG_BARRIER_H
+#define _ASM_METAG_BARRIER_H
+
+#include <asm/metag_mem.h>
+
+#define nop()          asm volatile ("NOP")
+#define mb()           wmb()
+#define rmb()          barrier()
+
+#ifdef CONFIG_METAG_META21
+
+/* HTP and above have a system event to fence writes */
+static inline void wr_fence(void)
+{
+       volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
+       barrier();
+       *flushptr = 0;
+}
+
+#else /* CONFIG_METAG_META21 */
+
+/*
+ * ATP doesn't have system event to fence writes, so it is necessary to flush
+ * the processor write queues as well as possibly the write combiner (depending
+ * on the page being written).
+ * To ensure the write queues are flushed we do 4 writes to a system event
+ * register (in this case write combiner flush) which will also flush the write
+ * combiner.
+ */
+static inline void wr_fence(void)
+{
+       volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH;
+       barrier();
+       *flushptr = 0;
+       *flushptr = 0;
+       *flushptr = 0;
+       *flushptr = 0;
+}
+
+#endif /* !CONFIG_METAG_META21 */
+
+static inline void wmb(void)
+{
+       /* flush writes through the write combiner */
+       wr_fence();
+}
+
+#define read_barrier_depends()  do { } while (0)
+
+#ifndef CONFIG_SMP
+#define fence()                do { } while (0)
+#define smp_mb()        barrier()
+#define smp_rmb()       barrier()
+#define smp_wmb()       barrier()
+#else
+
+#ifdef CONFIG_METAG_SMP_WRITE_REORDERING
+/*
+ * Write to the atomic memory unlock system event register (command 0). This is
+ * needed before a write to shared memory in a critical section, to prevent
+ * external reordering of writes before the fence on other threads with writes
+ * after the fence on this thread (and to prevent the ensuing cache-memory
+ * incoherence). It is therefore ineffective if used after and on the same
+ * thread as a write.
+ */
+static inline void fence(void)
+{
+       volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
+       barrier();
+       *flushptr = 0;
+}
+#define smp_mb()        fence()
+#define smp_rmb()       fence()
+#define smp_wmb()       barrier()
+#else
+#define fence()                do { } while (0)
+#define smp_mb()        barrier()
+#define smp_rmb()       barrier()
+#define smp_wmb()       barrier()
+#endif
+#endif
+#define smp_read_barrier_depends()     do { } while (0)
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+
+#endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h
new file mode 100644 (file)
index 0000000..c0d0df0
--- /dev/null
@@ -0,0 +1,132 @@
+#ifndef __ASM_METAG_BITOPS_H
+#define __ASM_METAG_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <asm/global_lock.h>
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()     barrier()
+#define smp_mb__after_clear_bit()      barrier()
+
+#ifdef CONFIG_SMP
+/*
+ * These functions are the basis of our bit ops.
+ */
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+       unsigned long flags;
+       unsigned long mask = 1UL << (bit & 31);
+
+       p += bit >> 5;
+
+       __global_lock1(flags);
+       fence();
+       *p |= mask;
+       __global_unlock1(flags);
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+       unsigned long flags;
+       unsigned long mask = 1UL << (bit & 31);
+
+       p += bit >> 5;
+
+       __global_lock1(flags);
+       fence();
+       *p &= ~mask;
+       __global_unlock1(flags);
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+       unsigned long flags;
+       unsigned long mask = 1UL << (bit & 31);
+
+       p += bit >> 5;
+
+       __global_lock1(flags);
+       fence();
+       *p ^= mask;
+       __global_unlock1(flags);
+}
+
+static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+       unsigned long flags;
+       unsigned long old;
+       unsigned long mask = 1UL << (bit & 31);
+
+       p += bit >> 5;
+
+       __global_lock1(flags);
+       old = *p;
+       if (!(old & mask)) {
+               fence();
+               *p = old | mask;
+       }
+       __global_unlock1(flags);
+
+       return (old & mask) != 0;
+}
+
+static inline int test_and_clear_bit(unsigned int bit,
+                                    volatile unsigned long *p)
+{
+       unsigned long flags;
+       unsigned long old;
+       unsigned long mask = 1UL << (bit & 31);
+
+       p += bit >> 5;
+
+       __global_lock1(flags);
+       old = *p;
+       if (old & mask) {
+               fence();
+               *p = old & ~mask;
+       }
+       __global_unlock1(flags);
+
+       return (old & mask) != 0;
+}
+
+static inline int test_and_change_bit(unsigned int bit,
+                                     volatile unsigned long *p)
+{
+       unsigned long flags;
+       unsigned long old;
+       unsigned long mask = 1UL << (bit & 31);
+
+       p += bit >> 5;
+
+       __global_lock1(flags);
+       fence();
+       old = *p;
+       *p = old ^ mask;
+       __global_unlock1(flags);
+
+       return (old & mask) != 0;
+}
+
+#else
+#include <asm-generic/bitops/atomic.h>
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_METAG_BITOPS_H */
diff --git a/arch/metag/include/asm/bug.h b/arch/metag/include/asm/bug.h
new file mode 100644 (file)
index 0000000..d04b48c
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _ASM_METAG_BUG_H
+#define _ASM_METAG_BUG_H
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+
+extern const char *trap_name(int trapno);
+extern void die(const char *str, struct pt_regs *regs, long err,
+               unsigned long addr) __attribute__ ((noreturn));
+
+#endif
diff --git a/arch/metag/include/asm/cache.h b/arch/metag/include/asm/cache.h
new file mode 100644 (file)
index 0000000..a43b650
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef __ASM_METAG_CACHE_H
+#define __ASM_METAG_CACHE_H
+
+/* L1 cache line size (64 bytes) */
+#define L1_CACHE_SHIFT         6
+#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+
+/* Meta requires large data items to be 8 byte aligned. */
+#define ARCH_SLAB_MINALIGN     8
+
+/*
+ * With an L2 cache, we may invalidate dirty lines, so we need to ensure DMA
+ * buffers have cache line alignment.
+ */
+#ifdef CONFIG_METAG_L2C
+#define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
+#else
+#define ARCH_DMA_MINALIGN      8
+#endif
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#endif
diff --git a/arch/metag/include/asm/cacheflush.h b/arch/metag/include/asm/cacheflush.h
new file mode 100644 (file)
index 0000000..7787ec5
--- /dev/null
@@ -0,0 +1,250 @@
+#ifndef _METAG_CACHEFLUSH_H
+#define _METAG_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <asm/l2cache.h>
+#include <asm/metag_isa.h>
+#include <asm/metag_mem.h>
+
+void metag_cache_probe(void);
+
+void metag_data_cache_flush_all(const void *start);
+void metag_code_cache_flush_all(const void *start);
+
+/*
+ * Routines to flush physical cache lines that may be used to cache data or code
+ * normally accessed via the linear address range supplied. The region flushed
+ * must either lie in local or global address space determined by the top bit of
+ * the pStart address. If Bytes is >= 4K then the whole of the related cache
+ * state will be flushed rather than a limited range.
+ */
+void metag_data_cache_flush(const void *start, int bytes);
+void metag_code_cache_flush(const void *start, int bytes);
+
+#ifdef CONFIG_METAG_META12
+
+/* Write through, virtually tagged, split I/D cache. */
+
+static inline void __flush_cache_all(void)
+{
+       metag_code_cache_flush_all((void *) PAGE_OFFSET);
+       metag_data_cache_flush_all((void *) PAGE_OFFSET);
+}
+
+#define flush_cache_all() __flush_cache_all()
+
+/* flush the entire user address space referenced in this mm structure */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+       if (mm == current->mm)
+               __flush_cache_all();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/* flush a range of addresses from this mm */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+                                    unsigned long start, unsigned long end)
+{
+       flush_cache_mm(vma->vm_mm);
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+                                   unsigned long vmaddr, unsigned long pfn)
+{
+       flush_cache_mm(vma->vm_mm);
+}
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE      1
+static inline void flush_dcache_page(struct page *page)
+{
+       metag_data_cache_flush_all((void *) PAGE_OFFSET);
+}
+
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+                                    struct page *page)
+{
+       metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
+}
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+       metag_data_cache_flush_all((void *) PAGE_OFFSET);
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+       metag_data_cache_flush_all((void *) PAGE_OFFSET);
+}
+
+#else
+
+/* Write through, physically tagged, split I/D cache. */
+
+#define flush_cache_all()                      do { } while (0)
+#define flush_cache_mm(mm)                     do { } while (0)
+#define flush_cache_dup_mm(mm)                 do { } while (0)
+#define flush_cache_range(vma, start, end)     do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+#define flush_icache_page(vma, pg)             do { } while (0)
+#define flush_cache_vmap(start, end)           do { } while (0)
+#define flush_cache_vunmap(start, end)         do { } while (0)
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE      1
+static inline void flush_dcache_page(struct page *page)
+{
+       /* FIXME: We can do better than this. All we are trying to do is
+        * make the i-cache coherent, we should use the PG_arch_1 bit like
+        * e.g. powerpc.
+        */
+#ifdef CONFIG_SMP
+       metag_out32(1, SYSC_ICACHE_FLUSH);
+#else
+       metag_code_cache_flush_all((void *) PAGE_OFFSET);
+#endif
+}
+
+#endif
+
+/* Push n pages at kernel virtual address and clear the icache */
+static inline void flush_icache_range(unsigned long address,
+                                     unsigned long endaddr)
+{
+#ifdef CONFIG_SMP
+       metag_out32(1, SYSC_ICACHE_FLUSH);
+#else
+       metag_code_cache_flush((void *) address, endaddr - address);
+#endif
+}
+
+static inline void flush_cache_sigtramp(unsigned long addr, int size)
+{
+       /*
+        * Flush the icache in case there was previously some code
+        * fetched from this address, perhaps a previous sigtramp.
+        *
+        * We don't need to flush the dcache, it's write through and
+        * we just wrote the sigtramp code through it.
+        */
+#ifdef CONFIG_SMP
+       metag_out32(1, SYSC_ICACHE_FLUSH);
+#else
+       metag_code_cache_flush((void *) addr, size);
+#endif
+}
+
+#ifdef CONFIG_METAG_L2C
+
+/*
+ * Perform a single specific CACHEWD operation on an address, masking lower bits
+ * of address first.
+ */
+static inline void cachewd_line(void *addr, unsigned int data)
+{
+       unsigned long masked = (unsigned long)addr & -0x40;
+       __builtin_meta2_cachewd((void *)masked, data);
+}
+
+/* Perform a certain CACHEW op on each cache line in a range */
+static inline void cachew_region_op(void *start, unsigned long size,
+                                   unsigned int op)
+{
+       unsigned long offset = (unsigned long)start & 0x3f;
+       int i;
+       if (offset) {
+               size += offset;
+               start -= offset;
+       }
+       i = (size - 1) >> 6;
+       do {
+               __builtin_meta2_cachewd(start, op);
+               start += 0x40;
+       } while (i--);
+}
+
+/* prevent write fence and flushbacks being reordered in L2 */
+static inline void l2c_fence_flush(void *addr)
+{
+       /*
+        * Synchronise by reading back and re-flushing.
+        * It is assumed this access will miss, as the caller should have just
+        * flushed the cache line.
+        */
+       (void)(volatile u8 *)addr;
+       cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
+}
+
+/* prevent write fence and writebacks being reordered in L2 */
+static inline void l2c_fence(void *addr)
+{
+       /*
+        * A write back has occurred, but not necessarily an invalidate, so the
+        * readback in l2c_fence_flush() would hit in the cache and have no
+        * effect. Therefore fully flush the line first.
+        */
+       cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
+       l2c_fence_flush(addr);
+}
+
+/* Used to keep memory consistent when doing DMA. */
+static inline void flush_dcache_region(void *start, unsigned long size)
+{
+       /* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
+       if (meta_l2c_is_enabled()) {
+               cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
+               if (meta_l2c_is_writeback())
+                       l2c_fence_flush(start + size - 1);
+       } else {
+               metag_data_cache_flush(start, size);
+       }
+}
+
+/* Write back dirty lines to memory (or do nothing if no writeback caches) */
+static inline void writeback_dcache_region(void *start, unsigned long size)
+{
+       if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
+               cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
+               l2c_fence(start + size - 1);
+       }
+}
+
+/* Invalidate (may also write back if necessary) */
+static inline void invalidate_dcache_region(void *start, unsigned long size)
+{
+       if (meta_l2c_is_enabled())
+               cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
+       else
+               metag_data_cache_flush(start, size);
+}
+#else
+#define flush_dcache_region(s, l)      metag_data_cache_flush((s), (l))
+#define writeback_dcache_region(s, l)  do {} while (0)
+#define invalidate_dcache_region(s, l) flush_dcache_region((s), (l))
+#endif
+
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, const void *src,
+                                    unsigned long len)
+{
+       memcpy(dst, src, len);
+       flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, const void *src,
+                                      unsigned long len)
+{
+       memcpy(dst, src, len);
+}
+
+#endif /* _METAG_CACHEFLUSH_H */
diff --git a/arch/metag/include/asm/cachepart.h b/arch/metag/include/asm/cachepart.h
new file mode 100644 (file)
index 0000000..cf6b44e
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Meta cache partition manipulation.
+ *
+ * Copyright 2010 Imagination Technologies Ltd.
+ */
+
+#ifndef _METAG_CACHEPART_H_
+#define _METAG_CACHEPART_H_
+
+/**
+ * get_dcache_size() - Get size of data cache.
+ */
+unsigned int get_dcache_size(void);
+
+/**
+ * get_icache_size() - Get size of code cache.
+ */
+unsigned int get_icache_size(void);
+
+/**
+ * get_global_dcache_size() - Get the thread's global dcache.
+ *
+ * Returns the size of the current thread's global dcache partition.
+ */
+unsigned int get_global_dcache_size(void);
+
+/**
+ * get_global_icache_size() - Get the thread's global icache.
+ *
+ * Returns the size of the current thread's global icache partition.
+ */
+unsigned int get_global_icache_size(void);
+
+/**
+ * check_for_cache_aliasing() - Ensure that the bootloader has configured the
+ *                              dcache and icache properly to avoid aliasing
+ * @thread_id: Hardware thread ID
+ *
+ */
+void check_for_cache_aliasing(int thread_id);
+
+#endif
diff --git a/arch/metag/include/asm/checksum.h b/arch/metag/include/asm/checksum.h
new file mode 100644 (file)
index 0000000..999bf76
--- /dev/null
@@ -0,0 +1,92 @@
+#ifndef _METAG_CHECKSUM_H
+#define _METAG_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy(const void *src, void *dst, int len,
+                               __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+                                       int len, __wsum sum, int *csum_err);
+
+#define csum_partial_copy_nocheck(src, dst, len, sum)  \
+       csum_partial_copy((src), (dst), (len), (sum))
+
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+       u32 sum = (__force u32)csum;
+       sum = (sum & 0xffff) + (sum >> 16);
+       sum = (sum & 0xffff) + (sum >> 16);
+       return (__force __sum16)~sum;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+                                       unsigned short len,
+                                       unsigned short proto,
+                                       __wsum sum)
+{
+       unsigned long len_proto = (proto + len) << 8;
+       asm ("ADD    %0, %0, %1\n"
+            "ADDS   %0, %0, %2\n"
+            "ADDCS  %0, %0, #1\n"
+            "ADDS   %0, %0, %3\n"
+            "ADDCS  %0, %0, #1\n"
+            : "=d" (sum)
+            : "d" (daddr), "d" (saddr), "d" (len_proto),
+              "0" (sum)
+            : "cc");
+       return sum;
+}
+
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+                 unsigned short proto, __wsum sum)
+{
+       return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#endif /* _METAG_CHECKSUM_H */
diff --git a/arch/metag/include/asm/clock.h b/arch/metag/include/asm/clock.h
new file mode 100644 (file)
index 0000000..3e2915a
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * arch/metag/include/asm/clock.h
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _METAG_CLOCK_H_
+#define _METAG_CLOCK_H_
+
+#include <asm/mach/arch.h>
+
+/**
+ * struct meta_clock_desc - Meta Core clock callbacks.
+ * @get_core_freq:     Get the frequency of the Meta core. If this is NULL, the
+ *                     core frequency will be determined like this:
+ *                     Meta 1: based on loops_per_jiffy.
+ *                     Meta 2: (EXPAND_TIMER_DIV + 1) MHz.
+ */
+struct meta_clock_desc {
+       unsigned long           (*get_core_freq)(void);
+};
+
+extern struct meta_clock_desc _meta_clock;
+
+/*
+ * Set up the default clock, ensuring all callbacks are valid - only accessible
+ * during boot.
+ */
+void setup_meta_clocks(struct meta_clock_desc *desc);
+
+/**
+ * get_coreclock() - Get the frequency of the Meta core clock.
+ *
+ * Returns:    The Meta core clock frequency in Hz.
+ */
+static inline unsigned long get_coreclock(void)
+{
+       /*
+        * Use the current clock callback. If set correctly this will provide
+        * the most accurate frequency as it can be calculated directly from the
+        * PLL configuration. Otherwise a default callback will have been set
+        * instead.
+        */
+       return _meta_clock.get_core_freq();
+}
+
+#endif /* _METAG_CLOCK_H_ */
diff --git a/arch/metag/include/asm/cmpxchg.h b/arch/metag/include/asm/cmpxchg.h
new file mode 100644 (file)
index 0000000..b1bc1be
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef __ASM_METAG_CMPXCHG_H
+#define __ASM_METAG_CMPXCHG_H
+
+#include <asm/barrier.h>
+
+#if defined(CONFIG_METAG_ATOMICITY_IRQSOFF)
+#include <asm/cmpxchg_irq.h>
+#elif defined(CONFIG_METAG_ATOMICITY_LOCK1)
+#include <asm/cmpxchg_lock1.h>
+#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
+#include <asm/cmpxchg_lnkget.h>
+#endif
+
+extern void __xchg_called_with_bad_pointer(void);
+
+#define __xchg(ptr, x, size)                           \
+({                                                     \
+       unsigned long __xchg__res;                      \
+       volatile void *__xchg_ptr = (ptr);              \
+       switch (size) {                                 \
+       case 4:                                         \
+               __xchg__res = xchg_u32(__xchg_ptr, x);  \
+               break;                                  \
+       case 1:                                         \
+               __xchg__res = xchg_u8(__xchg_ptr, x);   \
+               break;                                  \
+       default:                                        \
+               __xchg_called_with_bad_pointer();       \
+               __xchg__res = x;                        \
+               break;                                  \
+       }                                               \
+                                                       \
+       __xchg__res;                                    \
+})
+
+#define xchg(ptr, x)   \
+       ((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))))
+
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                                     unsigned long new, int size)
+{
+       switch (size) {
+       case 4:
+               return __cmpxchg_u32(ptr, old, new);
+       }
+       __cmpxchg_called_with_bad_pointer();
+       return old;
+}
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#define cmpxchg(ptr, o, n)                                             \
+       ({                                                              \
+               __typeof__(*(ptr)) _o_ = (o);                           \
+               __typeof__(*(ptr)) _n_ = (n);                           \
+               (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+                                              (unsigned long)_n_,      \
+                                              sizeof(*(ptr)));         \
+       })
+
+#endif /* __ASM_METAG_CMPXCHG_H */
diff --git a/arch/metag/include/asm/cmpxchg_irq.h b/arch/metag/include/asm/cmpxchg_irq.h
new file mode 100644 (file)
index 0000000..6495731
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef __ASM_METAG_CMPXCHG_IRQ_H
+#define __ASM_METAG_CMPXCHG_IRQ_H
+
+#include <linux/irqflags.h>
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+       unsigned long flags, retval;
+
+       local_irq_save(flags);
+       retval = *m;
+       *m = val;
+       local_irq_restore(flags);
+       return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+       unsigned long flags, retval;
+
+       local_irq_save(flags);
+       retval = *m;
+       *m = val & 0xff;
+       local_irq_restore(flags);
+       return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+                                         unsigned long new)
+{
+       __u32 retval;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       retval = *m;
+       if (retval == old)
+               *m = new;
+       local_irq_restore(flags);       /* implies memory barrier  */
+       return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_IRQ_H */
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
new file mode 100644 (file)
index 0000000..0154e28
--- /dev/null
@@ -0,0 +1,86 @@
+#ifndef __ASM_METAG_CMPXCHG_LNKGET_H
+#define __ASM_METAG_CMPXCHG_LNKGET_H
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+       int temp, old;
+
+       smp_mb();
+
+       asm volatile (
+                     "1:       LNKGETD %1, [%2]\n"
+                     " LNKSETD [%2], %3\n"
+                     " DEFR    %0, TXSTAT\n"
+                     " ANDT    %0, %0, #HI(0x3f000000)\n"
+                     " CMPT    %0, #HI(0x02000000)\n"
+                     " BNZ     1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+                     " DCACHE  [%2], %0\n"
+#endif
+                     : "=&d" (temp), "=&d" (old)
+                     : "da" (m), "da" (val)
+                     : "cc"
+                     );
+
+       smp_mb();
+
+       return old;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+       int temp, old;
+
+       smp_mb();
+
+       asm volatile (
+                     "1:       LNKGETD %1, [%2]\n"
+                     " LNKSETD [%2], %3\n"
+                     " DEFR    %0, TXSTAT\n"
+                     " ANDT    %0, %0, #HI(0x3f000000)\n"
+                     " CMPT    %0, #HI(0x02000000)\n"
+                     " BNZ     1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+                     " DCACHE  [%2], %0\n"
+#endif
+                     : "=&d" (temp), "=&d" (old)
+                     : "da" (m), "da" (val & 0xff)
+                     : "cc"
+                     );
+
+       smp_mb();
+
+       return old;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+                                         unsigned long new)
+{
+       __u32 retval, temp;
+
+       smp_mb();
+
+       asm volatile (
+                     "1:       LNKGETD %1, [%2]\n"
+                     " CMP     %1, %3\n"
+                     " LNKSETDEQ [%2], %4\n"
+                     " BNE     2f\n"
+                     " DEFR    %0, TXSTAT\n"
+                     " ANDT    %0, %0, #HI(0x3f000000)\n"
+                     " CMPT    %0, #HI(0x02000000)\n"
+                     " BNZ     1b\n"
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+                     " DCACHE  [%2], %0\n"
+#endif
+                     "2:\n"
+                     : "=&d" (temp), "=&da" (retval)
+                     : "da" (m), "bd" (old), "da" (new)
+                     : "cc"
+                     );
+
+       smp_mb();
+
+       return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_LNKGET_H */
diff --git a/arch/metag/include/asm/cmpxchg_lock1.h b/arch/metag/include/asm/cmpxchg_lock1.h
new file mode 100644 (file)
index 0000000..fd68504
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef __ASM_METAG_CMPXCHG_LOCK1_H
+#define __ASM_METAG_CMPXCHG_LOCK1_H
+
+#include <asm/global_lock.h>
+
+/* Use LOCK2 as these have to be atomic w.r.t. ordinary accesses. */
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+       unsigned long flags, retval;
+
+       __global_lock2(flags);
+       fence();
+       retval = *m;
+       *m = val;
+       __global_unlock2(flags);
+       return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+       unsigned long flags, retval;
+
+       __global_lock2(flags);
+       fence();
+       retval = *m;
+       *m = val & 0xff;
+       __global_unlock2(flags);
+       return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+                                         unsigned long new)
+{
+       __u32 retval;
+       unsigned long flags;
+
+       __global_lock2(flags);
+       retval = *m;
+       if (retval == old) {
+               fence();
+               *m = new;
+       }
+       __global_unlock2(flags);
+       return retval;
+}
+
+#endif /* __ASM_METAG_CMPXCHG_LOCK1_H */
diff --git a/arch/metag/include/asm/core_reg.h b/arch/metag/include/asm/core_reg.h
new file mode 100644 (file)
index 0000000..bdbc3a5
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __ASM_METAG_CORE_REG_H_
+#define __ASM_METAG_CORE_REG_H_
+
+#include <asm/metag_regs.h>
+
+extern void core_reg_write(int unit, int reg, int thread, unsigned int val);
+extern unsigned int core_reg_read(int unit, int reg, int thread);
+
+/*
+ * These macros allow direct access from C to any register known to the
+ * assembler. Example candidates are TXTACTCYC, TXIDLECYC, and TXPRIVEXT.
+ */
+
+#define __core_reg_get(reg) ({                                         \
+       unsigned int __grvalue;                                         \
+       asm volatile("MOV       %0," #reg                               \
+                    : "=r" (__grvalue));                               \
+       __grvalue;                                                      \
+})
+
+#define __core_reg_set(reg, value) do {                                        \
+       unsigned int __srvalue = (value);                               \
+       asm volatile("MOV       " #reg ",%0"                            \
+                    :                                                  \
+                    : "r" (__srvalue));                                \
+} while (0)
+
+#define __core_reg_swap(reg, value) do {                               \
+       unsigned int __srvalue = (value);                               \
+       asm volatile("SWAP      " #reg ",%0"                            \
+                    : "+r" (__srvalue));                               \
+       (value) = __srvalue;                                            \
+} while (0)
+
+#endif
diff --git a/arch/metag/include/asm/cpu.h b/arch/metag/include/asm/cpu.h
new file mode 100644 (file)
index 0000000..decf129
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef _ASM_METAG_CPU_H
+#define _ASM_METAG_CPU_H
+
+#include <linux/percpu.h>
+
+struct cpuinfo_metag {
+       struct cpu cpu;
+#ifdef CONFIG_SMP
+       unsigned long loops_per_jiffy;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_metag, cpu_data);
+#endif /* _ASM_METAG_CPU_H */
diff --git a/arch/metag/include/asm/da.h b/arch/metag/include/asm/da.h
new file mode 100644 (file)
index 0000000..81bd521
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Meta DA JTAG debugger control.
+ *
+ * Copyright 2012 Imagination Technologies Ltd.
+ */
+
+#ifndef _METAG_DA_H_
+#define _METAG_DA_H_
+
+#ifdef CONFIG_METAG_DA
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+extern bool _metag_da_present;
+
+/**
+ * metag_da_enabled() - Find whether a DA is currently enabled.
+ *
+ * Returns:    true if a DA was detected, false if not.
+ */
+static inline bool metag_da_enabled(void)
+{
+       return _metag_da_present;
+}
+
+/**
+ * metag_da_probe() - Try and detect a connected DA.
+ *
+ * This is used at start up to detect whether a DA is active.
+ *
+ * Returns:    0 on detection, -err otherwise.
+ */
+int __init metag_da_probe(void);
+
+#else /* !CONFIG_METAG_DA */
+
+#define metag_da_enabled() false
+#define metag_da_probe() do {} while (0)
+
+#endif
+
+#endif /* _METAG_DA_H_ */
diff --git a/arch/metag/include/asm/delay.h b/arch/metag/include/asm/delay.h
new file mode 100644 (file)
index 0000000..9c92f99
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _METAG_DELAY_H
+#define _METAG_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/metag/lib/delay.c
+ */
+
+/* Undefined functions to get compile-time errors */
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long xloops);
+extern void __delay(unsigned long loops);
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
+#define udelay(n) (__builtin_constant_p(n) ? \
+       ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
+       __udelay(n))
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
+#define ndelay(n) (__builtin_constant_p(n) ? \
+       ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+       __ndelay(n))
+
+#endif /* _METAG_DELAY_H */
diff --git a/arch/metag/include/asm/div64.h b/arch/metag/include/asm/div64.h
new file mode 100644 (file)
index 0000000..0fdd116
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __ASM_DIV64_H__
+#define __ASM_DIV64_H__
+
+#include <asm-generic/div64.h>
+
+extern u64 div_u64(u64 dividend, u64 divisor);
+extern s64 div_s64(s64 dividend, s64 divisor);
+
+#define div_u64 div_u64
+#define div_s64 div_s64
+
+#endif
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
new file mode 100644 (file)
index 0000000..14b23ef
--- /dev/null
@@ -0,0 +1,190 @@
+#ifndef _ASM_METAG_DMA_MAPPING_H
+#define _ASM_METAG_DMA_MAPPING_H
+
+#include <linux/mm.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <linux/scatterlist.h>
+#include <asm/bug.h>
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                        dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+                      void *vaddr, dma_addr_t dma_handle);
+
+void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
+void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+                     void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+                         void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+              enum dma_data_direction direction)
+{
+       BUG_ON(!valid_dma_direction(direction));
+       WARN_ON(size == 0);
+       dma_sync_for_device(ptr, size, direction);
+       return virt_to_phys(ptr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+                enum dma_data_direction direction)
+{
+       BUG_ON(!valid_dma_direction(direction));
+       dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+          enum dma_data_direction direction)
+{
+       struct scatterlist *sg;
+       int i;
+
+       BUG_ON(!valid_dma_direction(direction));
+       WARN_ON(nents == 0 || sglist[0].length == 0);
+
+       for_each_sg(sglist, sg, nents, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg->dma_address = sg_phys(sg);
+               dma_sync_for_device(sg_virt(sg), sg->length, direction);
+       }
+
+       return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+            size_t size, enum dma_data_direction direction)
+{
+       BUG_ON(!valid_dma_direction(direction));
+       dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
+                           direction);
+       return page_to_phys(page) + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+              enum dma_data_direction direction)
+{
+       BUG_ON(!valid_dma_direction(direction));
+       dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
+            enum dma_data_direction direction)
+{
+       struct scatterlist *sg;
+       int i;
+
+       BUG_ON(!valid_dma_direction(direction));
+       WARN_ON(nhwentries == 0 || sglist[0].length == 0);
+
+       for_each_sg(sglist, sg, nhwentries, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg->dma_address = sg_phys(sg);
+               dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+       }
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+                       enum dma_data_direction direction)
+{
+       dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+                          size_t size, enum dma_data_direction direction)
+{
+       dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+                             unsigned long offset, size_t size,
+                             enum dma_data_direction direction)
+{
+       dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size,
+                        direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+                                unsigned long offset, size_t size,
+                                enum dma_data_direction direction)
+{
+       dma_sync_for_device(phys_to_virt(dma_handle)+offset, size,
+                           direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+                   enum dma_data_direction direction)
+{
+       int i;
+       for (i = 0; i < nelems; i++, sg++)
+               dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+                      enum dma_data_direction direction)
+{
+       int i;
+       for (i = 0; i < nelems; i++, sg++)
+               dma_sync_for_device(sg_virt(sg), sg->length, direction);
+}
+
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return 0;
+}
+
+#define dma_supported(dev, mask)        (1)
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+       if (!dev->dma_mask || !dma_supported(dev, mask))
+               return -EIO;
+
+       *dev->dma_mask = mask;
+
+       return 0;
+}
+
+/*
+ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+ * do any flushing here.
+ */
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+              enum dma_data_direction direction)
+{
+}
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+#endif
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h
new file mode 100644 (file)
index 0000000..d63b9d0
--- /dev/null
@@ -0,0 +1,128 @@
+#ifndef __ASM_METAG_ELF_H
+#define __ASM_METAG_ELF_H
+
+#define EM_METAG      174
+
+/* Meta relocations */
+#define R_METAG_HIADDR16                 0
+#define R_METAG_LOADDR16                 1
+#define R_METAG_ADDR32                   2
+#define R_METAG_NONE                     3
+#define R_METAG_RELBRANCH                4
+#define R_METAG_GETSETOFF                5
+
+/* Backward compatibility */
+#define R_METAG_REG32OP1                 6
+#define R_METAG_REG32OP2                 7
+#define R_METAG_REG32OP3                 8
+#define R_METAG_REG16OP1                 9
+#define R_METAG_REG16OP2                10
+#define R_METAG_REG16OP3                11
+#define R_METAG_REG32OP4                12
+
+#define R_METAG_HIOG                    13
+#define R_METAG_LOOG                    14
+
+/* GNU */
+#define R_METAG_GNU_VTINHERIT           30
+#define R_METAG_GNU_VTENTRY             31
+
+/* PIC relocations */
+#define R_METAG_HI16_GOTOFF             32
+#define R_METAG_LO16_GOTOFF             33
+#define R_METAG_GETSET_GOTOFF           34
+#define R_METAG_GETSET_GOT              35
+#define R_METAG_HI16_GOTPC              36
+#define R_METAG_LO16_GOTPC              37
+#define R_METAG_HI16_PLT                38
+#define R_METAG_LO16_PLT                39
+#define R_METAG_RELBRANCH_PLT           40
+#define R_METAG_GOTOFF                  41
+#define R_METAG_PLT                     42
+#define R_METAG_COPY                    43
+#define R_METAG_JMP_SLOT                44
+#define R_METAG_RELATIVE                45
+#define R_METAG_GLOB_DAT                46
+
+/*
+ * ELF register definitions.
+ */
+
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/user.h>
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct user_gp_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef unsigned long elf_fpregset_t;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_METAG)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS      ELFCLASS32
+#define ELF_DATA       ELFDATA2LSB
+#define ELF_ARCH       EM_METAG
+
+#define ELF_PLAT_INIT(_r, load_addr)   \
+       do { _r->ctx.AX[0].U0 = 0; } while (0)
+
+#define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE      PAGE_SIZE
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE         0x08000000UL
+
+#define ELF_CORE_COPY_REGS(_dest, _regs)                       \
+       memcpy((char *)&_dest, (char *)_regs, sizeof(struct pt_regs));
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this cpu supports.  */
+
+#define ELF_HWCAP      (0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.  */
+
+#define ELF_PLATFORM  (NULL)
+
+#define SET_PERSONALITY(ex) \
+       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+
+#define STACK_RND_MASK (0)
+
+#ifdef CONFIG_METAG_USER_TCM
+
+struct elf32_phdr;
+struct file;
+
+unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
+                             struct elf32_phdr *eppnt, int prot, int type,
+                             unsigned long total_size);
+
+static inline unsigned long metag_elf_map(struct file *filep,
+                                         unsigned long addr,
+                                         struct elf32_phdr *eppnt, int prot,
+                                         int type, unsigned long total_size)
+{
+       return __metag_elf_map(filep, addr, eppnt, prot, type, total_size);
+}
+#define elf_map metag_elf_map
+
+#endif
+
+#endif
diff --git a/arch/metag/include/asm/fixmap.h b/arch/metag/include/asm/fixmap.h
new file mode 100644 (file)
index 0000000..3331275
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of the consistent memory region backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+#define FIX_N_COLOURS 8
+#ifdef CONFIG_HIGHMEM
+       /* reserved pte's for temporary kernel mappings */
+       FIX_KMAP_BEGIN,
+       FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+       __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP     (CONSISTENT_START - PAGE_SIZE)
+#define FIXADDR_SIZE   (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START  ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#define __fix_to_virt(x)       (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)       ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static inline unsigned long fix_to_virt(const unsigned int idx)
+{
+       /*
+        * this branch gets completely eliminated after inlining,
+        * except when someone tries to use fixaddr indices in an
+        * illegal way. (such as mixing up address types or using
+        * out-of-range indices).
+        *
+        * If it doesn't get removed, the linker will complain
+        * loudly with a reasonably clear error message..
+        */
+       if (idx >= __end_of_fixed_addresses)
+               __this_fixmap_does_not_exist();
+
+       return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+       BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+       return __virt_to_fix(vaddr);
+}
+
+#define kmap_get_fixmap_pte(vaddr) \
+       pte_offset_kernel( \
+               pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+               (vaddr) \
+       )
+
+/*
+ * Called from pgtable_init()
+ */
+extern void fixrange_init(unsigned long start, unsigned long end,
+       pgd_t *pgd_base);
+
+
+#endif
diff --git a/arch/metag/include/asm/ftrace.h b/arch/metag/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..2901f0f
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _ASM_METAG_FTRACE
+#define _ASM_METAG_FTRACE
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_INSN_SIZE       8 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void mcount_wrapper(void);
+#define MCOUNT_ADDR            ((long)(mcount_wrapper))
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       return addr;
+}
+
+struct dyn_arch_ftrace {
+       /* No extra data needed on metag */
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _ASM_METAG_FTRACE */
diff --git a/arch/metag/include/asm/global_lock.h b/arch/metag/include/asm/global_lock.h
new file mode 100644 (file)
index 0000000..fc831c8
--- /dev/null
@@ -0,0 +1,100 @@
+#ifndef __ASM_METAG_GLOBAL_LOCK_H
+#define __ASM_METAG_GLOBAL_LOCK_H
+
+#include <asm/metag_mem.h>
+
+/**
+ * __global_lock1() - Acquire global voluntary lock (LOCK1).
+ * @flags:     Variable to store flags into.
+ *
+ * Acquires the Meta global voluntary lock (LOCK1), also taking care to disable
+ * all triggers so we cannot be interrupted, and to enforce a compiler barrier
+ * so that the compiler cannot reorder memory accesses across the lock.
+ *
+ * No other hardware thread will be able to acquire the voluntary or exclusive
+ * locks until the voluntary lock is released with @__global_unlock1, but they
+ * may continue to execute as long as they aren't trying to acquire either of
+ * the locks.
+ */
+#define __global_lock1(flags) do {                                     \
+       unsigned int __trval;                                           \
+       asm volatile("MOV       %0,#0\n\t"                              \
+                    "SWAP      %0,TXMASKI\n\t"                         \
+                    "LOCK1"                                            \
+                    : "=r" (__trval)                                   \
+                    :                                                  \
+                    : "memory");                                       \
+       (flags) = __trval;                                              \
+} while (0)
+
+/**
+ * __global_unlock1() - Release global voluntary lock (LOCK1).
+ * @flags:     Variable to restore flags from.
+ *
+ * Releases the Meta global voluntary lock (LOCK1) acquired with
+ * @__global_lock1, also taking care to re-enable triggers, and to enforce a
+ * compiler barrier so that the compiler cannot reorder memory accesses across
+ * the unlock.
+ *
+ * This immediately allows another hardware thread to acquire the voluntary or
+ * exclusive locks.
+ */
+#define __global_unlock1(flags) do {                                   \
+       unsigned int __trval = (flags);                                 \
+       asm volatile("LOCK0\n\t"                                        \
+                    "MOV       TXMASKI,%0"                             \
+                    :                                                  \
+                    : "r" (__trval)                                    \
+                    : "memory");                                       \
+} while (0)
+
+/**
+ * __global_lock2() - Acquire global exclusive lock (LOCK2).
+ * @flags:     Variable to store flags into.
+ *
+ * Acquires the Meta global voluntary lock and global exclusive lock (LOCK2),
+ * also taking care to disable all triggers so we cannot be interrupted, to take
+ * the atomic lock (system event) and to enforce a compiler barrier so that the
+ * compiler cannot reorder memory accesses across the lock.
+ *
+ * No other hardware thread will be able to execute code until the locks are
+ * released with @__global_unlock2.
+ */
+#define __global_lock2(flags) do {                                     \
+       unsigned int __trval;                                           \
+       unsigned int __aloc_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
+       asm volatile("MOV       %0,#0\n\t"                              \
+                    "SWAP      %0,TXMASKI\n\t"                         \
+                    "LOCK2\n\t"                                        \
+                    "SETD      [%1+#0x40],D1RtP"                       \
+                    : "=r&" (__trval)                                  \
+                    : "u" (__aloc_hi)                                  \
+                    : "memory");                                       \
+       (flags) = __trval;                                              \
+} while (0)
+
+/**
+ * __global_unlock2() - Release global exclusive lock (LOCK2).
+ * @flags:     Variable to restore flags from.
+ *
+ * Releases the Meta global exclusive lock (LOCK2) and global voluntary lock
+ * acquired with @__global_lock2, also taking care to release the atomic lock
+ * (system event), re-enable triggers, and to enforce a compiler barrier so that
+ * the compiler cannot reorder memory accesses across the unlock.
+ *
+ * This immediately allows other hardware threads to continue executing and one
+ * of them to acquire locks.
+ */
+#define __global_unlock2(flags) do {                                   \
+       unsigned int __trval = (flags);                                 \
+       unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \
+       asm volatile("SETD      [%1+#0x00],D1RtP\n\t"                   \
+                    "LOCK0\n\t"                                        \
+                    "MOV       TXMASKI,%0"                             \
+                    :                                                  \
+                    : "r" (__trval),                                   \
+                      "u" (__alock_hi)                                 \
+                    : "memory");                                       \
+} while (0)
+
+#endif /* __ASM_METAG_GLOBAL_LOCK_H */
diff --git a/arch/metag/include/asm/gpio.h b/arch/metag/include/asm/gpio.h
new file mode 100644 (file)
index 0000000..b3799d8
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/metag/include/asm/highmem.h b/arch/metag/include/asm/highmem.h
new file mode 100644 (file)
index 0000000..6646a15
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+/*
+ * Ordering is (from lower to higher memory addresses):
+ *
+ * high_memory
+ *                     Persistent kmap area
+ * PKMAP_BASE
+ *                     fixed_addresses
+ * FIXADDR_START
+ * FIXADDR_TOP
+ *                     Vmalloc area
+ * VMALLOC_START
+ * VMALLOC_END
+ */
+#define PKMAP_BASE             (FIXADDR_START - PMD_SIZE)
+#define LAST_PKMAP             PTRS_PER_PTE
+#define LAST_PKMAP_MASK                (LAST_PKMAP - 1)
+#define PKMAP_NR(virt)         (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)         (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot              PAGE_KERNEL
+
+static inline void flush_cache_kmaps(void)
+{
+       flush_cache_all();
+}
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+extern pte_t *pkmap_page_table;
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void kmap_init(void);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
+#endif
+
+#endif
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h
new file mode 100644 (file)
index 0000000..f545477
--- /dev/null
@@ -0,0 +1,86 @@
+#ifndef _ASM_METAG_HUGETLB_H
+#define _ASM_METAG_HUGETLB_H
+
+#include <asm/page.h>
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        unsigned long len) {
+       return 0;
+}
+
+int prepare_hugepage_range(struct file *file, unsigned long addr,
+                                               unsigned long len);
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+                                         unsigned long addr, unsigned long end,
+                                         unsigned long floor,
+                                         unsigned long ceiling)
+{
+       free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
+{
+       set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_METAG_HUGETLB_H */
diff --git a/arch/metag/include/asm/hwthread.h b/arch/metag/include/asm/hwthread.h
new file mode 100644 (file)
index 0000000..8f97866
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2008 Imagination Technologies
+ */
+#ifndef __METAG_HWTHREAD_H
+#define __METAG_HWTHREAD_H
+
+#include <linux/bug.h>
+#include <linux/io.h>
+
+#include <asm/metag_mem.h>
+
+#define BAD_HWTHREAD_ID                (0xFFU)
+#define BAD_CPU_ID             (0xFFU)
+
+extern u8 cpu_2_hwthread_id[];
+extern u8 hwthread_id_2_cpu[];
+
+/*
+ * Each hardware thread's Control Unit registers are memory-mapped
+ * and can therefore be accessed by any other hardware thread.
+ *
+ * This helper function returns the memory address where "thread"'s
+ * register "regnum" is mapped.
+ */
+static inline
+void __iomem *__CU_addr(unsigned int thread, unsigned int regnum)
+{
+       unsigned int base, thread_offset, thread_regnum;
+
+       WARN_ON(thread == BAD_HWTHREAD_ID);
+
+       base = T0UCTREG0;       /* Control unit base */
+
+       thread_offset = TnUCTRX_STRIDE * thread;
+       thread_regnum = TXUCTREGn_STRIDE * regnum;
+
+       return (void __iomem *)(base + thread_offset + thread_regnum);
+}
+
+#endif /* __METAG_HWTHREAD_H */
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
new file mode 100644 (file)
index 0000000..9359e50
--- /dev/null
@@ -0,0 +1,165 @@
+#ifndef _ASM_METAG_IO_H
+#define _ASM_METAG_IO_H
+
+#include <linux/types.h>
+
+#define IO_SPACE_LIMIT  0
+
+#define page_to_bus page_to_phys
+#define bus_to_page phys_to_page
+
+/*
+ * Generic I/O
+ */
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+       u8 ret;
+       asm volatile("GETB %0,[%1]"
+                    : "=da" (ret)
+                    : "da" (addr)
+                    : "memory");
+       return ret;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+       u16 ret;
+       asm volatile("GETW %0,[%1]"
+                    : "=da" (ret)
+                    : "da" (addr)
+                    : "memory");
+       return ret;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+       u32 ret;
+       asm volatile("GETD %0,[%1]"
+                    : "=da" (ret)
+                    : "da" (addr)
+                    : "memory");
+       return ret;
+}
+
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+       u64 ret;
+       asm volatile("GETL %0,%t0,[%1]"
+                    : "=da" (ret)
+                    : "da" (addr)
+                    : "memory");
+       return ret;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+       asm volatile("SETB [%0],%1"
+                    :
+                    : "da" (addr),
+                      "da" (b)
+                    : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 b, volatile void __iomem *addr)
+{
+       asm volatile("SETW [%0],%1"
+                    :
+                    : "da" (addr),
+                      "da" (b)
+                    : "memory");
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 b, volatile void __iomem *addr)
+{
+       asm volatile("SETD [%0],%1"
+                    :
+                    : "da" (addr),
+                      "da" (b)
+                    : "memory");
+}
+
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
+{
+       asm volatile("SETL [%0],%1,%t1"
+                    :
+                    : "da" (addr),
+                      "da" (b)
+                    : "memory");
+}
+
+/*
+ * The generic io.h can define all the other generic accessors
+ */
+
+#include <asm-generic/io.h>
+
+/*
+ * Despite being a 32bit architecture, Meta can do 64bit memory accesses
+ * (assuming the bus supports it).
+ */
+
+#define readq  __raw_readq
+#define writeq __raw_writeq
+
+/*
+ * Meta specific I/O for accessing non-MMU areas.
+ *
+ * These can be provided with a physical address rather than an __iomem pointer
+ * and should only be used by core architecture code for accessing fixed core
+ * registers. Generic drivers should use ioremap and the generic I/O accessors.
+ */
+
+#define metag_in8(addr)                __raw_readb((volatile void __iomem *)(addr))
+#define metag_in16(addr)       __raw_readw((volatile void __iomem *)(addr))
+#define metag_in32(addr)       __raw_readl((volatile void __iomem *)(addr))
+#define metag_in64(addr)       __raw_readq((volatile void __iomem *)(addr))
+
+#define metag_out8(b, addr)    __raw_writeb(b, (volatile void __iomem *)(addr))
+#define metag_out16(b, addr)   __raw_writew(b, (volatile void __iomem *)(addr))
+#define metag_out32(b, addr)   __raw_writel(b, (volatile void __iomem *)(addr))
+#define metag_out64(b, addr)   __raw_writeq(b, (volatile void __iomem *)(addr))
+
+/*
+ * io remapping functions
+ */
+
+extern void __iomem *__ioremap(unsigned long offset,
+                              size_t size, unsigned long flags);
+extern void __iounmap(void __iomem *addr);
+
+/**
+ *     ioremap         -       map bus memory into CPU space
+ *     @offset:        bus address of the memory
+ *     @size:          size of the resource to map
+ *
+ *     ioremap performs a platform specific sequence of operations to
+ *     make bus memory CPU accessible via the readb/readw/readl/writeb/
+ *     writew/writel functions and the other mmio helpers. The returned
+ *     address is not guaranteed to be usable directly as a virtual
+ *     address.
+ */
+#define ioremap(offset, size)                   \
+       __ioremap((offset), (size), 0)
+
+#define ioremap_nocache(offset, size)           \
+       __ioremap((offset), (size), 0)
+
+#define ioremap_cached(offset, size)            \
+       __ioremap((offset), (size), _PAGE_CACHEABLE)
+
+#define ioremap_wc(offset, size)                \
+       __ioremap((offset), (size), _PAGE_WR_COMBINE)
+
+#define iounmap(addr)                           \
+       __iounmap(addr)
+
+#endif  /* _ASM_METAG_IO_H */
diff --git a/arch/metag/include/asm/irq.h b/arch/metag/include/asm/irq.h
new file mode 100644 (file)
index 0000000..be0c8f3
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef __ASM_METAG_IRQ_H
+#define __ASM_METAG_IRQ_H
+
+#ifdef CONFIG_4KSTACKS
+extern void irq_ctx_init(int cpu);
+extern void irq_ctx_exit(int cpu);
+# define __ARCH_HAS_DO_SOFTIRQ
+#else
+# define irq_ctx_init(cpu) do { } while (0)
+# define irq_ctx_exit(cpu) do { } while (0)
+#endif
+
+void tbi_startup_interrupt(int);
+void tbi_shutdown_interrupt(int);
+
+struct pt_regs;
+
+int tbisig_map(unsigned int hw);
+extern void do_IRQ(int irq, struct pt_regs *regs);
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+int traps_save_context(void);
+int traps_restore_context(void);
+#endif
+
+#include <asm-generic/irq.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void migrate_irqs(void);
+#endif
+
+#endif /* __ASM_METAG_IRQ_H */
diff --git a/arch/metag/include/asm/irqflags.h b/arch/metag/include/asm/irqflags.h
new file mode 100644 (file)
index 0000000..339b16f
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() functions from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/core_reg.h>
+#include <asm/metag_regs.h>
+
+#define INTS_OFF_MASK TXSTATI_BGNDHALT_BIT
+
+#ifdef CONFIG_SMP
+extern unsigned int get_trigger_mask(void);
+#else
+
+extern unsigned int global_trigger_mask;
+
+static inline unsigned int get_trigger_mask(void)
+{
+       return global_trigger_mask;
+}
+#endif
+
+static inline unsigned long arch_local_save_flags(void)
+{
+       return __core_reg_get(TXMASKI);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+       return (flags & ~INTS_OFF_MASK) == 0;
+}
+
+static inline int arch_irqs_disabled(void)
+{
+       unsigned long flags = arch_local_save_flags();
+
+       return arch_irqs_disabled_flags(flags);
+}
+
+static inline unsigned long __irqs_disabled(void)
+{
+       /*
+        * We shouldn't enable exceptions if they are not already
+        * enabled. This is required for chancalls to work correctly.
+        */
+       return arch_local_save_flags() & INTS_OFF_MASK;
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags = __irqs_disabled();
+
+       asm volatile("SWAP %0,TXMASKI\n" : "=r" (flags) : "0" (flags)
+                    : "memory");
+
+       return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+       unsigned long flags = __irqs_disabled();
+
+       asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
+}
+
+#ifdef CONFIG_SMP
+/* Avoid circular include dependencies through <linux/preempt.h> */
+void arch_local_irq_enable(void);
+#else
+static inline void arch_local_irq_enable(void)
+{
+       arch_local_irq_restore(get_trigger_mask());
+}
+#endif
+
+#endif /* (__ASSEMBLY__) */
+
+#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/arch/metag/include/asm/l2cache.h b/arch/metag/include/asm/l2cache.h
new file mode 100644 (file)
index 0000000..bffbeaa
--- /dev/null
@@ -0,0 +1,258 @@
+#ifndef _METAG_L2CACHE_H
+#define _METAG_L2CACHE_H
+
+#ifdef CONFIG_METAG_L2C
+
+#include <asm/global_lock.h>
+#include <asm/io.h>
+
+/*
+ * Store the last known value of pfenable (we don't want prefetch enabled while
+ * L2 is off).
+ */
+extern int l2c_pfenable;
+
+/* defined in arch/metag/drivers/core-sysfs.c */
+extern struct sysdev_class cache_sysclass;
+
+static inline void wr_fence(void);
+
+/*
+ * Functions for reading of L2 cache configuration.
+ */
+
+/* Get raw L2 config register (CORE_CONFIG3) */
+static inline unsigned int meta_l2c_config(void)
+{
+       const unsigned int *corecfg3 = (const unsigned int *)METAC_CORE_CONFIG3;
+       return *corecfg3;
+}
+
+/* Get whether the L2 is present */
+static inline int meta_l2c_is_present(void)
+{
+       return meta_l2c_config() & METAC_CORECFG3_L2C_HAVE_L2C_BIT;
+}
+
+/* Get whether the L2 is configured for write-back instead of write-through */
+static inline int meta_l2c_is_writeback(void)
+{
+       return meta_l2c_config() & METAC_CORECFG3_L2C_MODE_BIT;
+}
+
+/* Get whether the L2 is unified instead of separated code/data */
+static inline int meta_l2c_is_unified(void)
+{
+       return meta_l2c_config() & METAC_CORECFG3_L2C_UNIFIED_BIT;
+}
+
+/* Get the L2 cache size in bytes */
+static inline unsigned int meta_l2c_size(void)
+{
+       unsigned int size_s;
+       if (!meta_l2c_is_present())
+               return 0;
+       size_s = (meta_l2c_config() & METAC_CORECFG3_L2C_SIZE_BITS)
+                       >> METAC_CORECFG3_L2C_SIZE_S;
+       /* L2CSIZE is in KiB */
+       return 1024 << size_s;
+}
+
+/* Get the number of ways in the L2 cache */
+static inline unsigned int meta_l2c_ways(void)
+{
+       unsigned int ways_s;
+       if (!meta_l2c_is_present())
+               return 0;
+       ways_s = (meta_l2c_config() & METAC_CORECFG3_L2C_NUM_WAYS_BITS)
+                       >> METAC_CORECFG3_L2C_NUM_WAYS_S;
+       return 0x1 << ways_s;
+}
+
+/* Get the line size of the L2 cache */
+static inline unsigned int meta_l2c_linesize(void)
+{
+       unsigned int line_size;
+       if (!meta_l2c_is_present())
+               return 0;
+       line_size = (meta_l2c_config() & METAC_CORECFG3_L2C_LINE_SIZE_BITS)
+                       >> METAC_CORECFG3_L2C_LINE_SIZE_S;
+       switch (line_size) {
+       case METAC_CORECFG3_L2C_LINE_SIZE_64B:
+               return 64;
+       default:
+               return 0;
+       }
+}
+
+/* Get the revision ID of the L2 cache */
+static inline unsigned int meta_l2c_revision(void)
+{
+       return (meta_l2c_config() & METAC_CORECFG3_L2C_REV_ID_BITS)
+                       >> METAC_CORECFG3_L2C_REV_ID_S;
+}
+
+
+/*
+ * Start an initialisation of the L2 cachelines and wait for completion.
+ * This should only be done in a LOCK1 or LOCK2 critical section while the L2
+ * is disabled.
+ */
+static inline void _meta_l2c_init(void)
+{
+       metag_out32(SYSC_L2C_INIT_INIT, SYSC_L2C_INIT);
+       while (metag_in32(SYSC_L2C_INIT) == SYSC_L2C_INIT_IN_PROGRESS)
+               /* do nothing */;
+}
+
+/*
+ * Start a writeback of dirty L2 cachelines and wait for completion.
+ * This should only be done in a LOCK1 or LOCK2 critical section.
+ */
+static inline void _meta_l2c_purge(void)
+{
+       metag_out32(SYSC_L2C_PURGE_PURGE, SYSC_L2C_PURGE);
+       while (metag_in32(SYSC_L2C_PURGE) == SYSC_L2C_PURGE_IN_PROGRESS)
+               /* do nothing */;
+}
+
+/* Set whether the L2 cache is enabled. */
+static inline void _meta_l2c_enable(int enabled)
+{
+       unsigned int enable;
+
+       enable = metag_in32(SYSC_L2C_ENABLE);
+       if (enabled)
+               enable |= SYSC_L2C_ENABLE_ENABLE_BIT;
+       else
+               enable &= ~SYSC_L2C_ENABLE_ENABLE_BIT;
+       metag_out32(enable, SYSC_L2C_ENABLE);
+}
+
+/* Set whether the L2 cache prefetch is enabled. */
+static inline void _meta_l2c_pf_enable(int pfenabled)
+{
+       unsigned int enable;
+
+       enable = metag_in32(SYSC_L2C_ENABLE);
+       if (pfenabled)
+               enable |= SYSC_L2C_ENABLE_PFENABLE_BIT;
+       else
+               enable &= ~SYSC_L2C_ENABLE_PFENABLE_BIT;
+       metag_out32(enable, SYSC_L2C_ENABLE);
+}
+
+/* Return whether the L2 cache is enabled */
+static inline int _meta_l2c_is_enabled(void)
+{
+       return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_ENABLE_BIT;
+}
+
+/* Return whether the L2 cache prefetch is enabled */
+static inline int _meta_l2c_pf_is_enabled(void)
+{
+       return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_PFENABLE_BIT;
+}
+
+
+/* Return whether the L2 cache is enabled */
+static inline int meta_l2c_is_enabled(void)
+{
+       int en;
+
+       /*
+        * There is no need to lock at the moment, as the enable bit is never
+        * intermediately changed, so we will never see an intermediate result.
+        */
+       en = _meta_l2c_is_enabled();
+
+       return en;
+}
+
+/*
+ * Ensure the L2 cache is disabled.
+ * Return whether the L2 was previously disabled.
+ */
+int meta_l2c_disable(void);
+
+/*
+ * Ensure the L2 cache is enabled.
+ * Return whether the L2 was previously enabled.
+ */
+int meta_l2c_enable(void);
+
+/* Return whether the L2 cache prefetch is enabled */
+static inline int meta_l2c_pf_is_enabled(void)
+{
+       return l2c_pfenable;
+}
+
+/*
+ * Set whether the L2 cache prefetch is enabled.
+ * Return whether the L2 prefetch was previously enabled.
+ */
+int meta_l2c_pf_enable(int pfenable);
+
+/*
+ * Flush the L2 cache.
+ * Return 1 if the L2 is disabled.
+ */
+int meta_l2c_flush(void);
+
+/*
+ * Write back all dirty cache lines in the L2 cache.
+ * Return 1 if the L2 is disabled or there isn't any writeback.
+ */
+static inline int meta_l2c_writeback(void)
+{
+       unsigned long flags;
+       int en;
+
+       /* no need to purge if it's not a writeback cache */
+       if (!meta_l2c_is_writeback())
+               return 1;
+
+       /*
+        * Purge only works if the L2 is enabled, and involves reading back to
+        * detect completion, so keep this operation atomic with other threads.
+        */
+       __global_lock1(flags);
+       en = meta_l2c_is_enabled();
+       if (likely(en)) {
+               wr_fence();
+               _meta_l2c_purge();
+       }
+       __global_unlock1(flags);
+
+       return !en;
+}
+
+#else /* CONFIG_METAG_L2C */
+
+#define meta_l2c_config()              0
+#define meta_l2c_is_present()          0
+#define meta_l2c_is_writeback()                0
+#define meta_l2c_is_unified()          0
+#define meta_l2c_size()                        0
+#define meta_l2c_ways()                        0
+#define meta_l2c_linesize()            0
+#define meta_l2c_revision()            0
+
+#define meta_l2c_is_enabled()          0
+#define _meta_l2c_pf_is_enabled()      0
+#define meta_l2c_pf_is_enabled()       0
+#define meta_l2c_disable()             1
+#define meta_l2c_enable()              0
+#define meta_l2c_pf_enable(X)          0
+static inline int meta_l2c_flush(void)
+{
+       return 1;
+}
+static inline int meta_l2c_writeback(void)
+{
+       return 1;
+}
+
+#endif /* CONFIG_METAG_L2C */
+
+#endif /* _METAG_L2CACHE_H */
diff --git a/arch/metag/include/asm/linkage.h b/arch/metag/include/asm/linkage.h
new file mode 100644 (file)
index 0000000..73bf25b
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .p2align 2
+#define __ALIGN_STR ".p2align 2"
+
+#endif
diff --git a/arch/metag/include/asm/mach/arch.h b/arch/metag/include/asm/mach/arch.h
new file mode 100644 (file)
index 0000000..12c5664
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * arch/metag/include/asm/mach/arch.h
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * based on the ARM version:
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _METAG_MACH_ARCH_H_
+#define _METAG_MACH_ARCH_H_
+
+#include <linux/stddef.h>
+
+#include <asm/clock.h>
+
+/**
+ * struct machine_desc - Describes a board controlled by a Meta.
+ * @name:              Board/SoC name.
+ * @dt_compat:         Array of device tree 'compatible' strings.
+ * @clocks:            Clock callbacks.
+ *
+ * @nr_irqs:           Maximum number of IRQs.
+ *                     If 0, defaults to NR_IRQS in asm-generic/irq.h.
+ *
+ * @init_early:                Early init callback.
+ * @init_irq:          IRQ init callback for setting up IRQ controllers.
+ * @init_machine:      Arch init callback for setting up devices.
+ * @init_late:         Late init callback.
+ *
+ * This structure is provided by each board which can be controlled by a Meta.
+ * It is chosen by matching the compatible strings in the device tree provided
+ * by the bootloader with the strings in @dt_compat, and sets up any aspects of
+ * the machine that aren't configured with device tree (yet).
+ */
+struct machine_desc {
+       const char              *name;
+       const char              **dt_compat;
+       struct meta_clock_desc  *clocks;
+
+       unsigned int            nr_irqs;
+
+       void                    (*init_early)(void);
+       void                    (*init_irq)(void);
+       void                    (*init_machine)(void);
+       void                    (*init_late)(void);
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p)                       \
+       for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+static inline struct machine_desc *default_machine_desc(void)
+{
+       /* the default machine is the last one linked in */
+       if (__arch_info_end - 1 < __arch_info_begin)
+               return NULL;
+       return __arch_info_end - 1;
+}
+
+/*
+ * Set of macros to define architecture features.  This is built into
+ * a table by the linker.
+ */
+#define MACHINE_START(_type, _name)                    \
+static const struct machine_desc __mach_desc_##_type   \
+__used                                                 \
+__attribute__((__section__(".arch.info.init"))) = {    \
+       .name           = _name,
+
+#define MACHINE_END                            \
+};
+
+#endif /* _METAG_MACH_ARCH_H_ */
diff --git a/arch/metag/include/asm/metag_isa.h b/arch/metag/include/asm/metag_isa.h
new file mode 100644 (file)
index 0000000..c8aa2ae
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * asm/metag_isa.h
+ *
+ * Copyright (C) 2000-2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Various defines for Meta instruction set.
+ */
+
+#ifndef _ASM_METAG_ISA_H_
+#define _ASM_METAG_ISA_H_
+
+
+/* L1 cache layout */
+
+/* Data cache line size as bytes and shift */
+#define DCACHE_LINE_BYTES 64
+#define DCACHE_LINE_S     6
+
+/* Number of ways in the data cache */
+#define DCACHE_WAYS       4
+
+/* Instruction cache line size as bytes and shift */
+#define ICACHE_LINE_BYTES 64
+#define ICACHE_LINE_S     6
+
+/* Number of ways in the instruction cache */
+#define ICACHE_WAYS       4
+
+
+/*
+ * CACHEWD/CACHEWL instructions use the bottom 8 bits of the data presented to
+ * control the operation actually achieved.
+ */
+/* Use of these two bits should be discouraged since the bits dont have
+ * consistent meanings
+ */
+#define CACHEW_ICACHE_BIT           0x01
+#define CACHEW_TLBFLUSH_BIT         0x02
+
+#define CACHEW_FLUSH_L1D_L2         0x0
+#define CACHEW_INVALIDATE_L1I       0x1
+#define CACHEW_INVALIDATE_L1DTLB    0x2
+#define CACHEW_INVALIDATE_L1ITLB    0x3
+#define CACHEW_WRITEBACK_L1D_L2     0x4
+#define CACHEW_INVALIDATE_L1D       0x8
+#define CACHEW_INVALIDATE_L1D_L2    0xC
+
+/*
+ * CACHERD/CACHERL instructions use bits 3:5 of the address presented to
+ * control the operation achieved and hence the specific result.
+ */
+#define CACHER_ADDR_BITS            0xFFFFFFC0
+#define CACHER_OPER_BITS            0x00000030
+#define CACHER_OPER_S               4
+#define     CACHER_OPER_LINPHY          0
+#define CACHER_ICACHE_BIT           0x00000008
+#define CACHER_ICACHE_S             3
+
+/*
+ * CACHERD/CACHERL LINPHY Oper result is one/two 32-bit words
+ *
+ *  If CRLINPHY0_VAL_BIT (Bit 0) set then,
+ *      Lower 32-bits corresponds to MMCU_ENTRY_* above.
+ *      Upper 32-bits corresponds to CRLINPHY1_* values below (if requested).
+ *  else
+ *      Lower 32-bits corresponds to CRLINPHY0_* values below.
+ *      Upper 32-bits undefined.
+ */
+#define CRLINPHY0_VAL_BIT      0x00000001
+#define CRLINPHY0_FIRST_BIT    0x00000004 /* Set if VAL=0 due to first level */
+
+#define CRLINPHY1_READ_BIT     0x00000001 /* Set if reads permitted          */
+#define CRLINPHY1_SINGLE_BIT   0x00000004 /* Set if TLB does not cache entry */
+#define CRLINPHY1_PAGEMSK_BITS 0x0000FFF0 /* Set to ((2^n-1)>>12) value      */
+#define CRLINPHY1_PAGEMSK_S    4
+
+#endif /* _ASM_METAG_ISA_H_ */
diff --git a/arch/metag/include/asm/metag_mem.h b/arch/metag/include/asm/metag_mem.h
new file mode 100644 (file)
index 0000000..3f7b54d
--- /dev/null
@@ -0,0 +1,1106 @@
+/*
+ * asm/metag_mem.h
+ *
+ * Copyright (C) 2000-2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Various defines for Meta (memory-mapped) registers.
+ */
+
+#ifndef _ASM_METAG_MEM_H_
+#define _ASM_METAG_MEM_H_
+
+/*****************************************************************************
+ *                   META MEMORY MAP LINEAR ADDRESS VALUES
+ ****************************************************************************/
+/*
+ * COMMON MEMORY MAP
+ * -----------------
+ */
+
+#define LINSYSTEM_BASE  0x00200000
+#define LINSYSTEM_LIMIT 0x07FFFFFF
+
+/* Linear cache flush now implemented via DCACHE instruction. These defines
+   related to a special region that used to exist for achieving cache flushes.
+ */
+#define         LINSYSLFLUSH_S 0
+
+#define     LINSYSRES0_BASE     0x00200000
+#define     LINSYSRES0_LIMIT    0x01FFFFFF
+
+#define     LINSYSCUSTOM_BASE 0x02000000
+#define     LINSYSCUSTOM_LIMIT   0x02FFFFFF
+
+#define     LINSYSEXPAND_BASE 0x03000000
+#define     LINSYSEXPAND_LIMIT   0x03FFFFFF
+
+#define     LINSYSEVENT_BASE  0x04000000
+#define         LINSYSEVENT_WR_ATOMIC_UNLOCK    0x04000000
+#define         LINSYSEVENT_WR_ATOMIC_LOCK      0x04000040
+#define         LINSYSEVENT_WR_CACHE_DISABLE    0x04000080
+#define         LINSYSEVENT_WR_CACHE_ENABLE     0x040000C0
+#define         LINSYSEVENT_WR_COMBINE_FLUSH    0x04000100
+#define         LINSYSEVENT_WR_FENCE            0x04000140
+#define     LINSYSEVENT_LIMIT   0x04000FFF
+
+#define     LINSYSCFLUSH_BASE   0x04400000
+#define         LINSYSCFLUSH_DCACHE_LINE    0x04400000
+#define         LINSYSCFLUSH_ICACHE_LINE    0x04500000
+#define         LINSYSCFLUSH_MMCU           0x04700000
+#ifndef METAC_1_2
+#define         LINSYSCFLUSH_TxMMCU_BASE    0x04700020
+#define         LINSYSCFLUSH_TxMMCU_STRIDE  0x00000008
+#endif
+#define         LINSYSCFLUSH_ADDR_BITS      0x000FFFFF
+#define         LINSYSCFLUSH_ADDR_S         0
+#define     LINSYSCFLUSH_LIMIT  0x047FFFFF
+
+#define     LINSYSCTRL_BASE     0x04800000
+#define     LINSYSCTRL_LIMIT    0x04FFFFFF
+
+#define     LINSYSMTABLE_BASE   0x05000000
+#define     LINSYSMTABLE_LIMIT  0x05FFFFFF
+
+#define     LINSYSDIRECT_BASE   0x06000000
+#define     LINSYSDIRECT_LIMIT  0x07FFFFFF
+
+#define LINLOCAL_BASE   0x08000000
+#define LINLOCAL_LIMIT  0x7FFFFFFF
+
+#define LINCORE_BASE    0x80000000
+#define LINCORE_LIMIT   0x87FFFFFF
+
+#define LINCORE_CODE_BASE  0x80000000
+#define LINCORE_CODE_LIMIT 0x81FFFFFF
+
+#define LINCORE_DATA_BASE  0x82000000
+#define LINCORE_DATA_LIMIT 0x83FFFFFF
+
+
+/* The core can support locked icache lines in this region */
+#define LINCORE_ICACHE_BASE  0x84000000
+#define LINCORE_ICACHE_LIMIT 0x85FFFFFF
+
+/* The core can support locked dcache lines in this region */
+#define LINCORE_DCACHE_BASE  0x86000000
+#define LINCORE_DCACHE_LIMIT 0x87FFFFFF
+
+#define LINGLOBAL_BASE  0x88000000
+#define LINGLOBAL_LIMIT 0xFFFDFFFF
+
+/*
+ * CHIP Core Register Map
+ * ----------------------
+ */
+#define CORE_HWBASE     0x04800000
+#define PRIV_HWBASE     0x04810000
+#define TRIG_HWBASE     0x04820000
+#define SYSC_HWBASE     0x04830000
+
+/*****************************************************************************
+ *         INTER-THREAD KICK REGISTERS FOR SOFTWARE EVENT GENERATION
+ ****************************************************************************/
+/*
+ * These values define memory mapped registers that can be used to supply
+ * kicks to threads that service arbitrary software events.
+ */
+
+#define T0KICK     0x04800800   /* Background kick 0     */
+#define     TXXKICK_MAX 0xFFFF  /* Maximum kicks */
+#define     TnXKICK_STRIDE      0x00001000  /* Thread scale value    */
+#define     TnXKICK_STRIDE_S    12
+#define T0KICKI    0x04800808   /* Interrupt kick 0      */
+#define     TXIKICK_OFFSET  0x00000008  /* Int level offset value */
+#define T1KICK     0x04801800   /* Background kick 1     */
+#define T1KICKI    0x04801808   /* Interrupt kick 1      */
+#define T2KICK     0x04802800   /* Background kick 2     */
+#define T2KICKI    0x04802808   /* Interrupt kick 2      */
+#define T3KICK     0x04803800   /* Background kick 3     */
+#define T3KICKI    0x04803808   /* Interrupt kick 3      */
+
+/*****************************************************************************
+ *                GLOBAL REGISTER ACCESS RESOURCES
+ ****************************************************************************/
+/*
+ * These values define memory mapped registers that allow access to the
+ * internal state of all threads in order to allow global set-up of thread
+ * state and external handling of thread events, errors, or debugging.
+ *
+ * The actual unit and register index values needed to access individual
+ * registers are chip specific see - METAC_TXUXX_VALUES in metac_x_y.h.
+ * However two C array initialisers TXUXX_MASKS and TGUXX_MASKS will always be
+ * defined to allow arbitrary loading, display, and saving of all valid
+ * register states without detailed knowledge of their purpose - TXUXX sets
+ * bits for all valid registers and TGUXX sets bits for the sub-set which are
+ * global.
+ */
+
+#define T0UCTREG0   0x04800000  /* Access to all CT regs */
+#define TnUCTRX_STRIDE      0x00001000  /* Thread scale value    */
+#define TXUCTREGn_STRIDE    0x00000008  /* Register scale value  */
+
+#define TXUXXRXDT  0x0480FFF0   /* Data to/from any threads reg */
+#define TXUXXRXRQ  0x0480FFF8
+#define     TXUXXRXRQ_DREADY_BIT 0x80000000  /* Poll for done */
+#define     TXUXXRXRQ_DSPEXT_BIT 0x00020000  /* Addr DSP Regs */
+#define     TXUXXRXRQ_RDnWR_BIT  0x00010000  /* Set for read  */
+#define     TXUXXRXRQ_TX_BITS    0x00003000  /* Thread number */
+#define     TXUXXRXRQ_TX_S       12
+#define     TXUXXRXRQ_RX_BITS    0x000001F0  /* Register num  */
+#define     TXUXXRXRQ_RX_S       4
+#define         TXUXXRXRQ_DSPRARD0    0      /* DSP RAM A Read Pointer 0 */
+#define         TXUXXRXRQ_DSPRARD1    1      /* DSP RAM A Read Pointer 1 */
+#define         TXUXXRXRQ_DSPRAWR0    2      /* DSP RAM A Write Pointer 0 */
+#define         TXUXXRXRQ_DSPRAWR2    3      /* DSP RAM A Write Pointer 1 */
+#define         TXUXXRXRQ_DSPRBRD0    4      /* DSP RAM B Read Pointer 0 */
+#define         TXUXXRXRQ_DSPRBRD1    5      /* DSP RAM B Read Pointer 1 */
+#define         TXUXXRXRQ_DSPRBWR0    6      /* DSP RAM B Write Pointer 0 */
+#define         TXUXXRXRQ_DSPRBWR1    7      /* DSP RAM B Write Pointer 1 */
+#define         TXUXXRXRQ_DSPRARINC0  8      /* DSP RAM A Read Increment 0 */
+#define         TXUXXRXRQ_DSPRARINC1  9      /* DSP RAM A Read Increment 1 */
+#define         TXUXXRXRQ_DSPRAWINC0 10      /* DSP RAM A Write Increment 0 */
+#define         TXUXXRXRQ_DSPRAWINC1 11      /* DSP RAM A Write Increment 1 */
+#define         TXUXXRXRQ_DSPRBRINC0 12      /* DSP RAM B Read Increment 0 */
+#define         TXUXXRXRQ_DSPRBRINC1 13      /* DSP RAM B Read Increment 1 */
+#define         TXUXXRXRQ_DSPRBWINC0 14      /* DSP RAM B Write Increment 0 */
+#define         TXUXXRXRQ_DSPRBWINC1 15      /* DSP RAM B Write Increment 1 */
+
+#define         TXUXXRXRQ_ACC0L0     16      /* Accumulator 0 bottom 32-bits */
+#define         TXUXXRXRQ_ACC1L0     17      /* Accumulator 1 bottom 32-bits */
+#define         TXUXXRXRQ_ACC2L0     18      /* Accumulator 2 bottom 32-bits */
+#define         TXUXXRXRQ_ACC3L0     19      /* Accumulator 3 bottom 32-bits */
+#define         TXUXXRXRQ_ACC0HI     20      /* Accumulator 0 top 8-bits */
+#define         TXUXXRXRQ_ACC1HI     21      /* Accumulator 1 top 8-bits */
+#define         TXUXXRXRQ_ACC2HI     22      /* Accumulator 2 top 8-bits */
+#define         TXUXXRXRQ_ACC3HI     23      /* Accumulator 3 top 8-bits */
+#define     TXUXXRXRQ_UXX_BITS   0x0000000F  /* Unit number   */
+#define     TXUXXRXRQ_UXX_S      0
+
+/*****************************************************************************
+ *          PRIVILEGE CONTROL VALUES FOR MEMORY MAPPED RESOURCES
+ ****************************************************************************/
+/*
+ * These values define memory mapped registers that give control over and
+ * the privilege required to access other memory mapped resources. These
+ * registers themselves always require privilege to update them.
+ */
+
+#define TXPRIVREG_STRIDE    0x8 /* Delta between per-thread regs */
+#define TXPRIVREG_STRIDE_S  3
+
+/*
+ * Each bit 0 to 15 defines privilege required to access internal register
+ * regions 0x04800000 to 0x048FFFFF in 64k chunks
+ */
+#define T0PIOREG    0x04810100
+#define T1PIOREG    0x04810108
+#define T2PIOREG    0x04810110
+#define T3PIOREG    0x04810118
+
+/*
+ * Each bit 0 to 31 defines privilege required to use the pair of
+ * system events implemented as writes in the regions 0x04000000 to
+ * 0x04000FFF in 2*64 byte chunks.
+ */
+#define T0PSYREG    0x04810180
+#define T1PSYREG    0x04810188
+#define T2PSYREG    0x04810190
+#define T3PSYREG    0x04810198
+
+/*
+ * CHIP PRIV CONTROLS
+ * ------------------
+ */
+
+/* The TXPIOREG register holds a bit mask directly mappable to
+   corresponding addresses in the range 0x04800000 to 049FFFFF */
+#define     TXPIOREG_ADDR_BITS  0x1F0000 /* Up to 32x64K bytes */
+#define     TXPIOREG_ADDR_S     16
+
+/* Hence based on the _HWBASE values ... */
+#define     TXPIOREG_CORE_BIT       (1<<((0x04800000>>16)&0x1F))
+#define     TXPIOREG_PRIV_BIT       (1<<((0x04810000>>16)&0x1F))
+#define     TXPIOREG_TRIG_BIT       (1<<((0x04820000>>16)&0x1F))
+#define     TXPIOREG_SYSC_BIT       (1<<((0x04830000>>16)&0x1F))
+
+#define     TXPIOREG_WRC_BIT          0x00080000  /* Wr combiner reg priv */
+#define     TXPIOREG_LOCALBUS_RW_BIT  0x00040000  /* Local bus rd/wr priv */
+#define     TXPIOREG_SYSREGBUS_RD_BIT 0x00020000  /* Sys reg bus write priv */
+#define     TXPIOREG_SYSREGBUS_WR_BIT 0x00010000  /* Sys reg bus read priv */
+
+/* CORE region privilege controls */
+#define T0PRIVCORE 0x04800828
+#define         TXPRIVCORE_TXBKICK_BIT   0x001  /* Background kick priv */
+#define         TXPRIVCORE_TXIKICK_BIT   0x002  /* Interrupt kick priv  */
+#define         TXPRIVCORE_TXAMAREGX_BIT 0x004  /* TXAMAREG4|5|6 priv   */
+#define TnPRIVCORE_STRIDE 0x00001000
+
+#define T0PRIVSYSR 0x04810000
+#define     TnPRIVSYSR_STRIDE   0x00000008
+#define     TnPRIVSYSR_STRIDE_S 3
+#define     TXPRIVSYSR_CFLUSH_BIT     0x01
+#define     TXPRIVSYSR_MTABLE_BIT     0x02
+#define     TXPRIVSYSR_DIRECT_BIT     0x04
+#ifdef METAC_1_2
+#define     TXPRIVSYSR_ALL_BITS       0x07
+#else
+#define     TXPRIVSYSR_CORE_BIT       0x08
+#define     TXPRIVSYSR_CORECODE_BIT   0x10
+#define     TXPRIVSYSR_ALL_BITS       0x1F
+#endif
+#define T1PRIVSYSR 0x04810008
+#define T2PRIVSYSR 0x04810010
+#define T3PRIVSYSR 0x04810018
+
+/*****************************************************************************
+ *          H/W TRIGGER STATE/LEVEL REGISTERS AND H/W TRIGGER VECTORS
+ ****************************************************************************/
+/*
+ * These values define memory mapped registers that give control over and
+ * the state of hardware trigger sources both external to the META processor
+ * and internal to it.
+ */
+
+#define HWSTATMETA  0x04820000  /* Hardware status/clear META trig */
+#define         HWSTATMETA_T0HALT_BITS 0xF
+#define         HWSTATMETA_T0HALT_S    0
+#define     HWSTATMETA_T0BHALT_BIT 0x1  /* Background HALT */
+#define     HWSTATMETA_T0IHALT_BIT 0x2  /* Interrupt HALT  */
+#define     HWSTATMETA_T0PHALT_BIT 0x4  /* PF/RO Memory HALT */
+#define     HWSTATMETA_T0AMATR_BIT 0x8  /* AMA trigger */
+#define     HWSTATMETA_TnINT_S     4    /* Shift by (thread*4) */
+#define HWSTATEXT   0x04820010  /* H/W status/clear external trigs  0-31 */
+#define HWSTATEXT2  0x04820018  /* H/W status/clear external trigs 32-63 */
+#define HWSTATEXT4  0x04820020  /* H/W status/clear external trigs 64-95 */
+#define HWSTATEXT6  0x04820028  /* H/W status/clear external trigs 96-128 */
+#define HWLEVELEXT  0x04820030  /* Edge/Level type of external trigs  0-31 */
+#define HWLEVELEXT2 0x04820038  /* Edge/Level type of external trigs 32-63 */
+#define HWLEVELEXT4 0x04820040  /* Edge/Level type of external trigs 64-95 */
+#define HWLEVELEXT6 0x04820048  /* Edge/Level type of external trigs 96-128 */
+#define     HWLEVELEXT_XXX_LEVEL 1  /* Level sense logic in HWSTATEXTn */
+#define     HWLEVELEXT_XXX_EDGE  0
+#define HWMASKEXT   0x04820050  /* Enable/disable of external trigs  0-31 */
+#define HWMASKEXT2  0x04820058  /* Enable/disable of external trigs 32-63 */
+#define HWMASKEXT4  0x04820060  /* Enable/disable of external trigs 64-95 */
+#define HWMASKEXT6  0x04820068  /* Enable/disable of external trigs 96-128 */
+#define T0VECINT_BHALT  0x04820500  /* Background HALT trigger vector */
+#define     TXVECXXX_BITS   0xF       /* Per-trigger vector vals 0,1,4-15 */
+#define     TXVECXXX_S  0
+#define T0VECINT_IHALT  0x04820508  /* Interrupt HALT */
+#define T0VECINT_PHALT  0x04820510  /* PF/RO memory fault */
+#define T0VECINT_AMATR  0x04820518  /* AMA trigger */
+#define     TnVECINT_STRIDE 0x00000020  /* Per thread stride */
+#define HWVEC0EXT   0x04820700  /* Vectors for external triggers  0-31 */
+#define HWVEC20EXT  0x04821700  /* Vectors for external triggers 32-63 */
+#define HWVEC40EXT  0x04822700  /* Vectors for external triggers 64-95 */
+#define HWVEC60EXT  0x04823700  /* Vectors for external triggers 96-127 */
+#define     HWVECnEXT_STRIDE 0x00000008 /* Per trigger stride */
+#define HWVECnEXT_DEBUG 0x1         /* Redirect trigger to debug i/f */
+
+/*
+ * CORE HWCODE-BREAKPOINT REGISTERS/VALUES
+ * ---------------------------------------
+ */
+#define CODEB0ADDR         0x0480FF00  /* Address specifier */
+#define     CODEBXADDR_MATCHX_BITS 0xFFFFFFFC
+#define     CODEBXADDR_MATCHX_S    2
+#define CODEB0CTRL         0x0480FF08  /* Control */
+#define     CODEBXCTRL_MATEN_BIT   0x80000000   /* Match 'Enable'  */
+#define     CODEBXCTRL_MATTXEN_BIT 0x10000000   /* Match threadn enable */
+#define     CODEBXCTRL_HITC_BITS   0x00FF0000   /* Hit counter   */
+#define     CODEBXCTRL_HITC_S      16
+#define           CODEBXHITC_NEXT  0xFF     /* Next 'hit' will trigger */
+#define           CODEBXHITC_HIT1  0x00     /* No 'hits' after trigger */
+#define     CODEBXCTRL_MMASK_BITS  0x0000FFFC   /* Mask ADDR_MATCH bits */
+#define     CODEBXCTRL_MMASK_S     2
+#define     CODEBXCTRL_MATLTX_BITS 0x00000003   /* Match threadn LOCAL addr */
+#define     CODEBXCTRL_MATLTX_S    0            /* Match threadn LOCAL addr */
+#define CODEBnXXXX_STRIDE      0x00000010  /* Stride between CODEB reg sets */
+#define CODEBnXXXX_STRIDE_S    4
+#define CODEBnXXXX_LIMIT       3           /* Sets 0-3 */
+
+/*
+ * CORE DATA-WATCHPOINT REGISTERS/VALUES
+ * -------------------------------------
+ */
+#define DATAW0ADDR         0x0480FF40  /* Address specifier */
+#define     DATAWXADDR_MATCHR_BITS 0xFFFFFFF8
+#define     DATAWXADDR_MATCHR_S    3
+#define     DATAWXADDR_MATCHW_BITS 0xFFFFFFFF
+#define     DATAWXADDR_MATCHW_S    0
+#define DATAW0CTRL         0x0480FF48  /* Control */
+#define     DATAWXCTRL_MATRD_BIT   0x80000000   /* Match 'Read'  */
+#ifndef METAC_1_2
+#define     DATAWXCTRL_MATNOTTX_BIT 0x20000000  /* Invert threadn enable */
+#endif
+#define     DATAWXCTRL_MATWR_BIT   0x40000000   /* Match 'Write' */
+#define     DATAWXCTRL_MATTXEN_BIT 0x10000000   /* Match threadn enable */
+#define     DATAWXCTRL_WRSIZE_BITS 0x0F000000   /* Write Match Size */
+#define     DATAWXCTRL_WRSIZE_S    24
+#define         DATAWWRSIZE_ANY   0         /* Any size transaction matches */
+#define         DATAWWRSIZE_8BIT  1     /* Specific sizes ... */
+#define         DATAWWRSIZE_16BIT 2
+#define         DATAWWRSIZE_32BIT 3
+#define         DATAWWRSIZE_64BIT 4
+#define     DATAWXCTRL_HITC_BITS   0x00FF0000   /* Hit counter   */
+#define     DATAWXCTRL_HITC_S      16
+#define           DATAWXHITC_NEXT  0xFF     /* Next 'hit' will trigger */
+#define           DATAWXHITC_HIT1  0x00     /* No 'hits' after trigger */
+#define     DATAWXCTRL_MMASK_BITS 0x0000FFF8    /* Mask ADDR_MATCH bits */
+#define     DATAWXCTRL_MMASK_S    3
+#define     DATAWXCTRL_MATLTX_BITS 0x00000003   /* Match threadn LOCAL addr */
+#define     DATAWXCTRL_MATLTX_S    0            /* Match threadn LOCAL addr */
+#define DATAW0DMATCH0       0x0480FF50 /* Write match data */
+#define DATAW0DMATCH1       0x0480FF58
+#define DATAW0DMASK0        0x0480FF60 /* Write match data mask */
+#define DATAW0DMASK1        0x0480FF68
+#define DATAWnXXXX_STRIDE      0x00000040  /* Stride between DATAW reg sets */
+#define DATAWnXXXX_STRIDE_S    6
+#define DATAWnXXXX_LIMIT       1           /* Sets 0,1 */
+
+/*
+ * CHIP Automatic Mips Allocation control registers
+ * ------------------------------------------------
+ */
+
+/* CORE memory mapped AMA registers */
+#define T0AMAREG4   0x04800810
+#define     TXAMAREG4_POOLSIZE_BITS 0x3FFFFF00
+#define     TXAMAREG4_POOLSIZE_S    8
+#define     TXAMAREG4_AVALUE_BITS   0x000000FF
+#define     TXAMAREG4_AVALUE_S  0
+#define T0AMAREG5   0x04800818
+#define     TXAMAREG5_POOLC_BITS    0x07FFFFFF
+#define         TXAMAREG5_POOLC_S       0
+#define T0AMAREG6   0x04800820
+#define     TXAMAREG6_DLINEDEF_BITS 0x00FFFFF0
+#define         TXAMAREG6_DLINEDEF_S    0
+#define TnAMAREGX_STRIDE    0x00001000
+
+/*
+ * Memory Management Control Unit Table Entries
+ * --------------------------------------------
+ */
+#define MMCU_ENTRY_S         4            /* -> Entry size                */
+#define MMCU_ENTRY_ADDR_BITS 0xFFFFF000   /* Physical address             */
+#define MMCU_ENTRY_ADDR_S    12           /* -> Page size                 */
+#define MMCU_ENTRY_CWIN_BITS 0x000000C0   /* Caching 'window' selection   */
+#define MMCU_ENTRY_CWIN_S    6
+#define     MMCU_CWIN_UNCACHED  0 /* May not be memory etc.  */
+#define     MMCU_CWIN_BURST     1 /* Cached but LRU unset */
+#define     MMCU_CWIN_C1SET     2 /* Cached in 1 set only */
+#define     MMCU_CWIN_CACHED    3 /* Fully cached            */
+#define MMCU_ENTRY_CACHE_BIT 0x00000080   /* Set for cached region         */
+#define     MMCU_ECACHE1_FULL_BIT  0x00000040 /* Use all the sets */
+#define     MMCU_ECACHE0_BURST_BIT 0x00000040 /* Match bursts     */
+#define MMCU_ENTRY_SYS_BIT   0x00000010   /* Sys-coherent access required  */
+#define MMCU_ENTRY_WRC_BIT   0x00000008   /* Write combining allowed       */
+#define MMCU_ENTRY_PRIV_BIT  0x00000004   /* Privilege required            */
+#define MMCU_ENTRY_WR_BIT    0x00000002   /* Writes allowed                */
+#define MMCU_ENTRY_VAL_BIT   0x00000001   /* Entry is valid                */
+
+#ifdef METAC_2_1
+/*
+ * Extended first-level/top table entries have extra/larger fields in later
+ * cores as bits 11:0 previously had no effect in such table entries.
+ */
+#define MMCU_E1ENT_ADDR_BITS 0xFFFFFFC0   /* Physical address             */
+#define MMCU_E1ENT_ADDR_S    6            /*   -> resolution < page size  */
+#define MMCU_E1ENT_PGSZ_BITS 0x0000001E   /* Page size for 2nd level      */
+#define MMCU_E1ENT_PGSZ_S    1
+#define     MMCU_E1ENT_PGSZ0_POWER   12   /* PgSz  0 -> 4K */
+#define     MMCU_E1ENT_PGSZ_MAX      10   /* PgSz 10 -> 4M maximum */
+#define MMCU_E1ENT_MINIM_BIT 0x00000020
+#endif /* METAC_2_1 */
+
+/* MMCU control register in SYSC region */
+#define MMCU_TABLE_PHYS_ADDR        0x04830010
+#define     MMCU_TABLE_PHYS_ADDR_BITS   0xFFFFFFFC
+#ifdef METAC_2_1
+#define     MMCU_TABLE_PHYS_EXTEND      0x00000001     /* See below */
+#endif
+#define MMCU_DCACHE_CTRL_ADDR       0x04830018
+#define     MMCU_xCACHE_CTRL_ENABLE_BIT     0x00000001
+#define     MMCU_xCACHE_CTRL_PARTITION_BIT  0x00000000 /* See xCPART below */
+#define MMCU_ICACHE_CTRL_ADDR       0x04830020
+
+#ifdef METAC_2_1
+
+/*
+ * Allow direct access to physical memory used to implement MMU table.
+ *
+ * Each is based on a corresponding MMCU_TnLOCAL_TABLE_PHYSn or similar
+ *    MMCU_TnGLOBAL_TABLE_PHYSn register pair (see next).
+ */
+#define LINSYSMEMT0L_BASE   0x05000000
+#define LINSYSMEMT0L_LIMIT  0x051FFFFF
+#define     LINSYSMEMTnX_STRIDE     0x00200000  /*  2MB Local per thread */
+#define     LINSYSMEMTnX_STRIDE_S   21
+#define     LINSYSMEMTXG_OFFSET     0x00800000  /* +2MB Global per thread */
+#define     LINSYSMEMTXG_OFFSET_S   23
+#define LINSYSMEMT1L_BASE   0x05200000
+#define LINSYSMEMT1L_LIMIT  0x053FFFFF
+#define LINSYSMEMT2L_BASE   0x05400000
+#define LINSYSMEMT2L_LIMIT  0x055FFFFF
+#define LINSYSMEMT3L_BASE   0x05600000
+#define LINSYSMEMT3L_LIMIT  0x057FFFFF
+#define LINSYSMEMT0G_BASE   0x05800000
+#define LINSYSMEMT0G_LIMIT  0x059FFFFF
+#define LINSYSMEMT1G_BASE   0x05A00000
+#define LINSYSMEMT1G_LIMIT  0x05BFFFFF
+#define LINSYSMEMT2G_BASE   0x05C00000
+#define LINSYSMEMT2G_LIMIT  0x05DFFFFF
+#define LINSYSMEMT3G_BASE   0x05E00000
+#define LINSYSMEMT3G_LIMIT  0x05FFFFFF
+
+/*
+ * Extended MMU table functionality allows a sparse or flat table to be
+ * described much more efficiently than before.
+ */
+#define MMCU_T0LOCAL_TABLE_PHYS0    0x04830700
+#define   MMCU_TnX_TABLE_PHYSX_STRIDE    0x20   /* Offset per thread */
+#define   MMCU_TnX_TABLE_PHYSX_STRIDE_S  5
+#define   MMCU_TXG_TABLE_PHYSX_OFFSET    0x10   /* Global versus local */
+#define   MMCU_TXG_TABLE_PHYSX_OFFSET_S  4
+#define     MMCU_TBLPHYS0_DCCTRL_BITS       0x000000DF  /* DC controls  */
+#define     MMCU_TBLPHYS0_ENTLB_BIT         0x00000020  /* Cache in TLB */
+#define     MMCU_TBLPHYS0_TBLSZ_BITS        0x00000F00  /* Area supported */
+#define     MMCU_TBLPHYS0_TBLSZ_S           8
+#define         MMCU_TBLPHYS0_TBLSZ0_POWER      22  /* 0 -> 4M */
+#define         MMCU_TBLPHYS0_TBLSZ_MAX         9   /* 9 -> 2G */
+#define     MMCU_TBLPHYS0_LINBASE_BITS      0xFFC00000  /* Linear base */
+#define     MMCU_TBLPHYS0_LINBASE_S         22
+
+#define MMCU_T0LOCAL_TABLE_PHYS1    0x04830708
+#define     MMCU_TBLPHYS1_ADDR_BITS         0xFFFFFFFC  /* Physical base */
+#define     MMCU_TBLPHYS1_ADDR_S            2
+
+#define MMCU_T0GLOBAL_TABLE_PHYS0   0x04830710
+#define MMCU_T0GLOBAL_TABLE_PHYS1   0x04830718
+#define MMCU_T1LOCAL_TABLE_PHYS0    0x04830720
+#define MMCU_T1LOCAL_TABLE_PHYS1    0x04830728
+#define MMCU_T1GLOBAL_TABLE_PHYS0   0x04830730
+#define MMCU_T1GLOBAL_TABLE_PHYS1   0x04830738
+#define MMCU_T2LOCAL_TABLE_PHYS0    0x04830740
+#define MMCU_T2LOCAL_TABLE_PHYS1    0x04830748
+#define MMCU_T2GLOBAL_TABLE_PHYS0   0x04830750
+#define MMCU_T2GLOBAL_TABLE_PHYS1   0x04830758
+#define MMCU_T3LOCAL_TABLE_PHYS0    0x04830760
+#define MMCU_T3LOCAL_TABLE_PHYS1    0x04830768
+#define MMCU_T3GLOBAL_TABLE_PHYS0   0x04830770
+#define MMCU_T3GLOBAL_TABLE_PHYS1   0x04830778
+
+#define MMCU_T0EBWCCTRL             0x04830640
+#define     MMCU_TnEBWCCTRL_BITS    0x00000007
+#define     MMCU_TnEBWCCTRL_S       0
+#define         MMCU_TnEBWCCCTRL_DISABLE_ALL 0
+#define         MMCU_TnEBWCCCTRL_ABIT25      1
+#define         MMCU_TnEBWCCCTRL_ABIT26      2
+#define         MMCU_TnEBWCCCTRL_ABIT27      3
+#define         MMCU_TnEBWCCCTRL_ABIT28      4
+#define         MMCU_TnEBWCCCTRL_ABIT29      5
+#define         MMCU_TnEBWCCCTRL_ABIT30      6
+#define         MMCU_TnEBWCCCTRL_ENABLE_ALL  7
+#define MMCU_TnEBWCCTRL_STRIDE      8
+
+#endif /* METAC_2_1 */
+
+
+/* Registers within the SYSC register region */
+#define METAC_ID                0x04830000
+#define     METAC_ID_MAJOR_BITS     0xFF000000
+#define     METAC_ID_MAJOR_S        24
+#define     METAC_ID_MINOR_BITS     0x00FF0000
+#define     METAC_ID_MINOR_S        16
+#define     METAC_ID_REV_BITS       0x0000FF00
+#define     METAC_ID_REV_S          8
+#define     METAC_ID_MAINT_BITS     0x000000FF
+#define     METAC_ID_MAINT_S        0
+
+#ifdef METAC_2_1
+/* Use of this section is strongly deprecated */
+#define METAC_ID2               0x04830008
+#define     METAC_ID2_DESIGNER_BITS 0xFFFF0000  /* Modified by customer */
+#define     METAC_ID2_DESIGNER_S    16
+#define     METAC_ID2_MINOR2_BITS   0x00000F00  /* 3rd digit of prod rev */
+#define     METAC_ID2_MINOR2_S      8
+#define     METAC_ID2_CONFIG_BITS   0x000000FF  /* Wrapper configuration */
+#define     METAC_ID2_CONFIG_S      0
+
+/* Primary core identification and configuration information */
+#define METAC_CORE_ID           0x04831000
+#define     METAC_COREID_GROUP_BITS   0xFF000000
+#define     METAC_COREID_GROUP_S      24
+#define         METAC_COREID_GROUP_METAG  0x14
+#define     METAC_COREID_ID_BITS      0x00FF0000
+#define     METAC_COREID_ID_S         16
+#define         METAC_COREID_ID_W32       0x10   /* >= for 32-bit pipeline */
+#define     METAC_COREID_CONFIG_BITS  0x0000FFFF
+#define     METAC_COREID_CONFIG_S     0
+#define       METAC_COREID_CFGCACHE_BITS    0x0007
+#define       METAC_COREID_CFGCACHE_S       0
+#define           METAC_COREID_CFGCACHE_NOM       0
+#define           METAC_COREID_CFGCACHE_TYPE0     1
+#define           METAC_COREID_CFGCACHE_NOMMU     1 /* Alias for TYPE0 */
+#define           METAC_COREID_CFGCACHE_NOCACHE   2
+#define           METAC_COREID_CFGCACHE_PRIVNOMMU 3
+#define       METAC_COREID_CFGDSP_BITS      0x0038
+#define       METAC_COREID_CFGDSP_S         3
+#define           METAC_COREID_CFGDSP_NOM       0
+#define           METAC_COREID_CFGDSP_MIN       1
+#define       METAC_COREID_NOFPACC_BIT      0x0040 /* Set if no FPU accum */
+#define       METAC_COREID_CFGFPU_BITS      0x0180
+#define       METAC_COREID_CFGFPU_S         7
+#define           METAC_COREID_CFGFPU_NOM       0
+#define           METAC_COREID_CFGFPU_SNGL      1
+#define           METAC_COREID_CFGFPU_DBL       2
+#define       METAC_COREID_NOAMA_BIT        0x0200 /* Set if no AMA present */
+#define       METAC_COREID_NOCOH_BIT        0x0400 /* Set if no Gbl coherency */
+
+/* Core revision information */
+#define METAC_CORE_REV          0x04831008
+#define     METAC_COREREV_DESIGN_BITS   0xFF000000
+#define     METAC_COREREV_DESIGN_S      24
+#define     METAC_COREREV_MAJOR_BITS    0x00FF0000
+#define     METAC_COREREV_MAJOR_S       16
+#define     METAC_COREREV_MINOR_BITS    0x0000FF00
+#define     METAC_COREREV_MINOR_S       8
+#define     METAC_COREREV_MAINT_BITS    0x000000FF
+#define     METAC_COREREV_MAINT_S       0
+
+/* Configuration information control outside the core */
+#define METAC_CORE_DESIGNER1    0x04831010      /* Arbitrary value */
+#define METAC_CORE_DESIGNER2    0x04831018      /* Arbitrary value */
+
+/* Configuration information covering presence/number of various features */
+#define METAC_CORE_CONFIG2      0x04831020
+#define     METAC_CORECFG2_COREDBGTYPE_BITS 0x60000000   /* Core debug type */
+#define     METAC_CORECFG2_COREDBGTYPE_S    29
+#define     METAC_CORECFG2_DCSMALL_BIT      0x04000000   /* Data cache small */
+#define     METAC_CORECFG2_ICSMALL_BIT      0x02000000   /* Inst cache small */
+#define     METAC_CORECFG2_DCSZNP_BITS      0x01C00000   /* Data cache size np */
+#define     METAC_CORECFG2_DCSZNP_S         22
+#define     METAC_CORECFG2_ICSZNP_BITS      0x00380000  /* Inst cache size np */
+#define     METAC_CORECFG2_ICSZNP_S         19
+#define     METAC_CORECFG2_DCSZ_BITS        0x00070000   /* Data cache size */
+#define     METAC_CORECFG2_DCSZ_S           16
+#define         METAC_CORECFG2_xCSZ_4K          0        /* Allocated values */
+#define         METAC_CORECFG2_xCSZ_8K          1
+#define         METAC_CORECFG2_xCSZ_16K         2
+#define         METAC_CORECFG2_xCSZ_32K         3
+#define         METAC_CORECFG2_xCSZ_64K         4
+#define     METAC_CORE_C2ICSZ_BITS          0x0000E000   /* Inst cache size */
+#define     METAC_CORE_C2ICSZ_S             13
+#define     METAC_CORE_GBLACC_BITS          0x00001800   /* Number of Global Acc */
+#define     METAC_CORE_GBLACC_S             11
+#define     METAC_CORE_GBLDXR_BITS          0x00000700   /* 0 -> 0, R -> 2^(R-1) */
+#define     METAC_CORE_GBLDXR_S             8
+#define     METAC_CORE_GBLAXR_BITS          0x000000E0   /* 0 -> 0, R -> 2^(R-1) */
+#define     METAC_CORE_GBLAXR_S             5
+#define     METAC_CORE_RTTRACE_BIT          0x00000010
+#define     METAC_CORE_WATCHN_BITS          0x0000000C   /* 0 -> 0, N -> 2^N */
+#define     METAC_CORE_WATCHN_S             2
+#define     METAC_CORE_BREAKN_BITS          0x00000003   /* 0 -> 0, N -> 2^N */
+#define     METAC_CORE_BREAKN_S             0
+
+/* Configuration information covering presence/number of various features */
+#define METAC_CORE_CONFIG3      0x04831028
+#define     METAC_CORECFG3_L2C_REV_ID_BITS          0x000F0000   /* Revision of L2 cache */
+#define     METAC_CORECFG3_L2C_REV_ID_S             16
+#define     METAC_CORECFG3_L2C_LINE_SIZE_BITS       0x00003000   /* L2 line size */
+#define     METAC_CORECFG3_L2C_LINE_SIZE_S          12
+#define         METAC_CORECFG3_L2C_LINE_SIZE_64B    0x0          /* 64 bytes */
+#define     METAC_CORECFG3_L2C_NUM_WAYS_BITS        0x00000F00   /* L2 number of ways (2^n) */
+#define     METAC_CORECFG3_L2C_NUM_WAYS_S           8
+#define     METAC_CORECFG3_L2C_SIZE_BITS            0x000000F0   /* L2 size (2^n) */
+#define     METAC_CORECFG3_L2C_SIZE_S               4
+#define     METAC_CORECFG3_L2C_UNIFIED_BIT          0x00000004   /* Unified cache: */
+#define     METAC_CORECFG3_L2C_UNIFIED_S            2
+#define       METAC_CORECFG3_L2C_UNIFIED_UNIFIED    1            /* - Unified D/I cache */
+#define       METAC_CORECFG3_L2C_UNIFIED_SEPARATE   0            /* - Separate D/I cache */
+#define     METAC_CORECFG3_L2C_MODE_BIT             0x00000002   /* Cache Mode: */
+#define     METAC_CORECFG3_L2C_MODE_S               1
+#define       METAC_CORECFG3_L2C_MODE_WRITE_BACK    1            /* - Write back */
+#define       METAC_CORECFG3_L2C_MODE_WRITE_THROUGH 0            /* - Write through */
+#define     METAC_CORECFG3_L2C_HAVE_L2C_BIT         0x00000001   /* Have L2C */
+#define     METAC_CORECFG3_L2C_HAVE_L2C_S           0
+
+#endif /* METAC_2_1 */
+
+#define SYSC_CACHE_MMU_CONFIG       0x04830028
+#ifdef METAC_2_1
+#define     SYSC_CMMUCFG_DCSKEWABLE_BIT 0x00000040
+#define     SYSC_CMMUCFG_ICSKEWABLE_BIT 0x00000020
+#define     SYSC_CMMUCFG_DCSKEWOFF_BIT  0x00000010  /* Skew association override  */
+#define     SYSC_CMMUCFG_ICSKEWOFF_BIT  0x00000008  /* -> default 0 on if present */
+#define     SYSC_CMMUCFG_MODE_BITS      0x00000007  /* Access to old state */
+#define     SYSC_CMMUCFG_MODE_S         0
+#define         SYSC_CMMUCFG_ON             0x7
+#define         SYSC_CMMUCFG_EBYPASS        0x6   /* Enhanced by-pass mode */
+#define         SYSC_CMMUCFG_EBYPASSIC      0x4   /* EB just inst cache */
+#define         SYSC_CMMUCFG_EBYPASSDC      0x2   /* EB just data cache */
+#endif /* METAC_2_1 */
+/* Old definitions, Keep them for now */
+#define         SYSC_CMMUCFG_MMU_ON_BIT     0x1
+#define         SYSC_CMMUCFG_DC_ON_BIT      0x2
+#define         SYSC_CMMUCFG_IC_ON_BIT      0x4
+
+#define SYSC_JTAG_THREAD            0x04830030
+#define     SYSC_JTAG_TX_BITS           0x00000003 /* Read only bits! */
+#define     SYSC_JTAG_TX_S              0
+#define     SYSC_JTAG_PRIV_BIT          0x00000004
+#ifdef METAC_2_1
+#define     SYSC_JTAG_SLAVETX_BITS      0x00000018
+#define     SYSC_JTAG_SLAVETX_S         3
+#endif /* METAC_2_1 */
+
+#define SYSC_DCACHE_FLUSH           0x04830038
+#define SYSC_ICACHE_FLUSH           0x04830040
+#define  SYSC_xCACHE_FLUSH_INIT     0x1
+#define MMCU_DIRECTMAP0_ADDR        0x04830080 /* LINSYSDIRECT_BASE -> */
+#define     MMCU_DIRECTMAPn_STRIDE      0x00000010 /* 4 Region settings */
+#define     MMCU_DIRECTMAPn_S           4
+#define         MMCU_DIRECTMAPn_ADDR_BITS       0xFF800000
+#define         MMCU_DIRECTMAPn_ADDR_S          23
+#define         MMCU_DIRECTMAPn_ADDR_SCALE      0x00800000 /* 8M Regions */
+#ifdef METAC_2_1
+/*
+ * These fields in the above registers provide MMCU_ENTRY_* values
+ *   for each direct mapped region to enable optimisation of these areas.
+ *       (LSB similar to VALID must be set for enhancements to be active)
+ */
+#define         MMCU_DIRECTMAPn_ENHANCE_BIT     0x00000001 /* 0 = no optim */
+#define         MMCU_DIRECTMAPn_DCCTRL_BITS     0x000000DF /* Get DC Ctrl */
+#define         MMCU_DIRECTMAPn_DCCTRL_S        0
+#define         MMCU_DIRECTMAPn_ICCTRL_BITS     0x0000C000 /* Get IC Ctrl */
+#define         MMCU_DIRECTMAPn_ICCTRL_S        8
+#define         MMCU_DIRECTMAPn_ENTLB_BIT       0x00000020 /* Cache in TLB */
+#define         MMCU_DIRECTMAPn_ICCWIN_BITS     0x0000C000 /* Get IC Win Bits */
+#define         MMCU_DIRECTMAPn_ICCWIN_S        14
+#endif /* METAC_2_1 */
+
+#define MMCU_DIRECTMAP1_ADDR        0x04830090
+#define MMCU_DIRECTMAP2_ADDR        0x048300a0
+#define MMCU_DIRECTMAP3_ADDR        0x048300b0
+
+/*
+ * These bits partition each thread's use of data cache or instruction cache
+ * resource by modifying the top 4 bits of the address within the cache
+ * storage area.
+ */
+#define SYSC_DCPART0 0x04830200
+#define     SYSC_xCPARTn_STRIDE   0x00000008
+#define     SYSC_xCPARTL_AND_BITS 0x0000000F /* Masks top 4 bits */
+#define     SYSC_xCPARTL_AND_S    0
+#define     SYSC_xCPARTG_AND_BITS 0x00000F00 /* Masks top 4 bits */
+#define     SYSC_xCPARTG_AND_S    8
+#define     SYSC_xCPARTL_OR_BITS  0x000F0000 /* Ors into top 4 bits */
+#define     SYSC_xCPARTL_OR_S     16
+#define     SYSC_xCPARTG_OR_BITS  0x0F000000 /* Ors into top 4 bits */
+#define     SYSC_xCPARTG_OR_S     24
+#define     SYSC_CWRMODE_BIT      0x80000000 /* Write cache mode bit */
+
+#define SYSC_DCPART1 0x04830208
+#define SYSC_DCPART2 0x04830210
+#define SYSC_DCPART3 0x04830218
+#define SYSC_ICPART0 0x04830220
+#define SYSC_ICPART1 0x04830228
+#define SYSC_ICPART2 0x04830230
+#define SYSC_ICPART3 0x04830238
+
+/*
+ * META Core Memory and Cache Update registers
+ */
+#define SYSC_MCMDATAX  0x04830300   /* 32-bit read/write data register */
+#define SYSC_MCMDATAT  0x04830308   /* Read or write data triggers oper */
+#define SYSC_MCMGCTRL  0x04830310   /* Control register */
+#define     SYSC_MCMGCTRL_READ_BIT  0x00000001 /* Set to issue 1st read */
+#define     SYSC_MCMGCTRL_AINC_BIT  0x00000002 /* Set for auto-increment */
+#define     SYSC_MCMGCTRL_ADDR_BITS 0x000FFFFC /* Address or index */
+#define     SYSC_MCMGCTRL_ADDR_S    2
+#define     SYSC_MCMGCTRL_ID_BITS   0x0FF00000 /* Internal memory block Id */
+#define     SYSC_MCMGCTRL_ID_S      20
+#define         SYSC_MCMGID_NODEV       0xFF /* No Device Selected */
+#define         SYSC_MCMGID_DSPRAM0A    0x04 /* DSP RAM D0 block A access */
+#define         SYSC_MCMGID_DSPRAM0B    0x05 /* DSP RAM D0 block B access */
+#define         SYSC_MCMGID_DSPRAM1A    0x06 /* DSP RAM D1 block A access */
+#define         SYSC_MCMGID_DSPRAM1B    0x07 /* DSP RAM D1 block B access */
+#define         SYSC_MCMGID_DCACHEL     0x08 /* DCACHE lines (64-bytes/line) */
+#ifdef METAC_2_1
+#define         SYSC_MCMGID_DCACHETLB   0x09 /* DCACHE TLB ( Read Only )     */
+#endif /* METAC_2_1 */
+#define         SYSC_MCMGID_DCACHET     0x0A /* DCACHE tags (32-bits/line)   */
+#define         SYSC_MCMGID_DCACHELRU   0x0B /* DCACHE LRU (8-bits/line)     */
+#define         SYSC_MCMGID_ICACHEL     0x0C /* ICACHE lines (64-bytes/line  */
+#ifdef METAC_2_1
+#define         SYSC_MCMGID_ICACHETLB   0x0D /* ICACHE TLB (Read Only )     */
+#endif /* METAC_2_1 */
+#define         SYSC_MCMGID_ICACHET     0x0E /* ICACHE Tags (32-bits/line)   */
+#define         SYSC_MCMGID_ICACHELRU   0x0F /* ICACHE LRU (8-bits/line )    */
+#define         SYSC_MCMGID_COREIRAM0   0x10 /* Core code mem id 0 */
+#define         SYSC_MCMGID_COREIRAMn   0x17
+#define         SYSC_MCMGID_COREDRAM0   0x18 /* Core data mem id 0 */
+#define         SYSC_MCMGID_COREDRAMn   0x1F
+#ifdef METAC_2_1
+#define         SYSC_MCMGID_DCACHEST    0x20 /* DCACHE ST ( Read Only )      */
+#define         SYSC_MCMGID_ICACHEST    0x21 /* ICACHE ST ( Read Only )      */
+#define         SYSC_MCMGID_DCACHETLBLRU 0x22 /* DCACHE TLB LRU ( Read Only )*/
+#define         SYSC_MCMGID_ICACHETLBLRU 0x23 /* ICACHE TLB LRU( Read Only ) */
+#define         SYSC_MCMGID_DCACHESTLRU 0x24 /* DCACHE ST LRU ( Read Only )  */
+#define         SYSC_MCMGID_ICACHESTLRU 0x25 /* ICACHE ST LRU ( Read Only )  */
+#define         SYSC_MCMGID_DEBUGTLB    0x26 /* DEBUG TLB ( Read Only )      */
+#define         SYSC_MCMGID_DEBUGST     0x27 /* DEBUG ST ( Read Only )       */
+#define         SYSC_MCMGID_L2CACHEL    0x30 /* L2 Cache Lines (64-bytes/line) */
+#define         SYSC_MCMGID_L2CACHET    0x31 /* L2 Cache Tags (32-bits/line) */
+#define         SYSC_MCMGID_COPROX0     0x70 /* Coprocessor port id 0 */
+#define         SYSC_MCMGID_COPROXn     0x77
+#endif /* METAC_2_1 */
+#define     SYSC_MCMGCTRL_TR31_BIT  0x80000000 /* Trigger 31 on completion */
+#define SYSC_MCMSTATUS 0x04830318   /* Status read only */
+#define     SYSC_MCMSTATUS_IDLE_BIT 0x00000001
+
+/* META System Events */
+#define SYSC_SYS_EVENT            0x04830400
+#define     SYSC_SYSEVT_ATOMIC_BIT      0x00000001
+#define     SYSC_SYSEVT_CACHEX_BIT      0x00000002
+#define SYSC_ATOMIC_LOCK          0x04830408
+#define     SYSC_ATOMIC_STATE_TX_BITS 0x0000000F
+#define     SYSC_ATOMIC_STATE_TX_S    0
+#ifdef METAC_1_2
+#define     SYSC_ATOMIC_STATE_DX_BITS 0x000000F0
+#define     SYSC_ATOMIC_STATE_DX_S    4
+#else /* METAC_1_2 */
+#define     SYSC_ATOMIC_SOURCE_BIT    0x00000010
+#endif /* !METAC_1_2 */
+
+
+#ifdef METAC_2_1
+
+/* These definitions replace the EXPAND_TIMER_DIV register defines which are to
+ * be deprecated.
+ */
+#define SYSC_TIMER_DIV            0x04830140
+#define     SYSC_TIMDIV_BITS      0x000000FF
+#define     SYSC_TIMDIV_S         0
+
+/* META Enhanced by-pass control for local and global region */
+#define MMCU_LOCAL_EBCTRL   0x04830600
+#define MMCU_GLOBAL_EBCTRL  0x04830608
+#define     MMCU_EBCTRL_SINGLE_BIT      0x00000020 /* TLB Uncached */
+/*
+ * These fields in the above registers provide MMCU_ENTRY_* values
+ *   for each direct mapped region to enable optimisation of these areas.
+ */
+#define     MMCU_EBCTRL_DCCTRL_BITS     0x000000C0 /* Get DC Ctrl */
+#define     MMCU_EBCTRL_DCCTRL_S        0
+#define     MMCU_EBCTRL_ICCTRL_BITS     0x0000C000 /* Get IC Ctrl */
+#define     MMCU_EBCTRL_ICCTRL_S        8
+
+/* META Cached Core Mode Registers */
+#define MMCU_T0CCM_ICCTRL   0x04830680     /* Core cached code control */
+#define     MMCU_TnCCM_xxCTRL_STRIDE    8
+#define     MMCU_TnCCM_xxCTRL_STRIDE_S  3
+#define MMCU_T1CCM_ICCTRL   0x04830688
+#define MMCU_T2CCM_ICCTRL   0x04830690
+#define MMCU_T3CCM_ICCTRL   0x04830698
+#define MMCU_T0CCM_DCCTRL   0x048306C0     /* Core cached data control */
+#define MMCU_T1CCM_DCCTRL   0x048306C8
+#define MMCU_T2CCM_DCCTRL   0x048306D0
+#define MMCU_T3CCM_DCCTRL   0x048306D8
+#define     MMCU_TnCCM_ENABLE_BIT       0x00000001
+#define     MMCU_TnCCM_WIN3_BIT         0x00000002
+#define     MMCU_TnCCM_DCWRITE_BIT      0x00000004  /* In DCCTRL only */
+#define     MMCU_TnCCM_REGSZ_BITS       0x00000F00
+#define     MMCU_TnCCM_REGSZ_S          8
+#define         MMCU_TnCCM_REGSZ0_POWER      12     /* RegSz 0 -> 4K */
+#define         MMCU_TnCCM_REGSZ_MAXBYTES    0x00080000  /* 512K max */
+#define     MMCU_TnCCM_ADDR_BITS        0xFFFFF000
+#define     MMCU_TnCCM_ADDR_S           12
+
+#endif /* METAC_2_1 */
+
+/*
+ * Hardware performance counter registers
+ * --------------------------------------
+ */
+#ifdef METAC_2_1
+/* Two Performance Counter Internal Core Events Control registers */
+#define PERF_ICORE0   0x0480FFD0
+#define PERF_ICORE1   0x0480FFD8
+#define     PERFI_CTRL_BITS    0x0000000F
+#define     PERFI_CTRL_S       0
+#define         PERFI_CAH_DMISS    0x0  /* Dcache Misses in cache (TLB Hit) */
+#define         PERFI_CAH_IMISS    0x1  /* Icache Misses in cache (TLB Hit) */
+#define         PERFI_TLB_DMISS    0x2  /* Dcache Misses in per-thread TLB */
+#define         PERFI_TLB_IMISS    0x3  /* Icache Misses in per-thread TLB */
+#define         PERFI_TLB_DWRHITS  0x4  /* DC Write-Hits in per-thread TLB */
+#define         PERFI_TLB_DWRMISS  0x5  /* DC Write-Miss in per-thread TLB */
+#define         PERFI_CAH_DLFETCH  0x8  /* DC Read cache line fetch */
+#define         PERFI_CAH_ILFETCH  0x9  /* IC Read cache line fetch */
+#define         PERFI_CAH_DWFETCH  0xA  /* DC Read cache word fetch */
+#define         PERFI_CAH_IWFETCH  0xB  /* IC Read cache word fetch */
+#endif /* METAC_2_1 */
+
+/* Two memory-mapped hardware performance counter registers */
+#define PERF_COUNT0 0x0480FFE0
+#define PERF_COUNT1 0x0480FFE8
+
+/* Fields in PERF_COUNTn registers */
+#define PERF_COUNT_BITS  0x00ffffff /* Event count value */
+
+#define PERF_THREAD_BITS 0x0f000000 /* Thread mask selects threads */
+#define PERF_THREAD_S    24
+
+#define PERF_CTRL_BITS   0xf0000000 /* Event filter control */
+#define PERF_CTRL_S      28
+
+#define    PERFCTRL_SUPER   0  /* Superthread cycles */
+#define    PERFCTRL_REWIND  1  /* Rewinds due to Dcache Misses */
+#ifdef METAC_2_1
+#define    PERFCTRL_SUPREW  2  /* Rewinds of superthreaded cycles (no mask) */
+
+#define    PERFCTRL_CYCLES  3  /* Counts all cycles (no mask) */
+
+#define    PERFCTRL_PREDBC  4  /* Conditional branch predictions */
+#define    PERFCTRL_MISPBC  5  /* Conditional branch mispredictions */
+#define    PERFCTRL_PREDRT  6  /* Return predictions */
+#define    PERFCTRL_MISPRT  7  /* Return mispredictions */
+#endif /* METAC_2_1 */
+
+#define    PERFCTRL_DHITS   8  /* Dcache Hits */
+#define    PERFCTRL_IHITS   9  /* Icache Hits */
+#define    PERFCTRL_IMISS   10 /* Icache Misses in cache or TLB */
+#ifdef METAC_2_1
+#define    PERFCTRL_DCSTALL 11 /* Dcache+TLB o/p delayed (per-thread) */
+#define    PERFCTRL_ICSTALL 12 /* Icache+TLB o/p delayed (per-thread) */
+
+#define    PERFCTRL_INT     13 /* Internal core detailed events (see next) */
+#define    PERFCTRL_EXT     15 /* External source in core periphery */
+#endif /* METAC_2_1 */
+
+#ifdef METAC_2_1
+/* These definitions replace the EXPAND_PERFCHANx register defines which are to
+ * be deprecated.
+ */
+#define PERF_CHAN0 0x04830150
+#define PERF_CHAN1 0x04830158
+#define     PERF_CHAN_BITS    0x0000000F
+#define     PERF_CHAN_S       0
+#define         PERFCHAN_WRC_WRBURST   0x0   /* Write combiner write burst */
+#define         PERFCHAN_WRC_WRITE     0x1   /* Write combiner write       */
+#define         PERFCHAN_WRC_RDBURST   0x2   /* Write combiner read burst  */
+#define         PERFCHAN_WRC_READ      0x3   /* Write combiner read        */
+#define         PERFCHAN_PREARB_DELAY  0x4   /* Pre-arbiter delay cycle    */
+                                            /* Cross-bar hold-off cycle:  */
+#define         PERFCHAN_XBAR_HOLDWRAP 0x5   /*    wrapper register        */
+#define         PERFCHAN_XBAR_HOLDSBUS 0x6   /*    system bus (ATP only)   */
+#define         PERFCHAN_XBAR_HOLDCREG 0x9   /*    core registers          */
+#define         PERFCHAN_L2C_MISS      0x6   /* L2 Cache miss              */
+#define         PERFCHAN_L2C_HIT       0x7   /* L2 Cache hit               */
+#define         PERFCHAN_L2C_WRITEBACK 0x8   /* L2 Cache writeback         */
+                                            /* Admission delay cycle:     */
+#define         PERFCHAN_INPUT_CREG    0xB   /*    core registers          */
+#define         PERFCHAN_INPUT_INTR    0xC   /*    internal ram            */
+#define         PERFCHAN_INPUT_WRC     0xD   /*    write combiners(memory) */
+
+/* Should following be removed as not in TRM anywhere? */
+#define         PERFCHAN_XBAR_HOLDINTR 0x8   /*    internal ram            */
+#define         PERFCHAN_INPUT_SBUS    0xA   /*    register port           */
+/* End of remove section. */
+
+#define         PERFCHAN_MAINARB_DELAY 0xF   /* Main arbiter delay cycle   */
+
+#endif /* METAC_2_1 */
+
+#ifdef METAC_2_1
+/*
+ * Write combiner registers
+ * ------------------------
+ *
+ * These replace the EXPAND_T0WRCOMBINE register defines, which will be
+ * deprecated.
+ */
+#define WRCOMB_CONFIG0             0x04830100
+#define     WRCOMB_LFFEn_BIT           0x00004000  /* Enable auto line full flush */
+#define     WRCOMB_ENABLE_BIT          0x00002000  /* Enable write combiner */
+#define     WRCOMB_TIMEOUT_ENABLE_BIT  0x00001000  /* Timeout flush enable */
+#define     WRCOMB_TIMEOUT_COUNT_BITS  0x000003FF
+#define     WRCOMB_TIMEOUT_COUNT_S     0
+#define WRCOMB_CONFIG4             0x04830180
+#define     WRCOMB_PARTALLOC_BITS      0x000000C0
+#define     WRCOMB_PARTALLOC_S         64
+#define     WRCOMB_PARTSIZE_BITS       0x00000030
+#define     WRCOMB_PARTSIZE_S          4
+#define     WRCOMB_PARTOFFSET_BITS     0x0000000F
+#define     WRCOMB_PARTOFFSET_S        0
+#define WRCOMB_CONFIG_STRIDE       8
+#endif /* METAC_2_1 */
+
+#ifdef METAC_2_1
+/*
+ * Thread arbiter registers
+ * ------------------------
+ *
+ * These replace the EXPAND_T0ARBITER register defines, which will be
+ * deprecated.
+ */
+#define ARBITER_ARBCONFIG0       0x04830120
+#define     ARBCFG_BPRIORITY_BIT     0x02000000
+#define     ARBCFG_IPRIORITY_BIT     0x01000000
+#define     ARBCFG_PAGE_BITS         0x00FF0000
+#define     ARBCFG_PAGE_S            16
+#define     ARBCFG_BBASE_BITS        0x0000FF00
+#define     ARGCFG_BBASE_S           8
+#define     ARBCFG_IBASE_BITS        0x000000FF
+#define     ARBCFG_IBASE_S           0
+#define ARBITER_TTECONFIG0       0x04820160
+#define     ARBTTE_IUPPER_BITS       0xFF000000
+#define     ARBTTE_IUPPER_S          24
+#define     ARBTTE_ILOWER_BITS       0x00FF0000
+#define     ARBTTE_ILOWER_S          16
+#define     ARBTTE_BUPPER_BITS       0x0000FF00
+#define     ARBTTE_BUPPER_S          8
+#define     ARBTTE_BLOWER_BITS       0x000000FF
+#define     ARBTTE_BLOWER_S          0
+#define ARBITER_STRIDE           8
+#endif /* METAC_2_1 */
+
+/*
+ * Expansion area registers
+ * --------------------------------------
+ */
+
+/* These defines are to be deprecated. See above instead. */
+#define EXPAND_T0WRCOMBINE         0x03000000
+#ifdef METAC_2_1
+#define     EXPWRC_LFFEn_BIT           0x00004000  /* Enable auto line full flush */
+#endif /* METAC_2_1 */
+#define     EXPWRC_ENABLE_BIT          0x00002000  /* Enable write combiner */
+#define     EXPWRC_TIMEOUT_ENABLE_BIT  0x00001000  /* Timeout flush enable */
+#define     EXPWRC_TIMEOUT_COUNT_BITS  0x000003FF
+#define     EXPWRC_TIMEOUT_COUNT_S     0
+#define EXPAND_TnWRCOMBINE_STRIDE  0x00000008
+
+/* These defines are to be deprecated. See above instead. */
+#define EXPAND_T0ARBITER         0x03000020
+#define     EXPARB_BPRIORITY_BIT 0x02000000
+#define     EXPARB_IPRIORITY_BIT 0x01000000
+#define     EXPARB_PAGE_BITS     0x00FF0000
+#define     EXPARB_PAGE_S        16
+#define     EXPARB_BBASE_BITS    0x0000FF00
+#define     EXPARB_BBASE_S       8
+#define     EXPARB_IBASE_BITS    0x000000FF
+#define     EXPARB_IBASE_S       0
+#define EXPAND_TnARBITER_STRIDE  0x00000008
+
+/* These definitions are to be deprecated. See above instead. */
+#define EXPAND_TIMER_DIV   0x03000040
+#define     EXPTIM_DIV_BITS      0x000000FF
+#define     EXPTIM_DIV_S         0
+
+/* These definitions are to be deprecated. See above instead. */
+#define EXPAND_PERFCHAN0   0x03000050
+#define EXPAND_PERFCHAN1   0x03000058
+#define     EXPPERF_CTRL_BITS    0x0000000F
+#define     EXPPERF_CTRL_S       0
+#define         EXPPERF_WRC_WRBURST   0x0   /* Write combiner write burst */
+#define         EXPPERF_WRC_WRITE     0x1   /* Write combiner write       */
+#define         EXPPERF_WRC_RDBURST   0x2   /* Write combiner read burst  */
+#define         EXPPERF_WRC_READ      0x3   /* Write combiner read        */
+#define         EXPPERF_PREARB_DELAY  0x4   /* Pre-arbiter delay cycle    */
+                                           /* Cross-bar hold-off cycle:  */
+#define         EXPPERF_XBAR_HOLDWRAP 0x5   /*    wrapper register        */
+#define         EXPPERF_XBAR_HOLDSBUS 0x6   /*    system bus              */
+#ifdef METAC_1_2
+#define         EXPPERF_XBAR_HOLDLBUS 0x7   /*    local bus               */
+#else /* METAC_1_2 */
+#define         EXPPERF_XBAR_HOLDINTR 0x8   /*    internal ram            */
+#define         EXPPERF_XBAR_HOLDCREG 0x9   /*    core registers          */
+                                           /* Admission delay cycle:     */
+#define         EXPPERF_INPUT_SBUS    0xA   /*    register port           */
+#define         EXPPERF_INPUT_CREG    0xB   /*    core registers          */
+#define         EXPPERF_INPUT_INTR    0xC   /*    internal ram            */
+#define         EXPPERF_INPUT_WRC     0xD   /*    write combiners(memory) */
+#endif /* !METAC_1_2 */
+#define         EXPPERF_MAINARB_DELAY 0xF   /* Main arbiter delay cycle   */
+
+/*
+ * Debug port registers
+ * --------------------------------------
+ */
+
+/* Data Exchange Register */
+#define DBGPORT_MDBGDATAX                    0x0
+
+/* Data Transfer register */
+#define DBGPORT_MDBGDATAT                    0x4
+
+/* Control Register 0 */
+#define DBGPORT_MDBGCTRL0                    0x8
+#define     DBGPORT_MDBGCTRL0_ADDR_BITS      0xFFFFFFFC
+#define     DBGPORT_MDBGCTRL0_ADDR_S         2
+#define     DBGPORT_MDBGCTRL0_AUTOINCR_BIT   0x00000002
+#define     DBGPORT_MDBGCTRL0_RD_BIT         0x00000001
+
+/* Control Register 1 */
+#define DBGPORT_MDBGCTRL1                    0xC
+#ifdef METAC_2_1
+#define    DBGPORT_MDBGCTRL1_DEFERRTHREAD_BITS      0xC0000000
+#define    DBGPORT_MDBGCTRL1_DEFERRTHREAD_S         30
+#endif /* METAC_2_1 */
+#define     DBGPORT_MDBGCTRL1_LOCK2_INTERLOCK_BIT   0x20000000
+#define     DBGPORT_MDBGCTRL1_ATOMIC_INTERLOCK_BIT  0x10000000
+#define     DBGPORT_MDBGCTRL1_TRIGSTATUS_BIT        0x08000000
+#define     DBGPORT_MDBGCTRL1_GBLPORT_IDLE_BIT      0x04000000
+#define     DBGPORT_MDBGCTRL1_COREMEM_IDLE_BIT      0x02000000
+#define     DBGPORT_MDBGCTRL1_READY_BIT             0x01000000
+#ifdef METAC_2_1
+#define     DBGPORT_MDBGCTRL1_DEFERRID_BITS         0x00E00000
+#define     DBGPORT_MDBGCTRL1_DEFERRID_S            21
+#define     DBGPORT_MDBGCTRL1_DEFERR_BIT            0x00100000
+#endif /* METAC_2_1 */
+#define     DBGPORT_MDBGCTRL1_WR_ACTIVE_BIT         0x00040000
+#define     DBGPORT_MDBGCTRL1_COND_LOCK2_BIT        0x00020000
+#define     DBGPORT_MDBGCTRL1_LOCK2_BIT             0x00010000
+#define     DBGPORT_MDBGCTRL1_DIAGNOSE_BIT          0x00008000
+#define     DBGPORT_MDBGCTRL1_FORCEDIAG_BIT         0x00004000
+#define     DBGPORT_MDBGCTRL1_MEMFAULT_BITS         0x00003000
+#define     DBGPORT_MDBGCTRL1_MEMFAULT_S            12
+#define     DBGPORT_MDBGCTRL1_TRIGGER_BIT           0x00000100
+#ifdef METAC_2_1
+#define     DBGPORT_MDBGCTRL1_INTSPECIAL_BIT        0x00000080
+#define     DBGPORT_MDBGCTRL1_INTRUSIVE_BIT         0x00000040
+#endif /* METAC_2_1 */
+#define     DBGPORT_MDBGCTRL1_THREAD_BITS           0x00000030 /* Thread mask selects threads */
+#define     DBGPORT_MDBGCTRL1_THREAD_S              4
+#define     DBGPORT_MDBGCTRL1_TRANS_SIZE_BITS       0x0000000C
+#define     DBGPORT_MDBGCTRL1_TRANS_SIZE_S          2
+#define         DBGPORT_MDBGCTRL1_TRANS_SIZE_32_BIT 0x00000000
+#define         DBGPORT_MDBGCTRL1_TRANS_SIZE_16_BIT 0x00000004
+#define         DBGPORT_MDBGCTRL1_TRANS_SIZE_8_BIT  0x00000008
+#define     DBGPORT_MDBGCTRL1_BYTE_ROUND_BITS       0x00000003
+#define     DBGPORT_MDBGCTRL1_BYTE_ROUND_S          0
+#define         DBGPORT_MDBGCTRL1_BYTE_ROUND_8_BIT  0x00000001
+#define         DBGPORT_MDBGCTRL1_BYTE_ROUND_16_BIT 0x00000002
+
+
+/* L2 Cache registers */
+#define SYSC_L2C_INIT              0x048300C0
+#define SYSC_L2C_INIT_INIT                  1
+#define SYSC_L2C_INIT_IN_PROGRESS           0
+#define SYSC_L2C_INIT_COMPLETE              1
+
+#define SYSC_L2C_ENABLE            0x048300D0
+#define SYSC_L2C_ENABLE_ENABLE_BIT     0x00000001
+#define SYSC_L2C_ENABLE_PFENABLE_BIT   0x00000002
+
+#define SYSC_L2C_PURGE             0x048300C8
+#define SYSC_L2C_PURGE_PURGE                1
+#define SYSC_L2C_PURGE_IN_PROGRESS          0
+#define SYSC_L2C_PURGE_COMPLETE             1
+
+#endif /* _ASM_METAG_MEM_H_ */
diff --git a/arch/metag/include/asm/metag_regs.h b/arch/metag/include/asm/metag_regs.h
new file mode 100644 (file)
index 0000000..acf4b8e
--- /dev/null
@@ -0,0 +1,1184 @@
+/*
+ * asm/metag_regs.h
+ *
+ * Copyright (C) 2000-2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Various defines for Meta core (non memory-mapped) registers.
+ */
+
+#ifndef _ASM_METAG_REGS_H_
+#define _ASM_METAG_REGS_H_
+
+/*
+ * CHIP Unit Identifiers and Valid/Global register number masks
+ * ------------------------------------------------------------
+ */
+#define TXUCT_ID    0x0     /* Control unit regs */
+#ifdef METAC_1_2
+#define     TXUCT_MASK  0xFF0FFFFF  /* Valid regs 0..31  */
+#else
+#define     TXUCT_MASK  0xFF1FFFFF  /* Valid regs 0..31  */
+#endif
+#define     TGUCT_MASK  0x00000000  /* No global regs    */
+#define TXUD0_ID    0x1     /* Data unit regs */
+#define TXUD1_ID    0x2
+#define     TXUDX_MASK  0xFFFFFFFF  /* Valid regs 0..31 */
+#define     TGUDX_MASK  0xFFFF0000  /* Global regs for base inst */
+#define     TXUDXDSP_MASK   0x0F0FFFFF  /* Valid DSP regs */
+#define     TGUDXDSP_MASK   0x0E0E0000  /* Global DSP ACC regs */
+#define TXUA0_ID    0x3     /* Address unit regs */
+#define TXUA1_ID    0x4
+#define     TXUAX_MASK  0x0000FFFF  /* Valid regs   0-15 */
+#define     TGUAX_MASK  0x0000FF00  /* Global regs  8-15 */
+#define TXUPC_ID    0x5     /* PC registers */
+#define     TXUPC_MASK  0x00000003  /* Valid regs   0- 1 */
+#define     TGUPC_MASK  0x00000000  /* No global regs    */
+#define TXUPORT_ID  0x6     /* Ports are not registers */
+#define TXUTR_ID    0x7
+#define     TXUTR_MASK  0x0000005F  /* Valid regs   0-3,4,6 */
+#define     TGUTR_MASK  0x00000000  /* No global regs    */
+#ifdef METAC_2_1
+#define TXUTT_ID    0x8
+#define     TXUTT_MASK  0x0000000F  /* Valid regs   0-3 */
+#define     TGUTT_MASK  0x00000010  /* Global reg   4   */
+#define TXUFP_ID    0x9     /* FPU regs */
+#define     TXUFP_MASK  0x0000FFFF  /* Valid regs   0-15 */
+#define     TGUFP_MASK  0x00000000  /* No global regs    */
+#endif /* METAC_2_1 */
+
+#ifdef METAC_1_2
+#define TXUXX_MASKS { TXUCT_MASK, TXUDX_MASK, TXUDX_MASK, TXUAX_MASK, \
+                     TXUAX_MASK, TXUPC_MASK,          0, TXUTR_MASK, \
+                     0, 0, 0, 0, 0, 0, 0, 0                          }
+#define TGUXX_MASKS { TGUCT_MASK, TGUDX_MASK, TGUDX_MASK, TGUAX_MASK, \
+                     TGUAX_MASK, TGUPC_MASK,          0, TGUTR_MASK, \
+                     0, 0, 0, 0, 0, 0, 0, 0                          }
+#else /* METAC_1_2 */
+#define TXUXX_MASKS { TXUCT_MASK, TXUDX_MASK, TXUDX_MASK, TXUAX_MASK, \
+                     TXUAX_MASK, TXUPC_MASK,          0, TXUTR_MASK, \
+                     TXUTT_MASK, TXUFP_MASK,          0,          0, \
+                              0,          0,          0,          0  }
+#define TGUXX_MASKS { TGUCT_MASK, TGUDX_MASK, TGUDX_MASK, TGUAX_MASK, \
+                     TGUAX_MASK, TGUPC_MASK,          0, TGUTR_MASK, \
+                     TGUTT_MASK, TGUFP_MASK,          0,          0, \
+                              0,          0,          0,          0  }
+#endif /* !METAC_1_2 */
+
+#define TXUXXDSP_MASKS { 0, TXUDXDSP_MASK, TXUDXDSP_MASK, 0, 0, 0, 0, 0, \
+                        0, 0, 0, 0, 0, 0, 0, 0                          }
+#define TGUXXDSP_MASKS { 0, TGUDXDSP_MASK, TGUDXDSP_MASK, 0, 0, 0, 0, 0, \
+                        0, 0, 0, 0, 0, 0, 0, 0                          }
+
+/* -------------------------------------------------------------------------
+;                          DATA AND ADDRESS UNIT REGISTERS
+;  -----------------------------------------------------------------------*/
+/*
+  Thread local D0 registers
+ */
+/*   D0.0    ; Holds 32-bit result, can be used as scratch */
+#define D0Re0 D0.0
+/*   D0.1    ; Used to pass Arg6_32 */
+#define D0Ar6 D0.1
+/*   D0.2    ; Used to pass Arg4_32 */
+#define D0Ar4 D0.2
+/*   D0.3    ; Used to pass Arg2_32 to a called routine (see D1.3 below) */
+#define D0Ar2 D0.3
+/*   D0.4    ; Can be used as scratch; used to save A0FrP in entry sequences */
+#define D0FrT D0.4
+/*   D0.5    ; C compiler assumes preservation, save with D1.5 if used */
+/*   D0.6    ; C compiler assumes preservation, save with D1.6 if used */
+/*   D0.7    ; C compiler assumes preservation, save with D1.7 if used */
+/*   D0.8    ; Use of D0.8 and above is not encouraged */
+/*   D0.9  */
+/*   D0.10 */
+/*   D0.11 */
+/*   D0.12 */
+/*   D0.13 */
+/*   D0.14 */
+/*   D0.15 */
+/*
+   Thread local D1 registers
+ */
+/*   D1.0    ; Holds top 32-bits of 64-bit result, can be used as scratch */
+#define D1Re0 D1.0
+/*   D1.1    ; Used to pass Arg5_32 */
+#define D1Ar5 D1.1
+/*   D1.2    ; Used to pass Arg3_32 */
+#define D1Ar3 D1.2
+/*   D1.3    ; Used to pass Arg1_32 (first 32-bit argument) to a called routine */
+#define D1Ar1 D1.3
+/*   D1.4    ; Used for Return Pointer, save during entry with A0FrP (via D0.4) */
+#define D1RtP D1.4
+/*   D1.5    ; C compiler assumes preservation, save if used */
+/*   D1.6    ; C compiler assumes preservation, save if used */
+/*   D1.7    ; C compiler assumes preservation, save if used */
+/*   D1.8    ; Use of D1.8 and above is not encouraged */
+/*   D1.9  */
+/*   D1.10 */
+/*   D1.11 */
+/*   D1.12 */
+/*   D1.13 */
+/*   D1.14 */
+/*   D1.15 */
+/*
+   Thread local A0 registers
+ */
+/*   A0.0    ; Primary stack pointer */
+#define A0StP A0.0
+/*   A0.1    ; Used as local frame pointer in C, save if used (via D0.4) */
+#define A0FrP A0.1
+/*   A0.2  */
+/*   A0.3  */
+/*   A0.4    ; Use of A0.4 and above is not encouraged */
+/*   A0.5  */
+/*   A0.6  */
+/*   A0.7  */
+/*
+   Thread local A1 registers
+ */
+/*   A1.0    ; Global static chain pointer - do not modify */
+#define A1GbP A1.0
+/*   A1.1    ; Local static chain pointer in C, can be used as scratch */
+#define A1LbP A1.1
+/*   A1.2  */
+/*   A1.3  */
+/*   A1.4    ; Use of A1.4 and above is not encouraged */
+/*   A1.5  */
+/*   A1.6  */
+/*   A1.7  */
+#ifdef METAC_2_1
+/* Renameable registers for use with Fast Interrupts */
+/* The interrupt stack pointer (usually a global register) */
+#define A0IStP A0IReg
+/* The interrupt global pointer (usually a global register) */
+#define A1IGbP A1IReg
+#endif
+/*
+   Further registers may be globally allocated via linkage/loading tools,
+   normally they are not used.
+ */
+/*-------------------------------------------------------------------------
+;                    STACK STRUCTURE and CALLING CONVENTION
+; -----------------------------------------------------------------------*/
+/*
+; Calling convention indicates that the following is the state of the
+; stack frame at the start of a routine-
+;
+;       Arg9_32 [A0StP+#-12]
+;       Arg8_32 [A0StP+#- 8]
+;       Arg7_32 [A0StP+#- 4]
+;   A0StP->
+;
+; Registers D1.3, D0.3, ..., to D0.1 are used to pass Arg1_32 to Arg6_32
+;   respectively. If a routine needs to store them on the stack in order
+;   to make sub-calls or because of the general complexity of the routine it
+;   is best to dump these registers immediately at the start of a routine
+;   using a MSETL or SETL instruction-
+;
+;   MSETL   [A0StP],D0Ar6,D0Ar4,D0Ar2; Only dump arguments expected
+;or SETL    [A0StP+#8++],D0Ar2       ; Up to two 32-bit args expected
+;
+; For non-leaf routines it is always necessary to save and restore at least
+; the return address value D1RtP on the stack. Also by convention if the
+; frame is saved then a new A0FrP value must be set-up. So for non-leaf
+; routines at this point both these registers must be saved onto the stack
+; using a SETL instruction and the new A0FrP value is then set-up-
+;
+;   MOV     D0FrT,A0FrP
+;   ADD     A0FrP,A0StP,#0
+;   SETL    [A0StP+#8++],D0FrT,D1RtP
+;
+; Registers D0.5, D1.5, to D1.7 are assumed to be preserved across calls so
+;   a SETL or MSETL instruction can be used to save the current state
+;   of these registers if they are modified by the current routine-
+;
+;   MSETL   [A0StP],D0.5,D0.6,D0.7   ; Only save registers modified
+;or SETL    [A0StP+#8++],D0.5        ; Only D0.5 and/or D1.5 modified
+;
+; All of the above sequences can be combined into one maximal case-
+;
+;   MOV     D0FrT,A0FrP              ; Save and calculate new frame pointer
+;   ADD     A0FrP,A0StP,#(ARS)
+;   MSETL   [A0StP],D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
+;
+; Having completed the above sequence the only remaining task on routine
+; entry is to reserve any local and outgoing argument storage space on the
+; stack. This instruction may be omitted if the size of this region is zero-
+;
+;   ADD     A0StP,A0StP,#(LCS)
+;
+; LCS is the first example use of one of a number of standard local defined
+; values that can be created to make assembler code more readable and
+; potentially more robust-
+;
+; #define ARS   0x18                 ; Register arg bytes saved on stack
+; #define FRS   0x20                 ; Frame save area size in bytes
+; #define LCS   0x00                 ; Locals and Outgoing arg size
+; #define ARO   (LCS+FRS)            ; Stack offset to access args
+;
+; All of the above defines should be undefined (#undef) at the end of each
+; routine to avoid accidental use in the next routine.
+;
+; Given all of the above the following stack structure is expected during
+; the body of a routine if all args passed in registers are saved during
+; entry-
+;
+;                                    ; 'Incoming args area'
+;         Arg10_32 [A0StP+#-((10*4)+ARO)]       Arg9_32  [A0StP+#-(( 9*4)+ARO)]
+;         Arg8_32  [A0StP+#-(( 8*4)+ARO)]       Arg7_32  [A0StP+#-(( 7*4)+ARO)]
+;--- Call point
+; D0Ar6=  Arg6_32  [A0StP+#-(( 6*4)+ARO)] D1Ar5=Arg5_32  [A0StP+#-(( 5*4)+ARO)]
+; D0Ar4=  Arg4_32  [A0StP+#-(( 4*4)+ARO)] D1Ar3=Arg3_32  [A0StP+#-(( 3*4)+ARO)]
+; D0Ar2=  Arg2_32  [A0StP+#-(( 2*4)+ARO)] D1Ar2=Arg1_32  [A0StP+#-(( 1*4)+ARO)]
+;                                    ; 'Frame area'
+; A0FrP-> D0FrT, D1RtP,
+;         D0.5, D1.5,
+;         D0.6, D1.6,
+;         D0.7, D1.7,
+;                                    ; 'Locals area'
+;         Loc0_32  [A0StP+# (( 0*4)-LCS)],      Loc1_32 [A0StP+# (( 1*4)-LCS)]
+;               .... other locals
+;         Locn_32  [A0StP+# (( n*4)-LCS)]
+;                                    ; 'Outgoing args area'
+;         Outm_32  [A0StP+#- ( m*4)]            .... other outgoing args
+;         Out8_32  [A0StP+#- ( 1*4)]            Out7_32  [A0StP+#- ( 1*4)]
+; A0StP-> (Out1_32-Out6_32 in regs D1Ar1-D0Ar6)
+;
+; The exit sequence for a non-leaf routine can use the frame pointer created
+; in the entry sequence to optimise the recovery of the full state-
+;
+;   MGETL   D0FrT,D0.5,D0.6,D0.7,[A0FrP]
+;   SUB     A0StP,A0FrP,#(ARS+FRS)
+;   MOV     A0FrP,D0FrT
+;   MOV     PC,D1RtP
+;
+; Having described the most complex non-leaf case above, it is worth noting
+; that if a routine is a leaf and does not use any of the caller-preserved
+; state. The routine can be implemented as-
+;
+;   ADD     A0StP,A0StP,#LCS
+;   .... body of routine
+;   SUB     A0StP,A0StP,#LCS
+;   MOV     PC,D1RtP
+;
+; The stack adjustments can also be omitted if no local storage is required.
+;
+; Another exit sequence structure is more applicable if for a leaf routine
+; with no local frame pointer saved/generated in which the call saved
+; registers need to be saved and restored-
+;
+;   MSETL   [A0StP],D0.5,D0.6,D0.7   ; Hence FRS is 0x18, ARS is 0x00
+;   ADD     A0StP,A0StP,#LCS
+;   .... body of routine
+;   GETL    D0.5,D1.5,[A0StP+#((0*8)-(FRS+LCS))]
+;   GETL    D0.6,D1.6,[A0StP+#((1*8)-(FRS+LCS))]
+;   GETL    D0.7,D1.7,[A0StP+#((2*8)-(FRS+LCS))]
+;   SUB     A0StP,A0StP,#(ARS+FRS+LCS)
+;   MOV     PC,D1RtP
+;
+; Lastly, to support profiling assembler code should use a fixed entry/exit
+; sequence if the trigger define _GMON_ASM is defined-
+;
+;   #ifndef _GMON_ASM
+;   ... optimised entry code
+;   #else
+;   ; Profiling entry case
+;   MOV     D0FrT,A0FrP              ; Save and calculate new frame pointer
+;   ADD     A0FrP,A0StP,#(ARS)
+;   MSETL   [A0StP],...,D0FrT,... or SETL    [A0FrP],D0FrT,D1RtP
+;   CALLR   D0FrT,_mcount_wrapper
+;   #endif
+;   ... body of routine
+;   #ifndef _GMON_ASM
+;   ... optimised exit code
+;   #else
+;   ; Profiling exit case
+;   MGETL   D0FrT,...,[A0FrP]     or GETL    D0FrT,D1RtP,[A0FrP++]
+;   SUB     A0StP,A0FrP,#(ARS+FRS)
+;   MOV     A0FrP,D0FrT
+;   MOV     PC,D1RtP
+;   #endif
+
+
+; -------------------------------------------------------------------------
+;                         CONTROL UNIT REGISTERS
+; -------------------------------------------------------------------------
+;
+; See the assembler guide, hardware documentation, or the field values
+; defined below for some details of the use of these registers.
+*/
+#define TXENABLE    CT.0    /* Need to define bit-field values in these */
+#define TXMODE      CT.1
+#define TXSTATUS    CT.2    /* DEFAULT 0x00020000 */
+#define TXRPT       CT.3
+#define TXTIMER     CT.4
+#define TXL1START   CT.5
+#define TXL1END     CT.6
+#define TXL1COUNT   CT.7
+#define TXL2START   CT.8
+#define TXL2END     CT.9
+#define TXL2COUNT   CT.10
+#define TXBPOBITS   CT.11
+#define TXMRSIZE    CT.12
+#define TXTIMERI    CT.13
+#define TXDRCTRL    CT.14  /* DEFAULT 0x0XXXF0F0 */
+#define TXDRSIZE    CT.15
+#define TXCATCH0    CT.16
+#define TXCATCH1    CT.17
+#define TXCATCH2    CT.18
+#define TXCATCH3    CT.19
+
+#ifdef METAC_2_1
+#define TXDEFR      CT.20
+#define TXCPRS      CT.21
+#endif
+
+#define TXINTERN0   CT.23
+#define TXAMAREG0   CT.24
+#define TXAMAREG1   CT.25
+#define TXAMAREG2   CT.26
+#define TXAMAREG3   CT.27
+#define TXDIVTIME   CT.28   /* DEFAULT 0x00000001 */
+#define TXPRIVEXT   CT.29   /* DEFAULT 0x003B0000 */
+#define TXTACTCYC   CT.30
+#define TXIDLECYC   CT.31
+
+/*****************************************************************************
+ *                        CONTROL UNIT REGISTER BITS
+ ****************************************************************************/
+/*
+ * The following registers and where appropriate the sub-fields of those
+ * registers are defined for pervasive use in controlling program flow.
+ */
+
+/*
+ * TXENABLE register fields - only the thread id is routinely useful
+ */
+#define TXENABLE_REGNUM 0
+#define TXENABLE_THREAD_BITS       0x00000700
+#define TXENABLE_THREAD_S          8
+#define TXENABLE_REV_STEP_BITS     0x000000F0
+#define TXENABLE_REV_STEP_S        4
+
+/*
+ * TXMODE register - controls extensions of the instruction set
+ */
+#define TXMODE_REGNUM 1
+#define     TXMODE_DEFAULT  0   /* All fields default to zero */
+
+/*
+ * TXSTATUS register - contains a couple of stable bits that can be used
+ *      to determine the privilege processing level and interrupt
+ *      processing level of the current thread.
+ */
+#define TXSTATUS_REGNUM 2
+#define TXSTATUS_PSTAT_BIT         0x00020000   /* -> Privilege active      */
+#define TXSTATUS_PSTAT_S           17
+#define TXSTATUS_ISTAT_BIT         0x00010000   /* -> In interrupt state    */
+#define TXSTATUS_ISTAT_S           16
+
+/*
+ * These are all relatively boring registers, mostly full 32-bit
+ */
+#define TXRPT_REGNUM     3  /* Repeat counter for XFR... instructions   */
+#define TXTIMER_REGNUM   4  /* Timer-- causes timer trigger on overflow */
+#define TXL1START_REGNUM 5  /* Hardware Loop 1 Start-PC/End-PC/Count    */
+#define TXL1END_REGNUM   6
+#define TXL1COUNT_REGNUM 7
+#define TXL2START_REGNUM 8  /* Hardware Loop 2 Start-PC/End-PC/Count    */
+#define TXL2END_REGNUM   9
+#define TXL2COUNT_REGNUM 10
+#define TXBPOBITS_REGNUM 11 /* Branch predict override bits - tune perf */
+#define TXTIMERI_REGNUM  13 /* Timer-- time based interrupt trigger     */
+
+/*
+ * TXDIVTIME register is routinely read to calculate the time-base for
+ * the TXTIMER register.
+ */
+#define TXDIVTIME_REGNUM 28
+#define     TXDIVTIME_DIV_BITS 0x000000FF
+#define     TXDIVTIME_DIV_S    0
+#define     TXDIVTIME_DIV_MIN  0x00000001   /* Maximum resolution       */
+#define     TXDIVTIME_DIV_MAX  0x00000100   /* 1/1 -> 1/256 resolution  */
+#define     TXDIVTIME_BASE_HZ  1000000      /* Timers run at 1Mhz @1/1  */
+
+/*
+ * TXPRIVEXT register can be consulted to decide if write access to a
+ *    part of the threads register set is not permitted when in
+ *    unprivileged mode (PSTAT == 0).
+ */
+#define TXPRIVEXT_REGNUM 29
+#define     TXPRIVEXT_COPRO_BITS    0xFF000000 /* Co-processor 0-7 */
+#define     TXPRIVEXT_COPRO_S       24
+#ifndef METAC_1_2
+#define     TXPRIVEXT_TXTIMER_BIT   0x00080000 /* TXTIMER   priv */
+#define     TXPRIVEXT_TRACE_BIT     0x00040000 /* TTEXEC|TTCTRL|GTEXEC */
+#endif
+#define     TXPRIVEXT_TXTRIGGER_BIT 0x00020000 /* TXSTAT|TXMASK|TXPOLL */
+#define     TXPRIVEXT_TXGBLCREG_BIT 0x00010000 /* Global common regs */
+#define     TXPRIVEXT_CBPRIV_BIT    0x00008000 /* Mem i/f dump priv */
+#define     TXPRIVEXT_ILOCK_BIT     0x00004000 /* LOCK inst priv */
+#define     TXPRIVEXT_TXITACCYC_BIT 0x00002000 /* TXIDLECYC|TXTACTCYC */
+#define     TXPRIVEXT_TXDIVTIME_BIT 0x00001000 /* TXDIVTIME priv */
+#define     TXPRIVEXT_TXAMAREGX_BIT 0x00000800 /* TXAMAREGX priv */
+#define     TXPRIVEXT_TXTIMERI_BIT  0x00000400 /* TXTIMERI  priv */
+#define     TXPRIVEXT_TXSTATUS_BIT  0x00000200 /* TXSTATUS  priv */
+#define     TXPRIVEXT_TXDISABLE_BIT 0x00000100 /* TXENABLE  priv */
+#ifndef METAC_1_2
+#define     TXPRIVEXT_MINIMON_BIT   0x00000080 /* Enable Minim features */
+#define     TXPRIVEXT_OLDBCCON_BIT  0x00000020 /* Restore Static predictions */
+#define     TXPRIVEXT_ALIGNREW_BIT  0x00000010 /* Align & precise checks */
+#endif
+#define     TXPRIVEXT_KEEPPRI_BIT   0x00000008 /* Use AMA_Priority if ISTAT=1*/
+#define     TXPRIVEXT_TXTOGGLEI_BIT 0x00000001 /* TX.....I  priv */
+
+/*
+ * TXTACTCYC register - counts instructions issued for this thread
+ */
+#define TXTACTCYC_REGNUM  30
+#define     TXTACTCYC_COUNT_MASK    0x00FFFFFF
+
+/*
+ * TXIDLECYC register - counts idle cycles
+ */
+#define TXIDLECYC_REGNUM  31
+#define     TXIDLECYC_COUNT_MASK    0x00FFFFFF
+
+/*****************************************************************************
+ *                             DSP EXTENSIONS
+ ****************************************************************************/
+/*
+ * The following values relate to fields and controls that only a program
+ * using the DSP extensions of the META instruction set need to know.
+ */
+
+
+#ifndef METAC_1_2
+/*
+ * Allow co-processor hardware to replace the read pipeline data source in
+ * appropriate cases.
+ */
+#define TXMODE_RDCPEN_BIT       0x00800000
+#endif
+
+/*
+ * Address unit addressing modes
+ */
+#define TXMODE_A1ADDR_BITS  0x00007000
+#define TXMODE_A1ADDR_S     12
+#define TXMODE_A0ADDR_BITS  0x00000700
+#define TXMODE_A0ADDR_S     8
+#define     TXMODE_AXADDR_MODULO 3
+#define     TXMODE_AXADDR_REVB   4
+#define     TXMODE_AXADDR_REVW   5
+#define     TXMODE_AXADDR_REVD   6
+#define     TXMODE_AXADDR_REVL   7
+
+/*
+ * Data unit OverScale select (default 0 -> normal, 1 -> top 16 bits)
+ */
+#define TXMODE_DXOVERSCALE_BIT  0x00000080
+
+/*
+ * Data unit MX mode select (default 0 -> MX16, 1 -> MX8)
+ */
+#define TXMODE_M8_BIT         0x00000040
+
+/*
+ * Data unit accumulator saturation point (default -> 40 bit accumulator)
+ */
+#define TXMODE_DXACCSAT_BIT 0x00000020 /* Set for 32-bit accumulator */
+
+/*
+ * Data unit accumulator saturation enable (default 0 -> no saturation)
+ */
+#define TXMODE_DXSAT_BIT    0x00000010
+
+/*
+ * Data unit master rounding control (default 0 -> normal, 1 -> convergent)
+ */
+#define TXMODE_DXROUNDING_BIT   0x00000008
+
+/*
+ * Data unit product shift for fractional arithmetic (default off)
+ */
+#define TXMODE_DXPRODSHIFT_BIT  0x00000004
+
+/*
+ * Select the arithmetic mode (multiply mostly) for both data units
+ */
+#define TXMODE_DXARITH_BITS 0x00000003
+#define     TXMODE_DXARITH_32  3
+#define     TXMODE_DXARITH_32H 2
+#define     TXMODE_DXARITH_S16 1
+#define     TXMODE_DXARITH_16  0
+
+/*
+ * TXMRSIZE register value only relevant when DSP modulo addressing active
+ */
+#define TXMRSIZE_REGNUM 12
+#define     TXMRSIZE_MIN    0x0002  /* 0, 1 -> normal addressing logic */
+#define     TXMRSIZE_MAX    0xFFFF
+
+/*
+ * TXDRCTRL register can be used to detect the actual size of the DSP RAM
+ * partitions allocated to this thread.
+ */
+#define TXDRCTRL_REGNUM 14
+#define     TXDRCTRL_SINESIZE_BITS  0x0F000000
+#define     TXDRCTRL_SINESIZE_S     24
+#define     TXDRCTRL_RAMSZPOW_BITS  0x001F0000  /* Limit = (1<<RAMSZPOW)-1 */
+#define     TXDRCTRL_RAMSZPOW_S     16
+#define     TXDRCTRL_D1RSZAND_BITS  0x0000F000  /* Mask top 4 bits - D1 */
+#define     TXDRCTRL_D1RSZAND_S     12
+#define     TXDRCTRL_D0RSZAND_BITS  0x000000F0  /* Mask top 4 bits - D0 */
+#define     TXDRCTRL_D0RSZAND_S     4
+/* Given extracted RAMSZPOW and DnRSZAND fields this returns the size */
+#define     TXDRCTRL_DXSIZE(Pow, AndBits) \
+                               ((((~(AndBits)) & 0x0f) + 1) << ((Pow)-4))
+
+/*
+ * TXDRSIZE register provides modulo addressing options for each DSP RAM
+ */
+#define TXDRSIZE_REGNUM 15
+#define     TXDRSIZE_R1MOD_BITS       0xFFFF0000
+#define     TXDRSIZE_R1MOD_S          16
+#define     TXDRSIZE_R0MOD_BITS       0x0000FFFF
+#define     TXDRSIZE_R0MOD_S          0
+
+#define     TXDRSIZE_RBRAD_SCALE_BITS 0x70000000
+#define     TXDRSIZE_RBRAD_SCALE_S    28
+#define     TXDRSIZE_RBMODSIZE_BITS   0x0FFF0000
+#define     TXDRSIZE_RBMODSIZE_S      16
+#define     TXDRSIZE_RARAD_SCALE_BITS 0x00007000
+#define     TXDRSIZE_RARAD_SCALE_S    12
+#define     TXDRSIZE_RAMODSIZE_BITS   0x00000FFF
+#define     TXDRSIZE_RAMODSIZE_S      0
+
+/*****************************************************************************
+ *                       DEFERRED and BUS ERROR EXTENSION
+ ****************************************************************************/
+
+/*
+ * TXDEFR register - Deferred exception control
+ */
+#define TXDEFR_REGNUM 20
+#define     TXDEFR_DEFAULT  0   /* All fields default to zero */
+
+/*
+ * Bus error state is a multi-bit positive/negative event notification from
+ * the bus infrastructure.
+ */
+#define     TXDEFR_BUS_ERR_BIT    0x80000000  /* Set if error (LSB STATE) */
+#define     TXDEFR_BUS_ERRI_BIT   0x40000000  /* Fetch returned error */
+#define     TXDEFR_BUS_STATE_BITS 0x3F000000  /* Bus event/state data */
+#define     TXDEFR_BUS_STATE_S    24
+#define     TXDEFR_BUS_TRIG_BIT   0x00800000  /* Set when bus error seen */
+
+/*
+ * Bus events are collected by background code in a deferred manner unless
+ * selected to trigger an extended interrupt HALT trigger when they occur.
+ */
+#define     TXDEFR_BUS_ICTRL_BIT  0x00000080  /* Enable interrupt trigger */
+
+/*
+ * CHIP Automatic Mips Allocation control registers
+ * ------------------------------------------------
+ */
+
+/* CT Bank AMA Registers */
+#define TXAMAREG0_REGNUM 24
+#ifdef METAC_1_2
+#define     TXAMAREG0_CTRL_BITS       0x07000000
+#else /* METAC_1_2 */
+#define     TXAMAREG0_RCOFF_BIT       0x08000000
+#define     TXAMAREG0_DLINEHLT_BIT    0x04000000
+#define     TXAMAREG0_DLINEDIS_BIT    0x02000000
+#define     TXAMAREG0_CYCSTRICT_BIT   0x01000000
+#define     TXAMAREG0_CTRL_BITS       (TXAMAREG0_RCOFF_BIT |    \
+                                      TXAMAREG0_DLINEHLT_BIT | \
+                                      TXAMAREG0_DLINEDIS_BIT | \
+                                      TXAMAREG0_CYCSTRICT_BIT)
+#endif /* !METAC_1_2 */
+#define     TXAMAREG0_CTRL_S           24
+#define     TXAMAREG0_MDM_BIT         0x00400000
+#define     TXAMAREG0_MPF_BIT         0x00200000
+#define     TXAMAREG0_MPE_BIT         0x00100000
+#define     TXAMAREG0_MASK_BITS       (TXAMAREG0_MDM_BIT | \
+                                      TXAMAREG0_MPF_BIT | \
+                                      TXAMAREG0_MPE_BIT)
+#define     TXAMAREG0_MASK_S          20
+#define     TXAMAREG0_SDM_BIT         0x00040000
+#define     TXAMAREG0_SPF_BIT         0x00020000
+#define     TXAMAREG0_SPE_BIT         0x00010000
+#define     TXAMAREG0_STATUS_BITS     (TXAMAREG0_SDM_BIT | \
+                                      TXAMAREG0_SPF_BIT | \
+                                      TXAMAREG0_SPE_BIT)
+#define     TXAMAREG0_STATUS_S        16
+#define     TXAMAREG0_PRIORITY_BITS   0x0000FF00
+#define     TXAMAREG0_PRIORITY_S      8
+#define     TXAMAREG0_BVALUE_BITS     0x000000FF
+#define     TXAMAREG0_BVALUE_S  0
+
+#define TXAMAREG1_REGNUM 25
+#define     TXAMAREG1_DELAYC_BITS     0x07FFFFFF
+#define     TXAMAREG1_DELAYC_S  0
+
+#define TXAMAREG2_REGNUM 26
+#ifdef METAC_1_2
+#define     TXAMAREG2_DLINEC_BITS     0x00FFFFFF
+#define     TXAMAREG2_DLINEC_S        0
+#else /* METAC_1_2 */
+#define     TXAMAREG2_IRQPRIORITY_BIT 0xFF000000
+#define     TXAMAREG2_IRQPRIORITY_S   24
+#define     TXAMAREG2_DLINEC_BITS     0x00FFFFF0
+#define     TXAMAREG2_DLINEC_S        4
+#endif /* !METAC_1_2 */
+
+#define TXAMAREG3_REGNUM 27
+/*
+ * NOTE(review): the three fields below are named TXAMAREG2_* although they
+ * sit under TXAMAREG3_REGNUM (27). Presumably a naming slip carried over
+ * from the TXAMAREG2 block above -- confirm against the META hardware
+ * manual before renaming, since external code may already reference these
+ * identifiers by their current names.
+ */
+#define     TXAMAREG2_AMABLOCK_BIT    0x00080000
+#define     TXAMAREG2_AMAC_BITS       0x0000FFFF
+#define     TXAMAREG2_AMAC_S          0
+
+/*****************************************************************************
+ *                                FPU EXTENSIONS
+ ****************************************************************************/
+/*
+ * The following registers only exist in FPU enabled cores.
+ */
+
+/*
+ * TXMODE register - FPU rounding mode control/status fields
+ */
+#define     TXMODE_FPURMODE_BITS     0x00030000
+#define     TXMODE_FPURMODE_S        16
+#define     TXMODE_FPURMODEWRITE_BIT 0x00040000  /* Set to change FPURMODE */
+
+/*
+ * TXDEFR register - FPU exception handling/state is a significant source
+ *   of deferrable errors. Run-time S/W can move handling to interrupt level
+ *   using DEFR instruction to collect state.
+ */
+#define     TXDEFR_FPE_FE_BITS       0x003F0000  /* Set by FPU_FE events */
+#define     TXDEFR_FPE_FE_S          16
+
+#define     TXDEFR_FPE_INEXACT_FE_BIT   0x010000
+#define     TXDEFR_FPE_UNDERFLOW_FE_BIT 0x020000
+#define     TXDEFR_FPE_OVERFLOW_FE_BIT  0x040000
+#define     TXDEFR_FPE_DIVBYZERO_FE_BIT 0x080000
+#define     TXDEFR_FPE_INVALID_FE_BIT   0x100000
+#define     TXDEFR_FPE_DENORMAL_FE_BIT  0x200000
+
+/* Mask covering the six FPE *_ICTRL_BIT flags defined below (0x01..0x20) */
+#define     TXDEFR_FPE_ICTRL_BITS    0x0000003F   /* Route to interrupts */
+#define     TXDEFR_FPE_ICTRL_S       0
+
+#define     TXDEFR_FPE_INEXACT_ICTRL_BIT   0x01
+#define     TXDEFR_FPE_UNDERFLOW_ICTRL_BIT 0x02
+#define     TXDEFR_FPE_OVERFLOW_ICTRL_BIT  0x04
+#define     TXDEFR_FPE_DIVBYZERO_ICTRL_BIT 0x08
+#define     TXDEFR_FPE_INVALID_ICTRL_BIT   0x10
+#define     TXDEFR_FPE_DENORMAL_ICTRL_BIT  0x20
+
+/*
+ * DETAILED FPU RELATED VALUES
+ * ---------------------------
+ */
+
+/*
+ * Rounding mode field in TXMODE can hold a number of logical values
+ */
+#define METAG_FPURMODE_TONEAREST  0x0      /* Default */
+#define METAG_FPURMODE_TOWARDZERO 0x1
+#define METAG_FPURMODE_UPWARD     0x2
+#define METAG_FPURMODE_DOWNWARD   0x3
+
+/*
+ * In order to set the TXMODE register field that controls the rounding mode
+ * an extra bit must be set in the value written versus that read in order
+ * to gate writes to the rounding mode field. This allows other non-FPU code
+ * to modify TXMODE without knowledge of the FPU units presence and not
+ * influence the FPU rounding mode. This macro adds the required bit so new
+ * rounding modes are accepted.
+ */
+#define TXMODE_FPURMODE_SET(FPURMode) \
+       (TXMODE_FPURMODEWRITE_BIT + ((FPURMode)<<TXMODE_FPURMODE_S))
+
+/*
+ * To successfully restore TXMODE to zero at the end of the function the
+ * following value (rather than zero) must be used.
+ */
+#define TXMODE_FPURMODE_RESET (TXMODE_FPURMODEWRITE_BIT)
+
+/*
+ * In TXSTATUS a special bit exists to indicate if FPU H/W has been accessed
+ * since it was last reset.
+ */
+#define TXSTATUS_FPACTIVE_BIT  0x01000000
+
+/*
+ * Exception state (see TXDEFR_FPU_FE_*) and enabling (for interrupt
+ * level processing (see TXDEFR_FPU_ICTRL_*) are controlled by similar
+ * bit mask locations within each field.
+ */
+#define METAG_FPU_FE_INEXACT   0x01
+#define METAG_FPU_FE_UNDERFLOW 0x02
+#define METAG_FPU_FE_OVERFLOW  0x04
+#define METAG_FPU_FE_DIVBYZERO 0x08
+#define METAG_FPU_FE_INVALID   0x10
+#define METAG_FPU_FE_DENORMAL  0x20
+#define METAG_FPU_FE_ALL_EXCEPT (METAG_FPU_FE_INEXACT   | \
+                                METAG_FPU_FE_UNDERFLOW | \
+                                METAG_FPU_FE_OVERFLOW  | \
+                                METAG_FPU_FE_DIVBYZERO | \
+                                METAG_FPU_FE_INVALID   | \
+                                METAG_FPU_FE_DENORMAL)
+
+/*****************************************************************************
+ *             THREAD CONTROL, ERROR, OR INTERRUPT STATE EXTENSIONS
+ ****************************************************************************/
+/*
+ * The following values are only relevant to code that externally controls
+ * threads, handles errors/interrupts, and/or set-up interrupt/error handlers
+ * for subsequent use.
+ */
+
+/*
+ * TXENABLE register fields - only ENABLE_BIT is potentially read/write
+ */
+#define TXENABLE_MAJOR_REV_BITS    0xFF000000
+#define TXENABLE_MAJOR_REV_S       24
+#define TXENABLE_MINOR_REV_BITS    0x00FF0000
+#define TXENABLE_MINOR_REV_S       16
+#define TXENABLE_CLASS_BITS        0x0000F000
+#define TXENABLE_CLASS_S           12
+#define TXENABLE_CLASS_DSP             0x0 /* -> DSP Thread */
+#define TXENABLE_CLASS_LDSP            0x8 /* -> DSP LITE Thread */
+#define TXENABLE_CLASS_GP              0xC /* -> General Purpose Thread */
+#define     TXENABLE_CLASSALT_LFPU       0x2 /*  Set to indicate LITE FPU */
+#define     TXENABLE_CLASSALT_FPUR8      0x1 /*  Set to indicate 8xFPU regs */
+#define TXENABLE_MTXARCH_BIT       0x00000800
+#define TXENABLE_STEP_REV_BITS     0x000000F0
+#define TXENABLE_STEP_REV_S        4
+#define TXENABLE_STOPPED_BIT       0x00000004   /* TXOFF due to ENABLE->0 */
+#define TXENABLE_OFF_BIT           0x00000002   /* Thread is in off state */
+#define TXENABLE_ENABLE_BIT        0x00000001   /* Set if running */
+
+/*
+ * TXSTATUS register - used by external/internal interrupt/error handler
+ */
+#define TXSTATUS_CB1MARKER_BIT     0x00800000   /* -> int level mem state */
+#define TXSTATUS_CBMARKER_BIT      0x00400000   /* -> mem i/f state dumped */
+#define TXSTATUS_MEM_FAULT_BITS    0x00300000
+#define TXSTATUS_MEM_FAULT_S       20
+#define     TXSTATUS_MEMFAULT_NONE  0x0 /* -> No memory fault       */
+#define     TXSTATUS_MEMFAULT_GEN   0x1 /* -> General fault         */
+#define     TXSTATUS_MEMFAULT_PF    0x2 /* -> Page fault            */
+#define     TXSTATUS_MEMFAULT_RO    0x3 /* -> Read only fault       */
+#define TXSTATUS_MAJOR_HALT_BITS   0x000C0000
+#define TXSTATUS_MAJOR_HALT_S      18
+#define     TXSTATUS_MAJHALT_TRAP 0x0   /* -> SWITCH inst used      */
+#define     TXSTATUS_MAJHALT_INST 0x1   /* -> Unknown inst or fetch */
+#define     TXSTATUS_MAJHALT_PRIV 0x2   /* -> Internal privilege    */
+#define     TXSTATUS_MAJHALT_MEM  0x3   /* -> Memory i/f fault      */
+#define TXSTATUS_L_STEP_BITS       0x00000800   /* -> Progress of L oper    */
+#define TXSTATUS_LSM_STEP_BITS     0x00000700   /* -> Progress of L/S mult  */
+#define TXSTATUS_LSM_STEP_S        8
+#define TXSTATUS_FLAG_BITS         0x0000001F   /* -> All the flags         */
+#define TXSTATUS_SCC_BIT           0x00000010   /* -> Split-16 flags ...    */
+#define TXSTATUS_SCF_LZ_BIT        0x00000008   /* -> Split-16 Low  Z flag  */
+#define TXSTATUS_SCF_HZ_BIT        0x00000004   /* -> Split-16 High Z flag  */
+#define TXSTATUS_SCF_HC_BIT        0x00000002   /* -> Split-16 High C flag  */
+#define TXSTATUS_SCF_LC_BIT        0x00000001   /* -> Split-16 Low  C flag  */
+#define TXSTATUS_CF_Z_BIT          0x00000008   /* -> Condition Z flag      */
+#define TXSTATUS_CF_N_BIT          0x00000004   /* -> Condition N flag      */
+#define TXSTATUS_CF_O_BIT          0x00000002   /* -> Condition O flag      */
+#define TXSTATUS_CF_C_BIT          0x00000001   /* -> Condition C flag      */
+
+/*
+ * TXCATCH0-3 register contents may store information on a memory operation
+ * that has failed if the bit TXSTATUS_CBMARKER_BIT is set.
+ */
+#define TXCATCH0_REGNUM 16
+#define TXCATCH1_REGNUM 17
+#define     TXCATCH1_ADDR_BITS   0xFFFFFFFF   /* TXCATCH1 is Addr 0-31 */
+#define     TXCATCH1_ADDR_S      0
+#define TXCATCH2_REGNUM 18
+#define     TXCATCH2_DATA0_BITS  0xFFFFFFFF   /* TXCATCH2 is Data 0-31 */
+#define     TXCATCH2_DATA0_S     0
+#define TXCATCH3_REGNUM 19
+#define     TXCATCH3_DATA1_BITS  0xFFFFFFFF   /* TXCATCH3 is Data 32-63 */
+#define     TXCATCH3_DATA1_S     0
+
+/*
+ * Detailed catch state information
+ * --------------------------------
+ */
+
+/* Contents of TXCATCH0 register */
+#define     TXCATCH0_LDRXX_BITS  0xF8000000  /* Load destination reg 0-31 */
+#define     TXCATCH0_LDRXX_S     27
+#define     TXCATCH0_LDDST_BITS  0x07FF0000  /* Load destination bits */
+#define     TXCATCH0_LDDST_S     16
+#define         TXCATCH0_LDDST_D1DSP 0x400   /* One bit set if it's a LOAD */
+#define         TXCATCH0_LDDST_D0DSP 0x200
+#define         TXCATCH0_LDDST_TMPLT 0x100
+#define         TXCATCH0_LDDST_TR    0x080
+#ifdef METAC_2_1
+#define         TXCATCH0_LDDST_FPU   0x040
+#endif
+#define         TXCATCH0_LDDST_PC    0x020
+#define         TXCATCH0_LDDST_A1    0x010
+#define         TXCATCH0_LDDST_A0    0x008
+#define         TXCATCH0_LDDST_D1    0x004
+#define         TXCATCH0_LDDST_D0    0x002
+#define         TXCATCH0_LDDST_CT    0x001
+#ifdef METAC_2_1
+/*
+ * NOTE(review): TXCATCH0_WATCHSTOP_BIT (METAC_2_1-only name) and the
+ * unconditional TXCATCH0_WATCHS_BIT below share the value 0x00004000 --
+ * presumably the former is a newer alias for the same hardware flag;
+ * verify against the META technical reference before consolidating.
+ */
+#define     TXCATCH0_WATCHSTOP_BIT 0x00004000  /* Set if Data Watch set fault */
+#endif
+#define     TXCATCH0_WATCHS_BIT  0x00004000  /* Set if Data Watch set fault */
+#define     TXCATCH0_WATCH1_BIT  0x00002000  /* Set if Data Watch 1 matches */
+#define     TXCATCH0_WATCH0_BIT  0x00001000  /* Set if Data Watch 0 matches */
+#define     TXCATCH0_FAULT_BITS  0x00000C00  /* See TXSTATUS_MEMFAULT_*     */
+#define     TXCATCH0_FAULT_S     10
+#define     TXCATCH0_PRIV_BIT    0x00000200  /* Privilege of transaction    */
+#define     TXCATCH0_READ_BIT    0x00000100  /* Set for Read or Load cases  */
+
+#ifdef METAC_2_1
+/* LNKGET Marker bit in TXCATCH0 */
+#define   TXCATCH0_LNKGET_MARKER_BIT 0x00000008
+#define       TXCATCH0_PREPROC_BIT  0x00000004
+#endif
+
+/* Loads are indicated by one of the LDDST bits being set */
+#define     TXCATCH0_LDM16_BIT   0x00000004  /* Load M16 flag */
+#define     TXCATCH0_LDL2L1_BITS 0x00000003  /* Load data size L2,L1 */
+#define     TXCATCH0_LDL2L1_S    0
+
+/* Reads are indicated by the READ bit being set without LDDST bits */
+#define     TXCATCH0_RAXX_BITS   0x0000001F  /* RAXX issue port for read */
+#define     TXCATCH0_RAXX_S      0
+
+/* Write operations are all that remain if READ bit is not set */
+#define     TXCATCH0_WMASK_BITS  0x000000FF  /* Write byte lane mask */
+#define     TXCATCH0_WMASK_S     0
+
+#ifdef METAC_2_1
+
+/* When a FPU exception is signalled then FPUSPEC == FPUSPEC_TAG */
+#define     TXCATCH0_FPURDREG_BITS    0xF8000000
+#define     TXCATCH0_FPURDREG_S       27
+#define     TXCATCH0_FPUR1REG_BITS    0x07C00000
+#define     TXCATCH0_FPUR1REG_S       22
+#define     TXCATCH0_FPUSPEC_BITS     0x000F0000
+#define     TXCATCH0_FPUSPEC_S        16
+#define         TXCATCH0_FPUSPEC_TAG      0xF
+#define     TXCATCH0_FPUINSTA_BIT     0x00001000
+#define     TXCATCH0_FPUINSTQ_BIT     0x00000800
+#define     TXCATCH0_FPUINSTZ_BIT     0x00000400
+#define     TXCATCH0_FPUINSTN_BIT     0x00000200
+#define     TXCATCH0_FPUINSTO3O_BIT   0x00000100
+#define     TXCATCH0_FPUWIDTH_BITS    0x000000C0
+#define     TXCATCH0_FPUWIDTH_S       6
+#define         TXCATCH0_FPUWIDTH_FLOAT   0
+#define         TXCATCH0_FPUWIDTH_DOUBLE  1
+#define         TXCATCH0_FPUWIDTH_PAIRED  2
+#define     TXCATCH0_FPUOPENC_BITS    0x0000003F
+#define     TXCATCH0_FPUOPENC_S       0
+#define         TXCATCH0_FPUOPENC_ADD     0  /* rop1=Rs1, rop3=Rs2 */
+#define         TXCATCH0_FPUOPENC_SUB     1  /* rop1=Rs1, rop3=Rs2 */
+#define         TXCATCH0_FPUOPENC_MUL     2  /* rop1=Rs1, rop2=Rs2 */
+#define         TXCATCH0_FPUOPENC_ATOI    3  /* rop3=Rs */
+#define         TXCATCH0_FPUOPENC_ATOX    4  /* rop3=Rs, uses #Imm */
+#define         TXCATCH0_FPUOPENC_ITOA    5  /* rop3=Rs */
+#define         TXCATCH0_FPUOPENC_XTOA    6  /* rop3=Rs, uses #Imm */
+#define         TXCATCH0_FPUOPENC_ATOH    7  /* rop2=Rs */
+#define         TXCATCH0_FPUOPENC_HTOA    8  /* rop2=Rs */
+#define         TXCATCH0_FPUOPENC_DTOF    9  /* rop3=Rs */
+#define         TXCATCH0_FPUOPENC_FTOD    10 /* rop3=Rs */
+#define         TXCATCH0_FPUOPENC_DTOL    11 /* rop3=Rs */
+#define         TXCATCH0_FPUOPENC_LTOD    12 /* rop3=Rs */
+#define         TXCATCH0_FPUOPENC_DTOXL   13 /* rop3=Rs, uses #imm */
+#define         TXCATCH0_FPUOPENC_XLTOD   14 /* rop3=Rs, uses #imm */
+#define         TXCATCH0_FPUOPENC_CMP     15 /* rop1=Rs1, rop2=Rs2 */
+#define         TXCATCH0_FPUOPENC_MIN     16 /* rop1=Rs1, rop2=Rs2 */
+#define         TXCATCH0_FPUOPENC_MAX     17 /* rop1=Rs1, rop2=Rs2 */
+#define         TXCATCH0_FPUOPENC_ADDRE   18 /* rop1=Rs1, rop3=Rs2 */
+#define         TXCATCH0_FPUOPENC_SUBRE   19 /* rop1=Rs1, rop3=Rs2 */
+#define         TXCATCH0_FPUOPENC_MULRE   20 /* rop1=Rs1, rop2=Rs2 */
+#define         TXCATCH0_FPUOPENC_MXA     21 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
+#define         TXCATCH0_FPUOPENC_MXAS    22 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
+#define         TXCATCH0_FPUOPENC_MAR     23 /* rop1=Rs1, rop2=Rs2 */
+#define         TXCATCH0_FPUOPENC_MARS    24 /* rop1=Rs1, rop2=Rs2 */
+#define         TXCATCH0_FPUOPENC_MUZ     25 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
+#define         TXCATCH0_FPUOPENC_MUZS    26 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/
+#define         TXCATCH0_FPUOPENC_RCP     27 /* rop2=Rs */
+#define         TXCATCH0_FPUOPENC_RSQ     28 /* rop2=Rs */
+
+/* For floating point exceptions TXCATCH1 is used to carry extra data */
+#define     TXCATCH1_FPUR2REG_BITS    0xF8000000
+#define     TXCATCH1_FPUR2REG_S       27
+#define     TXCATCH1_FPUR3REG_BITS    0x07C00000  /* Undefined if O3O set */
+#define     TXCATCH1_FPUR3REG_S       22
+#define     TXCATCH1_FPUIMM16_BITS    0x0000FFFF
+#define     TXCATCH1_FPUIMM16_S       0
+
+#endif /* METAC_2_1 */
+
+/*
+ * TXDIVTIME register used to hold the partial base address of memory i/f
+ * state dump area. Now deprecated.
+ */
+#define     TXDIVTIME_CBBASE_MASK    0x03FFFE00
+#define     TXDIVTIME_CBBASE_LINBASE 0x80000000
+#define     TXDIVTIME_CBBASE_LINBOFF 0x00000000 /* BGnd state */
+#define     TXDIVTIME_CBBASE_LINIOFF 0x00000100 /* Int  state */
+
+/*
+ * TXDIVTIME register used to indicate if the read pipeline was dirty when a
+ * thread was interrupted, halted, or generated an exception. It is invalid
+ * to attempt to issue a further pipeline read address while the read
+ * pipeline is in the dirty state.
+ */
+#define     TXDIVTIME_RPDIRTY_BIT   0x80000000
+
+/*
+ * Further bits in the TXDIVTIME register allow interrupt handling code to
+ * short-cut the discovery of the most significant bit last read from TXSTATI.
+ *
+ * This is the bit number of the trigger line that a low level interrupt
+ * handler should acknowledge and then perhaps the index of a corresponding
+ * handler function.
+ */
+#define     TXDIVTIME_IRQENC_BITS   0x0F000000
+#define     TXDIVTIME_IRQENC_S      24
+
+/*
+ * If TXDIVTIME_RPVALID_BIT is set the read pipeline contained significant
+ * information when the thread was interrupted|halted|exceptioned. Each slot
+ * containing data is indicated by a one bit in the corresponding
+ * TXDIVTIME_RPMASK_BITS bit (least significance bit relates to first
+ * location in read pipeline - most likely to have the 1 state). Empty slots
+ * contain zeroes with no interlock applied on reads if RPDIRTY is currently
+ * set with RPMASK itself being read-only state.
+ */
+#define     TXDIVTIME_RPMASK_BITS 0x003F0000   /* -> Full (1) Empty (0) */
+#define     TXDIVTIME_RPMASK_S    16
+
+/*
+ * TXPRIVEXT register can be used to single step thread execution and
+ * enforce synchronous memory i/f address checking for debugging purposes.
+ */
+#define     TXPRIVEXT_TXSTEP_BIT    0x00000004
+#define     TXPRIVEXT_MEMCHECK_BIT  0x00000002
+
+/*
+ * TXINTERNx registers holds internal state information for H/W debugging only
+ */
+#define TXINTERN0_REGNUM 23
+#define     TXINTERN0_LOCK2_BITS  0xF0000000
+#define     TXINTERN0_LOCK2_S     28
+#define     TXINTERN0_LOCK1_BITS  0x0F000000
+#define     TXINTERN0_LOCK1_S     24
+#define     TXINTERN0_TIFDF_BITS  0x0000F000
+#define     TXINTERN0_TIFDF_S     12
+#define     TXINTERN0_TIFIB_BITS  0x00000F00
+#define     TXINTERN0_TIFIB_S     8
+#define     TXINTERN0_TIFAF_BITS  0x000000F0
+#define     TXINTERN0_TIFAF_S     4
+#define     TXINTERN0_MSTATE_BITS 0x0000000F
+#define     TXINTERN0_MSTATE_S    0
+
+/*
+ * TXSTAT, TXMASK, TXPOLL, TXSTATI, TXMASKI, TXPOLLI registers from trigger
+ * bank all have similar contents (upper kick count bits not in MASK regs)
+ */
+#define TXSTAT_REGNUM  0
+#define     TXSTAT_TIMER_BIT    0x00000001
+#define     TXSTAT_TIMER_S      0
+#define     TXSTAT_KICK_BIT     0x00000002
+#define     TXSTAT_KICK_S       1
+#define     TXSTAT_DEFER_BIT    0x00000008
+#define     TXSTAT_DEFER_S      3
+#define     TXSTAT_EXTTRIG_BITS 0x0000FFF0
+#define     TXSTAT_EXTTRIG_S    4
+#define     TXSTAT_FPE_BITS     0x003F0000
+#define     TXSTAT_FPE_S        16
+#define     TXSTAT_FPE_DENORMAL_BIT    0x00200000
+#define     TXSTAT_FPE_DENORMAL_S      21
+#define     TXSTAT_FPE_INVALID_BIT     0x00100000
+#define     TXSTAT_FPE_INVALID_S       20
+#define     TXSTAT_FPE_DIVBYZERO_BIT   0x00080000
+#define     TXSTAT_FPE_DIVBYZERO_S     19
+#define     TXSTAT_FPE_OVERFLOW_BIT    0x00040000
+#define     TXSTAT_FPE_OVERFLOW_S      18
+#define     TXSTAT_FPE_UNDERFLOW_BIT   0x00020000
+#define     TXSTAT_FPE_UNDERFLOW_S     17
+#define     TXSTAT_FPE_INEXACT_BIT     0x00010000
+#define     TXSTAT_FPE_INEXACT_S       16
+#define     TXSTAT_BUSERR_BIT          0x00800000   /* Set if bus error/ack state */
+#define     TXSTAT_BUSERR_S            23
+#define         TXSTAT_BUSSTATE_BITS     0xFF000000 /* Read only */
+#define         TXSTAT_BUSSTATE_S        24
+#define     TXSTAT_KICKCNT_BITS 0xFFFF0000
+#define     TXSTAT_KICKCNT_S    16
+#define TXMASK_REGNUM  1
+#define TXSTATI_REGNUM 2
+#define     TXSTATI_BGNDHALT_BIT    0x00000004
+#define TXMASKI_REGNUM 3
+#define TXPOLL_REGNUM  4
+#define TXPOLLI_REGNUM 6
+
+/*
+ * TXDRCTRL register can be used to partition the DSP RAM space available to
+ * this thread at startup. This is achieved by offsetting the region allocated
+ * to each thread.
+ */
+#define     TXDRCTRL_D1PARTOR_BITS  0x00000F00  /* OR's into top 4 bits */
+#define     TXDRCTRL_D1PARTOR_S     8
+#define     TXDRCTRL_D0PARTOR_BITS  0x0000000F  /* OR's into top 4 bits */
+#define     TXDRCTRL_D0PARTOR_S     0
+/* Given extracted Pow and Or fields this is threads base within DSP RAM */
+#define     TXDRCTRL_DXBASE(Pow, Or)  ((Or)<<((Pow)-4))
+
+/*****************************************************************************
+ *                      RUN TIME TRACE CONTROL REGISTERS
+ ****************************************************************************/
+/*
+ * The following values are only relevant to code that implements run-time
+ *  trace features within the META Core
+ */
+#define TTEXEC      TT.0
+#define TTCTRL      TT.1
+#define TTMARK      TT.2
+#define TTREC       TT.3
+#define GTEXEC      TT.4
+
+#define TTEXEC_REGNUM               0
+#define     TTEXEC_EXTTRIGAND_BITS      0x7F000000
+#define     TTEXEC_EXTTRIGAND_S         24
+#define     TTEXEC_EXTTRIGEN_BIT        0x00008000
+#define     TTEXEC_EXTTRIGMATCH_BITS    0x00007F00
+#define     TTEXEC_EXTTRIGMATCH_S       8
+#define     TTEXEC_TCMODE_BITS          0x00000003
+#define     TTEXEC_TCMODE_S             0
+
+#define TTCTRL_REGNUM               1
+#define     TTCTRL_TRACETT_BITS         0x00008000
+#define     TTCTRL_TRACETT_S            15
+#define     TTCTRL_TRACEALL_BITS        0x00002000
+#define     TTCTRL_TRACEALL_S           13
+#ifdef METAC_2_1
+#define     TTCTRL_TRACEALLTAG_BITS     0x00000400
+#define     TTCTRL_TRACEALLTAG_S        10
+#endif /* METAC_2_1 */
+#define     TTCTRL_TRACETAG_BITS        0x00000200
+#define     TTCTRL_TRACETAG_S           9
+#define     TTCTRL_TRACETTPC_BITS       0x00000080
+#define     TTCTRL_TRACETTPC_S          7
+#define     TTCTRL_TRACEMPC_BITS        0x00000020
+#define     TTCTRL_TRACEMPC_S           5
+#define     TTCTRL_TRACEEN_BITS         0x00000008
+#define     TTCTRL_TRACEEN_S            3
+#define     TTCTRL_TRACEEN1_BITS        0x00000004
+#define     TTCTRL_TRACEEN1_S           2
+#define     TTCTRL_TRACEPC_BITS         0x00000002
+#define     TTCTRL_TRACEPC_S            1
+
+#ifdef METAC_2_1
+#define TTMARK_REGNUM   2
+#define TTMARK_BITS                 0xFFFFFFFF
+#define TTMARK_S                    0x0
+
+#define TTREC_REGNUM    3
+#define TTREC_BITS                  0xFFFFFFFFFFFFFFFF
+#define TTREC_S                     0x0
+#endif /* METAC_2_1 */
+
+#define GTEXEC_REGNUM               4
+#define     GTEXEC_DCRUN_BITS           0x80000000
+#define     GTEXEC_DCRUN_S              31
+#define     GTEXEC_ICMODE_BITS          0x0C000000
+#define     GTEXEC_ICMODE_S             26
+#define     GTEXEC_TCMODE_BITS          0x03000000
+#define     GTEXEC_TCMODE_S             24
+#define     GTEXEC_PERF1CMODE_BITS      0x00040000
+#define     GTEXEC_PERF1CMODE_S         18
+#define     GTEXEC_PERF0CMODE_BITS      0x00010000
+#define     GTEXEC_PERF0CMODE_S         16
+#define     GTEXEC_REFMSEL_BITS         0x0000F000
+#define     GTEXEC_REFMSEL_S            12
+#define     GTEXEC_METRICTH_BITS        0x000003FF
+#define     GTEXEC_METRICTH_S           0
+
+#ifdef METAC_2_1
+/*
+ * Clock Control registers
+ * -----------------------
+ */
+#define TXCLKCTRL_REGNUM        22
+
+/*
+ * Default setting is with clocks always on (DEFON), turning all clocks off
+ * can only be done from external devices (OFF), enabling automatic clock
+ * gating will allow clocks to stop as units fall idle.
+ */
+#define TXCLKCTRL_ALL_OFF       0x02222222
+#define TXCLKCTRL_ALL_DEFON     0x01111111
+#define TXCLKCTRL_ALL_AUTO      0x02222222
+
+/*
+ * Individual fields control caches, floating point and main data/addr units
+ */
+#define TXCLKCTRL_CLOCKIC_BITS  0x03000000
+#define TXCLKCTRL_CLOCKIC_S     24
+#define TXCLKCTRL_CLOCKDC_BITS  0x00300000
+#define TXCLKCTRL_CLOCKDC_S     20
+#define TXCLKCTRL_CLOCKFP_BITS  0x00030000
+#define TXCLKCTRL_CLOCKFP_S     16
+#define TXCLKCTRL_CLOCKD1_BITS  0x00003000
+#define TXCLKCTRL_CLOCKD1_S     12
+#define TXCLKCTRL_CLOCKD0_BITS  0x00000300
+#define TXCLKCTRL_CLOCKD0_S     8
+#define TXCLKCTRL_CLOCKA1_BITS  0x00000030
+#define TXCLKCTRL_CLOCKA1_S     4
+#define TXCLKCTRL_CLOCKA0_BITS  0x00000003
+#define TXCLKCTRL_CLOCKA0_S     0
+
+/*
+ * Individual settings for each field are common
+ */
+#define TXCLKCTRL_CLOCKxx_OFF   0
+#define TXCLKCTRL_CLOCKxx_DEFON 1
+#define TXCLKCTRL_CLOCKxx_AUTO  2
+
+#endif /* METAC_2_1 */
+
+#ifdef METAC_2_1
+/*
+ * Fast interrupt new bits
+ * ------------------------------------
+ */
+#define TXSTATUS_IPTOGGLE_BIT           0x80000000 /* Prev PToggle of TXPRIVEXT */
+#define TXSTATUS_ISTATE_BIT             0x40000000 /* IState bit */
+#define TXSTATUS_IWAIT_BIT              0x20000000 /* wait indefinitely in decision step */
+#define TXSTATUS_IEXCEPT_BIT            0x10000000 /* Indicate an exception occurred */
+#define TXSTATUS_IRPCOUNT_BITS          0x0E000000 /* Number of 'dirty' data entries*/
+#define TXSTATUS_IRPCOUNT_S             25
+#define TXSTATUS_IRQSTAT_BITS           0x0000F000 /* IRQEnc bits, trigger or interrupts */
+#define TXSTATUS_IRQSTAT_S              12
+#define TXSTATUS_LNKSETOK_BIT           0x00000020 /* LNKSetOK bit, successful LNKSET */
+
+/* New fields in TXDE for fast interrupt system */
+#define TXDIVTIME_IACTIVE_BIT           0x00008000 /* Enable new interrupt system */
+#define TXDIVTIME_INONEST_BIT           0x00004000 /* Gate nested interrupt */
+#define TXDIVTIME_IREGIDXGATE_BIT       0x00002000 /* gate of the IRegIdx field */
+#define TXDIVTIME_IREGIDX_BITS          0x00001E00 /* Index of A0.0/1 replaces */
+#define TXDIVTIME_IREGIDX_S             9
+#define TXDIVTIME_NOST_BIT              0x00000100 /* disable superthreading bit */
+#endif
+
+#endif /* _ASM_METAG_REGS_H_ */
diff --git a/arch/metag/include/asm/mman.h b/arch/metag/include/asm/mman.h
new file mode 100644 (file)
index 0000000..17999db
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef __METAG_MMAN_H__
+#define __METAG_MMAN_H__
+
+#include <uapi/asm/mman.h>
+
+#ifndef __ASSEMBLY__
+#define arch_mmap_check metag_mmap_check
+int metag_mmap_check(unsigned long addr, unsigned long len,
+                    unsigned long flags);
+#endif
+#endif /* __METAG_MMAN_H__ */
diff --git a/arch/metag/include/asm/mmu.h b/arch/metag/include/asm/mmu.h
new file mode 100644 (file)
index 0000000..9c32114
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef __MMU_H
+#define __MMU_H
+
+#ifdef CONFIG_METAG_USER_TCM
+#include <linux/list.h>
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#include <asm/page.h>
+#endif
+
+typedef struct {
+       /* Software pgd base pointer used for Meta 1.x MMU. */
+       unsigned long pgd_base;
+#ifdef CONFIG_METAG_USER_TCM
+       struct list_head tcm;
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+#if HPAGE_SHIFT < HUGEPT_SHIFT
+       /* last partially filled huge page table address */
+       unsigned long part_huge;
+#endif
+#endif
+} mm_context_t;
+
+/* Given a virtual address, return the pte for the top level 4meg entry
+ * that maps that address.
+ * Returns 0 (an empty pte) if that range is not mapped.
+ */
+unsigned long mmu_read_first_level_page(unsigned long vaddr);
+
+/* Given a linear (virtual) address, return the second level 4k pte
+ * that maps that address.  Returns 0 if the address is not mapped.
+ */
+unsigned long mmu_read_second_level_page(unsigned long vaddr);
+
+/* Get the virtual base address of the MMU */
+unsigned long mmu_get_base(void);
+
+/* Initialize the MMU. */
+void mmu_init(unsigned long mem_end);
+
+#ifdef CONFIG_METAG_META21_MMU
+/*
+ * For cpu "cpu" calculate and return the address of the
+ * MMCU_TnLOCAL_TABLE_PHYS0 if running in local-space or
+ * MMCU_TnGLOBAL_TABLE_PHYS0 if running in global-space.
+ */
+static inline unsigned long mmu_phys0_addr(unsigned int cpu)
+{
+       unsigned long phys0;
+
+       phys0 = (MMCU_T0LOCAL_TABLE_PHYS0 +
+               (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
+               (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));
+
+       return phys0;
+}
+
+/*
+ * For cpu "cpu" calculate and return the address of the
+ * MMCU_TnLOCAL_TABLE_PHYS1 if running in local-space or
+ * MMCU_TnGLOBAL_TABLE_PHYS1 if running in global-space.
+ */
+static inline unsigned long mmu_phys1_addr(unsigned int cpu)
+{
+       unsigned long phys1;
+
+       phys1 = (MMCU_T0LOCAL_TABLE_PHYS1 +
+               (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) +
+               (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET));
+
+       return phys1;
+}
+#endif /* CONFIG_METAG_META21_MMU */
+
+#endif
diff --git a/arch/metag/include/asm/mmu_context.h b/arch/metag/include/asm/mmu_context.h
new file mode 100644 (file)
index 0000000..ae2a71b
--- /dev/null
@@ -0,0 +1,113 @@
+#ifndef __METAG_MMU_CONTEXT_H
+#define __METAG_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+#include <linux/io.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+                                 struct task_struct *tsk)
+{
+}
+
+static inline int init_new_context(struct task_struct *tsk,
+                                  struct mm_struct *mm)
+{
+#ifndef CONFIG_METAG_META21_MMU
+       /* We use context to store a pointer to the page holding the
+        * pgd of a process while it is running. While a process is not
+        * running the pgd and context fields should be equal.
+        */
+       mm->context.pgd_base = (unsigned long) mm->pgd;
+#endif
+#ifdef CONFIG_METAG_USER_TCM
+       INIT_LIST_HEAD(&mm->context.tcm);
+#endif
+       return 0;
+}
+
+#ifdef CONFIG_METAG_USER_TCM
+
+#include <linux/slab.h>
+#include <asm/tcm.h>
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+       struct tcm_allocation *pos, *n;
+
+       list_for_each_entry_safe(pos, n,  &mm->context.tcm, list) {
+               tcm_free(pos->tag, pos->addr, pos->size);
+               list_del(&pos->list);
+               kfree(pos);
+       }
+}
+#else
+#define destroy_context(mm)            do { } while (0)
+#endif
+
+#ifdef CONFIG_METAG_META21_MMU
+static inline void load_pgd(pgd_t *pgd, int thread)
+{
+       unsigned long phys0 = mmu_phys0_addr(thread);
+       unsigned long phys1 = mmu_phys1_addr(thread);
+
+       /*
+        *  0x900 2Gb address space
+        *  The permission bits apply to MMU table region which gives a 2MB
+        *  window into physical memory. We especially don't want userland to be
+        *  able to access this.
+        */
+       metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
+                   _PAGE_PRESENT, phys0);
+       /* Set new MMU base address */
+       metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
+}
+#endif
+
+static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifdef CONFIG_METAG_META21_MMU
+       load_pgd(next->pgd, hard_processor_id());
+#else
+       unsigned int i;
+
+       /* prev->context == prev->pgd in the case where we are initially
+          switching from the init task to the first process. */
+       if (prev->context.pgd_base != (unsigned long) prev->pgd) {
+               for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
+                       ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i];
+       } else
+               prev->pgd = (pgd_t *)mmu_get_base();
+
+       next->pgd = prev->pgd;
+       prev->pgd = (pgd_t *) prev->context.pgd_base;
+
+       for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
+               next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i];
+
+       flush_cache_all();
+#endif
+       flush_tlb_all();
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       if (prev != next)
+               switch_mmu(prev, next);
+}
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+                              struct mm_struct *next_mm)
+{
+       switch_mmu(prev_mm, next_mm);
+}
+
+#define deactivate_mm(tsk, mm)   do { } while (0)
+
+#endif
diff --git a/arch/metag/include/asm/mmzone.h b/arch/metag/include/asm/mmzone.h
new file mode 100644 (file)
index 0000000..9c88a9c
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef __ASM_METAG_MMZONE_H
+#define __ASM_METAG_MMZONE_H
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+#include <linux/numa.h>
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid)         (node_data[nid])
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+       int nid;
+
+       for (nid = 0; nid < MAX_NUMNODES; nid++)
+               if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
+                       break;
+
+       return nid;
+}
+
+static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
+{
+       return NODE_DATA(pfn_to_nid(pfn));
+}
+
+/* arch/metag/mm/numa.c */
+void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end);
+#else
+static inline void
+setup_bootmem_node(int nid, unsigned long start, unsigned long end)
+{
+}
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
+
+#ifdef CONFIG_NUMA
+/* SoC specific mem init */
+void __init soc_mem_setup(void);
+#else
+static inline void __init soc_mem_setup(void) {};
+#endif
+
+#endif /* __ASM_METAG_MMZONE_H */
diff --git a/arch/metag/include/asm/module.h b/arch/metag/include/asm/module.h
new file mode 100644 (file)
index 0000000..e47e609
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _ASM_METAG_MODULE_H
+#define _ASM_METAG_MODULE_H
+
+#include <asm-generic/module.h>
+
+struct metag_plt_entry {
+       /* Indirect jump instruction sequence. */
+       unsigned long tramp[2];
+};
+
+struct mod_arch_specific {
+       /* Indices of PLT sections within module. */
+       unsigned int core_plt_section, init_plt_section;
+};
+
+#if defined CONFIG_METAG_META12
+#define MODULE_PROC_FAMILY "META 1.2 "
+#elif defined CONFIG_METAG_META21
+#define MODULE_PROC_FAMILY "META 2.1 "
+#else
+#define MODULE_PROC_FAMILY ""
+#endif
+
+#ifdef CONFIG_4KSTACKS
+#define MODULE_STACKSIZE "4KSTACKS "
+#else
+#define MODULE_STACKSIZE ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
+
+#ifdef MODULE
+asm(".section .plt,\"ax\",@progbits; .balign 8; .previous");
+asm(".section .init.plt,\"ax\",@progbits; .balign 8; .previous");
+#endif
+
+#endif /* _ASM_METAG_MODULE_H */
diff --git a/arch/metag/include/asm/page.h b/arch/metag/include/asm/page.h
new file mode 100644 (file)
index 0000000..1e8e281
--- /dev/null
@@ -0,0 +1,128 @@
+#ifndef _METAG_PAGE_H
+#define _METAG_PAGE_H
+
+#include <linux/const.h>
+
+#include <asm/metag_mem.h>
+
+/* PAGE_SHIFT determines the page size */
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define PAGE_SHIFT     12
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define PAGE_SHIFT     13
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define PAGE_SHIFT     14
+#endif
+
+#define PAGE_SIZE      (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK      (~(PAGE_SIZE-1))
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define HPAGE_SHIFT   13
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define HPAGE_SHIFT   14
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define HPAGE_SHIFT   15
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define HPAGE_SHIFT   16
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define HPAGE_SHIFT   17
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define HPAGE_SHIFT   18
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define HPAGE_SHIFT   19
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define HPAGE_SHIFT   20
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define HPAGE_SHIFT   21
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define HPAGE_SHIFT   22
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define HPAGE_SIZE            (1UL << HPAGE_SHIFT)
+# define HPAGE_MASK            (~(HPAGE_SIZE-1))
+# define HUGETLB_PAGE_ORDER    (HPAGE_SHIFT-PAGE_SHIFT)
+/*
+ * We define our own hugetlb_get_unmapped_area so we don't corrupt 2nd level
+ * page tables with normal pages in them.
+ */
+# define HUGEPT_SHIFT          (22)
+# define HUGEPT_ALIGN          (1 << HUGEPT_SHIFT)
+# define HUGEPT_MASK           (HUGEPT_ALIGN - 1)
+# define ALIGN_HUGEPT(x)       ALIGN(x, HUGEPT_ALIGN)
+# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* On the Meta, we would like to know if the address (heap) we have is
+ * in local or global space.
+ */
+#define is_global_space(addr)  ((addr) > 0x7fffffff)
+#define is_local_space(addr)   (!is_global_space(addr))
+
+extern void clear_page(void *to);
+extern void copy_page(void *to, void *from);
+
+#define clear_user_page(page, vaddr, pg)        clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x)     ((x).pte)
+#define pgd_val(x)     ((x).pgd)
+#define pgprot_val(x)  ((x).pgprot)
+
+#define __pte(x)       ((pte_t) { (x) })
+#define __pgd(x)       ((pgd_t) { (x) })
+#define __pgprot(x)    ((pgprot_t) { (x) })
+
+/* The kernel must now ALWAYS live at either 0xC0000000 or 0x40000000 - that
+ * being either global or local space.
+ */
+#define PAGE_OFFSET            (CONFIG_PAGE_OFFSET)
+
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+#define META_MEMORY_BASE  LINGLOBAL_BASE
+#define META_MEMORY_LIMIT LINGLOBAL_LIMIT
+#else
+#define META_MEMORY_BASE  LINLOCAL_BASE
+#define META_MEMORY_LIMIT LINLOCAL_LIMIT
+#endif
+
+/* Offset between physical and virtual mapping of kernel memory. */
+extern unsigned int meta_memoffset;
+
+#define __pa(x) ((unsigned long)(((unsigned long)(x)) - meta_memoffset))
+#define __va(x) ((void *)((unsigned long)(((unsigned long)(x)) + meta_memoffset)))
+
+extern unsigned long pfn_base;
+#define ARCH_PFN_OFFSET         (pfn_base)
+#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page)      __va(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)
+#ifdef CONFIG_FLATMEM
+extern unsigned long max_pfn;
+extern unsigned long min_low_pfn;
+#define pfn_valid(pfn)         ((pfn) >= min_low_pfn && (pfn) < max_pfn)
+#endif
+
+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \
+                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _METAG_PAGE_H */
diff --git a/arch/metag/include/asm/perf_event.h b/arch/metag/include/asm/perf_event.h
new file mode 100644 (file)
index 0000000..105bbff
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef __ASM_METAG_PERF_EVENT_H
+#define __ASM_METAG_PERF_EVENT_H
+
+#endif /* __ASM_METAG_PERF_EVENT_H */
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
new file mode 100644 (file)
index 0000000..275d928
--- /dev/null
@@ -0,0 +1,79 @@
+#ifndef _METAG_PGALLOC_H
+#define _METAG_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/mm.h>
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+       set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
+
+#define pmd_populate(mm, pmd, pte) \
+       set_pmd(pmd, __pmd(_PAGE_TABLE | page_to_phys(pte)))
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * Allocate and free page tables.
+ */
+#ifdef CONFIG_METAG_META21_MMU
+static inline void pgd_ctor(pgd_t *pgd)
+{
+       memcpy(pgd + USER_PTRS_PER_PGD,
+              swapper_pg_dir + USER_PTRS_PER_PGD,
+              (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+#else
+#define pgd_ctor(x)    do { } while (0)
+#endif
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+       if (pgd)
+               pgd_ctor(pgd);
+       return pgd;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+       free_page((unsigned long)pgd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+                                         unsigned long address)
+{
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
+                                             __GFP_ZERO);
+       return pte;
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+                                     unsigned long address)
+{
+       struct page *pte;
+       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+       if (pte)
+               pgtable_page_ctor(pte);
+       return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+       free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+       pgtable_page_dtor(pte);
+       __free_page(pte);
+}
+
+#define __pte_free_tlb(tlb, pte, addr)                         \
+       do {                                                    \
+               pgtable_page_dtor(pte);                         \
+               tlb_remove_page((tlb), (pte));                  \
+       } while (0)
+
+#define check_pgt_cache()      do { } while (0)
+
+#endif
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
new file mode 100644 (file)
index 0000000..1cd13d5
--- /dev/null
@@ -0,0 +1,370 @@
+/*
+ * Macros and functions to manipulate Meta page tables.
+ */
+
+#ifndef _METAG_PGTABLE_H
+#define _METAG_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+#define CONSISTENT_START       0xF7000000
+#define CONSISTENT_END         0xF73FFFFF
+#define VMALLOC_START          0xF8000000
+#define VMALLOC_END            0xFFFEFFFF
+#else
+#define CONSISTENT_START       0x77000000
+#define CONSISTENT_END         0x773FFFFF
+#define VMALLOC_START          0x78000000
+#define VMALLOC_END            0x7FFFFFFF
+#endif
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT          MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE            MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV             MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE       MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT     MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1    0x020
+#define _PAGE_CACHE_CTRL0      0x040
+#define _PAGE_CACHE_CTRL1      0x080
+#define _PAGE_ALWAYS_ZERO_2    0x100
+#define _PAGE_ALWAYS_ZERO_3    0x200
+#define _PAGE_ALWAYS_ZERO_4    0x400
+#define _PAGE_ALWAYS_ZERO_5    0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used.  Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED         _PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY            _PAGE_ALWAYS_ZERO_2
+#define _PAGE_FILE             _PAGE_ALWAYS_ZERO_3
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL           _PAGE_PRIV
+
+/* No caching of this page */
+#define _PAGE_CACHE_WIN0       (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* burst caching - good for data streaming */
+#define _PAGE_CACHE_WIN1       (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2       (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full on caching */
+#define _PAGE_CACHE_WIN3       (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE                (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK       (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+                                _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK         (PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT         1
+#define _PAGE_SZ_4K            (0x0)
+#define _PAGE_SZ_8K            (0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K           (0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K           (0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K           (0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K          (0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K          (0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K          (0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M            (0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M            (0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M            (0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK          (0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ               (_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ               (_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ               (_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE            (_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE          (_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE          (_PAGE_SZ_4M)
+#endif
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * Meta, we use that, but "fold" the mid level into the top-level page
+ * table.
+ */
+
+/* PGDIR_SHIFT determines the size of the area a second-level page table can
+ * map. This is always 4MB.
+ */
+
+#define PGDIR_SHIFT    22
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: we use a two-level, so
+ * we don't really have any PMD directory physically. First level tables
+ * always map 2Gb (local or global) at a granularity of 4MB, second-level
+ * tables map 4MB with a granularity between 4MB and 4kB (between 1 and
+ * 1024 entries).
+ */
+#define PTRS_PER_PTE   (PGDIR_SIZE/PAGE_SIZE)
+#define HPTRS_PER_PTE  (PGDIR_SIZE/HPAGE_SIZE)
+#define PTRS_PER_PGD   512
+
+#define USER_PTRS_PER_PGD      256
+#define FIRST_USER_ADDRESS     META_MEMORY_BASE
+#define FIRST_USER_PGD_NR      pgd_index(FIRST_USER_ADDRESS)
+
+#define PAGE_NONE      __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+                                _PAGE_CACHEABLE)
+
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
+                                _PAGE_ACCESSED | _PAGE_CACHEABLE)
+#define PAGE_SHARED_C  PAGE_SHARED
+#define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+                                _PAGE_CACHEABLE)
+#define PAGE_COPY_C    PAGE_COPY
+
+#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+                                _PAGE_CACHEABLE)
+#define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
+                                _PAGE_ACCESSED | _PAGE_WRITE | \
+                                _PAGE_CACHEABLE | _PAGE_KERNEL)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY_C
+#define __P111 PAGE_COPY_C
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED_C
+#define __S111 PAGE_SHARED_C
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+
+/* zero page used for uninitialized stuff */
+extern unsigned long empty_zero_page;
+#define ZERO_PAGE(vaddr)       (virt_to_page(empty_zero_page))
+
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
+
+#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define pte_none(x)            (!pte_val(x))
+#define pte_present(x)         (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, xp)        do { pte_val(*(xp)) = 0; } while (0)
+
+#define pmd_none(x)            (!pmd_val(x))
+#define pmd_bad(x)             ((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
+                                       != (_PAGE_TABLE & ~_PAGE_SZ_MASK))
+#define pmd_present(x)         (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp)          do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x)            pfn_to_page(pte_pfn(x))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_write(pte_t pte)   { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte)   { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)   { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte)    { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte)   { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte)     { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)   { pte_val(pte) |= _PAGE_WRITE; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)   { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)   { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+static inline pte_t pte_mkhuge(pte_t pte)    { return pte; }
+
+/*
+ * Macro and implementation to mark a page protection as uncacheable.
+ */
+#define pgprot_writecombine(prot)                                      \
+       __pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))
+
+#define pgprot_noncached(prot)                                         \
+       __pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)
+
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+       return pte;
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+       unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
+       if (!paddr)
+               return 0;
+       return (unsigned long)__va(paddr);
+}
+
+#define pmd_page(pmd)          (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page_shift(pmd)    (12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
+                                       >> _PAGE_SZ_SHIFT))
+#define pmd_num_ptrs(pmd)      (PGDIR_SIZE >> pmd_page_shift(pmd))
+
+/*
+ * Each pgd is only 2k, mapping 2Gb (local or global). If we're in global
+ * space drop the top bit before indexing the pgd.
+ */
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+#define pgd_index(address)     ((((address) & ~0x80000000) >> PGDIR_SHIFT) \
+                                                       & (PTRS_PER_PGD-1))
+#else
+#define pgd_index(address)     (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#endif
+
+#define pgd_offset(mm, address)        ((mm)->pgd + pgd_index(address))
+
+#define pgd_offset_k(address)  pgd_offset(&init_mm, address)
+
+#define pmd_index(address)     (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/* Find an entry in the second-level page table.. */
+#if !defined(CONFIG_HUGETLB_PAGE)
+  /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
+# define pte_index(pmd, address) \
+       (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#else
+  /* some pages are huge, so read 1st level pt to find out */
+# define pte_index(pmd, address) \
+       (((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
+#endif
+#define pte_offset_kernel(dir, address) \
+       ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
+#define pte_offset_map(dir, address)           pte_offset_kernel(dir, address)
+#define pte_offset_map_nested(dir, address)    pte_offset_kernel(dir, address)
+
+#define pte_unmap(pte)         do { } while (0)
+#define pte_unmap_nested(pte)  do { } while (0)
+
+#define pte_ERROR(e) \
+       pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Meta doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+                                   unsigned long address, pte_t *pte)
+{
+}
+
+/*
+ * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e))
+ * Since PAGE_PRESENT is bit 1, we can use the bits above that.
+ */
+#define __swp_type(x)                  (((x).val >> 1) & 0xff)
+#define __swp_offset(x)                        ((x).val >> 10)
+#define __swp_entry(type, offset)      ((swp_entry_t) { ((type) << 1) | \
+                                        ((offset) << 10) })
+#define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
+
+#define PTE_FILE_MAX_BITS      22
+#define pte_to_pgoff(x)                (pte_val(x) >> 10)
+#define pgoff_to_pte(x)                __pte(((x) << 10) | _PAGE_FILE)
+
+#define kern_addr_valid(addr)  (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
+       remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+void paging_init(unsigned long mem_end);
+
+#ifdef CONFIG_METAG_META12
+/* This is a workaround for an issue in Meta 1 cores. These cores cache
+ * invalid entries in the TLB so we always need to flush whenever we add
+ * a new pte. Unfortunately we can only flush the whole TLB not shoot down
+ * single entries so this is sub-optimal. This implementation ensures that
+ * we will get a flush at the second attempt, so we may still get repeated
+ * faults, we just don't overflow the kernel stack handling them.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({                                                                       \
+       int __changed = !pte_same(*(__ptep), __entry);                    \
+       if (__changed) {                                                  \
+               set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+       }                                                                 \
+       flush_tlb_page(__vma, __address);                                 \
+       __changed;                                                        \
+})
+#endif
+
+#include <asm-generic/pgtable.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* _METAG_PGTABLE_H */
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
new file mode 100644 (file)
index 0000000..9b029a7
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 Imagination Technologies
+ */
+
+#ifndef __ASM_METAG_PROCESSOR_H
+#define __ASM_METAG_PROCESSOR_H
+
+#include <linux/atomic.h>
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/metag_regs.h>
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/* The task stops where the kernel starts */
+#define TASK_SIZE      PAGE_OFFSET
+/* Add an extra page of padding at the top of the stack for the guard page. */
+#define STACK_TOP      (TASK_SIZE - PAGE_SIZE)
+#define STACK_TOP_MAX  STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE     META_MEMORY_BASE
+
+typedef struct {
+       unsigned long seg;
+} mm_segment_t;
+
+#ifdef CONFIG_METAG_FPU
+struct meta_fpu_context {
+       TBICTXEXTFPU fpstate;
+       union {
+               struct {
+                       TBICTXEXTBB4 fx8_15;
+                       TBICTXEXTFPACC fpacc;
+               } fx8_15;
+               struct {
+                       TBICTXEXTFPACC fpacc;
+                       TBICTXEXTBB4 unused;
+               } nofx8_15;
+       } extfpstate;
+       bool needs_restore;
+};
+#else
+struct meta_fpu_context {};
+#endif
+
+#ifdef CONFIG_METAG_DSP
+struct meta_ext_context {
+       struct {
+               TBIEXTCTX ctx;
+               TBICTXEXTBB8 bb8;
+               TBIDUAL ax[TBICTXEXTAXX_BYTES / sizeof(TBIDUAL)];
+               TBICTXEXTHL2 hl2;
+               TBICTXEXTTDPR ext;
+               TBICTXEXTRP6 rp;
+       } regs;
+
+       /* DSPRAM A and B save areas. */
+       void *ram[2];
+
+       /* ECH encoded size of DSPRAM save areas. */
+       unsigned int ram_sz[2];
+};
+#else
+struct meta_ext_context {};
+#endif
+
+struct thread_struct {
+       PTBICTX kernel_context;
+       /* A copy of the user process Sig.SaveMask. */
+       unsigned int user_flags;
+       struct meta_fpu_context *fpu_context;
+       void __user *tls_ptr;
+       unsigned short int_depth;
+       unsigned short txdefr_failure;
+       struct meta_ext_context *dsp_context;
+};
+
+#define INIT_THREAD  { \
+       NULL,                   /* kernel_context */    \
+       0,                      /* user_flags */        \
+       NULL,                   /* fpu_context */       \
+       NULL,                   /* tls_ptr */           \
+       1,                      /* int_depth - we start in kernel */    \
+       0,                      /* txdefr_failure */    \
+       NULL,                   /* dsp_context */       \
+}
+
+/* Needed to make #define as we are referencing 'current', that is not visible
+ * yet.
+ *
+ * Stack layout is as below.
+
+      argc            argument counter (integer)
+      argv[0]         program name (pointer)
+      argv[1...N]     program args (pointers)
+      argv[argc-1]    end of args (integer)
+      NULL
+      env[0...N]      environment variables (pointers)
+      NULL
+
+ */
+#define start_thread(regs, pc, usp) do {                                  \
+       unsigned int *argc = (unsigned int *) bprm->exec;                  \
+       set_fs(USER_DS);                                                   \
+       current->thread.int_depth = 1;                                     \
+       /* Force this process down to user land */                         \
+       regs->ctx.SaveMask = TBICTX_PRIV_BIT;                              \
+       regs->ctx.CurrPC = pc;                                             \
+       regs->ctx.AX[0].U0 = usp;                                          \
+       regs->ctx.DX[3].U1 = *((int *)argc);                    /* argc */ \
+       regs->ctx.DX[3].U0 = (int)((int *)argc + 1);            /* argv */ \
+       regs->ctx.DX[2].U1 = (int)((int *)argc +                           \
+                                  regs->ctx.DX[3].U1 + 2);     /* envp */ \
+       regs->ctx.DX[2].U0 = 0;                            /* rtld_fini */ \
+} while (0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+#define copy_segments(tsk, mm)         do { } while (0)
+#define release_segments(mm)           do { } while (0)
+
+extern void exit_thread(void);
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+#define        thread_saved_pc(tsk)    \
+       ((unsigned long)(tsk)->thread.kernel_context->CurrPC)
+#define thread_saved_sp(tsk)   \
+       ((unsigned long)(tsk)->thread.kernel_context->AX[0].U0)
+#define thread_saved_fp(tsk)   \
+       ((unsigned long)(tsk)->thread.kernel_context->AX[1].U0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define        KSTK_EIP(tsk)   ((tsk)->thread.kernel_context->CurrPC)
+#define        KSTK_ESP(tsk)   ((tsk)->thread.kernel_context->AX[0].U0)
+
+#define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
+
+#define cpu_relax()     barrier()
+
+extern void setup_priv(void);
+
+static inline unsigned int hard_processor_id(void)
+{
+       unsigned int id;
+
+       asm volatile ("MOV      %0, TXENABLE\n"
+                     "AND      %0, %0, %1\n"
+                     "LSR      %0, %0, %2\n"
+                     : "=&d" (id)
+                     : "I" (TXENABLE_THREAD_BITS),
+                       "K" (TXENABLE_THREAD_S)
+                     );
+
+       return id;
+}
+
+#define OP3_EXIT       0
+
+#define HALT_OK                0
+#define HALT_PANIC     -1
+
+/*
+ * Halt (stop) the hardware thread. This instruction sequence is the
+ * standard way to cause a Meta hardware thread to exit. The exit code
+ * is pushed onto the stack which is interpreted by the debug adapter.
+ */
+static inline void hard_processor_halt(int exit_code)
+{
+       asm volatile ("MOV      D1Ar1, %0\n"
+                     "MOV      D0Ar6, %1\n"
+                     "MSETL    [A0StP],D0Ar6,D0Ar4,D0Ar2\n"
+                     "1:\n"
+                     "SWITCH   #0xC30006\n"
+                     "B                1b\n"
+                     : : "r" (exit_code), "K" (OP3_EXIT));
+}
+
+/* Set these hooks to call SoC specific code to restart/halt/power off. */
+extern void (*soc_restart)(char *cmd);
+extern void (*soc_halt)(void);
+
+extern void show_trace(struct task_struct *tsk, unsigned long *sp,
+                      struct pt_regs *regs);
+
+#endif
diff --git a/arch/metag/include/asm/prom.h b/arch/metag/include/asm/prom.h
new file mode 100644 (file)
index 0000000..d2aa35d
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ *  arch/metag/include/asm/prom.h
+ *
+ *  Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ *  Based on ARM version:
+ *  Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __ASM_METAG_PROM_H
+#define __ASM_METAG_PROM_H
+
+#include <asm/setup.h>
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void copy_fdt(void);
+
+#endif /* __ASM_METAG_PROM_H */
diff --git a/arch/metag/include/asm/ptrace.h b/arch/metag/include/asm/ptrace.h
new file mode 100644 (file)
index 0000000..fcabc18
--- /dev/null
@@ -0,0 +1,60 @@
+#ifndef _METAG_PTRACE_H
+#define _METAG_PTRACE_H
+
+#include <linux/compiler.h>
+#include <uapi/asm/ptrace.h>
+#include <asm/tbx.h>
+
+#ifndef __ASSEMBLY__
+
+/* this struct defines the way the registers are stored on the
+   stack during a system call. */
+
+struct pt_regs {
+       TBICTX ctx;
+       TBICTXEXTCB0 extcb0[5];
+};
+
+#define user_mode(regs) (((regs)->ctx.SaveMask & TBICTX_PRIV_BIT) > 0)
+
+#define instruction_pointer(regs) ((unsigned long)(regs)->ctx.CurrPC)
+#define profile_pc(regs) instruction_pointer(regs)
+
+#define task_pt_regs(task) \
+       ((struct pt_regs *)(task_stack_page(task) + \
+                           sizeof(struct thread_info)))
+
+#define current_pt_regs() \
+       ((struct pt_regs *)((char *)current_thread_info() + \
+                           sizeof(struct thread_info)))
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_leave(struct pt_regs *regs);
+
+/* copy a struct user_gp_regs out to user */
+int metag_gp_regs_copyout(const struct pt_regs *regs,
+                         unsigned int pos, unsigned int count,
+                         void *kbuf, void __user *ubuf);
+/* copy a struct user_gp_regs in from user */
+int metag_gp_regs_copyin(struct pt_regs *regs,
+                        unsigned int pos, unsigned int count,
+                        const void *kbuf, const void __user *ubuf);
+/* copy a struct user_cb_regs out to user */
+int metag_cb_regs_copyout(const struct pt_regs *regs,
+                         unsigned int pos, unsigned int count,
+                         void *kbuf, void __user *ubuf);
+/* copy a struct user_cb_regs in from user */
+int metag_cb_regs_copyin(struct pt_regs *regs,
+                        unsigned int pos, unsigned int count,
+                        const void *kbuf, const void __user *ubuf);
+/* copy a struct user_rp_state out to user */
+int metag_rp_state_copyout(const struct pt_regs *regs,
+                          unsigned int pos, unsigned int count,
+                          void *kbuf, void __user *ubuf);
+/* copy a struct user_rp_state in from user */
+int metag_rp_state_copyin(struct pt_regs *regs,
+                         unsigned int pos, unsigned int count,
+                         const void *kbuf, const void __user *ubuf);
+
+#endif /* __ASSEMBLY__ */
+#endif /* _METAG_PTRACE_H */
diff --git a/arch/metag/include/asm/setup.h b/arch/metag/include/asm/setup.h
new file mode 100644 (file)
index 0000000..e13083b
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _ASM_METAG_SETUP_H
+#define _ASM_METAG_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+void per_cpu_trap_init(unsigned long);
+extern void __init dump_machine_table(void);
+#endif /* _ASM_METAG_SETUP_H */
diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h
new file mode 100644 (file)
index 0000000..e0373f8
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/cpumask.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+enum ipi_msg_type {
+       IPI_CALL_FUNC,
+       IPI_CALL_FUNC_SINGLE,
+       IPI_RESCHEDULE,
+};
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
+
+asmlinkage void secondary_start_kernel(void);
+
+extern void secondary_startup(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void __cpu_die(unsigned int cpu);
+extern int __cpu_disable(void);
+extern void cpu_die(void);
+#endif
+
+extern void smp_init_cpus(void);
+#endif /* __ASM_SMP_H */
diff --git a/arch/metag/include/asm/sparsemem.h b/arch/metag/include/asm/sparsemem.h
new file mode 100644 (file)
index 0000000..03fe255
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __ASM_METAG_SPARSEMEM_H
+#define __ASM_METAG_SPARSEMEM_H
+
+/*
+ * SECTION_SIZE_BITS           2^N: how big each section will be
+ * MAX_PHYSADDR_BITS           2^N: how much physical address space we have
+ * MAX_PHYSMEM_BITS            2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS      26
+#define MAX_PHYSADDR_BITS      32
+#define MAX_PHYSMEM_BITS       32
+
+#endif /* __ASM_METAG_SPARSEMEM_H */
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
new file mode 100644 (file)
index 0000000..86a7cf3
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#ifdef CONFIG_METAG_ATOMICITY_LOCK1
+#include <asm/spinlock_lock1.h>
+#else
+#include <asm/spinlock_lnkget.h>
+#endif
+
+#define arch_spin_unlock_wait(lock) \
+       do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+#define        arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define        arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)  cpu_relax()
+#define arch_read_relax(lock)  cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h
new file mode 100644 (file)
index 0000000..ad8436f
--- /dev/null
@@ -0,0 +1,249 @@
+#ifndef __ASM_SPINLOCK_LNKGET_H
+#define __ASM_SPINLOCK_LNKGET_H
+
+/*
+ * None of these asm statements clobber memory as LNKSET writes around
+ * the cache so the memory it modifies cannot safely be read by any means
+ * other than these accessors.
+ */
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+       int ret;
+
+       asm volatile ("LNKGETD  %0, [%1]\n"
+                     "TST      %0, #1\n"
+                     "MOV      %0, #1\n"
+                     "XORZ      %0, %0, %0\n"
+                     : "=&d" (ret)
+                     : "da" (&lock->lock)
+                     : "cc");
+       return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       int tmp;
+
+       asm volatile ("1:     LNKGETD %0,[%1]\n"
+                     "       TST     %0, #1\n"
+                     "       ADD     %0, %0, #1\n"
+                     "       LNKSETDZ [%1], %0\n"
+                     "       BNZ     1b\n"
+                     "       DEFR    %0, TXSTAT\n"
+                     "       ANDT    %0, %0, #HI(0x3f000000)\n"
+                     "       CMPT    %0, #HI(0x02000000)\n"
+                     "       BNZ     1b\n"
+                     : "=&d" (tmp)
+                     : "da" (&lock->lock)
+                     : "cc");
+
+       smp_mb();
+}
+
+/* Returns 0 if failed to acquire lock */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       int tmp;
+
+       asm volatile ("       LNKGETD %0,[%1]\n"
+                     "       TST     %0, #1\n"
+                     "       ADD     %0, %0, #1\n"
+                     "       LNKSETDZ [%1], %0\n"
+                     "       BNZ     1f\n"
+                     "       DEFR    %0, TXSTAT\n"
+                     "       ANDT    %0, %0, #HI(0x3f000000)\n"
+                     "       CMPT    %0, #HI(0x02000000)\n"
+                     "       MOV     %0, #1\n"
+                     "1:     XORNZ   %0, %0, %0\n"
+                     : "=&d" (tmp)
+                     : "da" (&lock->lock)
+                     : "cc");
+
+       smp_mb();
+
+       return tmp;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       smp_mb();
+
+       asm volatile ("       SETD    [%0], %1\n"
+                     :
+                     : "da" (&lock->lock), "da" (0)
+                     : "memory");
+}
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31.  When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+       int tmp;
+
+       asm volatile ("1:     LNKGETD %0,[%1]\n"
+                     "       CMP     %0, #0\n"
+                     "       ADD     %0, %0, %2\n"
+                     "       LNKSETDZ [%1], %0\n"
+                     "       BNZ     1b\n"
+                     "       DEFR    %0, TXSTAT\n"
+                     "       ANDT    %0, %0, #HI(0x3f000000)\n"
+                     "       CMPT    %0, #HI(0x02000000)\n"
+                     "       BNZ     1b\n"
+                     : "=&d" (tmp)
+                     : "da" (&rw->lock), "bd" (0x80000000)
+                     : "cc");
+
+       smp_mb();
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+       int tmp;
+
+       asm volatile ("       LNKGETD %0,[%1]\n"
+                     "       CMP     %0, #0\n"
+                     "       ADD     %0, %0, %2\n"
+                     "       LNKSETDZ [%1], %0\n"
+                     "       BNZ     1f\n"
+                     "       DEFR    %0, TXSTAT\n"
+                     "       ANDT    %0, %0, #HI(0x3f000000)\n"
+                     "       CMPT    %0, #HI(0x02000000)\n"
+                     "       MOV     %0,#1\n"
+                     "1:     XORNZ   %0, %0, %0\n"
+                     : "=&d" (tmp)
+                     : "da" (&rw->lock), "bd" (0x80000000)
+                     : "cc");
+
+       smp_mb();
+
+       return tmp;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+       smp_mb();
+
+       asm volatile ("       SETD    [%0], %1\n"
+                     :
+                     : "da" (&rw->lock), "da" (0)
+                     : "memory");
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+       int ret;
+
+       asm volatile ("LNKGETD  %0, [%1]\n"
+                     "CMP      %0, #0\n"
+                     "MOV      %0, #1\n"
+                     "XORNZ     %0, %0, %0\n"
+                     : "=&d" (ret)
+                     : "da" (&rw->lock)
+                     : "cc");
+       return ret;
+}
+
+/*
+ * Read locks are a bit more hairy:
+ *  - Exclusively load the lock value.
+ *  - Increment it.
+ *  - Store new lock value if positive, and we still own this location.
+ *    If the value is negative, we've already failed.
+ *  - If we failed to store the value, we want a negative result.
+ *  - If we failed, try again.
+ * Unlocking is similarly hairy.  We may have multiple read locks
+ * currently active.  However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+       int tmp;
+
+       asm volatile ("1:     LNKGETD %0,[%1]\n"
+                     "       ADDS    %0, %0, #1\n"
+                     "       LNKSETDPL [%1], %0\n"
+                     "       BMI     1b\n"
+                     "       DEFR    %0, TXSTAT\n"
+                     "       ANDT    %0, %0, #HI(0x3f000000)\n"
+                     "       CMPT    %0, #HI(0x02000000)\n"
+                     "       BNZ     1b\n"
+                     : "=&d" (tmp)
+                     : "da" (&rw->lock)
+                     : "cc");
+
+       smp_mb();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+       int tmp;
+
+       smp_mb();
+
+       asm volatile ("1:     LNKGETD %0,[%1]\n"
+                     "       SUB     %0, %0, #1\n"
+                     "       LNKSETD [%1], %0\n"
+                     "       DEFR    %0, TXSTAT\n"
+                     "       ANDT    %0, %0, #HI(0x3f000000)\n"
+                     "       CMPT    %0, #HI(0x02000000)\n"
+                     "       BNZ     1b\n"
+                     : "=&d" (tmp)
+                     : "da" (&rw->lock)
+                     : "cc", "memory");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+       int tmp;
+
+       asm volatile ("       LNKGETD %0,[%1]\n"
+                     "       ADDS    %0, %0, #1\n"
+                     "       LNKSETDPL [%1], %0\n"
+                     "       BMI     1f\n"
+                     "       DEFR    %0, TXSTAT\n"
+                     "       ANDT    %0, %0, #HI(0x3f000000)\n"
+                     "       CMPT    %0, #HI(0x02000000)\n"
+                     "       MOV     %0,#1\n"
+                     "       BZ      2f\n"
+                     "1:     MOV     %0,#0\n"
+                     "2:\n"
+                     : "=&d" (tmp)
+                     : "da" (&rw->lock)
+                     : "cc");
+
+       smp_mb();
+
+       return tmp;
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+       int tmp;
+
+       asm volatile ("LNKGETD  %0, [%1]\n"
+                     "CMP      %0, %2\n"
+                     "MOV      %0, #1\n"
+                     "XORZ     %0, %0, %0\n"
+                     : "=&d" (tmp)
+                     : "da" (&rw->lock), "bd" (0x80000000)
+                     : "cc");
+       return tmp;
+}
+
+#define        arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define        arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)  cpu_relax()
+#define arch_read_relax(lock)  cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_LNKGET_H */
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
new file mode 100644 (file)
index 0000000..c630444
--- /dev/null
@@ -0,0 +1,184 @@
+#ifndef __ASM_SPINLOCK_LOCK1_H
+#define __ASM_SPINLOCK_LOCK1_H
+
+#include <asm/bug.h>
+#include <asm/global_lock.h>
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+       int ret;
+
+       barrier();
+       ret = lock->lock;
+       WARN_ON(ret != 0 && ret != 1);
+       return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       unsigned int we_won = 0;
+       unsigned long flags;
+
+again:
+       __global_lock1(flags);
+       if (lock->lock == 0) {
+               fence();
+               lock->lock = 1;
+               we_won = 1;
+       }
+       __global_unlock1(flags);
+       if (we_won == 0)
+               goto again;
+       WARN_ON(lock->lock != 1);
+}
+
+/* Returns 0 if failed to acquire lock */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       unsigned long flags;
+       unsigned int ret;
+
+       __global_lock1(flags);
+       ret = lock->lock;
+       if (ret == 0) {
+               fence();
+               lock->lock = 1;
+       }
+       __global_unlock1(flags);
+       return (ret == 0);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       barrier();
+       WARN_ON(!lock->lock);
+       lock->lock = 0;
+}
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31.  When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+       unsigned long flags;
+       unsigned int we_won = 0;
+
+again:
+       __global_lock1(flags);
+       if (rw->lock == 0) {
+               fence();
+               rw->lock = 0x80000000;
+               we_won = 1;
+       }
+       __global_unlock1(flags);
+       if (we_won == 0)
+               goto again;
+       WARN_ON(rw->lock != 0x80000000);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+       unsigned long flags;
+       unsigned int ret;
+
+       __global_lock1(flags);
+       ret = rw->lock;
+       if (ret == 0) {
+               fence();
+               rw->lock = 0x80000000;
+       }
+       __global_unlock1(flags);
+
+       return (ret == 0);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+       barrier();
+       WARN_ON(rw->lock != 0x80000000);
+       rw->lock = 0;
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+       unsigned int ret;
+
+       barrier();
+       ret = rw->lock;
+       return (ret == 0);
+}
+
+/*
+ * Read locks are a bit more hairy:
+ *  - Exclusively load the lock value.
+ *  - Increment it.
+ *  - Store new lock value if positive, and we still own this location.
+ *    If the value is negative, we've already failed.
+ *  - If we failed to store the value, we want a negative result.
+ *  - If we failed, try again.
+ * Unlocking is similarly hairy.  We may have multiple read locks
+ * currently active.  However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+       unsigned long flags;
+       unsigned int we_won = 0, ret;
+
+again:
+       __global_lock1(flags);
+       ret = rw->lock;
+       if (ret < 0x80000000) {
+               fence();
+               rw->lock = ret + 1;
+               we_won = 1;
+       }
+       __global_unlock1(flags);
+       if (!we_won)
+               goto again;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+       unsigned long flags;
+       unsigned int ret;
+
+       __global_lock1(flags);
+       fence();
+       ret = rw->lock--;
+       __global_unlock1(flags);
+       WARN_ON(ret == 0);
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+       unsigned long flags;
+       unsigned int ret;
+
+       __global_lock1(flags);
+       ret = rw->lock;
+       if (ret < 0x80000000) {
+               fence();
+               rw->lock = ret + 1;
+       }
+       __global_unlock1(flags);
+       return (ret < 0x80000000);
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+       unsigned int ret;
+
+       barrier();
+       ret = rw->lock;
+       return (ret < 0x80000000);
+}
+
+#endif /* __ASM_SPINLOCK_LOCK1_H */
diff --git a/arch/metag/include/asm/spinlock_types.h b/arch/metag/include/asm/spinlock_types.h
new file mode 100644 (file)
index 0000000..b763914
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ASM_METAG_SPINLOCK_TYPES_H
+#define _ASM_METAG_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+       volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
+
+typedef struct {
+       volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED                { 0 }
+
+#endif /* _ASM_METAG_SPINLOCK_TYPES_H */
diff --git a/arch/metag/include/asm/stacktrace.h b/arch/metag/include/asm/stacktrace.h
new file mode 100644 (file)
index 0000000..2830a0f
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+struct stackframe {
+       unsigned long fp;
+       unsigned long sp;
+       unsigned long lr;
+       unsigned long pc;
+};
+
+struct metag_frame {
+       unsigned long fp;
+       unsigned long lr;
+};
+
+extern int unwind_frame(struct stackframe *frame);
+extern void walk_stackframe(struct stackframe *frame,
+                           int (*fn)(struct stackframe *, void *), void *data);
+
+#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/metag/include/asm/string.h b/arch/metag/include/asm/string.h
new file mode 100644 (file)
index 0000000..53e3806
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _METAG_STRING_H_
+#define _METAG_STRING_H_
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#endif /* _METAG_STRING_H_ */
diff --git a/arch/metag/include/asm/switch.h b/arch/metag/include/asm/switch.h
new file mode 100644 (file)
index 0000000..1fd6a58
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_METAG_SWITCH_H
+#define _ASM_METAG_SWITCH_H
+
+/* metag SWITCH codes */
+#define __METAG_SW_PERM_BREAK  0x400002        /* compiled in breakpoint */
+#define __METAG_SW_SYS_LEGACY  0x440000        /* legacy system calls */
+#define __METAG_SW_SYS         0x440001        /* system calls */
+
+/* metag SWITCH instruction encoding */
+#define __METAG_SW_ENCODING(TYPE)      (0xaf000000 | (__METAG_SW_##TYPE))
+
+#endif /* _ASM_METAG_SWITCH_H */
diff --git a/arch/metag/include/asm/syscall.h b/arch/metag/include/asm/syscall.h
new file mode 100644 (file)
index 0000000..24fc979
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Imagination Technologies Ltd.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_METAG_SYSCALL_H
+#define _ASM_METAG_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+
+#include <asm/switch.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+                                 struct pt_regs *regs)
+{
+       unsigned long insn;
+
+       /*
+        * FIXME there's no way to find out how we got here other than to
+        * examine the memory at the PC to see if it is a syscall
+        * SWITCH instruction.
+        */
+       if (get_user(insn, (unsigned long *)(regs->ctx.CurrPC - 4)))
+               return -1;
+
+       if (insn == __METAG_SW_ENCODING(SYS))
+               return regs->ctx.DX[0].U1;
+       else
+               return -1L;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+                                   struct pt_regs *regs)
+{
+       /* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+                                    struct pt_regs *regs)
+{
+       unsigned long error = regs->ctx.DX[0].U0;
+       return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->ctx.DX[0].U0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+                                           struct pt_regs *regs,
+                                           int error, long val)
+{
+       regs->ctx.DX[0].U0 = (long) error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+                                        struct pt_regs *regs,
+                                        unsigned int i, unsigned int n,
+                                        unsigned long *args)
+{
+       unsigned int reg, j;
+       BUG_ON(i + n > 6);
+
+       for (j = i, reg = 6 - i; j < (i + n); j++, reg--) {
+               if (reg % 2)
+                       args[j] = regs->ctx.DX[(reg + 1) / 2].U0;
+               else
+                       args[j] = regs->ctx.DX[reg / 2].U1;
+       }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+                                        struct pt_regs *regs,
+                                        unsigned int i, unsigned int n,
+                                        const unsigned long *args)
+{
+       unsigned int reg;
+       BUG_ON(i + n > 6);
+
+       for (reg = 6 - i; i < (i + n); i++, reg--) {
+               if (reg % 2)
+                       regs->ctx.DX[(reg + 1) / 2].U0 = args[i];
+               else
+                       regs->ctx.DX[reg / 2].U1 = args[i];
+       }
+}
+
+#define NR_syscalls __NR_syscalls
+
+/* generic syscall table */
+extern const void *sys_call_table[];
+
+#endif /* _ASM_METAG_SYSCALL_H */
diff --git a/arch/metag/include/asm/syscalls.h b/arch/metag/include/asm/syscalls.h
new file mode 100644 (file)
index 0000000..a02b955
--- /dev/null
@@ -0,0 +1,39 @@
+#ifndef _ASM_METAG_SYSCALLS_H
+#define _ASM_METAG_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+
+/* kernel/signal.c */
+#define sys_rt_sigreturn sys_rt_sigreturn
+asmlinkage long sys_rt_sigreturn(void);
+
+#include <asm-generic/syscalls.h>
+
+/* kernel/sys_metag.c: Metag-specific syscalls and *_metag wrappers for
+   syscalls taking 64-bit arguments split across registers on this ABI */
+asmlinkage int sys_metag_setglobalbit(char __user *, int);
+asmlinkage void sys_metag_set_fpu_flags(unsigned int);
+asmlinkage int sys_metag_set_tls(void __user *);
+asmlinkage void *sys_metag_get_tls(void);
+
+asmlinkage long sys_truncate64_metag(const char __user *, unsigned long,
+                                    unsigned long);
+asmlinkage long sys_ftruncate64_metag(unsigned int, unsigned long,
+                                     unsigned long);
+asmlinkage long sys_fadvise64_64_metag(int, unsigned long, unsigned long,
+                                      unsigned long, unsigned long, int);
+asmlinkage long sys_readahead_metag(int, unsigned long, unsigned long, size_t);
+asmlinkage ssize_t sys_pread64_metag(unsigned long, char __user *, size_t,
+                                    unsigned long, unsigned long);
+asmlinkage ssize_t sys_pwrite64_metag(unsigned long, char __user *, size_t,
+                                     unsigned long, unsigned long);
+asmlinkage long sys_sync_file_range_metag(int, unsigned long, unsigned long,
+                                         unsigned long, unsigned long,
+                                         unsigned int);
+
+int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
+                   int syscall); /* NOTE(review): pending-work handler; confirm caller in entry code */
+
+#endif /* _ASM_METAG_SYSCALLS_H */
diff --git a/arch/metag/include/asm/tbx.h b/arch/metag/include/asm/tbx.h
new file mode 100644 (file)
index 0000000..287b36f
--- /dev/null
@@ -0,0 +1,1425 @@
+/*
+ * asm/tbx.h
+ *
+ * Copyright (C) 2000-2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Thread binary interface header
+ */
+
+#ifndef _ASM_METAG_TBX_H_
+#define _ASM_METAG_TBX_H_
+
+/* for CACHEW_* values */
+#include <asm/metag_isa.h>
+/* for LINSYSEVENT_* addresses */
+#include <asm/metag_mem.h>
+
+#ifdef  TBI_1_4
+#ifndef TBI_MUTEXES_1_4
+#define TBI_MUTEXES_1_4
+#endif
+#ifndef TBI_SEMAPHORES_1_4
+#define TBI_SEMAPHORES_1_4
+#endif
+#ifndef TBI_ASYNC_SWITCH_1_4
+#define TBI_ASYNC_SWITCH_1_4
+#endif
+#ifndef TBI_FASTINT_1_4
+#define TBI_FASTINT_1_4
+#endif
+#endif
+
+
+/* Id values in the TBI system describe a segment using an arbitrary
+   integer value and flags in the bottom 8 bits, the SIGPOLL value is
+   used in cases where control over blocking or polling behaviour is
+   needed. */
+#define TBID_SIGPOLL_BIT    0x02 /* Set bit in an Id value to poll vs block */
+/* Extended segment identifiers use strings in the string table */
+#define TBID_IS_SEGSTR( Id ) (((Id) & (TBID_SEGTYPE_BITS>>1)) == 0)
+
+/* Segment identifiers contain the following related bit-fields */
+#define TBID_SEGTYPE_BITS   0x0F /* One of the predefined segment types */
+#define TBID_SEGTYPE_S      0
+#define TBID_SEGSCOPE_BITS  0x30 /* Indicates the scope of the segment */
+#define TBID_SEGSCOPE_S     4
+#define TBID_SEGGADDR_BITS  0xC0 /* Indicates access possible via pGAddr */
+#define TBID_SEGGADDR_S     6
+
+/* Segments of memory can only really contain a few types of data */
+#define TBID_SEGTYPE_TEXT   0x02 /* Code segment */
+#define TBID_SEGTYPE_DATA   0x04 /* Data segment */
+#define TBID_SEGTYPE_STACK  0x06 /* Stack segment */
+#define TBID_SEGTYPE_HEAP   0x0A /* Heap segment */
+#define TBID_SEGTYPE_ROOT   0x0C /* Root block segments */
+#define TBID_SEGTYPE_STRING 0x0E /* String table segment */
+
+/* Segments have one of three possible scopes */
+#define TBID_SEGSCOPE_INIT     0 /* Temporary area for initialisation phase */
+#define TBID_SEGSCOPE_LOCAL    1 /* Private to this thread */
+#define TBID_SEGSCOPE_GLOBAL   2 /* Shared globally throughout the system */
+#define TBID_SEGSCOPE_SHARED   3 /* Limited sharing between local/global */
+
+/* For segment specifier a further field in two of the remaining bits
+   indicates the usefulness of the pGAddr field in the segment
+   descriptor. */
+#define TBID_SEGGADDR_NULL     0 /* pGAddr is NULL -> SEGSCOPE_(LOCAL|INIT) */
+#define TBID_SEGGADDR_READ     1 /* Only read    via pGAddr */
+#define TBID_SEGGADDR_WRITE    2 /* Full access  via pGAddr */
+#define TBID_SEGGADDR_EXEC     3 /* Only execute via pGAddr */
+
+/* The following values are common to both segment and signal Id value and
+   live in the top 8 bits of the Id values. */
+
+/* The ISTAT bit indicates if segments are related to interrupt vs
+   background level interfaces a thread can still handle all triggers at
+   either level, but can also split these up if it wants to. */
+#define TBID_ISTAT_BIT    0x01000000
+#define TBID_ISTAT_S      24
+
+/* Privilege needed to access a segment is indicated by the next bit.
+   
+   This bit is set to mirror the current privilege level when starting a
+   search for a segment - setting it yourself toggles the automatically
+   generated state which is only useful to emulate unprivileged behaviour
+   or access unprivileged areas of memory while at privileged level. */
+#define TBID_PSTAT_BIT    0x02000000
+#define TBID_PSTAT_S      25
+
+/* The top six bits of a signal/segment specifier identifies a thread within
+   the system. This represents a segment's owner. */
+#define TBID_THREAD_BITS  0xFC000000
+#define TBID_THREAD_S     26
+
+/* Special thread id values */
+#define TBID_THREAD_NULL   (-32) /* Never matches any thread/segment id used */
+#define TBID_THREAD_GLOBAL (-31) /* Things global to all threads */
+#define TBID_THREAD_HOST   ( -1) /* Host interface */
+#define TBID_THREAD_EXTIO  (TBID_THREAD_HOST)   /* Host based ExtIO i/f */
+
+/* Virtual Id's are used for external thread interface structures or the
+   above special Id's */
+#define TBID_IS_VIRTTHREAD( Id ) ((Id) < 0)
+
+/* Real Id's are used for actual hardware threads that are local */
+#define TBID_IS_REALTHREAD( Id ) ((Id) >= 0)
+
+/* Generate a segment Id given Thread, Scope, and Type */
+#define TBID_SEG( Thread, Scope, Type )                           (\
+    ((Thread)<<TBID_THREAD_S) + ((Scope)<<TBID_SEGSCOPE_S) + (Type))
+
+/* Generate a signal Id given Thread and SigNum */
+#define TBID_SIG( Thread, SigNum )                                        (\
+    ((Thread)<<TBID_THREAD_S) + ((SigNum)<<TBID_SIGNUM_S) + TBID_SIGNAL_BIT)
+
+/* Generate an Id that solely represents a thread - useful for cache ops */
+#define TBID_THD( Thread ) ((Thread)<<TBID_THREAD_S)
+#define TBID_THD_NULL      ((TBID_THREAD_NULL)  <<TBID_THREAD_S)
+#define TBID_THD_GLOBAL    ((TBID_THREAD_GLOBAL)<<TBID_THREAD_S)
+
+/* Common exception handler (see TBID_SIGNUM_XXF below) receives hardware
+   generated fault codes TBIXXF_SIGNUM_xxF in its SigNum parameter */
+#define TBIXXF_SIGNUM_IIF   0x01 /* General instruction fault */
+#define TBIXXF_SIGNUM_PGF   0x02 /* Privilege general fault */
+#define TBIXXF_SIGNUM_DHF   0x03 /* Data access watchpoint HIT */
+#define TBIXXF_SIGNUM_IGF   0x05 /* Code fetch general read failure */
+#define TBIXXF_SIGNUM_DGF   0x07 /* Data access general read/write fault */
+#define TBIXXF_SIGNUM_IPF   0x09 /* Code fetch page fault */
+#define TBIXXF_SIGNUM_DPF   0x0B /* Data access page fault */
+#define TBIXXF_SIGNUM_IHF   0x0D /* Instruction breakpoint HIT */
+#define TBIXXF_SIGNUM_DWF   0x0F /* Data access read-only fault */
+
+/* Hardware signals communicate events between processing levels within a
+   single thread all the _xxF cases are exceptions and are routed via a
+   common exception handler, _SWx are software trap events and kicks including
+   __TBISignal generated kicks, and finally _TRx are hardware triggers */
+#define TBID_SIGNUM_SW0     0x00 /* SWITCH GROUP 0 - Per thread user */
+#define TBID_SIGNUM_SW1     0x01 /* SWITCH GROUP 1 - Per thread system */
+#define TBID_SIGNUM_SW2     0x02 /* SWITCH GROUP 2 - Internal global request */
+#define TBID_SIGNUM_SW3     0x03 /* SWITCH GROUP 3 - External global request */
+#ifdef TBI_1_4
+#define TBID_SIGNUM_FPE     0x04 /* Deferred exception - Any IEEE 754 exception */
+#define TBID_SIGNUM_FPD     0x05 /* Deferred exception - Denormal exception */
+/* Reserved 0x6 for a reserved deferred exception */
+#define TBID_SIGNUM_BUS     0x07 /* Deferred exception - Bus Error */
+/* Reserved 0x08-0x09 */
+#else
+/* Reserved 0x04-0x09 */
+#endif
+#define TBID_SIGNUM_SWS     0x0A /* KICK received with SigMask != 0 */
+#define TBID_SIGNUM_SWK     0x0B /* KICK received with SigMask == 0 */
+/* Reserved 0x0C-0x0F */
+#define TBID_SIGNUM_TRT     0x10 /* Timer trigger */
+#define TBID_SIGNUM_LWK     0x11 /* Low level kick (handler provided by TBI) */
+#define TBID_SIGNUM_XXF     0x12 /* Fault handler - receives ALL _xxF sigs */
+#ifdef TBI_1_4
+#define TBID_SIGNUM_DFR     0x13 /* Deferred Exception handler */
+#else
+#define TBID_SIGNUM_FPE     0x13 /* FPE Exception handler */
+#endif
+/* External trigger one group 0x14 to 0x17 - per thread */
+#define TBID_SIGNUM_TR1(Thread) (0x14+(Thread))
+#define TBID_SIGNUM_T10     0x14
+#define TBID_SIGNUM_T11     0x15
+#define TBID_SIGNUM_T12     0x16
+#define TBID_SIGNUM_T13     0x17
+/* External trigger two group 0x18 to 0x1b - per thread */
+#define TBID_SIGNUM_TR2(Thread) (0x18+(Thread))
+#define TBID_SIGNUM_T20     0x18
+#define TBID_SIGNUM_T21     0x19
+#define TBID_SIGNUM_T22     0x1A
+#define TBID_SIGNUM_T23     0x1B
+#define TBID_SIGNUM_TR3     0x1C /* External trigger N-4 (global) */
+#define TBID_SIGNUM_TR4     0x1D /* External trigger N-3 (global) */
+#define TBID_SIGNUM_TR5     0x1E /* External trigger N-2 (global) */
+#define TBID_SIGNUM_TR6     0x1F /* External trigger N-1 (global) */
+#define TBID_SIGNUM_MAX     0x1F
+
+/* Return the trigger register(TXMASK[I]/TXSTAT[I]) bits related to
+   each hardware signal, sometimes this is a many-to-one relationship. */
+#define TBI_TRIG_BIT(SigNum)                                      (\
+    ((SigNum) >= TBID_SIGNUM_TRT) ? 1<<((SigNum)-TBID_SIGNUM_TRT) :\
+    ( ((SigNum) == TBID_SIGNUM_SWS) ||                             \
+      ((SigNum) == TBID_SIGNUM_SWK)    ) ?                         \
+                         TXSTAT_KICK_BIT : TXSTATI_BGNDHALT_BIT    )
+
+/* Return the hardware trigger vector number for entries in the
+   HWVEC0EXT table that will generate the required internal trigger. */
+#define TBI_TRIG_VEC(SigNum)                                      (\
+    ((SigNum) >= TBID_SIGNUM_T10) ? ((SigNum)-TBID_SIGNUM_TRT) : -1)
+
+/* Default trigger masks for each thread at background/interrupt level */
+#define TBI_TRIGS_INIT( Thread )                           (\
+    TXSTAT_KICK_BIT + TBI_TRIG_BIT(TBID_SIGNUM_TR1(Thread)) )
+#define TBI_INTS_INIT( Thread )                            (\
+    TXSTAT_KICK_BIT + TXSTATI_BGNDHALT_BIT                  \
+                    + TBI_TRIG_BIT(TBID_SIGNUM_TR2(Thread)) )
+
+#ifndef __ASSEMBLY__
+/* A spin-lock location is a zero-initialised location in memory */
+typedef volatile int TBISPIN, *PTBISPIN;
+
+/* A kick location is a hardware location you can write to
+ * in order to cause a kick
+ */
+typedef volatile int *PTBIKICK;
+
+#if defined(METAC_1_0) || defined(METAC_1_1)
+/* Macro to perform a kick */
+#define TBI_KICK( pKick ) do { pKick[0] = 1; } while (0)
+#else
+/* #define METAG_LIN_VALUES before including machine.h if required */
+#ifdef LINSYSEVENT_WR_COMBINE_FLUSH
+/* Macro to perform a kick - write combiners must be flushed */
+#define TBI_KICK( pKick )                                                do {\
+    volatile int *pFlush = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH;    \
+    pFlush[0] = 0;                                                           \
+    pKick[0]  = 1;                                                } while (0)
+#endif
+#endif /* if defined(METAC_1_0) || defined(METAC_1_1) */
+#endif /* ifndef __ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+/* 64-bit dual unit state value */
+typedef struct _tbidual_tag_ {
+    /* 32-bit value from a pair of registers in data or address units */
+    int U0, U1;
+} TBIDUAL, *PTBIDUAL;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Byte offsets of fields within TBIDUAL */
+#define TBIDUAL_U0      (0)
+#define TBIDUAL_U1      (4)
+
+#define TBIDUAL_BYTES   (8)
+
+#define TBICTX_CRIT_BIT 0x0001  /* ASync state saved in TBICTX */
+#define TBICTX_SOFT_BIT 0x0002  /* Sync state saved in TBICTX (other bits 0) */
+#ifdef TBI_FASTINT_1_4
+#define TBICTX_FINT_BIT 0x0004  /* Using Fast Interrupts */
+#endif
+#define TBICTX_FPAC_BIT 0x0010  /* FPU state in TBICTX, FPU active on entry */
+#define TBICTX_XMCC_BIT 0x0020  /* Bit to identify a MECC task */
+#define TBICTX_CBUF_BIT 0x0040  /* Hardware catch buffer flag from TXSTATUS */
+#define TBICTX_CBRP_BIT 0x0080  /* Read pipeline dirty from TXDIVTIME */
+#define TBICTX_XDX8_BIT 0x0100  /* Saved DX.8 to DX.15 too */
+#define TBICTX_XAXX_BIT 0x0200  /* Save remaining AX registers to AX.7 */
+#define TBICTX_XHL2_BIT 0x0400  /* Saved hardware loop registers too */
+#define TBICTX_XTDP_BIT 0x0800  /* Saved DSP registers too */
+#define TBICTX_XEXT_BIT 0x1000  /* Set if TBICTX.Ext.Ctx contains extended
+                                   state save area, otherwise TBICTX.Ext.AX2
+                                   just holds normal A0.2 and A1.2 states */
+#define TBICTX_WAIT_BIT 0x2000  /* Causes wait for trigger - sticky toggle */
+#define TBICTX_XCBF_BIT 0x4000  /* Catch buffer or RD extracted into TBICTX */
+#define TBICTX_PRIV_BIT 0x8000  /* Set if system uses 'privileged' model */
+
+#ifdef METAC_1_0
+#define TBICTX_XAX3_BIT 0x0200  /* Saved AX.5 to AX.7 for XAXX */
+#define TBICTX_AX_REGS  5       /* Ax.0 to Ax.4 are core GP regs on CHORUS */
+#else
+#define TBICTX_XAX4_BIT 0x0200  /* Saved AX.4 to AX.7 for XAXX */
+#define TBICTX_AX_REGS  4       /* Default is Ax.0 to Ax.3 */
+#endif
+
+#ifdef TBI_1_4
+#define TBICTX_CFGFPU_FX16_BIT  0x00010000               /* Save FX.8 to FX.15 too */
+
+/* The METAC_CORE_ID_CONFIG field indicates omitted DSP resources */
+#define METAC_COREID_CFGXCTX_MASK( Value )                                 (\
+       ( (((Value & METAC_COREID_CFGDSP_BITS)>>                                \
+                    METAC_COREID_CFGDSP_S      ) == METAC_COREID_CFGDSP_MIN) ? \
+                ~(TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+                             \
+                  TBICTX_XAXX_BIT+TBICTX_XDX8_BIT ) : ~0U )                    )
+#endif
+
+/* Extended context state provides a standardised method for registering the
+   arguments required by __TBICtxSave to save the additional register states
+   currently in use by non general purpose code. The state of the __TBIExtCtx
+   variable in the static space of the thread forms an extension of the base
+   context of the thread.
+   
+   If ( __TBIExtCtx.Ctx.SaveMask == 0 ) then pExt is assumed to be NULL and
+   the empty state of  __TBIExtCtx is represented by the fact that
+   TBICTX.SaveMask does not have the bit TBICTX_XEXT_BIT set.
+   
+   If ( __TBIExtCtx.Ctx.SaveMask != 0 ) then pExt should point at a suitably
+   sized extended context save area (usually at the end of the stack space
+   allocated by the current routine). This space should allow for the
+   displaced state of A0.2 and A1.2 to be saved along with the other extended
+   states indicated via __TBIExtCtx.Ctx.SaveMask. */
+#ifndef __ASSEMBLY__
+typedef union _tbiextctx_tag_ {
+    long long Val;
+    TBIDUAL AX2;
+    struct _tbiextctxext_tag {
+#ifdef TBI_1_4
+        short DspramSizes;      /* DSPRAM sizes. Encoding varies between
+                                   TBICtxAlloc and the ECH scheme. */
+#else
+        short Reserved0;
+#endif
+        short SaveMask;         /* Flag bits for state saved */
+        PTBIDUAL pExt;          /* AX[2] state saved first plus Xxxx state */
+    
+    } Ctx;
+    
+} TBIEXTCTX, *PTBIEXTCTX;
+
+/* Automatic registration of extended context save for __TBINestInts */
+extern TBIEXTCTX __TBIExtCtx;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Byte offsets of fields within TBIEXTCTX */
+#define TBIEXTCTX_AX2           (0)
+#define TBIEXTCTX_Ctx           (0)
+#define TBIEXTCTX_Ctx_SaveMask  (TBIEXTCTX_Ctx + 2)
+#define TBIEXTCTX_Ctx_pExt      (TBIEXTCTX_Ctx + 2 + 2)
+
+/* Extended context data size calculation constants */
+#define TBICTXEXT_BYTES          (8)
+#define TBICTXEXTBB8_BYTES     (8*8)
+#define TBICTXEXTAX3_BYTES     (3*8)
+#define TBICTXEXTAX4_BYTES     (4*8)
+#ifdef METAC_1_0
+#define TBICTXEXTAXX_BYTES     TBICTXEXTAX3_BYTES
+#else
+#define TBICTXEXTAXX_BYTES     TBICTXEXTAX4_BYTES
+#endif
+#define TBICTXEXTHL2_BYTES     (3*8)
+#define TBICTXEXTTDR_BYTES    (27*8)
+#define TBICTXEXTTDP_BYTES TBICTXEXTTDR_BYTES
+
+#ifdef TBI_1_4
+#define TBICTXEXTFX8_BYTES     (4*8)
+#define TBICTXEXTFPAC_BYTES    (1*4 + 2*2 + 4*8)
+#define TBICTXEXTFACF_BYTES    (3*8)
+#endif
+
+/* Maximum flag bits to be set via the TBICTX_EXTSET macro */
+#define TBICTXEXT_MAXBITS  (TBICTX_XEXT_BIT|                \
+                            TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\
+                            TBICTX_XHL2_BIT|TBICTX_XTDP_BIT )
+
+/* Maximum size of the extended context save area for current variant */
+#define TBICTXEXT_MAXBYTES (TBICTXEXT_BYTES+TBICTXEXTBB8_BYTES+\
+                         TBICTXEXTAXX_BYTES+TBICTXEXTHL2_BYTES+\
+                                            TBICTXEXTTDP_BYTES )
+
+#ifdef TBI_FASTINT_1_4
+/* Maximum flag bits to be set via the TBICTX_EXTSET macro */
+#define TBICTX2EXT_MAXBITS (TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\
+                            TBICTX_XHL2_BIT|TBICTX_XTDP_BIT )
+
+/* Maximum size of the extended context save area for current variant */
+#define TBICTX2EXT_MAXBYTES (TBICTXEXTBB8_BYTES+TBICTXEXTAXX_BYTES\
+                             +TBICTXEXTHL2_BYTES+TBICTXEXTTDP_BYTES )
+#endif
+
+/* Specify extended resources being used by current routine, code must be
+   assembler generated to utilise extended resources-
+
+        MOV     D0xxx,A0StP             ; Perform alloca - routine should
+        ADD     A0StP,A0StP,#SaveSize   ; setup/use A0FrP to access locals
+        MOVT    D1xxx,#SaveMask         ; TBICTX_XEXT_BIT MUST be set
+        SETL    [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx
+        
+    NB: OG(___TBIExtCtx) is a special case supported for SETL/GETL operations
+        on 64-bit sizes structures only, other accesses must be based on use
+        of OGA(___TBIExtCtx). 
+
+   At exit of routine-
+   
+        MOV     D0xxx,#0                ; Clear extended context save state
+        MOV     D1xxx,#0
+        SETL    [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx
+        SUB     A0StP,A0StP,#SaveSize   ; If original A0StP required
+        
+    NB: Both the setting and clearing of the whole __TBIExtCtx MUST be done
+        atomically in one 64-bit write operation.
+
+   For simple interrupt handling only via __TBINestInts there should be no
+   impact of the __TBIExtCtx system. If pre-emptive scheduling is being
+   performed however (assuming __TBINestInts has already been called earlier
+   on) then the following logic will correctly call __TBICtxSave if required
+   and clear out the currently selected background task-
+   
+        if ( __TBIExtCtx.Ctx.SaveMask & TBICTX_XEXT_BIT )
+        {
+            / * Store extended states in pCtx * /
+            State.Sig.SaveMask |= __TBIExtCtx.Ctx.SaveMask;
+        
+            (void) __TBICtxSave( State, (void *) __TBIExtCtx.Ctx.pExt );
+            __TBIExtCtx.Val   = 0;
+        }
+        
+    and when restoring task states call __TBICtxRestore-
+    
+        / * Restore state from pCtx * /
+        State.Sig.pCtx     = pCtx;
+        State.Sig.SaveMask = pCtx->SaveMask;
+
+        if ( State.Sig.SaveMask & TBICTX_XEXT_BIT )
+        {
+            / * Restore extended states from pCtx * /
+            __TBIExtCtx.Val = pCtx->Ext.Val;
+            
+            (void) __TBICtxRestore( State, (void *) __TBIExtCtx.Ctx.pExt );
+        }   
+   
+ */
+
+/* Critical thread state save area */
+#ifndef __ASSEMBLY__
+typedef struct _tbictx_tag_ {
+    /* TXSTATUS_FLAG_BITS and TXSTATUS_LSM_STEP_BITS from TXSTATUS */
+    short Flags;
+    /* Mask indicates any extended context state saved; 0 -> Never run */
+    short SaveMask;
+    /* Saved PC value */
+    int CurrPC;
+    /* Saved critical register states */
+    TBIDUAL DX[8];
+    /* Background control register states - for cores without catch buffer
+       base in DIVTIME the TXSTATUS bits RPVALID and RPMASK are stored with
+       the real state TXDIVTIME in CurrDIVTIME */
+    int CurrRPT, CurrBPOBITS, CurrMODE, CurrDIVTIME;
+    /* Saved AX register states */
+    TBIDUAL AX[2];
+    TBIEXTCTX Ext;
+    TBIDUAL AX3[TBICTX_AX_REGS-3];
+    
+    /* Any CBUF state to be restored by a handler return must be stored here.
+       Other extended state can be stored anywhere - see __TBICtxSave and
+       __TBICtxRestore. */
+    
+} TBICTX, *PTBICTX;
+
+#ifdef TBI_FASTINT_1_4
+typedef struct _tbictx2_tag_ {
+    TBIDUAL AX[2];    /* AU.0, AU.1 */
+    TBIDUAL DX[2];    /* DU.0, DU.4 */
+    int     CurrMODE;
+    int     CurrRPT;
+    int     CurrSTATUS;
+    void   *CurrPC;   /* PC in PC address space */
+} TBICTX2, *PTBICTX2;
+/* TBICTX2 is followed by:
+ *   TBICTXEXTCB0                if TXSTATUS.CBMarker
+ *   TBIDUAL * TXSTATUS.IRPCount if TXSTATUS.IRPCount > 0
+ *   TBICTXGP                    if using __TBIStdRootIntHandler or __TBIStdCtxSwitchRootIntHandler
+ */
+
+typedef struct _tbictxgp_tag_ {
+    short    DspramSizes;
+    short    SaveMask;
+    void    *pExt;
+    TBIDUAL  DX[6]; /* DU.1-DU.3, DU.5-DU.7 */
+    TBIDUAL  AX[2]; /* AU.2-AU.3 */
+} TBICTXGP, *PTBICTXGP;
+
+#define TBICTXGP_DspramSizes (0)
+#define TBICTXGP_SaveMask    (TBICTXGP_DspramSizes + 2)
+#define TBICTXGP_MAX_BYTES   (2 + 2 + 4 + 8*(6+2))
+
+#endif
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Byte offsets of fields within TBICTX */
+#define TBICTX_Flags            (0)
+#define TBICTX_SaveMask         (2)
+#define TBICTX_CurrPC           (4)
+#define TBICTX_DX               (2 + 2 + 4)
+#define TBICTX_CurrRPT          (2 + 2 + 4 + 8 * 8)
+#define TBICTX_CurrMODE         (2 + 2 + 4 + 8 * 8 + 4 + 4)
+#define TBICTX_AX               (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4)
+#define TBICTX_Ext              (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4 + 2 * 8)
+#define TBICTX_Ext_AX2          (TBICTX_Ext + TBIEXTCTX_AX2)
+#define TBICTX_Ext_AX2_U0       (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U0)
+#define TBICTX_Ext_AX2_U1       (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U1)
+#define TBICTX_Ext_Ctx_pExt     (TBICTX_Ext + TBIEXTCTX_Ctx_pExt)
+#define TBICTX_Ext_Ctx_SaveMask (TBICTX_Ext + TBIEXTCTX_Ctx_SaveMask)
+
+#ifdef TBI_FASTINT_1_4
+#define TBICTX2_BYTES (8 * 2 + 8 * 2 + 4 + 4 + 4 + 4)
+#define TBICTXEXTCB0_BYTES (4 + 4 + 8)
+
+#define TBICTX2_CRIT_MAX_BYTES (TBICTX2_BYTES + TBICTXEXTCB0_BYTES + 6 * TBIDUAL_BYTES)
+#define TBI_SWITCH_NEXT_PC(PC, EXTRA) ((PC) + (EXTRA & 1) ? 8 : 4)
+#endif
+
+#ifndef __ASSEMBLY__
+/* Extended thread state save areas - catch buffer state element */
+typedef struct _tbictxextcb0_tag_ {
+    /* Flags data and address value - see METAC_CATCH_VALUES in machine.h */
+    unsigned long CBFlags, CBAddr;
+    /* 64-bit data */
+    TBIDUAL CBData;
+    
+} TBICTXEXTCB0, *PTBICTXEXTCB0;
+
+/* Read pipeline state saved on later cores after single catch buffer slot */
+typedef struct _tbictxextrp6_tag_ {
+    /* RPMask is TXSTATUS_RPMASK_BITS only, reserved is undefined */
+    unsigned long RPMask, Reserved0;
+    TBIDUAL CBData[6];
+    
+} TBICTXEXTRP6, *PTBICTXEXTRP6;
+
+/* Extended thread state save areas - 8 DU register pairs */
+typedef struct _tbictxextbb8_tag_ {
+    /* Remaining Data unit registers in 64-bit pairs */
+    TBIDUAL UX[8];
+    
+} TBICTXEXTBB8, *PTBICTXEXTBB8;
+
+/* Extended thread state save areas - 3 AU register pairs */
+typedef struct _tbictxextbb3_tag_ {
+    /* Remaining Address unit registers in 64-bit pairs */
+    TBIDUAL UX[3];
+    
+} TBICTXEXTBB3, *PTBICTXEXTBB3;
+
+/* Extended thread state save areas - 4 AU register pairs or 4 FX pairs */
+typedef struct _tbictxextbb4_tag_ {
+    /* Remaining Address unit or FPU registers in 64-bit pairs */
+    TBIDUAL UX[4];
+    
+} TBICTXEXTBB4, *PTBICTXEXTBB4;
+
+/* Extended thread state save areas - Hardware loop states (max 2) */
+typedef struct _tbictxexthl2_tag_ {
+    /* Hardware looping register states */
+    TBIDUAL Start, End, Count;
+    
+} TBICTXEXTHL2, *PTBICTXEXTHL2;
+
+/* Extended thread state save areas - DSP register states */
+typedef struct _tbictxexttdp_tag_ {
+    /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */
+    TBIDUAL Acc32[1];
+    /* DSP > 32-bit accumulator bits 63:32 of ACX.0 (zero-extended) */
+    TBIDUAL Acc64[1];
+    /* Twiddle register state, and three phase increment states */
+    TBIDUAL PReg[4];
+    /* Modulo region size, padded to 64-bits */
+    int CurrMRSIZE, Reserved0;
+    
+} TBICTXEXTTDP, *PTBICTXEXTTDP;
+
+/* Extended thread state save areas - DSP register states including DSP RAM */
+typedef struct _tbictxexttdpr_tag_ {
+    /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */
+    TBIDUAL Acc32[1];
+    /* DSP 40-bit accumulator register state (Bits 39:8 of ACX.0) */
+    TBIDUAL Acc40[1];
+    /* DSP RAM Pointers */
+    TBIDUAL RP0[2],  WP0[2],  RP1[2],  WP1[2];
+    /* DSP RAM Increments */
+    TBIDUAL RPI0[2], WPI0[2], RPI1[2], WPI1[2];
+    /* Template registers */
+    unsigned long Tmplt[16];
+    /* Modulo address region size and DSP RAM module region sizes */
+    int CurrMRSIZE, CurrDRSIZE;
+    
+} TBICTXEXTTDPR, *PTBICTXEXTTDPR;
+
+#ifdef TBI_1_4
+/* The METAC_ID_CORE register state is a marker for the FPU
+   state that is then stored after this core header structure.  */
+#define TBICTXEXTFPU_CONFIG_MASK  ( (METAC_COREID_NOFPACC_BIT+     \
+                                     METAC_COREID_CFGFPU_BITS ) << \
+                                     METAC_COREID_CONFIG_BITS       )
+
+/* Recorded FPU exception state from TXDEFR in DefrFpu */
+#define TBICTXEXTFPU_DEFRFPU_MASK (TXDEFR_FPU_FE_BITS)
+
+/* Extended thread state save areas - FPU register states */
+typedef struct _tbictxextfpu_tag_ {
+    /* Stored METAC_CORE_ID CONFIG */
+    int CfgFpu;
+    /* Stored deferred TXDEFR bits related to FPU
+     *
+     * This is encoded as follows in order to fit into 16-bits:
+     * DefrFPU:15 - 14 <= 0
+     *        :13 -  8 <= TXDEFR:21-16
+     *        : 7 -  6 <= 0
+     *        : 5 -  0 <= TXDEFR:5-0
+     */
+    short DefrFpu;
+
+    /* TXMODE bits related to FPU */
+    short ModeFpu;
+    
+    /* FPU Even/Odd register states */
+    TBIDUAL FX[4];
+   
+    /* if CfgFpu & TBICTX_CFGFPU_FX16_BIT  -> 1 then TBICTXEXTBB4 holds FX.8-15 */
+    /* if CfgFpu & TBICTX_CFGFPU_NOACF_BIT -> 0 then TBICTXEXTFPACC holds state */
+} TBICTXEXTFPU, *PTBICTXEXTFPU;
+
+/* Extended thread state save areas - FPU accumulator state */
+typedef struct _tbictxextfpacc_tag_ {
+    /* FPU accumulator register state - three 64-bit parts */
+    TBIDUAL FAcc32[3];
+    
+} TBICTXEXTFPACC, *PTBICTXEXTFPACC;
+#endif
+
+/* Prototype TBI structure */
+struct _tbi_tag_ ;
+
+/* A 64-bit return value used commonly in the TBI APIs */
+typedef union _tbires_tag_ {
+    /* Save and load this value to get/set the whole result quickly */
+    long long Val;
+
+    /* Parameter of a fnSigs or __TBICtx* call */
+    struct _tbires_sig_tag_ { 
+        /* TXMASK[I] bits zeroed up to and including current trigger level */
+        unsigned short TrigMask;
+        /* Control bits for handlers - see PTBIAPIFN documentation below */
+        unsigned short SaveMask;
+        /* Pointer to the base register context save area of the thread */
+        PTBICTX pCtx;
+    } Sig;
+
+    /* Result of TBIThrdPrivId call */
+    struct _tbires_thrdprivid_tag_ {
+        /* Basic thread identifier; just TBID_THREAD_BITS */
+        int Id;
+        /* Non-thread-number bits; TBID_ISTAT_BIT+TBID_PSTAT_BIT */
+        int Priv;
+    } Thrd;
+
+    /* Parameter and Result of a __TBISwitch call */
+    struct _tbires_switch_tag_ { 
+        /* Parameter passed across context switch */
+        void *pPara;
+        /* Thread context of other Thread including restore flags */
+        PTBICTX pCtx;
+    } Switch;
+    
+    /* For extended S/W events only */
+    struct _tbires_ccb_tag_ {
+        void *pCCB;
+        int COff;
+    } CCB;
+
+    struct _tbires_tlb_tag_ {
+        int Leaf;  /* TLB Leaf data */
+        int Flags; /* TLB Flags */
+    } Tlb;
+
+#ifdef TBI_FASTINT_1_4
+    struct _tbires_intr_tag_ {
+      short    TrigMask;
+      short    SaveMask;
+      PTBICTX2 pCtx;
+    } Intr;
+#endif
+
+} TBIRES, *PTBIRES;
+#endif /* ifndef __ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+/* Prototype for all signal handler functions, called via ___TBISyncTrigger or
+   ___TBIASyncTrigger.
+   
+   State.Sig.TrigMask will indicate the bits set within TXMASKI at
+          the time of the handler call that have all been cleared to prevent
+          nested interrupts occurring immediately.
+   
+   State.Sig.SaveMask is a bit-mask which will be set to Zero when a trigger
+          occurs at background level and TBICTX_CRIT_BIT and optionally
+          TBICTX_CBUF_BIT when a trigger occurs at interrupt level.
+          
+          TBICTX_CBUF_BIT reflects the state of TXSTATUS_CBMARKER_BIT for
+          the interrupted background thread.
+   
+   State.Sig.pCtx will point at a TBICTX structure generated to hold the
+          critical state of the interrupted thread at interrupt level and
+          should be set to NULL when called at background level.
+        
+   Triggers will indicate the status of TXSTAT or TXSTATI sampled by the
+          code that called the handler.
+          
+   InstOrSWSId is defined firstly as 'Inst' if the SigNum is TBID_SIGNUM_SWx
+          and hold the actual SWITCH instruction detected, secondly if SigNum
+          is TBID_SIGNUM_SWS the 'SWSId' is defined to hold the Id of the
+          software signal detected, in other cases the value of this
+          parameter is undefined.
+   
+   pTBI   points at the PTBI structure related to the thread and processing
+          level involved.
+
+   TBIRES return value at both processing levels is similar in terms of any
+          changes that the handler makes. By default the State argument value
+          passed in should be returned.
+          
+      Sig.TrigMask value is bits to OR back into TXMASKI when the handler
+          completes to enable currently disabled interrupts.
+          
+      Sig.SaveMask value is ignored.
+   
+      Sig.pCtx is ignored.
+
+ */
+typedef TBIRES (*PTBIAPIFN)( TBIRES State, int SigNum,
+                             int Triggers, int InstOrSWSId,
+                             volatile struct _tbi_tag_ *pTBI );
+#endif /* ifndef __ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+/* The global memory map is described by a list of segment descriptors */
+typedef volatile struct _tbiseg_tag_ {
+    volatile struct _tbiseg_tag_ *pLink;
+    int Id;                           /* Id of the segment */
+    TBISPIN Lock;                     /* Spin-lock for struct (normally 0) */
+    unsigned int Bytes;               /* Size of region in bytes */
+    void *pGAddr;                     /* Base addr of region in global space */
+    void *pLAddr;                     /* Base addr of region in local space */
+    int Data[2];                      /* Segment specific data (may be extended) */
+
+} TBISEG, *PTBISEG;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Offsets of fields in TBISEG structure */
+#define TBISEG_pLink    ( 0)
+#define TBISEG_Id       ( 4)
+#define TBISEG_Lock     ( 8)
+#define TBISEG_Bytes    (12)
+#define TBISEG_pGAddr   (16)
+#define TBISEG_pLAddr   (20)
+#define TBISEG_Data     (24)
+
+#ifndef __ASSEMBLY__
+typedef volatile struct _tbi_tag_ {
+    int SigMask;                      /* Bits set to represent S/W events */
+    PTBIKICK pKick;                   /* Kick addr for S/W events */
+    void *pCCB;                       /* Extended S/W events */
+    PTBISEG pSeg;                     /* Related segment structure */
+    PTBIAPIFN fnSigs[TBID_SIGNUM_MAX+1];/* Signal handler API table */
+} *PTBI, TBI;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Byte offsets of fields within TBI */
+#define TBI_SigMask     (0)
+#define TBI_pKick       (4)
+#define TBI_pCCB        (8)
+#define TBI_pSeg       (12)
+#define TBI_fnSigs     (16)
+
+#ifdef TBI_1_4
+#ifndef __ASSEMBLY__
+/* This handler should be used for TBID_SIGNUM_DFR */
+extern TBIRES __TBIHandleDFR ( TBIRES State, int SigNum,
+                               int Triggers, int InstOrSWSId,
+                               volatile struct _tbi_tag_ *pTBI );
+#endif
+#endif
+
+/* String table entry - special values */
+#define METAG_TBI_STRS (0x5300) /* Tag      : If entry is valid */
+#define METAG_TBI_STRE (0x4500) /* Tag      : If entry is end of table */
+#define METAG_TBI_STRG (0x4700) /* Tag      : If entry is a gap */
+#define METAG_TBI_STRX (0x5A00) /* TransLen : If no translation present */
+
+#ifndef __ASSEMBLY__
+typedef volatile struct _tbistr_tag_ {
+    short Bytes;                      /* Length of entry in Bytes */
+    short Tag;                        /* Normally METAG_TBI_STRS(0x5300) */
+    short Len;                        /* Length of the string entry (incl null) */
+    short TransLen;                   /* Normally METAG_TBI_STRX(0x5A00) */
+    char String[8];                   /* Zero terminated (may-be bigger) */
+
+} TBISTR, *PTBISTR;
+#endif /* ifndef __ASSEMBLY__ */
+
+/* Cache size information - available as fields of Data[1] of global heap
+   segment */
+#define METAG_TBI_ICACHE_SIZE_S    0             /* see comments below */
+#define METAG_TBI_ICACHE_SIZE_BITS 0x0000000F
+#define METAG_TBI_ICACHE_FILL_S    4
+#define METAG_TBI_ICACHE_FILL_BITS 0x000000F0
+#define METAG_TBI_DCACHE_SIZE_S    8
+#define METAG_TBI_DCACHE_SIZE_BITS 0x00000F00
+#define METAG_TBI_DCACHE_FILL_S    12
+#define METAG_TBI_DCACHE_FILL_BITS 0x0000F000
+
+/* METAG_TBI_xCACHE_SIZE
+   Describes the physical cache size rounded up to the next power of 2
+   relative to a 16K (2^14) cache. These sizes are encoded as a signed addend
+   to this base power of 2, for example
+      4K -> 2^12 -> -2  (i.e. 12-14)
+      8K -> 2^13 -> -1
+     16K -> 2^14 ->  0
+     32K -> 2^15 -> +1
+     64K -> 2^16 -> +2
+    128K -> 2^17 -> +3
+
+   METAG_TBI_xCACHE_FILL
+   Describes the physical cache size within the power of 2 area given by
+   the value above. For example a 10K cache may be represented as having
+   nearest size 16K with a fill of 10 sixteenths. This is encoded as the
+   number of unused 1/16ths, for example
+     0000 ->  0 -> 16/16
+     0001 ->  1 -> 15/16
+     0010 ->  2 -> 14/16
+     ...
+     1111 -> 15 ->  1/16
+ */
+
+#define METAG_TBI_CACHE_SIZE_BASE_LOG2 14
+
+/* Each declaration made by this macro generates a TBISTR entry */
+#ifndef __ASSEMBLY__
+#define TBISTR_DECL( Name, Str )                                       \
+    __attribute__ ((__section__ (".tbistr") )) const char Name[] = #Str
+#endif
+
+/* META timer values - see below for Timer support routines */
+#define TBI_TIMERWAIT_MIN (-16)         /* Minimum 'recommended' period */
+#define TBI_TIMERWAIT_MAX (-0x7FFFFFFF) /* Maximum 'recommended' period */
+
+#ifndef __ASSEMBLY__
+/* These macros allow direct access from C to any register known to the
+   assembler or defined in machine.h. Example candidates are TXTACTCYC,
+   TXIDLECYC, and TXPRIVEXT. Note that when higher level macros and routines
+   like the timer and trigger handling features below these should be used in
+   preference to this direct low-level access mechanism. */
+#define TBI_GETREG( Reg )                                  __extension__ ({\
+   int __GRValue;                                                          \
+   __asm__ volatile ("MOV\t%0," #Reg "\t/* (*TBI_GETREG OK) */" :          \
+                     "=r" (__GRValue) );                                   \
+    __GRValue;                                                            })
+
+#define TBI_SETREG( Reg, Value )                                       do {\
+   int __SRValue = Value;                                                  \
+   __asm__ volatile ("MOV\t" #Reg ",%0\t/* (*TBI_SETREG OK) */" :          \
+                     : "r" (__SRValue) );                       } while (0)
+
+#define TBI_SWAPREG( Reg, Value )                                      do {\
+   int __XRValue = (Value);                                                \
+   __asm__ volatile ("SWAP\t" #Reg ",%0\t/* (*TBI_SWAPREG OK) */" :        \
+                     "=r" (__XRValue) : "0" (__XRValue) );                 \
+   Value = __XRValue;                                           } while (0)
+
+/* Obtain and/or release global critical section lock given that interrupts
+   are already disabled and/or should remain disabled. */
+#define TBI_NOINTSCRITON                                             do {\
+   __asm__ volatile ("LOCK1\t\t/* (*TBI_NOINTSCRITON OK) */");} while (0)
+#define TBI_NOINTSCRITOFF                                             do {\
+   __asm__ volatile ("LOCK0\t\t/* (*TBI_NOINTSCRITOFF OK) */");} while (0)
+/* Optimised in-lining versions of the above macros */
+
+#define TBI_LOCK( TrigState )                                          do {\
+   int __TRValue;                                                          \
+   int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000;                \
+   __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_LOCK ... */\n\t"               \
+                     "SWAP\t%0,TXMASKI\t/* ... */\n\t"                     \
+                     "LOCK2\t\t/* ... */\n\t"                              \
+                     "SETD\t[%1+#0x40],D1RtP /* ... OK) */" :              \
+                     "=r&" (__TRValue) : "u" (__ALOCKHI) );                \
+   TrigState = __TRValue;                                       } while (0)
+#define TBI_CRITON( TrigState )                                        do {\
+   int __TRValue;                                                          \
+   __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_CRITON ... */\n\t"             \
+                     "SWAP\t%0,TXMASKI\t/* ... */\n\t"                     \
+                     "LOCK1\t\t/* ... OK) */" :                            \
+                     "=r" (__TRValue) );                                   \
+   TrigState = __TRValue;                                       } while (0)
+
+#define TBI_INTSX( TrigState )                                         do {\
+   int __TRValue = TrigState;                                              \
+   __asm__ volatile ("SWAP\t%0,TXMASKI\t/* (*TBI_INTSX OK) */" :           \
+                     "=r" (__TRValue) : "0" (__TRValue) );                 \
+   TrigState = __TRValue;                                       } while (0)
+
+#define TBI_UNLOCK( TrigState )                                        do {\
+   int __TRValue = TrigState;                                              \
+   int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000;                \
+   __asm__ volatile ("SETD\t[%1+#0x00],D1RtP\t/* (*TBI_UNLOCK ... */\n\t"  \
+                     "LOCK0\t\t/* ... */\n\t"                              \
+                     "MOV\tTXMASKI,%0\t/* ... OK) */" :                    \
+                     : "r" (__TRValue), "u" (__ALOCKHI) );      } while (0)
+
+#define TBI_CRITOFF( TrigState )                                       do {\
+   int __TRValue = TrigState;                                              \
+   __asm__ volatile ("LOCK0\t\t/* (*TBI_CRITOFF ... */\n\t"                \
+                     "MOV\tTXMASKI,%0\t/* ... OK) */" :                    \
+                     : "r" (__TRValue) );                       } while (0)
+
+#define TBI_TRIGSX( SrcDst ) do { TBI_SWAPREG( TXMASK, SrcDst );} while (0)
+
+/* Composite macros to perform logic ops on INTS or TRIGS masks */
+#define TBI_INTSOR( Bits )                                              do {\
+    int __TT = 0; TBI_INTSX(__TT);                                          \
+    __TT |= (Bits); TBI_INTSX(__TT);                             } while (0)
+    
+#define TBI_INTSAND( Bits )                                             do {\
+    int __TT = 0; TBI_INTSX(__TT);                                          \
+    __TT &= (Bits); TBI_INTSX(__TT);                             } while (0)
+
+#ifdef TBI_1_4
+#define TBI_DEFRICTRLSOR( Bits )                                        do {\
+    int __TT = TBI_GETREG( CT.20 );                                         \
+    __TT |= (Bits); TBI_SETREG( CT.20, __TT);                    } while (0)
+    
+#define TBI_DEFRICTRLSAND( Bits )                                       do {\
+    int __TT = TBI_GETREG( TXDEFR );                                        \
+    __TT &= (Bits); TBI_SETREG( CT.20, __TT);                    } while (0)
+#endif
+
+#define TBI_TRIGSOR( Bits )                                             do {\
+    int __TT = TBI_GETREG( TXMASK );                                        \
+    __TT |= (Bits); TBI_SETREG( TXMASK, __TT);                   } while (0)
+    
+#define TBI_TRIGSAND( Bits )                                            do {\
+    int __TT = TBI_GETREG( TXMASK );                                        \
+    __TT &= (Bits); TBI_SETREG( TXMASK, __TT);                   } while (0)
+
+/* Macros to disable and re-enable interrupts using TBI_INTSX, deliberate
+   traps and exceptions can still be handled within the critical section. */
+#define TBI_STOPINTS( Value )                                           do {\
+    int __TT = TBI_GETREG( TXMASKI );                                       \
+    __TT &= TXSTATI_BGNDHALT_BIT; TBI_INTSX( __TT );                        \
+    Value = __TT;                                                } while (0)
+#define TBI_RESTINTS( Value )                                           do {\
+    int __TT = Value; TBI_INTSX( __TT );                         } while (0)
+
+/* Return pointer to segment list at current privilege level */
+PTBISEG __TBISegList( void );
+
+/* Search the segment list for a match given Id, pStart can be NULL */
+PTBISEG __TBIFindSeg( PTBISEG pStart, int Id );
+
+/* Prepare a new segment structure using space from within another */
+PTBISEG __TBINewSeg( PTBISEG pFromSeg, int Id, unsigned int Bytes );
+
+/* Prepare a new segment using any global or local heap segments available */
+PTBISEG __TBIMakeNewSeg( int Id, unsigned int Bytes );
+
+/* Insert a new segment into the segment list so __TBIFindSeg can locate it */
+void __TBIAddSeg( PTBISEG pSeg );
+#define __TBIADDSEG_DEF     /* Some versions failed to define this */
+
+/* Return Id of current thread; TBID_ISTAT_BIT+TBID_THREAD_BITS */
+int __TBIThreadId( void );
+
+/* Return TBIRES.Thrd data for current thread */
+TBIRES __TBIThrdPrivId( void );
+
+/* Return pointer to current threads TBI root block.
+   Id implies whether Int or Background root block is required */
+PTBI __TBI( int Id );
+
+/* Try to set Mask bit using the spin-lock protocol, return 0 if fails and 
+   new state if succeeds */
+int __TBIPoll( PTBISPIN pLock, int Mask );
+
+/* Set Mask bits via the spin-lock protocol in *pLock, return new state */
+int __TBISpin( PTBISPIN pLock, int Mask );
+
+/* Default handler set up for all TBI.fnSigs entries during initialisation */
+TBIRES __TBIUnExpXXX( TBIRES State, int SigNum,
+                   int Triggers, int Inst, PTBI pTBI );
+
+/* Call this routine to service triggers at background processing level. The
+   TBID_POLL_BIT of the Id parameter value will be used to indicate that the
+   routine should return if no triggers need to be serviced initially. If this
+   bit is not set the routine will block until one trigger handler is serviced
+   and then behave like the poll case servicing any remaining triggers
+   actually outstanding before returning. Normally the State parameter should
+   be simply initialised to zero and the result should be ignored, other
+   values/options are for internal use only. */
+TBIRES __TBISyncTrigger( TBIRES State, int Id );
+
+/* Call this routine to enable processing of triggers by signal handlers at
+   interrupt level. The State parameter value passed is returned by this
+   routine. The State.Sig.TrigMask field also specifies the initial
+   state of the interrupt mask register TXMASKI to be setup by the call.
+   The other parts of the State parameter are ignored unless the PRIV bit is
+   set in the SaveMask field. In this case the State.Sig.pCtx field specifies
+   the base of the stack to which the interrupt system should switch into
+   as it saves the state of the previously executing code. In the case the
+   thread will be unprivileged as it continues execution at the return
+   point of this routine and its future state will be effectively never
+   trusted to be valid. */
+TBIRES __TBIASyncTrigger( TBIRES State );
+
+/* Call this to swap soft threads executing at the background processing level.
+   The TBIRES returned to the new thread will be the same as the NextThread
+   value specified to the call. The NextThread.Switch.pCtx value specifies
+   which thread context to restore and the NextThread.Switch.Para value can
+   hold an arbitrary expression to be passed between the threads. The saved
+   state of the previous thread will be stored in a TBICTX descriptor created
+   on its stack and the address of this will be stored into the *rpSaveCtx
+   location specified. */
+TBIRES __TBISwitch( TBIRES NextThread, PTBICTX *rpSaveCtx );
+
+/* Call this to initialise a stack frame ready for further use, up to four
+   32-bit arguments may be specified after the fixed args to be passed via
+   the new stack pStack to the routine specified via fnMain. If the
+   main-line routine ever returns the thread will operate as if main itself
+   had returned and terminate with the return code given. */
+typedef int (*PTBIMAINFN)( TBIRES Arg /*, <= 4 additional 32-bit args */ );
+PTBICTX __TBISwitchInit( void *pStack, PTBIMAINFN fnMain, ... );
+
+/* Call this to resume a thread from a saved synchronous TBICTX state.
+   The TBIRES returned to the new thread will be the same as the NextThread
+   value specified to the call. The NextThread.Switch.pCtx value specifies
+   which thread context to restore and the NextThread.Switch.Para value can
+   hold an arbitrary expression to be passed between the threads. The context
+   of the calling thread is lost and this routine never returns to the
+   caller. The TrigsMask value supplied is ored into TXMASKI to enable
+   interrupts after the context of the new thread is established. */
+void __TBISyncResume( TBIRES NextThread, int TrigsMask );
+
+/* Call these routines to save and restore the extended states of
+   scheduled tasks. */
+void *__TBICtxSave( TBIRES State, void *pExt );
+void *__TBICtxRestore( TBIRES State, void *pExt );
+
+#ifdef TBI_1_4
+#ifdef TBI_FASTINT_1_4
+/* Call these routines to copy the GP state to a separate buffer
+ * Only necessary for context switching.
+ */
+PTBICTXGP __TBICtx2SaveCrit( PTBICTX2 pCurrentCtx, PTBICTX2 pSaveCtx );
+void *__TBICtx2SaveGP( PTBICTXGP pCurrentCtxGP, PTBICTXGP pSaveCtxGP );
+
+/* Call these routines to save and restore the extended states of
+   scheduled tasks. */
+void *__TBICtx2Save( PTBICTXGP pCtxGP, short SaveMask, void *pExt );
+void *__TBICtx2Restore( PTBICTX2 pCtx, short SaveMask, void *pExt );
+#endif
+
+/* If FPAC flag is set then significant FPU context exists. Call these routine
+   to save and restore it */
+void *__TBICtxFPUSave( TBIRES State, void *pExt );
+void *__TBICtxFPURestore( TBIRES State, void *pExt );
+
+#ifdef TBI_FASTINT_1_4
+extern void *__TBICtx2FPUSave (PTBICTXGP, short, void*);
+extern void *__TBICtx2FPURestore (PTBICTXGP, short, void*);
+#endif
+#endif
+
+#ifdef TBI_1_4
+/* Call these routines to save and restore DSPRAM. */
+void *__TBIDspramSaveA (short DspramSizes, void *pExt);
+void *__TBIDspramSaveB (short DspramSizes, void *pExt);
+void *__TBIDspramRestoreA (short DspramSizes, void *pExt);
+void *__TBIDspramRestoreB (short DspramSizes, void *pExt);
+#endif
+
+/* This routine should be used at the entrypoint of interrupt handlers to
+   re-enable higher priority interrupts and/or save state from the previously
+   executing background code. State is a TBIRES.Sig parameter with NoNestMask
+   indicating the triggers (if any) that should remain disabled and SaveMask
+   CBUF bit indicating the if the hardware catch buffer is dirty. Optionally
+   any number of extended state bits X??? including XCBF can be specified to
+   force a nested state save call to __TBICtxSave before the current routine
+   continues. (In the latter case __TBICtxRestore should be called to restore
+   any extended states before the background thread of execution is resumed) 
+   
+   By default (no X??? bits specified in SaveMask) this routine performs a
+   sub-call to __TBICtxSave with the pExt and State parameters specified IF
+   some triggers could be serviced while the current interrupt handler
+   executes and the hardware catch buffer is actually dirty. In this case
+   this routine provides the XCBF bit in State.Sig.SaveMask to force the
+   __TBICtxSave to extract the current catch state.
+   
+   The NoNestMask parameter should normally indicate that the same or lower
+   triggers than those provoking the current handler call should not be
+   serviced in nested calls, zero may be specified if all possible interrupts
+   are to be allowed.
+   
+   The TBIRES.Sig value returned will be similar to the State parameter
+   specified with the XCBF bit ORed into its SaveMask if a context save was
+   required and fewer bits set in its TrigMask corresponding to the same/lower
+   priority interrupt triggers still not enabled. */
+TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask );
+
+/* This routine causes the TBICTX structure specified in State.Sig.pCtx to
+   be restored. This implies that execution will not return to the caller.
+   The State.Sig.TrigMask field will be restored during the context switch
+   such that any immediately occurring interrupts occur in the context of the
+   newly specified task. The State.Sig.SaveMask parameter is ignored. */
+void __TBIASyncResume( TBIRES State );
+
+/* Call this routine to enable fastest possible processing of one or more
+   interrupt triggers via a unified signal handler. The handler concerned
+   must simply return after servicing the related hardware.
+   The State.Sig.TrigMask parameter indicates the interrupt triggers to be
+   enabled and the Thin.Thin.fnHandler specifies the routine to call and
+   the whole Thin parameter value will be passed to this routine unaltered as
+   its first parameter. */
+void __TBIASyncThin( TBIRES State, TBIRES Thin );
+
+/* Do this before performing your own direct spin-lock access - use TBI_LOCK */
+int __TBILock( void );
+
+/* Do this after performing your own direct spin-lock access - use TBI_UNLOCK */
+void __TBIUnlock( int TrigState );
+
+/* Obtain and release global critical section lock - only stops execution
+   of interrupts on this thread and similar critical section code on other
+   local threads - use TBI_CRITON or TBI_CRITOFF */
+int __TBICritOn( void );
+void __TBICritOff( int TrigState );
+
+/* Change INTS (TXMASKI) - return old state - use TBI_INTSX */
+int __TBIIntsX( int NewMask );
+
+/* Change TRIGS (TXMASK) - return old state - use TBI_TRIGSX */
+int __TBITrigsX( int NewMask );
+
+/* This function initialises a timer for first use, only the TBID_ISTAT_BIT
+   of the Id parameter is used to indicate which timer is to be modified. The
+   Wait value should either be zero to disable the timer concerned or be in
+   the recommended TBI_TIMERWAIT_* range to specify the delay required before
+   the first timer trigger occurs.
+      
+   The TBID_ISTAT_BIT of the Id parameter similarly affects all other timer
+   support functions (see below). */
+void __TBITimerCtrl( int Id, int Wait );
+
+/* This routine returns a 64-bit time stamp value that is initialised to zero
+   via a __TBITimerCtrl timer enabling call. */
+long long __TBITimeStamp( int Id );
+
+/* To manage a periodic timer each period elapsed should be subtracted from
+   the current timer value to attempt to set up the next timer trigger. The
+   Wait parameter should be a value in the recommended TBI_TIMERWAIT_* range.
+   The return value is the new aggregate value that the timer was updated to,
+   if this is less than zero then a timer trigger is guaranteed to be
+   generated after the number of ticks implied, if a positive result is
+   returned either iterative or step-wise corrective action must be taken to
+   resynchronise the timer and hence provoke a future timer trigger. */
+int __TBITimerAdd( int Id, int Wait );
+
+/* String table search function, pStart is first entry to check or NULL,
+   pStr is string data to search for and MatchLen is either length of string
+   to compare for an exact match or negative length to compare for partial
+   match. */
+const TBISTR *__TBIFindStr( const TBISTR *pStart,
+                            const char *pStr, int MatchLen );
+
+/* String table translate function, pStr is text to translate and Len is
+   its length. Value returned may not be a string pointer if the
+   translation value is really some other type, 64-bit alignment of the return
+   pointer is guaranteed so almost any type including a structure could be
+   located with this routine. */ 
+const void *__TBITransStr( const char *pStr, int Len );
+
+
+
+/* Arbitrary physical memory access windows, use different Channels to avoid
+   conflict/thrashing within a single piece of code. */
+void *__TBIPhysAccess( int Channel, int PhysAddr, int Bytes );
+void __TBIPhysRelease( int Channel, void *pLinAddr );
+
+#ifdef METAC_1_0
+/* Data cache function nullified because data cache is off */
+#define TBIDCACHE_FLUSH( pAddr )
+#define TBIDCACHE_PRELOAD( Type, pAddr ) ((Type) (pAddr))
+#define TBIDCACHE_REFRESH( Type, pAddr ) ((Type) (pAddr))
+#endif
+#ifdef METAC_1_1
+/* To flush a single cache line from the data cache using a linear address */
+#define TBIDCACHE_FLUSH( pAddr )          ((volatile char *) \
+                 (((unsigned int) (pAddr))>>LINSYSLFLUSH_S))[0] = 0
+
+extern void * __builtin_dcache_preload (void *);
+
+/* Try to ensure that the data at the address concerned is in the cache */
+#define TBIDCACHE_PRELOAD( Type, Addr )                                    \
+  ((Type) __builtin_dcache_preload ((void *)(Addr)))
+
+extern void * __builtin_dcache_refresh (void *);
+
+/* Flush any old version of data from address and re-load a new copy */
+#define TBIDCACHE_REFRESH( Type, Addr )                   __extension__ ({ \
+  Type __addr = (Type)(Addr);                                              \
+  (void)__builtin_dcache_refresh ((void *)(((unsigned int)(__addr))>>6));  \
+  __addr; })
+
+#endif
+#ifndef METAC_1_0
+#ifndef METAC_1_1
+/* Support for DCACHE builtin */
+extern void __builtin_dcache_flush (void *);
+
+/* To flush a single cache line from the data cache using a linear address */
+#define TBIDCACHE_FLUSH( Addr )                                            \
+  __builtin_dcache_flush ((void *)(Addr))
+
+extern void * __builtin_dcache_preload (void *);
+
+/* Try to ensure that the data at the address concerned is in the cache */
+#define TBIDCACHE_PRELOAD( Type, Addr )                                    \
+  ((Type) __builtin_dcache_preload ((void *)(Addr)))
+
+extern void * __builtin_dcache_refresh (void *);
+
+/* Flush any old version of data from address and re-load a new copy */
+#define TBIDCACHE_REFRESH( Type, Addr )                                    \
+  ((Type) __builtin_dcache_refresh ((void *)(Addr)))
+
+#endif
+#endif
+
+/* Flush the MMCU cache */
+#define TBIMCACHE_FLUSH() { ((volatile int *) LINSYSCFLUSH_MMCU)[0] = 0; }
+
+#ifdef METAC_2_1
+/* Obtain the MMU table entry for the specified address */
+#define TBIMTABLE_LEAFDATA(ADDR) TBIXCACHE_RD((int)(ADDR) & (-1<<6))
+
+#ifndef __ASSEMBLY__
+/* Obtain the full MMU table entry for the specified address */
+#define TBIMTABLE_DATA(ADDR) __extension__ ({ TBIRES __p;                     \
+                                              __p.Val = TBIXCACHE_RL((int)(ADDR) & (-1<<6));   \
+                                              __p; })
+#endif
+#endif
+
+/* Combine a physical base address, and a linear address
+ * Internal use only
+ */
+#define _TBIMTABLE_LIN2PHYS(PHYS, LIN, LMASK) (void*)(((int)(PHYS)&0xFFFFF000)\
+                                               +((int)(LIN)&(LMASK)))
+
+/* Convert a linear to a physical address */
+#define TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR)                                    \
+          (((LEAFDATA) & CRLINPHY0_VAL_BIT)                                   \
+              ? _TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR, 0x00000FFF)               \
+              : 0)
+
+/* Debug support - using external debugger or host */
+void __TBIDumpSegListEntries( void );
+void __TBILogF( const char *pFmt, ... );
+void __TBIAssert( const char *pFile, int LineNum, const char *pExp );
+void __TBICont( const char *pMsg, ... ); /* TBIAssert -> 'wait for continue' */
+
+/* Array of signal name data for debug messages */
+extern const char __TBISigNames[];
+#endif /* ifndef __ASSEMBLY__ */
+
+
+
+/* Scale of sub-strings in the __TBISigNames string list */
+#define TBI_SIGNAME_SCALE   4
+#define TBI_SIGNAME_SCALE_S 2
+
+#define TBI_1_3 
+
+#ifdef TBI_1_3
+
+#ifndef __ASSEMBLY__
+#define TBIXCACHE_RD(ADDR)                                 __extension__ ({\
+    void * __Addr = (void *)(ADDR);                                        \
+    int __Data;                                                            \
+    __asm__ volatile ( "CACHERD\t%0,[%1+#0]" :                             \
+                       "=r" (__Data) : "r" (__Addr) );                     \
+    __Data;                                                               })
+
+#define TBIXCACHE_RL(ADDR)                                 __extension__ ({\
+    void * __Addr = (void *)(ADDR);                                        \
+    long long __Data;                                                      \
+    __asm__ volatile ( "CACHERL\t%0,%t0,[%1+#0]" :                         \
+                       "=d" (__Data) : "r" (__Addr) );                     \
+    __Data;                                                               })
+
+#define TBIXCACHE_WD(ADDR, DATA)                                      do {\
+    void * __Addr = (void *)(ADDR);                                       \
+    int __Data = DATA;                                                    \
+    __asm__ volatile ( "CACHEWD\t[%0+#0],%1" :                            \
+                       : "r" (__Addr), "r" (__Data) );          } while(0)
+
+#define TBIXCACHE_WL(ADDR, DATA)                                      do {\
+    void * __Addr = (void *)(ADDR);                                       \
+    long long __Data = DATA;                                              \
+    __asm__ volatile ( "CACHEWL\t[%0+#0],%1,%t1" :                        \
+                       : "r" (__Addr), "r" (__Data) );          } while(0)
+
+#ifdef TBI_4_0
+
+#define TBICACHE_FLUSH_L1D_L2(ADDR)                                       \
+  TBIXCACHE_WD(ADDR, CACHEW_FLUSH_L1D_L2)
+#define TBICACHE_WRITEBACK_L1D_L2(ADDR)                                   \
+  TBIXCACHE_WD(ADDR, CACHEW_WRITEBACK_L1D_L2)
+#define TBICACHE_INVALIDATE_L1D(ADDR)                                     \
+  TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D)
+#define TBICACHE_INVALIDATE_L1D_L2(ADDR)                                  \
+  TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D_L2)
+#define TBICACHE_INVALIDATE_L1DTLB(ADDR)                                  \
+  TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1DTLB)
+#define TBICACHE_INVALIDATE_L1I(ADDR)                                     \
+  TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1I)
+#define TBICACHE_INVALIDATE_L1ITLB(ADDR)                                  \
+  TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1ITLB)
+
+#endif /* TBI_4_0 */
+#endif /* ifndef __ASSEMBLY__ */
+
+/* 
+ * Calculate linear PC value from real PC and Minim mode control, the LSB of
+ * the result returned indicates if address compression has occurred.
+ */
+#ifndef __ASSEMBLY__
+#define METAG_LINPC( PCVal )                                              (\
+    ( (TBI_GETREG(TXPRIVEXT) & TXPRIVEXT_MINIMON_BIT) != 0 ) ?           ( \
+        ( ((PCVal) & 0x00900000) == 0x00900000 ) ?                         \
+          (((PCVal) & 0xFFE00000) + (((PCVal) & 0x001FFFFC)>>1) + 1) :     \
+        ( ((PCVal) & 0x00800000) == 0x00000000 ) ?                         \
+          (((PCVal) & 0xFF800000) + (((PCVal) & 0x007FFFFC)>>1) + 1) :     \
+                                                             (PCVal)   )   \
+                                                                 : (PCVal) )
+#define METAG_LINPC_X2BIT 0x00000001       /* Make (Size>>1) if compressed */
+
+/* Convert an arbitrary Linear address into a valid Minim PC or return 0 */
+#define METAG_PCMINIM( LinVal )                                           (\
+        (((LinVal) & 0x00980000) == 0x00880000) ?                          \
+            (((LinVal) & 0xFFE00000) + (((LinVal) & 0x000FFFFE)<<1)) :     \
+        (((LinVal) & 0x00C00000) == 0x00000000) ?                          \
+            (((LinVal) & 0xFF800000) + (((LinVal) & 0x003FFFFE)<<1)) : 0   )
+
+/* Reverse a METAG_LINPC conversion step to return the original PCVal */
+#define METAG_PCLIN( LinVal )                              ( 0xFFFFFFFC & (\
+        ( (LinVal & METAG_LINPC_X2BIT) != 0 ) ? METAG_PCMINIM( LinVal ) :  \
+                                                               (LinVal)   ))
+
+/*
+ * Flush the MMCU Table cache privately for each thread. On cores that do not
+ * support per-thread flushing it will flush all threads mapping data.
+ */
+#define TBIMCACHE_TFLUSH(Thread)                                   do {\
+    ((volatile int *)( LINSYSCFLUSH_TxMMCU_BASE            +           \
+                      (LINSYSCFLUSH_TxMMCU_STRIDE*(Thread)) ))[0] = 0; \
+                                                             } while(0)
+
+/*
+ * To flush a single linear-matched cache line from the code cache. In
+ * cases where Minim is possible the METAC_LINPC operation must be used
+ * to pre-process the address being flushed.
+ */
+#define TBIICACHE_FLUSH( pAddr ) TBIXCACHE_WD (pAddr, CACHEW_ICACHE_BIT)
+
+/* To flush a single linear-matched mapping from code/data MMU table cache */
+#define TBIMCACHE_AFLUSH( pAddr, SegType )                                \
+    TBIXCACHE_WD(pAddr, CACHEW_TLBFLUSH_BIT + (                           \
+                 ((SegType) == TBID_SEGTYPE_TEXT) ? CACHEW_ICACHE_BIT : 0 ))
+
+/*
+ * To flush translation data corresponding to a range of addresses without
+ * using TBITCACHE_FLUSH to flush all of this threads translation data. It
+ * is necessary to know what stride (>= 4K) must be used to flush a specific
+ * region.
+ *
+ * For example direct mapped regions use the maximum page size (512K) which may
+ * mean that only one flush is needed to cover the sub-set of the direct
+ * mapped area used since it was setup.
+ *
+ * The function returns the stride on which flushes should be performed.
+ *
+ * If 0 is returned then the region is not subject to MMU caching, if -1 is
+ * returned then this indicates that only TBIMCACHE_TFLUSH can be used to
+ * flush the region concerned rather than TBIMCACHE_AFLUSH which this
+ * function is designed to support.
+ */
+int __TBIMMUCacheStride( const void *pStart, int Bytes );
+
+/*
+ * This function will use the above lower level functions to achieve a MMU
+ * table data flush in as optimal a fashion as possible. On a system that
+ * supports linear address based caching this function will also call the
+ * code or data cache flush functions to maintain address/data coherency.
+ *
+ * SegType should be TBID_SEGTYPE_TEXT if the address range is for code or
+ * any other value such as TBID_SEGTYPE_DATA for data. If an area is
+ * used in both ways then call this function twice; once for each.
+ */
+void __TBIMMUCacheFlush( const void *pStart, int Bytes, int SegType );
+
+/*
+ * Cached Core mode setup and flush functions allow one code and one data
+ * region of the corresponding global or local cache partition size to be
+ * locked into the corresponding cache memory. This prevents normal LRU
+ * logic discarding the code or data and avoids write-thru bandwidth in
+ * data areas. Code mappings are selected by specifying TBID_SEGTYPE_TEXT
+ * for SegType, otherwise data mappings are created.
+ * 
+ * Mode supplied should always contain the VALID bit and WINx selection data.
+ * Data areas will be mapped read-only if the WRITE bit is not added.
+ *
+ * The address returned by the Opt function will either be the same as that
+ * passed in (if optimisation cannot be supported) or the base of the new core
+ * cached region in linear address space. The returned address must be passed
+ * into the End function to remove the mapping when required. If a non-core
+ * cached memory address is passed into it the End function has no effect.
+ * Note that the region accessed MUST be flushed from the appropriate cache
+ * before the End function is called to deliver correct operation.
+ */
+void *__TBICoreCacheOpt( const void *pStart, int Bytes, int SegType, int Mode );
+void __TBICoreCacheEnd( const void *pOpt, int Bytes, int SegType );
+
+/*
+ * Optimise physical access channel and flush side effects before releasing
+ * the channel. If pStart is NULL the whole region must be flushed and this is
+ * done automatically by the channel release function if optimisation is
+ * enabled. Flushing the specific region that may have been accessed before
+ * release should optimise this process. On physically cached systems we do
+ * not flush the code/data caches only the MMU table data needs flushing.
+ */
+void __TBIPhysOptim( int Channel, int IMode, int DMode );
+void __TBIPhysFlush( int Channel, const void *pStart, int Bytes );
+#endif
+#endif /* ifdef TBI_1_3 */
+
+#endif /* _ASM_METAG_TBX_H_ */
diff --git a/arch/metag/include/asm/tcm.h b/arch/metag/include/asm/tcm.h
new file mode 100644 (file)
index 0000000..7711c31
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef __ASM_TCM_H__
+#define __ASM_TCM_H__
+
+#include <linux/ioport.h>
+#include <linux/list.h>
+
+struct tcm_allocation {
+       struct list_head list;
+       unsigned int tag;
+       unsigned long addr;
+       unsigned long size;
+};
+
+/*
+ * TCM memory region descriptor.
+ */
+struct tcm_region {
+       unsigned int tag;
+       struct resource res;
+};
+
+#define TCM_INVALID_TAG        0xffffffff
+
+unsigned long tcm_alloc(unsigned int tag, size_t len);
+void tcm_free(unsigned int tag, unsigned long addr, size_t len);
+unsigned int tcm_lookup_tag(unsigned long p);
+
+int tcm_add_region(struct tcm_region *reg);
+
+#endif
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
new file mode 100644 (file)
index 0000000..0ecd34d
--- /dev/null
@@ -0,0 +1,155 @@
+/* thread_info.h: Meta low-level thread information
+ *
+ * Copyright (C) 2002  David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ *
+ * Meta port by Imagination Technologies
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#include <linux/compiler.h>
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must
+ *   also be changed
+ */
+#ifndef __ASSEMBLY__
+
+/* This must be 8 byte aligned so we can ensure stack alignment. */
+struct thread_info {
+       struct task_struct *task;       /* main task structure */
+       struct exec_domain *exec_domain;        /* execution domain */
+       unsigned long flags;    /* low level flags */
+       unsigned long status;   /* thread-synchronous flags */
+       u32 cpu;                /* current CPU */
+       int preempt_count;      /* 0 => preemptable, <0 => BUG */
+
+       mm_segment_t addr_limit;        /* thread address space */
+       struct restart_block restart_block;
+
+       u8 supervisor_stack[0];
+};
+
+#else /* !__ASSEMBLY__ */
+
+#include <generated/asm-offsets.h>
+
+#endif
+
+#define PREEMPT_ACTIVE         0x10000000
+
+#ifdef CONFIG_4KSTACKS
+#define THREAD_SHIFT           12
+#else
+#define THREAD_SHIFT           13
+#endif
+
+#if THREAD_SHIFT >= PAGE_SHIFT
+#define THREAD_SIZE_ORDER      (THREAD_SHIFT - PAGE_SHIFT)
+#else
+#define THREAD_SIZE_ORDER      0
+#endif
+
+#define THREAD_SIZE            (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#define STACK_WARN             (THREAD_SIZE/8)
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk)                  \
+{                                              \
+       .task           = &tsk,                 \
+       .exec_domain    = &default_exec_domain, \
+       .flags          = 0,                    \
+       .cpu            = 0,                    \
+       .preempt_count  = INIT_PREEMPT_COUNT,   \
+       .addr_limit     = KERNEL_DS,            \
+       .restart_block = {                      \
+               .fn = do_no_restart_syscall,    \
+       },                                      \
+}
+
+#define init_thread_info       (init_thread_union.thread_info)
+#define init_stack             (init_thread_union.stack)
+
+/* how to get the current stack pointer from C */
+register unsigned long current_stack_pointer asm("A0StP") __used;
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+       return (struct thread_info *)(current_stack_pointer &
+                                     ~(THREAD_SIZE - 1));
+}
+
+#define __HAVE_ARCH_KSTACK_END
+static inline int kstack_end(void *addr)
+{
+       return addr == (void *) (((unsigned long) addr & ~(THREAD_SIZE - 1))
+                                + sizeof(struct thread_info));
+}
+
+#endif
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ *   access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE      0       /* syscall trace active */
+#define TIF_SIGPENDING         1       /* signal pending */
+#define TIF_NEED_RESCHED       2       /* rescheduling necessary */
+#define TIF_SINGLESTEP         3       /* restore singlestep on return to user
+                                          mode */
+#define TIF_SYSCALL_AUDIT      4       /* syscall auditing active */
+#define TIF_SECCOMP            5       /* secure computing */
+#define TIF_RESTORE_SIGMASK    6       /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME      7       /* callback before returning to user */
+#define TIF_POLLING_NRFLAG      8      /* true if poll_idle() is polling
+                                          TIF_NEED_RESCHED */
+#define TIF_MEMDIE             9       /* is terminating due to OOM killer */
+#define TIF_SYSCALL_TRACEPOINT  10     /* syscall tracepoint instrumentation */
+
+
+#define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP                (1<<TIF_SINGLESTEP)
+#define _TIF_SYSCALL_AUDIT     (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP           (1<<TIF_SECCOMP)
+#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
+#define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
+
+/* work to do in syscall trace */
+#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
+                                _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
+                                _TIF_SYSCALL_TRACEPOINT)
+
+/* work to do on any return to u-space */
+#define _TIF_ALLWORK_MASK      (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING      | \
+                                _TIF_NEED_RESCHED  | _TIF_SYSCALL_AUDIT   | \
+                                _TIF_SINGLESTEP    | _TIF_RESTORE_SIGMASK | \
+                                _TIF_NOTIFY_RESUME)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK         (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
+                                _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
+
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/metag/include/asm/tlb.h b/arch/metag/include/asm/tlb.h
new file mode 100644 (file)
index 0000000..048282f
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef __ASM_METAG_TLB_H
+#define __ASM_METAG_TLB_H
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+/* Note, read http://lkml.org/lkml/2004/1/15/6 */
+
+#ifdef CONFIG_METAG_META12
+
+#define tlb_start_vma(tlb, vma)                                                      \
+       do {                                                                  \
+               if (!tlb->fullmm)                                             \
+                       flush_cache_range(vma, vma->vm_start, vma->vm_end);   \
+       } while (0)
+
+#define tlb_end_vma(tlb, vma)                                                \
+       do {                                                                  \
+               if (!tlb->fullmm)                                             \
+                       flush_tlb_range(vma, vma->vm_start, vma->vm_end);     \
+       } while (0)
+
+
+#else
+
+#define tlb_start_vma(tlb, vma)                        do { } while (0)
+#define tlb_end_vma(tlb, vma)                  do { } while (0)
+
+#endif
+
+#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
+#define tlb_flush(tlb)                         flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif
diff --git a/arch/metag/include/asm/tlbflush.h b/arch/metag/include/asm/tlbflush.h
new file mode 100644 (file)
index 0000000..566acf9
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef __ASM_METAG_TLBFLUSH_H
+#define __ASM_METAG_TLBFLUSH_H
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <asm/metag_mem.h>
+#include <asm/pgalloc.h>
+
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb() flushes the current mm struct TLBs
+ *  - flush_tlb_all() flushes all processes TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(mm, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * FIXME: Meta 2 can flush single TLB entries.
+ *
+ */
+
+#if defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP)
+static inline void __flush_tlb(void)
+{
+       /* flush TLB entries for just the current hardware thread */
+       int thread = hard_processor_id();
+       metag_out32(0, (LINSYSCFLUSH_TxMMCU_BASE +
+                       LINSYSCFLUSH_TxMMCU_STRIDE * thread));
+}
+#else
+static inline void __flush_tlb(void)
+{
+       /* flush TLB entries for all hardware threads */
+       metag_out32(0, LINSYSCFLUSH_MMCU);
+}
+#endif /* defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP) */
+
+#define flush_tlb() __flush_tlb()
+
+#define flush_tlb_all() __flush_tlb()
+
+#define local_flush_tlb_all() __flush_tlb()
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+       if (mm == current->active_mm)
+               __flush_tlb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+                                 unsigned long addr)
+{
+       flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+                                     unsigned long start, unsigned long end)
+{
+       flush_tlb_mm(mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+                                         unsigned long end)
+{
+       flush_tlb_all();
+}
+
+#endif /* __ASM_METAG_TLBFLUSH_H */
+
diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h
new file mode 100644 (file)
index 0000000..23f5118
--- /dev/null
@@ -0,0 +1,53 @@
+#ifndef _ASM_METAG_TOPOLOGY_H
+#define _ASM_METAG_TOPOLOGY_H
+
+#ifdef CONFIG_NUMA
+
+/* sched_domains SD_NODE_INIT for Meta machines */
+#define SD_NODE_INIT (struct sched_domain) {           \
+       .parent                 = NULL,                 \
+       .child                  = NULL,                 \
+       .groups                 = NULL,                 \
+       .min_interval           = 8,                    \
+       .max_interval           = 32,                   \
+       .busy_factor            = 32,                   \
+       .imbalance_pct          = 125,                  \
+       .cache_nice_tries       = 2,                    \
+       .busy_idx               = 3,                    \
+       .idle_idx               = 2,                    \
+       .newidle_idx            = 0,                    \
+       .wake_idx               = 0,                    \
+       .forkexec_idx           = 0,                    \
+       .flags                  = SD_LOAD_BALANCE       \
+                               | SD_BALANCE_FORK       \
+                               | SD_BALANCE_EXEC       \
+                               | SD_BALANCE_NEWIDLE    \
+                               | SD_SERIALIZE,         \
+       .last_balance           = jiffies,              \
+       .balance_interval       = 1,                    \
+       .nr_balance_failed      = 0,                    \
+}
+
+#define cpu_to_node(cpu)       ((void)(cpu), 0)
+#define parent_node(node)      ((void)(node), 0)
+
+#define cpumask_of_node(node)  ((void)node, cpu_online_mask)
+
+#define pcibus_to_node(bus)    ((void)(bus), -1)
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+                                       cpu_all_mask : \
+                                       cpumask_of_node(pcibus_to_node(bus)))
+
+#endif
+
+#define mc_capable()    (1)
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+extern cpumask_t cpu_core_map[NR_CPUS];
+
+#define topology_core_cpumask(cpu)     (&cpu_core_map[cpu])
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_METAG_TOPOLOGY_H */
diff --git a/arch/metag/include/asm/traps.h b/arch/metag/include/asm/traps.h
new file mode 100644 (file)
index 0000000..ac80874
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (C) 2005,2008 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _METAG_TBIVECTORS_H
+#define _METAG_TBIVECTORS_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/tbx.h>
+
+typedef TBIRES (*kick_irq_func_t)(TBIRES, int, int, int, PTBI, int *);
+
+extern TBIRES kick_handler(TBIRES, int, int, int, PTBI);
+struct kick_irq_handler {
+       struct list_head list;
+       kick_irq_func_t func;
+};
+
+extern void kick_register_func(struct kick_irq_handler *);
+extern void kick_unregister_func(struct kick_irq_handler *);
+
+extern void head_end(TBIRES, unsigned long);
+extern void restart_critical_section(TBIRES State);
+extern TBIRES tail_end_sys(TBIRES, int, int *);
+static inline TBIRES tail_end(TBIRES state)
+{
+       return tail_end_sys(state, -1, NULL);
+}
+
+DECLARE_PER_CPU(PTBI, pTBI);
+extern PTBI pTBI_get(unsigned int);
+
+extern int ret_from_fork(TBIRES arg);
+
+extern int do_page_fault(struct pt_regs *regs, unsigned long address,
+                        unsigned int write_access, unsigned int trapno);
+
+extern TBIRES __TBIUnExpXXX(TBIRES State, int SigNum, int Triggers, int Inst,
+                           PTBI pTBI);
+
+#endif
+
+#endif /* _METAG_TBIVECTORS_H */
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
new file mode 100644 (file)
index 0000000..0748b0a
--- /dev/null
@@ -0,0 +1,241 @@
+#ifndef __METAG_UACCESS_H
+#define __METAG_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/sched.h>
+
+#define VERIFY_READ    0
+#define VERIFY_WRITE   1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })
+
+#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS                MAKE_MM_SEG(PAGE_OFFSET)
+
+#define get_ds()       (KERNEL_DS)
+#define get_fs()        (current_thread_info()->addr_limit)
+#define set_fs(x)       (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)       ((a).seg == (b).seg)
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+/*
+ * Explicitly allow NULL pointers here. Parts of the kernel such
+ * as readv/writev use access_ok to validate pointers, but want
+ * to allow NULL pointers for various reasons. NULL pointers are
+ * safe to allow through because the first page is not mappable on
+ * Meta.
+ *
+ * We also wish to avoid letting user code access the system area
+ * and the kernel half of the address space.
+ */
+#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
+                               ((addr) > PAGE_OFFSET &&                \
+                                (addr) < LINCORE_BASE))
+
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+       return __kernel_ok || !__user_bad(addr, size);
+}
+
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
+                                               (unsigned long)(size))
+
+static inline int verify_area(int type, const void *addr, unsigned long size)
+{
+       return access_ok(type, addr, size) ? 0 : -EFAULT;
+}
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+       unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ */
+
+#define put_user(x, ptr) \
+       __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+extern void __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size)               \
+({                                                      \
+       long __pu_err;                                  \
+       __put_user_size((x), (ptr), (size), __pu_err);  \
+       __pu_err;                                       \
+})
+
+#define __put_user_check(x, ptr, size)                         \
+({                                                              \
+       long __pu_err = -EFAULT;                                \
+       __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
+       if (access_ok(VERIFY_WRITE, __pu_addr, size))           \
+               __put_user_size((x), __pu_addr, (size), __pu_err);      \
+       __pu_err;                                               \
+})
+
+extern long __put_user_asm_b(unsigned int x, void __user *addr);
+extern long __put_user_asm_w(unsigned int x, void __user *addr);
+extern long __put_user_asm_d(unsigned int x, void __user *addr);
+extern long __put_user_asm_l(unsigned long long x, void __user *addr);
+
+#define __put_user_size(x, ptr, size, retval)                  \
+do {                                                            \
+       retval = 0;                                             \
+       switch (size) {                                         \
+       case 1:                                                         \
+               retval = __put_user_asm_b((unsigned int)x, ptr); break; \
+       case 2:                                                         \
+               retval = __put_user_asm_w((unsigned int)x, ptr); break; \
+       case 4:                                                         \
+               retval = __put_user_asm_d((unsigned int)x, ptr); break; \
+       case 8:                                                         \
+               retval = __put_user_asm_l((unsigned long long)x, ptr); break; \
+       default:                                                        \
+               __put_user_bad();                                       \
+       }                                                               \
+} while (0)
+
+#define get_user(x, ptr) \
+       __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+       __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+extern long __get_user_bad(void);
+
+#define __get_user_nocheck(x, ptr, size)                       \
+({                                                              \
+       long __gu_err, __gu_val;                                \
+       __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
+       (x) = (__typeof__(*(ptr)))__gu_val;                     \
+       __gu_err;                                               \
+})
+
+#define __get_user_check(x, ptr, size)                                 \
+({                                                                      \
+       long __gu_err = -EFAULT, __gu_val = 0;                          \
+       const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
+       if (access_ok(VERIFY_READ, __gu_addr, size))                    \
+               __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       __gu_err;                                                       \
+})
+
+extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
+extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
+extern unsigned int __get_user_asm_d(const void __user *addr, long *err);
+
+#define __get_user_size(x, ptr, size, retval)                  \
+do {                                                            \
+       retval = 0;                                             \
+       switch (size) {                                         \
+       case 1:                                                 \
+               x = __get_user_asm_b(ptr, &retval); break;      \
+       case 2:                                                 \
+               x = __get_user_asm_w(ptr, &retval); break;      \
+       case 4:                                                 \
+               x = __get_user_asm_d(ptr, &retval); break;      \
+       default:                                                \
+               (x) = __get_user_bad();                         \
+       }                                                       \
+} while (0)
+
+/*
+ * Copy a null terminated string from userspace.
+ *
+ * Must return:
+ * -EFAULT             for an exception
+ * count               if we hit the buffer limit
+ * bytes copied                if we hit a null byte
+ * (without the null byte)
+ */
+
+extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
+                                            long count);
+
+#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than N if too long
+ */
+extern long __must_check strnlen_user(const char __user *src, long count);
+
+#define strlen_user(str) strnlen_user(str, 32767)
+
+extern unsigned long __must_check __copy_user_zeroing(void *to,
+                                                     const void __user *from,
+                                                     unsigned long n);
+
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       if (access_ok(VERIFY_READ, from, n))
+               return __copy_user_zeroing(to, from, n);
+       return n;
+}
+
+#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user_inatomic __copy_from_user
+
+extern unsigned long __must_check __copy_user(void __user *to,
+                                             const void *from,
+                                             unsigned long n);
+
+static inline unsigned long copy_to_user(void __user *to, const void *from,
+                                        unsigned long n)
+{
+       if (access_ok(VERIFY_WRITE, to, n))
+               return __copy_user(to, from, n);
+       return n;
+}
+
+#define __copy_to_user(to, from, n) __copy_user(to, from, n)
+#define __copy_to_user_inatomic __copy_to_user
+
+/*
+ * Zero Userspace
+ */
+
+extern unsigned long __must_check __do_clear_user(void __user *to,
+                                                 unsigned long n);
+
+static inline unsigned long clear_user(void __user *to, unsigned long n)
+{
+       if (access_ok(VERIFY_WRITE, to, n))
+               return __do_clear_user(to, n);
+       return n;
+}
+
+#define __clear_user(to, n)            __do_clear_user(to, n)
+
+#endif /* _METAG_UACCESS_H */
diff --git a/arch/metag/include/asm/unistd.h b/arch/metag/include/asm/unistd.h
new file mode 100644 (file)
index 0000000..32955a1
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/metag/include/asm/user_gateway.h b/arch/metag/include/asm/user_gateway.h
new file mode 100644 (file)
index 0000000..e404c09
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies
+ */
+
+#ifndef __ASM_METAG_USER_GATEWAY_H
+#define __ASM_METAG_USER_GATEWAY_H
+
+#include <asm/page.h>
+
+/* Page of kernel code accessible to userspace. */
+#define USER_GATEWAY_PAGE      0x6ffff000
+/* Offset of TLS pointer array in gateway page. */
+#define USER_GATEWAY_TLS       0x100
+
+#ifndef __ASSEMBLY__
+
+extern char __user_gateway_start;
+extern char __user_gateway_end;
+
+/* Kernel mapping of the gateway page. */
+extern void *gateway_page;
+
+static inline void set_gateway_tls(void __user *tls_ptr)
+{
+       void **gateway_tls = (void **)(gateway_page + USER_GATEWAY_TLS +
+                                      hard_processor_id() * 4);
+
+       *gateway_tls = (__force void *)tls_ptr;
+#ifdef CONFIG_METAG_META12
+       /* Avoid cache aliases on virtually tagged cache. */
+       __builtin_dcache_flush((void *)USER_GATEWAY_PAGE + USER_GATEWAY_TLS +
+                                      hard_processor_id() * sizeof(void *));
+#endif
+}
+
+extern int __kuser_get_tls(void);
+extern char *__kuser_get_tls_end[];
+
+extern int __kuser_cmpxchg(int, int, unsigned long *);
+extern char *__kuser_cmpxchg_end[];
+
+#endif
+
+#endif
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
new file mode 100644 (file)
index 0000000..876c71f
--- /dev/null
@@ -0,0 +1,13 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+header-y += byteorder.h
+header-y += ptrace.h
+header-y += resource.h
+header-y += sigcontext.h
+header-y += siginfo.h
+header-y += swab.h
+header-y += unistd.h
+
+generic-y += mman.h
+generic-y += setup.h
diff --git a/arch/metag/include/uapi/asm/byteorder.h b/arch/metag/include/uapi/asm/byteorder.h
new file mode 100644 (file)
index 0000000..9558416
--- /dev/null
@@ -0,0 +1 @@
+#include <linux/byteorder/little_endian.h>
diff --git a/arch/metag/include/uapi/asm/ptrace.h b/arch/metag/include/uapi/asm/ptrace.h
new file mode 100644 (file)
index 0000000..45d9780
--- /dev/null
@@ -0,0 +1,113 @@
+#ifndef _UAPI_METAG_PTRACE_H
+#define _UAPI_METAG_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * These are the layouts of the regsets returned by the GETREGSET ptrace call
+ */
+
+/* user_gp_regs::status */
+
+/* CBMarker bit (indicates catch state / catch replay) */
+#define USER_GP_REGS_STATUS_CATCH_BIT          (1 << 22)
+#define USER_GP_REGS_STATUS_CATCH_S            22
+/* LSM_STEP field (load/store multiple step) */
+#define USER_GP_REGS_STATUS_LSM_STEP_BITS      (0x7 << 8)
+#define USER_GP_REGS_STATUS_LSM_STEP_S         8
+/* SCC bit (indicates split 16x16 condition flags) */
+#define USER_GP_REGS_STATUS_SCC_BIT            (1 << 4)
+#define USER_GP_REGS_STATUS_SCC_S              4
+
+/* normal condition flags */
+/* CF_Z bit (Zero flag) */
+#define USER_GP_REGS_STATUS_CF_Z_BIT           (1 << 3)
+#define USER_GP_REGS_STATUS_CF_Z_S             3
+/* CF_N bit (Negative flag) */
+#define USER_GP_REGS_STATUS_CF_N_BIT           (1 << 2)
+#define USER_GP_REGS_STATUS_CF_N_S             2
+/* CF_V bit (oVerflow flag) */
+#define USER_GP_REGS_STATUS_CF_V_BIT           (1 << 1)
+#define USER_GP_REGS_STATUS_CF_V_S             1
+/* CF_C bit (Carry flag) */
+#define USER_GP_REGS_STATUS_CF_C_BIT           (1 << 0)
+#define USER_GP_REGS_STATUS_CF_C_S             0
+
+/* split 16x16 condition flags */
+/* SCF_LZ bit (Low Zero flag) */
+#define USER_GP_REGS_STATUS_SCF_LZ_BIT         (1 << 3)
+#define USER_GP_REGS_STATUS_SCF_LZ_S           3
+/* SCF_HZ bit (High Zero flag) */
+#define USER_GP_REGS_STATUS_SCF_HZ_BIT         (1 << 2)
+#define USER_GP_REGS_STATUS_SCF_HZ_S           2
+/* SCF_HC bit (High Carry flag) */
+#define USER_GP_REGS_STATUS_SCF_HC_BIT         (1 << 1)
+#define USER_GP_REGS_STATUS_SCF_HC_S           1
+/* SCF_LC bit (Low Carry flag) */
+#define USER_GP_REGS_STATUS_SCF_LC_BIT         (1 << 0)
+#define USER_GP_REGS_STATUS_SCF_LC_S           0
+
+/**
+ * struct user_gp_regs - User general purpose registers
+ * @dx:                GP data unit regs (dx[reg][unit] = D{unit:0-1}.{reg:0-7})
+ * @ax:                GP address unit regs (ax[reg][unit] = A{unit:0-1}.{reg:0-3})
+ * @pc:                PC register
+ * @status:    TXSTATUS register (condition flags, LSM_STEP etc)
+ * @rpt:       TXRPT registers (branch repeat counter)
+ * @bpobits:   TXBPOBITS register ("branch prediction other" bits)
+ * @mode:      TXMODE register
+ * @_pad1:     Reserved padding to make sizeof obviously 64bit aligned
+ *
+ * This is the user-visible general purpose register state structure.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_PRSTATUS.
+ *
+ * It is also used in the signal context.
+ */
+struct user_gp_regs {
+       unsigned long dx[8][2];
+       unsigned long ax[4][2];
+       unsigned long pc;
+       unsigned long status;
+       unsigned long rpt;
+       unsigned long bpobits;
+       unsigned long mode;
+       unsigned long _pad1;
+};
+
+/**
+ * struct user_cb_regs - User catch buffer registers
+ * @flags:     TXCATCH0 register (fault flags)
+ * @addr:      TXCATCH1 register (fault address)
+ * @data:      TXCATCH2 and TXCATCH3 registers (low and high data word)
+ *
+ * This is the user-visible catch buffer register state structure containing
+ * information about a failed memory access, and allowing the access to be
+ * modified and replayed.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_METAG_CBUF.
+ */
+struct user_cb_regs {
+       unsigned long flags;
+       unsigned long addr;
+       unsigned long long data;
+};
+
+/**
+ * struct user_rp_state - User read pipeline state
+ * @entries:   Read pipeline entries
+ * @mask:      Mask of valid pipeline entries (RPMask from TXDIVTIME register)
+ *
+ * This is the user-visible read pipeline state structure containing the entries
+ * currently in the read pipeline and the mask of valid entries.
+ *
+ * It can be accessed through PTRACE_GETREGSET with NT_METAG_RPIPE.
+ */
+struct user_rp_state {
+       unsigned long long entries[6];
+       unsigned long mask;
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_METAG_PTRACE_H */
diff --git a/arch/metag/include/uapi/asm/resource.h b/arch/metag/include/uapi/asm/resource.h
new file mode 100644 (file)
index 0000000..526d23c
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _UAPI_METAG_RESOURCE_H
+#define _UAPI_METAG_RESOURCE_H
+
+#define _STK_LIM_MAX    (1 << 28)
+#include <asm-generic/resource.h>
+
+#endif /* _UAPI_METAG_RESOURCE_H */
diff --git a/arch/metag/include/uapi/asm/sigcontext.h b/arch/metag/include/uapi/asm/sigcontext.h
new file mode 100644 (file)
index 0000000..ef79a91
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef _ASM_METAG_SIGCONTEXT_H
+#define _ASM_METAG_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * In a sigcontext structure we need to store the active state of the
+ * user process so that it does not get trashed when we call the signal
+ * handler. That not really the same as a user context that we are
+ * going to store on syscall etc.
+ */
+struct sigcontext {
+       struct user_gp_regs regs;       /* needs to be first */
+
+       /*
+        * Catch registers describing a memory fault.
+        * If USER_GP_REGS_STATUS_CATCH_BIT is set in regs.status then catch
+        * buffers have been saved and will be replayed on sigreturn.
+        * Clear that bit to discard the catch state instead of replaying it.
+        */
+       struct user_cb_regs cb;
+
+       /*
+        * Read pipeline state. This will get restored on sigreturn.
+        */
+       struct user_rp_state rp;
+
+       /* Signal mask that was in effect before the handler was invoked. */
+       unsigned long oldmask;
+};
+
+#endif
diff --git a/arch/metag/include/uapi/asm/siginfo.h b/arch/metag/include/uapi/asm/siginfo.h
new file mode 100644 (file)
index 0000000..b2e0c8b
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _METAG_SIGINFO_H
+#define _METAG_SIGINFO_H
+
+#define __ARCH_SI_TRAPNO
+
+#include <asm-generic/siginfo.h>
+
+#endif
diff --git a/arch/metag/include/uapi/asm/swab.h b/arch/metag/include/uapi/asm/swab.h
new file mode 100644 (file)
index 0000000..1076b3a
--- /dev/null
@@ -0,0 +1,26 @@
+#ifndef __ASM_METAG_SWAB_H
+#define __ASM_METAG_SWAB_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm-generic/swab.h>
+
+/* 16-bit byte swap via the Meta bswaps builtin. */
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+       return __builtin_metag_bswaps(x);
+}
+#define __arch_swab16 __arch_swab16
+
+/* 32-bit byte swap via the Meta bswap builtin. */
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+       return __builtin_metag_bswap(x);
+}
+#define __arch_swab32 __arch_swab32
+
+/* 64-bit byte swap via the Meta bswapll builtin. */
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+       return __builtin_metag_bswapll(x);
+}
+#define __arch_swab64 __arch_swab64
+
+#endif /* __ASM_METAG_SWAB_H */
diff --git a/arch/metag/include/uapi/asm/unistd.h b/arch/metag/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..b80b8e8
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* Use the standard ABI for syscalls. */
+#include <asm-generic/unistd.h>
+
+/* metag-specific syscalls. */
+#define __NR_metag_setglobalbit                (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_metag_setglobalbit, sys_metag_setglobalbit)
+#define __NR_metag_set_fpu_flags       (__NR_arch_specific_syscall + 2)
+__SYSCALL(__NR_metag_set_fpu_flags, sys_metag_set_fpu_flags)
+#define __NR_metag_set_tls             (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_metag_set_tls, sys_metag_set_tls)
+#define __NR_metag_get_tls             (__NR_arch_specific_syscall + 4)
+__SYSCALL(__NR_metag_get_tls, sys_metag_get_tls)
diff --git a/arch/metag/kernel/.gitignore b/arch/metag/kernel/.gitignore
new file mode 100644 (file)
index 0000000..c5f676c
--- /dev/null
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/metag/kernel/Makefile b/arch/metag/kernel/Makefile
new file mode 100644 (file)
index 0000000..d7675f4
--- /dev/null
@@ -0,0 +1,39 @@
+#
+# Makefile for the Linux/Meta kernel.
+#
+
+extra-y        += head.o
+extra-y        += vmlinux.lds
+
+obj-y  += cachepart.o
+obj-y  += clock.o
+obj-y  += core_reg.o
+obj-y  += devtree.o
+obj-y  += dma.o
+obj-y  += irq.o
+obj-y  += kick.o
+obj-y  += machines.o
+obj-y  += process.o
+obj-y  += ptrace.o
+obj-y  += setup.o
+obj-y  += signal.o
+obj-y  += stacktrace.o
+obj-y  += sys_metag.o
+obj-y  += tbiunexp.o
+obj-y  += time.o
+obj-y  += topology.o
+obj-y  += traps.o
+obj-y  += user_gateway.o
+
+obj-$(CONFIG_PERF_EVENTS)              += perf/
+
+obj-$(CONFIG_METAG_COREMEM)            += coremem.o
+obj-$(CONFIG_METAG_DA)                 += da.o
+obj-$(CONFIG_DYNAMIC_FTRACE)           += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER)          += ftrace_stub.o
+obj-$(CONFIG_MODULES)                  += metag_ksyms.o
+obj-$(CONFIG_MODULES)                  += module.o
+obj-$(CONFIG_PERF_EVENTS)              += perf_callchain.o
+obj-$(CONFIG_SMP)                      += smp.o
+obj-$(CONFIG_METAG_SUSPEND_MEM)                += suspend.o
+obj-$(CONFIG_METAG_USER_TCM)           += tcm.o
diff --git a/arch/metag/kernel/asm-offsets.c b/arch/metag/kernel/asm-offsets.c
new file mode 100644 (file)
index 0000000..bfc9205
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ */
+
+#include <linux/kbuild.h>
+#include <linux/thread_info.h>
+
+/* Emit THREAD_INFO_SIZE for assembly via the kbuild asm-offsets mechanism. */
+int main(void)
+{
+       DEFINE(THREAD_INFO_SIZE, sizeof(struct thread_info));
+       return 0;
+}
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
new file mode 100644 (file)
index 0000000..3a589df
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Meta cache partition manipulation.
+ *
+ * Copyright 2010 Imagination Technologies Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+#include <asm/cachepart.h>
+#include <asm/metag_isa.h>
+#include <asm/metag_mem.h>
+
+#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
+#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))
+
+#define CACHE_ASSOCIATIVITY 4 /* 4 way set-assosiative */
+#define ICACHE 0
+#define DCACHE 1
+
+/* The CORE_CONFIG2 register is not available on Meta 1 */
+#ifdef CONFIG_METAG_META21
+/* Total data cache size in bytes, decoded from the CORE_CONFIG2 register. */
+unsigned int get_dcache_size(void)
+{
+       unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
+       /* Size field encodes a power-of-two multiple of 4KB. */
+       return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)
+                               >> METAC_CORECFG2_DCSZ_S);
+}
+
+/* Total instruction cache size in bytes, decoded from CORE_CONFIG2. */
+unsigned int get_icache_size(void)
+{
+       unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
+       /* Size field encodes a power-of-two multiple of 4KB. */
+       return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)
+                               >> METAC_CORE_C2ICSZ_S);
+}
+
+/*
+ * Size in bytes of the global part of the data cache, derived from this
+ * hardware thread's SYSC_DCPARTn global field.
+ */
+unsigned int get_global_dcache_size(void)
+{
+       unsigned int cpart = metag_in32(SYSC_DCPART(hard_processor_id()));
+       unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS;
+       /* The field expresses the global share in sixteenths of the cache. */
+       return (get_dcache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4;
+}
+
+/*
+ * Size in bytes of the global part of the instruction cache, derived from
+ * this hardware thread's SYSC_ICPARTn global field.
+ */
+unsigned int get_global_icache_size(void)
+{
+       unsigned int cpart = metag_in32(SYSC_ICPART(hard_processor_id()));
+       unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS;
+       /* The field expresses the global share in sixteenths of the cache. */
+       return (get_icache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4;
+}
+
+static unsigned int get_thread_cache_size(unsigned int cache, int thread_id)
+{
+       unsigned int cache_size;
+       unsigned int t_cache_part;
+       unsigned int isEnabled;
+       unsigned int offset = 0;
+       isEnabled = (cache == DCACHE ? metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 :
+               metag_in32(MMCU_ICACHE_CTRL_ADDR) & 0x1);
+       if (!isEnabled)
+               return 0;
+#if PAGE_OFFSET >= LINGLOBAL_BASE
+       /* Checking for global cache */
+       cache_size = (cache == DCACHE ? get_global_dache_size() :
+               get_global_icache_size());
+       offset = 8;
+#else
+       cache_size = (cache == DCACHE ? get_dcache_size() :
+               get_icache_size());
+#endif
+       t_cache_part = (cache == DCACHE ?
+               (metag_in32(SYSC_DCPART(thread_id)) >> offset) & 0xF :
+               (metag_in32(SYSC_ICPART(thread_id)) >> offset) & 0xF);
+       switch (t_cache_part) {
+       case 0xF:
+               return cache_size;
+       case 0x7:
+               return cache_size / 2;
+       case 0x3:
+               return cache_size / 4;
+       case 0x1:
+               return cache_size / 8;
+       case 0:
+               return cache_size / 16;
+       }
+       return -1;
+}
+
+/*
+ * check_for_cache_aliasing() - Warn if a thread's cache partition can alias.
+ * @thread_id: Hardware thread to check.
+ *
+ * If the per-thread partition divided by the associativity exceeds the page
+ * size, a physical page can occupy more than one cache set, so virtual
+ * aliasing is possible; log the offending configuration.
+ */
+void check_for_cache_aliasing(int thread_id)
+{
+       /* int, not unsigned: get_thread_cache_size() returns -1 on error */
+       int thread_cache_size;
+       unsigned int cache_type;
+       for (cache_type = ICACHE; cache_type <= DCACHE; cache_type++) {
+               thread_cache_size =
+                               get_thread_cache_size(cache_type, thread_id);
+               if (thread_cache_size < 0)
+                       pr_emerg("Can't read %s cache size\n",
+                                cache_type ? "DCACHE" : "ICACHE");
+               else if (thread_cache_size == 0)
+                       /* Cache is off. No need to check for aliasing */
+                       continue;
+               if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) {
+                       pr_emerg("Cache aliasing detected in %s on Thread %d\n",
+                                cache_type ? "DCACHE" : "ICACHE", thread_id);
+                       pr_warn("Total %s size: %u bytes\n",
+                               cache_type ? "DCACHE" : "ICACHE",
+                               cache_type ? get_dcache_size()
+                               : get_icache_size());
+                       pr_warn("Thread %s size: %d bytes\n",
+                               cache_type ? "DCACHE" : "ICACHE",
+                               thread_cache_size);
+                       pr_warn("Page Size: %lu bytes\n", PAGE_SIZE);
+               }
+       }
+}
+
+#else
+
+/* Meta 1 has no CORE_CONFIG2, so cache sizes cannot be read; nothing to do. */
+void check_for_cache_aliasing(int thread_id)
+{
+       return;
+}
+
+#endif
diff --git a/arch/metag/kernel/clock.c b/arch/metag/kernel/clock.c
new file mode 100644 (file)
index 0000000..defc840
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * arch/metag/kernel/clock.c
+ *
+ * Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/param.h>
+#include <asm/clock.h>
+
+struct meta_clock_desc _meta_clock;
+
+/* Default machine get_core_freq callback. */
+/* Estimate the core clock frequency (Hz) when the machine provides none. */
+static unsigned long get_core_freq_default(void)
+{
+#ifdef CONFIG_METAG_META21
+       /*
+        * Meta 2 cores divide down the core clock for the Meta timers, so we
+        * can estimate the core clock from the divider.
+        */
+       return (metag_in32(EXPAND_TIMER_DIV) + 1) * 1000000;
+#else
+       /*
+        * On Meta 1 we don't know the core clock, but assuming the Meta timer
+        * is correct it can be estimated based on loops_per_jiffy.
+        */
+       return (loops_per_jiffy * HZ * 5) >> 1;
+#endif
+}
+
+/**
+ * setup_meta_clocks() - Set up the Meta clock.
+ * @desc:      Clock descriptor usually provided by machine description
+ *
+ * Ensures all callbacks are valid.
+ */
+void __init setup_meta_clocks(struct meta_clock_desc *desc)
+{
+       /* copy callbacks (a NULL desc leaves the existing/default state) */
+       if (desc)
+               _meta_clock = *desc;
+
+       /* set fallback functions so get_core_freq is always callable */
+       if (!_meta_clock.get_core_freq)
+               _meta_clock.get_core_freq = get_core_freq_default;
+}
+
diff --git a/arch/metag/kernel/core_reg.c b/arch/metag/kernel/core_reg.c
new file mode 100644 (file)
index 0000000..671cce8
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ *  Support for reading and writing Meta core internal registers.
+ *
+ *  Copyright (C) 2011 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+
+#include <asm/core_reg.h>
+#include <asm/global_lock.h>
+#include <asm/hwthread.h>
+#include <asm/io.h>
+#include <asm/metag_mem.h>
+#include <asm/metag_regs.h>
+
+#define UNIT_BIT_MASK          TXUXXRXRQ_UXX_BITS
+#define REG_BIT_MASK           TXUXXRXRQ_RX_BITS
+#define THREAD_BIT_MASK                TXUXXRXRQ_TX_BITS
+
+#define UNIT_SHIFTS            TXUXXRXRQ_UXX_S
+#define REG_SHIFTS             TXUXXRXRQ_RX_S
+#define THREAD_SHIFTS          TXUXXRXRQ_TX_S
+
+#define UNIT_VAL(x)            (((x) << UNIT_SHIFTS) & UNIT_BIT_MASK)
+#define REG_VAL(x)             (((x) << REG_SHIFTS) & REG_BIT_MASK)
+#define THREAD_VAL(x)          (((x) << THREAD_SHIFTS) & THREAD_BIT_MASK)
+
+/*
+ * core_reg_write() - modify the content of a register in a core unit.
+ * @unit:      The unit to be modified.
+ * @reg:       Register number within the unit.
+ * @thread:    The thread we want to access.
+ * @val:       The new value to write.
+ *
+ * Check asm/metag_regs.h for a list/defines of supported units (ie: TXUPC_ID,
+ * TXUTR_ID, etc), and regnums within the units (ie: TXMASKI_REGNUM,
+ * TXPOLLI_REGNUM, etc).
+ */
+void core_reg_write(int unit, int reg, int thread, unsigned int val)
+{
+       unsigned long flags;
+
+       /* TXUCT_ID has its own memory mapped registers */
+       if (unit == TXUCT_ID) {
+               void __iomem *cu_reg = __CU_addr(thread, reg);
+               metag_out32(val, cu_reg);
+               return;
+       }
+
+       /* Serialize use of the shared TXUXXRXRQ/TXUXXRXDT request port. */
+       __global_lock2(flags);
+
+       /* wait for ready */
+       while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+               udelay(10);
+
+       /* set the value to write */
+       metag_out32(val, TXUXXRXDT);
+
+       /* set the register to write (request triggers the transfer) */
+       val = UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread);
+       metag_out32(val, TXUXXRXRQ);
+
+       /* wait for finish */
+       while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+               udelay(10);
+
+       __global_unlock2(flags);
+}
+EXPORT_SYMBOL(core_reg_write);
+
+/*
+ * core_reg_read() - read the content of a register in a core unit.
+ * @unit:      The unit to be modified.
+ * @reg:       Register number within the unit.
+ * @thread:    The thread we want to access.
+ *
+ * Check asm/metag_regs.h for a list/defines of supported units (ie: TXUPC_ID,
+ * TXUTR_ID, etc), and regnums within the units (ie: TXMASKI_REGNUM,
+ * TXPOLLI_REGNUM, etc).
+ */
+unsigned int core_reg_read(int unit, int reg, int thread)
+{
+       unsigned long flags;
+       unsigned int val;
+
+       /* TXUCT_ID has its own memory mapped registers */
+       if (unit == TXUCT_ID) {
+               void __iomem *cu_reg = __CU_addr(thread, reg);
+               val = metag_in32(cu_reg);
+               return val;
+       }
+
+       /* Serialize use of the shared TXUXXRXRQ/TXUXXRXDT request port. */
+       __global_lock2(flags);
+
+       /* wait for ready */
+       while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+               udelay(10);
+
+       /* set the register to read (RDnWR selects a read transfer) */
+       val = (UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread) |
+                                                       TXUXXRXRQ_RDnWR_BIT);
+       metag_out32(val, TXUXXRXRQ);
+
+       /* wait for finish */
+       while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT))
+               udelay(10);
+
+       /* read the register value */
+       val = metag_in32(TXUXXRXDT);
+
+       __global_unlock2(flags);
+
+       return val;
+}
+EXPORT_SYMBOL(core_reg_read);
diff --git a/arch/metag/kernel/da.c b/arch/metag/kernel/da.c
new file mode 100644 (file)
index 0000000..52aabb6
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Meta DA JTAG debugger control.
+ *
+ * Copyright 2012 Imagination Technologies Ltd.
+ */
+
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <asm/da.h>
+#include <asm/metag_mem.h>
+
+bool _metag_da_present;
+
+/*
+ * metag_da_probe() - Detect whether a DA (JTAG debug adapter) is attached.
+ *
+ * A value of 1 in T0VECINT_BHALT is taken to indicate a DA is present.
+ * The result is cached in _metag_da_present.  Always returns 0.
+ */
+int __init metag_da_probe(void)
+{
+       _metag_da_present = (metag_in32(T0VECINT_BHALT) == 1);
+       if (_metag_da_present)
+               pr_info("DA present\n");
+       else
+               pr_info("DA not present\n");
+       return 0;
+}
diff --git a/arch/metag/kernel/devtree.c b/arch/metag/kernel/devtree.c
new file mode 100644 (file)
index 0000000..7cd0252
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ *  linux/arch/metag/kernel/devtree.c
+ *
+ *  Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ *  Based on ARM version:
+ *  Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/mach/arch.h>
+
+/*
+ * Memory is not registered from the device tree on this architecture;
+ * just report any /memory node the FDT scan hands us.
+ */
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+       pr_err("%s(%llx, %llx)\n",
+              __func__, base, size);
+}
+
+/* Early boot allocation (bootmem) used while unflattening the device tree. */
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+       return alloc_bootmem_align(size, align);
+}
+
+/**
+ * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
+ * @dt:                virtual address pointer to dt blob
+ *
+ * If a dtb was passed to the kernel, then use it to choose the correct
+ * machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(void *dt)
+{
+       struct boot_param_header *devtree = dt;
+       struct machine_desc *mdesc, *mdesc_best = NULL;
+       /* lower score = closer compatible match; ~1 is worse than any real score */
+       unsigned int score, mdesc_score = ~1;
+       unsigned long dt_root;
+       const char *model;
+
+       /* check device tree validity */
+       if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+               return NULL;
+
+       /* Search the mdescs for the 'best' compatible value match */
+       initial_boot_params = devtree;
+       dt_root = of_get_flat_dt_root();
+
+       for_each_machine_desc(mdesc) {
+               score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+               if (score > 0 && score < mdesc_score) {
+                       mdesc_best = mdesc;
+                       mdesc_score = score;
+               }
+       }
+       if (!mdesc_best) {
+               const char *prop;
+               long size;
+
+               pr_err("\nError: unrecognized/unsupported device tree compatible list:\n[ ");
+
+               /* "compatible" is a NUL-separated string list; print each entry */
+               prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+               if (prop) {
+                       while (size > 0) {
+                               printk("'%s' ", prop);
+                               size -= strlen(prop) + 1;
+                               prop += strlen(prop) + 1;
+                       }
+               }
+               printk("]\n\n");
+
+               dump_machine_table(); /* does not return */
+       }
+
+       model = of_get_flat_dt_prop(dt_root, "model", NULL);
+       if (!model)
+               model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
+       if (!model)
+               model = "<unknown>";
+       pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
+
+       /* Retrieve various information from the /chosen node */
+       of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+       return mdesc_best;
+}
+
+/**
+ * copy_fdt - Copy device tree into non-init memory.
+ *
+ * We must copy the flattened device tree blob into non-init memory because the
+ * unflattened device tree will reference the strings in it directly.
+ */
+void __init copy_fdt(void)
+{
+       /* 0x40 = 64-byte alignment for the copied blob */
+       void *alloc = early_init_dt_alloc_memory_arch(
+                       be32_to_cpu(initial_boot_params->totalsize), 0x40);
+       /* On allocation failure the original (init-memory) blob stays in use. */
+       if (alloc) {
+               memcpy(alloc, initial_boot_params,
+                      be32_to_cpu(initial_boot_params->totalsize));
+               initial_boot_params = alloc;
+       }
+}
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
new file mode 100644 (file)
index 0000000..8c00ded
--- /dev/null
@@ -0,0 +1,507 @@
+/*
+ *  Meta version derived from arch/powerpc/lib/dma-noncoherent.c
+ *    Copyright (C) 2008 Imagination Technologies Ltd.
+ *
+ *  PowerPC version derived from arch/arm/mm/consistent.c
+ *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * Consistent memory allocators.  Used for DMA devices that want to
+ * share uncached memory with the processor core.  The function return
+ * is the virtual address and 'dma_handle' is the physical address.
+ * Mostly stolen from the ARM port, with some changes for PowerPC.
+ *                                             -- Dan
+ *
+ * Reorganized to get rid of the arch-specific consistent_* functions
+ * and provide non-coherent implementations for the DMA API. -Matt
+ *
+ * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
+ * implementation. This is pulled straight from ARM and barely
+ * modified. -Matt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+
+#define CONSISTENT_OFFSET(x)   (((unsigned long)(x) - CONSISTENT_START) \
+                                       >> PAGE_SHIFT)
+
+/*
+ * Return the device's coherent DMA mask; ~0ULL when no device is given,
+ * or 0 when the device's mask is unset (the allocation must then fail).
+ */
+static u64 get_coherent_dma_mask(struct device *dev)
+{
+       u64 mask = ~0ULL;
+
+       if (dev) {
+               mask = dev->coherent_dma_mask;
+
+               /*
+                * Sanity check the DMA mask - it must be non-zero, and
+                * must be able to be satisfied by a DMA allocation.
+                */
+               if (mask == 0) {
+                       dev_warn(dev, "coherent DMA mask is unset\n");
+                       return 0;
+               }
+       }
+
+       return mask;
+}
+/*
+ * This is the page table (2MB) covering uncached, DMA consistent allocations
+ */
+static pte_t *consistent_pte;
+static DEFINE_SPINLOCK(consistent_lock);
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct metag_vm_region   region;
+ *    unsigned long    flags;
+ *    struct page      **pages;
+ *    unsigned int     nr_pages;
+ *    unsigned long    phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call metag_vm_region_alloc with an appropriate
+ * struct metag_vm_region head (eg):
+ *
+ *  struct metag_vm_region vmalloc_head = {
+ *     .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *     .vm_start       = VMALLOC_START,
+ *     .vm_end         = VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling
+ * metag_vm_region_alloc().
+ */
+struct metag_vm_region {
+       struct list_head vm_list;       /* linked into consistent_head, address order */
+       unsigned long vm_start;         /* start of the consistent mapping */
+       unsigned long vm_end;           /* end (exclusive) of the mapping */
+       struct page             *vm_pages;      /* first page of the backing allocation */
+       int                     vm_active;      /* cleared while the region is being freed */
+};
+
+static struct metag_vm_region consistent_head = {
+       .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
+       .vm_start = CONSISTENT_START,
+       .vm_end = CONSISTENT_END,
+};
+
+/*
+ * Allocate a @size-byte slot in the range covered by @head, keeping
+ * head->vm_list sorted by address.  Returns the new region or NULL if no
+ * gap is large enough.  Takes consistent_lock internally; @gfp is only
+ * used for the bookkeeping node.
+ */
+static struct metag_vm_region *metag_vm_region_alloc(struct metag_vm_region
+                                                    *head, size_t size,
+                                                    gfp_t gfp)
+{
+       unsigned long addr = head->vm_start, end = head->vm_end - size;
+       unsigned long flags;
+       struct metag_vm_region *c, *new;
+
+       new = kmalloc(sizeof(struct metag_vm_region), gfp);
+       if (!new)
+               goto out;
+
+       spin_lock_irqsave(&consistent_lock, flags);
+
+       /* Walk existing regions looking for a gap before each one. */
+       list_for_each_entry(c, &head->vm_list, vm_list) {
+               if ((addr + size) < addr)
+                       goto nospc;     /* address wrapped */
+               if ((addr + size) <= c->vm_start)
+                       goto found;
+               addr = c->vm_end;
+               if (addr > end)
+                       goto nospc;
+       }
+       /* Falling out of the loop inserts at the list tail, before head. */
+
+found:
+       /*
+        * Insert this entry _before_ the one we found.
+        */
+       list_add_tail(&new->vm_list, &c->vm_list);
+       new->vm_start = addr;
+       new->vm_end = addr + size;
+       new->vm_active = 1;
+
+       spin_unlock_irqrestore(&consistent_lock, flags);
+       return new;
+
+nospc:
+       spin_unlock_irqrestore(&consistent_lock, flags);
+       kfree(new);
+out:
+       return NULL;
+}
+
+/*
+ * Find the active region starting exactly at @addr, or return NULL.
+ * Callers hold consistent_lock around this walk.
+ */
+static struct metag_vm_region *metag_vm_region_find(struct metag_vm_region
+                                                   *head, unsigned long addr)
+{
+       struct metag_vm_region *c;
+
+       list_for_each_entry(c, &head->vm_list, vm_list) {
+               if (c->vm_active && c->vm_start == addr)
+                       goto out;
+       }
+       c = NULL;
+out:
+       return c;
+}
+
+/*
+ * Allocate DMA-coherent memory space and return both the kernel remapped
+ * virtual and bus address for that space.
+ */
+/*
+ * Allocate DMA-coherent memory space and return both the kernel remapped
+ * virtual and bus address for that space.
+ *
+ * The backing pages come from alloc_pages(); they are zeroed, flushed from
+ * the data cache, then mapped write-combined at a virtual address inside
+ * the consistent region.  Returns NULL on failure.
+ */
+void *dma_alloc_coherent(struct device *dev, size_t size,
+                        dma_addr_t *handle, gfp_t gfp)
+{
+       struct page *page;
+       struct metag_vm_region *c;
+       unsigned long order;
+       u64 mask = get_coherent_dma_mask(dev);
+       u64 limit;
+
+       if (!consistent_pte) {
+               pr_err("%s: not initialised\n", __func__);
+               dump_stack();
+               return NULL;
+       }
+
+       if (!mask)
+               goto no_page;
+       size = PAGE_ALIGN(size);
+       limit = (mask + 1) & ~mask;
+       if ((limit && size >= limit)
+           || size >= (CONSISTENT_END - CONSISTENT_START)) {
+               /* %zx for size_t, %llx for u64 (fixes mismatched specifiers) */
+               pr_warn("coherent allocation too big (requested %#zx mask %#llx)\n",
+                       size, mask);
+               return NULL;
+       }
+
+       order = get_order(size);
+
+       if (mask != 0xffffffff)
+               gfp |= GFP_DMA;
+
+       page = alloc_pages(gfp, order);
+       if (!page)
+               goto no_page;
+
+       /*
+        * Invalidate any data that might be lurking in the
+        * kernel direct-mapped region for device DMA.
+        */
+       {
+               void *kaddr = page_address(page);
+               memset(kaddr, 0, size);
+               flush_dcache_region(kaddr, size);
+       }
+
+       /*
+        * Allocate a virtual address in the consistent mapping region.
+        */
+       c = metag_vm_region_alloc(&consistent_head, size,
+                                 gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+       if (c) {
+               unsigned long vaddr = c->vm_start;
+               pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
+               struct page *end = page + (1 << order);
+
+               c->vm_pages = page;
+               split_page(page, order);
+
+               /*
+                * Set the "dma handle"
+                */
+               *handle = page_to_bus(page);
+
+               do {
+                       BUG_ON(!pte_none(*pte));
+
+                       SetPageReserved(page);
+                       set_pte_at(&init_mm, vaddr,
+                                  pte, mk_pte(page,
+                                              pgprot_writecombine
+                                              (PAGE_KERNEL)));
+                       page++;
+                       pte++;
+                       vaddr += PAGE_SIZE;
+               } while (size -= PAGE_SIZE);
+
+               /*
+                * Free the otherwise unused pages (alloc_pages rounded the
+                * allocation up to a power-of-two number of pages).
+                */
+               while (page < end) {
+                       __free_page(page);
+                       page++;
+               }
+
+               return (void *)c->vm_start;
+       }
+
+       if (page)
+               __free_pages(page, order);
+no_page:
+       return NULL;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+/*
+ * free a page as defined by the above mapping.
+ */
+/*
+ * Free a consistent mapping created by dma_alloc_coherent(): tear down the
+ * page-table entries, release the backing pages, flush the TLB range and
+ * drop the region bookkeeping.  Logs (and recovers from) a size mismatch.
+ */
+void dma_free_coherent(struct device *dev, size_t size,
+                      void *vaddr, dma_addr_t dma_handle)
+{
+       struct metag_vm_region *c;
+       unsigned long flags, addr;
+       pte_t *ptep;
+
+       size = PAGE_ALIGN(size);
+
+       spin_lock_irqsave(&consistent_lock, flags);
+
+       c = metag_vm_region_find(&consistent_head, (unsigned long)vaddr);
+       if (!c)
+               goto no_area;
+
+       c->vm_active = 0;
+       if ((c->vm_end - c->vm_start) != size) {
+               /* %lu for the unsigned long span, %zu for size_t (fixes
+                * the previously mismatched %ld/%d specifiers) */
+               pr_err("%s: freeing wrong coherent size (%lu != %zu)\n",
+                      __func__, c->vm_end - c->vm_start, size);
+               dump_stack();
+               size = c->vm_end - c->vm_start;
+       }
+
+       ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+       addr = c->vm_start;
+       do {
+               pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+               unsigned long pfn;
+
+               ptep++;
+               addr += PAGE_SIZE;
+
+               if (!pte_none(pte) && pte_present(pte)) {
+                       pfn = pte_pfn(pte);
+
+                       if (pfn_valid(pfn)) {
+                               struct page *page = pfn_to_page(pfn);
+                               ClearPageReserved(page);
+
+                               __free_page(page);
+                               continue;
+                       }
+               }
+
+               pr_crit("%s: bad page in kernel page table\n",
+                       __func__);
+       } while (size -= PAGE_SIZE);
+
+       flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+       list_del(&c->vm_list);
+
+       spin_unlock_irqrestore(&consistent_lock, flags);
+
+       kfree(c);
+       return;
+
+no_area:
+       spin_unlock_irqrestore(&consistent_lock, flags);
+       pr_err("%s: trying to free invalid coherent area: %p\n",
+              __func__, vaddr);
+       dump_stack();
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+
+/*
+ * Common mmap helper: remap a live consistent region into user space.
+ * Returns -ENXIO when @cpu_addr is not a known consistent region or the
+ * requested window (vm_pgoff + length) falls outside it.
+ * Note: @size and @dma_addr are unused; the region's own extent is used.
+ */
+static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       int ret = -ENXIO;
+
+       unsigned long flags, user_size, kern_size;
+       struct metag_vm_region *c;
+
+       user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+       spin_lock_irqsave(&consistent_lock, flags);
+       c = metag_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+       spin_unlock_irqrestore(&consistent_lock, flags);
+
+       if (c) {
+               unsigned long off = vma->vm_pgoff;
+
+               kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
+
+               if (off < kern_size &&
+                   user_size <= (kern_size - off)) {
+                       ret = remap_pfn_range(vma, vma->vm_start,
+                                             page_to_pfn(c->vm_pages) + off,
+                                             user_size << PAGE_SHIFT,
+                                             vma->vm_page_prot);
+               }
+       }
+
+
+       return ret;
+}
+
+/* Map a coherent allocation into user space with an uncached protection. */
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+                     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL(dma_mmap_coherent);
+
+/* Map a coherent allocation into user space with write-combined protection. */
+int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+                         void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL(dma_mmap_writecombine);
+
+
+
+
+/*
+ * Initialise the consistent memory allocation.
+ */
+/*
+ * Build the page table covering CONSISTENT_START and mirror its pmd entry
+ * into the table at mmu_get_base() so the uncached mappings resolve.
+ * Leaves consistent_pte pointing at the new pte table on success.
+ */
+static int __init dma_alloc_init(void)
+{
+       pgd_t *pgd, *pgd_k;
+       pud_t *pud, *pud_k;
+       pmd_t *pmd, *pmd_k;
+       pte_t *pte;
+       int ret = 0;
+
+       /* do { } while (0) allows break-on-error instead of goto */
+       do {
+               int offset = pgd_index(CONSISTENT_START);
+               pgd = pgd_offset(&init_mm, CONSISTENT_START);
+               pud = pud_alloc(&init_mm, pgd, CONSISTENT_START);
+               pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START);
+               if (!pmd) {
+                       pr_err("%s: no pmd tables\n", __func__);
+                       ret = -ENOMEM;
+                       break;
+               }
+               WARN_ON(!pmd_none(*pmd));
+
+               pte = pte_alloc_kernel(pmd, CONSISTENT_START);
+               if (!pte) {
+                       pr_err("%s: no pte tables\n", __func__);
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               pgd_k = ((pgd_t *) mmu_get_base()) + offset;
+               pud_k = pud_offset(pgd_k, CONSISTENT_START);
+               pmd_k = pmd_offset(pud_k, CONSISTENT_START);
+               set_pmd(pmd_k, *pmd);
+
+               consistent_pte = pte;
+       } while (0);
+
+       return ret;
+}
+early_initcall(dma_alloc_init);
+
+/*
+ * make an area consistent to devices.
+ */
+/* @dma_direction is one of the DMA_* data-direction values. */
+void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
+{
+       /*
+        * Ensure any writes get through the write combiner. This is necessary
+        * even with DMA_FROM_DEVICE, or the write may dirty the cache after
+        * we've invalidated it and get written back during the DMA.
+        */
+
+       barrier();
+
+       switch (dma_direction) {
+       case DMA_BIDIRECTIONAL:
+               /*
+                * Writeback to ensure the device can see our latest changes and
+                * so that we have no dirty lines, and invalidate the cache
+                * lines too in preparation for receiving the buffer back
+                * (dma_sync_for_cpu) later.
+                */
+               flush_dcache_region(vaddr, size);
+               break;
+       case DMA_TO_DEVICE:
+               /*
+                * Writeback to ensure the device can see our latest changes.
+                * There's no need to invalidate as the device shouldn't write
+                * to the buffer.
+                */
+               writeback_dcache_region(vaddr, size);
+               break;
+       case DMA_FROM_DEVICE:
+               /*
+                * Invalidate to ensure we have no dirty lines that could get
+                * written back during the DMA. It's also safe to flush
+                * (writeback) here if necessary.
+                */
+               invalidate_dcache_region(vaddr, size);
+               break;
+       case DMA_NONE:
+               BUG();
+       }
+
+       /* order the cache maintenance before the device starts the DMA */
+       wmb();
+}
+EXPORT_SYMBOL(dma_sync_for_device);
+
+/*
+ * make an area consistent to the core.
+ */
+/* @dma_direction is one of the DMA_* data-direction values. */
+void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
+{
+       /*
+        * Hardware L2 cache prefetch doesn't occur across 4K physical
+        * boundaries, however according to Documentation/DMA-API-HOWTO.txt
+        * kmalloc'd memory is DMA'able, so accesses in nearby memory could
+        * trigger a cache fill in the DMA buffer.
+        *
+        * This should never cause dirty lines, so a flush or invalidate should
+        * be safe to allow us to see data from the device.
+        */
+       if (_meta_l2c_pf_is_enabled()) {
+               switch (dma_direction) {
+               case DMA_BIDIRECTIONAL:
+               case DMA_FROM_DEVICE:
+                       invalidate_dcache_region(vaddr, size);
+                       break;
+               case DMA_TO_DEVICE:
+                       /* The device shouldn't have written to the buffer */
+                       break;
+               case DMA_NONE:
+                       BUG();
+               }
+       }
+
+       /* order the cache maintenance before the CPU reads the buffer */
+       rmb();
+}
+EXPORT_SYMBOL(dma_sync_for_cpu);
diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..a774f32
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2008 Imagination Technologies Ltd.
+ * Licensed under the GPL
+ *
+ * Dynamic ftrace support.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+
+#define D04_MOVT_TEMPLATE      0x02200005
+#define D04_CALL_TEMPLATE      0xAC200005
+#define D1RTP_MOVT_TEMPLATE    0x03200005
+#define D1RTP_CALL_TEMPLATE    0xAC200006
+
+static const unsigned long NOP[2] = {0xa0fffffe, 0xa0fffffe};
+static unsigned long movt_and_call_insn[2];
+
+/*
+ * ftrace_nop_replace() - return the 2-instruction NOP sequence
+ * (MCOUNT_INSN_SIZE bytes) used to patch out an mcount call site.
+ *
+ * Cast to unsigned char * (the declared return type) instead of char *,
+ * avoiding a pointer-signedness mismatch warning on the return statement.
+ */
+static unsigned char *ftrace_nop_replace(void)
+{
+       return (unsigned char *)&NOP[0];
+}
+
+/*
+ * ftrace_call_replace() - build the two-instruction (MOVT + CALL) sequence
+ * that calls @addr from the patch site at @pc.
+ *
+ * The high/low 16-bit halves of @addr are shifted into the immediate fields
+ * of the instruction templates (the <<3 / >>13 place the immediate within
+ * the encoding).  Returns a pointer to a static buffer, so the caller must
+ * consume the bytes before the next call.
+ */
+static unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+       unsigned long hi16, low16;
+
+       hi16 = (addr & 0xffff0000) >> 13;
+       low16 = (addr & 0x0000ffff) << 3;
+
+       /*
+        * The compiler makes the call to mcount_wrapper()
+        * (Meta's wrapper around mcount()) through the register
+        * D0.4. So whenever we're patching one of those compiler-generated
+        * calls we also need to go through D0.4. Otherwise use D1RtP.
+        */
+       if (pc == (unsigned long)&ftrace_call) {
+               writel(D1RTP_MOVT_TEMPLATE | hi16, &movt_and_call_insn[0]);
+               writel(D1RTP_CALL_TEMPLATE | low16, &movt_and_call_insn[1]);
+       } else {
+               writel(D04_MOVT_TEMPLATE | hi16, &movt_and_call_insn[0]);
+               writel(D04_CALL_TEMPLATE | low16, &movt_and_call_insn[1]);
+       }
+
+       return (unsigned char *)&movt_and_call_insn[0];
+}
+
+/*
+ * ftrace_modify_code() - safely swap MCOUNT_INSN_SIZE bytes of text at @pc.
+ *
+ * Returns 0 on success, -EFAULT if the existing text can't be read,
+ * -EINVAL if it doesn't match @old_code, or -EPERM if the write fails.
+ */
+static int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
+                             unsigned char *new_code)
+{
+       unsigned char replaced[MCOUNT_INSN_SIZE];
+
+       /*
+        * Note: Due to modules and __init, code can
+        *  disappear and change, we need to protect against faulting
+        *  as well as code changing.
+        *
+        * No real locking needed, this code is run through
+        * kstop_machine.
+        */
+
+       /* read the text we want to modify */
+       if (probe_kernel_read(replaced, (void *)pc, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       /* Make sure it is what we expect it to be */
+       if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+               return -EINVAL;
+
+       /* replace the text with the new text */
+       if (probe_kernel_write((void *)pc, new_code, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       /* Make the new instructions visible to the instruction cache. */
+       flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+       return 0;
+}
+
+/* Redirect the ftrace_call patch site so it calls @func. */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long ip = (unsigned long)&ftrace_call;
+       unsigned char expected[MCOUNT_INSN_SIZE];
+       unsigned char *replacement;
+
+       /* Snapshot the current instructions, then build the new call. */
+       memcpy(expected, &ftrace_call, MCOUNT_INSN_SIZE);
+       replacement = ftrace_call_replace(ip, (unsigned long)func);
+
+       return ftrace_modify_code(ip, expected, replacement);
+}
+
+/* Turn the call to @addr at @rec->ip back into a NOP. */
+int ftrace_make_nop(struct module *mod,
+                   struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long ip = rec->ip;
+       unsigned char *expected = ftrace_call_replace(ip, addr);
+
+       return ftrace_modify_code(ip, expected, ftrace_nop_replace());
+}
+
+/* Turn the NOP at @rec->ip into a call to @addr. */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long ip = rec->ip;
+       unsigned char *expected = ftrace_nop_replace();
+
+       return ftrace_modify_code(ip, expected, ftrace_call_replace(ip, addr));
+}
+
+/* run from kstop_machine */
+/* Arch hook for dynamic-ftrace init; nothing extra to set up on Meta. */
+int __init ftrace_dyn_arch_init(void *data)
+{
+       /* The return code is returned via data: report success (0). */
+       writel(0, data);
+
+       return 0;
+}
diff --git a/arch/metag/kernel/ftrace_stub.S b/arch/metag/kernel/ftrace_stub.S
new file mode 100644 (file)
index 0000000..e70bff7
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2008 Imagination Technologies Ltd.
+ * Licensed under the GPL
+ *
+ */
+
+#include <asm/ftrace.h>
+
+       .text
+#ifdef CONFIG_DYNAMIC_FTRACE
+       .global _mcount_wrapper
+       .type   _mcount_wrapper,function
+_mcount_wrapper:
+       MOV     PC,D0.4
+
+       .global _ftrace_caller
+       .type   _ftrace_caller,function
+_ftrace_caller:
+       MOVT    D0Re0,#HI(_function_trace_stop)
+       ADD     D0Re0,D0Re0,#LO(_function_trace_stop)
+       GETD    D0Re0,[D0Re0]
+       CMP     D0Re0,#0
+       BEQ     $Lcall_stub
+       MOV     PC,D0.4
+$Lcall_stub:
+       MSETL   [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
+       MOV     D1Ar1, D0.4
+       MOV     D0Ar2, D1RtP
+       SUB     D1Ar1,D1Ar1,#MCOUNT_INSN_SIZE
+
+       .global _ftrace_call
+_ftrace_call:
+       MOVT    D1RtP,#HI(_ftrace_stub)
+       CALL    D1RtP,#LO(_ftrace_stub)
+       GETL    D0.4,  D1RtP, [A0StP++#(-8)]
+       GETL    D0Ar2, D1Ar1, [A0StP++#(-8)]
+       GETL    D0Ar4, D1Ar3, [A0StP++#(-8)]
+       GETL    D0Ar6, D1Ar5, [A0StP++#(-8)]
+       MOV     PC, D0.4
+#else
+
+       .global _mcount_wrapper
+       .type   _mcount_wrapper,function
+_mcount_wrapper:
+       MOVT    D0Re0,#HI(_function_trace_stop)
+       ADD     D0Re0,D0Re0,#LO(_function_trace_stop)
+       GETD    D0Re0,[D0Re0]
+       CMP     D0Re0,#0
+       BEQ     $Lcall_mcount
+       MOV     PC,D0.4
+$Lcall_mcount:
+       MSETL   [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
+       MOV     D1Ar1, D0.4
+       MOV     D0Ar2, D1RtP
+       MOVT    D0Re0,#HI(_ftrace_trace_function)
+       ADD     D0Re0,D0Re0,#LO(_ftrace_trace_function)
+       GET     D1Ar3,[D0Re0]
+       MOVT    D1Re0,#HI(_ftrace_stub)
+       ADD     D1Re0,D1Re0,#LO(_ftrace_stub)
+       CMP     D1Ar3,D1Re0
+       BEQ     $Ltrace_exit
+       MOV     D1RtP,D1Ar3
+       SUB     D1Ar1,D1Ar1,#MCOUNT_INSN_SIZE
+       SWAP    PC,D1RtP
+$Ltrace_exit:
+       GETL    D0.4,  D1RtP, [A0StP++#(-8)]
+       GETL    D0Ar2, D1Ar1, [A0StP++#(-8)]
+       GETL    D0Ar4, D1Ar3, [A0StP++#(-8)]
+       GETL    D0Ar6, D1Ar5, [A0StP++#(-8)]
+       MOV     PC, D0.4
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+       ! Default/no-op trace target: simply return to the caller.
+       .global _ftrace_stub
+_ftrace_stub:
+       MOV     PC,D1RtP
diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S
new file mode 100644 (file)
index 0000000..969dffa
--- /dev/null
@@ -0,0 +1,57 @@
+       ! Copyright 2005,2006,2007,2009 Imagination Technologies
+
+#include <linux/init.h>
+#include <generated/asm-offsets.h>
+#undef __exit
+
+       __HEAD
+       ! Setup the stack and get going into _metag_start_kernel
+       .global __start
+       .type   __start,function
+__start:
+       ! D1Ar1 contains pTBI (ISTAT)
+       ! D0Ar2 contains pTBI
+       ! D1Ar3 contains __pTBISegs
+       ! D0Ar4 contains kernel arglist pointer
+
+       MOVT    D0Re0,#HI(___pTBIs)
+       ADD     D0Re0,D0Re0,#LO(___pTBIs)
+       SETL    [D0Re0],D0Ar2,D1Ar1
+       MOVT    D0Re0,#HI(___pTBISegs)
+       ADD     D0Re0,D0Re0,#LO(___pTBISegs)
+       SETD    [D0Re0],D1Ar3
+       MOV     A0FrP,#0
+       MOV     D0Re0,#0
+       MOV     D1Re0,#0
+       MOV     D1Ar3,#0
+       MOV     D1Ar1,D0Ar4                     !Store kernel boot params
+       MOV     D1Ar5,#0
+       MOV     D0Ar6,#0
+#ifdef CONFIG_METAG_DSP
+       MOV     D0.8,#0
+#endif
+       MOVT    A0StP,#HI(_init_thread_union)
+       ADD     A0StP,A0StP,#LO(_init_thread_union)
+       ADD     A0StP,A0StP,#THREAD_INFO_SIZE
+       MOVT    D1RtP,#HI(_metag_start_kernel)
+       CALL    D1RtP,#LO(_metag_start_kernel)
+       .size   __start,.-__start
+
+       !! Needed by TBX
+       .global __exit
+       .type   __exit,function
+__exit:
+       XOR     TXENABLE,D0Re0,D0Re0
+       .size   __exit,.-__exit
+
+#ifdef CONFIG_SMP
+       ! Entry point for secondary CPUs: load this CPU's stack from
+       ! _secondary_data_stack, point A0StP past the thread_info, and
+       ! branch into the C secondary-start path.
+       .global _secondary_startup
+       .type _secondary_startup,function
+_secondary_startup:
+       MOVT    A0StP,#HI(_secondary_data_stack)
+       ADD     A0StP,A0StP,#LO(_secondary_data_stack)
+       GETD    A0StP,[A0StP]
+       ADD     A0StP,A0StP,#THREAD_INFO_SIZE
+       B       _secondary_start_kernel
+       .size   _secondary_startup,.-_secondary_startup
+#endif
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
new file mode 100644 (file)
index 0000000..87707ef
--- /dev/null
@@ -0,0 +1,323 @@
+/*
+ * Linux/Meta general interrupt handling code
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/irqchip/metag-ext.h>
+#include <linux/irqchip/metag.h>
+#include <linux/irqdomain.h>
+#include <linux/ratelimit.h>
+
+#include <asm/core_reg.h>
+#include <asm/mach/arch.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_4KSTACKS
+union irq_ctx {
+       struct thread_info      tinfo;
+       u32                     stack[THREAD_SIZE/sizeof(u32)];
+};
+
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
+#endif
+
+struct irq_domain *root_domain;
+
+/* irq_chip .irq_startup: ask the TBI layer to start servicing this hwirq. */
+static unsigned int startup_meta_irq(struct irq_data *data)
+{
+       tbi_startup_interrupt(data->hwirq);
+       return 0;
+}
+
+/* irq_chip .irq_shutdown: tell the TBI layer to stop servicing this hwirq. */
+static void shutdown_meta_irq(struct irq_data *data)
+{
+       tbi_shutdown_interrupt(data->hwirq);
+}
+
+/*
+ * do_IRQ() - low-level interrupt entry.
+ * @irq:  hardware IRQ number (mapped to a virq via the root domain below)
+ * @regs: register state at the point of interruption
+ *
+ * With CONFIG_4KSTACKS the handler is run on a separate per-CPU hardirq
+ * stack; otherwise it is dispatched directly via generic_handle_irq().
+ */
+void do_IRQ(int irq, struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+#ifdef CONFIG_4KSTACKS
+       struct irq_desc *desc;
+       union irq_ctx *curctx, *irqctx;
+       u32 *isp;
+#endif
+
+       irq_enter();
+
+       /* Translate the hardware number into its Linux virq. */
+       irq = irq_linear_revmap(root_domain, irq);
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+       /* Debugging check for stack overflow: is there less than 1KB free? */
+       {
+               unsigned long sp;
+
+               sp = __core_reg_get(A0StP);
+               sp &= THREAD_SIZE - 1;
+
+               if (unlikely(sp > (THREAD_SIZE - 1024)))
+                       pr_err("Stack overflow in do_IRQ: %ld\n", sp);
+       }
+#endif
+
+
+#ifdef CONFIG_4KSTACKS
+       curctx = (union irq_ctx *) current_thread_info();
+       irqctx = hardirq_ctx[smp_processor_id()];
+
+       /*
+        * this is where we switch to the IRQ stack. However, if we are
+        * already using the IRQ stack (because we interrupted a hardirq
+        * handler) we can't do that and just have to keep using the
+        * current stack (which is the irq stack already after all)
+        */
+       if (curctx != irqctx) {
+               /* build the stack frame on the IRQ stack */
+               isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
+               irqctx->tinfo.task = curctx->tinfo.task;
+
+               /*
+                * Copy the softirq bits in preempt_count so that the
+                * softirq checks work in the hardirq context.
+                */
+               irqctx->tinfo.preempt_count =
+                       (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+                       (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
+               desc = irq_to_desc(irq);
+
+               /*
+                * Swap A0StP over to the hardirq stack, call
+                * desc->handle_irq(irq, desc), then restore the original
+                * stack pointer on return.
+                */
+               asm volatile (
+                       "MOV   D0.5,%0\n"
+                       "MOV   D1Ar1,%1\n"
+                       "MOV   D1RtP,%2\n"
+                       "MOV   D0Ar2,%3\n"
+                       "SWAP  A0StP,D0.5\n"
+                       "SWAP  PC,D1RtP\n"
+                       "MOV   A0StP,D0.5\n"
+                       :
+                       : "r" (isp), "r" (irq), "r" (desc->handle_irq),
+                         "r" (desc)
+                       : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
+                         "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
+                         "D0.5"
+                       );
+       } else
+#endif
+               generic_handle_irq(irq);
+
+       irq_exit();
+
+       set_irq_regs(old_regs);
+}
+
+#ifdef CONFIG_4KSTACKS
+
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+/*
+ * allocate per-cpu stacks for hardirq and for softirq processing
+ *
+ * The stacks themselves are carved out of the static hardirq_stack /
+ * softirq_stack arrays; the thread_info at the base of each is initialised
+ * here.  Idempotent: returns early if this CPU is already set up.
+ */
+void irq_ctx_init(int cpu)
+{
+       union irq_ctx *irqctx;
+
+       if (hardirq_ctx[cpu])
+               return;
+
+       irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+
+       hardirq_ctx[cpu] = irqctx;
+
+       irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = 0;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+
+       softirq_ctx[cpu] = irqctx;
+
+       pr_info("CPU %u irqstacks, hard=%p soft=%p\n",
+               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
+}
+
+/*
+ * irq_ctx_exit() - forget the per-CPU IRQ stack contexts for @cpu.
+ *
+ * Use the @cpu argument rather than smp_processor_id(): the original code
+ * ignored its parameter, so tearing down any CPU other than the caller's
+ * cleared the wrong slot.  Also clear the softirq context so a subsequent
+ * irq_ctx_init(cpu) re-initialises both stacks from a clean slate.
+ */
+void irq_ctx_exit(int cpu)
+{
+       hardirq_ctx[cpu] = NULL;
+       softirq_ctx[cpu] = NULL;
+}
+
+extern asmlinkage void __do_softirq(void);
+
+/*
+ * do_softirq() - run pending softirqs on the per-CPU softirq stack.
+ *
+ * No-op when called from interrupt context (softirqs will be run on
+ * irq_exit instead).  Interrupts are disabled around the check-and-run
+ * so the pending mask can't change underneath us.
+ */
+asmlinkage void do_softirq(void)
+{
+       unsigned long flags;
+       struct thread_info *curctx;
+       union irq_ctx *irqctx;
+       u32 *isp;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       if (local_softirq_pending()) {
+               curctx = current_thread_info();
+               irqctx = softirq_ctx[smp_processor_id()];
+               irqctx->tinfo.task = curctx->task;
+
+               /* build the stack frame on the softirq stack */
+               isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
+
+               /* Swap A0StP to the softirq stack, call __do_softirq(),
+                * then restore the original stack pointer. */
+               asm volatile (
+                       "MOV   D0.5,%0\n"
+                       "SWAP  A0StP,D0.5\n"
+                       "CALLR D1RtP,___do_softirq\n"
+                       "MOV   A0StP,D0.5\n"
+                       :
+                       : "r" (isp)
+                       : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
+                         "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
+                         "D0.5"
+                       );
+               /*
+                * Shouldn't happen, we returned above if in_interrupt():
+                */
+               WARN_ON_ONCE(softirq_count());
+       }
+
+       local_irq_restore(flags);
+}
+#endif
+
+static struct irq_chip meta_irq_type = {
+       .name = "META-IRQ",
+       .irq_startup = startup_meta_irq,
+       .irq_shutdown = shutdown_meta_irq,
+};
+
+/**
+ * tbisig_map() - Map a TBI signal number to a virtual IRQ number.
+ * @hw:                Number of the TBI signal. Must be in range.
+ *
+ * Returns:    The virtual IRQ number of the TBI signal number IRQ specified by
+ *             @hw.
+ */
+int tbisig_map(unsigned int hw)
+{
+       /* Create (or look up an existing) mapping in the root domain. */
+       int virq = irq_create_mapping(root_domain, hw);
+
+       return virq;
+}
+
+/**
+ * metag_tbisig_map() - map a tbi signal to a Linux virtual IRQ number
+ * @d:         root irq domain
+ * @irq:       virtual irq number
+ * @hw:                hardware irq number (TBI signal number)
+ *
+ * This sets up a virtual irq for a specified TBI signal number.
+ */
+/* irq_domain .map callback: attach the Meta irq_chip and flow handler. */
+static int metag_tbisig_map(struct irq_domain *d, unsigned int irq,
+                           irq_hw_number_t hw)
+{
+#ifdef CONFIG_SMP
+       /* TBI signals are per-CPU on SMP, so use the per-CPU flow handler. */
+       irq_set_chip_and_handler(irq, &meta_irq_type, handle_percpu_irq);
+#else
+       irq_set_chip_and_handler(irq, &meta_irq_type, handle_simple_irq);
+#endif
+       return 0;
+}
+
+static const struct irq_domain_ops metag_tbisig_domain_ops = {
+       .map = metag_tbisig_map,
+};
+
+/*
+ * void init_IRQ(void)
+ *
+ * Parameters: None
+ *
+ * Returns:    Nothing
+ *
+ * This function should be called during kernel startup to initialize
+ * the IRQ handling routines.
+ */
+void __init init_IRQ(void)
+{
+       /* 32 entries: one per TBI signal number. */
+       root_domain = irq_domain_add_linear(NULL, 32,
+                                           &metag_tbisig_domain_ops, NULL);
+       if (unlikely(!root_domain))
+               panic("init_IRQ: cannot add root IRQ domain");
+
+       /* Per-CPU IRQ stacks for the boot CPU (no-op without 4KSTACKS). */
+       irq_ctx_init(smp_processor_id());
+
+       init_internal_IRQ();
+       init_external_IRQ();
+
+       /* Give the board/machine description a chance to add its IRQs. */
+       if (machine_desc->init_irq)
+               machine_desc->init_irq();
+}
+
+/* Let the machine description override the default number of IRQs. */
+int __init arch_probe_nr_irqs(void)
+{
+       if (machine_desc->nr_irqs)
+               nr_irqs = machine_desc->nr_irqs;
+       return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* Re-target @irq at @cpu via the chip's irq_set_affinity, under desc->lock. */
+static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_chip *chip = irq_data_get_irq_chip(data);
+
+       raw_spin_lock_irq(&desc->lock);
+       /* Chips without an affinity callback are simply left alone. */
+       if (chip->irq_set_affinity)
+               chip->irq_set_affinity(data, cpumask_of(cpu), false);
+       raw_spin_unlock_irq(&desc->lock);
+}
+
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+       unsigned int i, cpu = smp_processor_id();
+       struct irq_desc *desc;
+
+       for_each_irq_desc(i, desc) {
+               struct irq_data *data = irq_desc_get_irq_data(desc);
+               unsigned int newcpu;
+
+               /* Per-CPU IRQs can't be migrated off their CPU. */
+               if (irqd_is_per_cpu(data))
+                       continue;
+
+               /* Skip IRQs not affine to the CPU going offline. */
+               if (!cpumask_test_cpu(cpu, data->affinity))
+                       continue;
+
+               newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
+
+               /* No allowed online CPU: widen the affinity and retry. */
+               if (newcpu >= nr_cpu_ids) {
+                       pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+                                           i, cpu);
+
+                       cpumask_setall(data->affinity);
+                       newcpu = cpumask_any_and(data->affinity,
+                                                cpu_online_mask);
+               }
+
+               route_irq(data, i, newcpu);
+       }
+}
diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c
new file mode 100644 (file)
index 0000000..50fcbec
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (C) 2009 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * The Meta KICK interrupt mechanism is generally a useful feature, so
+ * we provide an interface for registering multiple interrupt
+ * handlers. All the registered interrupt handlers are "chained". When
+ * a KICK interrupt is received the first function in the list is
+ * called. If that interrupt handler cannot handle the KICK the next
+ * one is called, then the next until someone handles it (or we run
+ * out of functions). As soon as one function handles the interrupt no
+ * other handlers are called.
+ *
+ * The only downside of chaining interrupt handlers is that each
+ * handler must be able to detect whether the KICK was intended for it
+ * or not.  For example, when the IPI handler runs and it sees that
+ * there are no IPI messages it must not signal that the KICK was
+ * handled, thereby giving the other handlers a chance to run.
+ *
+ * The reason that we provide our own interface for calling KICK
+ * handlers instead of using the generic kernel infrastructure is that
+ * the KICK handlers require access to a CPU's pTBI structure. So we
+ * pass it as an argument.
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#include <asm/traps.h>
+
+/*
+ * All accesses/manipulations of kick_handlers_list should be
+ * performed while holding kick_handlers_lock.
+ */
+static DEFINE_SPINLOCK(kick_handlers_lock);
+static LIST_HEAD(kick_handlers_list);
+
+/* Append @kh to the chain of KICK handlers. */
+void kick_register_func(struct kick_irq_handler *kh)
+{
+       unsigned long irqflags;
+
+       /* The list is only ever touched under kick_handlers_lock. */
+       spin_lock_irqsave(&kick_handlers_lock, irqflags);
+       list_add_tail(&kh->list, &kick_handlers_list);
+       spin_unlock_irqrestore(&kick_handlers_lock, irqflags);
+}
+EXPORT_SYMBOL(kick_register_func);
+
+/* Remove @kh from the chain of KICK handlers. */
+void kick_unregister_func(struct kick_irq_handler *kh)
+{
+       unsigned long irqflags;
+
+       /* The list is only ever touched under kick_handlers_lock. */
+       spin_lock_irqsave(&kick_handlers_lock, irqflags);
+       list_del(&kh->list);
+       spin_unlock_irqrestore(&kick_handlers_lock, irqflags);
+}
+EXPORT_SYMBOL(kick_unregister_func);
+
+/*
+ * kick_handler() - dispatch a KICK interrupt down the handler chain.
+ *
+ * Each registered handler is offered the KICK in turn until one sets
+ * *handled; the chain then stops and that handler's return value is used.
+ */
+TBIRES
+kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
+{
+       struct kick_irq_handler *kh;
+       int handled = 0;
+       /*
+        * Initialise ret so that an empty handler list (or a KICK that no
+        * handler claims) doesn't feed uninitialised data to tail_end();
+        * falling back to the incoming State is the safe no-op.  The
+        * original code left ret uninitialised in that case.
+        */
+       TBIRES ret = State;
+
+       head_end(State, ~INTS_OFF_MASK);
+
+       /* If we interrupted user code handle any critical sections. */
+       if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
+               restart_critical_section(State);
+
+       trace_hardirqs_off();
+
+       /*
+        * There is no need to disable interrupts here because we
+        * can't nest KICK interrupts in a KICK interrupt handler.
+        */
+       spin_lock(&kick_handlers_lock);
+
+       /* Walk the chain until some handler claims the KICK. */
+       list_for_each_entry(kh, &kick_handlers_list, list) {
+               ret = kh->func(State, SigNum, Triggers, Inst, pTBI, &handled);
+               if (handled)
+                       break;
+       }
+
+       spin_unlock(&kick_handlers_lock);
+
+       WARN_ON(!handled);
+
+       return tail_end(ret);
+}
diff --git a/arch/metag/kernel/machines.c b/arch/metag/kernel/machines.c
new file mode 100644 (file)
index 0000000..1edf6ba
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ *  arch/metag/kernel/machines.c
+ *
+ *  Copyright (C) 2012 Imagination Technologies Ltd.
+ *
+ *  Generic Meta Boards.
+ */
+
+#include <linux/init.h>
+#include <asm/irq.h>
+#include <asm/mach/arch.h>
+
+static const char *meta_boards_compat[] __initdata = {
+       "img,meta",
+       NULL,
+};
+
+MACHINE_START(META, "Generic Meta")
+       .dt_compat      = meta_boards_compat,
+MACHINE_END
diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c
new file mode 100644 (file)
index 0000000..ec872ef
--- /dev/null
@@ -0,0 +1,49 @@
+#include <linux/export.h>
+
+#include <asm/div64.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/string.h>
+#include <asm/tbx.h>
+
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
+#ifdef CONFIG_FLATMEM
+/* needed for the pfn_valid macro */
+EXPORT_SYMBOL(max_pfn);
+EXPORT_SYMBOL(min_low_pfn);
+#endif
+
+/* TBI symbols */
+EXPORT_SYMBOL(__TBI);
+EXPORT_SYMBOL(__TBIFindSeg);
+EXPORT_SYMBOL(__TBIPoll);
+EXPORT_SYMBOL(__TBITimeStamp);
+
+#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
+
+/* libgcc functions */
+DECLARE_EXPORT(__ashldi3);
+DECLARE_EXPORT(__ashrdi3);
+DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__divsi3);
+DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(__modsi3);
+DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__cmpdi2);
+DECLARE_EXPORT(__ucmpdi2);
+
+/* Maths functions */
+EXPORT_SYMBOL(div_u64);
+EXPORT_SYMBOL(div_s64);
+
+/* String functions */
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(mcount_wrapper);
+#endif
diff --git a/arch/metag/kernel/module.c b/arch/metag/kernel/module.c
new file mode 100644 (file)
index 0000000..986331c
--- /dev/null
@@ -0,0 +1,284 @@
+/*  Kernel module help for Meta.
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+*/
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sort.h>
+
+#include <asm/unaligned.h>
+
+/* Count how many different relocations (different symbol, different
+   addend) */
+static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
+{
+       unsigned int i, r_info, r_addend, _count_relocs;
+
+       _count_relocs = 0;
+       r_info = 0;
+       r_addend = 0;
+       /*
+        * Relies on the caller (get_plt_size) having sorted the table so
+        * that duplicate (symbol, addend) pairs are adjacent: comparing
+        * against the previous entry is then enough to de-duplicate.
+        */
+       for (i = 0; i < num; i++)
+               /* Only count relbranch relocs, others don't need stubs */
+               if (ELF32_R_TYPE(rela[i].r_info) == R_METAG_RELBRANCH &&
+                   (r_info != ELF32_R_SYM(rela[i].r_info) ||
+                    r_addend != rela[i].r_addend)) {
+                       _count_relocs++;
+                       r_info = ELF32_R_SYM(rela[i].r_info);
+                       r_addend = rela[i].r_addend;
+               }
+
+       return _count_relocs;
+}
+
+/*
+ * sort() comparator for Elf32_Rela entries, keyed on (r_info, r_addend).
+ *
+ * NOTE(review): x and y are deliberately(?) assigned from the swapped
+ * arguments, which inverts the sort order — confirm against the powerpc
+ * module loader this appears derived from.  Either direction is fine for
+ * count_relocs(), which only needs equal keys to end up adjacent.
+ */
+static int relacmp(const void *_x, const void *_y)
+{
+       const Elf32_Rela *x, *y;
+
+       y = (Elf32_Rela *)_x;
+       x = (Elf32_Rela *)_y;
+
+       /* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
+        * make the comparison cheaper/faster. It won't affect the sorting or
+        * the counting algorithms' performance
+        */
+       if (x->r_info < y->r_info)
+               return -1;
+       else if (x->r_info > y->r_info)
+               return 1;
+       else if (x->r_addend < y->r_addend)
+               return -1;
+       else if (x->r_addend > y->r_addend)
+               return 1;
+       else
+               return 0;
+}
+
+/* sort() swap callback: exchange two Elf32_Rela entries word by word. */
+static void relaswap(void *_x, void *_y, int size)
+{
+       uint32_t *x, *y, tmp;
+       int i;
+
+       y = (uint32_t *)_x;
+       x = (uint32_t *)_y;
+
+       /* A swap is symmetric, so the crossed x/y assignment is harmless. */
+       for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
+               tmp = x[i];
+               x[i] = y[i];
+               y[i] = tmp;
+       }
+}
+
+/* Get the potential trampolines size required of the init and
+   non-init sections */
+static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
+                                 const Elf32_Shdr *sechdrs,
+                                 const char *secstrings,
+                                 int is_init)
+{
+       unsigned long ret = 0;
+       unsigned i;
+
+       /* Everything marked ALLOC (this includes the exported
+          symbols) */
+       for (i = 1; i < hdr->e_shnum; i++) {
+               /* If it's called *.init*, and we're not init, we're
+                  not interested */
+               if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL)
+                   != is_init)
+                       continue;
+
+               /* We don't want to look at debug sections. */
+               if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != NULL)
+                       continue;
+
+               if (sechdrs[i].sh_type == SHT_RELA) {
+                       pr_debug("Found relocations in section %u\n", i);
+                       pr_debug("Ptr: %p.  Number: %u\n",
+                                (void *)hdr + sechdrs[i].sh_offset,
+                                sechdrs[i].sh_size / sizeof(Elf32_Rela));
+
+                       /* Sort the relocation information based on a symbol and
+                        * addend key. This is a stable O(n*log n) complexity
+                        * algorithm but it will reduce the complexity of
+                        * count_relocs() to linear complexity O(n)
+                        */
+                       sort((void *)hdr + sechdrs[i].sh_offset,
+                            sechdrs[i].sh_size / sizeof(Elf32_Rela),
+                            sizeof(Elf32_Rela), relacmp, relaswap);
+
+                       /* One PLT entry per distinct relbranch target. */
+                       ret += count_relocs((void *)hdr
+                                            + sechdrs[i].sh_offset,
+                                            sechdrs[i].sh_size
+                                            / sizeof(Elf32_Rela))
+                               * sizeof(struct metag_plt_entry);
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Locate the .plt / .init.plt sections supplied by the module linker
+ * script and resize them to hold the trampolines this module may need.
+ */
+int module_frob_arch_sections(Elf32_Ehdr *hdr,
+                             Elf32_Shdr *sechdrs,
+                             char *secstrings,
+                             struct module *me)
+{
+       unsigned int i;
+
+       /* Find .plt and .init.plt sections */
+       for (i = 0; i < hdr->e_shnum; i++) {
+               if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0)
+                       me->arch.init_plt_section = i;
+               else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
+                       me->arch.core_plt_section = i;
+       }
+       if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+               pr_err("Module doesn't contain .plt or .init.plt sections.\n");
+               return -ENOEXEC;
+       }
+
+       /* Override their sizes */
+       sechdrs[me->arch.core_plt_section].sh_size
+               = get_plt_size(hdr, sechdrs, secstrings, 0);
+       sechdrs[me->arch.core_plt_section].sh_type = SHT_NOBITS;
+       sechdrs[me->arch.init_plt_section].sh_size
+               = get_plt_size(hdr, sechdrs, secstrings, 1);
+       sechdrs[me->arch.init_plt_section].sh_type = SHT_NOBITS;
+       return 0;
+}
+
+/* Set up a trampoline in the PLT to bounce us to the distant function */
+static uint32_t do_plt_call(void *location, Elf32_Addr val,
+                           Elf32_Shdr *sechdrs, struct module *mod)
+{
+       struct metag_plt_entry *entry;
+       /* Instructions used to do the indirect jump.  */
+       uint32_t tramp[2];
+
+       /* We have to trash a register, so we assume that any control
+          transfer more than 21-bits away must be a function call
+          (so we can use a call-clobbered register).  */
+
+       /* MOVT D0Re0,#HI(v) */
+       tramp[0] = 0x02000005 | (((val & 0xffff0000) >> 16) << 3);
+       /* JUMP D0Re0,#LO(v) */
+       tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3);
+
+       /* Init, or core PLT? */
+       if (location >= mod->module_core
+           && location < mod->module_core + mod->core_size)
+               entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+       else
+               entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
+
+       /* Find this entry, or if that fails, the next avail. entry.
+        * Entries are never removed, so an all-zero tramp[0] marks the
+        * first free slot. */
+       while (entry->tramp[0])
+               if (entry->tramp[0] == tramp[0] && entry->tramp[1] == tramp[1])
+                       return (uint32_t)entry;
+               else
+                       entry++;
+
+       entry->tramp[0] = tramp[0];
+       entry->tramp[1] = tramp[1];
+
+       return (uint32_t)entry;
+}
+
+/*
+ * Apply a table of RELA relocations to an already-loaded module section.
+ * Returns 0 on success or -ENOEXEC for an unknown relocation type.
+ */
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+                  const char *strtab,
+                  unsigned int symindex,
+                  unsigned int relsec,
+                  struct module *me)
+{
+       unsigned int i;
+       Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+       Elf32_Sym *sym;
+       Elf32_Addr relocation;
+       uint32_t *location;
+       int32_t value;
+
+       pr_debug("Applying relocate section %u to %u\n", relsec,
+                sechdrs[relsec].sh_info);
+       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+               /* This is where to make the change */
+               location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+                       + rel[i].r_offset;
+               /* This is the symbol it is referring to.  Note that all
+                  undefined symbols have been resolved.  */
+               sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+                       + ELF32_R_SYM(rel[i].r_info);
+               relocation = sym->st_value + rel[i].r_addend;
+
+               switch (ELF32_R_TYPE(rel[i].r_info)) {
+               case R_METAG_NONE:
+                       break;
+               case R_METAG_HIADDR16:
+                       relocation >>= 16;
+                       /* fall through: the low 16 bits are patched the
+                        * same way for both HIADDR16 and LOADDR16 */
+               case R_METAG_LOADDR16:
+                       *location = (*location & 0xfff80007) |
+                               ((relocation & 0xffff) << 3);
+                       break;
+               case R_METAG_ADDR32:
+                       /*
+                        * Packed data structures may cause a misaligned
+                        * R_METAG_ADDR32 to be emitted.
+                        */
+                       put_unaligned(relocation, location);
+                       break;
+               case R_METAG_GETSETOFF:
+                       *location += ((relocation & 0xfff) << 7);
+                       break;
+               case R_METAG_RELBRANCH:
+                       /* The 19-bit offset field must be empty. */
+                       if (*location & (0x7ffff << 5)) {
+                               pr_err("bad relbranch relocation\n");
+                               break;
+                       }
+
+                       /* This jump is too big for the offset slot. Build
+                        * a PLT to jump through to get to where we want to go.
+                        * NB: 21bit check - not scaled to 19bit yet
+                        */
+                       if (((int32_t)(relocation -
+                                      (uint32_t)location) > 0xfffff) ||
+                           ((int32_t)(relocation -
+                                      (uint32_t)location) < -0xfffff)) {
+                               relocation = do_plt_call(location, relocation,
+                                                        sechdrs, me);
+                       }
+
+                       value = relocation - (uint32_t)location;
+
+                       /* branch instruction aligned */
+                       value /= 4;
+
+                       if ((value > 0x7ffff) || (value < -0x7ffff)) {
+                               /*
+                                * this should have been caught by the code
+                                * above!
+                                */
+                               pr_err("overflow of relbranch reloc\n");
+                       }
+
+                       *location = (*location & (~(0x7ffff << 5))) |
+                               ((value & 0x7ffff) << 5);
+                       break;
+
+               default:
+                       pr_err("module %s: Unknown relocation: %u\n",
+                              me->name, ELF32_R_TYPE(rel[i].r_info));
+                       return -ENOEXEC;
+               }
+       }
+       return 0;
+}
diff --git a/arch/metag/kernel/perf/Makefile b/arch/metag/kernel/perf/Makefile
new file mode 100644 (file)
index 0000000..b158cb2
--- /dev/null
@@ -0,0 +1,3 @@
+# Makefile for performance event core
+
+obj-y += perf_event.o
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
new file mode 100644 (file)
index 0000000..a876d5f
--- /dev/null
@@ -0,0 +1,861 @@
+/*
+ * Meta performance counter support.
+ *  Copyright (C) 2012 Imagination Technologies Ltd
+ *
+ * This code is based on the sh pmu code:
+ *  Copyright (C) 2009 Paul Mundt
+ *
+ * and on the arm pmu code:
+ *  Copyright (C) 2009 picoChip Designs, Ltd., James Iles
+ *  Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqchip/metag.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+
+#include <asm/core_reg.h>
+#include <asm/hwthread.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "perf_event.h"
+
+static int _hw_perf_event_init(struct perf_event *);
+static void _hw_perf_event_destroy(struct perf_event *);
+
+/* Determines which core type we are */
+static struct metag_pmu *metag_pmu __read_mostly;
+
+/* Processor specific data */
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+/* PMU admin */
+/*
+ * perf_pmu_name - return the name of the active PMU.
+ *
+ * Returns NULL until init_hw_perf_events() has detected a supported core
+ * and set up metag_pmu.
+ */
+const char *perf_pmu_name(void)
+{
+       if (metag_pmu)
+               return metag_pmu->pmu.name;
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+/*
+ * perf_num_counters - number of hardware counters available.
+ *
+ * Returns 0 if the PMU has not been initialised.
+ */
+int perf_num_counters(void)
+{
+       if (metag_pmu)
+               return metag_pmu->max_events;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+/* Non-zero once init_hw_perf_events() has installed a PMU description. */
+static inline int metag_pmu_initialised(void)
+{
+       return !!metag_pmu;
+}
+
+/*
+ * Free the overflow IRQs claimed by reserve_pmu_hardware(), in reverse
+ * order of acquisition. On cores older than rev 0x0104 there are no
+ * overflow interrupts, so there is nothing to release.
+ */
+static void release_pmu_hardware(void)
+{
+       int irq;
+       unsigned int version = (metag_pmu->version &
+                       (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
+                       METAC_ID_REV_S;
+
+       /* Early cores don't have overflow interrupts */
+       if (version < 0x0104)
+               return;
+
+       irq = internal_irq_map(17);
+       if (irq >= 0)
+               free_irq(irq, (void *)1);
+
+       irq = internal_irq_map(16);
+       if (irq >= 0)
+               free_irq(irq, (void *)0);
+}
+
+/*
+ * Claim the two counter-overflow IRQs (HWSTATMETA bits 16 and 17).
+ * Called once when the first event is created; paired with
+ * release_pmu_hardware() from _hw_perf_event_destroy().
+ *
+ * Returns 0 on success or a negative errno from request_irq().
+ */
+static int reserve_pmu_hardware(void)
+{
+       int err = 0, irq[2];
+       unsigned int version = (metag_pmu->version &
+                       (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
+                       METAC_ID_REV_S;
+
+       /* Early cores don't have overflow interrupts */
+       if (version < 0x0104)
+               goto out;
+
+       /*
+        * Bit 16 on HWSTATMETA is the interrupt for performance counter 0;
+        * similarly, 17 is the interrupt for performance counter 1.
+        * We can't (yet) interrupt on the cycle counter, because it's a
+        * register, however it holds a 32-bit value as opposed to 24-bit.
+        */
+       irq[0] = internal_irq_map(16);
+       if (irq[0] < 0) {
+               pr_err("unable to map internal IRQ %d\n", 16);
+               goto out;
+       }
+       err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING,
+                       "metagpmu0", (void *)0);
+       if (err) {
+               pr_err("unable to request IRQ%d for metag PMU counters\n",
+                               irq[0]);
+               goto out;
+       }
+
+       irq[1] = internal_irq_map(17);
+       if (irq[1] < 0) {
+               pr_err("unable to map internal IRQ %d\n", 17);
+               /*
+                * NOTE(review): err is still 0 here (and when mapping
+                * irq[0] fails above), so these failure paths return
+                * success — confirm whether an error code should be set.
+                */
+               goto out_irq1;
+       }
+       err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING,
+                       "metagpmu1", (void *)1);
+       if (err) {
+               pr_err("unable to request IRQ%d for metag PMU counters\n",
+                               irq[1]);
+               goto out_irq1;
+       }
+
+       return 0;
+
+out_irq1:
+       free_irq(irq[0], (void *)0);
+out:
+       return err;
+}
+
+/* PMU operations */
+/* Whole-PMU enable: intentionally empty, counters are enabled per event. */
+static void metag_pmu_enable(struct pmu *pmu)
+{
+}
+
+/* Whole-PMU disable: intentionally empty, counters are disabled per event. */
+static void metag_pmu_disable(struct pmu *pmu)
+{
+}
+
+/*
+ * pmu::event_init callback. Reserves the overflow IRQs on first use
+ * (refcounted via metag_pmu->active_events) and maps the generic event
+ * to a hardware encoding. Returns -ENOENT for event types this PMU
+ * doesn't handle so the core can try another PMU.
+ */
+static int metag_pmu_event_init(struct perf_event *event)
+{
+       int err = 0;
+       atomic_t *active_events = &metag_pmu->active_events;
+
+       if (!metag_pmu_initialised()) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       if (has_branch_stack(event))
+               return -EOPNOTSUPP;
+
+       event->destroy = _hw_perf_event_destroy;
+
+       if (!atomic_inc_not_zero(active_events)) {
+               mutex_lock(&metag_pmu->reserve_mutex);
+               if (atomic_read(active_events) == 0)
+                       err = reserve_pmu_hardware();
+
+               /*
+                * NOTE(review): if reserve_pmu_hardware() failed, err is
+                * set here but execution still falls through to the switch
+                * below, which can overwrite err — confirm this is intended.
+                */
+               if (!err)
+                       atomic_inc(active_events);
+
+               mutex_unlock(&metag_pmu->reserve_mutex);
+       }
+
+       /* Hardware and caches counters */
+       switch (event->attr.type) {
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               err = _hw_perf_event_init(event);
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
+       if (err)
+               event->destroy(event);
+
+out:
+       return err;
+}
+
+/*
+ * Read the hardware counter at @idx and fold the change since the last
+ * read into @event's generic count.
+ */
+void metag_pmu_event_update(struct perf_event *event,
+               struct hw_perf_event *hwc, int idx)
+{
+       u64 prev_raw_count, new_raw_count;
+       s64 delta;
+
+       /*
+        * If this counter is chained, it may be that the previous counter
+        * value has been changed beneath us.
+        *
+        * To get around this, we read and exchange the new raw count, then
+        * add the delta (new - prev) to the generic counter atomically.
+        *
+        * Without interrupts, this is the simplest approach.
+        */
+again:
+       prev_raw_count = local64_read(&hwc->prev_count);
+       new_raw_count = metag_pmu->read(idx);
+
+       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+                       new_raw_count) != prev_raw_count)
+               goto again;
+
+       /*
+        * Calculate the delta and add it to the counter.
+        *
+        * NOTE(review): the hardware counters are 24-bit (see MAX_PERIOD),
+        * but delta is not masked to that width, so a counter wrap makes
+        * delta large and negative; hwc->period_left is also never
+        * decremented here — confirm against the sampling path.
+        */
+       delta = new_raw_count - prev_raw_count;
+
+       local64_add(delta, &event->count);
+}
+
+/*
+ * Program the counter so it overflows after the event's sample period.
+ * Writes -left (bounded to MAX_PERIOD) so the 24-bit counter wraps to 0
+ * after 'left' increments. Returns 1 if the period was restarted.
+ */
+int metag_pmu_event_set_period(struct perf_event *event,
+               struct hw_perf_event *hwc, int idx)
+{
+       s64 left = local64_read(&hwc->period_left);
+       s64 period = hwc->sample_period;
+       int ret = 0;
+
+       /* Counter overshot by more than a full period: restart from scratch. */
+       if (unlikely(left <= -period)) {
+               left = period;
+               local64_set(&hwc->period_left, left);
+               hwc->last_period = period;
+               ret = 1;
+       }
+
+       /* Counter overshot within one period: carry the remainder over. */
+       if (unlikely(left <= 0)) {
+               left += period;
+               local64_set(&hwc->period_left, left);
+               hwc->last_period = period;
+               ret = 1;
+       }
+
+       if (left > (s64)metag_pmu->max_period)
+               left = metag_pmu->max_period;
+
+       /* Early cores have no write method (clear-on-write counters). */
+       if (metag_pmu->write)
+               metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD);
+
+       perf_event_update_userpage(event);
+
+       return ret;
+}
+
+/*
+ * pmu::start callback: reprogram the sample period and enable the
+ * counter the event was assigned to by metag_pmu_add().
+ */
+static void metag_pmu_start(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       /* The event must have been given a counter index by ->add first. */
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+
+       /*
+        * We always have to reprogram the period, so ignore PERF_EF_RELOAD.
+        */
+       if (flags & PERF_EF_RELOAD)
+               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+       hwc->state = 0;
+
+       /*
+        * Reset the period.
+        * Some counters can't be stopped (i.e. are core global), so when the
+        * counter was 'stopped' we merely disabled the IRQ. If we don't reset
+        * the period, then we'll either: a) get an overflow too soon;
+        * or b) too late if the overflow happened since disabling.
+        * Obviously, this has little bearing on cores without the overflow
+        * interrupt, as the performance counter resets to zero on write
+        * anyway.
+        */
+       if (metag_pmu->max_period)
+               metag_pmu_event_set_period(event, hwc, hwc->idx);
+       cpuc->events[idx] = event;
+       metag_pmu->enable(hwc, idx);
+}
+
+/*
+ * pmu::stop callback: read the final count, disable the counter and
+ * mark the event stopped/up-to-date. Idempotent via PERF_HES_STOPPED.
+ */
+static void metag_pmu_stop(struct perf_event *event, int flags)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       /*
+        * We should always update the counter on stop; see comment above
+        * why.
+        */
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               metag_pmu_event_update(event, hwc, hwc->idx);
+               metag_pmu->disable(hwc, hwc->idx);
+               hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       }
+}
+
+/*
+ * pmu::add callback: allocate a hardware counter index for the event.
+ * Config 0x100 is the instruction/cycle pseudo-counter and always maps
+ * to METAG_INST_COUNTER; everything else takes the first free counter
+ * below it. Returns -EAGAIN when no counter is available.
+ */
+static int metag_pmu_add(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = 0, ret = 0;
+
+       perf_pmu_disable(event->pmu);
+
+       /* check whether we're counting instructions */
+       if (hwc->config == 0x100) {
+               if (__test_and_set_bit(METAG_INST_COUNTER,
+                               cpuc->used_mask)) {
+                       ret = -EAGAIN;
+                       goto out;
+               }
+               idx = METAG_INST_COUNTER;
+       } else {
+               /* Check whether we have a spare counter */
+               idx = find_first_zero_bit(cpuc->used_mask,
+                               atomic_read(&metag_pmu->active_events));
+               if (idx >= METAG_INST_COUNTER) {
+                       ret = -EAGAIN;
+                       goto out;
+               }
+
+               __set_bit(idx, cpuc->used_mask);
+       }
+       hwc->idx = idx;
+
+       /* Make sure the counter is disabled */
+       metag_pmu->disable(hwc, idx);
+
+       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+       if (flags & PERF_EF_START)
+               metag_pmu_start(event, PERF_EF_RELOAD);
+
+       perf_event_update_userpage(event);
+out:
+       perf_pmu_enable(event->pmu);
+       return ret;
+}
+
+/*
+ * pmu::del callback: stop the event and release its counter index back
+ * to the per-cpu used_mask.
+ */
+static void metag_pmu_del(struct perf_event *event, int flags)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       WARN_ON(idx < 0);
+       metag_pmu_stop(event, PERF_EF_UPDATE);
+       cpuc->events[idx] = NULL;
+       __clear_bit(idx, cpuc->used_mask);
+
+       perf_event_update_userpage(event);
+}
+
+/* pmu::read callback: refresh the event count from the hardware counter. */
+static void metag_pmu_read(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       /* Don't read disabled counters! */
+       if (hwc->idx < 0)
+               return;
+
+       metag_pmu_event_update(event, hwc, hwc->idx);
+}
+
+/* Callbacks handed to perf core via perf_pmu_register(). */
+static struct pmu pmu = {
+       .pmu_enable     = metag_pmu_enable,
+       .pmu_disable    = metag_pmu_disable,
+
+       .event_init     = metag_pmu_event_init,
+
+       .add            = metag_pmu_add,
+       .del            = metag_pmu_del,
+       .start          = metag_pmu_start,
+       .stop           = metag_pmu_stop,
+       .read           = metag_pmu_read,
+};
+
+/* Core counter specific functions */
+/*
+ * Generic perf hardware event id -> Meta counter encoding.
+ * -1 marks events this hardware cannot count.
+ */
+static const int metag_general_events[] = {
+       [PERF_COUNT_HW_CPU_CYCLES] = 0x03,
+       [PERF_COUNT_HW_INSTRUCTIONS] = 0x100,
+       [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
+       [PERF_COUNT_HW_CACHE_MISSES] = -1,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
+       [PERF_COUNT_HW_BRANCH_MISSES] = -1,
+       [PERF_COUNT_HW_BUS_CYCLES] = -1,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1,
+       [PERF_COUNT_HW_REF_CPU_CYCLES] = -1,
+};
+
+/*
+ * Cache event map: [cache][op][result] -> Meta counter encoding.
+ * Entries marked CACHE_OP_UNSUPPORTED cannot be counted on this hardware
+ * (see the sentinel handling note in _hw_perf_cache_event()).
+ */
+static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = 0x08,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = 0x09,
+                       [C(RESULT_MISS)] = 0x0a,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = 0xd0,
+                       [C(RESULT_MISS)] = 0xd2,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = 0xd4,
+                       [C(RESULT_MISS)] = 0xd5,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = 0xd1,
+                       [C(RESULT_MISS)] = 0xd3,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(NODE)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+               },
+       },
+};
+
+
+/*
+ * event->destroy callback: drop the active_events reference and release
+ * the overflow IRQs when the last event goes away.
+ */
+static void _hw_perf_event_destroy(struct perf_event *event)
+{
+       atomic_t *active_events = &metag_pmu->active_events;
+       struct mutex *pmu_mutex = &metag_pmu->reserve_mutex;
+
+       if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) {
+               release_pmu_hardware();
+               mutex_unlock(pmu_mutex);
+       }
+}
+
+/*
+ * Translate a PERF_TYPE_HW_CACHE config (type | op << 8 | result << 16)
+ * into a hardware event encoding via the PMU's cache_events table.
+ * Returns 0 and stores the encoding in *evp on success.
+ */
+static int _hw_perf_cache_event(int config, int *evp)
+{
+       unsigned long type, op, result;
+       int ev;
+
+       if (!metag_pmu->cache_events)
+               return -EINVAL;
+
+       /* Unpack config */
+       type = config & 0xff;
+       op = (config >> 8) & 0xff;
+       result = (config >> 16) & 0xff;
+
+       if (type >= PERF_COUNT_HW_CACHE_MAX ||
+                       op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+                       result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+               return -EINVAL;
+
+       ev = (*metag_pmu->cache_events)[type][op][result];
+       /*
+        * NOTE(review): the table marks unsupported entries with
+        * CACHE_OP_UNSUPPORTED (0xfffe), but the checks below only reject
+        * 0 and -1, so 0xfffe would be passed through as a real event
+        * encoding — confirm the intended sentinel values.
+        */
+       if (ev == 0)
+               return -EOPNOTSUPP;
+       if (ev == -1)
+               return -EINVAL;
+       *evp = ev;
+       return 0;
+}
+
+/*
+ * Common init for PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events:
+ * resolve the hardware encoding, reject sampling on cores without
+ * overflow interrupts, and pick a default sample period.
+ */
+static int _hw_perf_event_init(struct perf_event *event)
+{
+       struct perf_event_attr *attr = &event->attr;
+       struct hw_perf_event *hwc = &event->hw;
+       int mapping = 0, err;
+
+       switch (attr->type) {
+       case PERF_TYPE_HARDWARE:
+               if (attr->config >= PERF_COUNT_HW_MAX)
+                       return -EINVAL;
+
+               mapping = metag_pmu->event_map(attr->config);
+               break;
+
+       case PERF_TYPE_HW_CACHE:
+               err = _hw_perf_cache_event(attr->config, &mapping);
+               if (err)
+                       return err;
+               break;
+       }
+
+       /* Return early if the event is unsupported */
+       if (mapping == -1)
+               return -EINVAL;
+
+       /*
+        * Early cores have "limited" counters - they have no overflow
+        * interrupts - and so are unable to do sampling without extra work
+        * and timer assistance.
+        */
+       if (metag_pmu->max_period == 0) {
+               if (hwc->sample_period)
+                       return -EINVAL;
+       }
+
+       /*
+        * Don't assign an index until the event is placed into the hardware.
+        * -1 signifies that we're still deciding where to put it. On SMP
+        * systems each core has its own set of counters, so we can't do any
+        * constraint checking yet.
+        */
+       hwc->idx = -1;
+
+       /* Store the event encoding */
+       hwc->config |= (unsigned long)mapping;
+
+       /*
+        * For non-sampling runs, limit the sample_period to half of the
+        * counter width. This way, the new counter value should be less
+        * likely to overtake the previous one (unless there are IRQ latency
+        * issues...)
+        */
+       if (metag_pmu->max_period) {
+               if (!hwc->sample_period) {
+                       hwc->sample_period = metag_pmu->max_period >> 1;
+                       hwc->last_period = hwc->sample_period;
+                       local64_set(&hwc->period_left, hwc->sample_period);
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Program and start the hardware counter @idx for @event.
+ * The instruction pseudo-counter (METAG_INST_COUNTER) is just the
+ * TXTACTCYC core register and only needs resetting; real counters are
+ * programmed via the PERF_COUNT/PERF_ICORE/PERF_CHAN register banks.
+ */
+static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
+{
+       struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+       unsigned int config = event->config;
+       unsigned int tmp = config & 0xf0;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       /*
+        * Check if we're enabling the instruction counter (index of
+        * MAX_HWEVENTS - 1)
+        */
+       if (METAG_INST_COUNTER == idx) {
+               WARN_ONCE((config != 0x100),
+                       "invalid configuration (%d) for counter (%d)\n",
+                       config, idx);
+
+               /* Reset the cycle count */
+               __core_reg_set(TXTACTCYC, 0);
+               goto unlock;
+       }
+
+       /* Check for a core internal or performance channel event. */
+       if (tmp) {
+               void *perf_addr = (void *)PERF_COUNT(idx);
+
+               /*
+                * Anything other than a cycle count will write the low-
+                * nibble to the correct counter register.
+                */
+               switch (tmp) {
+               case 0xd0:
+                       perf_addr = (void *)PERF_ICORE(idx);
+                       break;
+
+               case 0xf0:
+                       perf_addr = (void *)PERF_CHAN(idx);
+                       break;
+               }
+
+               metag_out32((tmp & 0x0f), perf_addr);
+
+               /*
+                * Now we use the high nibble as the performance event to
+                * count.
+                */
+               config = tmp >> 4;
+       }
+
+       /*
+        * Enabled counters start from 0. Early cores clear the count on
+        * write but newer cores don't, so we make sure that the count is
+        * set to 0.
+        *
+        * NOTE(review): get_cpu() below disables preemption and has no
+        * matching put_cpu(), so the preempt count is left raised —
+        * confirm whether a plain CPU id lookup was intended (we already
+        * hold pmu_lock with IRQs off here).
+        */
+       tmp = ((config & 0xf) << 28) |
+                       ((1 << 24) << cpu_2_hwthread_id[get_cpu()]);
+       metag_out32(tmp, PERF_COUNT(idx));
+unlock:
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+/*
+ * Stop hardware counter @idx by clearing its thread-mask/event bits
+ * (top byte) while preserving the 24-bit count. The cycle pseudo-counter
+ * cannot be stopped and is left alone.
+ */
+static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx)
+{
+       struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+       unsigned int tmp = 0;
+       unsigned long flags;
+
+       /*
+        * The cycle counter can't be disabled per se, as it's a hardware
+        * thread register which is always counting. We merely return if this
+        * is the counter we're attempting to disable.
+        */
+       if (METAG_INST_COUNTER == idx)
+               return;
+
+       /*
+        * The counter value _should_ have been read prior to disabling,
+        * as if we're running on an early core then the value gets reset to
+        * 0, and any read after that would be useless. On the newer cores,
+        * however, it's better to read-modify-update this for purposes of
+        * the overflow interrupt.
+        * Here we remove the thread id AND the event nibble (there are at
+        * least two events that count events that are core global and ignore
+        * the thread id mask). This only works because we don't mix thread
+        * performance counts, and event 0x00 requires a thread id mask!
+        */
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       tmp = metag_in32(PERF_COUNT(idx));
+       tmp &= 0x00ffffff;
+       metag_out32(tmp, PERF_COUNT(idx));
+
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+/*
+ * Return the current value of counter @idx: the 24-bit count field of
+ * the PERF_COUNT register, or the TXTACTCYC register for the cycle
+ * pseudo-counter (which is cleared by the read itself).
+ */
+static u64 metag_pmu_read_counter(int idx)
+{
+       u32 tmp = 0;
+
+       /* The act of reading the cycle counter also clears it */
+       if (METAG_INST_COUNTER == idx) {
+               __core_reg_swap(TXTACTCYC, tmp);
+               goto out;
+       }
+
+       tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+out:
+       return tmp;
+}
+
+/*
+ * Write @val into the 24-bit count field of counter @idx, preserving the
+ * thread-mask/event bits in the top byte. Writes to the cycle
+ * pseudo-counter are ignored.
+ */
+static void metag_pmu_write_counter(int idx, u32 val)
+{
+       struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+       u32 tmp = 0;
+       unsigned long flags;
+
+       /*
+        * This _shouldn't_ happen, but if it does, then we can just
+        * ignore the write, as the register is read-only and clear-on-write.
+        */
+       if (METAG_INST_COUNTER == idx)
+               return;
+
+       /*
+        * We'll keep the thread mask and event id, and just update the
+        * counter itself. Also, we should bound the value to 24 bits.
+        */
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       val &= 0x00ffffff;
+       tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000;
+       val |= tmp;
+       metag_out32(val, PERF_COUNT(idx));
+
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+/*
+ * Look up the hardware encoding for a generic hardware event id.
+ * Bounds are checked by the caller (_hw_perf_event_init).
+ */
+static int metag_pmu_event_map(int idx)
+{
+       return metag_general_events[idx];
+}
+
+/*
+ * Counter overflow interrupt handler. The counter index is passed as the
+ * dev_id cookie ((void *)0 / (void *)1, set up in reserve_pmu_hardware).
+ * Freezes the counter, folds in the count, restarts the sample period
+ * and re-enables the counter unless perf asked for the event to stop.
+ */
+static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
+{
+       int idx = (int)dev;
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+       struct perf_event *event = cpuhw->events[idx];
+       struct hw_perf_event *hwc = &event->hw;
+       struct pt_regs *regs = get_irq_regs();
+       struct perf_sample_data sampledata;
+       unsigned long flags;
+       u32 counter = 0;
+
+       /*
+        * We need to stop the core temporarily from generating another
+        * interrupt while we disable this counter. However, we don't want
+        * to flag the counter as free
+        */
+       __global_lock2(flags);
+       counter = metag_in32(PERF_COUNT(idx));
+       metag_out32((counter & 0x00ffffff), PERF_COUNT(idx));
+       __global_unlock2(flags);
+
+       /* Update the counts and reset the sample period */
+       metag_pmu_event_update(event, hwc, idx);
+       perf_sample_data_init(&sampledata, 0, hwc->last_period);
+       metag_pmu_event_set_period(event, hwc, idx);
+
+       /*
+        * Enable the counter again once core overflow processing has
+        * completed. Writing back 'counter' restores the thread-mask/event
+        * byte that was cleared above.
+        */
+       if (!perf_event_overflow(event, &sampledata, regs))
+               metag_out32(counter, PERF_COUNT(idx));
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Meta 2 PMU description. init_hw_perf_events() clears handle_irq/write/
+ * max_period on cores older than rev 0x0104 (no overflow interrupts).
+ */
+static struct metag_pmu _metag_pmu = {
+       .handle_irq     = metag_pmu_counter_overflow,
+       .enable         = metag_pmu_enable_counter,
+       .disable        = metag_pmu_disable_counter,
+       .read           = metag_pmu_read_counter,
+       .write          = metag_pmu_write_counter,
+       .event_map      = metag_pmu_event_map,
+       .cache_events   = &metag_pmu_cache_events,
+       .max_period     = MAX_PERIOD,
+       .max_events     = MAX_HWEVENTS,
+};
+
+/* PMU CPU hotplug notifier */
+/*
+ * Reinitialise a CPU's per-cpu event state and lock when it comes up
+ * (CPU_STARTING, including resume from freeze).
+ */
+static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
+               unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned int)hcpu;
+       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+       if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+               return NOTIFY_DONE;
+
+       memset(cpuc, 0, sizeof(struct cpu_hw_events));
+       raw_spin_lock_init(&cpuc->pmu_lock);
+
+       return NOTIFY_OK;
+}
+
+/* Registered in init_hw_perf_events(). */
+static struct notifier_block __cpuinitdata metag_pmu_notifier = {
+       .notifier_call = metag_pmu_cpu_notify,
+};
+
+/* PMU Initialisation */
+/*
+ * Detect the core, select the PMU description, initialise per-cpu state
+ * and register with the perf core. Runs as an early initcall.
+ */
+static int __init init_hw_perf_events(void)
+{
+       int ret = 0, cpu;
+       u32 version = *(u32 *)METAC_ID;
+       int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S;
+       int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS))
+                       >> METAC_ID_REV_S;
+
+       /* Not a Meta 2 core, then not supported */
+       if (0x02 > major) {
+               pr_info("no hardware counter support available\n");
+               goto out;
+       } else if (0x02 == major) {
+               metag_pmu = &_metag_pmu;
+
+               if (min_rev < 0x0104) {
+                       /*
+                        * A core without overflow interrupts, and clear-on-
+                        * write counters.
+                        */
+                       metag_pmu->handle_irq = NULL;
+                       metag_pmu->write = NULL;
+                       metag_pmu->max_period = 0;
+               }
+
+               metag_pmu->name = "Meta 2";
+               metag_pmu->version = version;
+               metag_pmu->pmu = pmu;
+       }
+
+       /*
+        * NOTE(review): a core with major > 0x02 matches neither branch
+        * above, leaving metag_pmu NULL, yet the pr_info below
+        * dereferences it — confirm how future cores should be handled.
+        */
+       pr_info("enabled with %s PMU driver, %d counters available\n",
+                       metag_pmu->name, metag_pmu->max_events);
+
+       /* Initialise the active events and reservation mutex */
+       atomic_set(&metag_pmu->active_events, 0);
+       mutex_init(&metag_pmu->reserve_mutex);
+
+       /* Clear the counters */
+       metag_out32(0, PERF_COUNT(0));
+       metag_out32(0, PERF_COUNT(1));
+
+       for_each_possible_cpu(cpu) {
+               struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+               memset(cpuc, 0, sizeof(struct cpu_hw_events));
+               raw_spin_lock_init(&cpuc->pmu_lock);
+       }
+
+       register_cpu_notifier(&metag_pmu_notifier);
+       ret = perf_pmu_register(&pmu, (char *)metag_pmu->name, PERF_TYPE_RAW);
+out:
+       return ret;
+}
+early_initcall(init_hw_perf_events);
diff --git a/arch/metag/kernel/perf/perf_event.h b/arch/metag/kernel/perf/perf_event.h
new file mode 100644 (file)
index 0000000..fd10a13
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Meta performance counter support.
+ *  Copyright (C) 2012 Imagination Technologies Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef METAG_PERF_EVENT_H_
+#define METAG_PERF_EVENT_H_
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+
+/* For performance counter definitions */
+#include <asm/metag_mem.h>
+
+/*
+ * The Meta core has two performance counters, with 24-bit resolution. Newer
+ * cores generate an overflow interrupt on transition from 0xffffff to 0.
+ *
+ * Each counter consists of the counter id, hardware thread id, and the count
+ * itself; each counter can be assigned to multiple hardware threads at any
+ * one time, with the returned count being an aggregate of events. A small
+ * number of events are thread global, i.e. they count the aggregate of all
+ * threads' events, regardless of the thread selected.
+ *
+ * Newer cores can store an arbitrary 24-bit number in the counter, whereas
+ * older cores will clear the counter bits on write.
+ *
+ * We also have a pseudo-counter in the form of the thread active cycles
+ * counter (which, incidentally, is also bound to the TXTACTCYC core
+ * register, and is cleared when read).
+ */
+
+/* Two real performance counters plus the cycle pseudo-counter. */
+#define MAX_HWEVENTS           3
+/* Counters are 24 bits wide. */
+#define MAX_PERIOD             ((1UL << 24) - 1)
+/* The cycle/instruction pseudo-counter always takes the last index. */
+#define METAG_INST_COUNTER     (MAX_HWEVENTS - 1)
+
+/**
+ * struct cpu_hw_events - a processor core's performance events
+ * @events:    an array of perf_events active for a given counter index.
+ * @used_mask: a bitmap of in-use counters.
+ * @pmu_lock:  a perf counter lock, protecting counter register accesses.
+ *
+ * This is a per-cpu/core structure that maintains a record of its
+ * performance counters' state.
+ */
+struct cpu_hw_events {
+       struct perf_event       *events[MAX_HWEVENTS];
+       unsigned long           used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+       raw_spinlock_t          pmu_lock;
+};
+
+/**
+ * struct metag_pmu - the Meta PMU structure
+ * @pmu:               core pmu structure
+ * @name:              pmu name
+ * @version:           core version (METAC_ID register value)
+ * @handle_irq:                overflow interrupt handler (NULL on early cores)
+ * @enable:            enable a counter
+ * @disable:           disable a counter
+ * @read:              read the value of a counter
+ * @write:             write a value to a counter (NULL on early cores)
+ * @event_map:         kernel event to counter event id map
+ * @cache_events:      kernel cache counter to core cache counter map
+ * @max_period:                maximum value of the counter before overflow
+ *                     (0 on early cores without overflow interrupts)
+ * @max_events:                maximum number of counters available at any one time
+ * @active_events:     number of active counters
+ * @reserve_mutex:     counter reservation mutex
+ *
+ * This describes the main functionality and data used by the performance
+ * event core.
+ */
+struct metag_pmu {
+       struct pmu      pmu;
+       const char      *name;
+       u32             version;
+       irqreturn_t     (*handle_irq)(int irq_num, void *dev);
+       void            (*enable)(struct hw_perf_event *evt, int idx);
+       void            (*disable)(struct hw_perf_event *evt, int idx);
+       u64             (*read)(int idx);
+       void            (*write)(int idx, u32 val);
+       int             (*event_map)(int idx);
+       const int       (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX];
+       u32             max_period;
+       int             max_events;
+       atomic_t        active_events;
+       struct mutex    reserve_mutex;
+};
+
+/* Convenience macros for accessing the perf counters */
+/* Define some convenience accessors; registers are at 64-bit strides. */
+#define PERF_COUNT(x)  (PERF_COUNT0 + (sizeof(u64) * (x)))
+#define PERF_ICORE(x)  (PERF_ICORE0 + (sizeof(u64) * (x)))
+#define PERF_CHAN(x)   (PERF_CHAN0 + (sizeof(u64) * (x)))
+
+/* Cache index macros */
+#define C(x) PERF_COUNT_HW_CACHE_##x
+/* Sentinels used in the cache_events table. */
+#define CACHE_OP_UNSUPPORTED   0xfffe
+#define CACHE_OP_NONSENSE      0xffff
+
+#endif
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
new file mode 100644 (file)
index 0000000..3156334
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Perf callchain handling code.
+ *
+ *   Based on the ARM perf implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+/*
+ * is_valid_call - does @calladdr hold a call instruction?
+ *
+ * Used to decide whether the word after @calladdr is a plausible return
+ * address.  Returns false if the address is unaligned, unreadable from
+ * user space, or does not decode as CALLR or "SWAP PC,D1RtP".
+ */
+static bool is_valid_call(unsigned long calladdr)
+{
+       unsigned int callinsn;
+
+       /* Check the possible return address is aligned. */
+       if (!(calladdr & 0x3)) {
+               if (!get_user(callinsn, (unsigned int *)calladdr)) {
+                       /* Check for CALLR or SWAP PC,D1RtP. */
+                       if ((callinsn & 0xff000000) == 0xab000000 ||
+                           callinsn == 0xa3200aa0)
+                               return true;
+               }
+       }
+       return false;
+}
+
+/*
+ * user_backtrace - scan user stack frames for the next valid return address.
+ *
+ * Walks frames downwards from @user_frame until one contains an LR whose
+ * preceding word is a call instruction; that address is recorded in @entry
+ * and the next frame pointer returned.  Returns 0 when the stack becomes
+ * inaccessible.  __copy_from_user_inatomic() is used since this presumably
+ * runs from the perf interrupt path where sleeping is not allowed —
+ * NOTE(review): confirm calling context.
+ */
+static struct metag_frame __user *
+user_backtrace(struct metag_frame __user *user_frame,
+              struct perf_callchain_entry *entry)
+{
+       struct metag_frame frame;
+       unsigned long calladdr;
+
+       /* We cannot rely on having frame pointers in user code. */
+       while (1) {
+               /* Also check accessibility of one struct frame beyond */
+               if (!access_ok(VERIFY_READ, user_frame, sizeof(frame)))
+                       return 0;
+               if (__copy_from_user_inatomic(&frame, user_frame,
+                                             sizeof(frame)))
+                       return 0;
+
+               --user_frame;
+
+               calladdr = frame.lr - 4;
+               if (is_valid_call(calladdr)) {
+                       perf_callchain_store(entry, calladdr);
+                       return user_frame;
+               }
+       }
+
+       /* NOTE(review): unreachable — the loop above only exits via return. */
+       return 0;
+}
+
+/*
+ * Record a user-space callchain for a perf sample, starting from the
+ * sampled stack pointer (AX[0].U0) and stopping at PERF_MAX_STACK_DEPTH
+ * entries or when user_backtrace() can no longer find a frame.
+ */
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+       unsigned long sp = regs->ctx.AX[0].U0;
+       struct metag_frame __user *frame;
+
+       frame = (struct metag_frame __user *)sp;
+
+       /* Step below the current (incomplete) frame before scanning. */
+       --frame;
+
+       while ((entry->nr < PERF_MAX_STACK_DEPTH) && frame)
+               frame = user_backtrace(frame, entry);
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whist unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int
+callchain_trace(struct stackframe *fr,
+               void *data)
+{
+       struct perf_callchain_entry *entry = data;
+       perf_callchain_store(entry, fr->pc);
+       /* Returning 0 tells walk_stackframe() to keep unwinding. */
+       return 0;
+}
+
+/*
+ * Record a kernel callchain for a perf sample by seeding a stackframe
+ * from the trap context (FP, SP, LR, PC) and unwinding it.
+ */
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+       struct stackframe fr;
+
+       fr.fp = regs->ctx.AX[1].U0;
+       fr.sp = regs->ctx.AX[0].U0;
+       fr.lr = regs->ctx.DX[4].U1;
+       fr.pc = regs->ctx.CurrPC;
+       walk_stackframe(&fr, callchain_trace, entry);
+}
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
new file mode 100644 (file)
index 0000000..c6efe62
--- /dev/null
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
+ *
+ * This file contains the architecture-dependent parts of process handling.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/reboot.h>
+#include <linux/elfcore.h>
+#include <linux/fs.h>
+#include <linux/tick.h>
+#include <linux/slab.h>
+#include <linux/mman.h>
+#include <linux/pm.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <asm/core_reg.h>
+#include <asm/user_gateway.h>
+#include <asm/tcm.h>
+#include <asm/traps.h>
+#include <asm/switch_to.h>
+
+/*
+ * Wait for the next interrupt and enable local interrupts
+ */
+static inline void arch_idle(void)
+{
+       int tmp;
+
+       /*
+        * Quickly jump straight into the interrupt entry point without actually
+        * triggering an interrupt. When TXSTATI gets read the processor will
+        * block until an interrupt is triggered.
+        */
+       asm volatile (/* Switch into ISTAT mode */
+                     "RTH\n\t"
+                     /* Enable local interrupts */
+                     "MOV      TXMASKI, %1\n\t"
+                     /*
+                      * We can't directly "SWAP PC, PCX", so we swap via a
+                      * temporary. Essentially we do:
+                      *  PCX_new = 1f (the place to continue execution)
+                      *  PC = PCX_old
+                      */
+                     "ADD      %0, CPC0, #(1f-.)\n\t"
+                     "SWAP     PCX, %0\n\t"
+                     "MOV      PC, %0\n"
+                     /* Continue execution here with interrupts enabled */
+                     "1:"
+                     /* "=a": scratch address-unit register for the PC swap */
+                     : "=a" (tmp)
+                     : "r" (get_trigger_mask()));
+}
+
+/*
+ * The idle loop: sleep in arch_idle() until a reschedule is needed.
+ * Interrupts are disabled before the final need_resched() check so a
+ * wakeup arriving between the check and the sleep cannot be lost.
+ */
+void cpu_idle(void)
+{
+       set_thread_flag(TIF_POLLING_NRFLAG);
+
+       while (1) {
+               tick_nohz_idle_enter();
+               rcu_idle_enter();
+
+               while (!need_resched()) {
+                       /*
+                        * We need to disable interrupts here to ensure we don't
+                        * miss a wakeup call.
+                        */
+                       local_irq_disable();
+                       if (!need_resched()) {
+#ifdef CONFIG_HOTPLUG_CPU
+                               if (cpu_is_offline(smp_processor_id()))
+                                       cpu_die();
+#endif
+                               /* arch_idle() re-enables interrupts itself. */
+                               arch_idle();
+                       } else {
+                               local_irq_enable();
+                       }
+               }
+
+               rcu_idle_exit();
+               tick_nohz_idle_exit();
+               schedule_preempt_disabled();
+        }
+}
+
+/* Board/SoC-provided hooks for power-off, restart and halt (may be NULL). */
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void (*soc_restart)(char *cmd);
+void (*soc_halt)(void);
+
+/* Restart via the SoC hook if present, then halt this hardware thread. */
+void machine_restart(char *cmd)
+{
+       if (soc_restart)
+               soc_restart(cmd);
+       /* NOTE(review): unlike halt/power-off below, no smp_send_stop()
+        * here — confirm other threads need not be stopped on restart. */
+       hard_processor_halt(HALT_OK);
+}
+
+/* Halt: stop the other CPUs, then halt this hardware thread. */
+void machine_halt(void)
+{
+       if (soc_halt)
+               soc_halt();
+       smp_send_stop();
+       hard_processor_halt(HALT_OK);
+}
+
+/* Power off via pm_power_off if set; fall back to a plain halt. */
+void machine_power_off(void)
+{
+       if (pm_power_off)
+               pm_power_off();
+       smp_send_stop();
+       hard_processor_halt(HALT_OK);
+}
+
+/* TXSTATUS condition flag bits, as saved in regs->ctx.Flags. */
+#define FLAG_Z 0x8
+#define FLAG_N 0x4
+#define FLAG_O 0x2
+#define FLAG_C 0x1
+
+/*
+ * Dump the saved register context: status/flags, PC, the address (AX),
+ * data (DX) and extended register pairs, followed by a stack trace.
+ * Each pr_info() line is completed by a bare printk() continuation for
+ * the paired unit-1 register.
+ */
+void show_regs(struct pt_regs *regs)
+{
+       int i;
+       const char *AX0_names[] = {"A0StP", "A0FrP"};
+       const char *AX1_names[] = {"A1GbP", "A1LbP"};
+
+       const char *DX0_names[] = {
+               "D0Re0",
+               "D0Ar6",
+               "D0Ar4",
+               "D0Ar2",
+               "D0FrT",
+               "D0.5 ",
+               "D0.6 ",
+               "D0.7 "
+       };
+
+       const char *DX1_names[] = {
+               "D1Re0",
+               "D1Ar5",
+               "D1Ar3",
+               "D1Ar1",
+               "D1RtP",
+               "D1.5 ",
+               "D1.6 ",
+               "D1.7 "
+       };
+
+       pr_info(" pt_regs @ %p\n", regs);
+       pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
+       pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
+               regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
+               regs->ctx.Flags & FLAG_N ? 'N' : 'n',
+               regs->ctx.Flags & FLAG_O ? 'O' : 'o',
+               regs->ctx.Flags & FLAG_C ? 'C' : 'c');
+       pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT);
+       pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC);
+
+       /* AX regs */
+       for (i = 0; i < 2; i++) {
+               pr_info(" %s = 0x%08x    ",
+                       AX0_names[i],
+                       regs->ctx.AX[i].U0);
+               printk(" %s = 0x%08x\n",
+                       AX1_names[i],
+                       regs->ctx.AX[i].U1);
+       }
+
+       if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
+               pr_warn(" Extended state present - AX2.[01] will be WRONG\n");
+
+       /* Special place with AXx.2 */
+       pr_info(" A0.2  = 0x%08x    ",
+               regs->ctx.Ext.AX2.U0);
+       printk(" A1.2  = 0x%08x\n",
+               regs->ctx.Ext.AX2.U1);
+
+       /* 'extended' AX regs (nominally, just AXx.3) */
+       for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
+               pr_info(" A0.%d  = 0x%08x    ", i + 3, regs->ctx.AX3[i].U0);
+               printk(" A1.%d  = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
+       }
+
+       for (i = 0; i < 8; i++) {
+               pr_info(" %s = 0x%08x    ", DX0_names[i], regs->ctx.DX[i].U0);
+               printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
+       }
+
+       show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
+}
+
+/*
+ * copy_thread - set up the register state and kernel context of a new task.
+ *
+ * For kernel threads, @usp is the thread function and @arg its argument
+ * (placed in D1RtP and D1Ar1 respectively).  For user forks/clones, the
+ * parent's pt_regs are duplicated, the child's return value (D0Re0) is
+ * zeroed and the user stack pointer replaced if @usp is non-zero.
+ * Returns 0 on success.
+ */
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+               unsigned long arg, struct task_struct *tsk)
+{
+       struct pt_regs *childregs = task_pt_regs(tsk);
+       void *kernel_context = ((void *) childregs +
+                               sizeof(struct pt_regs));
+       unsigned long global_base;
+
+       /* Both areas must be 8-byte aligned for TBX. */
+       BUG_ON(((unsigned long)childregs) & 0x7);
+       BUG_ON(((unsigned long)kernel_context) & 0x7);
+
+       memset(&tsk->thread.kernel_context, 0,
+                       sizeof(tsk->thread.kernel_context));
+
+       /* New tasks resume execution in ret_from_fork. */
+       tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
+                                                    ret_from_fork,
+                                                    0, 0);
+
+       if (unlikely(tsk->flags & PF_KTHREAD)) {
+               /*
+                * Make sure we don't leak any kernel data to child's regs
+                * if kernel thread becomes a userspace thread in the future
+                */
+               memset(childregs, 0 , sizeof(struct pt_regs));
+
+               global_base = __core_reg_get(A1GbP);
+               childregs->ctx.AX[0].U1 = (unsigned long) global_base;
+               childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
+               /* Set D1Ar1=arg and D1RtP=usp (fn) */
+               childregs->ctx.DX[4].U1 = usp;
+               childregs->ctx.DX[3].U1 = arg;
+               tsk->thread.int_depth = 2;
+               return 0;
+       }
+       /*
+        * Get a pointer to where the new child's register block should have
+        * been pushed.
+        * The Meta's stack grows upwards, and the context is the the first
+        * thing to be pushed by TBX (phew)
+        */
+       *childregs = *current_pt_regs();
+       /* Set the correct stack for the clone mode */
+       if (usp)
+               childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
+       tsk->thread.int_depth = 1;
+
+       /* set return value for child process */
+       childregs->ctx.DX[0].U0 = 0;
+
+       /* The TLS pointer is passed as an argument to sys_clone. */
+       if (clone_flags & CLONE_SETTLS)
+               tsk->thread.tls_ptr =
+                               (__force void __user *)childregs->ctx.DX[1].U1;
+
+#ifdef CONFIG_METAG_FPU
+       /* Give the child its own copy of the parent's FPU context. */
+       if (tsk->thread.fpu_context) {
+               struct meta_fpu_context *ctx;
+
+               ctx = kmemdup(tsk->thread.fpu_context,
+                             sizeof(struct meta_fpu_context), GFP_ATOMIC);
+               /* NOTE(review): kmemdup failure leaves a NULL context —
+                * presumably tolerated by later users; confirm. */
+               tsk->thread.fpu_context = ctx;
+       }
+#endif
+
+#ifdef CONFIG_METAG_DSP
+       /* Deep-copy the DSP context including its RAM buffers. */
+       if (tsk->thread.dsp_context) {
+               struct meta_ext_context *ctx;
+               int i;
+
+               ctx = kmemdup(tsk->thread.dsp_context,
+                             sizeof(struct meta_ext_context), GFP_ATOMIC);
+               for (i = 0; i < 2; i++)
+                       ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i],
+                                             GFP_ATOMIC);
+               tsk->thread.dsp_context = ctx;
+       }
+#endif
+
+       return 0;
+}
+
+#ifdef CONFIG_METAG_FPU
+/* Lazily allocate per-thread FPU save state (may fail; caller checks). */
+static void alloc_fpu_context(struct thread_struct *thread)
+{
+       thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
+                                     GFP_ATOMIC);
+}
+
+/* Drop the thread's FPU state and clear its "FPU active" flag. */
+static void clear_fpu(struct thread_struct *thread)
+{
+       thread->user_flags &= ~TBICTX_FPAC_BIT;
+       kfree(thread->fpu_context);
+       thread->fpu_context = NULL;
+}
+#else
+/* No FPU support configured: nothing to clear. */
+static void clear_fpu(struct thread_struct *thread)
+{
+}
+#endif
+
+#ifdef CONFIG_METAG_DSP
+/* Free the thread's DSP context (including both RAM buffers) and zero D0.8. */
+static void clear_dsp(struct thread_struct *thread)
+{
+       if (thread->dsp_context) {
+               kfree(thread->dsp_context->ram[0]);
+               kfree(thread->dsp_context->ram[1]);
+
+               kfree(thread->dsp_context);
+
+               thread->dsp_context = NULL;
+       }
+
+       __core_reg_set(D0.8, 0);
+}
+#else
+/* No DSP support configured: nothing to clear. */
+static void clear_dsp(struct thread_struct *thread)
+{
+}
+#endif
+
+/*
+ * __switch_to - context-switch from @prev to @next via TBX.
+ *
+ * Saves @prev's FPU state lazily (only if the FPU was actually used,
+ * per TBICTX_FPAC_BIT), switches kernel contexts with __TBISwitch(),
+ * and restores the incoming task's TLS pointer.  Returns the task we
+ * switched away from, as passed through TBX's pPara field.
+ */
+struct task_struct *__sched __switch_to(struct task_struct *prev,
+                                       struct task_struct *next)
+{
+       TBIRES to, from;
+
+       to.Switch.pCtx = next->thread.kernel_context;
+       to.Switch.pPara = prev;
+
+#ifdef CONFIG_METAG_FPU
+       if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
+               struct pt_regs *regs = task_pt_regs(prev);
+               TBIRES state;
+
+               state.Sig.SaveMask = prev->thread.user_flags;
+               state.Sig.pCtx = &regs->ctx;
+
+               /* Context storage is allocated on first FPU use. */
+               if (!prev->thread.fpu_context)
+                       alloc_fpu_context(&prev->thread);
+               if (prev->thread.fpu_context)
+                       __TBICtxFPUSave(state, prev->thread.fpu_context);
+       }
+       /*
+        * Force a restore of the FPU context next time this process is
+        * scheduled.
+        */
+       if (prev->thread.fpu_context)
+               prev->thread.fpu_context->needs_restore = true;
+#endif
+
+
+       from = __TBISwitch(to, &prev->thread.kernel_context);
+
+       /* Restore TLS pointer for this process. */
+       set_gateway_tls(current->thread.tls_ptr);
+
+       return (struct task_struct *) from.Switch.pPara;
+}
+
+/* Reset per-thread coprocessor state on exec. */
+void flush_thread(void)
+{
+       clear_fpu(&current->thread);
+       clear_dsp(&current->thread);
+}
+
+/*
+ * Free current thread data structures etc.
+ */
+void exit_thread(void)
+{
+       clear_fpu(&current->thread);
+       clear_dsp(&current->thread);
+}
+
+/* TODO: figure out how to unwind the kernel stack here to figure out
+ * where we went to sleep. */
+unsigned long get_wchan(struct task_struct *p)
+{
+       return 0;
+}
+
+/* Core-dump hook for FPU registers; this port never stores FPU state here. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+       /* Returning 0 indicates that the FPU state was not stored (as it was
+        * not in use) */
+       return 0;
+}
+
+#ifdef CONFIG_METAG_USER_TCM
+
+/* Local copies of binfmt_elf's page-rounding helpers. */
+#define ELF_MIN_ALIGN  PAGE_SIZE
+
+#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
+#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
+#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
+
+#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
+
+/*
+ * __metag_elf_map - map an ELF segment, with special handling for segments
+ * whose target address falls in Tightly Coupled Memory (TCM).
+ *
+ * TCM segments are mapped at a kernel-chosen address (MAP_FIXED stripped),
+ * a TCM region of the original address is allocated and recorded on the
+ * mm's TCM list, and the segment contents are copied into it.
+ * Returns the mmap address, or a negative errno.
+ */
+unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
+                             struct elf_phdr *eppnt, int prot, int type,
+                             unsigned long total_size)
+{
+       unsigned long map_addr, size;
+       unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
+       unsigned long raw_size = eppnt->p_filesz + page_off;
+       unsigned long off = eppnt->p_offset - page_off;
+       unsigned int tcm_tag;
+       addr = ELF_PAGESTART(addr);
+       size = ELF_PAGEALIGN(raw_size);
+
+       /* mmap() will return -EINVAL if given a zero size, but a
+        * segment with zero filesize is perfectly valid */
+       if (!size)
+               return addr;
+
+       tcm_tag = tcm_lookup_tag(addr);
+
+       /* TCM regions cannot be mmap'd directly at their real address. */
+       if (tcm_tag != TCM_INVALID_TAG)
+               type &= ~MAP_FIXED;
+
+       /*
+       * total_size is the size of the ELF (interpreter) image.
+       * The _first_ mmap needs to know the full size, otherwise
+       * randomization might put this image into an overlapping
+       * position with the ELF binary image. (since size < total_size)
+       * So we first map the 'big' image - and unmap the remainder at
+       * the end. (which unmap is needed for ELF images with holes.)
+       */
+       if (total_size) {
+               total_size = ELF_PAGEALIGN(total_size);
+               map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
+               if (!BAD_ADDR(map_addr))
+                       vm_munmap(map_addr+size, total_size-size);
+       } else
+               map_addr = vm_mmap(filep, addr, size, prot, type, off);
+
+       if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
+               struct tcm_allocation *tcm;
+               unsigned long tcm_addr;
+
+               tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
+               if (!tcm)
+                       return -ENOMEM;
+
+               /* NOTE(review): error paths below leave the vm_mmap mapping
+                * in place — confirm the caller unwinds it on failure. */
+               tcm_addr = tcm_alloc(tcm_tag, raw_size);
+               if (tcm_addr != addr) {
+                       kfree(tcm);
+                       return -ENOMEM;
+               }
+
+               tcm->tag = tcm_tag;
+               tcm->addr = tcm_addr;
+               tcm->size = raw_size;
+
+               list_add(&tcm->list, &current->mm->context.tcm);
+
+               /* Copy the segment from its file mapping into the TCM. */
+               eppnt->p_vaddr = map_addr;
+               if (copy_from_user((void *) addr, (void __user *) map_addr,
+                                  raw_size))
+                       return -EFAULT;
+       }
+
+       return map_addr;
+}
+#endif
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
new file mode 100644 (file)
index 0000000..47a8828
--- /dev/null
@@ -0,0 +1,380 @@
+/*
+ *  Copyright (C) 2005-2012 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/uaccess.h>
+#include <trace/syscall.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/*
+ * user_regset definitions.
+ */
+
+/*
+ * metag_gp_regs_copyout - export the general-purpose register context in
+ * user_gp_regs layout: DX pairs (words 0-15), AX pairs (16-19), AX2
+ * (20-21, from the extended context if present), AX3 (22-23), PC (24),
+ * a synthesised TXSTATUS (25, flags plus catch bit), TXRPT/TXBPOBITS/
+ * TXMODE (26-28) and one word of padding (29).
+ * Returns 0 or the first user_regset_copyout() error.
+ */
+int metag_gp_regs_copyout(const struct pt_regs *regs,
+                         unsigned int pos, unsigned int count,
+                         void *kbuf, void __user *ubuf)
+{
+       const void *ptr;
+       unsigned long data;
+       int ret;
+
+       /* D{0-1}.{0-7} */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 regs->ctx.DX, 0, 4*16);
+       if (ret)
+               goto out;
+       /* A{0-1}.{0-1} */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 regs->ctx.AX, 4*16, 4*20);
+       if (ret)
+               goto out;
+       /* A{0-1}.2 */
+       if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
+               ptr = regs->ctx.Ext.Ctx.pExt;
+       else
+               ptr = &regs->ctx.Ext.AX2;
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 ptr, 4*20, 4*22);
+       if (ret)
+               goto out;
+       /* A{0-1}.3 */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &regs->ctx.AX3, 4*22, 4*24);
+       if (ret)
+               goto out;
+       /* PC */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &regs->ctx.CurrPC, 4*24, 4*25);
+       if (ret)
+               goto out;
+       /* TXSTATUS */
+       data = (unsigned long)regs->ctx.Flags;
+       if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+               data |= USER_GP_REGS_STATUS_CATCH_BIT;
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &data, 4*25, 4*26);
+       if (ret)
+               goto out;
+       /* TXRPT, TXBPOBITS, TXMODE */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &regs->ctx.CurrRPT, 4*26, 4*29);
+       if (ret)
+               goto out;
+       /* Padding */
+       ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+                                      4*29, 4*30);
+out:
+       return ret;
+}
+
+/*
+ * metag_gp_regs_copyin - import a user_gp_regs layout back into the saved
+ * register context (mirror of metag_gp_regs_copyout).  Only the low 16
+ * bits of the supplied TXSTATUS word become Flags; its catch bit toggles
+ * TBICTX_CBUF_BIT (and sets XCBF) in SaveMask.
+ * Returns 0 or the first user_regset_copyin() error.
+ */
+int metag_gp_regs_copyin(struct pt_regs *regs,
+                        unsigned int pos, unsigned int count,
+                        const void *kbuf, const void __user *ubuf)
+{
+       void *ptr;
+       unsigned long data;
+       int ret;
+
+       /* D{0-1}.{0-7} */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                regs->ctx.DX, 0, 4*16);
+       if (ret)
+               goto out;
+       /* A{0-1}.{0-1} */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                regs->ctx.AX, 4*16, 4*20);
+       if (ret)
+               goto out;
+       /* A{0-1}.2 */
+       if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
+               ptr = regs->ctx.Ext.Ctx.pExt;
+       else
+               ptr = &regs->ctx.Ext.AX2;
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                ptr, 4*20, 4*22);
+       if (ret)
+               goto out;
+       /* A{0-1}.3 */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->ctx.AX3, 4*22, 4*24);
+       if (ret)
+               goto out;
+       /* PC */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->ctx.CurrPC, 4*24, 4*25);
+       if (ret)
+               goto out;
+       /* TXSTATUS */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &data, 4*25, 4*26);
+       if (ret)
+               goto out;
+       /* NOTE(review): if the caller's count ends before word 25, copyin
+        * returns 0 without writing 'data' — confirm 'data' cannot then be
+        * used uninitialized below. */
+       regs->ctx.Flags = data & 0xffff;
+       if (data & USER_GP_REGS_STATUS_CATCH_BIT)
+               regs->ctx.SaveMask |= TBICTX_XCBF_BIT | TBICTX_CBUF_BIT;
+       else
+               regs->ctx.SaveMask &= ~TBICTX_CBUF_BIT;
+       /* TXRPT, TXBPOBITS, TXMODE */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->ctx.CurrRPT, 4*26, 4*29);
+out:
+       return ret;
+}
+
+/* regset .get hook: export the target's GP registers. */
+static int metag_gp_regs_get(struct task_struct *target,
+                            const struct user_regset *regset,
+                            unsigned int pos, unsigned int count,
+                            void *kbuf, void __user *ubuf)
+{
+       const struct pt_regs *regs = task_pt_regs(target);
+       return metag_gp_regs_copyout(regs, pos, count, kbuf, ubuf);
+}
+
+/* regset .set hook: import GP registers into the target. */
+static int metag_gp_regs_set(struct task_struct *target,
+                            const struct user_regset *regset,
+                            unsigned int pos, unsigned int count,
+                            const void *kbuf, const void __user *ubuf)
+{
+       struct pt_regs *regs = task_pt_regs(target);
+       return metag_gp_regs_copyin(regs, pos, count, kbuf, ubuf);
+}
+
+/*
+ * Export the catch buffer registers TXCATCH0-3 (regs->extcb0).  If no
+ * catch state was saved (TBICTX_XCBF_BIT clear) zeros are returned.
+ */
+int metag_cb_regs_copyout(const struct pt_regs *regs,
+                         unsigned int pos, unsigned int count,
+                         void *kbuf, void __user *ubuf)
+{
+       int ret;
+
+       /* TXCATCH{0-3} */
+       if (regs->ctx.SaveMask & TBICTX_XCBF_BIT)
+               ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                         regs->extcb0, 0, 4*4);
+       else
+               ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+                                              0, 4*4);
+       return ret;
+}
+
+/* Import TXCATCH0-3 into the saved catch buffer. */
+int metag_cb_regs_copyin(struct pt_regs *regs,
+                        unsigned int pos, unsigned int count,
+                        const void *kbuf, const void __user *ubuf)
+{
+       int ret;
+
+       /* TXCATCH{0-3} */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                regs->extcb0, 0, 4*4);
+       return ret;
+}
+
+/* regset .get hook for the catch buffer. */
+static int metag_cb_regs_get(struct task_struct *target,
+                            const struct user_regset *regset,
+                            unsigned int pos, unsigned int count,
+                            void *kbuf, void __user *ubuf)
+{
+       const struct pt_regs *regs = task_pt_regs(target);
+       return metag_cb_regs_copyout(regs, pos, count, kbuf, ubuf);
+}
+
+/* regset .set hook for the catch buffer. */
+static int metag_cb_regs_set(struct task_struct *target,
+                            const struct user_regset *regset,
+                            unsigned int pos, unsigned int count,
+                            const void *kbuf, const void __user *ubuf)
+{
+       struct pt_regs *regs = task_pt_regs(target);
+       return metag_cb_regs_copyin(regs, pos, count, kbuf, ubuf);
+}
+
+/*
+ * Export the read pipeline state as a struct user_rp_state: six 64-bit
+ * entries (valid ones taken from regs->extcb0[1..6], invalid ones zeroed)
+ * followed by the RPMask word extracted from TXDIVTIME.  An empty
+ * pipeline (TBICTX_CBRP_BIT clear) is reported as all zeros.
+ */
+int metag_rp_state_copyout(const struct pt_regs *regs,
+                          unsigned int pos, unsigned int count,
+                          void *kbuf, void __user *ubuf)
+{
+       unsigned long mask;
+       u64 *ptr;
+       int ret, i;
+
+       /* Empty read pipeline */
+       if (!(regs->ctx.SaveMask & TBICTX_CBRP_BIT)) {
+               ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+                                              0, 4*13);
+               goto out;
+       }
+
+       mask = (regs->ctx.CurrDIVTIME & TXDIVTIME_RPMASK_BITS) >>
+               TXDIVTIME_RPMASK_S;
+
+       /* Read pipeline entries */
+       ptr = (void *)&regs->extcb0[1];
+       for (i = 0; i < 6; ++i, ++ptr) {
+               if (mask & (1 << i))
+                       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                                 ptr, 8*i, 8*(i + 1));
+               else
+                       ret = user_regset_copyout_zero(&pos, &count, &kbuf,
+                                                      &ubuf, 8*i, 8*(i + 1));
+               if (ret)
+                       goto out;
+       }
+       /* Mask of entries */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &mask, 4*12, 4*13);
+out:
+       return ret;
+}
+
+/*
+ * Import read pipeline state: write only the entries selected by rp.mask,
+ * store the mask back into TXDIVTIME's RPMASK field, and update SaveMask
+ * so the restored context replays (or skips) the pipeline.
+ */
+int metag_rp_state_copyin(struct pt_regs *regs,
+                         unsigned int pos, unsigned int count,
+                         const void *kbuf, const void __user *ubuf)
+{
+       struct user_rp_state rp;
+       unsigned long long *ptr;
+       int ret, i;
+
+       /* Read the entire pipeline before making any changes */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &rp, 0, 4*13);
+       if (ret)
+               goto out;
+
+       /* Write pipeline entries */
+       ptr = (void *)&regs->extcb0[1];
+       for (i = 0; i < 6; ++i, ++ptr)
+               if (rp.mask & (1 << i))
+                       *ptr = rp.entries[i];
+
+       /* Update RPMask in TXDIVTIME */
+       regs->ctx.CurrDIVTIME &= ~TXDIVTIME_RPMASK_BITS;
+       regs->ctx.CurrDIVTIME |= (rp.mask << TXDIVTIME_RPMASK_S)
+                                & TXDIVTIME_RPMASK_BITS;
+
+       /* Set/clear flags to indicate catch/read pipeline state */
+       if (rp.mask)
+               regs->ctx.SaveMask |= TBICTX_XCBF_BIT | TBICTX_CBRP_BIT;
+       else
+               regs->ctx.SaveMask &= ~TBICTX_CBRP_BIT;
+out:
+       return ret;
+}
+
+/* regset .get hook for the read pipeline state. */
+static int metag_rp_state_get(struct task_struct *target,
+                             const struct user_regset *regset,
+                             unsigned int pos, unsigned int count,
+                             void *kbuf, void __user *ubuf)
+{
+       const struct pt_regs *regs = task_pt_regs(target);
+       return metag_rp_state_copyout(regs, pos, count, kbuf, ubuf);
+}
+
+/* regset .set hook for the read pipeline state. */
+static int metag_rp_state_set(struct task_struct *target,
+                             const struct user_regset *regset,
+                             unsigned int pos, unsigned int count,
+                             const void *kbuf, const void __user *ubuf)
+{
+       struct pt_regs *regs = task_pt_regs(target);
+       return metag_rp_state_copyin(regs, pos, count, kbuf, ubuf);
+}
+
+/* Indices into metag_regsets[]. */
+enum metag_regset {
+       REGSET_GENERAL,
+       REGSET_CBUF,
+       REGSET_READPIPE,
+};
+
+/*
+ * The register sets exposed to ptrace/core dumps: general-purpose
+ * registers, catch buffer, and read pipeline state.
+ */
+static const struct user_regset metag_regsets[] = {
+       [REGSET_GENERAL] = {
+               .core_note_type = NT_PRSTATUS,
+               .n = ELF_NGREG,
+               .size = sizeof(long),
+               .align = sizeof(long long),
+               .get = metag_gp_regs_get,
+               .set = metag_gp_regs_set,
+       },
+       [REGSET_CBUF] = {
+               .core_note_type = NT_METAG_CBUF,
+               .n = sizeof(struct user_cb_regs) / sizeof(long),
+               .size = sizeof(long),
+               .align = sizeof(long long),
+               .get = metag_cb_regs_get,
+               .set = metag_cb_regs_set,
+       },
+       [REGSET_READPIPE] = {
+               .core_note_type = NT_METAG_RPIPE,
+               .n = sizeof(struct user_rp_state) / sizeof(long),
+               .size = sizeof(long),
+               .align = sizeof(long long),
+               .get = metag_rp_state_get,
+               .set = metag_rp_state_set,
+       },
+};
+
+static const struct user_regset_view user_metag_view = {
+       .name = "metag",
+       .e_machine = EM_METAG,
+       .regsets = metag_regsets,
+       .n = ARRAY_SIZE(metag_regsets)
+};
+
+/* All tasks use the same (native) regset view on this architecture. */
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+       return &user_metag_view;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+       /* nothing to do.. */
+}
+
+/*
+ * Architecture ptrace entry point: no arch-specific requests are
+ * implemented, so everything is delegated to the generic handler.
+ */
+long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
+                unsigned long data)
+{
+       int ret;
+
+       switch (request) {
+       default:
+               ret = ptrace_request(child, request, addr, data);
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Syscall-entry tracing hook.  Reports to the tracer/tracepoints and
+ * returns the syscall number (DX[0].U1), or -1 to abort the syscall if
+ * the tracer requested it.
+ */
+int syscall_trace_enter(struct pt_regs *regs)
+{
+       int ret = 0;
+
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               ret = tracehook_report_syscall_entry(regs);
+
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_enter(regs, regs->ctx.DX[0].U1);
+
+       return ret ? -1 : regs->ctx.DX[0].U1;
+}
+
+/* Syscall-exit tracing hook: fire the tracepoint and notify the tracer. */
+void syscall_trace_leave(struct pt_regs *regs)
+{
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_exit(regs, regs->ctx.DX[0].U1);
+
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               tracehook_report_syscall_exit(regs, 0);
+}
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
new file mode 100644 (file)
index 0000000..8792461
--- /dev/null
@@ -0,0 +1,631 @@
+/*
+ * Copyright (C) 2005-2012 Imagination Technologies Ltd.
+ *
+ * This file contains the architecture-dependant parts of system setup.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/of_fdt.h>
+#include <linux/pfn.h>
+#include <linux/root_dev.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/start_kernel.h>
+#include <linux/string.h>
+
+#include <asm/cachepart.h>
+#include <asm/clock.h>
+#include <asm/core_reg.h>
+#include <asm/cpu.h>
+#include <asm/da.h>
+#include <asm/highmem.h>
+#include <asm/hwthread.h>
+#include <asm/l2cache.h>
+#include <asm/mach/arch.h>
+#include <asm/metag_mem.h>
+#include <asm/metag_regs.h>
+#include <asm/mmu.h>
+#include <asm/mmzone.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+
+/* Priv protect as many registers as possible. */
+#define DEFAULT_PRIV   (TXPRIVEXT_COPRO_BITS           | \
+                        TXPRIVEXT_TXTRIGGER_BIT        | \
+                        TXPRIVEXT_TXGBLCREG_BIT        | \
+                        TXPRIVEXT_ILOCK_BIT            | \
+                        TXPRIVEXT_TXITACCYC_BIT        | \
+                        TXPRIVEXT_TXDIVTIME_BIT        | \
+                        TXPRIVEXT_TXAMAREGX_BIT        | \
+                        TXPRIVEXT_TXTIMERI_BIT         | \
+                        TXPRIVEXT_TXSTATUS_BIT         | \
+                        TXPRIVEXT_TXDISABLE_BIT)
+
+/* Meta2 specific bits. */
+#ifdef CONFIG_METAG_META12
+#define META2_PRIV     0
+#else
+#define META2_PRIV     (TXPRIVEXT_TXTIMER_BIT          | \
+                        TXPRIVEXT_TRACE_BIT)
+#endif
+
+/* Unaligned access checking bits. */
+#ifdef CONFIG_METAG_UNALIGNED
+#define UNALIGNED_PRIV TXPRIVEXT_ALIGNREW_BIT
+#else
+#define UNALIGNED_PRIV 0
+#endif
+
+#define PRIV_BITS      (DEFAULT_PRIV                   | \
+                        META2_PRIV                     | \
+                        UNALIGNED_PRIV)
+
+/*
+ * Protect access to:
+ * 0x06000000-0x07ffffff Direct mapped region
+ * 0x05000000-0x05ffffff MMU table region (Meta1)
+ * 0x04400000-0x047fffff Cache flush region
+ * 0x84000000-0x87ffffff Core cache memory region (Meta2)
+ *
+ * Allow access to:
+ * 0x80000000-0x81ffffff Core code memory region (Meta2)
+ */
+#ifdef CONFIG_METAG_META12
+#define PRIVSYSR_BITS  TXPRIVSYSR_ALL_BITS
+#else
+#define PRIVSYSR_BITS  (TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT)
+#endif
+
+/* Protect all 0x02xxxxxx and 0x048xxxxx. */
+#define PIOREG_BITS    0xffffffff
+
+/*
+ * Protect all 0x04000xx0 (system events)
+ * except write combiner flush and write fence (system events 4 and 5).
+ */
+#define PSYREG_BITS    0xfffffffb
+
+
+extern char _heap_start[];
+
+#ifdef CONFIG_METAG_BUILTIN_DTB
+extern u32 __dtb_start[];
+#endif
+
+#ifdef CONFIG_DA_CONSOLE
+/* Our early channel based console driver */
+extern struct console dash_console;
+#endif
+
+/* Machine description chosen in setup_arch(); __initdata as it is only
+ * dereferenced during boot-time initcalls.
+ */
+struct machine_desc *machine_desc __initdata;
+
+/*
+ * Map a Linux CPU number to a hardware thread ID
+ * In SMP this will be setup with the correct mapping at startup; in UP this
+ * will map to the HW thread on which we are running.
+ */
+u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
+       [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
+};
+
+/*
+ * Map a hardware thread ID to a Linux CPU number
+ * In SMP this will be fleshed out with the correct CPU ID for a particular
+ * hardware thread. In UP this will be initialised with the boot CPU ID.
+ */
+u8 hwthread_id_2_cpu[4] __read_mostly = {
+       [0 ... 3] = BAD_CPU_ID
+};
+
+/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
+ * to the real physical memory.  This is needed as we have to use the
+ * physical addresses in the MMU tables (pte entries), and not the virtual
+ * addresses.
+ * This variable is used in the __pa() and __va() macros, and should
+ * probably only be used via them.
+ */
+unsigned int meta_memoffset;
+EXPORT_SYMBOL(meta_memoffset);
+
+/* Bootloader command line, stashed by metag_start_kernel() for use in
+ * setup_arch().
+ */
+static char __initdata *original_cmd_line;
+
+/* Per-CPU pointer to the Thread Binary Interface (TBI) block. */
+DEFINE_PER_CPU(PTBI, pTBI);
+
+/*
+ * Mapping are specified as "CPU_ID:HWTHREAD_ID", e.g.
+ *
+ *     "hwthread_map=0:1,1:2,2:3,3:0"
+ *
+ *     Linux CPU ID    HWTHREAD_ID
+ *     ---------------------------
+ *         0                 1
+ *         1                 2
+ *         2                 3
+ *         3                 0
+ */
+/*
+ * Parse the "hwthread_map=" early parameter.
+ *
+ * Each entry has the form "CPU_ID:HWTHREAD_ID" and entries are separated
+ * by commas, e.g. "hwthread_map=0:1,1:2,2:3,3:0".  Both translation
+ * tables (cpu_2_hwthread_id and hwthread_id_2_cpu) are updated.
+ *
+ * Returns 0 on success, or -EINVAL if either ID is out of range.
+ */
+static int __init parse_hwthread_map(char *p)
+{
+       int cpu;
+
+       while (*p) {
+               cpu = (*p++) - '0';
+               /* Reject non-digits, and digits beyond the size of the
+                * cpu_2_hwthread_id[NR_CPUS] table (NR_CPUS may be < 10,
+                * in which case an unchecked digit would index out of
+                * bounds).
+                */
+               if (cpu < 0 || cpu > 9 || cpu >= NR_CPUS)
+                       goto err_cpu;
+
+               p++;            /* skip colon */
+               cpu_2_hwthread_id[cpu] = (*p++) - '0';
+               if (cpu_2_hwthread_id[cpu] >= 4)
+                       goto err_thread;
+               hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;
+
+               if (*p == ',')
+                       p++;            /* skip comma */
+       }
+
+       return 0;
+err_cpu:
+       pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
+       return -EINVAL;
+err_thread:
+       pr_err("%s: hwthread_map thread argument out of range\n", __func__);
+       return -EINVAL;
+}
+early_param("hwthread_map", parse_hwthread_map);
+
+/*
+ * Print every machine description compiled into the kernel together with
+ * its device-tree compatible strings, then halt the processor.
+ *
+ * Called when no matching machine could be found; this never returns
+ * (hard_processor_halt() stops the hardware thread).
+ */
+void __init dump_machine_table(void)
+{
+       struct machine_desc *p;
+       const char **compat;
+
+       pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
+       for_each_machine_desc(p) {
+               pr_info("\t%s\t[", p->name);
+               for (compat = p->dt_compat; compat && *compat; ++compat)
+                       printk(" '%s'", *compat);
+               printk(" ]\n");
+       }
+
+       pr_info("\nPlease check your kernel config and/or bootloader.\n");
+
+       hard_processor_halt(HALT_PANIC);
+}
+
+#ifdef CONFIG_METAG_HALT_ON_PANIC
+/* Panic notifier: halt the hardware thread instead of spinning. */
+static int metag_panic_event(struct notifier_block *this, unsigned long event,
+                            void *ptr)
+{
+       hard_processor_halt(HALT_PANIC);
+       return NOTIFY_DONE;
+}
+
+/* Registered on panic_notifier_list in setup_arch().  Fields are
+ * notifier_call, next, priority (positional initialisation).
+ */
+static struct notifier_block metag_panic_block = {
+       metag_panic_event,
+       NULL,
+       0
+};
+#endif
+
+/**
+ * setup_arch() - Architecture-specific boot-time setup.
+ * @cmdline_p: filled in with a pointer to the final kernel command line.
+ *
+ * Probes the caches and DA, selects a machine description (from an FDT
+ * argument, the built-in DTB, or the default), derives the physical memory
+ * layout from the TBI heap segment, and initialises paging, privilege
+ * protection and the device tree ahead of SMP bring-up.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+       unsigned long start_pfn;
+       unsigned long text_start = (unsigned long)(&_stext);
+       unsigned long cpu = smp_processor_id();
+       unsigned long heap_start, heap_end;
+       unsigned long start_pte;
+       PTBI _pTBI;
+       PTBISEG p_heap;
+       int heap_id, i;
+
+       metag_cache_probe();
+
+       metag_da_probe();
+#ifdef CONFIG_DA_CONSOLE
+       if (metag_da_enabled()) {
+               /* An early channel based console driver */
+               register_console(&dash_console);
+               add_preferred_console("ttyDA", 1, NULL);
+       }
+#endif
+
+       /* try interpreting the argument as a device tree */
+       machine_desc = setup_machine_fdt(original_cmd_line);
+       /* if it doesn't look like a device tree it must be a command line */
+       if (!machine_desc) {
+#ifdef CONFIG_METAG_BUILTIN_DTB
+               /* try the embedded device tree */
+               machine_desc = setup_machine_fdt(__dtb_start);
+               if (!machine_desc)
+                       panic("Invalid embedded device tree.");
+#else
+               /* use the default machine description */
+               machine_desc = default_machine_desc();
+#endif
+#ifndef CONFIG_CMDLINE_FORCE
+               /* append the bootloader cmdline to any builtin fdt cmdline */
+               if (boot_command_line[0] && original_cmd_line[0])
+                       strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+               strlcat(boot_command_line, original_cmd_line,
+                       COMMAND_LINE_SIZE);
+#endif
+       }
+       setup_meta_clocks(machine_desc->clocks);
+
+       *cmdline_p = boot_command_line;
+       parse_early_param();
+
+       /*
+        * Make sure we don't alias in dcache or icache
+        */
+       check_for_cache_aliasing(cpu);
+
+
+#ifdef CONFIG_METAG_HALT_ON_PANIC
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &metag_panic_block);
+#endif
+
+#ifdef CONFIG_DUMMY_CONSOLE
+       conswitchp = &dummy_con;
+#endif
+
+       if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
+               panic("Privilege must be enabled for this thread.");
+
+       _pTBI = __TBI(TBID_ISTAT_BIT);
+
+       per_cpu(pTBI, cpu) = _pTBI;
+
+       if (!per_cpu(pTBI, cpu))
+               panic("No TBI found!");
+
+       /*
+        * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
+        * rather than the version from the bootloader. This makes call
+        * stacks easier to understand and may allow us to unmap the
+        * bootloader at some point.
+        *
+        * We need to keep the LWK handler that TBI installed in order to
+        * be able to do inter-thread comms.
+        */
+       for (i = 0; i <= TBID_SIGNUM_MAX; i++)
+               if (i != TBID_SIGNUM_LWK)
+                       _pTBI->fnSigs[i] = __TBIUnExpXXX;
+
+       /* A Meta requirement is that the kernel is loaded (virtually)
+        * at the PAGE_OFFSET.
+        */
+       if (PAGE_OFFSET != text_start)
+               panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
+                     PAGE_OFFSET, text_start);
+
+       start_pte = mmu_read_second_level_page(text_start);
+
+       /*
+        * Kernel pages should have the PRIV bit set by the bootloader.
+        */
+       if (!(start_pte & _PAGE_KERNEL))
+               panic("kernel pte does not have PRIV set");
+
+       /*
+        * See __pa and __va in include/asm/page.h.
+        * This value is negative when running in local space but the
+        * calculations work anyway.
+        */
+       meta_memoffset = text_start - (start_pte & PAGE_MASK);
+
+       /* Now lets look at the heap space */
+       heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
+               + TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);
+
+       p_heap = __TBIFindSeg(NULL, heap_id);
+
+       if (!p_heap)
+               panic("Could not find heap from TBI!");
+
+       /* The heap begins at the first full page after the kernel data. */
+       heap_start = (unsigned long) &_heap_start;
+
+       /* The heap ends at the end of the heap segment specified with
+        * ldlk.
+        */
+       if (is_global_space(text_start)) {
+               pr_debug("WARNING: running in global space!\n");
+               heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
+       } else {
+               heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
+       }
+
+       ROOT_DEV = Root_RAM0;
+
+       /* init_mm is the mm struct used for the first task.  It is then
+        * cloned for all other tasks spawned from that task.
+        *
+        * Note - we are using the virtual addresses here.
+        */
+       init_mm.start_code = (unsigned long)(&_stext);
+       init_mm.end_code = (unsigned long)(&_etext);
+       init_mm.end_data = (unsigned long)(&_edata);
+       init_mm.brk = (unsigned long)heap_start;
+
+       min_low_pfn = PFN_UP(__pa(text_start));
+       max_low_pfn = PFN_DOWN(__pa(heap_end));
+
+       pfn_base = min_low_pfn;
+
+       /* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
+        * call later makes sure to keep the rounded up pages marked reserved.
+        */
+       max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
+       max_pfn &= ~((1 << MAX_ORDER) - 1);
+
+       start_pfn = PFN_UP(__pa(heap_start));
+
+       if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
+               /* Theoretically, we could expand the space that the
+                * bootmem allocator covers - much as we do for the
+                * 'high' address, and then tell the bootmem system
+                * that the lowest chunk is 'not available'.  Right
+                * now it is just much easier to constrain the
+                * user to always MAX_ORDER align their kernel space.
+                */
+
+               panic("Kernel must be %d byte aligned, currently at %#lx.",
+                     1 << (MAX_ORDER + PAGE_SHIFT),
+                     min_low_pfn << PAGE_SHIFT);
+       }
+
+#ifdef CONFIG_HIGHMEM
+       highstart_pfn = highend_pfn = max_pfn;
+       high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
+#else
+       high_memory = (void *)__va(PFN_PHYS(max_pfn));
+#endif
+
+       paging_init(heap_end);
+
+       setup_priv();
+
+       /* Setup the boot cpu's mapping. The rest will be setup below. */
+       cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
+       hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();
+
+       /* Copy device tree blob into non-init memory before unflattening */
+       copy_fdt();
+       unflatten_device_tree();
+
+#ifdef CONFIG_SMP
+       smp_init_cpus();
+#endif
+
+       if (machine_desc->init_early)
+               machine_desc->init_early();
+}
+
+/* arch_initcall: run the machine description's init_machine hook, which
+ * customizes platform devices or adds new ones.
+ */
+static int __init customize_machine(void)
+{
+       /* customizes platform devices, or adds new ones */
+       if (machine_desc->init_machine)
+               machine_desc->init_machine();
+       return 0;
+}
+arch_initcall(customize_machine);
+
+/* late_initcall: run the machine description's init_late hook, if any. */
+static int __init init_machine_late(void)
+{
+       if (machine_desc->init_late)
+               machine_desc->init_late();
+       return 0;
+}
+late_initcall(init_machine_late);
+
+#ifdef CONFIG_PROC_FS
+/*
+ *     Get CPU information for use by the procfs.
+ */
+/*
+ *     Get CPU information for use by the procfs.
+ */
+/*
+ * Return a static capability string ("DSP", "EDSP+FPU", ...) for the
+ * /proc/cpuinfo "Capabilities" field.  On Meta 2.1+ the DSP/FPU type is
+ * decoded from the CORE_ID register; on older cores only the TXENABLE
+ * class bits are available.
+ */
+static const char *get_cpu_capabilities(unsigned int txenable)
+{
+#ifdef CONFIG_METAG_META21
+       /* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
+       int coreid = metag_in32(METAC_CORE_ID);
+       unsigned int dsp_type = (coreid >> 3) & 7;
+       unsigned int fpu_type = (coreid >> 7) & 3;
+
+       switch (dsp_type | fpu_type << 3) {
+       case (0x00): return "EDSP";
+       case (0x01): return "DSP";
+       case (0x08): return "EDSP+LFPU";
+       case (0x09): return "DSP+LFPU";
+       case (0x10): return "EDSP+FPU";
+       case (0x11): return "DSP+FPU";
+       }
+       return "UNKNOWN";
+
+#else
+       if (!(txenable & TXENABLE_CLASS_BITS))
+               return "DSP";
+       else
+               return "";
+#endif
+}
+
+/*
+ * seq_file show callback for /proc/cpuinfo: one stanza per online CPU
+ * (SMP) or for the current hardware thread (UP), plus L2 cache details
+ * when an L2 cache is present.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+       const char *cpu;
+       unsigned int txenable, thread_id, major, minor;
+       unsigned long clockfreq = get_coreclock();
+#ifdef CONFIG_SMP
+       int i;
+       unsigned long lpj;
+#endif
+
+       cpu = "META";
+
+       /* Core revision and hardware thread ID come from TXENABLE. */
+       txenable = __core_reg_get(TXENABLE);
+       major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
+       minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
+       thread_id = (txenable >> 8) & 0x3;
+
+#ifdef CONFIG_SMP
+       for_each_online_cpu(i) {
+               lpj = per_cpu(cpu_data, i).loops_per_jiffy;
+               /* Read the other thread's TXENABLE via the global register
+                * interface rather than our own core register.
+                */
+               txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
+                                                       cpu_2_hwthread_id[i]);
+
+               seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
+                             "Clocking:\t%lu.%1luMHz\n"
+                             "BogoMips:\t%lu.%02lu\n"
+                             "Calibration:\t%lu loops\n"
+                             "Capabilities:\t%s\n\n",
+                             cpu, major, minor, i,
+                             clockfreq / 1000000, (clockfreq / 100000) % 10,
+                             lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
+                             lpj,
+                             get_cpu_capabilities(txenable));
+       }
+#else
+       seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
+                  "Clocking:\t%lu.%1luMHz\n"
+                  "BogoMips:\t%lu.%02lu\n"
+                  "Calibration:\t%lu loops\n"
+                  "Capabilities:\t%s\n",
+                  cpu, major, minor, thread_id,
+                  clockfreq / 1000000, (clockfreq / 100000) % 10,
+                  loops_per_jiffy / (500000 / HZ),
+                  (loops_per_jiffy / (5000 / HZ)) % 100,
+                  loops_per_jiffy,
+                  get_cpu_capabilities(txenable));
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_METAG_L2C
+       if (meta_l2c_is_present()) {
+               seq_printf(m, "L2 cache:\t%s\n"
+                             "L2 cache size:\t%d KB\n",
+                             meta_l2c_is_enabled() ? "enabled" : "disabled",
+                             meta_l2c_size() >> 10);
+       }
+#endif
+       return 0;
+}
+
+/* Single-record iterator: non-NULL cookie only for position 0, so
+ * show_cpuinfo() is invoked exactly once per read of /proc/cpuinfo.
+ */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+       return (void *)(*pos == 0);
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       return NULL;
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+/* seq_file operations backing /proc/cpuinfo. */
+const struct seq_operations cpuinfo_op = {
+       .start = c_start,
+       .next  = c_next,
+       .stop  = c_stop,
+       .show  = show_cpuinfo,
+};
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * Early C entry point: reset the core timer, clear .bss, stash the
+ * bootloader command line for setup_arch(), record which hardware thread
+ * we booted on, and hand over to the generic start_kernel().
+ */
+void __init metag_start_kernel(char *args)
+{
+       /* Zero the timer register so timestamps are from the point at
+        * which the kernel started running.
+        */
+       __core_reg_set(TXTIMER, 0);
+
+       /* Clear the bss. */
+       memset(__bss_start, 0,
+              (unsigned long)__bss_stop - (unsigned long)__bss_start);
+
+       /* Remember where these are for use in setup_arch */
+       original_cmd_line = args;
+
+       current_thread_info()->cpu = hard_processor_id();
+
+       start_kernel();
+}
+
+/**
+ * setup_priv() - Set up privilege protection registers.
+ *
+ * Set up privilege protection registers such as TXPRIVEXT to prevent userland
+ * from touching our precious registers and sensitive memory areas.
+ */
+void setup_priv(void)
+{
+       /* The per-thread protection register banks are strided in the
+        * register map; offset selects this hardware thread's bank.
+        */
+       unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S;
+
+       __core_reg_set(TXPRIVEXT, PRIV_BITS);
+
+       metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset);
+       metag_out32(PIOREG_BITS,   T0PIOREG   + offset);
+       metag_out32(PSYREG_BITS,   T0PSYREG   + offset);
+}
+
+/* Return the TBI block pointer previously stored for @cpu in setup_arch(). */
+PTBI pTBI_get(unsigned int cpu)
+{
+       return per_cpu(pTBI, cpu);
+}
+EXPORT_SYMBOL(pTBI_get);
+
+/* Compile-time capability string exposed via /proc/sys/kernel/capabilities.
+ * NOTE(review): the identifier "capabilites" is misspelled but kept as-is;
+ * it is file-internal and renaming would be cosmetic.
+ */
+#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
+char capabilites[] = "dsp fpu";
+#elif defined(CONFIG_METAG_DSP)
+char capabilites[] = "dsp";
+#elif defined(CONFIG_METAG_FPU)
+char capabilites[] = "fpu";
+#else
+char capabilites[] = "";
+#endif
+
+/* Read-only "capabilities" entry under the "kernel" sysctl directory. */
+static struct ctl_table caps_kern_table[] = {
+       {
+               .procname       = "capabilities",
+               .data           = capabilites,
+               .maxlen         = sizeof(capabilites),
+               .mode           = 0444,
+               .proc_handler   = proc_dostring,
+       },
+       {}
+};
+
+static struct ctl_table caps_root_table[] = {
+       {
+               .procname       = "kernel",
+               .mode           = 0555,
+               .child          = caps_kern_table,
+       },
+       {}
+};
+
+/* core_initcall: register the capabilities sysctl; -ENOMEM on failure. */
+static int __init capabilities_register_sysctl(void)
+{
+       struct ctl_table_header *caps_table_header;
+
+       caps_table_header = register_sysctl_table(caps_root_table);
+       if (!caps_table_header) {
+               pr_err("Unable to register CAPABILITIES sysctl\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+core_initcall(capabilities_register_sysctl);
diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c
new file mode 100644 (file)
index 0000000..3be61cf
--- /dev/null
@@ -0,0 +1,344 @@
+/*
+ *  Copyright (C) 1991,1992  Linus Torvalds
+ *  Copyright (C) 2005-2012  Imagination Technologies Ltd.
+ *
+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
+#include <linux/tracehook.h>
+
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+#include <asm/switch.h>
+#include <asm/syscall.h>
+#include <asm/syscalls.h>
+
+/* Shorthand accessors into pt_regs for the Meta register layout used by
+ * the signal code (syscall number/return value in DX[0], stack pointer in
+ * AX[0], argument and return-pointer registers, and the saved PC/flags).
+ */
+#define REG_FLAGS      ctx.SaveMask
+#define REG_RETVAL     ctx.DX[0].U0
+#define REG_SYSCALL    ctx.DX[0].U1
+#define REG_SP         ctx.AX[0].U0
+#define REG_ARG1       ctx.DX[3].U1
+#define REG_ARG2       ctx.DX[3].U0
+#define REG_ARG3       ctx.DX[2].U1
+#define REG_PC         ctx.CurrPC
+#define REG_RTP                ctx.DX[4].U1
+
+/* User-stack signal frame: siginfo, ucontext and a two-word trampoline
+ * (retcode) that re-enters the kernel via rt_sigreturn.
+ */
+struct rt_sigframe {
+       struct siginfo info;
+       struct ucontext uc;
+       unsigned long retcode[2];
+};
+
+/*
+ * Restore GP, catch-buffer and read-pipeline register state from the
+ * user-space sigcontext @sc into @regs.  Returns 0 on success or the
+ * first copy-in error.
+ */
+static int restore_sigcontext(struct pt_regs *regs,
+                             struct sigcontext __user *sc)
+{
+       int err;
+
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+       err = metag_gp_regs_copyin(regs, 0, sizeof(struct user_gp_regs), NULL,
+                                  &sc->regs);
+       if (!err)
+               err = metag_cb_regs_copyin(regs, 0,
+                                          sizeof(struct user_cb_regs), NULL,
+                                          &sc->cb);
+       if (!err)
+               err = metag_rp_state_copyin(regs, 0,
+                                           sizeof(struct user_rp_state), NULL,
+                                           &sc->rp);
+
+       /* This is a user-mode context. */
+       /* NOTE(review): TBICTX_PRIV_BIT is forced on here to mark the
+        * restored context as user-mode; exact TBICTX semantics should be
+        * confirmed against the TBI headers.
+        */
+       regs->REG_FLAGS |= TBICTX_PRIV_BIT;
+
+       return err;
+}
+
+/*
+ * rt_sigreturn system call: unwind the signal frame placed by
+ * setup_rt_frame(), restore the blocked signal set, register context and
+ * alternate stack, and resume with the original return value.  Any
+ * inconsistency in the user frame raises SIGSEGV.
+ */
+long sys_rt_sigreturn(void)
+{
+       /* NOTE - Meta stack goes UPWARDS - so we wind the stack back */
+       struct pt_regs *regs = current_pt_regs();
+       struct rt_sigframe __user *frame;
+       sigset_t set;
+
+       /* The frame sits just below the current SP (stack grows up). */
+       frame = (__force struct rt_sigframe __user *)(regs->REG_SP -
+                                                     sizeof(*frame));
+
+       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+               goto badframe;
+
+       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+               goto badframe;
+
+       set_current_blocked(&set);
+
+       if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+               goto badframe;
+
+       if (restore_altstack(&frame->uc.uc_stack))
+               goto badframe;
+
+       return regs->REG_RETVAL;
+
+badframe:
+       force_sig(SIGSEGV, current);
+
+       return 0;
+}
+
+/*
+ * Save the current register state (GP, catch-buffer, read-pipeline) and
+ * the old signal @mask into the user-space sigcontext @sc.  Returns 0 on
+ * success or a copy-out error.
+ */
+static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+                           unsigned long mask)
+{
+       int err;
+
+       err = metag_gp_regs_copyout(regs, 0, sizeof(struct user_gp_regs), NULL,
+                                   &sc->regs);
+
+       if (!err)
+               err = metag_cb_regs_copyout(regs, 0,
+                                           sizeof(struct user_cb_regs), NULL,
+                                           &sc->cb);
+       if (!err)
+               err = metag_rp_state_copyout(regs, 0,
+                                            sizeof(struct user_rp_state), NULL,
+                                            &sc->rp);
+
+       /* OK, clear that cbuf flag in the old context, or our stored
+        * catch buffer will be restored when we go to call the signal
+        * handler. Also clear out the CBRP RA/RD pipe bit incase
+        * that is pending as well!
+        * Note that as we have already stored this context, these
+        * flags will get restored on sigreturn to their original
+        * state.
+        */
+       regs->REG_FLAGS &= ~(TBICTX_XCBF_BIT | TBICTX_CBUF_BIT |
+                            TBICTX_CBRP_BIT);
+
+       /* Clear out the LSM_STEP bits in case we are in the middle of
+        * and MSET/MGET.
+        */
+       regs->ctx.Flags &= ~TXSTATUS_LSM_STEP_BITS;
+
+       err |= __put_user(mask, &sc->oldmask);
+
+       return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+/*
+ * Pick the stack address for a new signal frame: the alternate signal
+ * stack base if SA_ONSTACK is requested and we are not already on it,
+ * otherwise the current SP, 8-byte aligned upwards.
+ *
+ * NOTE(review): @frame_size is unused here — on Meta the stack grows
+ * upward, so the frame is laid out above @sp rather than below it.
+ */
+static void __user *get_sigframe(struct k_sigaction *ka, unsigned long sp,
+                                size_t frame_size)
+{
+       /* Meta stacks grows upwards */
+       if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
+               sp = current->sas_ss_sp;
+
+       sp = (sp + 7) & ~7;                     /* 8byte align stack */
+
+       return (void __user *)sp;
+}
+
+/*
+ * Build the rt signal frame on the user stack and redirect user
+ * registers so that execution resumes in the signal handler, with the
+ * return pointer aimed at the in-frame sigreturn trampoline.  Returns 0
+ * on success; on any fault the task is sent SIGSEGV and -EFAULT is
+ * returned.
+ */
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+                         sigset_t *set, struct pt_regs *regs)
+{
+       struct rt_sigframe __user *frame;
+       int err = -EFAULT;
+       unsigned long code;
+
+       frame = get_sigframe(ka, regs->REG_SP, sizeof(*frame));
+       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+               goto out;
+
+       err = copy_siginfo_to_user(&frame->info, info);
+
+       /* Create the ucontext.  */
+       err |= __put_user(0, &frame->uc.uc_flags);
+       err |= __put_user(0, (unsigned long __user *)&frame->uc.uc_link);
+       err |= __save_altstack(&frame->uc.uc_stack, regs->REG_SP);
+       err |= setup_sigcontext(&frame->uc.uc_mcontext,
+                               regs, set->sig[0]);
+       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+       if (err)
+               goto out;
+
+       /* Set up to return from userspace.  */
+
+       /* MOV D1Re0 (D1.0), #__NR_rt_sigreturn */
+       code = 0x03000004 | (__NR_rt_sigreturn << 3);
+       err |= __put_user(code, (unsigned long __user *)(&frame->retcode[0]));
+
+       /* SWITCH #__METAG_SW_SYS */
+       code = __METAG_SW_ENCODING(SYS);
+       err |= __put_user(code, (unsigned long __user *)(&frame->retcode[1]));
+
+       if (err)
+               goto out;
+
+       /* Set up registers for signal handler */
+       regs->REG_RTP = (unsigned long) frame->retcode;
+       regs->REG_SP = (unsigned long) frame + sizeof(*frame);
+       regs->REG_ARG1 = sig;
+       regs->REG_ARG2 = (unsigned long) &frame->info;
+       regs->REG_ARG3 = (unsigned long) &frame->uc;
+       regs->REG_PC = (unsigned long) ka->sa.sa_handler;
+
+       pr_debug("SIG deliver (%s:%d): sp=%p pc=%08x pr=%08x\n",
+                current->comm, current->pid, frame, regs->REG_PC,
+                regs->REG_RTP);
+
+       /* Now pass size of 'new code' into sigtramp so we can do a more
+        * effective cache flush - directed rather than 'full flush'.
+        */
+       flush_cache_sigtramp(regs->REG_RTP, sizeof(frame->retcode));
+out:
+       if (err) {
+               force_sigsegv(sig, current);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+/*
+ * Deliver one signal: build the user-stack frame and, if that succeeds,
+ * commit delivery (update the blocked mask, clear saved sigmask state).
+ */
+static void handle_signal(unsigned long sig, siginfo_t *info,
+                         struct k_sigaction *ka, struct pt_regs *regs)
+{
+       sigset_t *oldset = sigmask_to_save();
+
+       /* Set up the stack frame */
+       if (setup_rt_frame(sig, ka, info, oldset, regs))
+               return;
+
+       signal_delivered(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP));
+}
+
+ /*
+  * Notes for Meta.
+  * We have moved from the old 2.4.9 SH way of using syscall_nr (in the stored
+  * context) to passing in the syscall flag on the stack.
+  * This is because having syscall_nr in our context does not fit with TBX, and
+  * corrupted the stack.
+  */
+/*
+ * Core signal dispatch: decide whether an interrupted syscall must be
+ * restarted, deliver a pending signal if there is one, and otherwise
+ * restore the saved sigmask.  Returns 0, or a negative "restart" request
+ * handled by the caller without leaving kernel space.
+ */
+static int do_signal(struct pt_regs *regs, int syscall)
+{
+       unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
+       struct k_sigaction ka;
+       siginfo_t info;
+       int signr;
+       int restart = 0;
+
+       /*
+        * By the end of rt_sigreturn the context describes the point that the
+        * signal was taken (which may happen to be just before a syscall if
+        * it's already been restarted). This should *never* be mistaken for a
+        * system call in need of restarting.
+        */
+       if (syscall == __NR_rt_sigreturn)
+               syscall = -1;
+
+       /* Did we come from a system call? */
+       if (syscall >= 0) {
+               continue_addr = regs->REG_PC;
+               restart_addr = continue_addr - 4;
+               retval = regs->REG_RETVAL;
+
+               /*
+                * Prepare for system call restart. We do this here so that a
+                * debugger will see the already changed PC.
+                */
+               switch (retval) {
+               case -ERESTART_RESTARTBLOCK:
+                       restart = -2;
+                       /* fall through */
+               case -ERESTARTNOHAND:
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
+                       ++restart;
+                       regs->REG_PC = restart_addr;
+                       break;
+               }
+       }
+
+       /*
+        * Get the signal to deliver. When running under ptrace, at this point
+        * the debugger may change all our registers ...
+        */
+       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+       /*
+        * Depending on the signal settings we may need to revert the decision
+        * to restart the system call. But skip this if a debugger has chosen to
+        * restart at a different PC.
+        */
+       if (regs->REG_PC != restart_addr)
+               restart = 0;
+       if (signr > 0) {
+               if (unlikely(restart)) {
+                       if (retval == -ERESTARTNOHAND
+                           || retval == -ERESTART_RESTARTBLOCK
+                           || (retval == -ERESTARTSYS
+                               && !(ka.sa.sa_flags & SA_RESTART))) {
+                               regs->REG_RETVAL = -EINTR;
+                               regs->REG_PC = continue_addr;
+                       }
+               }
+
+               /* Whee! Actually deliver the signal.  */
+               handle_signal(signr, &info, &ka, regs);
+               return 0;
+       }
+
+       /* Handlerless -ERESTART_RESTARTBLOCK re-enters via restart_syscall */
+       if (unlikely(restart < 0))
+               regs->REG_SYSCALL = __NR_restart_syscall;
+
+       /*
+        * If there's no signal to deliver, we just put the saved sigmask back.
+        */
+       restore_saved_sigmask();
+
+       return restart;
+}
+
+/*
+ * Kernel-exit work loop: reschedule, deliver signals and run resume
+ * notifications until no _TIF_WORK_MASK flags remain.  Interrupts are
+ * disabled when the flags are re-sampled.  Returns a non-zero restart
+ * request from do_signal() (handled by the caller without returning to
+ * user space) or 0 when all work is done.
+ */
+int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
+                   int syscall)
+{
+       do {
+               if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+                       schedule();
+               } else {
+                       if (unlikely(!user_mode(regs)))
+                               return 0;
+                       local_irq_enable();
+                       if (thread_flags & _TIF_SIGPENDING) {
+                               int restart = do_signal(regs, syscall);
+                               if (unlikely(restart)) {
+                                       /*
+                                        * Restart without handlers.
+                                        * Deal with it without leaving
+                                        * the kernel space.
+                                        */
+                                       return restart;
+                               }
+                               /* Only offer syscall restart once. */
+                               syscall = -1;
+                       } else {
+                               clear_thread_flag(TIF_NOTIFY_RESUME);
+                               tracehook_notify_resume(regs);
+                       }
+               }
+               local_irq_disable();
+               thread_flags = current_thread_info()->flags;
+       } while (thread_flags & _TIF_WORK_MASK);
+       return 0;
+}
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
new file mode 100644 (file)
index 0000000..4b6d1f1
--- /dev/null
@@ -0,0 +1,575 @@
+/*
+ *  Copyright (C) 2009,2010,2011 Imagination Technologies Ltd.
+ *
+ *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cache.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cachepart.h>
+#include <asm/core_reg.h>
+#include <asm/cpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/tlbflush.h>
+#include <asm/hwthread.h>
+#include <asm/traps.h>
+
+DECLARE_PER_CPU(PTBI, pTBI);
+
+void *secondary_data_stack;
+
+/*
+ * structures for inter-processor calls
+ * - A collection of single bit ipi messages.
+ */
+struct ipi_data {
+       spinlock_t lock;
+       unsigned long ipi_count;
+       unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
+       .lock   = __SPIN_LOCK_UNLOCKED(ipi_data.lock),
+};
+
+static DEFINE_SPINLOCK(boot_lock);
+
+/*
+ * "thread" is assumed to be a valid Meta hardware thread ID.
+ */
+/*
+ * Start the given Meta hardware thread executing secondary_startup().
+ * Serialised against concurrent boots by boot_lock.  Always returns 0;
+ * __cpu_up() detects boot failure by waiting for the CPU to come online.
+ */
+int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
+{
+       u32 val;
+
+       /*
+        * set synchronisation state between this boot processor
+        * and the secondary one
+        */
+       spin_lock(&boot_lock);
+
+       /* Point the thread's PC at the secondary entry point. */
+       core_reg_write(TXUPC_ID, 0, thread, (unsigned int)secondary_startup);
+       core_reg_write(TXUPC_ID, 1, thread, 0);
+
+       /*
+        * Give the thread privilege (PSTAT) and clear potentially problematic
+        * bits in the process (namely ISTAT, CBMarker, CBMarkerI, LSM_STEP).
+        */
+       core_reg_write(TXUCT_ID, TXSTATUS_REGNUM, thread, TXSTATUS_PSTAT_BIT);
+
+       /* Clear the minim enable bit. */
+       val = core_reg_read(TXUCT_ID, TXPRIVEXT_REGNUM, thread);
+       core_reg_write(TXUCT_ID, TXPRIVEXT_REGNUM, thread, val & ~0x80);
+
+       /*
+        * set the ThreadEnable bit (0x1) in the TXENABLE register
+        * for the specified thread - off it goes!
+        */
+       val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread);
+       core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1);
+
+       /*
+        * now the secondary core is starting up let it run its
+        * calibrations, then wait for it to finish
+        */
+       spin_unlock(&boot_lock);
+
+       return 0;
+}
+
+/*
+ * Bring CPU @cpu online, running @idle as its idle task.  Maps the Linux
+ * CPU number to a Meta hardware thread, publishes the idle stack for the
+ * secondary startup code, then waits up to one second for the CPU to
+ * mark itself online.  Returns 0 on success or -EIO on timeout.
+ */
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+       unsigned int thread = cpu_2_hwthread_id[cpu];
+       int ret;
+
+       /* Point the new thread at the kernel page tables. */
+       load_pgd(swapper_pg_dir, thread);
+
+       flush_tlb_all();
+
+       /*
+        * Tell the secondary CPU where to find its idle thread's stack.
+        */
+       secondary_data_stack = task_stack_page(idle);
+
+       /* Make the stack pointer visible before the thread starts running. */
+       wmb();
+
+       /*
+        * Now bring the CPU into our world.
+        */
+       ret = boot_secondary(thread, idle);
+       if (ret == 0) {
+               unsigned long timeout;
+
+               /*
+                * CPU was successfully started, wait for it
+                * to come online or time out.
+                */
+               timeout = jiffies + HZ;
+               while (time_before(jiffies, timeout)) {
+                       if (cpu_online(cpu))
+                               break;
+
+                       udelay(10);
+                       barrier();
+               }
+
+               if (!cpu_online(cpu))
+                       ret = -EIO;
+       }
+
+       secondary_data_stack = NULL;
+
+       if (ret) {
+               pr_crit("CPU%u: processor failed to boot\n", cpu);
+
+               /*
+                * FIXME: We need to clean up the new idle thread. --rmk
+                */
+       }
+
+       return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DECLARE_COMPLETION(cpu_killed);
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ *
+ * Marks this CPU offline, migrates its IRQs away, flushes caches/TLBs
+ * and removes the CPU from every process's mm cpumask.  Always returns 0.
+ */
+int __cpuexit __cpu_disable(void)
+{
+       unsigned int cpu = smp_processor_id();
+       struct task_struct *p;
+
+       /*
+        * Take this CPU offline.  Once we clear this, we can't return,
+        * and we must not schedule until we're ready to give up the cpu.
+        */
+       set_cpu_online(cpu, false);
+
+       /*
+        * OK - migrate IRQs away from this CPU
+        */
+       migrate_irqs();
+
+       /*
+        * Flush user cache and TLB mappings, and then remove this CPU
+        * from the vm mask set of all processes.
+        */
+       flush_cache_all();
+       local_flush_tlb_all();
+
+       /* tasklist_lock protects the process list traversal only. */
+       read_lock(&tasklist_lock);
+       for_each_process(p) {
+               if (p->mm)
+                       cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+       }
+       read_unlock(&tasklist_lock);
+
+       return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpuexit __cpu_die(unsigned int cpu)
+{
+       /*
+        * NOTE(review): msecs_to_jiffies(1) is an extremely short grace
+        * period (at most one jiffy) for the dying CPU to signal
+        * cpu_killed - confirm this timeout is intentional.
+        */
+       if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
+               pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we do not return from this function. If this cpu is
+ * brought online again it will need to run secondary_startup().
+ */
+void __cpuexit cpu_die(void)
+{
+       local_irq_disable();
+       idle_task_exit();
+
+       /* Let __cpu_die() know we got this far. */
+       complete(&cpu_killed);
+
+       /*
+        * Clear this thread's TXENABLE bit (XOR with itself yields 0),
+        * stopping the hardware thread.  Execution does not continue
+        * past this point.
+        */
+       asm ("XOR       TXENABLE, D0Re0,D0Re0\n");
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+       /* Snapshot the global calibration result into this CPU's slot. */
+       per_cpu(cpu_data, cpuid).loops_per_jiffy = loops_per_jiffy;
+}
+
+/*
+ * This is the secondary CPU boot entry.  We're using this CPUs
+ * idle thread stack and the global page tables.
+ */
+/*
+ * This is the secondary CPU boot entry.  We're using this CPUs
+ * idle thread stack and the global page tables.
+ *
+ * Does not return; finishes by entering the idle loop.  The boot CPU
+ * is released from __cpu_up() once set_cpu_online() is called below.
+ */
+asmlinkage void secondary_start_kernel(void)
+{
+       struct mm_struct *mm = &init_mm;
+       unsigned int cpu = smp_processor_id();
+
+       /*
+        * All kernel threads share the same mm context; grab a
+        * reference and switch to it.
+        */
+       atomic_inc(&mm->mm_users);
+       atomic_inc(&mm->mm_count);
+       current->active_mm = mm;
+       cpumask_set_cpu(cpu, mm_cpumask(mm));
+       enter_lazy_tlb(mm, current);
+       local_flush_tlb_all();
+
+       /*
+        * TODO: Some day it might be useful for each Linux CPU to
+        * have its own TBI structure. That would allow each Linux CPU
+        * to run different interrupt handlers for the same IRQ
+        * number.
+        *
+        * For now, simply copying the pointer to the boot CPU's TBI
+        * structure is sufficient because we always want to run the
+        * same interrupt handler whatever CPU takes the interrupt.
+        */
+       per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT);
+
+       if (!per_cpu(pTBI, cpu))
+               panic("No TBI found!");
+
+       per_cpu_trap_init(cpu);
+
+       /* Stay on this CPU until we reach cpu_idle(). */
+       preempt_disable();
+
+       setup_priv();
+
+       /*
+        * Enable local interrupts.
+        */
+       tbi_startup_interrupt(TBID_SIGNUM_TRT);
+       notify_cpu_starting(cpu);
+       local_irq_enable();
+
+       pr_info("CPU%u (thread %u): Booted secondary processor\n",
+               cpu, cpu_2_hwthread_id[cpu]);
+
+       calibrate_delay();
+       smp_store_cpu_info(cpu);
+
+       /*
+        * OK, now it's safe to let the boot CPU continue
+        */
+       set_cpu_online(cpu, true);
+
+       /*
+        * Check for cache aliasing.
+        * Preemption is disabled
+        */
+       check_for_cache_aliasing(cpu);
+
+       /*
+        * OK, it's off to the idle thread for us
+        */
+       cpu_idle();
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+       unsigned long total_bogo = 0;
+       int i;
+
+       /* Sum the calibration results of every CPU that came online. */
+       for_each_online_cpu(i)
+               total_bogo += per_cpu(cpu_data, i).loops_per_jiffy;
+
+       pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+               num_online_cpus(),
+               total_bogo / (500000/HZ),
+               (total_bogo / (5000/HZ)) % 100);
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+       unsigned int cpu = smp_processor_id();
+
+       /*
+        * NOTE(review): the return value of init_new_context() is
+        * ignored here - confirm it cannot fail for &init_mm.
+        */
+       init_new_context(current, &init_mm);
+       current_thread_info()->cpu = cpu;
+
+       smp_store_cpu_info(cpu);
+       /* Every possible CPU is considered present on this platform. */
+       init_cpu_present(cpu_possible_mask);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+       unsigned int this_cpu = smp_processor_id();
+       PTBI tbi = __TBI(TBID_ISTAT_BIT);
+
+       /* The boot CPU uses the boot TBI structure for its interrupts. */
+       per_cpu(pTBI, this_cpu) = tbi;
+
+       if (!tbi)
+               panic("No TBI found!");
+}
+
+static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg);
+
+/*
+ * Queue @msg for every CPU in @mask and raise a KICK interrupt on each
+ * CPU that did not already have this message pending.  The per-CPU bit
+ * updates are done under each CPU's ipi_data lock with local interrupts
+ * disabled.
+ */
+static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
+{
+       unsigned long flags;
+       unsigned int cpu;
+       cpumask_t map;
+
+       cpumask_clear(&map);
+       local_irq_save(flags);
+
+       for_each_cpu(cpu, mask) {
+               struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+
+               spin_lock(&ipi->lock);
+
+               /*
+                * KICK interrupts are queued in hardware so we'll get
+                * multiple interrupts if we call smp_cross_call()
+                * multiple times for one msg. The problem is that we
+                * only have one bit for each message - we can't queue
+                * them in software.
+                *
+                * The first time through ipi_handler() we'll clear
+                * the msg bit, having done all the work. But when we
+                * return we'll get _another_ interrupt (and another,
+                * and another until we've handled all the queued
+                * KICKs). Running ipi_handler() when there's no work
+                * to do is bad because that's how kick handler
+                * chaining detects who the KICK was intended for.
+                * See arch/metag/kernel/kick.c for more details.
+                *
+                * So only add 'cpu' to 'map' if we haven't already
+                * queued a KICK interrupt for 'msg'.
+                */
+               if (!(ipi->bits & (1 << msg))) {
+                       ipi->bits |= 1 << msg;
+                       cpumask_set_cpu(cpu, &map);
+               }
+
+               spin_unlock(&ipi->lock);
+       }
+
+       /*
+        * Call the platform specific cross-CPU call function.
+        */
+       smp_cross_call(map, msg);
+
+       local_irq_restore(flags);
+}
+
+/* Generic smp_call_function() hook: IPI a set of CPUs. */
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+/* Generic smp_call_function_single() hook: IPI one CPU. */
+void arch_send_call_function_single_ipi(int cpu)
+{
+       send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void show_ipi_list(struct seq_file *p)
+{
+       unsigned int i;
+
+       /* One count column per present CPU, /proc/interrupts style. */
+       seq_puts(p, "IPI:");
+       for_each_present_cpu(i)
+               seq_printf(p, " %10lu", per_cpu(ipi_data, i).ipi_count);
+       seq_putc(p, '\n');
+}
+
+static DEFINE_SPINLOCK(stop_lock);
+
+/*
+ * Main handler for inter-processor interrupts
+ *
+ * For Meta, the ipimask now only identifies a single
+ * category of IPI (Bit 1 IPIs have been replaced by a
+ * different mechanism):
+ *
+ *  Bit 0 - Inter-processor function call
+ */
+/*
+ * Handle one pending IPI message for this CPU.  Returns 1 if a message
+ * was dispatched, 0 otherwise - the return value feeds kick handler
+ * chaining (see ipi_handler()).  Only the lowest-numbered pending
+ * message is handled per call; the hardware re-raises the KICK for any
+ * that remain.
+ */
+static int do_IPI(struct pt_regs *regs)
+{
+       unsigned int cpu = smp_processor_id();
+       struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       unsigned long msgs, nextmsg;
+       int handled = 0;
+
+       ipi->ipi_count++;
+
+       /* Atomically claim the lowest pending message bit. */
+       spin_lock(&ipi->lock);
+       msgs = ipi->bits;
+       nextmsg = msgs & -msgs;         /* isolate lowest set bit */
+       ipi->bits &= ~nextmsg;
+       spin_unlock(&ipi->lock);
+
+       if (nextmsg) {
+               handled = 1;
+
+               /* Convert the single-bit mask to its bit index. */
+               nextmsg = ffz(~nextmsg);
+               switch (nextmsg) {
+               case IPI_RESCHEDULE:
+                       scheduler_ipi();
+                       break;
+
+               case IPI_CALL_FUNC:
+                       generic_smp_call_function_interrupt();
+                       break;
+
+               case IPI_CALL_FUNC_SINGLE:
+                       generic_smp_call_function_single_interrupt();
+                       break;
+
+               default:
+                       pr_crit("CPU%u: Unknown IPI message 0x%lx\n",
+                               cpu, nextmsg);
+                       break;
+               }
+       }
+
+       set_irq_regs(old_regs);
+
+       return handled;
+}
+
+/* Ask @cpu to run its scheduler via an IPI. */
+void smp_send_reschedule(int cpu)
+{
+       send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+/*
+ * smp_call_function() callback used by smp_send_stop(): log a stack
+ * trace (when the system is in a state where logging is meaningful),
+ * mark this CPU offline and halt the hardware thread.  Does not return.
+ */
+static void stop_this_cpu(void *data)
+{
+       unsigned int cpu = smp_processor_id();
+
+       if (system_state == SYSTEM_BOOTING ||
+           system_state == SYSTEM_RUNNING) {
+               /* stop_lock serialises the dump output between CPUs. */
+               spin_lock(&stop_lock);
+               pr_crit("CPU%u: stopping\n", cpu);
+               dump_stack();
+               spin_unlock(&stop_lock);
+       }
+
+       set_cpu_online(cpu, false);
+
+       local_irq_disable();
+
+       hard_processor_halt(HALT_OK);
+}
+
+/* Halt all other CPUs (e.g. for panic/reboot); does not wait for them. */
+void smp_send_stop(void)
+{
+       smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+/*
+ * not supported here
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
+
+/*
+ * We use KICKs for inter-processor interrupts.
+ *
+ * For every CPU in "callmap" the IPI data must already have been
+ * stored in that CPU's "ipi_data" member prior to calling this
+ * function.
+ */
+static void kick_raise_softirq(cpumask_t callmap, unsigned int irq)
+{
+       int cpu;
+
+       /*
+        * NOTE(review): @irq is currently unused - every CPU in the map
+        * is kicked via the same T0KICKI-based register regardless.
+        */
+       for_each_cpu(cpu, &callmap) {
+               unsigned int thread;
+
+               thread = cpu_2_hwthread_id[cpu];
+
+               BUG_ON(thread == BAD_HWTHREAD_ID);
+
+               /* Write to the target thread's KICK interrupt register. */
+               metag_out32(1, T0KICKI + (thread * TnXKICK_STRIDE));
+       }
+}
+
+/*
+ * KICK handler chained via kick_register_func(); *handled reports
+ * whether do_IPI() dispatched a message so chaining can decide whether
+ * the KICK was meant for us (see arch/metag/kernel/kick.c).
+ */
+static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers,
+                  int Inst, PTBI pTBI, int *handled)
+{
+       *handled = do_IPI((struct pt_regs *)State.Sig.pCtx);
+
+       return State;
+}
+
+static struct kick_irq_handler ipi_irq = {
+       .func = ipi_handler,
+};
+
+/*
+ * @msg is unused: there is only one category of KICK IPI (see the
+ * comment above do_IPI()), so every message raises the same interrupt.
+ */
+static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg)
+{
+       kick_raise_softirq(callmap, 1);
+}
+
+static inline unsigned int get_core_count(void)
+{
+       unsigned int count = 0;
+       int thread;
+
+       /* A hardware thread whose TXENABLE register is non-zero is usable. */
+       for (thread = 0; thread < CONFIG_NR_CPUS; thread++)
+               if (core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread))
+                       count++;
+
+       return count;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+       unsigned int i, ncores = get_core_count();
+
+       /* If no hwthread_map early param was set use default mapping */
+       for (i = 0; i < NR_CPUS; i++)
+               if (cpu_2_hwthread_id[i] == BAD_HWTHREAD_ID) {
+                       /* Identity mapping: CPU i <-> hardware thread i. */
+                       cpu_2_hwthread_id[i] = i;
+                       hwthread_id_2_cpu[i] = i;
+               }
+
+       for (i = 0; i < ncores; i++)
+               set_cpu_possible(i, true);
+
+       /* Hook our IPI handler into the KICK interrupt chain. */
+       kick_register_func(&ipi_irq);
+}
diff --git a/arch/metag/kernel/stacktrace.c b/arch/metag/kernel/stacktrace.c
new file mode 100644 (file)
index 0000000..5510361
--- /dev/null
@@ -0,0 +1,187 @@
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+
+#if defined(CONFIG_FRAME_POINTER)
+
+#ifdef CONFIG_KALLSYMS
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+
+static unsigned long tbi_boing_addr;
+static unsigned long tbi_boing_size;
+
+/*
+ * Look up the address and size of ___TBIBoingVec via kallsyms so the
+ * unwinder can recognise interrupt/syscall entry frames.  On lookup
+ * failure tbi_boing_addr is set to the sentinel 1 (non-zero but never a
+ * valid code address) so the caller does not retry, while
+ * tbi_boing_size stays 0 and disables the special-case in
+ * unwind_frame().
+ */
+static void tbi_boing_init(void)
+{
+       /* We need to know where TBIBoingVec is and it's size */
+       unsigned long size;
+       unsigned long offset;
+       char modname[MODULE_NAME_LEN];
+       char name[KSYM_NAME_LEN];
+       tbi_boing_addr = kallsyms_lookup_name("___TBIBoingVec");
+       if (!tbi_boing_addr)
+               tbi_boing_addr = 1;
+       else if (!lookup_symbol_attrs(tbi_boing_addr, &size,
+                                     &offset, modname, name))
+               tbi_boing_size = size;
+}
+#endif
+
+#define ALIGN_DOWN(addr, size)  ((addr)&(~((size)-1)))
+
+/*
+ * Unwind the current stack frame and store the new register values in the
+ * structure passed as argument. Unwinding is equivalent to a function return,
+ * hence the new PC value rather than LR should be used for backtrace.
+ */
+/*
+ * Unwind the current stack frame and store the new register values in the
+ * structure passed as argument. Unwinding is equivalent to a function return,
+ * hence the new PC value rather than LR should be used for backtrace.
+ *
+ * Returns 0 on success or -EINVAL when the frame pointer is misaligned
+ * or the next frame falls outside the current kernel stack.
+ */
+int notrace unwind_frame(struct stackframe *frame)
+{
+       struct metag_frame *fp = (struct metag_frame *)frame->fp;
+       unsigned long lr;
+       unsigned long fpnew;
+
+       /* Frame pointers must be 8-byte aligned. */
+       if (frame->fp & 0x7)
+               return -EINVAL;
+
+       fpnew = fp->fp;
+       /* Step back from the return address to the call site. */
+       lr = fp->lr - 4;
+
+#ifdef CONFIG_KALLSYMS
+       /* If we've reached TBIBoingVec then we're at an interrupt
+        * entry point or a syscall entry point. The frame pointer
+        * points to a pt_regs which can be used to continue tracing on
+        * the other side of the boing.
+        */
+       if (!tbi_boing_addr)
+               tbi_boing_init();
+       if (tbi_boing_size && lr >= tbi_boing_addr &&
+           lr < tbi_boing_addr + tbi_boing_size) {
+               struct pt_regs *regs = (struct pt_regs *)fpnew;
+               /* Stop rather than unwinding into user space. */
+               if (user_mode(regs))
+                       return -EINVAL;
+               fpnew = regs->ctx.AX[1].U0;
+               lr = regs->ctx.DX[4].U1;
+       }
+#endif
+
+       /* stack grows up, so frame pointers must decrease */
+       if (fpnew < (ALIGN_DOWN((unsigned long)fp, THREAD_SIZE) +
+                    sizeof(struct thread_info)) || fpnew >= (unsigned long)fp)
+               return -EINVAL;
+
+       /* restore the registers from the stack frame */
+       frame->fp = fpnew;
+       frame->pc = lr;
+
+       return 0;
+}
+#else
+/*
+ * Frame-pointer-less fallback: scan the stack for values that look
+ * like kernel return addresses.  Less accurate than the
+ * CONFIG_FRAME_POINTER version above.  Returns 0 when a plausible
+ * caller is found, -EINVAL otherwise.
+ */
+int notrace unwind_frame(struct stackframe *frame)
+{
+       struct metag_frame *sp = (struct metag_frame *)frame->sp;
+
+       /* Stack pointers must be 8-byte aligned. */
+       if (frame->sp & 0x7)
+               return -EINVAL;
+
+       while (!kstack_end(sp)) {
+               /* Step back from the saved address to the call site. */
+               unsigned long addr = sp->lr - 4;
+               sp--;
+
+               if (__kernel_text_address(addr)) {
+                       frame->sp = (unsigned long)sp;
+                       frame->pc = addr;
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+#endif
+
+/*
+ * Repeatedly unwind @frame, handing each frame to @fn, until either the
+ * callback asks to stop (non-zero return) or unwinding fails.
+ */
+void notrace walk_stackframe(struct stackframe *frame,
+                    int (*fn)(struct stackframe *, void *), void *data)
+{
+       for (;;) {
+               if (fn(frame, data))
+                       return;
+               if (unwind_frame(frame) < 0)
+                       return;
+       }
+}
+EXPORT_SYMBOL(walk_stackframe);
+
+#ifdef CONFIG_STACKTRACE
+struct stack_trace_data {
+       struct stack_trace *trace;
+       unsigned int no_sched_functions;
+       unsigned int skip;
+};
+
+static int save_trace(struct stackframe *frame, void *d)
+{
+       struct stack_trace_data *data = d;
+       struct stack_trace *trace = data->trace;
+       unsigned long pc = frame->pc;
+
+       /* Optionally hide scheduler internals from the trace. */
+       if (data->no_sched_functions && in_sched_functions(pc))
+               return 0;
+       /* Honour the caller's request to skip leading entries. */
+       if (data->skip) {
+               data->skip--;
+               return 0;
+       }
+
+       trace->entries[trace->nr_entries++] = pc;
+
+       /* A non-zero return stops the walk once the buffer is full. */
+       return trace->nr_entries >= trace->max_entries;
+}
+
+/*
+ * Capture a stack trace for @tsk into @trace.  For the current task the
+ * trace starts at this function's own frame; for other tasks it starts
+ * at the task's saved (switched-out) context.  The trace is terminated
+ * with ULONG_MAX if space remains.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       struct stack_trace_data data;
+       struct stackframe frame;
+
+       data.trace = trace;
+       data.skip = trace->skip;
+
+       if (tsk != current) {
+#ifdef CONFIG_SMP
+               /*
+                * What guarantees do we have here that 'tsk' is not
+                * running on another CPU?  For now, ignore it as we
+                * can't guarantee we won't explode.
+                */
+               if (trace->nr_entries < trace->max_entries)
+                       trace->entries[trace->nr_entries++] = ULONG_MAX;
+               return;
+#else
+               data.no_sched_functions = 1;
+               frame.fp = thread_saved_fp(tsk);
+               frame.sp = thread_saved_sp(tsk);
+               frame.lr = 0;           /* recovered from the stack */
+               frame.pc = thread_saved_pc(tsk);
+#endif
+       } else {
+               /* A0StP is the Meta stack pointer register. */
+               register unsigned long current_sp asm ("A0StP");
+
+               data.no_sched_functions = 0;
+               frame.fp = (unsigned long)__builtin_frame_address(0);
+               frame.sp = current_sp;
+               frame.lr = (unsigned long)__builtin_return_address(0);
+               frame.pc = (unsigned long)save_stack_trace_tsk;
+       }
+
+       walk_stackframe(&frame, save_trace, &data);
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+/* Capture a stack trace for the current task. */
+void save_stack_trace(struct stack_trace *trace)
+{
+       save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
diff --git a/arch/metag/kernel/sys_metag.c b/arch/metag/kernel/sys_metag.c
new file mode 100644 (file)
index 0000000..efe833a
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/Meta
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+#include <asm/cacheflush.h>
+#include <asm/core_reg.h>
+#include <asm/global_lock.h>
+#include <asm/switch.h>
+#include <asm/syscall.h>
+#include <asm/syscalls.h>
+#include <asm/user_gateway.h>
+
+#define merge_64(hi, lo) ((((unsigned long long)(hi)) << 32) + \
+                         ((lo) & 0xffffffffUL))
+
+/*
+ * Architecture mmap hook: reject MAP_FIXED mappings below
+ * TASK_UNMAPPED_BASE, where the memory map contains reserved regions
+ * userspace must not trample on.
+ */
+int metag_mmap_check(unsigned long addr, unsigned long len,
+                    unsigned long flags)
+{
+       if (!(flags & MAP_FIXED))
+               return 0;
+
+       return (addr < TASK_UNMAPPED_BASE) ? -EINVAL : 0;
+}
+
+/*
+ * mmap2 takes its offset in 4096-byte units regardless of PAGE_SIZE.
+ * Convert to PAGE_SIZE units, rejecting offsets that are not page
+ * aligned.
+ */
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+                         unsigned long prot, unsigned long flags,
+                         unsigned long fd, unsigned long pgoff)
+{
+       /* The shift for mmap2 is constant, regardless of PAGE_SIZE setting. */
+       if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
+               return -EINVAL;
+
+       pgoff >>= PAGE_SHIFT - 12;
+
+       return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
+
+/*
+ * Atomically OR @mask into the byte at user address @addr, which must
+ * lie in the LINCORE region.  Atomicity across hardware threads is
+ * provided by __global_lock2(); the data cache is flushed before the
+ * read and after the write so all threads observe the update.
+ * Returns 0 on success or -EFAULT on a bad address.
+ */
+asmlinkage int sys_metag_setglobalbit(char __user *addr, int mask)
+{
+       char tmp;
+       int ret = 0;
+       unsigned int flags;
+
+       if (!((__force unsigned int)addr >= LINCORE_BASE))
+               return -EFAULT;
+
+       __global_lock2(flags);
+
+       metag_data_cache_flush((__force void *)addr, sizeof(mask));
+
+       ret = __get_user(tmp, addr);
+       if (ret)
+               goto out;
+       tmp |= mask;
+       ret = __put_user(tmp, addr);
+
+       metag_data_cache_flush((__force void *)addr, sizeof(mask));
+
+out:
+       __global_unlock2(flags);
+
+       return ret;
+}
+
+#define TXDEFR_FPU_MASK ((0x1f << 16) | 0x1f)
+
+asmlinkage void sys_metag_set_fpu_flags(unsigned int flags)
+{
+       unsigned int txdefr;
+
+       /* Read-modify-write only the FPU-related fields of TXDEFR. */
+       txdefr = __core_reg_get(TXDEFR);
+       txdefr = (txdefr & ~TXDEFR_FPU_MASK) | (flags & TXDEFR_FPU_MASK);
+       __core_reg_set(TXDEFR, txdefr);
+}
+
+/* Record the thread's TLS pointer and publish it via the user gateway. */
+asmlinkage int sys_metag_set_tls(void __user *ptr)
+{
+       current->thread.tls_ptr = ptr;
+       set_gateway_tls(ptr);
+
+       return 0;
+}
+
+/* Return the TLS pointer previously set by sys_metag_set_tls(). */
+asmlinkage void *sys_metag_get_tls(void)
+{
+       return (__force void *)current->thread.tls_ptr;
+}
+
+/*
+ * Wrappers for syscalls taking 64-bit arguments: userspace passes the
+ * value split into two 32-bit registers (low word first), which
+ * merge_64() reassembles before calling the generic implementation.
+ */
+asmlinkage long sys_truncate64_metag(const char __user *path, unsigned long lo,
+                                    unsigned long hi)
+{
+       return sys_truncate64(path, merge_64(hi, lo));
+}
+
+asmlinkage long sys_ftruncate64_metag(unsigned int fd, unsigned long lo,
+                                     unsigned long hi)
+{
+       return sys_ftruncate64(fd, merge_64(hi, lo));
+}
+
+asmlinkage long sys_fadvise64_64_metag(int fd, unsigned long offs_lo,
+                                      unsigned long offs_hi,
+                                      unsigned long len_lo,
+                                      unsigned long len_hi, int advice)
+{
+       return sys_fadvise64_64(fd, merge_64(offs_hi, offs_lo),
+                               merge_64(len_hi, len_lo), advice);
+}
+
+asmlinkage long sys_readahead_metag(int fd, unsigned long lo, unsigned long hi,
+                                   size_t count)
+{
+       return sys_readahead(fd, merge_64(hi, lo), count);
+}
+
+asmlinkage ssize_t sys_pread64_metag(unsigned long fd, char __user *buf,
+                                    size_t count, unsigned long lo,
+                                    unsigned long hi)
+{
+       return sys_pread64(fd, buf, count, merge_64(hi, lo));
+}
+
+asmlinkage ssize_t sys_pwrite64_metag(unsigned long fd, char __user *buf,
+                                     size_t count, unsigned long lo,
+                                     unsigned long hi)
+{
+       return sys_pwrite64(fd, buf, count, merge_64(hi, lo));
+}
+
+asmlinkage long sys_sync_file_range_metag(int fd, unsigned long offs_lo,
+                                         unsigned long offs_hi,
+                                         unsigned long len_lo,
+                                         unsigned long len_hi,
+                                         unsigned int flags)
+{
+       return sys_sync_file_range(fd, merge_64(offs_hi, offs_lo),
+                                  merge_64(len_hi, len_lo), flags);
+}
+
+/* Provide the actual syscall number to call mapping. */
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+/*
+ * We need wrappers for anything with unaligned 64bit arguments
+ */
+#define sys_truncate64         sys_truncate64_metag
+#define sys_ftruncate64                sys_ftruncate64_metag
+#define sys_fadvise64_64       sys_fadvise64_64_metag
+#define sys_readahead          sys_readahead_metag
+#define sys_pread64            sys_pread64_metag
+#define sys_pwrite64           sys_pwrite64_metag
+#define sys_sync_file_range    sys_sync_file_range_metag
+
+/*
+ * Note that we can't include <linux/unistd.h> here since the header
+ * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
+ */
+/* Slots not populated by <asm/unistd.h> fall back to sys_ni_syscall. */
+const void *sys_call_table[__NR_syscalls] = {
+       [0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/metag/kernel/tbiunexp.S b/arch/metag/kernel/tbiunexp.S
new file mode 100644 (file)
index 0000000..907bbe0
--- /dev/null
@@ -0,0 +1,22 @@
+/* Pass a breakpoint through to Codescape */
+
+#include <asm/tbx.h>
+
+       .text
+        .global        ___TBIUnExpXXX
+        .type   ___TBIUnExpXXX,function
+___TBIUnExpXXX:
+       TSTT    D0Ar2,#TBICTX_CRIT_BIT  ! Result of nestable int call?
+       BZ      $LTBINormCase           ! UnExpXXX at background level
+       MOV     D0Re0,TXMASKI           ! Read TXMASKI
+       XOR     TXMASKI,D1Re0,D1Re0     ! Turn off BGNDHALT handling!
+       OR      D0Ar2,D0Ar2,D0Re0       ! Preserve bits cleared
+$LTBINormCase:
+       MSETL   [A0StP],D0Ar6,D0Ar4,D0Ar2       ! Save args on stack
+       SETL    [A0StP++],D0Ar2,D1Ar1   ! Init area for returned values
+       SWITCH  #0xC20208               ! Total stack frame size 8 Dwords
+                                       !            write back size 2 Dwords
+       GETL    D0Re0,D1Re0,[--A0StP]   ! Get result
+       SUB     A0StP,A0StP,#(8*3)      ! Recover stack frame
+       MOV     PC,D1RtP                ! Return to caller
+        .size          ___TBIUnExpXXX,.-___TBIUnExpXXX
diff --git a/arch/metag/kernel/tcm.c b/arch/metag/kernel/tcm.c
new file mode 100644 (file)
index 0000000..5d102b3
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/genalloc.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include <asm/tcm.h>
+
+struct tcm_pool {
+       struct list_head list;
+       unsigned int tag;
+       unsigned long start;
+       unsigned long end;
+       struct gen_pool *pool;
+};
+
+static LIST_HEAD(pool_list);
+
+/* Look up the TCM pool registered under @tag, or NULL if none. */
+static struct tcm_pool *find_pool(unsigned int tag)
+{
+       struct tcm_pool *pool;
+
+       /* Linear scan is fine: the number of TCM regions is tiny. */
+       list_for_each_entry(pool, &pool_list, list)
+               if (pool->tag == tag)
+                       return pool;
+
+       return NULL;
+}
+
+/**
+ * tcm_alloc - allocate memory from a TCM pool
+ * @tag: tag of the pool to allocate memory from
+ * @len: number of bytes to be allocated
+ *
+ * Allocate the requested number of bytes from the pool matching
+ * the specified tag. Returns the address of the allocated memory
+ * or zero on failure.
+ */
+unsigned long tcm_alloc(unsigned int tag, size_t len)
+{
+       struct tcm_pool *pool = find_pool(tag);
+
+       /* An unknown tag and an exhausted pool both report 0. */
+       if (!pool)
+               return 0;
+
+       return gen_pool_alloc(pool->pool, len);
+}
+
+/**
+ * tcm_free - free a block of memory to a TCM pool
+ * @tag: tag of the pool to free memory to
+ * @addr: address of the memory to be freed
+ * @len: number of bytes to be freed
+ *
+ * Free the requested number of bytes at a specific address to the
+ * pool matching the specified tag.
+ */
+void tcm_free(unsigned int tag, unsigned long addr, size_t len)
+{
+       struct tcm_pool *pool = find_pool(tag);
+
+       /* Frees against an unknown tag are silently ignored. */
+       if (pool)
+               gen_pool_free(pool->pool, addr, len);
+}
+
+/**
+ * tcm_lookup_tag - find the tag matching an address
+ * @p: memory address to lookup the tag for
+ *
+ * Find the tag of the tcm memory region that contains the
+ * specified address. Returns %TCM_INVALID_TAG if no such
+ * memory region could be found.
+ */
+unsigned int tcm_lookup_tag(unsigned long p)
+{
+       struct tcm_pool *pool;
+
+       /* Find the pool whose [start, end) range contains the address. */
+       list_for_each_entry(pool, &pool_list, list)
+               if (p >= pool->start && p < pool->end)
+                       return pool->tag;
+
+       return TCM_INVALID_TAG;
+}
+
+/**
+ * tcm_add_region - add a memory region to TCM pool list
+ * @reg: descriptor of region to be added
+ *
+ * Add a region of memory to the TCM pool list. Returns 0 on success.
+ */
+int __init tcm_add_region(struct tcm_region *reg)
+{
+       struct tcm_pool *pool;
+
+       pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool) {
+               pr_err("Failed to alloc memory for TCM pool!\n");
+               return -ENOMEM;
+       }
+
+       pool->tag = reg->tag;
+       pool->start = reg->res.start;
+       pool->end = reg->res.end;
+
+       /*
+        * 2^3 = 8 bytes granularity to allow for 64bit access alignment.
+        * -1 = NUMA node specifier.
+        */
+       pool->pool = gen_pool_create(3, -1);
+
+       if (!pool->pool) {
+               pr_err("Failed to create TCM pool!\n");
+               kfree(pool);
+               return -ENOMEM;
+       }
+
+       if (gen_pool_add(pool->pool, reg->res.start,
+                        reg->res.end - reg->res.start + 1, -1)) {
+               pr_err("Failed to add memory to TCM pool!\n");
+               /* Don't leak the genpool or the descriptor on failure. */
+               gen_pool_destroy(pool->pool);
+               kfree(pool);
+               return -ENOMEM;
+       }
+       pr_info("Added %s TCM pool (%08x bytes @ %08x)\n",
+               reg->res.name, reg->res.end - reg->res.start + 1,
+               reg->res.start);
+
+       list_add_tail(&pool->list, &pool_list);
+
+       return 0;
+}
diff --git a/arch/metag/kernel/time.c b/arch/metag/kernel/time.c
new file mode 100644 (file)
index 0000000..17dc107
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2005-2013 Imagination Technologies Ltd.
+ *
+ * This file contains the Meta-specific time handling details.
+ *
+ */
+
+#include <linux/init.h>
+
+#include <clocksource/metag_generic.h>
+
+/* Arch time init hook: delegate to the generic Meta timer driver. */
+void __init time_init(void)
+{
+       metag_generic_timer_init();
+}
diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c
new file mode 100644 (file)
index 0000000..bec3dec
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ *  Copyright (C) 2007  Paul Mundt
+ *  Copyright (C) 2010  Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/topology.h>
+
+#include <asm/cpu.h>
+
+/* Per-CPU CPU information, registered with the driver core at boot. */
+DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data);
+
+cpumask_t cpu_core_map[NR_CPUS];
+
+/* All possible CPUs are treated as a single core group. */
+static cpumask_t cpu_coregroup_map(unsigned int cpu)
+{
+       return *cpu_possible_mask;
+}
+
+/* Return the cached core-group mask for @cpu (see
+ * arch_update_cpu_topology(), which fills cpu_core_map). */
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+       return &cpu_core_map[cpu];
+}
+
+/* Refresh cpu_core_map for every possible CPU; always returns 0. */
+int arch_update_cpu_topology(void)
+{
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu)
+               cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+
+       return 0;
+}
+
+/*
+ * Register nodes and present CPUs with the driver core so they show
+ * up in sysfs. register_cpu() failures are warned about, not fatal.
+ */
+static int __init topology_init(void)
+{
+       int i, ret;
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       for_each_online_node(i)
+               register_one_node(i);
+#endif
+
+       for_each_present_cpu(i) {
+               struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i);
+#ifdef CONFIG_HOTPLUG_CPU
+               cpuinfo->cpu.hotpluggable = 1;
+#endif
+               ret = register_cpu(&cpuinfo->cpu, i);
+               if (unlikely(ret))
+                       pr_warn("%s: register_cpu %d failed (%d)\n",
+                               __func__, i, ret);
+       }
+
+#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
+       /*
+        * In the UP case, make sure the CPU association is still
+        * registered under each node. Without this, sysfs fails
+        * to make the connection between nodes other than node0
+        * and cpu0.
+        */
+       for_each_online_node(i)
+               if (i != numa_node_id())
+                       register_cpu_under_node(raw_smp_processor_id(), i);
+#endif
+
+       return 0;
+}
+subsys_initcall(topology_init);
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
new file mode 100644 (file)
index 0000000..8961f24
--- /dev/null
@@ -0,0 +1,995 @@
+/*
+ *  Meta exception handling.
+ *
+ *  Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/preempt.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/kdebug.h>
+#include <linux/kexec.h>
+#include <linux/unistd.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+#include <asm/bug.h>
+#include <asm/core_reg.h>
+#include <asm/irqflags.h>
+#include <asm/siginfo.h>
+#include <asm/traps.h>
+#include <asm/hwthread.h>
+#include <asm/switch.h>
+#include <asm/user_gateway.h>
+#include <asm/syscall.h>
+#include <asm/syscalls.h>
+
+/* Passing syscall arguments as long long is quicker. */
+typedef unsigned int (*LPSYSCALL) (unsigned long long,
+                                  unsigned long long,
+                                  unsigned long long);
+
+/*
+ * Users of LNKSET should compare the bus error bits obtained from DEFR
+ * against TXDEFR_LNKSET_SUCCESS only as the failure code will vary between
+ * different cores revisions.
+ */
+#define TXDEFR_LNKSET_SUCCESS 0x02000000
+#define TXDEFR_LNKSET_FAILURE 0x04000000
+
+/*
+ * Our global TBI handle.  Initialised from setup.c/setup_arch.
+ */
+DECLARE_PER_CPU(PTBI, pTBI);
+
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(unsigned int, trigger_mask);
+#else
+unsigned int global_trigger_mask;
+EXPORT_SYMBOL(global_trigger_mask);
+#endif
+
+unsigned long per_cpu__stack_save[NR_CPUS];
+
+static const char * const trap_names[] = {
+       [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
+       [TBIXXF_SIGNUM_PGF] = "Privilege violation",
+       [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
+       [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
+       [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
+       [TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
+       [TBIXXF_SIGNUM_DPF] = "Data access page fault",
+       [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
+       [TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
+};
+
+/* Map a hardware trap number to a human-readable name. */
+const char *trap_name(int trapno)
+{
+       const char *name = "Unknown fault";
+
+       if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names) &&
+           trap_names[trapno])
+               name = trap_names[trapno];
+
+       return name;
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+/*
+ * Kernel oops path: print diagnostics for a fatal exception and kill
+ * the current task with SIGSEGV (or panic when in interrupt context or
+ * panic_on_oops is set). @err's low 16 bits are reported as a trap
+ * number via trap_name().
+ */
+void die(const char *str, struct pt_regs *regs, long err,
+        unsigned long addr)
+{
+       static int die_counter;
+
+       oops_enter();
+
+       spin_lock_irq(&die_lock);
+       console_verbose();
+       bust_spinlocks(1);
+       pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
+              trap_name(err & 0xffff), addr, ++die_counter);
+
+       print_modules();
+       show_regs(regs);
+
+       pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
+              task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);
+
+       bust_spinlocks(0);
+       add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+       /* Give kexec a chance to capture a crash dump of this state. */
+       if (kexec_should_crash(current))
+               crash_kexec(regs);
+
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+
+       if (panic_on_oops)
+               panic("Fatal exception");
+
+       spin_unlock_irq(&die_lock);
+       oops_exit();
+       do_exit(SIGSEGV);
+}
+
+#ifdef CONFIG_METAG_DSP
+/*
+ * The ECH encoding specifies the size of a DSPRAM as,
+ *
+ *             "slots" / 4
+ *
+ * A "slot" is the size of two DSPRAM bank entries; an entry from
+ * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
+ * entry is 4 bytes.
+ */
+#define SLOT_SZ        8
+/* Decode a DSPRAM size field (low 7 bits of an ECH encoding) to bytes. */
+static inline unsigned int decode_dspram_size(unsigned int size)
+{
+       unsigned int _sz = size & 0x7f;
+
+       return _sz * SLOT_SZ * 4;
+}
+
+/*
+ * Save the contents of DSPRAM banks A and B into @dsp_ctx, allocating
+ * (or growing) the context buffers as needed. @ramA_sz/@ramB_sz are the
+ * raw size fields for each bank; bank A's field sits in bits 15:8.
+ * Panics on allocation failure since the DSP state would be lost.
+ */
+static void dspram_save(struct meta_ext_context *dsp_ctx,
+                       unsigned int ramA_sz, unsigned int ramB_sz)
+{
+       unsigned int ram_sz[2];
+       int i;
+
+       ram_sz[0] = ramA_sz;
+       ram_sz[1] = ramB_sz;
+
+       for (i = 0; i < 2; i++) {
+               if (ram_sz[i] != 0) {
+                       unsigned int sz;
+
+                       /* Bank A's size field must be shifted down first. */
+                       if (i == 0)
+                               sz = decode_dspram_size(ram_sz[i] >> 8);
+                       else
+                               sz = decode_dspram_size(ram_sz[i]);
+
+                       if (dsp_ctx->ram[i] == NULL) {
+                               dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);
+
+                               if (dsp_ctx->ram[i] == NULL)
+                                       panic("couldn't save DSP context");
+                       } else {
+                               /* Reallocate if this bank grew since the
+                                * last save. */
+                               if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
+                                       kfree(dsp_ctx->ram[i]);
+
+                                       dsp_ctx->ram[i] = kmalloc(sz,
+                                                                 GFP_KERNEL);
+
+                                       if (dsp_ctx->ram[i] == NULL)
+                                               panic("couldn't save DSP context");
+                               }
+                       }
+
+                       if (i == 0)
+                               __TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
+                       else
+                               __TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);
+
+                       dsp_ctx->ram_sz[i] = ram_sz[i];
+               }
+       }
+}
+#endif /* CONFIG_METAG_DSP */
+
+/*
+ * Allow interrupts to be nested and save any "extended" register
+ * context state, e.g. DSP regs and RAMs.
+ */
+static void nest_interrupts(TBIRES State, unsigned long mask)
+{
+#ifdef CONFIG_METAG_DSP
+       struct meta_ext_context *dsp_ctx;
+       unsigned int D0_8;
+
+       /*
+        * D0.8 may contain an ECH encoding. The upper 16 bits
+        * tell us what DSP resources the current process is
+        * using. OR the bits into the SaveMask so that
+        * __TBINestInts() knows what resources to save as
+        * part of this context.
+        *
+        * Don't save the context if we're nesting interrupts in the
+        * kernel because the kernel doesn't use DSP hardware.
+        */
+       D0_8 = __core_reg_get(D0.8);
+
+       /* Interrupted a user context with live DSP state: save it. */
+       if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
+               State.Sig.SaveMask |= (D0_8 >> 16);
+
+               dsp_ctx = current->thread.dsp_context;
+               if (dsp_ctx == NULL) {
+                       dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
+                       if (dsp_ctx == NULL)
+                               panic("couldn't save DSP context: ENOMEM");
+
+                       current->thread.dsp_context = dsp_ctx;
+               }
+
+               current->thread.user_flags |= (D0_8 & 0xffff0000);
+               __TBINestInts(State, &dsp_ctx->regs, mask);
+               dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
+       } else
+               /* No DSP state to preserve: plain nested-interrupt entry. */
+               __TBINestInts(State, NULL, mask);
+#else
+       __TBINestInts(State, NULL, mask);
+#endif
+}
+
+/*
+ * Common exception/interrupt entry work: fix up the TBI save masks
+ * (privilege, sleep and catch-buffer state), preserve the TXDEFR bus
+ * state on Meta 2.1 so interrupted LNKSET results aren't lost, then
+ * nest interrupts (saving DSP context if required).
+ */
+void head_end(TBIRES State, unsigned long mask)
+{
+       unsigned int savemask = (unsigned short)State.Sig.SaveMask;
+       unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;
+
+       if (savemask & TBICTX_PRIV_BIT) {
+               ctx_savemask |= TBICTX_PRIV_BIT;
+               current->thread.user_flags = savemask;
+       }
+
+       /* Always undo the sleep bit */
+       ctx_savemask &= ~TBICTX_WAIT_BIT;
+
+       /* Always save the catch buffer and RD pipe if they are dirty */
+       savemask |= TBICTX_XCBF_BIT;
+
+       /* Only save the catch and RD if we have not already done so.
+        * Note - the RD bits are in the pCtx only, and not in the
+        * State.SaveMask.
+        */
+       if ((savemask & TBICTX_CBUF_BIT) ||
+           (ctx_savemask & TBICTX_CBRP_BIT)) {
+               /* Have we already saved the buffers though?
+                * - See TestTrack 5071 */
+               if (ctx_savemask & TBICTX_XCBF_BIT) {
+                       /* Strip off the bits so the call to __TBINestInts
+                        * won't save the buffers again. */
+                       savemask &= ~TBICTX_CBUF_BIT;
+                       ctx_savemask &= ~TBICTX_CBRP_BIT;
+               }
+       }
+
+#ifdef CONFIG_METAG_META21
+       {
+               unsigned int depth, txdefr;
+
+               /*
+                * Save TXDEFR state.
+                *
+                * The process may have been interrupted after a LNKSET, but
+                * before it could read the DEFR state, so we mustn't lose that
+                * state or it could end up retrying an atomic operation that
+                * succeeded.
+                *
+                * All interrupts are disabled at this point so we
+                * don't need to perform any locking. We must do this
+                * dance before we use LNKGET or LNKSET.
+                */
+               BUG_ON(current->thread.int_depth > HARDIRQ_BITS);
+
+               /* One success/failure bit is kept per interrupt depth. */
+               depth = current->thread.int_depth++;
+
+               txdefr = __core_reg_get(TXDEFR);
+
+               txdefr &= TXDEFR_BUS_STATE_BITS;
+               if (txdefr & TXDEFR_LNKSET_SUCCESS)
+                       current->thread.txdefr_failure &= ~(1 << depth);
+               else
+                       current->thread.txdefr_failure |= (1 << depth);
+       }
+#endif
+
+       State.Sig.SaveMask = savemask;
+       State.Sig.pCtx->SaveMask = ctx_savemask;
+
+       nest_interrupts(State, mask);
+
+#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
+       /* Poison the catch registers.  This shows up any mistakes we have
+        * made in their handling MUCH quicker.
+        */
+       __core_reg_set(TXCATCH0, 0x87650021);
+       __core_reg_set(TXCATCH1, 0x87654322);
+       __core_reg_set(TXCATCH2, 0x87654323);
+       __core_reg_set(TXCATCH3, 0x87654324);
+#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
+}
+
+/*
+ * Common exception/syscall exit work. For userland returns it handles
+ * pending work flags (signals, reschedule; *restart is set to 1 when
+ * the syscall must be restarted without going back out to userland)
+ * and restores FPU context. It then restores any saved DSP context and
+ * re-installs the saved per-depth LNKSET result into TXDEFR on
+ * Meta 2.1. Returns the (possibly updated) State for the TBI.
+ */
+TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
+{
+       struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+       unsigned long flags;
+
+       local_irq_disable();
+
+       if (user_mode(regs)) {
+               flags = current_thread_info()->flags;
+               if (flags & _TIF_WORK_MASK &&
+                   do_work_pending(regs, flags, syscall)) {
+                       *restart = 1;
+                       return State;
+               }
+
+#ifdef CONFIG_METAG_FPU
+               if (current->thread.fpu_context &&
+                   current->thread.fpu_context->needs_restore) {
+                       __TBICtxFPURestore(State, current->thread.fpu_context);
+                       /*
+                        * Clearing this bit ensures the FP unit is not made
+                        * active again unless it is used.
+                        */
+                       State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
+                       current->thread.fpu_context->needs_restore = false;
+               }
+               State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
+#endif
+       }
+
+       /* TBI will turn interrupts back on at some point. */
+       if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
+               trace_hardirqs_on();
+
+#ifdef CONFIG_METAG_DSP
+       /*
+        * If we previously saved an extended context then restore it
+        * now. Otherwise, clear D0.8 because this process is not
+        * using DSP hardware.
+        */
+       if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
+               unsigned int D0_8;
+               struct meta_ext_context *dsp_ctx = current->thread.dsp_context;
+
+               /* Make sure we're going to return to userland. */
+               BUG_ON(current->thread.int_depth != 1);
+
+               if (dsp_ctx->ram_sz[0] > 0)
+                       __TBIDspramRestoreA(dsp_ctx->ram_sz[0],
+                                           dsp_ctx->ram[0]);
+               if (dsp_ctx->ram_sz[1] > 0)
+                       __TBIDspramRestoreB(dsp_ctx->ram_sz[1],
+                                           dsp_ctx->ram[1]);
+
+               State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
+               __TBICtxRestore(State, current->thread.dsp_context);
+               /* Rebuild D0.8's ECH encoding from the saved sizes/flags. */
+               D0_8 = __core_reg_get(D0.8);
+               D0_8 |= current->thread.user_flags & 0xffff0000;
+               D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
+               __core_reg_set(D0.8, D0_8);
+       } else
+               __core_reg_set(D0.8, 0);
+#endif /* CONFIG_METAG_DSP */
+
+#ifdef CONFIG_METAG_META21
+       {
+               unsigned int depth, txdefr;
+
+               /*
+                * If there hasn't been a LNKSET since the last LNKGET then the
+                * link flag will be set, causing the next LNKSET to succeed if
+                * the addresses match. The two LNK operations may not be a pair
+                * (e.g. see atomic_read()), so the LNKSET should fail.
+                * We use a conditional-never LNKSET to clear the link flag
+                * without side effects.
+                */
+               asm volatile("LNKSETDNV [D0Re0],D0Re0");
+
+               depth = --current->thread.int_depth;
+
+               BUG_ON(user_mode(regs) && depth);
+
+               txdefr = __core_reg_get(TXDEFR);
+
+               txdefr &= ~TXDEFR_BUS_STATE_BITS;
+
+               /* Do we need to restore a failure code into TXDEFR? */
+               if (current->thread.txdefr_failure & (1 << depth))
+                       txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
+               else
+                       txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);
+
+               __core_reg_set(TXDEFR, txdefr);
+       }
+#endif
+       return State;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * If we took an interrupt in the middle of __kuser_get_tls then we need
+ * to rewind the PC to the start of the function in case the process
+ * gets migrated to another thread (SMP only) and it reads the wrong tls
+ * data.
+ */
+static inline void _restart_critical_section(TBIRES State)
+{
+       unsigned long get_tls_start;
+       unsigned long get_tls_end;
+
+       /* Translate the helper's link address to its gateway-page address. */
+       get_tls_start = (unsigned long)__kuser_get_tls -
+               (unsigned long)&__user_gateway_start;
+
+       get_tls_start += USER_GATEWAY_PAGE;
+
+       get_tls_end = (unsigned long)__kuser_get_tls_end -
+               (unsigned long)&__user_gateway_start;
+
+       get_tls_end += USER_GATEWAY_PAGE;
+
+       /* If the interrupted PC lies inside the helper, rewind it. */
+       if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
+           (State.Sig.pCtx->CurrPC < get_tls_end))
+               State.Sig.pCtx->CurrPC = get_tls_start;
+}
+#else
+/*
+ * If we took an interrupt in the middle of
+ * __kuser_cmpxchg then we need to rewind the PC to the
+ * start of the function.
+ */
+static inline void _restart_critical_section(TBIRES State)
+{
+       unsigned long cmpxchg_start;
+       unsigned long cmpxchg_end;
+
+       /* Translate the helper's link address to its gateway-page address. */
+       cmpxchg_start = (unsigned long)__kuser_cmpxchg -
+               (unsigned long)&__user_gateway_start;
+
+       cmpxchg_start += USER_GATEWAY_PAGE;
+
+       cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
+               (unsigned long)&__user_gateway_start;
+
+       cmpxchg_end += USER_GATEWAY_PAGE;
+
+       /* If the interrupted PC lies inside the helper, rewind it. */
+       if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
+           (State.Sig.pCtx->CurrPC < cmpxchg_end))
+               State.Sig.pCtx->CurrPC = cmpxchg_start;
+}
+#endif
+
+/* Used by kick_handler() */
+void restart_critical_section(TBIRES State)
+{
+       _restart_critical_section(State);
+}
+
+TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
+                      PTBI pTBI)
+{
+       head_end(State, ~INTS_OFF_MASK);
+
+       /* If we interrupted user code handle any critical sections. */
+       if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
+               _restart_critical_section(State);
+
+       trace_hardirqs_off();
+
+       do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);
+
+       return tail_end(State);
+}
+
+/* Nonzero (the READ bit) when the catch buffer records a load fault. */
+static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
+{
+       return pbuf->CBFlags & TXCATCH0_READ_BIT;
+}
+
+/* Data address captured by the catch buffer for the faulting access. */
+static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
+{
+       return pbuf->CBAddr;
+}
+
+/*
+ * Deliver a fault we have no fixup for: send @signo (with rate-limited
+ * diagnostics) to user tasks, or die() for faults in kernel mode.
+ */
+static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
+                           int signo, int code, int trapno)
+{
+       if (user_mode(regs)) {
+               siginfo_t info;
+
+               if (show_unhandled_signals && unhandled_signal(current, signo)
+                   && printk_ratelimit()) {
+
+                       pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
+                               current->pid, regs->ctx.CurrPC, addr,
+                               trapno, trap_name(trapno));
+                       print_vma_addr(" in ", regs->ctx.CurrPC);
+                       print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
+                       printk("\n");
+                       show_regs(regs);
+               }
+
+               info.si_signo = signo;
+               info.si_errno = 0;
+               info.si_code = code;
+               info.si_addr = (__force void __user *)addr;
+               info.si_trapno = trapno;
+               force_sig_info(signo, &info, current);
+       } else {
+               die("Oops", regs, trapno, addr);
+       }
+}
+
+/* Forward a data-side catch-buffer fault to the page fault handler. */
+static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
+                            unsigned int data_address, int trapno)
+{
+       return do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);
+}
+
+/* For an instruction fetch fault the faulting address is the PC. */
+static unsigned long get_inst_fault_address(struct pt_regs *regs)
+{
+       return regs->ctx.CurrPC;
+}
+
+/*
+ * TBID_SIGNUM_XXF handler: triage hardware fault signals. Breakpoints
+ * and data watchpoints are handed back via __TBIUnExpXXX() (e.g. for a
+ * debugger); MMU faults go to do_page_fault(); everything else raises
+ * a signal via unhandled_fault() or BUG()s on unknown signal numbers.
+ */
+TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
+                    int Inst, PTBI pTBI)
+{
+       struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+       PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
+       unsigned long data_address;
+
+       head_end(State, ~INTS_OFF_MASK);
+
+       /* Hardware breakpoint or data watch */
+       if ((SigNum == TBIXXF_SIGNUM_IHF) ||
+           ((SigNum == TBIXXF_SIGNUM_DHF) &&
+            (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
+                                 TXCATCH0_WATCH0_BIT)))) {
+               State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
+                                     pTBI);
+               return tail_end(State);
+       }
+
+       local_irq_enable();
+
+       data_address = fault_address(pcbuf);
+
+       switch (SigNum) {
+       case TBIXXF_SIGNUM_IGF:
+               /* 1st-level entry invalid (instruction fetch) */
+       case TBIXXF_SIGNUM_IPF: {
+               /* 2nd-level entry invalid (instruction fetch) */
+               unsigned long addr = get_inst_fault_address(regs);
+               do_page_fault(regs, addr, 0, SigNum);
+               break;
+       }
+
+       case TBIXXF_SIGNUM_DGF:
+               /* 1st-level entry invalid (data access) */
+       case TBIXXF_SIGNUM_DPF:
+               /* 2nd-level entry invalid (data access) */
+       case TBIXXF_SIGNUM_DWF:
+               /* Write to read only page */
+               handle_data_fault(pcbuf, regs, data_address, SigNum);
+               break;
+
+       case TBIXXF_SIGNUM_IIF:
+               /* Illegal instruction */
+               unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
+                               SigNum);
+               break;
+
+       case TBIXXF_SIGNUM_DHF:
+               /* Unaligned access */
+               unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
+                               SigNum);
+               break;
+       case TBIXXF_SIGNUM_PGF:
+               /* Privilege violation */
+               unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
+                               SigNum);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       return tail_end(State);
+}
+
+/* True if the SWITCH instruction encodes a (current ABI) system call. */
+static bool switch_is_syscall(unsigned int inst)
+{
+       return inst == __METAG_SW_ENCODING(SYS);
+}
+
+/* True if the SWITCH instruction encodes the obsolete syscall ABI. */
+static bool switch_is_legacy_syscall(unsigned int inst)
+{
+       return inst == __METAG_SW_ENCODING(SYS_LEGACY);
+}
+
+/* Advance the saved PC past the 4-byte SWITCH instruction. */
+static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
+{
+       regs->ctx.CurrPC += 4;
+}
+
+/* Nonzero if syscall entry/exit tracing work is pending for this task. */
+static inline int test_syscall_work(void)
+{
+       return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
+}
+
+/*
+ * SWITCH #1 (system call) handler.
+ *
+ * Validates the SWITCH encoding, dispatches the call from
+ * sys_call_table (arguments loaded as 64-bit pairs for speed), runs
+ * any syscall tracing work and loops directly for handlerless restarts
+ * so they never pass back through userland. Non-syscall SWITCHes
+ * (including breakpoints) raise SIGTRAP.
+ */
+TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
+                      int Inst, PTBI pTBI)
+{
+       struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+       unsigned int sysnumber;
+       unsigned long long a1_a2, a3_a4, a5_a6;
+       LPSYSCALL syscall_entry;
+       int restart;
+
+       head_end(State, ~INTS_OFF_MASK);
+
+       /*
+        * If this is not a syscall SWITCH it could be a breakpoint.
+        */
+       if (!switch_is_syscall(Inst)) {
+               /*
+                * Alert the user if they're trying to use legacy system
+                * calls. This suggests they need to update their C
+                * library and build against up to date kernel headers.
+                */
+               if (switch_is_legacy_syscall(Inst))
+                       pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
+               /*
+                * We don't know how to handle the SWITCH and cannot
+                * safely ignore it, so treat all unknown switches
+                * (including breakpoints) as traps.
+                */
+               force_sig(SIGTRAP, current);
+               return tail_end(State);
+       }
+
+       local_irq_enable();
+
+restart_syscall:
+       restart = 0;
+       sysnumber = regs->ctx.DX[0].U1;
+
+       if (test_syscall_work())
+               sysnumber = syscall_trace_enter(regs);
+
+       /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
+       step_over_switch(regs, Inst);
+
+       if (sysnumber >= __NR_syscalls) {
+               /* %u: sysnumber is unsigned, and may be a huge value if
+                * syscall tracing rewrote it (e.g. to skip the call). */
+               pr_debug("unknown syscall number: %u\n", sysnumber);
+               syscall_entry = (LPSYSCALL) sys_ni_syscall;
+       } else {
+               syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
+       }
+
+       /* Use 64bit loads for speed. */
+       a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
+       a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
+       a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];
+
+       /* here is the actual call to the syscall handler functions */
+       regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);
+
+       if (test_syscall_work())
+               syscall_trace_leave(regs);
+
+       State = tail_end_sys(State, sysnumber, &restart);
+       /* Handlerless restarts shouldn't go via userland */
+       if (restart)
+               goto restart_syscall;
+       return State;
+}
+
+/* Handler for SWITCH signal groups other than the syscall group. */
+TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
+                      int Inst, PTBI pTBI)
+{
+       struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+
+       /*
+        * This can be caused by any user process simply executing an unusual
+        * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
+        * thread to stop, so signal a SIGTRAP instead.
+        */
+       head_end(State, ~INTS_OFF_MASK);
+       if (user_mode(regs))
+               force_sig(SIGTRAP, current);
+       else
+               State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
+       return tail_end(State);
+}
+
+#ifdef CONFIG_METAG_META21
+/* FPU exception handler: map TXSTAT error bits to a SIGFPE si_code. */
+TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
+{
+       struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
+       unsigned int error_state = Triggers;
+       siginfo_t info;
+
+       head_end(State, ~INTS_OFF_MASK);
+
+       local_irq_enable();
+
+       info.si_signo = SIGFPE;
+
+       if (error_state & TXSTAT_FPE_INVALID_BIT)
+               info.si_code = FPE_FLTINV;
+       else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
+               info.si_code = FPE_FLTDIV;
+       else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
+               info.si_code = FPE_FLTOVF;
+       else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
+               info.si_code = FPE_FLTUND;
+       else if (error_state & TXSTAT_FPE_INEXACT_BIT)
+               info.si_code = FPE_FLTRES;
+       else
+               info.si_code = 0;
+       info.si_errno = 0;
+       info.si_addr = (__force void __user *)regs->ctx.CurrPC;
+       force_sig_info(SIGFPE, &info, current);
+
+       return tail_end(State);
+}
+#endif
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+/* Saved TBI signal-handler table, preserved across suspend-to-memory. */
+struct traps_context {
+       PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
+};
+
+static struct traps_context *metag_traps_context;
+
+/* Snapshot this CPU's TBI signal handlers before suspend. */
+int traps_save_context(void)
+{
+       unsigned long cpu = smp_processor_id();
+       PTBI _pTBI = per_cpu(pTBI, cpu);
+       struct traps_context *context;
+
+       context = kzalloc(sizeof(*context), GFP_ATOMIC);
+       if (!context)
+               return -ENOMEM;
+
+       memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));
+
+       metag_traps_context = context;
+       return 0;
+}
+
+/* Restore the handlers saved by traps_save_context() on resume.
+ * NOTE(review): assumes a successful prior save -- context is
+ * dereferenced without a NULL check; confirm suspend/resume ordering
+ * guarantees this. */
+int traps_restore_context(void)
+{
+       unsigned long cpu = smp_processor_id();
+       PTBI _pTBI = per_cpu(pTBI, cpu);
+       struct traps_context *context = metag_traps_context;
+
+       metag_traps_context = NULL;
+
+       memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));
+
+       kfree(context);
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+/* Read this CPU's trigger (interrupt enable) mask. */
+static inline unsigned int _get_trigger_mask(void)
+{
+       unsigned long cpu = smp_processor_id();
+       return per_cpu(trigger_mask, cpu);
+}
+
+unsigned int get_trigger_mask(void)
+{
+       return _get_trigger_mask();
+}
+EXPORT_SYMBOL(get_trigger_mask);
+
+/* Set this CPU's trigger mask. */
+static void set_trigger_mask(unsigned int mask)
+{
+       unsigned long cpu = smp_processor_id();
+       per_cpu(trigger_mask, cpu) = mask;
+}
+
+/* Enable interrupts by restoring this CPU's trigger mask. Preemption
+ * is disabled around the per-cpu access so the read and the restore
+ * happen on the same CPU. */
+void arch_local_irq_enable(void)
+{
+       preempt_disable();
+       arch_local_irq_restore(_get_trigger_mask());
+       preempt_enable_no_resched();
+}
+EXPORT_SYMBOL(arch_local_irq_enable);
+#else
+/* UP: a single global trigger mask suffices. */
+static void set_trigger_mask(unsigned int mask)
+{
+       global_trigger_mask = mask;
+}
+#endif
+
+/* Set up the boot trigger mask for @cpu's hardware thread and hand
+ * control to the TBI with interrupts initially masked. */
+void __cpuinit per_cpu_trap_init(unsigned long cpu)
+{
+       TBIRES int_context;
+       unsigned int thread = cpu_2_hwthread_id[cpu];
+
+       set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
+                        TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
+                        TBI_TRIG_BIT(TBID_SIGNUM_SW1) |
+                        TBI_TRIG_BIT(TBID_SIGNUM_SWS));
+
+       /* non-priv - use current stack */
+       int_context.Sig.pCtx = NULL;
+       /* Start with interrupts off */
+       int_context.Sig.TrigMask = INTS_OFF_MASK;
+       int_context.Sig.SaveMask = 0;
+
+       /* And call __TBIASyncTrigger() */
+       __TBIASyncTrigger(int_context);
+}
+
+/* Install the kernel's TBI signal handlers for this (boot) CPU. */
+void __init trap_init(void)
+{
+       unsigned long cpu = smp_processor_id();
+       PTBI _pTBI = per_cpu(pTBI, cpu);
+
+       _pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
+       _pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
+       _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
+       _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
+       _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
+       _pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler;
+
+#ifdef CONFIG_METAG_META21
+       _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
+       _pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
+#endif
+
+       per_cpu_trap_init(cpu);
+}
+
+/* Enable an interrupt: route its TBI signal to trigger_handler() and
+ * unmask its trigger bit. */
+void tbi_startup_interrupt(int irq)
+{
+       unsigned long cpu = smp_processor_id();
+       PTBI _pTBI = per_cpu(pTBI, cpu);
+
+       BUG_ON(irq > TBID_SIGNUM_MAX);
+
+       /* For TR1 and TR2, the thread id is encoded in the irq number */
+       if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
+               cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];
+
+       set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));
+
+       _pTBI->fnSigs[irq] = trigger_handler;
+}
+
+/* Disable an interrupt: mask its trigger bit and restore the default
+ * "unexpected trigger" TBI handler. */
+void tbi_shutdown_interrupt(int irq)
+{
+       unsigned long cpu = smp_processor_id();
+       PTBI _pTBI = per_cpu(pTBI, cpu);
+
+       BUG_ON(irq > TBID_SIGNUM_MAX);
+
+       set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));
+
+       _pTBI->fnSigs[irq] = __TBIUnExpXXX;
+}
+
+/*
+ * First code run by a newly forked task: finish the context switch,
+ * run the thread function for kernel threads, then resume usermode
+ * via the TBI (which never returns here).
+ */
+int ret_from_fork(TBIRES arg)
+{
+       struct task_struct *prev = arg.Switch.pPara;
+       struct task_struct *tsk = current;
+       struct pt_regs *regs = task_pt_regs(tsk);
+       int (*fn)(void *);
+       TBIRES Next;
+
+       schedule_tail(prev);
+
+       if (tsk->flags & PF_KTHREAD) {
+               /* Thread fn and its argument were stashed in DX[4]/DX[3]
+                * at copy_thread time. NOTE(review): a fn that returns
+                * falls through to the usermode resume path below --
+                * presumably it exec'd a user process first; confirm. */
+               fn = (void *)regs->ctx.DX[4].U1;
+               BUG_ON(!fn);
+
+               fn((void *)regs->ctx.DX[3].U1);
+       }
+
+       if (test_syscall_work())
+               syscall_trace_leave(regs);
+
+       preempt_disable();
+
+       Next.Sig.TrigMask = get_trigger_mask();
+       Next.Sig.SaveMask = 0;
+       Next.Sig.pCtx = &regs->ctx;
+
+       set_gateway_tls(current->thread.tls_ptr);
+
+       preempt_enable_no_resched();
+
+       /* And interrupts should come back on when we resume the real usermode
+        * code. Call __TBIASyncResume()
+        */
+       __TBIASyncResume(tail_end(Next));
+       /* ASyncResume should NEVER return */
+       BUG();
+       return 0;
+}
+
+/*
+ * Print a kernel call trace, either by walking frame pointers (with
+ * sanity checks that they stay within the task's stack and strictly
+ * decrease) or, without frame pointers, by scanning the raw stack for
+ * kernel text addresses. Does nothing for user-mode register sets.
+ */
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+               struct pt_regs *regs)
+{
+       unsigned long addr;
+#ifdef CONFIG_FRAME_POINTER
+       unsigned long fp, fpnew;
+       unsigned long stack;
+#endif
+
+       if (regs && user_mode(regs))
+               return;
+
+       printk("\nCall trace: ");
+#ifdef CONFIG_KALLSYMS
+       printk("\n");
+#endif
+
+       if (!tsk)
+               tsk = current;
+
+#ifdef CONFIG_FRAME_POINTER
+       if (regs) {
+               print_ip_sym(regs->ctx.CurrPC);
+               fp = regs->ctx.AX[1].U0;
+       } else {
+               fp = __core_reg_get(A0FrP);
+       }
+
+       /* detect when the frame pointer has been used for other purposes and
+        * doesn't point to the stack (it may point completely elsewhere which
+        * kstack_end may not detect).
+        */
+       stack = (unsigned long)task_stack_page(tsk);
+       while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
+               /* Return address is stored at fp + 4; -4 gives the call. */
+               addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
+               if (kernel_text_address(addr))
+                       print_ip_sym(addr);
+               else
+                       break;
+               /* stack grows up, so frame pointers must decrease */
+               fpnew = __raw_readl((unsigned long *)(fp + 0));
+               if (fpnew >= fp)
+                       break;
+               fp = fpnew;
+       }
+#else
+       while (!kstack_end(sp)) {
+               addr = (*sp--) - 4;
+               if (kernel_text_address(addr))
+                       print_ip_sym(addr);
+       }
+#endif
+
+       printk("\n");
+
+       debug_show_held_locks(tsk);
+}
+
+/* Dump the kernel stack of @tsk (or of the current task if NULL). */
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+       if (!tsk)
+               tsk = current;
+       if (tsk == current)
+               sp = (unsigned long *)current_stack_pointer;
+       else
+               /* Sleeping task: use the stack pointer saved at switch. */
+               sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;
+
+       show_trace(tsk, sp, NULL);
+}
+
+/* Generic dump_stack() hook: trace the current task's stack. */
+void dump_stack(void)
+{
+       show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
diff --git a/arch/metag/kernel/user_gateway.S b/arch/metag/kernel/user_gateway.S
new file mode 100644 (file)
index 0000000..7167f3e
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ * This file contains code that can be accessed from userspace and can
+ * access certain kernel data structures without the overhead of a system
+ * call.
+ */
+
+#include <asm/metag_regs.h>
+#include <asm/user_gateway.h>
+
+/*
+ * User helpers.
+ *
+ * These are segments of kernel-provided user code reachable from user space
+ * at a fixed address in kernel memory.  This is used to provide user space
+ * with some operations which require kernel help because of unimplemented
+ * native feature and/or instructions in some Meta CPUs. The idea is for
+ * this code to be executed directly in user mode for best efficiency but
+ * which is too intimate with the kernel counter part to be left to user
+ * libraries.  The kernel reserves the right to change this code as needed
+ * without warning. Only the entry points and their results are guaranteed
+ * to be stable.
+ *
+ * Each segment is 64-byte aligned.  This mechanism should be used only for
+ * things that are really small and justified, and not be abused freely.
+ */
+       .text
+       .global ___user_gateway_start
+___user_gateway_start:
+
+       /* get_tls
+        * Offset:       0
+        * Description:  Get the TLS pointer for this process.
+        */
+       .global ___kuser_get_tls
+       .type   ___kuser_get_tls,function
+___kuser_get_tls:
+       MOVT    D1Ar1,#HI(USER_GATEWAY_PAGE + USER_GATEWAY_TLS)         ! build address of the
+       ADD     D1Ar1,D1Ar1,#LO(USER_GATEWAY_PAGE + USER_GATEWAY_TLS)   ! per-thread TLS slot array
+       MOV     D1Ar3,TXENABLE                                  ! read thread-state register
+       AND     D1Ar3,D1Ar3,#(TXENABLE_THREAD_BITS)             ! isolate the hw thread number field
+       LSR     D1Ar3,D1Ar3,#(TXENABLE_THREAD_S - 2)            ! convert to a byte offset (thread * 4)
+       GETD    D0Re0,[D1Ar1+D1Ar3]                             ! fetch this thread's TLS pointer
+___kuser_get_tls_end:          /* Beyond this point the read will complete */
+       MOV     PC,D1RtP
+       .size   ___kuser_get_tls,.-___kuser_get_tls
+       .global ___kuser_get_tls_end
+
+       /* cmpxchg
+        * Offset:       64
+        * Description:  Replace the value at 'ptr' with 'newval' if the current
+        *               value is 'oldval'. Return zero if we succeeded,
+        *               non-zero otherwise.
+        *
+        * Reference prototype:
+        *
+        *      int __kuser_cmpxchg(int oldval, int newval, unsigned long *ptr)
+        *
+        */
+       .balign 64
+       .global ___kuser_cmpxchg
+       .type   ___kuser_cmpxchg,function
+___kuser_cmpxchg:
+#ifdef CONFIG_SMP
+       /*
+        * We must use LNKGET/LNKSET with an SMP kernel because the other method
+        * does not provide atomicity across multiple CPUs.
+        */
+0:     LNKGETD D0Re0,[D1Ar3]           ! linked load of *ptr
+       CMP     D0Re0,D1Ar1             ! current value == oldval?
+       LNKSETDZ [D1Ar3],D0Ar2          ! conditional linked store of newval (only if Z)
+       BNZ     1f                      ! values differed: fail (Z clear at 1:)
+       DEFR    D0Re0,TXSTAT            ! fetch status of the linked store
+       ANDT    D0Re0,D0Re0,#HI(0x3f000000)     ! isolate the LNKSET result field
+       CMPT    D0Re0,#HI(0x02000000)   ! store completed atomically?
+       BNE     0b                      ! no: contention, retry from the load
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+       DCACHE  [D1Ar3], D0Re0
+#endif
+1:     MOV     D0Re0,#1                ! provisional result: failure
+       XORZ    D0Re0,D0Re0,D0Re0       ! Z from the compare above => zero it (success)
+       MOV     PC,D1RtP
+#else
+       GETD    D0Re0,[D1Ar3]           ! plain load (UP: no cross-CPU atomicity needed)
+       CMP     D0Re0,D1Ar1             ! current value == oldval?
+       SETDZ   [D1Ar3],D0Ar2           ! conditional store of newval (only if Z)
+___kuser_cmpxchg_end:          /* Beyond this point the write will complete */
+       MOV     D0Re0,#1                ! provisional result: failure
+       XORZ    D0Re0,D0Re0,D0Re0       ! Z set => return 0 (success), else 1
+       MOV     PC,D1RtP
+#endif /* CONFIG_SMP */
+       .size   ___kuser_cmpxchg,.-___kuser_cmpxchg
+       .global ___kuser_cmpxchg_end
+
+       .global ___user_gateway_end
+___user_gateway_end:
diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S
new file mode 100644 (file)
index 0000000..e12055e
--- /dev/null
@@ -0,0 +1,71 @@
+/* ld script to make Meta Linux kernel */
+
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-metag", "elf32-metag", "elf32-metag")
+OUTPUT_ARCH(metag)
+ENTRY(__start)
+
+_jiffies = _jiffies_64;
+SECTIONS
+{
+  . = CONFIG_PAGE_OFFSET;
+  _text = .;
+  __text = .;
+  __stext = .;
+  HEAD_TEXT_SECTION
+  .text : {
+       TEXT_TEXT
+       SCHED_TEXT
+       LOCK_TEXT
+       KPROBES_TEXT
+       IRQENTRY_TEXT
+       *(.text.*)
+       *(.gnu.warning)
+       }
+
+  __etext = .;                 /* End of text section */
+
+  __sdata = .;
+  RO_DATA_SECTION(PAGE_SIZE)
+  RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+  __edata = .;                 /* End of data section */
+
+  EXCEPTION_TABLE(16)
+  NOTES
+
+  . = ALIGN(PAGE_SIZE);                /* Init code and data */
+  ___init_begin = .;
+  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_DATA_SECTION(16)
+
+  .init.arch.info : {
+         ___arch_info_begin = .;
+         *(.arch.info.init)
+         ___arch_info_end = .;
+  }
+
+  PERCPU_SECTION(L1_CACHE_BYTES)
+
+  ___init_end = .;
+
+  BSS_SECTION(0, PAGE_SIZE, 0)
+
+  __end = .;
+
+  . = ALIGN(PAGE_SIZE);
+  __heap_start = .;
+
+  DWARF_DEBUG
+
+  /* When something in the kernel is NOT compiled as a module, the
+   * module cleanup code and data are put into these segments.  Both
+   * can then be thrown away, as cleanup code is never called unless
+   * it's a module.
+   */
+  DISCARDS
+}
diff --git a/arch/metag/lib/Makefile b/arch/metag/lib/Makefile
new file mode 100644 (file)
index 0000000..a41d24e
--- /dev/null
@@ -0,0 +1,22 @@
+#
+# Makefile for Meta-specific library files.
+#
+
+lib-y += usercopy.o
+lib-y += copy_page.o
+lib-y += clear_page.o
+lib-y += memcpy.o
+lib-y += memmove.o
+lib-y += memset.o
+lib-y += delay.o
+lib-y += div64.o
+lib-y += muldi3.o
+lib-y += ashrdi3.o
+lib-y += ashldi3.o
+lib-y += lshrdi3.o
+lib-y += divsi3.o
+lib-y += modsi3.o
+lib-y += cmpdi2.o
+lib-y += ucmpdi2.o
+lib-y += ip_fast_csum.o
+lib-y += checksum.o
diff --git a/arch/metag/lib/ashldi3.S b/arch/metag/lib/ashldi3.S
new file mode 100644 (file)
index 0000000..78d6974
--- /dev/null
@@ -0,0 +1,33 @@
+! Copyright (C) 2012 by Imagination Technologies Ltd.
+!
+! 64-bit arithmetic shift left routine.
+!
+
+       .text
+       .global ___ashldi3
+       .type   ___ashldi3,function
+
+___ashldi3:
+       MOV     D0Re0,D0Ar2
+       MOV     D1Re0,D1Ar1
+       CMP     D1Ar3,#0                ! COUNT == 0
+       MOVEQ   PC,D1RtP                ! Yes, return
+
+       SUBS    D0Ar4,D1Ar3,#32         ! N = COUNT - 32
+       BGE     $L10
+
+!! Shift < 32
+       NEG     D0Ar4,D0Ar4             ! N = - N
+       LSL     D1Re0,D1Re0,D1Ar3       ! HI = HI << COUNT
+       LSR     D0Ar6,D0Re0,D0Ar4       ! TMP= LO >> -(COUNT - 32)
+       OR      D1Re0,D1Re0,D0Ar6       ! HI = HI | TMP
+       SWAP    D0Ar4,D1Ar3
+       LSL     D0Re0,D0Re0,D0Ar4       ! LO = LO << COUNT
+       MOV     PC,D1RtP
+
+$L10:
+!! Shift >= 32
+       LSL     D1Re0,D0Re0,D0Ar4       ! HI = LO << N
+       MOV     D0Re0,#0                ! LO = 0
+       MOV     PC,D1RtP
+       .size ___ashldi3,.-___ashldi3
diff --git a/arch/metag/lib/ashrdi3.S b/arch/metag/lib/ashrdi3.S
new file mode 100644 (file)
index 0000000..7cb7ed3
--- /dev/null
@@ -0,0 +1,33 @@
+! Copyright (C) 2012 by Imagination Technologies Ltd.
+!
+! 64-bit arithmetic shift right routine.
+!
+
+       .text
+       .global ___ashrdi3
+       .type   ___ashrdi3,function
+
+___ashrdi3:
+       MOV     D0Re0,D0Ar2
+       MOV     D1Re0,D1Ar1
+       CMP     D1Ar3,#0                ! COUNT == 0
+       MOVEQ   PC,D1RtP                ! Yes, return
+
+       MOV     D0Ar4,D1Ar3
+       SUBS    D1Ar3,D1Ar3,#32         ! N = COUNT - 32
+       BGE     $L20
+
+!! Shift < 32
+       NEG     D1Ar3,D1Ar3             ! N = - N
+       LSR     D0Re0,D0Re0,D0Ar4       ! LO = LO >> COUNT
+       LSL     D0Ar6,D1Re0,D1Ar3       ! TMP= HI << -(COUNT - 32)
+       OR      D0Re0,D0Re0,D0Ar6       ! LO = LO | TMP
+       SWAP    D1Ar3,D0Ar4
+       ASR     D1Re0,D1Re0,D1Ar3       ! HI = HI >> COUNT
+       MOV     PC,D1RtP
+$L20:
+!! Shift >= 32
+       ASR     D0Re0,D1Re0,D1Ar3       ! LO = HI >> N
+       ASR     D1Re0,D1Re0,#31         ! HI = HI >> 31
+       MOV     PC,D1RtP
+       .size ___ashrdi3,.-___ashrdi3
diff --git a/arch/metag/lib/checksum.c b/arch/metag/lib/checksum.c
new file mode 100644 (file)
index 0000000..44d2e19
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ *
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             IP/TCP/UDP checksumming routines
+ *
+ * Authors:    Jorge Cwik, <jorge@laser.satlink.net>
+ *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *             Tom May, <ftom@netcom.com>
+ *             Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ *             Lots of code moved from tcp.c and ip.c; see those files
+ *             for more names.
+ *
+ * 03/02/96    Jes Sorensen, Andreas Schwab, Roman Hodek:
+ *             Fixed some nasty bugs, causing some horrible crashes.
+ *             A: At some points, the sum (%0) was used as
+ *             length-counter instead of the length counter
+ *             (%1). Thanks to Roman Hodek for pointing this out.
+ *             B: GCC seems to mess up if one uses too many
+ *             data-registers to hold input values and one tries to
+ *             specify d0 and d1 as scratch registers. Letting gcc
+ *             choose these registers itself solves the problem.
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ */
+
+/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
+ kills, so most of the assembly has to go. */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+
+#include <asm/byteorder.h>
+
+static inline unsigned short from32to16(unsigned int x) /* fold a 32-bit partial sum to 16 bits, preserving end-around carries */
+{
+       /* add up 16-bit and 16-bit for 16+c bit */
+       x = (x & 0xffff) + (x >> 16);
+       /* add up carry.. */
+       x = (x & 0xffff) + (x >> 16);   /* second pass folds the carry produced above */
+       return x;
+}
+
+static unsigned int do_csum(const unsigned char *buff, int len) /* 16-bit one's-complement sum over buff[0..len); core of the checksum helpers below */
+{
+       int odd;
+       unsigned int result = 0;
+
+       if (len <= 0)
+               goto out;       /* empty buffer: checksum 0 */
+       odd = 1 & (unsigned long) buff; /* does the buffer start on an odd address? */
+       if (odd) {
+#ifdef __LITTLE_ENDIAN
+               result += (*buff << 8); /* first byte lands in the high lane on LE */
+#else
+               result = *buff;
+#endif
+               len--;
+               buff++;         /* now 2-byte aligned */
+       }
+       if (len >= 2) {
+               if (2 & (unsigned long) buff) { /* align up to 4 bytes */
+                       result += *(unsigned short *) buff;
+                       len -= 2;
+                       buff += 2;
+               }
+               if (len >= 4) {
+                       const unsigned char *end = buff + ((unsigned)len & ~3); /* last full 4-byte word */
+                       unsigned int carry = 0;
+                       do {
+                               unsigned int w = *(unsigned int *) buff;
+                               buff += 4;
+                               result += carry;        /* fold in the previous iteration's carry */
+                               result += w;
+                               carry = (w > result);   /* 32-bit wrap detected => carry 1 */
+                       } while (buff < end);
+                       result += carry;
+                       result = (result & 0xffff) + (result >> 16);    /* partial fold to keep result small */
+               }
+               if (len & 2) {  /* trailing 2-byte chunk */
+                       result += *(unsigned short *) buff;
+                       buff += 2;
+               }
+       }
+       if (len & 1)    /* trailing odd byte */
+#ifdef __LITTLE_ENDIAN
+               result += *buff;
+#else
+               result += (*buff << 8);
+#endif
+       result = from32to16(result);
+       if (odd)
+               result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); /* undo the byte rotation caused by the odd start */
+out:
+       return result;
+}
+EXPORT_SYMBOL(ip_fast_csum); /* ip_fast_csum itself is implemented in lib/ip_fast_csum.S; only the export lives here */
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum wsum) /* checksum buff[0..len) and accumulate into wsum (one's-complement) */
+{
+       unsigned int sum = (__force unsigned int)wsum;
+       unsigned int result = do_csum(buff, len);
+
+       /* add in old sum, and carry.. */
+       result += sum;
+       if (sum > result)       /* 32-bit addition wrapped */
+               result += 1;    /* end-around carry */
+       return (__force __wsum)result;
+}
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+__sum16 ip_compute_csum(const void *buff, int len) /* one-shot checksum: fold and complement, ready to place in a header */
+{
+       return (__force __sum16)~do_csum(buff, len);    /* ~ gives the one's-complement final form */
+}
+EXPORT_SYMBOL(ip_compute_csum);
+
+/*
+ * copy from fs while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+                                               __wsum sum, int *csum_err) /* copy len bytes from userspace and checksum them; *csum_err reports fault status */
+{
+       int missing;
+
+       missing = __copy_from_user(dst, src, len);      /* returns the number of bytes NOT copied */
+       if (missing) {
+               memset(dst + len - missing, 0, missing);        /* zero the uncopied tail so the checksum is well-defined */
+               *csum_err = -EFAULT;
+       } else
+               *csum_err = 0;
+
+       return csum_partial(dst, len, sum);     /* checksum the (possibly zero-padded) destination copy */
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+/*
+ * copy from ds while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy(const void *src, void *dst, int len, __wsum sum) /* kernel-to-kernel variant: plain copy then checksum the copy */
+{
+       memcpy(dst, src, len);
+       return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy);
diff --git a/arch/metag/lib/clear_page.S b/arch/metag/lib/clear_page.S
new file mode 100644 (file)
index 0000000..43144ee
--- /dev/null
@@ -0,0 +1,17 @@
+        ! Copyright 2007,2008,2009 Imagination Technologies Ltd.
+
+#include <asm/page.h>
+
+        .text
+        .global        _clear_page
+        .type   _clear_page,function
+       !! D1Ar1 - page
+_clear_page:
+       MOV  TXRPT,#((PAGE_SIZE / 8) - 1)       ! hw repeat count: PAGE_SIZE/8 iterations (TXRPT wants count-1)
+       MOV  D0Re0,#0                           ! 64-bit zero in the
+       MOV  D1Re0,#0                           ! D0Re0:D1Re0 pair
+$Lclear_page_loop:
+       SETL [D1Ar1++],D0Re0,D1Re0              ! store 8 zero bytes, post-increment
+       BR   $Lclear_page_loop                  ! repeat under TXRPT control
+       MOV  PC,D1RtP
+        .size  _clear_page,.-_clear_page
diff --git a/arch/metag/lib/cmpdi2.S b/arch/metag/lib/cmpdi2.S
new file mode 100644 (file)
index 0000000..9c5c663
--- /dev/null
@@ -0,0 +1,32 @@
+! Copyright (C) 2012 by Imagination Technologies Ltd.
+!
+! 64-bit signed compare routine.
+!
+
+       .text
+       .global ___cmpdi2
+       .type   ___cmpdi2,function
+
+!         low    high
+! s64 a  (D0Ar2, D1Ar1)
+! s64 b  (D0Ar4, D1Ar3)
+___cmpdi2:
+       ! start at 1 (equal) and conditionally increment or decrement
+       MOV     D0Re0,#1
+
+       ! high words differ?
+       CMP     D1Ar1,D1Ar3
+       BNE     $Lhigh_differ
+
+       ! unsigned compare low words
+       CMP     D0Ar2,D0Ar4
+       SUBLO   D0Re0,D0Re0,#1
+       ADDHI   D0Re0,D0Re0,#1
+       MOV     PC,D1RtP
+
+$Lhigh_differ:
+       ! signed compare high words
+       SUBLT   D0Re0,D0Re0,#1
+       ADDGT   D0Re0,D0Re0,#1
+       MOV     PC,D1RtP
+       .size ___cmpdi2,.-___cmpdi2
diff --git a/arch/metag/lib/copy_page.S b/arch/metag/lib/copy_page.S
new file mode 100644 (file)
index 0000000..91f7d46
--- /dev/null
@@ -0,0 +1,20 @@
+        ! Copyright 2007,2008 Imagination Technologies Ltd.
+
+#include <asm/page.h>
+
+        .text
+        .global        _copy_page
+        .type   _copy_page,function
+       !! D1Ar1 - to
+       !! D0Ar2 - from
+_copy_page:
+       MOV  D0FrT,#PAGE_SIZE                   ! remaining byte count
+$Lcopy_page_loop:
+       GETL D0Re0,D1Re0,[D0Ar2++]              ! load 16 bytes from source
+       GETL D0Ar6,D1Ar5,[D0Ar2++]              ! (two 8-byte loads)
+       SETL [D1Ar1++],D0Re0,D1Re0              ! store 16 bytes to destination
+       SETL [D1Ar1++],D0Ar6,D1Ar5
+       SUBS D0FrT,D0FrT,#16                    ! 16 bytes moved per iteration
+       BNZ  $Lcopy_page_loop
+       MOV  PC,D1RtP
+        .size  _copy_page,.-_copy_page
diff --git a/arch/metag/lib/delay.c b/arch/metag/lib/delay.c
new file mode 100644 (file)
index 0000000..0b308f4
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ *     Precise Delay Loops for Meta
+ *
+ *     Copyright (C) 1993 Linus Torvalds
+ *     Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *     Copyright (C) 2007,2009 Imagination Technologies Ltd.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#include <asm/core_reg.h>
+#include <asm/processor.h>
+
+/*
+ * TXTACTCYC is only 24 bits, so on chips with fast clocks it will wrap
+ * many times per-second. If it does wrap __delay will return prematurely,
+ * but this is only likely with large delay values.
+ *
+ * We also can't implement read_current_timer() with TXTACTCYC due to
+ * this wrapping behaviour.
+ */
+#define rdtimer(t) t = __core_reg_get(TXTACTCYC)
+
+void __delay(unsigned long loops) /* busy-wait for @loops cycle-counter ticks */
+{
+       unsigned long bclock, now;
+
+       rdtimer(bclock);        /* snapshot start time (TXTACTCYC) */
+       do {
+               asm("NOP");     /* keep the loop body non-empty so it isn't optimised away */
+               rdtimer(now);
+       } while ((now-bclock) < loops); /* TXTACTCYC is only 24 bits, so a wrap can end the delay early */
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops) /* @xloops is a 2^-32 fraction of a second; scale to loop iterations */
+{
+       u64 loops = (u64)xloops * (u64)loops_per_jiffy * HZ;    /* lpj * HZ = loops per second */
+       __delay(loops >> 32);   /* >> 32 removes the fixed-point scaling */
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs) /* microsecond busy-wait built on __const_udelay() */
+{
+       __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs) /* nanosecond busy-wait built on __const_udelay() */
+{
+       __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/metag/lib/div64.S b/arch/metag/lib/div64.S
new file mode 100644 (file)
index 0000000..1cfc934
--- /dev/null
@@ -0,0 +1,108 @@
+! Copyright (C) 2012 Imagination Technologies Ltd.
+!
+! Signed/unsigned 64-bit division routines.
+!
+
+       .text
+       .global _div_u64
+       .type   _div_u64,function
+
+_div_u64:
+$L1:
+       ORS     A0.3,D1Ar3,D0Ar4
+       BNE     $L3
+$L2:
+       MOV     D0Re0,D0Ar2
+       MOV     D1Re0,D1Ar1
+       MOV     PC,D1RtP
+$L3:
+       CMP     D1Ar3,D1Ar1
+       CMPEQ   D0Ar4,D0Ar2
+       MOV     D0Re0,#1
+       MOV     D1Re0,#0
+       BHS     $L6
+$L4:
+       ADDS    D0Ar6,D0Ar4,D0Ar4
+       ADD     D1Ar5,D1Ar3,D1Ar3
+       ADDCS   D1Ar5,D1Ar5,#1
+       CMP     D1Ar5,D1Ar3
+       CMPEQ   D0Ar6,D0Ar4
+       BLO     $L6
+$L5:
+       MOV     D0Ar4,D0Ar6
+       MOV     D1Ar3,D1Ar5
+       ADDS    D0Re0,D0Re0,D0Re0
+       ADD     D1Re0,D1Re0,D1Re0
+       ADDCS   D1Re0,D1Re0,#1
+       CMP     D1Ar3,D1Ar1
+       CMPEQ   D0Ar4,D0Ar2
+       BLO     $L4
+$L6:
+       ORS     A0.3,D1Re0,D0Re0
+       MOV     D0Ar6,#0
+       MOV     D1Ar5,D0Ar6
+       BEQ     $L10
+$L7:
+       CMP     D1Ar1,D1Ar3
+       CMPEQ   D0Ar2,D0Ar4
+       BLO     $L9
+$L8:
+       ADDS    D0Ar6,D0Ar6,D0Re0
+       ADD     D1Ar5,D1Ar5,D1Re0
+       ADDCS   D1Ar5,D1Ar5,#1
+
+       SUBS    D0Ar2,D0Ar2,D0Ar4
+       SUB     D1Ar1,D1Ar1,D1Ar3
+       SUBCS   D1Ar1,D1Ar1,#1
+$L9:
+       LSL     A0.3,D1Re0,#31
+       LSR     D0Re0,D0Re0,#1
+       LSR     D1Re0,D1Re0,#1
+       OR      D0Re0,D0Re0,A0.3
+       LSL     A0.3,D1Ar3,#31
+       LSR     D0Ar4,D0Ar4,#1
+       LSR     D1Ar3,D1Ar3,#1
+       OR      D0Ar4,D0Ar4,A0.3
+       ORS     A0.3,D1Re0,D0Re0
+       BNE     $L7
+$L10:
+       MOV     D0Re0,D0Ar6
+       MOV     D1Re0,D1Ar5
+       MOV     PC,D1RtP
+       .size _div_u64,.-_div_u64
+
+       .text
+       .global _div_s64
+       .type   _div_s64,function
+_div_s64:
+       MSETL   [A0StP],D0FrT,D0.5
+       XOR     D0.5,D0Ar2,D0Ar4
+       XOR     D1.5,D1Ar1,D1Ar3
+       TSTT    D1Ar1,#HI(0x80000000)
+       BZ      $L25
+
+       NEGS    D0Ar2,D0Ar2
+       NEG     D1Ar1,D1Ar1
+       SUBCS   D1Ar1,D1Ar1,#1
+$L25:
+       TSTT    D1Ar3,#HI(0x80000000)
+       BZ      $L27
+
+       NEGS    D0Ar4,D0Ar4
+       NEG     D1Ar3,D1Ar3
+       SUBCS   D1Ar3,D1Ar3,#1
+$L27:
+       CALLR   D1RtP,_div_u64
+       TSTT    D1.5,#HI(0x80000000)
+       BZ      $L29
+
+       NEGS    D0Re0,D0Re0
+       NEG     D1Re0,D1Re0
+       SUBCS   D1Re0,D1Re0,#1
+$L29:
+
+       GETL    D0FrT,D1RtP,[A0StP+#(-16)]
+       GETL    D0.5,D1.5,[A0StP+#(-8)]
+       SUB     A0StP,A0StP,#16
+       MOV     PC,D1RtP
+       .size _div_s64,.-_div_s64
diff --git a/arch/metag/lib/divsi3.S b/arch/metag/lib/divsi3.S
new file mode 100644 (file)
index 0000000..7c8a8ae
--- /dev/null
@@ -0,0 +1,100 @@
+! Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
+!               Imagination Technologies Ltd
+!
+! Integer divide routines.
+!
+
+       .text
+       .global ___udivsi3
+       .type   ___udivsi3,function
+       .align  2
+___udivsi3:
+!!
+!! Since core is signed divide case, just set control variable
+!!
+       MOV     D1Re0,D0Ar2             ! Au already in A1Ar1, Bu -> D1Re0
+       MOV     D0Re0,#0                ! Result is 0
+       MOV     D0Ar4,#0                ! Return positive result
+       B       $LIDMCUStart
+       .size   ___udivsi3,.-___udivsi3
+
+!!
+!! 32-bit division signed i/p - passed signed 32-bit numbers
+!!
+       .global ___divsi3
+       .type   ___divsi3,function
+       .align  2
+___divsi3:
+!!
+!! A already in D1Ar1, B already in D0Ar2 -> make B abs(B)
+!!
+       MOV     D1Re0,D0Ar2             ! A already in A1Ar1, B -> D1Re0
+       MOV     D0Re0,#0                ! Result is 0
+       XOR     D0Ar4,D1Ar1,D1Re0       ! D0Ar4 -ive if result is -ive
+       ABS     D1Ar1,D1Ar1             ! abs(A) -> Au
+       ABS     D1Re0,D1Re0             ! abs(B) -> Bu
+$LIDMCUStart:
+       CMP     D1Ar1,D1Re0             ! Is ( Au > Bu )?
+       LSR     D1Ar3,D1Ar1,#2          ! Calculate (Au & (~3)) >> 2
+       CMPHI   D1Re0,D1Ar3             ! OR ( (Au & (~3)) <= (Bu << 2) )?
+       LSLSHI  D1Ar3,D1Re0,#1          ! Buq = Bu << 1
+       BLS     $LIDMCUSetup            ! Yes: Do normal divide
+!!
+!! Quick divide setup can assume that CurBit only needs to start at 2
+!!
+$LIDMCQuick:
+       CMP     D1Ar1,D1Ar3             ! ( A >= Buq )?
+       ADDCC   D0Re0,D0Re0,#2          ! If yes result += 2
+       SUBCC   D1Ar1,D1Ar1,D1Ar3       !        and A -= Buq
+       CMP     D1Ar1,D1Re0             ! ( A >= Bu )?
+       ADDCC   D0Re0,D0Re0,#1          ! If yes result += 1
+       SUBCC   D1Ar1,D1Ar1,D1Re0       !        and A -= Bu
+       ORS     D0Ar4,D0Ar4,D0Ar4       ! Return neg result?
+       NEG     D0Ar2,D0Re0             ! Calculate neg result
+       MOVMI   D0Re0,D0Ar2             ! Yes: Take neg result
+$LIDMCRet:
+       MOV     PC,D1RtP
+!!
+!!  Setup for general unsigned divide code
+!!
+!!      D0Re0 is used to form the result, already set to Zero
+!!      D1Re0 is the input Bu value, this gets trashed
+!!      D0Ar6 is curbit which is set to 1 at the start and shifted up
+!!      D0Ar4 is negative if we should return a negative result
+!!      D1Ar1 is the input Au value, eventually this holds the remainder
+!!
+$LIDMCUSetup:
+       CMP     D1Ar1,D1Re0             ! Is ( Au < Bu )?
+       MOV     D0Ar6,#1                ! Set curbit to 1
+       BCS     $LIDMCRet               ! Yes: Return 0 remainder Au
+!!
+!! Calculate alignment using FFB instruction
+!!
+       FFB     D1Ar5,D1Ar1             ! Find first bit of Au
+       ANDN    D1Ar5,D1Ar5,#31         ! Handle exceptional case.
+       ORN     D1Ar5,D1Ar5,#31         ! if N bit set, set to 31
+       FFB     D1Ar3,D1Re0             ! Find first bit of Bu
+       ANDN    D1Ar3,D1Ar3,#31         ! Handle exceptional case.
+       ORN     D1Ar3,D1Ar3,#31         ! if N bit set, set to 31
+       SUBS    D1Ar3,D1Ar5,D1Ar3       ! calculate diff, ffbA - ffbB
+       MOV     D0Ar2,D1Ar3             ! copy into bank 0
+       LSLGT   D1Re0,D1Re0,D1Ar3       ! ( > 0) ? left shift B
+       LSLGT   D0Ar6,D0Ar6,D0Ar2       ! ( > 0) ? left shift curbit
+!!
+!! Now we start the divide proper, logic is
+!!
+!!       if ( A >= B ) add curbit to result and subtract B from A
+!!       shift curbit and B down by 1 in either case
+!!
+$LIDMCLoop:
+       CMP     D1Ar1, D1Re0            ! ( A >= B )?
+       ADDCC   D0Re0, D0Re0, D0Ar6     ! If yes result += curbit
+       SUBCC   D1Ar1, D1Ar1, D1Re0     ! and A -= B
+       LSRS    D0Ar6, D0Ar6, #1        ! Shift down curbit, is it zero?
+       LSR     D1Re0, D1Re0, #1        ! Shift down B
+       BNZ     $LIDMCLoop               ! Was single bit in curbit lost?
+       ORS     D0Ar4,D0Ar4,D0Ar4       ! Return neg result?
+       NEG     D0Ar2,D0Re0             ! Calculate neg result
+       MOVMI   D0Re0,D0Ar2             ! Yes: Take neg result
+       MOV     PC,D1RtP
+       .size   ___divsi3,.-___divsi3
diff --git a/arch/metag/lib/ip_fast_csum.S b/arch/metag/lib/ip_fast_csum.S
new file mode 100644 (file)
index 0000000..533b1e7
--- /dev/null
@@ -0,0 +1,32 @@
+
+       .text
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ * extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+ *
+ */
+       .global _ip_fast_csum
+       .type   _ip_fast_csum,function
+_ip_fast_csum:
+       !! TXRPT needs loops - 1
+       SUBS    TXRPT,D0Ar2,#1                  ! ihl is in 32-bit words; load count-1 into hw repeat counter
+       MOV     D0Re0,#0                        ! running 32-bit sum = 0
+       BLO     $Lfast_csum_exit                ! ihl == 0: return 0
+$Lfast_csum_loop:
+       GETD    D1Ar3,[D1Ar1++]                 ! fetch next 32-bit word of the header
+       ADDS    D0Re0,D0Re0,D1Ar3               ! accumulate
+       ADDCS   D0Re0,D0Re0,#1                  ! one's-complement: add the carry back in
+       BR      $Lfast_csum_loop                ! repeat under TXRPT control
+       LSR     D0Ar4,D0Re0,#16                 ! fold the 32-bit sum to 16 bits:
+       AND     D0Re0,D0Re0,#0xffff             !   low half
+       AND     D0Ar4,D0Ar4,#0xffff             !   high half
+       ADD     D0Re0,D0Re0,D0Ar4               !   low + high
+       LSR     D0Ar4,D0Re0,#16                 ! fold the carry from that add
+       ADD     D0Re0,D0Re0,D0Ar4
+       XOR     D0Re0,D0Re0,#-1                 ! complement for the final __sum16
+       AND     D0Re0,D0Re0,#0xffff             ! mask to 16 bits
+$Lfast_csum_exit:
+       MOV     PC,D1RtP
+       .size _ip_fast_csum,.-_ip_fast_csum
diff --git a/arch/metag/lib/lshrdi3.S b/arch/metag/lib/lshrdi3.S
new file mode 100644 (file)
index 0000000..47f7202
--- /dev/null
@@ -0,0 +1,33 @@
+! Copyright (C) 2012 by Imagination Technologies Ltd.
+!
+! 64-bit logical shift right routine.
+!
+
+       .text
+       .global ___lshrdi3
+       .type   ___lshrdi3,function
+
+___lshrdi3:
+       MOV     D0Re0,D0Ar2
+       MOV     D1Re0,D1Ar1
+       CMP     D1Ar3,#0                ! COUNT == 0
+       MOVEQ   PC,D1RtP                ! Yes, return
+
+       MOV     D0Ar4,D1Ar3
+       SUBS    D1Ar3,D1Ar3,#32         ! N = COUNT - 32
+       BGE     $L30
+
+!! Shift < 32
+       NEG     D1Ar3,D1Ar3             ! N = - N
+       LSR     D0Re0,D0Re0,D0Ar4       ! LO = LO >> COUNT
+       LSL     D0Ar6,D1Re0,D1Ar3       ! TMP= HI << -(COUNT - 32)
+       OR      D0Re0,D0Re0,D0Ar6       ! LO = LO | TMP
+       SWAP    D1Ar3,D0Ar4
+       LSR     D1Re0,D1Re0,D1Ar3       ! HI = HI >> COUNT
+       MOV     PC,D1RtP
+$L30:
+!! Shift >= 32
+       LSR     D0Re0,D1Re0,D1Ar3       ! LO = HI >> N
+       MOV     D1Re0,#0                ! HI = 0
+       MOV     PC,D1RtP
+       .size ___lshrdi3,.-___lshrdi3
diff --git a/arch/metag/lib/memcpy.S b/arch/metag/lib/memcpy.S
new file mode 100644 (file)
index 0000000..46b7a2b
--- /dev/null
@@ -0,0 +1,185 @@
+!   Copyright (C) 2008-2012 Imagination Technologies Ltd.
+
+       .text
+       .global _memcpy
+       .type   _memcpy,function
+! D1Ar1 dst
+! D0Ar2 src
+! D1Ar3 cnt
+! D0Re0 dst
+_memcpy:
+       CMP     D1Ar3, #16
+       MOV     A1.2, D0Ar2             ! source pointer
+       MOV     A0.2, D1Ar1             ! destination pointer
+       MOV     A0.3, D1Ar1             ! for return value
+! If there are less than 16 bytes to copy use the byte copy loop
+       BGE     $Llong_copy
+
+$Lbyte_copy:
+! Simply copy a byte at a time
+       SUBS    TXRPT, D1Ar3, #1
+       BLT     $Lend
+$Lloop_byte:
+       GETB    D1Re0, [A1.2++]
+       SETB    [A0.2++], D1Re0
+       BR      $Lloop_byte
+
+$Lend:
+! Finally set return value and return
+       MOV     D0Re0, A0.3
+       MOV     PC, D1RtP
+
+$Llong_copy:
+       ANDS    D1Ar5, D1Ar1, #7        ! test destination alignment
+       BZ      $Laligned_dst
+
+! The destination address is not 8 byte aligned. We will copy bytes from
+! the source to the destination until the remaining data has an 8 byte
+! destination address alignment (i.e we should never copy more than 7
+! bytes here).
+$Lalign_dst:
+       GETB    D0Re0, [A1.2++]
+       ADD     D1Ar5, D1Ar5, #1        ! dest is aligned when D1Ar5 reaches #8
+       SUB     D1Ar3, D1Ar3, #1        ! decrement count of remaining bytes
+       SETB    [A0.2++], D0Re0
+       CMP     D1Ar5, #8
+       BNE     $Lalign_dst
+
+! We have at least (16 - 7) = 9 bytes to copy - calculate the number of 8 byte
+! blocks, then jump to the unaligned copy loop or fall through to the aligned
+! copy loop as appropriate.
+$Laligned_dst:
+       MOV     D0Ar4, A1.2
+       LSR     D1Ar5, D1Ar3, #3        ! D1Ar5 = number of 8 byte blocks
+       ANDS    D0Ar4, D0Ar4, #7        ! test source alignment
+       BNZ     $Lunaligned_copy        ! if unaligned, use unaligned copy loop
+
+! Both source and destination are 8 byte aligned - the easy case.
+$Laligned_copy:
+       LSRS    D1Ar5, D1Ar3, #5        ! D1Ar5 = number of 32 byte blocks
+       BZ      $Lbyte_copy
+       SUB     TXRPT, D1Ar5, #1
+
+$Laligned_32:
+       GETL    D0Re0, D1Re0, [A1.2++]
+       GETL    D0Ar6, D1Ar5, [A1.2++]
+       SETL    [A0.2++], D0Re0, D1Re0
+       SETL    [A0.2++], D0Ar6, D1Ar5
+       GETL    D0Re0, D1Re0, [A1.2++]
+       GETL    D0Ar6, D1Ar5, [A1.2++]
+       SETL    [A0.2++], D0Re0, D1Re0
+       SETL    [A0.2++], D0Ar6, D1Ar5
+       BR      $Laligned_32
+
+! If there are any remaining bytes use the byte copy loop, otherwise we are done
+       ANDS    D1Ar3, D1Ar3, #0x1f
+       BNZ     $Lbyte_copy
+       B       $Lend
+
+! The destination is 8 byte aligned but the source is not, and there are 8
+! or more bytes to be copied.
+$Lunaligned_copy:
+! Adjust the source pointer (A1.2) to the 8 byte boundary before its
+! current value
+       MOV     D0Ar4, A1.2
+       MOV     D0Ar6, A1.2
+       ANDMB   D0Ar4, D0Ar4, #0xfff8
+       MOV     A1.2, D0Ar4
+! Save the number of bytes of mis-alignment in D0Ar4 for use later
+       SUBS    D0Ar6, D0Ar6, D0Ar4
+       MOV     D0Ar4, D0Ar6
+! if there is no mis-alignment after all, use the aligned copy loop
+       BZ      $Laligned_copy
+
+! prefetch 8 bytes
+       GETL    D0Re0, D1Re0, [A1.2]
+
+       SUB     TXRPT, D1Ar5, #1
+
+! There are 3 mis-alignment cases to be considered. Less than 4 bytes, exactly
+! 4 bytes, and more than 4 bytes.
+       CMP     D0Ar6, #4
+       BLT     $Lunaligned_1_2_3       ! use 1-3 byte mis-alignment loop
+       BZ      $Lunaligned_4           ! use 4 byte mis-alignment loop
+
+! The mis-alignment is more than 4 bytes
+$Lunaligned_5_6_7:
+       SUB     D0Ar6, D0Ar6, #4
+! Calculate the bit offsets required for the shift operations necessary
+! to align the data.
+! D0Ar6 = bit offset, D1Ar5 = (32 - bit offset)
+       MULW    D0Ar6, D0Ar6, #8
+       MOV     D1Ar5, #32
+       SUB     D1Ar5, D1Ar5, D0Ar6
+! Move data 4 bytes before we enter the main loop
+       MOV     D0Re0, D1Re0
+
+$Lloop_5_6_7:
+       GETL    D0Ar2, D1Ar1, [++A1.2]
+! form 64-bit data in D0Re0, D1Re0
+       LSR     D0Re0, D0Re0, D0Ar6
+       MOV     D1Re0, D0Ar2
+       LSL     D1Re0, D1Re0, D1Ar5
+       ADD     D0Re0, D0Re0, D1Re0
+
+       LSR     D0Ar2, D0Ar2, D0Ar6
+       LSL     D1Re0, D1Ar1, D1Ar5
+       ADD     D1Re0, D1Re0, D0Ar2
+
+       SETL    [A0.2++], D0Re0, D1Re0
+       MOV     D0Re0, D1Ar1
+       BR      $Lloop_5_6_7
+
+       B       $Lunaligned_end
+
+$Lunaligned_1_2_3:
+! Calculate the bit offsets required for the shift operations necessary
+! to align the data.
+! D0Ar6 = bit offset, D1Ar5 = (32 - bit offset)
+       MULW    D0Ar6, D0Ar6, #8
+       MOV     D1Ar5, #32
+       SUB     D1Ar5, D1Ar5, D0Ar6
+
+$Lloop_1_2_3:
+! form 64-bit data in D0Re0,D1Re0
+       LSR     D0Re0, D0Re0, D0Ar6
+       LSL     D1Ar1, D1Re0, D1Ar5
+       ADD     D0Re0, D0Re0, D1Ar1
+       MOV     D0Ar2, D1Re0
+       LSR     D0FrT, D0Ar2, D0Ar6
+       GETL    D0Ar2, D1Ar1, [++A1.2]
+
+       MOV     D1Re0, D0Ar2
+       LSL     D1Re0, D1Re0, D1Ar5
+       ADD     D1Re0, D1Re0, D0FrT
+
+       SETL    [A0.2++], D0Re0, D1Re0
+       MOV     D0Re0, D0Ar2
+       MOV     D1Re0, D1Ar1
+       BR      $Lloop_1_2_3
+
+       B       $Lunaligned_end
+
+! The 4 byte mis-alignment case - this does not require any shifting, just a
+! shuffling of registers.
+$Lunaligned_4:
+       MOV     D0Re0, D1Re0
+$Lloop_4:
+       GETL    D0Ar2, D1Ar1, [++A1.2]
+       MOV     D1Re0, D0Ar2
+       SETL    [A0.2++], D0Re0, D1Re0
+       MOV     D0Re0, D1Ar1
+       BR      $Lloop_4
+
+$Lunaligned_end:
+! If there are no remaining bytes to copy, we are done.
+       ANDS    D1Ar3, D1Ar3, #7
+       BZ      $Lend
+! Re-adjust the source pointer (A1.2) back to the actual (unaligned) byte
+! address of the remaining bytes, and fall through to the byte copy loop.
+       MOV     D0Ar6, A1.2
+       ADD     D1Ar5, D0Ar4, D0Ar6
+       MOV     A1.2, D1Ar5
+       B       $Lbyte_copy
+
+       .size _memcpy,.-_memcpy
diff --git a/arch/metag/lib/memmove.S b/arch/metag/lib/memmove.S
new file mode 100644 (file)
index 0000000..228ea04
--- /dev/null
@@ -0,0 +1,345 @@
+!   Copyright (C) 2008-2012 Imagination Technologies Ltd.
+
+       .text
+       .global _memmove
+       .type   _memmove,function
+! D1Ar1 dst
+! D0Ar2 src
+! D1Ar3 cnt
+! D0Re0 dst
+_memmove:                              ! void *memmove(dst D1Ar1, src D0Ar2, cnt D1Ar3)
+       CMP     D1Ar3, #0               ! cnt == 0?
+       MOV     D0Re0, D1Ar1            ! return value is always dst
+       BZ      $LEND2                  ! nothing to copy
+       MSETL   [A0StP], D0.5, D0.6, D0.7       ! save callee-saved register pairs
+       MOV     D1Ar5, D0Ar2
+       CMP     D1Ar1, D1Ar5            ! dst < src: forward copy cannot clobber src
+       BLT     $Lforwards_copy
+       SUB     D0Ar4, D1Ar1, D1Ar3
+       ADD     D0Ar4, D0Ar4, #1
+       CMP     D0Ar2, D0Ar4            ! src below dst-cnt+1: regions do not overlap
+       BLT     $Lforwards_copy
+       ! should copy backwards
+       MOV     D1Re0, D0Ar2
+       ! adjust pointer to the end of mem
+       ADD     D0Ar2, D1Re0, D1Ar3     ! src end
+       ADD     D1Ar1, D1Ar1, D1Ar3     ! dst end
+
+       MOV     A1.2, D0Ar2             ! A1.2 = src cursor (runs downwards)
+       MOV     A0.2, D1Ar1             ! A0.2 = dst cursor (runs downwards)
+       CMP     D1Ar3, #8
+       BLT     $Lbbyte_loop            ! short copy: bytes only
+
+       MOV     D0Ar4, D0Ar2
+       MOV     D1Ar5, D1Ar1
+
+       ! test 8 byte alignment
+       ANDS    D1Ar5, D1Ar5, #7
+       BNE     $Lbdest_unaligned
+
+       ANDS    D0Ar4, D0Ar4, #7
+       BNE     $Lbsrc_unaligned
+
+       LSR     D1Ar5, D1Ar3, #3        ! number of 8-byte chunks
+
+$Lbaligned_loop:                       ! both 8-byte aligned: 8 bytes per iteration
+       GETL    D0Re0, D1Re0, [--A1.2]
+       SETL    [--A0.2], D0Re0, D1Re0
+       SUBS    D1Ar5, D1Ar5, #1
+       BNE     $Lbaligned_loop
+
+       ANDS    D1Ar3, D1Ar3, #7        ! remaining tail bytes
+       BZ      $Lbbyte_loop_exit
+$Lbbyte_loop:                          ! backward byte-at-a-time copy
+       GETB    D1Re0, [--A1.2]
+       SETB    [--A0.2], D1Re0
+       SUBS    D1Ar3, D1Ar3, #1
+       BNE     $Lbbyte_loop
+$Lbbyte_loop_exit:
+       MOV     D0Re0, A0.2             ! NOTE(review): A0.2 should have walked back to original dst here
+$LEND:
+       SUB     A0.2, A0StP, #24
+       MGETL   D0.5, D0.6, D0.7, [A0.2]        ! restore saved register pairs
+       SUB     A0StP, A0StP, #24
+$LEND2:
+       MOV     PC, D1RtP               ! return
+
+$Lbdest_unaligned:                     ! copy bytes until dst is 8-byte aligned
+       GETB    D0Re0, [--A1.2]
+       SETB    [--A0.2], D0Re0
+       SUBS    D1Ar5, D1Ar5, #1        ! D1Ar5 = dst & 7 = bytes needed to align
+       SUB     D1Ar3, D1Ar3, #1
+       BNE     $Lbdest_unaligned
+       CMP     D1Ar3, #8
+       BLT     $Lbbyte_loop
+$Lbsrc_unaligned:                      ! dst aligned, src not: shift-and-merge copy
+       LSR     D1Ar5, D1Ar3, #3
+       ! adjust A1.2
+       MOV     D0Ar4, A1.2
+       ! save original address
+       MOV     D0Ar6, A1.2
+
+       ADD     D0Ar4, D0Ar4, #7
+       ANDMB   D0Ar4, D0Ar4, #0xfff8
+       ! new address is the 8-byte aligned one above the original
+       MOV     A1.2, D0Ar4
+
+       ! A0.2 dst 64-bit is aligned
+       ! measure the gap size
+       SUB     D0Ar6, D0Ar4, D0Ar6
+       MOVS    D0Ar4, D0Ar6
+       ! keep this information for the later adjustment
+       ! both aligned
+       BZ      $Lbaligned_loop
+
+       ! prefetch
+       GETL    D0Re0, D1Re0, [--A1.2]
+
+       CMP     D0Ar6, #4
+       BLT     $Lbunaligned_1_2_3      ! gap of 1-3 bytes
+       ! 32-bit aligned
+       BZ      $Lbaligned_4            ! gap of exactly 4 bytes
+
+       SUB     D0Ar6, D0Ar6, #4
+       ! D1.6 stores the gap size in bits
+       MULW    D1.6, D0Ar6, #8
+       MOV     D0.6, #32
+       ! D0.6 stores the complement of the gap size
+       SUB     D0.6, D0.6, D1.6
+
+$Lbunaligned_5_6_7:                    ! gap of 5-7 bytes: shift and merge word pairs
+       GETL    D0.7, D1.7, [--A1.2]
+       ! form 64-bit data in D0Re0, D1Re0
+       MOV     D1Re0, D0Re0
+       ! D1Re0 << gap-size
+       LSL     D1Re0, D1Re0, D1.6
+       MOV     D0Re0, D1.7
+       ! D0Re0 >> complement
+       LSR     D0Re0, D0Re0, D0.6
+       MOV     D1.5, D0Re0
+       ! combine the both
+       ADD     D1Re0, D1Re0, D1.5
+
+       MOV     D1.5, D1.7
+       LSL     D1.5, D1.5, D1.6
+       MOV     D0Re0, D0.7
+       LSR     D0Re0, D0Re0, D0.6
+       MOV     D0.5, D1.5
+       ADD     D0Re0, D0Re0, D0.5
+
+       SETL    [--A0.2], D0Re0, D1Re0
+       MOV     D0Re0, D0.7             ! carry fetched words into next iteration
+       MOV     D1Re0, D1.7
+       SUBS    D1Ar5, D1Ar5, #1
+       BNE     $Lbunaligned_5_6_7
+
+       ANDS    D1Ar3, D1Ar3, #7
+       BZ      $Lbbyte_loop_exit
+       ! Adjust A1.2
+       ! A1.2 <- A1.2 +8 - gapsize
+       ADD     A1.2, A1.2, #8
+       SUB     A1.2, A1.2, D0Ar4
+       B       $Lbbyte_loop
+
+$Lbunaligned_1_2_3:                    ! gap of 1-3 bytes
+       MULW    D1.6, D0Ar6, #8         ! gap size in bits
+       MOV     D0.6, #32
+       SUB     D0.6, D0.6, D1.6        ! complement of the gap size in bits
+
+$Lbunaligned_1_2_3_loop:
+       GETL    D0.7, D1.7, [--A1.2]
+       ! form 64-bit data in D0Re0, D1Re0
+       LSL     D1Re0, D1Re0, D1.6
+       ! save D0Re0 for later use
+       MOV     D0.5, D0Re0
+       LSR     D0Re0, D0Re0, D0.6
+       MOV     D1.5, D0Re0
+       ADD     D1Re0, D1Re0, D1.5
+
+       ! original data in D0Re0
+       MOV     D1.5, D0.5
+       LSL     D1.5, D1.5, D1.6
+       MOV     D0Re0, D1.7
+       LSR     D0Re0, D0Re0, D0.6
+       MOV     D0.5, D1.5
+       ADD     D0Re0, D0Re0, D0.5
+
+       SETL    [--A0.2], D0Re0, D1Re0
+       MOV     D0Re0, D0.7             ! carry fetched words into next iteration
+       MOV     D1Re0, D1.7
+       SUBS    D1Ar5, D1Ar5, #1
+       BNE     $Lbunaligned_1_2_3_loop
+
+       ANDS    D1Ar3, D1Ar3, #7
+       BZ      $Lbbyte_loop_exit
+       ! Adjust A1.2
+       ADD     A1.2, A1.2, #8
+       SUB     A1.2, A1.2, D0Ar4
+       B       $Lbbyte_loop
+
+$Lbaligned_4:                          ! gap of 4 bytes: register shuffle, no shifts
+       GETL    D0.7, D1.7, [--A1.2]
+       MOV     D1Re0, D0Re0
+       MOV     D0Re0, D1.7
+       SETL    [--A0.2], D0Re0, D1Re0
+       MOV     D0Re0, D0.7
+       MOV     D1Re0, D1.7
+       SUBS    D1Ar5, D1Ar5, #1
+       BNE     $Lbaligned_4
+       ANDS    D1Ar3, D1Ar3, #7
+       BZ      $Lbbyte_loop_exit
+       ! Adjust A1.2
+       ADD     A1.2, A1.2, #8
+       SUB     A1.2, A1.2, D0Ar4
+       B       $Lbbyte_loop
+
+$Lforwards_copy:                       ! no harmful overlap: copy upwards
+       MOV     A1.2, D0Ar2             ! A1.2 = src cursor (runs upwards)
+       MOV     A0.2, D1Ar1             ! A0.2 = dst cursor (runs upwards)
+       CMP     D1Ar3, #8
+       BLT     $Lfbyte_loop
+
+       MOV     D0Ar4, D0Ar2
+       MOV     D1Ar5, D1Ar1
+
+       ANDS    D1Ar5, D1Ar5, #7        ! test 8 byte alignment of dst
+       BNE     $Lfdest_unaligned
+
+       ANDS    D0Ar4, D0Ar4, #7        ! test 8 byte alignment of src
+       BNE     $Lfsrc_unaligned
+
+       LSR     D1Ar5, D1Ar3, #3        ! number of 8-byte chunks
+
+$Lfaligned_loop:                       ! both 8-byte aligned: 8 bytes per iteration
+       GETL    D0Re0, D1Re0, [A1.2++]
+       SUBS    D1Ar5, D1Ar5, #1
+       SETL    [A0.2++], D0Re0, D1Re0
+       BNE     $Lfaligned_loop
+
+       ANDS    D1Ar3, D1Ar3, #7        ! remaining tail bytes
+       BZ      $Lfbyte_loop_exit
+$Lfbyte_loop:                          ! forward byte-at-a-time copy
+       GETB    D1Re0, [A1.2++]
+       SETB    [A0.2++], D1Re0
+       SUBS    D1Ar3, D1Ar3, #1
+       BNE     $Lfbyte_loop
+$Lfbyte_loop_exit:
+       MOV     D0Re0, D1Ar1            ! return original dst
+       B       $LEND
+
+$Lfdest_unaligned:                     ! copy bytes until dst is 8-byte aligned
+       GETB    D0Re0, [A1.2++]
+       ADD     D1Ar5, D1Ar5, #1
+       SUB     D1Ar3, D1Ar3, #1
+       SETB    [A0.2++], D0Re0
+       CMP     D1Ar5, #8
+       BNE     $Lfdest_unaligned
+       CMP     D1Ar3, #8
+       BLT     $Lfbyte_loop
+$Lfsrc_unaligned:                      ! dst aligned, src not: shift-and-merge copy
+       ! adjust A1.2
+       LSR     D1Ar5, D1Ar3, #3
+
+       MOV     D0Ar4, A1.2
+       MOV     D0Ar6, A1.2
+       ANDMB   D0Ar4, D0Ar4, #0xfff8   ! round src down to 8-byte boundary
+       MOV     A1.2, D0Ar4
+
+       ! A0.2 dst 64-bit is aligned
+       SUB     D0Ar6, D0Ar6, D0Ar4     ! gap = src - aligned src
+       ! keep the information for the later adjustment
+       MOVS    D0Ar4, D0Ar6
+
+       ! both aligned
+       BZ      $Lfaligned_loop
+
+       ! prefetch
+       GETL    D0Re0, D1Re0, [A1.2]
+
+       CMP     D0Ar6, #4
+       BLT     $Lfunaligned_1_2_3      ! gap of 1-3 bytes
+       BZ      $Lfaligned_4            ! gap of exactly 4 bytes
+
+       SUB     D0Ar6, D0Ar6, #4
+       MULW    D0.6, D0Ar6, #8         ! gap size in bits
+       MOV     D1.6, #32
+       SUB     D1.6, D1.6, D0.6        ! complement of the gap size in bits
+
+$Lfunaligned_5_6_7:                    ! gap of 5-7 bytes: shift and merge word pairs
+       GETL    D0.7, D1.7, [++A1.2]
+       ! form 64-bit data in D0Re0, D1Re0
+       MOV     D0Re0, D1Re0
+       LSR     D0Re0, D0Re0, D0.6
+       MOV     D1Re0, D0.7
+       LSL     D1Re0, D1Re0, D1.6
+       MOV     D0.5, D1Re0
+       ADD     D0Re0, D0Re0, D0.5
+
+       MOV     D0.5, D0.7
+       LSR     D0.5, D0.5, D0.6
+       MOV     D1Re0, D1.7
+       LSL     D1Re0, D1Re0, D1.6
+       MOV     D1.5, D0.5
+       ADD     D1Re0, D1Re0, D1.5
+
+       SETL    [A0.2++], D0Re0, D1Re0
+       MOV     D0Re0, D0.7             ! carry fetched words into next iteration
+       MOV     D1Re0, D1.7
+       SUBS    D1Ar5, D1Ar5, #1
+       BNE     $Lfunaligned_5_6_7
+
+       ANDS    D1Ar3, D1Ar3, #7
+       BZ      $Lfbyte_loop_exit
+       ! Adjust A1.2
+       ADD     A1.2, A1.2, D0Ar4
+       B       $Lfbyte_loop
+
+$Lfunaligned_1_2_3:                    ! gap of 1-3 bytes
+       MULW    D0.6, D0Ar6, #8         ! gap size in bits
+       MOV     D1.6, #32
+       SUB     D1.6, D1.6, D0.6        ! complement of the gap size in bits
+
+$Lfunaligned_1_2_3_loop:
+       GETL    D0.7, D1.7, [++A1.2]
+       ! form 64-bit data in D0Re0, D1Re0
+       LSR     D0Re0, D0Re0, D0.6
+       MOV     D1.5, D1Re0
+       LSL     D1Re0, D1Re0, D1.6
+       MOV     D0.5, D1Re0
+       ADD     D0Re0, D0Re0, D0.5
+
+       MOV     D0.5, D1.5
+       LSR     D0.5, D0.5, D0.6
+       MOV     D1Re0, D0.7
+       LSL     D1Re0, D1Re0, D1.6
+       MOV     D1.5, D0.5
+       ADD     D1Re0, D1Re0, D1.5
+
+       SETL    [A0.2++], D0Re0, D1Re0
+       MOV     D0Re0, D0.7             ! carry fetched words into next iteration
+       MOV     D1Re0, D1.7
+       SUBS    D1Ar5, D1Ar5, #1
+       BNE     $Lfunaligned_1_2_3_loop
+
+       ANDS    D1Ar3, D1Ar3, #7
+       BZ      $Lfbyte_loop_exit
+       ! Adjust A1.2
+       ADD     A1.2, A1.2, D0Ar4
+       B       $Lfbyte_loop
+
+$Lfaligned_4:                          ! gap of 4 bytes: register shuffle, no shifts
+       GETL    D0.7, D1.7, [++A1.2]
+       MOV     D0Re0, D1Re0
+       MOV     D1Re0, D0.7
+       SETL    [A0.2++], D0Re0, D1Re0
+       MOV     D0Re0, D0.7
+       MOV     D1Re0, D1.7
+       SUBS    D1Ar5, D1Ar5, #1
+       BNE     $Lfaligned_4
+       ANDS    D1Ar3, D1Ar3, #7
+       BZ      $Lfbyte_loop_exit
+       ! Adjust A1.2
+       ADD     A1.2, A1.2, D0Ar4
+       B       $Lfbyte_loop
+
+       .size _memmove,.-_memmove
diff --git a/arch/metag/lib/memset.S b/arch/metag/lib/memset.S
new file mode 100644 (file)
index 0000000..721085b
--- /dev/null
@@ -0,0 +1,86 @@
+!   Copyright (C) 2008-2012 Imagination Technologies Ltd.
+
+       .text
+       .global _memset
+       .type   _memset,function
+! D1Ar1 dst
+! D0Ar2 c
+! D1Ar3 cnt
+! D0Re0 dst
+_memset:
+       AND     D0Ar2,D0Ar2,#0xFF       ! Ensure a byte input value
+       MULW    D0Ar2,D0Ar2,#0x0101     ! Duplicate byte value into bits 0-15
+       ANDS    D0Ar4,D1Ar1,#7          ! Extract bottom LSBs of dst
+       LSL     D0Re0,D0Ar2,#16         ! Duplicate byte value into bits 16-31
+       ADD     A0.2,D0Ar2,D0Re0        ! Duplicate byte value into all 4 bytes (A0.2)
+       MOV     D0Re0,D1Ar1             ! Return dst
+       BZ      $LLongStub              ! if start address is aligned
+       ! start address is not aligned on an 8 byte boundary, so we
+       ! need the number of bytes up to the next 8 byte address
+       ! boundary, or the length of the string if less than 8, in D1Ar5
+       MOV     D0Ar2,#8                ! Need 8 - N in D1Ar5 ...
+       SUB     D1Ar5,D0Ar2,D0Ar4       !            ... subtract N
+       CMP     D1Ar3,D1Ar5
+       MOVMI   D1Ar5,D1Ar3             ! Clamp burst to total count if shorter
+       B       $LByteStub              ! dst is mis-aligned, do $LByteStub
+
+!
+! Preamble to LongLoop which generates 4*8 bytes per iteration (5 cycles)
+!
+$LLongStub:
+       LSRS    D0Ar2,D1Ar3,#5          ! Number of 32-byte chunks
+       AND     D1Ar3,D1Ar3,#0x1F       ! Remainder after 32-byte chunks
+       MOV     A1.2,A0.2               ! Pattern in both halves of the SETL pair
+       BEQ     $LLongishStub
+       SUB     TXRPT,D0Ar2,#1          ! Hardware repeat count for BR loop
+       CMP     D1Ar3,#0                ! Set flags now for the BZ after the loop
+$LLongLoop:
+       SETL    [D1Ar1++],A0.2,A1.2
+       SETL    [D1Ar1++],A0.2,A1.2
+       SETL    [D1Ar1++],A0.2,A1.2
+       SETL    [D1Ar1++],A0.2,A1.2
+       BR      $LLongLoop              ! Repeats while TXRPT counts down
+       BZ      $Lexit                  ! Done if no remainder
+!
+! Preamble to LongishLoop which generates 1*8 bytes per iteration (2 cycles)
+!
+$LLongishStub:
+       LSRS    D0Ar2,D1Ar3,#3          ! Number of 8-byte chunks
+       AND     D1Ar3,D1Ar3,#0x7        ! Remainder after 8-byte chunks
+       MOV     D1Ar5,D1Ar3             ! Burst size for $LByteStub
+       BEQ     $LByteStub
+       SUB     TXRPT,D0Ar2,#1          ! Hardware repeat count for BR loop
+       CMP     D1Ar3,#0                ! Set flags now for the BZ after the loop
+$LLongishLoop:
+       SETL    [D1Ar1++],A0.2,A1.2
+       BR      $LLongishLoop
+       BZ      $Lexit
+!
+! This does a byte structured burst of up to 7 bytes
+!
+!      D1Ar1 should point to the location required
+!      D1Ar3 should be the remaining total byte count
+!      D1Ar5 should be burst size (<= D1Ar3)
+!
+$LByteStub:
+       SUBS    D1Ar3,D1Ar3,D1Ar5       ! Reduce count
+       ADD     D1Ar1,D1Ar1,D1Ar5       ! Advance pointer to end of area
+       MULW    D1Ar5,D1Ar5,#4          ! Scale to (1*4), (2*4), (3*4)
+       SUB     D1Ar5,D1Ar5,#(8*4)      ! Rebase to -(7*4), -(6*4), -(5*4), ...
+       MOV     A1.2,D1Ar5
+       SUB     PC,CPC1,A1.2            ! Jump into table below
+       SETB    [D1Ar1+#(-7)],A0.2
+       SETB    [D1Ar1+#(-6)],A0.2
+       SETB    [D1Ar1+#(-5)],A0.2
+       SETB    [D1Ar1+#(-4)],A0.2
+       SETB    [D1Ar1+#(-3)],A0.2
+       SETB    [D1Ar1+#(-2)],A0.2
+       SETB    [D1Ar1+#(-1)],A0.2
+!
+! Return if all data has been output, otherwise do $LLongStub
+!
+       BNZ     $LLongStub
+$Lexit:
+       MOV     PC,D1RtP
+        .size _memset,.-_memset
+
diff --git a/arch/metag/lib/modsi3.S b/arch/metag/lib/modsi3.S
new file mode 100644 (file)
index 0000000..210cfa8
--- /dev/null
@@ -0,0 +1,38 @@
+! Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
+!               Imagination Technologies Ltd
+!
+! Integer modulus routines.
+!
+!!
+!! 32-bit modulus unsigned i/p - passed unsigned 32-bit numbers
+!!
+       .text
+       .global ___umodsi3
+       .type   ___umodsi3,function
+       .align  2
+___umodsi3:                            ! unsigned 32-bit modulus: A % B
+       MOV     D0FrT,D1RtP             ! Save original return address
+       CALLR   D1RtP,___udivsi3        ! unsigned divide; remainder left in D1Ar1
+       MOV     D1RtP,D0FrT             ! Recover return address
+       MOV     D0Re0,D1Ar1             ! Return remainder
+       MOV     PC,D1RtP
+       .size   ___umodsi3,.-___umodsi3
+
+!!
+!! 32-bit modulus signed i/p - passed signed 32-bit numbers
+!!
+       .global ___modsi3
+       .type   ___modsi3,function
+       .align  2
+___modsi3:                             ! signed 32-bit modulus: A % B (sign follows A)
+       MOV     D0FrT,D1RtP             ! Save original return address
+       MOV     A0.2,D1Ar1              ! Save A in A0.2
+       CALLR   D1RtP,___divsi3         ! signed divide; remainder left in D1Ar1
+       MOV     D1RtP,D0FrT             ! Recover return address
+       MOV     D1Re0,A0.2              ! Recover A
+       MOV     D0Re0,D1Ar1             ! Return remainder
+       ORS     D1Re0,D1Re0,D1Re0       ! Was A negative?
+       NEG     D1Ar1,D1Ar1             ! Negate remainder
+       MOVMI   D0Re0,D1Ar1             ! Return neg remainder if A was negative
+       MOV     PC, D1RtP
+       .size   ___modsi3,.-___modsi3
diff --git a/arch/metag/lib/muldi3.S b/arch/metag/lib/muldi3.S
new file mode 100644 (file)
index 0000000..ee66ca8
--- /dev/null
@@ -0,0 +1,44 @@
+! Copyright (C) 2012 by Imagination Technologies Ltd.
+!
+! 64-bit multiply routine.
+!
+
+!
+! 64-bit signed/unsigned multiply
+!
+! A = D1Ar1:D0Ar2 = a 2^48 + b 2^32 +  c 2^16 + d 2^0
+!
+! B = D1Ar3:D0Ar4 = w 2^48 + x 2^32 +  y 2^16 + z 2^0
+!
+       .text
+       .global ___muldi3
+       .type   ___muldi3,function
+
+___muldi3:                             ! 64x64 -> 64-bit multiply via 16-bit partial products
+       MULD    D1Re0,D1Ar1,D0Ar4       ! (a 2^48 + b 2^32)(y 2^16 + z 2^0)
+       MULD    D0Re0,D0Ar2,D1Ar3       ! (w 2^48 + x 2^32)(c 2^16 + d 2^0)
+       ADD     D1Re0,D1Re0,D0Re0       ! accumulate high-word cross products
+
+       MULW    D0Re0,D0Ar2,D0Ar4       ! (d 2^0)  * (z 2^0)
+
+       RTDW    D0Ar2,D0Ar2             ! rotate A.lo to expose the c half
+       MULW    D0Ar6,D0Ar2,D0Ar4       ! (c 2^16)(z 2^0)
+       LSR     D1Ar5,D0Ar6,#16         ! split partial product across the 32-bit boundary
+       LSL     D0Ar6,D0Ar6,#16
+       ADDS    D0Re0,D0Re0,D0Ar6       ! add low half ...
+       ADDCS   D1Re0,D1Re0,#1          ! ... propagating carry into the high word
+       RTDW    D0Ar4,D0Ar4             ! rotate B.lo to expose the y half
+       ADD     D1Re0,D1Re0,D1Ar5
+
+       MULW    D0Ar6,D0Ar2,D0Ar4       ! (c 2^16)(y 2^16)
+       ADD     D1Re0,D1Re0,D0Ar6
+
+       RTDW    D0Ar2,D0Ar2             ! rotate A.lo back (d half again)
+       MULW    D0Ar6,D0Ar2,D0Ar4       ! (d 2^0)(y 2^16)
+       LSR     D1Ar5,D0Ar6,#16
+       LSL     D0Ar6,D0Ar6,#16
+       ADDS    D0Re0,D0Re0,D0Ar6
+       ADD     D1Re0,D1Re0,D1Ar5
+       ADDCS   D1Re0,D1Re0,#1          ! carry from the ADDS above (plain ADD leaves flags intact)
+       MOV     PC, D1RtP
+       .size ___muldi3,.-___muldi3
diff --git a/arch/metag/lib/ucmpdi2.S b/arch/metag/lib/ucmpdi2.S
new file mode 100644 (file)
index 0000000..6f3347f
--- /dev/null
@@ -0,0 +1,27 @@
+! Copyright (C) 2012 by Imagination Technologies Ltd.
+!
+! 64-bit unsigned compare routine.
+!
+
+       .text
+       .global ___ucmpdi2
+       .type   ___ucmpdi2,function
+
+!         low    high
+! u64 a  (D0Ar2, D1Ar1)
+! u64 b  (D0Ar4, D1Ar3)
+___ucmpdi2:                            ! returns 0 if a < b, 1 if a == b, 2 if a > b
+       ! start at 1 (equal) and conditionally increment or decrement
+       MOV     D0Re0,#1
+
+       ! high words
+       CMP     D1Ar1,D1Ar3
+       ! or if equal, low words
+       CMPEQ   D0Ar2,D0Ar4
+
+       ! unsigned compare
+       SUBLO   D0Re0,D0Re0,#1          ! a < b  -> 0
+       ADDHI   D0Re0,D0Re0,#1          ! a > b  -> 2
+
+       MOV     PC,D1RtP
+       .size ___ucmpdi2,.-___ucmpdi2
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
new file mode 100644 (file)
index 0000000..b3ebfe9
--- /dev/null
@@ -0,0 +1,1354 @@
+/*
+ * User address space access functions.
+ * The non-inlined parts of asm-metag/uaccess.h are here.
+ *
+ * Copyright (C) 2006, Imagination Technologies.
+ * Copyright (C) 2000, Axis Communications AB.
+ *
+ * Written by Hans-Peter Nilsson.
+ * Pieces used from memcpy, originally by Kenny Ranerup long time ago.
+ * Modified for Meta by Will Newton.
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <asm/cache.h>                 /* def of L1_CACHE_BYTES */
+
+#define USE_RAPF
+#define RAPF_MIN_BUF_SIZE      (3*L1_CACHE_BYTES)
+
+
+/* The "double write" in this code is because the Meta will not fault
+ * immediately unless the memory pipe is forced to by e.g. a data stall or
+ * another memory op. The second write should be discarded by the write
+ * combiner so should have virtually no cost.
+ */
+
+#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) /* copy template: COPY body, FIXUP fault handler, TENTRY __ex_table entries */ \
+       asm volatile (                                           \
+               COPY                                             \
+               "1:\n"                                           /* resume point after a fault */ \
+               "       .section .fixup,\"ax\"\n"                \
+               "       MOV D1Ar1,#0\n"                          \
+               FIXUP                                            /* adjusts ret by the uncopied amount */ \
+               "       MOVT    D1Ar1,#HI(1b)\n"                 \
+               "       JUMP    D1Ar1,#LO(1b)\n"                 /* jump back to label 1 */ \
+               "       .previous\n"                             \
+               "       .section __ex_table,\"a\"\n"             /* maps faulting address -> fixup label */ \
+               TENTRY                                           \
+               "       .previous\n"                             \
+               : "=r" (to), "=r" (from), "=r" (ret)             \
+               : "0" (to), "1" (from), "2" (ret)                \
+               : "D1Ar1", "memory")
+
+
+#define __asm_copy_to_user_1(to, from, ret)    /* copy 1 byte; double SETB per "double write" note above */ \
+       __asm_copy_user_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"    \
+               "       SETB [%0],D1Ar1\n"      \
+               "2:     SETB [%0++],D1Ar1\n",   \
+               "3:     ADD  %2,%2,#1\n",       /* fixup: 1 byte uncopied */ \
+               "       .long 2b,3b\n")
+
+#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) /* 2 bytes, then caller-supplied tail */ \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "       SETW [%0],D1Ar1\n"              \
+               "2:     SETW [%0++],D1Ar1\n" COPY,      \
+               "3:     ADD  %2,%2,#2\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+
+#define __asm_copy_to_user_2(to, from, ret) /* copy 2 bytes */ \
+       __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_to_user_3(to, from, ret) /* copy 2+1 bytes */ \
+       __asm_copy_to_user_2x_cont(to, from, ret,       \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "       SETB [%0],D1Ar1\n"              \
+               "4:     SETB [%0++],D1Ar1\n",           \
+               "5:     ADD  %2,%2,#1\n",               \
+               "       .long 4b,5b\n")
+
+#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) /* 4 bytes, then caller-supplied tail */ \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "       SETD [%0],D1Ar1\n"              \
+               "2:     SETD [%0++],D1Ar1\n" COPY,      \
+               "3:     ADD  %2,%2,#4\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+
+#define __asm_copy_to_user_4(to, from, ret) /* copy 4 bytes */ \
+       __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_to_user_5(to, from, ret) /* copy 4+1 bytes */ \
+       __asm_copy_to_user_4x_cont(to, from, ret,       \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "       SETB [%0],D1Ar1\n"              \
+               "4:     SETB [%0++],D1Ar1\n",           \
+               "5:     ADD  %2,%2,#1\n",               \
+               "       .long 4b,5b\n")
+
+#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) /* 4+2 bytes, then caller-supplied tail */ \
+       __asm_copy_to_user_4x_cont(to, from, ret,       \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "       SETW [%0],D1Ar1\n"              \
+               "4:     SETW [%0++],D1Ar1\n" COPY,      \
+               "5:     ADD  %2,%2,#2\n" FIXUP,         \
+               "       .long 4b,5b\n" TENTRY)
+
+#define __asm_copy_to_user_6(to, from, ret) /* copy 6 bytes */ \
+       __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_to_user_7(to, from, ret) /* copy 6+1 bytes */ \
+       __asm_copy_to_user_6x_cont(to, from, ret,       \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "       SETB [%0],D1Ar1\n"              \
+               "6:     SETB [%0++],D1Ar1\n",           \
+               "7:     ADD  %2,%2,#1\n",               \
+               "       .long 6b,7b\n")
+
+#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) /* 4+4 bytes, then caller-supplied tail */ \
+       __asm_copy_to_user_4x_cont(to, from, ret,       \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "       SETD [%0],D1Ar1\n"              \
+               "4:     SETD [%0++],D1Ar1\n" COPY,      \
+               "5:     ADD  %2,%2,#4\n"  FIXUP,        \
+               "       .long 4b,5b\n" TENTRY)
+
+#define __asm_copy_to_user_8(to, from, ret) /* copy 8 bytes */ \
+       __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_to_user_9(to, from, ret) /* copy 8+1 bytes */ \
+       __asm_copy_to_user_8x_cont(to, from, ret,       \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "       SETB [%0],D1Ar1\n"              \
+               "6:     SETB [%0++],D1Ar1\n",           \
+               "7:     ADD  %2,%2,#1\n",               \
+               "       .long 6b,7b\n")
+
+#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) /* 8+2 bytes, then caller-supplied tail */ \
+       __asm_copy_to_user_8x_cont(to, from, ret,       \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "       SETW [%0],D1Ar1\n"              \
+               "6:     SETW [%0++],D1Ar1\n" COPY,      \
+               "7:     ADD  %2,%2,#2\n" FIXUP,         \
+               "       .long 6b,7b\n" TENTRY)
+
+#define __asm_copy_to_user_10(to, from, ret) /* copy 10 bytes */ \
+       __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_to_user_11(to, from, ret) /* copy 10+1 bytes */ \
+       __asm_copy_to_user_10x_cont(to, from, ret,      \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "       SETB [%0],D1Ar1\n"              \
+               "8:     SETB [%0++],D1Ar1\n",           \
+               "9:     ADD  %2,%2,#1\n",               \
+               "       .long 8b,9b\n")
+
+#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) /* 8+4 bytes, then caller-supplied tail */ \
+       __asm_copy_to_user_8x_cont(to, from, ret,       \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "       SETD [%0],D1Ar1\n"              \
+               "6:     SETD [%0++],D1Ar1\n" COPY,      \
+               "7:     ADD  %2,%2,#4\n" FIXUP,         \
+               "       .long 6b,7b\n" TENTRY)
+#define __asm_copy_to_user_12(to, from, ret) /* copy 12 bytes */ \
+       __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_to_user_13(to, from, ret) /* copy 12+1 bytes */ \
+       __asm_copy_to_user_12x_cont(to, from, ret,      \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "       SETB [%0],D1Ar1\n"              \
+               "8:     SETB [%0++],D1Ar1\n",           \
+               "9:     ADD  %2,%2,#1\n",               \
+               "       .long 8b,9b\n")
+
+#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) /* 12+2 bytes, then caller-supplied tail */ \
+       __asm_copy_to_user_12x_cont(to, from, ret,      \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "       SETW [%0],D1Ar1\n"              \
+               "8:     SETW [%0++],D1Ar1\n" COPY,      \
+               "9:     ADD  %2,%2,#2\n" FIXUP,         \
+               "       .long 8b,9b\n" TENTRY)
+
+#define __asm_copy_to_user_14(to, from, ret) /* copy 14 bytes */ \
+       __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_to_user_15(to, from, ret) /* copy 14+1 bytes */ \
+       __asm_copy_to_user_14x_cont(to, from, ret,      \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "       SETB [%0],D1Ar1\n"              \
+               "10:    SETB [%0++],D1Ar1\n",           \
+               "11:    ADD  %2,%2,#1\n",               \
+               "       .long 10b,11b\n")
+
+#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) /* 12+4 bytes, then caller-supplied tail */ \
+       __asm_copy_to_user_12x_cont(to, from, ret,      \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "       SETD [%0],D1Ar1\n"              \
+               "8:     SETD [%0++],D1Ar1\n" COPY,      \
+               "9:     ADD  %2,%2,#4\n" FIXUP,         \
+               "       .long 8b,9b\n" TENTRY)
+
+#define __asm_copy_to_user_16(to, from, ret) /* copy 16 bytes */ \
+               __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_to_user_8x64(to, from, ret) /* copy 8 bytes as one 64-bit transfer */ \
+       asm volatile (                                  \
+               "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
+               "       SETL [%0],D0Ar2,D1Ar1\n"        /* double write per note at top of file */ \
+               "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "1:\n"                                  /* resume point after a fault */ \
+               "       .section .fixup,\"ax\"\n"       \
+               "3:     ADD  %2,%2,#8\n"                /* fixup: 8 bytes uncopied */ \
+               "       MOVT    D0Ar2,#HI(1b)\n"        \
+               "       JUMP    D0Ar2,#LO(1b)\n"        \
+               "       .previous\n"                    \
+               "       .section __ex_table,\"a\"\n"    \
+               "       .long 2b,3b\n"                  \
+               "       .previous\n"                    \
+               : "=r" (to), "=r" (from), "=r" (ret)    \
+               : "0" (to), "1" (from), "2" (ret)       \
+               : "D1Ar1", "D0Ar2", "memory")
+
+/*
+ *     optimized copying loop using RAPF when 64 bit aligned
+ *
+ *     n               will be automatically decremented inside the loop
+ *     ret             will be left intact. if error occurs we will rewind
+ *                     so that the original non optimized code will fill up
+ *                     this value correctly.
+ *
+ *     on fault:
+ *             >       n will hold total number of uncopied bytes
+ *
+ *             >       {'to','from'} will be rewind back so that
+ *                     the non-optimized code will do the proper fix up
+ *
+ *     DCACHE drops the cacheline which helps in reducing cache
+ *     pollution.
+ *
+ *     We introduce an extra SETL at the end of the loop to
+ *     ensure we don't fall off the loop before we catch all
+ *     errors.
+ *
+ *     NOTICE:
+ *             LSM_STEP in TXSTATUS must be cleared in fix up code.
+ *             since we're using M{S,G}ETL, a fault might happen at
+ *             any address in the middle of M{S,G}ETL causing
+ *             the value of LSM_STEP to be incorrect which can
+ *             cause subsequent use of M{S,G}ET{L,D} to go wrong.
+ *             ie: if LSM_STEP was 1 when a fault occurs, the
+ *             next call to M{S,G}ET{L,D} will skip the first
+ *             copy/getting as it thinks that the first one has already
+ *             been done.
+ *
+ */
+#define __asm_copy_user_64bit_rapf_loop(                               \
+               to, from, ret, n, id, FIXUP)    /* RAPF-prefetched copy, 64 bytes per iteration */ \
+       asm volatile (                                                  \
+               ".balign 8\n"                                           \
+               "MOV    RAPF, %1\n"                                     \
+               "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    /* save scratch register pairs on stack */ \
+               "MOV    D0Ar6, #0\n"                                    \
+               "LSR    D1Ar5, %3, #6\n"                                /* number of 64-byte chunks */ \
+               "SUB    TXRPT, D1Ar5, #2\n"                             /* hardware loop count; final chunk handled after the loop */ \
+               "MOV    RAPF, %1\n"                                     \
+               "$Lloop"id":\n"                                         \
+               "ADD    RAPF, %1, #64\n"                                /* prefetch the next 64 bytes */ \
+               "21:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "22:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #32\n"                                  \
+               "23:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "24:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #32\n"                                  \
+               "DCACHE [%1+#-64], D0Ar6\n"                             /* drop copied line to reduce cache pollution */ \
+               "BR     $Lloop"id"\n"                                   \
+                                                                       \
+               "MOV    RAPF, %1\n"                                     /* tail: final 64 bytes outside the repeat loop */ \
+               "25:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "26:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #32\n"                                  \
+               "27:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "28:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %0, %0, #8\n"                                   /* redo last 8 bytes via SETL so a trailing fault is caught */ \
+               "29:\n"                                                 \
+               "SETL   [%0++], D0.7, D1.7\n"                           \
+               "SUB    %3, %3, #32\n"                                  \
+               "1:"                                                    \
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  /* restore scratch register pairs */ \
+               "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
+               "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
+               "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
+               "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
+               "SUB A0StP, A0StP, #40\n"                               \
+               "       .section .fixup,\"ax\"\n"                       \
+               "4:\n"                                                  \
+               "       ADD     %0, %0, #8\n"                           /* fault in final SETL: undo the rewind of %0 */ \
+               "3:\n"                                                  \
+               "       MOV     D0Ar2, TXSTATUS\n"                      \
+               "       MOV     D1Ar1, TXSTATUS\n"                      \
+               "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            /* clear LSM_STEP (see NOTICE above) */ \
+               "       MOV     TXSTATUS, D1Ar1\n"                      \
+                       FIXUP                                           /* caller-supplied fix up */ \
+               "       MOVT    D0Ar2,#HI(1b)\n"                        \
+               "       JUMP    D0Ar2,#LO(1b)\n"                        \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .long 21b,3b\n"                                 \
+               "       .long 22b,3b\n"                                 \
+               "       .long 23b,3b\n"                                 \
+               "       .long 24b,3b\n"                                 \
+               "       .long 25b,3b\n"                                 \
+               "       .long 26b,3b\n"                                 \
+               "       .long 27b,3b\n"                                 \
+               "       .long 28b,3b\n"                                 \
+               "       .long 29b,4b\n"                                 \
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+               : "D1Ar1", "D0Ar2", "memory")
+
+/*     rewind 'to' and 'from'  pointers when a fault occurs
+ *
+ *     Rationale:
+ *             A fault always occurs on writing to user buffer. A fault
+ *             is at a single address, so we need to rewind by only 4
+ *             bytes.
+ *             Since we do a complete read from kernel buffer before
+ *             writing, we need to rewind it also. The amount to be
+ *             rewound equals the number of faulty writes in MSETL
+ *             which is: [4 - (LSM_STEP-1)]*8
+ *             LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ *             and stored in D0Ar2
+ *
+ *             NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *                     LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ *                     a fault happens at the 4th write, LSM_STEP will be 0
+ *                     instead of 4. The code copes with that.
+ *
+ *             n is updated by the number of successful writes, which is:
+ *             n = n - (LSM_STEP-1)*8
+ */
+#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
+       __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #8\n"                             \
+               /* ANDS (not AND): the ADDZ below is conditional on the */\
+               /* Z flag, which must reflect LSM_STEP == 0 here (fault */\
+               /* on the last transfer); plain AND leaves stale flags. */\
+               "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
+               "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
+               "SUB    D0Ar2, D0Ar2, #1\n"                             \
+               "MOV    D1Ar1, #4\n"                                    \
+               "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
+               "LSL    D0Ar2, D0Ar2, #3\n"                             \
+               "LSL    D1Ar1, D1Ar1, #3\n"                             \
+               "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
+               "SUB    %0, %0, #8\n"                                   \
+               "SUB    %1,     %1,D0Ar2\n"                             \
+               "SUB    %3, %3, D1Ar1\n")
+
+/*
+ *     optimized copying loop using RAPF when 32 bit aligned
+ *
+ *     n               will be automatically decremented inside the loop
+ *     ret             will be left intact. if error occurs we will rewind
+ *                     so that the original non optimized code will fill up
+ *                     this value correctly.
+ *
+ *     on fault:
+ *             >       n will hold total number of uncopied bytes
+ *
+ *             >       {'to','from'} will be rewind back so that
+ *                     the non-optimized code will do the proper fix up
+ *
+ *     DCACHE drops the cacheline which helps in reducing cache
+ *     pollution.
+ *
+ *     We introduce an extra SETD at the end of the loop to
+ *     ensure we don't fall off the loop before we catch all
+ *     errors.
+ *
+ *     NOTICE:
+ *             LSM_STEP in TXSTATUS must be cleared in fix up code.
+ *             since we're using M{S,G}ETL, a fault might happen at
+ *             any address in the middle of M{S,G}ETL causing
+ *             the value of LSM_STEP to be incorrect which can
+ *             cause subsequent use of M{S,G}ET{L,D} to go wrong.
+ *             ie: if LSM_STEP was 1 when a fault occurs, the
+ *             next call to M{S,G}ET{L,D} will skip the first
+ *             copy/getting as it thinks that the first one has already
+ *             been done.
+ *
+ */
+#define __asm_copy_user_32bit_rapf_loop(                               \
+                       to,     from, ret, n, id, FIXUP)                \
+       asm volatile (                                                  \
+               /* save D0Ar6/D0FrT/D0.5-7 (with their D1 pairs) on A0StP */\
+               ".balign 8\n"                                           \
+               "MOV    RAPF, %1\n"                                     \
+               "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
+               "MOV    D0Ar6, #0\n"                                    \
+               "LSR    D1Ar5, %3, #6\n"                                \
+               "SUB    TXRPT, D1Ar5, #2\n"                             \
+               "MOV    RAPF, %1\n"                                     \
+       "$Lloop"id":\n"                                                 \
+               /* main loop: prefetch next line, 4x MGETD/MSETD = 64 bytes */\
+               "ADD    RAPF, %1, #64\n"                                \
+               "21:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "22:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+               "23:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "24:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+               "25:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "26:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+               "27:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "28:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "BR     $Lloop"id"\n"                                   \
+                                                                       \
+               /* tail: final 64 bytes, with an extra SETD at 37 so a  */\
+               /* fault on the very last word is still caught (see     */\
+               /* NOTICE above); %0 is stepped back #4 to compensate   */\
+               "MOV    RAPF, %1\n"                                     \
+               "29:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "30:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+               "31:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "32:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+               "33:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "34:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+               "35:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "36:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %0, %0, #4\n"                                   \
+               "37:\n"                                                 \
+               "SETD   [%0++], D0.7\n"                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "1:"                                                    \
+               /* restore saved register pairs, pop the 40-byte frame */\
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
+               "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
+               "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
+               "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
+               "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
+               "SUB A0StP, A0StP, #40\n"                               \
+               /* fault path: clear LSM_STEP (TXSTATUS bits 10:8, mask */\
+               /* 0xFFFFF8FF), run caller's FIXUP, resume at 1b        */\
+               "       .section .fixup,\"ax\"\n"                       \
+               "4:\n"                                                  \
+               "       ADD             %0, %0, #4\n"                   \
+               "3:\n"                                                  \
+               "       MOV     D0Ar2, TXSTATUS\n"                      \
+               "       MOV     D1Ar1, TXSTATUS\n"                      \
+               "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
+               "       MOV     TXSTATUS, D1Ar1\n"                      \
+                       FIXUP                                           \
+               "       MOVT    D0Ar2,#HI(1b)\n"                        \
+               "       JUMP    D0Ar2,#LO(1b)\n"                        \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .long 21b,3b\n"                                 \
+               "       .long 22b,3b\n"                                 \
+               "       .long 23b,3b\n"                                 \
+               "       .long 24b,3b\n"                                 \
+               "       .long 25b,3b\n"                                 \
+               "       .long 26b,3b\n"                                 \
+               "       .long 27b,3b\n"                                 \
+               "       .long 28b,3b\n"                                 \
+               "       .long 29b,3b\n"                                 \
+               "       .long 30b,3b\n"                                 \
+               "       .long 31b,3b\n"                                 \
+               "       .long 32b,3b\n"                                 \
+               "       .long 33b,3b\n"                                 \
+               "       .long 34b,3b\n"                                 \
+               "       .long 35b,3b\n"                                 \
+               "       .long 36b,3b\n"                                 \
+               "       .long 37b,4b\n"                                 \
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+               : "D1Ar1", "D0Ar2", "memory")
+
+/*     rewind 'to' and 'from'  pointers when a fault occurs
+ *
+ *     Rationale:
+ *             A fault always occurs on writing to user buffer. A fault
+ *             is at a single address, so we need to rewind by only 4
+ *             bytes.
+ *             Since we do a complete read from kernel buffer before
+ *             writing, we need to rewind it also. The amount to be
+ *             rewind equals the number of faulty writes in MSETD
+ *             which is: [4 - (LSM_STEP-1)]*4
+ *             LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ *             and stored in D0Ar2
+ *
+ *             NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *                     LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ *                     a fault happens at the 4th write, LSM_STEP will be 0
+ *                     instead of 4. The code copes with that.
+ *
+ *             n is updated by the number of successful writes, which is:
+ *             n = n - (LSM_STEP-1)*4
+ */
+#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
+       __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #8\n"                             \
+               /* ANDS (not AND): the ADDZ below is conditional on the */\
+               /* Z flag, which must reflect LSM_STEP == 0 here (fault */\
+               /* on the last transfer); plain AND leaves stale flags. */\
+               "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
+               "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
+               "SUB    D0Ar2, D0Ar2, #1\n"                             \
+               "MOV    D1Ar1, #4\n"                                    \
+               "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
+               "LSL    D0Ar2, D0Ar2, #2\n"                             \
+               "LSL    D1Ar1, D1Ar1, #2\n"                             \
+               "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
+               "SUB    %0, %0, #4\n"                                   \
+               "SUB    %1,     %1,     D0Ar2\n"                        \
+               "SUB    %3, %3, D1Ar1\n")
+
+unsigned long __copy_user(void __user *pdst, const void *psrc,
+                         unsigned long n)
+{
+       register char __user *dst asm ("A0.2") = pdst;
+       register const char *src asm ("A1.2") = psrc;
+       unsigned long retn = 0;
+
+       if (n == 0)
+               return 0;
+
+       if ((unsigned long) src & 1) {
+               __asm_copy_to_user_1(dst, src, retn);
+               n--;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+               while (n > 0) {
+                       __asm_copy_to_user_1(dst, src, retn);
+                       n--;
+               }
+       }
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_to_user_2(dst, src, retn);
+               n -= 2;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+               while (n >= 2) {
+                       __asm_copy_to_user_2(dst, src, retn);
+                       n -= 2;
+               }
+       }
+
+#ifdef USE_RAPF
+       /* 64 bit copy loop */
+       if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
+               if (n >= RAPF_MIN_BUF_SIZE) {
+                       /* copy user using 64 bit rapf copy */
+                       __asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
+                                                       n, "64cu");
+               }
+               while (n >= 8) {
+                       __asm_copy_to_user_8x64(dst, src, retn);
+                       n -= 8;
+               }
+       }
+       if (n >= RAPF_MIN_BUF_SIZE) {
+               /* copy user using 32 bit rapf copy */
+               __asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
+       }
+#else
+       /* 64 bit copy loop */
+       if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
+               while (n >= 8) {
+                       __asm_copy_to_user_8x64(dst, src, retn);
+                       n -= 8;
+               }
+       }
+#endif
+
+       while (n >= 16) {
+               __asm_copy_to_user_16(dst, src, retn);
+               n -= 16;
+       }
+
+       while (n >= 4) {
+               __asm_copy_to_user_4(dst, src, retn);
+               n -= 4;
+       }
+
+       switch (n) {
+       case 0:
+               break;
+       case 1:
+               __asm_copy_to_user_1(dst, src, retn);
+               break;
+       case 2:
+               __asm_copy_to_user_2(dst, src, retn);
+               break;
+       case 3:
+               __asm_copy_to_user_3(dst, src, retn);
+               break;
+       }
+
+       return retn;
+}
+EXPORT_SYMBOL(__copy_user);
+
+/*
+ * Fixed-size copy-from-user building blocks.  Each *_Nx_cont macro
+ * chains onto a smaller one, appending one GET from userspace plus a
+ * SET to the kernel buffer, a fixup that bumps ret by the element size
+ * (note: the fixup still performs the SET - with whatever value is in
+ * D1Ar1 - and advances the destination), and an __ex_table entry.
+ * The plain *_N macros close the chain with empty COPY/FIXUP/TENTRY.
+ */
+#define __asm_copy_from_user_1(to, from, ret) \
+       __asm_copy_user_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"    \
+               "2:     SETB [%0++],D1Ar1\n",   \
+               "3:     ADD  %2,%2,#1\n"        \
+               "       SETB [%0++],D1Ar1\n",   \
+               "       .long 2b,3b\n")
+
+#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "2:     SETW [%0++],D1Ar1\n" COPY,      \
+               "3:     ADD  %2,%2,#2\n"                \
+               "       SETW [%0++],D1Ar1\n" FIXUP,     \
+               "       .long 2b,3b\n" TENTRY)
+
+#define __asm_copy_from_user_2(to, from, ret) \
+       __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_from_user_3(to, from, ret)          \
+       __asm_copy_from_user_2x_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "4:     SETB [%0++],D1Ar1\n",           \
+               "5:     ADD  %2,%2,#1\n"                \
+               "       SETB [%0++],D1Ar1\n",           \
+               "       .long 4b,5b\n")
+
+#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "2:     SETD [%0++],D1Ar1\n" COPY,      \
+               "3:     ADD  %2,%2,#4\n"                \
+               "       SETD [%0++],D1Ar1\n" FIXUP,     \
+               "       .long 2b,3b\n" TENTRY)
+
+#define __asm_copy_from_user_4(to, from, ret) \
+       __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_from_user_5(to, from, ret) \
+       __asm_copy_from_user_4x_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "4:     SETB [%0++],D1Ar1\n",           \
+               "5:     ADD  %2,%2,#1\n"                \
+               "       SETB [%0++],D1Ar1\n",           \
+               "       .long 4b,5b\n")
+
+#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_from_user_4x_cont(to, from, ret,     \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "4:     SETW [%0++],D1Ar1\n" COPY,      \
+               "5:     ADD  %2,%2,#2\n"                \
+               "       SETW [%0++],D1Ar1\n" FIXUP,     \
+               "       .long 4b,5b\n" TENTRY)
+
+#define __asm_copy_from_user_6(to, from, ret) \
+       __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_from_user_7(to, from, ret) \
+       __asm_copy_from_user_6x_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "6:     SETB [%0++],D1Ar1\n",           \
+               "7:     ADD  %2,%2,#1\n"                \
+               "       SETB [%0++],D1Ar1\n",           \
+               "       .long 6b,7b\n")
+
+#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_from_user_4x_cont(to, from, ret,     \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "4:     SETD [%0++],D1Ar1\n" COPY,      \
+               "5:     ADD  %2,%2,#4\n"                        \
+               "       SETD [%0++],D1Ar1\n" FIXUP,             \
+               "       .long 4b,5b\n" TENTRY)
+
+#define __asm_copy_from_user_8(to, from, ret) \
+       __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_from_user_9(to, from, ret) \
+       __asm_copy_from_user_8x_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "6:     SETB [%0++],D1Ar1\n",           \
+               "7:     ADD  %2,%2,#1\n"                \
+               "       SETB [%0++],D1Ar1\n",           \
+               "       .long 6b,7b\n")
+
+#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_from_user_8x_cont(to, from, ret,     \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "6:     SETW [%0++],D1Ar1\n" COPY,      \
+               "7:     ADD  %2,%2,#2\n"                \
+               "       SETW [%0++],D1Ar1\n" FIXUP,     \
+               "       .long 6b,7b\n" TENTRY)
+
+#define __asm_copy_from_user_10(to, from, ret) \
+       __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_from_user_11(to, from, ret)         \
+       __asm_copy_from_user_10x_cont(to, from, ret,    \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "8:     SETB [%0++],D1Ar1\n",           \
+               "9:     ADD  %2,%2,#1\n"                \
+               "       SETB [%0++],D1Ar1\n",           \
+               "       .long 8b,9b\n")
+
+#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_from_user_8x_cont(to, from, ret,     \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "6:     SETD [%0++],D1Ar1\n" COPY,      \
+               "7:     ADD  %2,%2,#4\n"                \
+               "       SETD [%0++],D1Ar1\n" FIXUP,     \
+               "       .long 6b,7b\n" TENTRY)
+
+#define __asm_copy_from_user_12(to, from, ret) \
+       __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_from_user_13(to, from, ret) \
+       __asm_copy_from_user_12x_cont(to, from, ret,    \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "8:     SETB [%0++],D1Ar1\n",           \
+               "9:     ADD  %2,%2,#1\n"                \
+               "       SETB [%0++],D1Ar1\n",           \
+               "       .long 8b,9b\n")
+
+#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_from_user_12x_cont(to, from, ret,    \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "8:     SETW [%0++],D1Ar1\n" COPY,      \
+               "9:     ADD  %2,%2,#2\n"                \
+               "       SETW [%0++],D1Ar1\n" FIXUP,     \
+               "       .long 8b,9b\n" TENTRY)
+
+#define __asm_copy_from_user_14(to, from, ret) \
+       __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
+
+#define __asm_copy_from_user_15(to, from, ret) \
+       __asm_copy_from_user_14x_cont(to, from, ret,    \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "10:    SETB [%0++],D1Ar1\n",           \
+               "11:    ADD  %2,%2,#1\n"                \
+               "       SETB [%0++],D1Ar1\n",           \
+               "       .long 10b,11b\n")
+
+#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_from_user_12x_cont(to, from, ret,    \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "8:     SETD [%0++],D1Ar1\n" COPY,      \
+               "9:     ADD  %2,%2,#4\n"                \
+               "       SETD [%0++],D1Ar1\n" FIXUP,     \
+               "       .long 8b,9b\n" TENTRY)
+
+#define __asm_copy_from_user_16(to, from, ret) \
+       __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
+
+/*
+ * Copy 8 bytes (one GETL/SETL pair) from userspace.  On a fault the
+ * fixup adds 8 to ret and writes a zero long-long to the kernel buffer
+ * (advancing the destination) before resuming at 1b.
+ * NOTE(review): only the instruction at 2: carries an __ex_table
+ * entry; the GETL of the user source has none of its own, so the fault
+ * is evidently expected to be reported at 2b - confirm against Meta's
+ * fault-reporting semantics before relying on this.
+ */
+#define __asm_copy_from_user_8x64(to, from, ret) \
+       asm volatile (                          \
+               "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
+               "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "1:\n"                                  \
+               "       .section .fixup,\"ax\"\n"       \
+               /* zero-fill the 8 bytes we failed to read */ \
+               "       MOV D1Ar1,#0\n"                 \
+               "       MOV D0Ar2,#0\n"                 \
+               "3:     ADD  %2,%2,#8\n"                \
+               "       SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "       MOVT    D0Ar2,#HI(1b)\n"        \
+               "       JUMP    D0Ar2,#LO(1b)\n"        \
+               "       .previous\n"                    \
+               "       .section __ex_table,\"a\"\n"    \
+               "       .long 2b,3b\n"                  \
+               "       .previous\n"                    \
+               : "=a" (to), "=r" (from), "=r" (ret)    \
+               : "0" (to), "1" (from), "2" (ret)       \
+               : "D1Ar1", "D0Ar2", "memory")
+
+/*     rewind 'from' pointer when a fault occurs
+ *
+ *     Rationale:
+ *             A fault occurs while reading from user buffer, which is the
+ *             source.
+ *             Since we don't write to kernel buffer until we read first,
+ *             the kernel buffer is at the right state and needn't be
+ *             corrected, but the source must be rewound to the start of
+ *             the faulting MGETL.  A fixed "SUB %1, %1, #8" is wrong:
+ *             the rewind amount is LSM_STEP*8 (LSM_STEP is TXSTATUS
+ *             bits 10:8, already in D0Ar2), with LSM_STEP == 0 meaning
+ *             the last of the 4 transfers, i.e. 32 bytes.
+ */
+#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)     \
+       __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #5\n"                             \
+               "ANDS   D0Ar2, D0Ar2, #0x38\n"                          \
+               "ADDZ   D0Ar2, D0Ar2, #32\n"                            \
+               "SUB    %1, %1, D0Ar2\n")
+
+/*     rewind 'from' pointer when a fault occurs
+ *
+ *     Rationale:
+ *             A fault occurs while reading from user buffer, which is the
+ *             source.
+ *             Since we don't write to kernel buffer until we read first,
+ *             the kernel buffer is at the right state and needn't be
+ *             corrected, but the source must be rewound to the start of
+ *             the faulting MGETD.  A fixed "SUB %1, %1, #4" is wrong:
+ *             the rewind amount is LSM_STEP*4 (LSM_STEP is TXSTATUS
+ *             bits 10:8, already in D0Ar2), with LSM_STEP == 0 meaning
+ *             the last of the 4 transfers, i.e. 16 bytes.
+ */
+#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)     \
+       __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #6\n"                             \
+               "ANDS   D0Ar2, D0Ar2, #0x1c\n"                          \
+               "ADDZ   D0Ar2, D0Ar2, #16\n"                            \
+               "SUB    %1, %1, D0Ar2\n")
+
+
+/* Copy from user to kernel, zeroing the bytes that were inaccessible in
+   userland.  The return-value is the number of bytes that were
+   inaccessible.  */
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                                 unsigned long n)
+{
+       register char *dst asm ("A0.2") = pdst;
+       register const char __user *src asm ("A1.2") = psrc;
+       unsigned long retn = 0;
+
+       if (n == 0)
+               return 0;
+
+       if ((unsigned long) src & 1) {
+               __asm_copy_from_user_1(dst, src, retn);
+               n--;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+               while (n > 0) {
+                       __asm_copy_from_user_1(dst, src, retn);
+                       n--;
+                       if (retn)
+                               goto copy_exception_bytes;
+               }
+       }
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_from_user_2(dst, src, retn);
+               n -= 2;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+               while (n >= 2) {
+                       __asm_copy_from_user_2(dst, src, retn);
+                       n -= 2;
+                       if (retn)
+                               goto copy_exception_bytes;
+               }
+       }
+
+       /* We only need one check after the unalignment-adjustments,
+          because if both adjustments were done, either both or
+          neither reference had an exception.  */
+       if (retn != 0)
+               goto copy_exception_bytes;
+
+#ifdef USE_RAPF
+       /* 64 bit copy loop */
+       if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
+               if (n >= RAPF_MIN_BUF_SIZE) {
+                       /* Copy using fast 64bit rapf */
+                       __asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
+                                                       n, "64cuz");
+               }
+               while (n >= 8) {
+                       __asm_copy_from_user_8x64(dst, src, retn);
+                       n -= 8;
+                       if (retn)
+                               goto copy_exception_bytes;
+               }
+       }
+
+       if (n >= RAPF_MIN_BUF_SIZE) {
+               /* Copy using fast 32bit rapf */
+               __asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
+                                               n, "32cuz");
+       }
+#else
+       /* 64 bit copy loop */
+       if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
+               while (n >= 8) {
+                       __asm_copy_from_user_8x64(dst, src, retn);
+                       n -= 8;
+                       if (retn)
+                               goto copy_exception_bytes;
+               }
+       }
+#endif
+
+       while (n >= 4) {
+               __asm_copy_from_user_4(dst, src, retn);
+               n -= 4;
+
+               if (retn)
+                       goto copy_exception_bytes;
+       }
+
+       /* If we get here, there were no memory read faults.  */
+       switch (n) {
+               /* These copies are at least "naturally aligned" (so we don't
+                  have to check each byte), due to the src alignment code.
+                  The *_3 case *will* get the correct count for retn.  */
+       case 0:
+               /* This case deliberately left in (if you have doubts check the
+                  generated assembly code).  */
+               break;
+       case 1:
+               __asm_copy_from_user_1(dst, src, retn);
+               break;
+       case 2:
+               __asm_copy_from_user_2(dst, src, retn);
+               break;
+       case 3:
+               __asm_copy_from_user_3(dst, src, retn);
+               break;
+       }
+
+       /* If we get here, retn correctly reflects the number of failing
+          bytes.  */
+       return retn;
+
+ copy_exception_bytes:
+       /* We already have "retn" bytes cleared, and need to clear the
+          remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
+          memset is preferred here, since this isn't speed-critical code and
+          we'd rather have this a leaf-function than calling memset.  */
+       {
+               char *endp;
+               for (endp = dst + n; dst < endp; dst++)
+                       *dst = 0;
+       }
+
+       return retn + n;
+}
+EXPORT_SYMBOL(__copy_user_zeroing);
+
+/*
+ * Zero 8 bytes at 'to'.  On a fault the fixup adds 8 to ret (bytes not
+ * cleared) and resumes at 1b.
+ * NOTE(review): the first SETL and the SETL at 2: store to the same
+ * address (only the second increments), and only 2: has an __ex_table
+ * entry - presumably the duplicated store exists so the fault is taken
+ * on the covered instance; confirm before restructuring.
+ */
+#define __asm_clear_8x64(to, ret) \
+       asm volatile (                                  \
+               "       MOV  D0Ar2,#0\n"                \
+               "       MOV  D1Ar1,#0\n"                \
+               "       SETL [%0],D0Ar2,D1Ar1\n"        \
+               "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "1:\n"                                  \
+               "       .section .fixup,\"ax\"\n"       \
+               "3:     ADD  %1,%1,#8\n"                \
+               "       MOVT    D0Ar2,#HI(1b)\n"        \
+               "       JUMP    D0Ar2,#LO(1b)\n"        \
+               "       .previous\n"                    \
+               "       .section __ex_table,\"a\"\n"    \
+               "       .long 2b,3b\n"                  \
+               "       .previous\n"                    \
+               : "=r" (to), "=r" (ret) \
+               : "0" (to), "1" (ret)   \
+               : "D1Ar1", "D0Ar2", "memory")
+
+/* Zero userspace.  */
+
+/*
+ * __asm_clear() - common skeleton for the fixed-size user-memset helpers.
+ * CLEAR is the SETx store sequence, FIXUP the .fixup code that bumps
+ * 'ret' by the number of bytes skipped on a fault, and TENTRY the
+ * matching __ex_table rows.  'to' is advanced past the cleared region;
+ * 'ret' accumulates the count of bytes that could NOT be cleared.
+ */
+#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
+       asm volatile (                                  \
+               "       MOV D1Ar1,#0\n"                 \
+                       CLEAR                           \
+               "1:\n"                                  \
+               "       .section .fixup,\"ax\"\n"       \
+                       FIXUP                           \
+               "       MOVT    D1Ar1,#HI(1b)\n"        \
+               "       JUMP    D1Ar1,#LO(1b)\n"        \
+               "       .previous\n"                    \
+               "       .section __ex_table,\"a\"\n"    \
+                       TENTRY                          \
+               "       .previous"                      \
+               : "=r" (to), "=r" (ret)                 \
+               : "0" (to), "1" (ret)                   \
+               : "D1Ar1", "memory")
+
+/*
+ * Fixed-size clear helpers built on __asm_clear().  The *_x_cont
+ * variants chain additional 4-byte SETD stages, so e.g. __asm_clear_16
+ * expands to four SETD stores, each with its own fixup and __ex_table
+ * entry.  Each fixup adds the size of its skipped store to 'ret'.
+ */
+#define __asm_clear_1(to, ret) \
+       __asm_clear(to, ret,                    \
+               "       SETB [%0],D1Ar1\n"      \
+               "2:     SETB [%0++],D1Ar1\n",   \
+               "3:     ADD  %1,%1,#1\n",       \
+               "       .long 2b,3b\n")
+
+#define __asm_clear_2(to, ret) \
+       __asm_clear(to, ret,                    \
+               "       SETW [%0],D1Ar1\n"      \
+               "2:     SETW [%0++],D1Ar1\n",   \
+               "3:     ADD  %1,%1,#2\n",       \
+               "       .long 2b,3b\n")
+
+#define __asm_clear_3(to, ret) \
+       __asm_clear(to, ret,                    \
+                "2:    SETW [%0++],D1Ar1\n"    \
+                "      SETB [%0],D1Ar1\n"      \
+                "3:    SETB [%0++],D1Ar1\n",   \
+                "4:    ADD  %1,%1,#2\n"        \
+                "5:    ADD  %1,%1,#1\n",       \
+                "      .long 2b,4b\n"          \
+                "      .long 3b,5b\n")
+
+#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
+       __asm_clear(to, ret,                            \
+               "       SETD [%0],D1Ar1\n"              \
+               "2:     SETD [%0++],D1Ar1\n" CLEAR,     \
+               "3:     ADD  %1,%1,#4\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+
+#define __asm_clear_4(to, ret) \
+       __asm_clear_4x_cont(to, ret, "", "", "")
+
+#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
+       __asm_clear_4x_cont(to, ret,                    \
+               "       SETD [%0],D1Ar1\n"              \
+               "4:     SETD [%0++],D1Ar1\n" CLEAR,     \
+               "5:     ADD  %1,%1,#4\n" FIXUP,         \
+               "       .long 4b,5b\n" TENTRY)
+
+#define __asm_clear_8(to, ret) \
+       __asm_clear_8x_cont(to, ret, "", "", "")
+
+#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
+       __asm_clear_8x_cont(to, ret,                    \
+               "       SETD [%0],D1Ar1\n"              \
+               "6:     SETD [%0++],D1Ar1\n" CLEAR,     \
+               "7:     ADD  %1,%1,#4\n" FIXUP,         \
+               "       .long 6b,7b\n" TENTRY)
+
+#define __asm_clear_12(to, ret) \
+       __asm_clear_12x_cont(to, ret, "", "", "")
+
+#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
+       __asm_clear_12x_cont(to, ret,                   \
+               "       SETD [%0],D1Ar1\n"              \
+               "8:     SETD [%0++],D1Ar1\n" CLEAR,     \
+               "9:     ADD  %1,%1,#4\n" FIXUP,         \
+               "       .long 8b,9b\n" TENTRY)
+
+#define __asm_clear_16(to, ret) \
+       __asm_clear_16x_cont(to, ret, "", "", "")
+
+/*
+ * __do_clear_user() - zero 'pn' bytes of user memory at 'pto'.
+ *
+ * Returns the number of bytes that could NOT be cleared (0 on success).
+ * Aligns the destination to 2 then 4 bytes, uses 64-bit stores while
+ * 8-byte aligned, then drains the remainder with 16/4-byte loops and a
+ * final 1-3 byte tail.
+ *
+ * NOTE(review): the alignment prologue does not check 'n' before
+ * decrementing it -- for a misaligned dst with n < 3 the unsigned
+ * 'n -= 2' can wrap and over-clear.  Callers presumably always pass
+ * sizes covering the alignment slack; confirm, or add a guard.
+ */
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
+{
+       register char __user *dst asm ("D0Re0") = pto;
+       register unsigned long n asm ("D1Re0") = pn;
+       register unsigned long retn asm ("D0Ar6") = 0;
+
+       if ((unsigned long) dst & 1) {
+               __asm_clear_1(dst, retn);
+               n--;
+       }
+
+       if ((unsigned long) dst & 2) {
+               __asm_clear_2(dst, retn);
+               n -= 2;
+       }
+
+       /* 64 bit copy loop */
+       if (!((__force unsigned long) dst & 7)) {
+               while (n >= 8) {
+                       __asm_clear_8x64(dst, retn);
+                       n -= 8;
+               }
+       }
+
+       while (n >= 16) {
+               __asm_clear_16(dst, retn);
+               n -= 16;
+       }
+
+       while (n >= 4) {
+               __asm_clear_4(dst, retn);
+               n -= 4;
+       }
+
+       /* Clear the final 0-3 bytes. */
+       switch (n) {
+       case 0:
+               break;
+       case 1:
+               __asm_clear_1(dst, retn);
+               break;
+       case 2:
+               __asm_clear_2(dst, retn);
+               break;
+       case 3:
+               __asm_clear_3(dst, retn);
+               break;
+       }
+
+       return retn;
+}
+EXPORT_SYMBOL(__do_clear_user);
+
+/*
+ * __get_user_asm_b() - fetch one byte from user memory.
+ *
+ * Returns the byte read, or 0 on fault.  On fault the .fixup code
+ * stores -EFAULT through @err (via SETD); @err is left untouched on
+ * success.
+ * NOTE(review): the GETB is issued twice and only the second (after
+ * label 1) has an __ex_table entry -- appears to rely on how the Meta
+ * core reports the faulting PC; confirm against the architecture docs.
+ */
+unsigned char __get_user_asm_b(const void __user *addr, long *err)
+{
+       register unsigned char x asm ("D0Re0") = 0;
+       asm volatile (
+               "       GETB %0,[%2]\n"
+               "1:\n"
+               "       GETB %0,[%2]\n"
+               "2:\n"
+               "       .section .fixup,\"ax\"\n"
+               "3:     MOV     D0FrT,%3\n"
+               "       SETD    [%1],D0FrT\n"
+               "       MOVT    D0FrT,#HI(2b)\n"
+               "       JUMP    D0FrT,#LO(2b)\n"
+               "       .previous\n"
+               "       .section __ex_table,\"a\"\n"
+               "       .long 1b,3b\n"
+               "       .previous\n"
+               : "=r" (x)
+               : "r" (err), "r" (addr), "P" (-EFAULT)
+               : "D0FrT");
+       return x;
+}
+EXPORT_SYMBOL(__get_user_asm_b);
+
+/*
+ * __get_user_asm_w() - fetch a 16-bit word from user memory.
+ *
+ * Returns the word read, or 0 on fault.  On fault the .fixup code
+ * stores -EFAULT through @err; @err is left untouched on success.
+ * Same double-issue GETW / single __ex_table pattern as
+ * __get_user_asm_b() above.
+ */
+unsigned short __get_user_asm_w(const void __user *addr, long *err)
+{
+       register unsigned short x asm ("D0Re0") = 0;
+       asm volatile (
+               "       GETW %0,[%2]\n"
+               "1:\n"
+               "       GETW %0,[%2]\n"
+               "2:\n"
+               "       .section .fixup,\"ax\"\n"
+               "3:     MOV     D0FrT,%3\n"
+               "       SETD    [%1],D0FrT\n"
+               "       MOVT    D0FrT,#HI(2b)\n"
+               "       JUMP    D0FrT,#LO(2b)\n"
+               "       .previous\n"
+               "       .section __ex_table,\"a\"\n"
+               "       .long 1b,3b\n"
+               "       .previous\n"
+               : "=r" (x)
+               : "r" (err), "r" (addr), "P" (-EFAULT)
+               : "D0FrT");
+       return x;
+}
+EXPORT_SYMBOL(__get_user_asm_w);
+
+/*
+ * __get_user_asm_d() - fetch a 32-bit word from user memory.
+ *
+ * Returns the word read, or 0 on fault.  On fault the .fixup code
+ * stores -EFAULT through @err; @err is left untouched on success.
+ * Same double-issue GETD / single __ex_table pattern as
+ * __get_user_asm_b() above.
+ */
+unsigned int __get_user_asm_d(const void __user *addr, long *err)
+{
+       register unsigned int x asm ("D0Re0") = 0;
+       asm volatile (
+               "       GETD %0,[%2]\n"
+               "1:\n"
+               "       GETD %0,[%2]\n"
+               "2:\n"
+               "       .section .fixup,\"ax\"\n"
+               "3:     MOV     D0FrT,%3\n"
+               "       SETD    [%1],D0FrT\n"
+               "       MOVT    D0FrT,#HI(2b)\n"
+               "       JUMP    D0FrT,#LO(2b)\n"
+               "       .previous\n"
+               "       .section __ex_table,\"a\"\n"
+               "       .long 1b,3b\n"
+               "       .previous\n"
+               : "=r" (x)
+               : "r" (err), "r" (addr), "P" (-EFAULT)
+               : "D0FrT");
+       return x;
+}
+EXPORT_SYMBOL(__get_user_asm_d);
+
+/*
+ * __put_user_asm_b() - store one byte to user memory.
+ *
+ * Returns 0 on success or -EFAULT if the store faulted (the .fixup
+ * code at 3: overwrites the zeroed result with %3 == -EFAULT).
+ * NOTE(review): the SETB is issued twice with only the second covered
+ * by __ex_table -- same pattern as the __get_user_asm_* helpers;
+ * confirm the fault-PC reporting assumption against the core docs.
+ */
+long __put_user_asm_b(unsigned int x, void __user *addr)
+{
+       register unsigned int err asm ("D0Re0") = 0;
+       asm volatile (
+               "       MOV  %0,#0\n"
+               "       SETB [%2],%1\n"
+               "1:\n"
+               "       SETB [%2],%1\n"
+               "2:\n"
+               ".section .fixup,\"ax\"\n"
+               "3:     MOV     %0,%3\n"
+               "       MOVT    D0FrT,#HI(2b)\n"
+               "       JUMP    D0FrT,#LO(2b)\n"
+               ".previous\n"
+               ".section __ex_table,\"a\"\n"
+               "       .long 1b,3b\n"
+               ".previous"
+               : "=r"(err)
+               : "d" (x), "a" (addr), "P"(-EFAULT)
+               : "D0FrT");
+       return err;
+}
+EXPORT_SYMBOL(__put_user_asm_b);
+
+/*
+ * __put_user_asm_w() - store a 16-bit word to user memory.
+ *
+ * Returns 0 on success or -EFAULT if the store faulted.  Same
+ * double-issue SETW / single __ex_table pattern as __put_user_asm_b().
+ */
+long __put_user_asm_w(unsigned int x, void __user *addr)
+{
+       register unsigned int err asm ("D0Re0") = 0;
+       asm volatile (
+               "       MOV  %0,#0\n"
+               "       SETW [%2],%1\n"
+               "1:\n"
+               "       SETW [%2],%1\n"
+               "2:\n"
+               ".section .fixup,\"ax\"\n"
+               "3:     MOV     %0,%3\n"
+               "       MOVT    D0FrT,#HI(2b)\n"
+               "       JUMP    D0FrT,#LO(2b)\n"
+               ".previous\n"
+               ".section __ex_table,\"a\"\n"
+               "       .long 1b,3b\n"
+               ".previous"
+               : "=r"(err)
+               : "d" (x), "a" (addr), "P"(-EFAULT)
+               : "D0FrT");
+       return err;
+}
+EXPORT_SYMBOL(__put_user_asm_w);
+
+/*
+ * __put_user_asm_d() - store a 32-bit word to user memory.
+ *
+ * Returns 0 on success or -EFAULT if the store faulted.  Same
+ * double-issue SETD / single __ex_table pattern as __put_user_asm_b().
+ */
+long __put_user_asm_d(unsigned int x, void __user *addr)
+{
+       register unsigned int err asm ("D0Re0") = 0;
+       asm volatile (
+               "       MOV  %0,#0\n"
+               "       SETD [%2],%1\n"
+               "1:\n"
+               "       SETD [%2],%1\n"
+               "2:\n"
+               ".section .fixup,\"ax\"\n"
+               "3:     MOV     %0,%3\n"
+               "       MOVT    D0FrT,#HI(2b)\n"
+               "       JUMP    D0FrT,#LO(2b)\n"
+               ".previous\n"
+               ".section __ex_table,\"a\"\n"
+               "       .long 1b,3b\n"
+               ".previous"
+               : "=r"(err)
+               : "d" (x), "a" (addr), "P"(-EFAULT)
+               : "D0FrT");
+       return err;
+}
+EXPORT_SYMBOL(__put_user_asm_d);
+
+/*
+ * __put_user_asm_l() - store a 64-bit value to user memory.
+ *
+ * Returns 0 on success or -EFAULT if the store faulted.  SETL writes
+ * both halves of the register pair (%1 and %t1) in one instruction;
+ * otherwise the same pattern as __put_user_asm_b().
+ */
+long __put_user_asm_l(unsigned long long x, void __user *addr)
+{
+       register unsigned int err asm ("D0Re0") = 0;
+       asm volatile (
+               "       MOV  %0,#0\n"
+               "       SETL [%2],%1,%t1\n"
+               "1:\n"
+               "       SETL [%2],%1,%t1\n"
+               "2:\n"
+               ".section .fixup,\"ax\"\n"
+               "3:     MOV     %0,%3\n"
+               "       MOVT    D0FrT,#HI(2b)\n"
+               "       JUMP    D0FrT,#LO(2b)\n"
+               ".previous\n"
+               ".section __ex_table,\"a\"\n"
+               "       .long 1b,3b\n"
+               ".previous"
+               : "=r"(err)
+               : "d" (x), "a" (addr), "P"(-EFAULT)
+               : "D0FrT");
+       return err;
+}
+EXPORT_SYMBOL(__put_user_asm_l);
+
+/*
+ * strnlen_user() - get the length of a user-space string, bounded.
+ *
+ * Returns the string length *including* the terminating NUL, a value
+ * greater than @count if no NUL was found within @count bytes, or 0 if
+ * a read fault occurs (the .fixup at 4: zeroes the result).
+ *
+ * The access_ok() call checks a zero-length range, i.e. it validates
+ * only the start address; the byte loop relies on the __ex_table
+ * fixup for the rest of the range.
+ */
+long strnlen_user(const char __user *src, long count)
+{
+       long res;
+
+       if (!access_ok(VERIFY_READ, src, 0))
+               return 0;
+
+       /* D0Ar4 walks the string, D0Ar6 counts down from 'count';
+        * result = count - remaining. */
+       asm volatile (" MOV     D0Ar4, %1\n"
+                     " MOV     D0Ar6, %2\n"
+                     "0:\n"
+                     " SUBS    D0FrT, D0Ar6, #0\n"
+                     " SUB     D0Ar6, D0Ar6, #1\n"
+                     " BLE     2f\n"
+                     " GETB    D0FrT, [D0Ar4+#1++]\n"
+                     "1:\n"
+                     " TST     D0FrT, #255\n"
+                     " BNE     0b\n"
+                     "2:\n"
+                     " SUB     %0, %2, D0Ar6\n"
+                     "3:\n"
+                     " .section .fixup,\"ax\"\n"
+                     "4:\n"
+                     " MOV     %0, #0\n"
+                     " MOVT    D0FrT,#HI(3b)\n"
+                     " JUMP    D0FrT,#LO(3b)\n"
+                     " .previous\n"
+                     " .section __ex_table,\"a\"\n"
+                     " .long 1b,4b\n"
+                     " .previous\n"
+                     : "=r" (res)
+                     : "r" (src), "r" (count)
+                     : "D0FrT", "D0Ar4", "D0Ar6", "cc");
+
+       return res;
+}
+EXPORT_SYMBOL(strnlen_user);
+
+/*
+ * __strncpy_from_user() - copy a NUL-terminated string from user space.
+ *
+ * Copies at most @count bytes from @src to @dst, stopping after the
+ * terminating NUL (which is also stored).  Returns the number of
+ * characters copied excluding the NUL, @count if the source was not
+ * terminated within @count bytes, or -EFAULT on a read fault (the
+ * .fixup at 5: loads %7 == -EFAULT into the result).
+ */
+long __strncpy_from_user(char *dst, const char __user *src, long count)
+{
+       long res;
+
+       if (count == 0)
+               return 0;
+
+       /*
+        * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
+        *  So do we.
+        *
+        *  This code is deduced from:
+        *
+        *      char tmp2;
+        *      long tmp1, tmp3;
+        *      tmp1 = count;
+        *      while ((*dst++ = (tmp2 = *src++)) != 0
+        *             && --tmp1)
+        *        ;
+        *
+        *      res = count - tmp1;
+        *
+        *  with tweaks.
+        */
+
+       /* Only the user-side GETB (label 2) can fault and is covered by
+        * the __ex_table entry; the kernel-side SETB is assumed safe. */
+       asm volatile (" MOV  %0,%3\n"
+                     "1:\n"
+                     " GETB D0FrT,[%2++]\n"
+                     "2:\n"
+                     " CMP  D0FrT,#0\n"
+                     " SETB [%1++],D0FrT\n"
+                     " BEQ  3f\n"
+                     " SUBS %0,%0,#1\n"
+                     " BNZ  1b\n"
+                     "3:\n"
+                     " SUB  %0,%3,%0\n"
+                     "4:\n"
+                     " .section .fixup,\"ax\"\n"
+                     "5:\n"
+                     " MOV  %0,%7\n"
+                     " MOVT    D0FrT,#HI(4b)\n"
+                     " JUMP    D0FrT,#LO(4b)\n"
+                     " .previous\n"
+                     " .section __ex_table,\"a\"\n"
+                     " .long 2b,5b\n"
+                     " .previous"
+                     : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
+                     : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
+                     : "D0FrT", "memory", "cc");
+
+       return res;
+}
+EXPORT_SYMBOL(__strncpy_from_user);
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
new file mode 100644 (file)
index 0000000..cd7f2f2
--- /dev/null
@@ -0,0 +1,153 @@
+menu "Memory management options"
+
+config PAGE_OFFSET
+       hex "Kernel page offset address"
+       default "0x40000000"
+       help
+         This option allows you to set the virtual address at which the
+         kernel will be mapped.
+endmenu
+
+config KERNEL_4M_PAGES
+       bool "Map kernel with 4MB pages"
+       depends on METAG_META21_MMU
+       default y
+       help
+         Map the kernel with large pages to reduce TLB pressure.
+
+choice
+       prompt "User page size"
+       default PAGE_SIZE_4K
+
+config PAGE_SIZE_4K
+       bool "4kB"
+       help
+         This is the default page size used by all Meta cores.
+
+config PAGE_SIZE_8K
+       bool "8kB"
+       depends on METAG_META21_MMU
+       help
+         This enables 8kB pages as supported by Meta 2.x and later MMUs.
+
+config PAGE_SIZE_16K
+       bool "16kB"
+       depends on METAG_META21_MMU
+       help
+         This enables 16kB pages as supported by Meta 2.x and later MMUs.
+
+endchoice
+
+config NUMA
+       bool "Non Uniform Memory Access (NUMA) Support"
+       help
+         Some Meta systems have MMU-mappable on-chip memories with
+         lower latencies than main memory. This enables support for
+         these blocks by binding them to nodes and allowing
+         memory policies to be used for prioritizing and controlling
+         allocation behaviour.
+
+config FORCE_MAX_ZONEORDER
+       int "Maximum zone order"
+       range 10 32
+       default "10"
+       help
+         The kernel memory allocator divides physically contiguous memory
+         blocks into "zones", where each zone is a power of two number of
+         pages.  This option selects the largest power of two that the kernel
+         keeps in the memory allocator.  If you need to allocate very large
+         blocks of physically contiguous memory, then you may need to
+         increase this value.
+
+         This config option is actually maximum order plus one. For example,
+         a value of 11 means that the largest free memory block is 2^10 pages.
+
+         The page size is not necessarily 4KB.  Keep this in mind
+         when choosing a value for this option.
+
+config METAG_L2C
+       bool "Level 2 Cache Support"
+       depends on METAG_META21
+       help
+         Press y here to enable support for the Meta Level 2 (L2) cache. This
+         will enable the cache at start up if it hasn't already been enabled
+         by the bootloader.
+
+         If the bootloader enables the L2 you must press y here to ensure the
+         kernel takes the appropriate actions to keep the cache coherent.
+
+config NODES_SHIFT
+       int
+       default "1"
+       depends on NEED_MULTIPLE_NODES
+
+config ARCH_FLATMEM_ENABLE
+       def_bool y
+       depends on !NUMA
+
+config ARCH_SPARSEMEM_ENABLE
+       def_bool y
+       select SPARSEMEM_STATIC
+
+config ARCH_SPARSEMEM_DEFAULT
+       def_bool y
+
+config MAX_ACTIVE_REGIONS
+       int
+       default "2" if SPARSEMEM
+       default "1"
+
+config ARCH_POPULATES_NODE_MAP
+       def_bool y
+
+config ARCH_SELECT_MEMORY_MODEL
+       def_bool y
+
+config SYS_SUPPORTS_HUGETLBFS
+       def_bool y
+       depends on METAG_META21_MMU
+
+choice
+       prompt "HugeTLB page size"
+       depends on METAG_META21_MMU && HUGETLB_PAGE
+       default HUGETLB_PAGE_SIZE_1M
+
+config HUGETLB_PAGE_SIZE_8K
+       bool "8kB"
+       depends on PAGE_SIZE_4K
+
+config HUGETLB_PAGE_SIZE_16K
+       bool "16kB"
+       depends on PAGE_SIZE_4K || PAGE_SIZE_8K
+
+config HUGETLB_PAGE_SIZE_32K
+       bool "32kB"
+
+config HUGETLB_PAGE_SIZE_64K
+       bool "64kB"
+
+config HUGETLB_PAGE_SIZE_128K
+       bool "128kB"
+
+config HUGETLB_PAGE_SIZE_256K
+       bool "256kB"
+
+config HUGETLB_PAGE_SIZE_512K
+       bool "512kB"
+
+config HUGETLB_PAGE_SIZE_1M
+       bool "1MB"
+
+config HUGETLB_PAGE_SIZE_2M
+       bool "2MB"
+
+config HUGETLB_PAGE_SIZE_4M
+       bool "4MB"
+
+endchoice
+
+config METAG_COREMEM
+       bool
+       default y if SUSPEND
+
+source "mm/Kconfig"
diff --git a/arch/metag/mm/Makefile b/arch/metag/mm/Makefile
new file mode 100644 (file)
index 0000000..9943311
--- /dev/null
@@ -0,0 +1,19 @@
+#
+# Makefile for the linux Meta-specific parts of the memory manager.
+#
+
+obj-y                          += cache.o
+obj-y                          += extable.o
+obj-y                          += fault.o
+obj-y                          += init.o
+obj-y                          += ioremap.o
+obj-y                          += maccess.o
+
+mmu-y                          := mmu-meta1.o
+mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o
+obj-y                          += $(mmu-y)
+
+obj-$(CONFIG_HIGHMEM)          += highmem.o
+obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
+obj-$(CONFIG_METAG_L2C)                += l2cache.o
+obj-$(CONFIG_NUMA)             += numa.o
diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c
new file mode 100644 (file)
index 0000000..b5d3b2e
--- /dev/null
@@ -0,0 +1,521 @@
+/*
+ * arch/metag/mm/cache.c
+ *
+ * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Cache control code
+ */
+
+#include <linux/export.h>
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+#include <asm/core_reg.h>
+#include <asm/global_lock.h>
+#include <asm/metag_isa.h>
+#include <asm/metag_mem.h>
+#include <asm/metag_regs.h>
+
+/* Default log2 number of ways per L1 cache, refined by metag_cache_probe().
+ * NOTE(review): the *_sets_log2 variables below are initialised from this
+ * "WAYS" constant -- the sets/ways naming is inconsistent; confirm which
+ * quantity is really meant before relying on the names. */
+#define DEFAULT_CACHE_WAYS_LOG2        2
+
+/*
+ * Size of a set in the caches. Initialised for default 16K stride, adjusted
+ * according to values passed through TBI global heap segment via LDLK (on ATP)
+ * or config registers (on HTP/MTP)
+ */
+static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
+                                       - DEFAULT_CACHE_WAYS_LOG2;
+static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
+                                       - DEFAULT_CACHE_WAYS_LOG2;
+/*
+ * The number of sets in the caches. Initialised for HTP/ATP, adjusted
+ * according to NOMMU setting in config registers
+ */
+static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
+static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
+
+#ifndef CONFIG_METAG_META12
+/**
+ * metag_lnkget_probe() - Probe whether lnkget/lnkset go around the cache
+ */
+static volatile u32 lnkget_testdata[16] __initdata __aligned(64);
+
+#define LNKGET_CONSTANT 0xdeadbeef
+
+void __init metag_lnkget_probe(void)
+{
+       int temp;
+       long flags;
+
+       /*
+        * It's conceivable the user has configured a globally coherent cache
+        * shared with non-Linux hardware threads, so use LOCK2 to prevent them
+        * from executing and causing cache eviction during the test.
+        */
+       __global_lock2(flags);
+
+       /* read a value to bring it into the cache */
+       (void)lnkget_testdata[0];
+       lnkget_testdata[0] = 0;
+
+       /* lnkget/lnkset it to modify it */
+       asm volatile(
+               "1:     LNKGETD %0, [%1]\n"
+               "       LNKSETD [%1], %2\n"
+               "       DEFR    %0, TXSTAT\n"
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"
+               "       CMPT    %0, #HI(0x02000000)\n"
+               "       BNZ     1b\n"
+               : "=&d" (temp)
+               : "da" (&lnkget_testdata[0]), "bd" (LNKGET_CONSTANT)
+               : "cc");
+
+       /* re-read it to see if the cached value changed */
+       temp = lnkget_testdata[0];
+
+       __global_unlock2(flags);
+
+       /* flush the cache line to fix any incoherency */
+       __builtin_dcache_flush((void *)&lnkget_testdata[0]);
+
+#if defined(CONFIG_METAG_LNKGET_AROUND_CACHE)
+       /* if the cache is right, LNKGET_AROUND_CACHE is unnecessary */
+       if (temp == LNKGET_CONSTANT)
+               pr_info("LNKGET/SET go through cache but CONFIG_METAG_LNKGET_AROUND_CACHE=y\n");
+#elif defined(CONFIG_METAG_ATOMICITY_LNKGET)
+       /*
+        * if the cache is wrong, LNKGET_AROUND_CACHE is really necessary
+        * because the kernel is configured to use LNKGET/SET for atomicity
+        */
+       WARN(temp != LNKGET_CONSTANT,
+            "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"
+            "Expect kernel failure as it's used for atomicity primitives\n");
+#elif defined(CONFIG_SMP)
+       /*
+        * if the cache is wrong, LNKGET_AROUND_CACHE should be used or the
+        * gateway page won't flush and userland could break.
+        */
+       WARN(temp != LNKGET_CONSTANT,
+            "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"
+            "Expect userland failure as it's used for user gateway page\n");
+#else
+       /*
+        * if the cache is wrong, LNKGET_AROUND_CACHE is set wrong, but it
+        * doesn't actually matter as it doesn't have any effect on !SMP &&
+        * !ATOMICITY_LNKGET.
+        */
+       if (temp != LNKGET_CONSTANT)
+               pr_warn("LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n");
+#endif
+}
+#endif /* !CONFIG_METAG_META12 */
+
+/**
+ * metag_cache_probe() - Probe L1 cache configuration.
+ *
+ * Probe the L1 cache configuration to aid the L1 physical cache flushing
+ * functions.
+ */
+void __init metag_cache_probe(void)
+{
+#ifndef CONFIG_METAG_META12
+       int coreid = metag_in32(METAC_CORE_ID);
+       int config = metag_in32(METAC_CORE_CONFIG2);
+       int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS;
+
+       if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 ||
+           cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) {
+               icache_sets_log2 = 1;
+               dcache_sets_log2 = 1;
+       }
+
+       /* For normal size caches, the smallest size is 4Kb.
+          For small caches, the smallest size is 64b */
+       icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT)
+                               ? 6 : 12;
+       icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS)
+                               >> METAC_CORE_C2ICSZ_S;
+       icache_set_shift -= icache_sets_log2;
+
+       dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT)
+                               ? 6 : 12;
+       dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS)
+                               >> METAC_CORECFG2_DCSZ_S;
+       dcache_set_shift -= dcache_sets_log2;
+
+       metag_lnkget_probe();
+#else
+       /* Extract cache sizes from global heap segment */
+       unsigned long val, u;
+       int width, shift, addend;
+       PTBISEG seg;
+
+       seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
+                                         TBID_SEGSCOPE_GLOBAL,
+                                         TBID_SEGTYPE_HEAP));
+       if (seg != NULL) {
+               val = seg->Data[1];
+
+               /* Work out width of I-cache size bit-field */
+               u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS)
+                      >> METAG_TBI_ICACHE_SIZE_S;
+               width = 0;
+               while (u & 1) {
+                       width++;
+                       u >>= 1;
+               }
+               /* Extract sign-extended size addend value */
+               shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width);
+               addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS)
+                                << shift)
+                       >> (shift + METAG_TBI_ICACHE_SIZE_S);
+               /* Now calculate I-cache set size */
+               icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
+                                   - DEFAULT_CACHE_WAYS_LOG2)
+                                       + addend;
+
+               /* Similarly for D-cache */
+               u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS)
+                      >> METAG_TBI_DCACHE_SIZE_S;
+               width = 0;
+               while (u & 1) {
+                       width++;
+                       u >>= 1;
+               }
+               shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width);
+               addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS)
+                                << shift)
+                       >> (shift + METAG_TBI_DCACHE_SIZE_S);
+               dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
+                                   - DEFAULT_CACHE_WAYS_LOG2)
+                                       + addend;
+       }
+#endif
+}
+
+/*
+ * metag_phys_data_cache_flush() - flush this hardware thread's whole
+ * D-cache partition by writing through the linear cache-flush region.
+ *
+ * @start is used only for its sign: a negative (top-half) address
+ * selects the global partition fields of SYSC_DCPARTn instead of the
+ * local ones.  Four flush pointers walk the (up to) four cache sets in
+ * parallel; when only two sets exist (dcache_sets_log2 == 1) they are
+ * re-arranged to interleave two addresses per set with a doubled step.
+ */
+static void metag_phys_data_cache_flush(const void *start)
+{
+       unsigned long flush0, flush1, flush2, flush3;
+       int loops, step;
+       int thread;
+       int part, offset;
+       int set_shift;
+
+       /* Use a sequence of writes to flush the cache region requested */
+       thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
+                                         >> TXENABLE_THREAD_S;
+
+       /* Cache is broken into sets which lie in contiguous RAMs */
+       set_shift = dcache_set_shift;
+
+       /* Move to the base of the physical cache flush region */
+       flush0 = LINSYSCFLUSH_DCACHE_LINE;
+       step   = 64;
+
+       /* Get partition data for this thread */
+       part = metag_in32(SYSC_DCPART0 +
+                             (SYSC_xCPARTn_STRIDE * thread));
+
+       if ((int)start < 0)
+               /* Access Global vs Local partition */
+               part >>= SYSC_xCPARTG_AND_S
+                       - SYSC_xCPARTL_AND_S;
+
+       /* Extract offset and move SetOff */
+       offset = (part & SYSC_xCPARTL_OR_BITS)
+                       >> SYSC_xCPARTL_OR_S;
+       flush0 += (offset << (set_shift - 4));
+
+       /* Shrink size */
+       part = (part & SYSC_xCPARTL_AND_BITS)
+                       >> SYSC_xCPARTL_AND_S;
+       loops = ((part + 1) << (set_shift - 4));
+
+       /* Reduce loops by step of cache line size */
+       loops /= step;
+
+       flush1 = flush0 + (1 << set_shift);
+       flush2 = flush0 + (2 << set_shift);
+       flush3 = flush0 + (3 << set_shift);
+
+       if (dcache_sets_log2 == 1) {
+               /* Two sets only: interleave two pointers per set and
+                * double the stride so total coverage is unchanged. */
+               flush2 = flush1;
+               flush3 = flush1 + step;
+               flush1 = flush0 + step;
+               step  <<= 1;
+               loops >>= 1;
+       }
+
+       /* Clear loops ways in cache */
+       while (loops-- != 0) {
+               /* Clear the ways. */
+#if 0
+               /*
+                * GCC doesn't generate very good code for this so we
+                * provide inline assembly instead.
+                */
+               metag_out8(0, flush0);
+               metag_out8(0, flush1);
+               metag_out8(0, flush2);
+               metag_out8(0, flush3);
+
+               flush0 += step;
+               flush1 += step;
+               flush2 += step;
+               flush3 += step;
+#else
+               /* Equivalent of the metag_out8() sequence above: four
+                * byte stores with post-increment by 'step'. */
+               asm volatile (
+                       "SETB\t[%0+%4++],%5\n"
+                       "SETB\t[%1+%4++],%5\n"
+                       "SETB\t[%2+%4++],%5\n"
+                       "SETB\t[%3+%4++],%5\n"
+                       : "+e" (flush0),
+                         "+e" (flush1),
+                         "+e" (flush2),
+                         "+e" (flush3)
+                       : "e" (step), "a" (0));
+#endif
+       }
+}
+
+/*
+ * metag_data_cache_flush_all() - flush the entire D-cache partition.
+ * No-op when the data cache is disabled in SYSC_CACHE_MMU_CONFIG;
+ * @start is only used for its partition-selecting sign (see
+ * metag_phys_data_cache_flush()).
+ */
+void metag_data_cache_flush_all(const void *start)
+{
+       if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
+               /* No need to flush the data cache it's not actually enabled */
+               return;
+
+       metag_phys_data_cache_flush(start);
+}
+
+/*
+ * metag_data_cache_flush() - flush @bytes of D-cache starting at @start.
+ *
+ * Ranges of 4096 bytes or more fall back to a full partition flush;
+ * smaller ranges are flushed line by line with __builtin_dcache_flush(),
+ * four cache lines per iteration via the deliberately falling-through
+ * switch below.  No-op when the data cache is disabled.
+ */
+void metag_data_cache_flush(const void *start, int bytes)
+{
+       unsigned long flush0;
+       int loops, step;
+
+       if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
+               /* No need to flush the data cache it's not actually enabled */
+               return;
+
+       if (bytes >= 4096) {
+               metag_phys_data_cache_flush(start);
+               return;
+       }
+
+       /* Use linear cache flush mechanism on META IP */
+       flush0 = (int)start;
+       /* Round the range out to whole cache lines. */
+       loops  = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes +
+                                       (DCACHE_LINE_BYTES - 1);
+       loops  >>= DCACHE_LINE_S;
+
+#define PRIM_FLUSH(addr, offset) do {                  \
+       int __addr = ((int) (addr)) + ((offset) * 64);  \
+       __builtin_dcache_flush((void *)(__addr));       \
+       } while (0)
+
+#define LOOP_INC (4*64)
+
+       do {
+               /* By default stop */
+               step = 0;
+
+               switch (loops) {
+               /* Drop Thru Cases! */
+               default:
+                       PRIM_FLUSH(flush0, 3);
+                       loops -= 4;
+                       step = 1;
+               case 3:
+                       PRIM_FLUSH(flush0, 2);
+               case 2:
+                       PRIM_FLUSH(flush0, 1);
+               case 1:
+                       PRIM_FLUSH(flush0, 0);
+                       flush0 += LOOP_INC;
+               case 0:
+                       break;
+               }
+       } while (step);
+}
+EXPORT_SYMBOL(metag_data_cache_flush);
+
+/*
+ * metag_phys_code_cache_flush() - invalidate the icache via the physical
+ * flush region.
+ * @start: only the sign bit is used, to pick the global vs local partition
+ *         (kernel addresses are "negative"); plus set selection on Meta 1.
+ * @bytes: on CONFIG_METAG_META12 only, narrows the flush to the sets that
+ *         could hold @start..@start+@bytes; otherwise the whole partition
+ *         for this hardware thread is flushed.
+ *
+ * Writes zero bytes through LINSYSCFLUSH_ICACHE_LINE to clear all four
+ * ways of each set in this thread's icache partition.
+ */
+static void metag_phys_code_cache_flush(const void *start, int bytes)
+{
+       unsigned long flush0, flush1, flush2, flush3, end_set;
+       int loops, step;
+       int thread;
+       int set_shift, set_size;
+       int part, offset;
+
+       /* Use a sequence of writes to flush the cache region requested */
+       thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
+                                         >> TXENABLE_THREAD_S;
+       set_shift = icache_set_shift;
+
+       /* Move to the base of the physical cache flush region */
+       flush0 = LINSYSCFLUSH_ICACHE_LINE;
+       step   = 64;
+
+       /* Get partition code for this thread */
+       part = metag_in32(SYSC_ICPART0 +
+                         (SYSC_xCPARTn_STRIDE * thread));
+
+       if ((int)start < 0)
+               /* Access Global vs Local partition */
+               part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S;
+
+       /* Extract offset and move SetOff */
+       offset = (part & SYSC_xCPARTL_OR_BITS)
+                       >> SYSC_xCPARTL_OR_S;
+       flush0 += (offset << (set_shift - 4));
+
+       /* Shrink size */
+       part = (part & SYSC_xCPARTL_AND_BITS)
+                       >> SYSC_xCPARTL_AND_S;
+       loops = ((part + 1) << (set_shift - 4));
+
+       /* Where does the Set end? */
+       end_set = flush0 + loops;
+       set_size = loops;
+
+#ifdef CONFIG_METAG_META12
+       if ((bytes < 4096) && (bytes < loops)) {
+               /* Unreachable on HTP/MTP */
+               /* Only target the sets that could be relevant */
+               flush0 += (loops - step) & ((int) start);
+               loops = (((int) start) & (step-1)) + bytes + step - 1;
+       }
+#endif
+
+       /* Reduce loops by step of cache line size */
+       loops /= step;
+
+       /* One flush pointer per cache way; each way is 1<<set_shift apart */
+       flush1 = flush0 + (1<<set_shift);
+       flush2 = flush0 + (2<<set_shift);
+       flush3 = flush0 + (3<<set_shift);
+
+       if (icache_sets_log2 == 1) {
+               /* 2-way cache: pair the pointers up within the two ways
+                * and double the step so each loop still clears 4 lines.
+                */
+               flush2 = flush1;
+               flush3 = flush1 + step;
+               flush1 = flush0 + step;
+#if 0
+               /* flush0 will stop one line early in this case
+                * (flush1 will do the final line).
+                * However we don't correct end_set here at the moment
+                * because it will never wrap on HTP/MTP
+                */
+               end_set -= step;
+#endif
+               step  <<= 1;
+               loops >>= 1;
+       }
+
+       /* Clear loops ways in cache */
+       while (loops-- != 0) {
+#if 0
+               /*
+                * GCC doesn't generate very good code for this so we
+                * provide inline assembly instead.
+                */
+               /* Clear the ways */
+               metag_out8(0, flush0);
+               metag_out8(0, flush1);
+               metag_out8(0, flush2);
+               metag_out8(0, flush3);
+
+               flush0 += step;
+               flush1 += step;
+               flush2 += step;
+               flush3 += step;
+#else
+               /* SETB writes one zero byte per way and post-increments the
+                * address register by step, mirroring the C version above.
+                */
+               asm volatile (
+                       "SETB\t[%0+%4++],%5\n"
+                       "SETB\t[%1+%4++],%5\n"
+                       "SETB\t[%2+%4++],%5\n"
+                       "SETB\t[%3+%4++],%5\n"
+                       : "+e" (flush0),
+                         "+e" (flush1),
+                         "+e" (flush2),
+                         "+e" (flush3)
+                       : "e" (step), "a" (0));
+#endif
+
+               if (flush0 == end_set) {
+                       /* Wrap within Set 0 */
+                       flush0 -= set_size;
+                       flush1 -= set_size;
+                       flush2 -= set_size;
+                       flush3 -= set_size;
+               }
+       }
+}
+
+/*
+ * metag_code_cache_flush_all() - flush the whole instruction cache.
+ * @start: only used to select the global vs local icache partition.
+ *
+ * No-op when the icache is disabled in SYSC_CACHE_MMU_CONFIG.
+ */
+void metag_code_cache_flush_all(const void *start)
+{
+       if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
+               /* No need to flush the code cache, it's not actually enabled */
+               return;
+
+       /* 4096 bytes forces the full physical-flush path */
+       metag_phys_code_cache_flush(start, 4096);
+}
+EXPORT_SYMBOL(metag_code_cache_flush_all);
+
+/*
+ * metag_code_cache_flush() - flush @bytes of icache covering @start.
+ *
+ * Meta 1 has no CACHEWD instruction, so it always falls back to the
+ * physical flush.  Meta 2 uses CACHEWD line writes for small ranges and
+ * the physical flush for >= 4096 bytes.  No-op when the icache is off.
+ */
+void metag_code_cache_flush(const void *start, int bytes)
+{
+#ifndef CONFIG_METAG_META12
+       void *flush;
+       int loops, step;
+#endif /* !CONFIG_METAG_META12 */
+
+       if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
+               /* No need to flush the code cache, it's not actually enabled */
+               return;
+
+#ifdef CONFIG_METAG_META12
+       /* CACHEWD isn't available on Meta1, so always do full cache flush */
+       metag_phys_code_cache_flush(start, bytes);
+
+#else /* CONFIG_METAG_META12 */
+       /* If large size do full physical cache flush */
+       if (bytes >= 4096) {
+               metag_phys_code_cache_flush(start, bytes);
+               return;
+       }
+
+       /* Use linear cache flush mechanism on META IP */
+       flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1));
+       /* Round the byte count up to whole cache lines */
+       loops  = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes +
+               (ICACHE_LINE_BYTES-1);
+       loops  >>= ICACHE_LINE_S;
+
+/* NOTE(review): LOOP_INC is also defined for the data-cache flush earlier in
+ * this file; confirm the definitions are identical or #undef'd to avoid a
+ * redefinition warning.
+ */
+#define PRIM_IFLUSH(addr, offset) \
+       __builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT)
+
+#define LOOP_INC (4*64)
+
+       /* Duff's-device style: flush up to 4 lines per iteration */
+       do {
+               /* By default stop */
+               step = 0;
+
+               switch (loops) {
+               /* Drop Thru Cases! */
+               default:
+                       PRIM_IFLUSH(flush, 3);
+                       loops -= 4;
+                       step = 1;
+               case 3:
+                       PRIM_IFLUSH(flush, 2);
+               case 2:
+                       PRIM_IFLUSH(flush, 1);
+               case 1:
+                       PRIM_IFLUSH(flush, 0);
+                       flush += LOOP_INC;
+               case 0:
+                       break;
+               }
+       } while (step);
+#endif /* !CONFIG_METAG_META12 */
+}
+EXPORT_SYMBOL(metag_code_cache_flush);
diff --git a/arch/metag/mm/extable.c b/arch/metag/mm/extable.c
new file mode 100644 (file)
index 0000000..2a21eae
--- /dev/null
@@ -0,0 +1,15 @@
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+/*
+ * fixup_exception() - redirect a faulting kernel instruction to its fixup.
+ * @regs: faulting context; CurrPC is rewritten when a fixup entry exists.
+ *
+ * Returns non-zero if the faulting PC had an exception-table entry.
+ */
+int fixup_exception(struct pt_regs *regs)
+{
+       const struct exception_table_entry *fixup;
+       unsigned long pc = instruction_pointer(regs);
+
+       fixup = search_exception_tables(pc);
+       if (fixup)
+               regs->ctx.CurrPC = fixup->fixup;
+
+       return fixup != NULL;
+}
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
new file mode 100644 (file)
index 0000000..2c75bf7
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ *  Meta page fault handling.
+ *
+ *  Copyright (C) 2005-2012 Imagination Technologies Ltd.
+ */
+
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+#include <asm/traps.h>
+
+/*
+ * clear_cbuf_entry() - clear any pending catch buffer state for @addr.
+ * @regs:   faulting context (holds the catch buffer and SaveMask).
+ * @addr:   faulting address expected in the catch buffer.
+ * @trapno: trap number; instruction-fetch faults leave no catch state.
+ */
+static void clear_cbuf_entry(struct pt_regs *regs, unsigned long addr,
+                            unsigned int trapno)
+{
+       PTBICTXEXTCB0 cbuf = regs->extcb0;
+
+       switch (trapno) {
+               /* Instruction fetch faults leave no catch buffer state. */
+       case TBIXXF_SIGNUM_IGF:
+       case TBIXXF_SIGNUM_IPF:
+               return;
+       default:
+               if (cbuf[0].CBAddr == addr) {
+                       cbuf[0].CBAddr = 0;
+                       cbuf[0].CBFlags &= ~TXCATCH0_FAULT_BITS;
+
+                       /* And, as this is the ONLY catch entry, we
+                        * need to clear the cbuf bit from the context!
+                        */
+                       regs->ctx.SaveMask &= ~(TBICTX_CBUF_BIT |
+                                               TBICTX_XCBF_BIT);
+
+                       return;
+               }
+               /* Entry did not match the faulting address: nothing cleared */
+               pr_err("Failed to clear cbuf entry!\n");
+       }
+}
+
+/* Log unhandled user segfaults (rate-limited) when non-zero. */
+int show_unhandled_signals = 1;
+
+/*
+ * do_page_fault() - arch page fault entry point.
+ * @regs:         faulting register context.
+ * @address:      faulting virtual address.
+ * @write_access: non-zero for a write fault.
+ * @trapno:       hardware trap number (see asm/traps.h).
+ *
+ * Returns 0 when the fault was handled (or fixed up), 1 when a signal
+ * was delivered instead.  vmalloc-range faults are handled by syncing
+ * the task's page tables with swapper_pg_dir without taking mmap_sem.
+ */
+int do_page_fault(struct pt_regs *regs, unsigned long address,
+                 unsigned int write_access, unsigned int trapno)
+{
+       struct task_struct *tsk;
+       struct mm_struct *mm;
+       struct vm_area_struct *vma, *prev_vma;
+       siginfo_t info;
+       int fault;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                               (write_access ? FAULT_FLAG_WRITE : 0);
+
+       tsk = current;
+
+       if ((address >= VMALLOC_START) && (address < VMALLOC_END)) {
+               /*
+                * Synchronize this task's top level page-table
+                * with the 'reference' page table.
+                *
+                * Do _not_ use "tsk" here. We might be inside
+                * an interrupt in the middle of a task switch..
+                */
+               int offset = pgd_index(address);
+               pgd_t *pgd, *pgd_k;
+               pud_t *pud, *pud_k;
+               pmd_t *pmd, *pmd_k;
+               pte_t *pte_k;
+
+               pgd = ((pgd_t *)mmu_get_base()) + offset;
+               pgd_k = swapper_pg_dir + offset;
+
+               /* This will never happen with the folded page table. */
+               if (!pgd_present(*pgd)) {
+                       if (!pgd_present(*pgd_k))
+                               goto bad_area_nosemaphore;
+                       set_pgd(pgd, *pgd_k);
+                       return 0;
+               }
+
+               pud = pud_offset(pgd, address);
+               pud_k = pud_offset(pgd_k, address);
+               if (!pud_present(*pud_k))
+                       goto bad_area_nosemaphore;
+               set_pud(pud, *pud_k);
+
+               pmd = pmd_offset(pud, address);
+               pmd_k = pmd_offset(pud_k, address);
+               if (!pmd_present(*pmd_k))
+                       goto bad_area_nosemaphore;
+               set_pmd(pmd, *pmd_k);
+
+               pte_k = pte_offset_kernel(pmd_k, address);
+               if (!pte_present(*pte_k))
+                       goto bad_area_nosemaphore;
+
+               /* May only be needed on Chorus2 */
+               flush_tlb_all();
+               return 0;
+       }
+
+       mm = tsk->mm;
+
+       /* Faults in atomic context or without an mm cannot sleep on mmap_sem */
+       if (in_atomic() || !mm)
+               goto no_context;
+
+retry:
+       down_read(&mm->mmap_sem);
+
+       vma = find_vma_prev(mm, address, &prev_vma);
+
+       if (!vma || address < vma->vm_start)
+               goto check_expansion;
+
+good_area:
+       if (write_access) {
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto bad_area;
+       } else {
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+                       goto bad_area;
+       }
+
+       /*
+        * If for any reason at all we couldn't handle the fault,
+        * make sure we exit gracefully rather than endlessly redo
+        * the fault.
+        */
+       fault = handle_mm_fault(mm, vma, address, flags);
+
+       /* NOTE(review): on VM_FAULT_RETRY mmap_sem has already been released
+        * inside handle_mm_fault (__lock_page_or_retry), so no up_read here.
+        */
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+               return 0;
+
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+       }
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (fault & VM_FAULT_MAJOR)
+                       tsk->maj_flt++;
+               else
+                       tsk->min_flt++;
+               if (fault & VM_FAULT_RETRY) {
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       flags |= FAULT_FLAG_TRIED;
+
+                       /*
+                        * No need to up_read(&mm->mmap_sem) as we would
+                        * have already released it in __lock_page_or_retry
+                        * in mm/filemap.c.
+                        */
+
+                       goto retry;
+               }
+       }
+
+       up_read(&mm->mmap_sem);
+       return 0;
+
+check_expansion:
+       /* Address below the nearest vma: maybe a stack that can grow down */
+       vma = prev_vma;
+       if (vma && (expand_stack(vma, address) == 0))
+               goto good_area;
+
+bad_area:
+       up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+       if (user_mode(regs)) {
+               info.si_signo = SIGSEGV;
+               info.si_errno = 0;
+               info.si_code = SEGV_MAPERR;
+               info.si_addr = (__force void __user *)address;
+               info.si_trapno = trapno;
+
+               if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+                   printk_ratelimit()) {
+                       pr_info("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
+                              task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+                              tsk->comm, task_pid_nr(tsk), address,
+                              regs->ctx.CurrPC, regs->ctx.AX[0].U0,
+                              write_access, trapno, trap_name(trapno));
+                       print_vma_addr(" in ", regs->ctx.CurrPC);
+                       print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
+                       printk("\n");
+                       show_regs(regs);
+               }
+               force_sig_info(SIGSEGV, &info, tsk);
+               return 1;
+       }
+       goto no_context;
+
+do_sigbus:
+       up_read(&mm->mmap_sem);
+
+       /*
+        * Send a sigbus, regardless of whether we were in kernel
+        * or user mode.
+        */
+       info.si_signo = SIGBUS;
+       info.si_errno = 0;
+       info.si_code = BUS_ADRERR;
+       info.si_addr = (__force void __user *)address;
+       info.si_trapno = trapno;
+       force_sig_info(SIGBUS, &info, tsk);
+
+       /* Kernel mode? Handle exceptions or die */
+       if (!user_mode(regs))
+               goto no_context;
+
+       return 1;
+
+       /*
+        * We ran out of memory, or some other thing happened to us that made
+        * us unable to handle the page fault gracefully.
+        */
+out_of_memory:
+       up_read(&mm->mmap_sem);
+       if (user_mode(regs))
+               do_group_exit(SIGKILL);
+
+no_context:
+       /* Are we prepared to handle this kernel fault?  */
+       if (fixup_exception(regs)) {
+               clear_cbuf_entry(regs, address, trapno);
+               return 1;
+       }
+
+       die("Oops", regs, (write_access << 15) | trapno, address);
+       do_exit(SIGKILL);
+}
diff --git a/arch/metag/mm/highmem.c b/arch/metag/mm/highmem.c
new file mode 100644 (file)
index 0000000..d71f621
--- /dev/null
@@ -0,0 +1,133 @@
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+unsigned long highstart_pfn, highend_pfn;
+
+/*
+ * kmap() - map a (possibly highmem) page into kernel address space.
+ * May sleep; lowmem pages are returned directly via page_address().
+ */
+void *kmap(struct page *page)
+{
+       might_sleep();
+       if (!PageHighMem(page))
+               return page_address(page);
+       return kmap_high(page);
+}
+EXPORT_SYMBOL(kmap);
+
+/*
+ * kunmap() - undo a kmap().  Must not be called from interrupt context;
+ * lowmem pages need no unmapping.
+ */
+void kunmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return;
+       kunmap_high(page);
+}
+EXPORT_SYMBOL(kunmap);
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ */
+
+/*
+ * kmap_atomic() - map a page via a per-cpu fixmap slot, without sleeping.
+ * Disables pagefaults; pair with __kunmap_atomic().
+ */
+void *kmap_atomic(struct page *page)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+       int type;
+
+       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       pagefault_disable();
+       if (!PageHighMem(page))
+               return page_address(page);
+
+       /* Pick the next free fixmap slot for this cpu */
+       type = kmap_atomic_idx_push();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+       set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
+
+       return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+/*
+ * __kunmap_atomic() - release a kmap_atomic() mapping and re-enable
+ * pagefaults.  Lowmem addresses (below FIXADDR_START) need no PTE work.
+ */
+void __kunmap_atomic(void *kvaddr)
+{
+       unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+       int idx, type;
+
+       if (kvaddr >= (void *)FIXADDR_START) {
+               type = kmap_atomic_idx();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+               /*
+                * Force other mappings to Oops if they'll try to access this
+                * pte without first remap it.  Keeping stale mappings around
+                * is a bad idea also, in case the page changes cacheability
+                * attributes or becomes a protected page in a hypervisor.
+                */
+               pte_clear(&init_mm, vaddr, kmap_pte-idx);
+               flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+
+               kmap_atomic_idx_pop();
+       }
+
+       pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+       int type;
+
+       pagefault_disable();
+
+       type = kmap_atomic_idx_push();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+       set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
+       /* Unlike kmap_atomic(), flush immediately in case the slot was
+        * reused; the fixmap address may still be in the TLB.
+        */
+       flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+
+       return (void *)vaddr;
+}
+
+/*
+ * kmap_atomic_to_page() - recover the struct page behind a kmap_atomic
+ * address.  Plain lowmem pointers are resolved with virt_to_page().
+ */
+struct page *kmap_atomic_to_page(void *ptr)
+{
+       unsigned long vaddr = (unsigned long)ptr;
+       int idx;
+       pte_t *pte;
+
+       if (vaddr < FIXADDR_START)
+               return virt_to_page(ptr);
+
+       idx = virt_to_fix(vaddr);
+       pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+       return pte_page(*pte);
+}
+
+/*
+ * kmap_init() - boot-time setup: cache the PTE of the first kmap fixmap
+ * slot so kmap_atomic() can index from it.
+ */
+void __init kmap_init(void)
+{
+       unsigned long kmap_vstart;
+
+       /* cache the first kmap pte */
+       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
new file mode 100644 (file)
index 0000000..3c52fa6
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ * arch/metag/mm/hugetlbpage.c
+ *
+ * METAG HugeTLB page support.
+ *
+ * Cloned from SuperH
+ *
+ * Cloned from sparc64 by Paul Mundt.
+ *
+ * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ *
+ * Also rejects ranges whose second-level page-table block
+ * (ALIGN_HUGEPT granularity) would be shared with a normal mapping.
+ */
+int prepare_hugepage_range(struct file *file, unsigned long addr,
+                                               unsigned long len)
+{
+       struct mm_struct *mm = current->mm;
+       struct hstate *h = hstate_file(file);
+       struct vm_area_struct *vma;
+
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+       if (addr & ~huge_page_mask(h))
+               return -EINVAL;
+       if (TASK_SIZE - len < addr)
+               return -EINVAL;
+
+       /* NOTE(review): tests vm_flags against the mmap flag MAP_HUGETLB
+        * rather than VM_HUGETLB — verify this is intentional.
+        */
+       vma = find_vma(mm, ALIGN_HUGEPT(addr));
+       if (vma && !(vma->vm_flags & MAP_HUGETLB))
+               return -EINVAL;
+
+       vma = find_vma(mm, addr);
+       if (vma) {
+               if (addr + len > vma->vm_start)
+                       return -EINVAL;
+               if (!(vma->vm_flags & MAP_HUGETLB) &&
+                   (ALIGN_HUGEPT(addr + len) > vma->vm_start))
+                       return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * huge_pte_alloc() - allocate/find the PTE for a huge mapping at @addr
+ * and mark the top-level entry with the huge page size bits.
+ */
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+                       unsigned long addr, unsigned long sz)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = pgd_offset(mm, addr);
+       pud = pud_offset(pgd, addr);
+       pmd = pmd_offset(pud, addr);
+       pte = pte_alloc_map(mm, NULL, pmd, addr);
+       /* Record the huge page size in the (folded) pgd entry */
+       pgd->pgd &= ~_PAGE_SZ_MASK;
+       pgd->pgd |= _PAGE_SZHUGE;
+
+       return pte;
+}
+
+/*
+ * huge_pte_offset() - walk the page tables to the PTE for @addr.
+ * Does not allocate; assumes the intermediate levels exist.
+ */
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte = NULL;
+
+       pgd = pgd_offset(mm, addr);
+       pud = pud_offset(pgd, addr);
+       pmd = pmd_offset(pud, addr);
+       pte = pte_offset_kernel(pmd, addr);
+
+       return pte;
+}
+
+/* PMD sharing is not supported on metag. */
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
+/* follow_huge_addr is unsupported; generic code falls back to the
+ * page-table walk.
+ */
+struct page *follow_huge_addr(struct mm_struct *mm,
+                             unsigned long address, int write)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+/* A pmd is "huge" when its page shift exceeds the base page shift. */
+int pmd_huge(pmd_t pmd)
+{
+       return pmd_page_shift(pmd) > PAGE_SHIFT;
+}
+
+/* No huge puds on metag. */
+int pud_huge(pud_t pud)
+{
+       return 0;
+}
+
+/* Not used; huge pages are found via the normal pte path. */
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+                            pmd_t *pmd, int write)
+{
+       return NULL;
+}
+
+#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
+/*
+ * Look for an unmapped area starting after another hugetlb vma.
+ * There are guaranteed to be no huge pte's spare if all the huge pages are
+ * full size (4MB), so in that case compile out this search.
+ *
+ * Returns 0 when no suitable gap is found.
+ */
+#if HPAGE_SHIFT == HUGEPT_SHIFT
+static inline unsigned long
+hugetlb_get_unmapped_area_existing(unsigned long len)
+{
+       return 0;
+}
+#else
+static unsigned long
+hugetlb_get_unmapped_area_existing(unsigned long len)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long start_addr, addr;
+       int after_huge;
+
+       /* Resume from a partially-filled huge page-table block if we
+        * remembered one; otherwise scan from the unmapped base.
+        */
+       if (mm->context.part_huge) {
+               start_addr = mm->context.part_huge;
+               after_huge = 1;
+       } else {
+               start_addr = TASK_UNMAPPED_BASE;
+               after_huge = 0;
+       }
+new_search:
+       addr = start_addr;
+
+       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+               if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
+                       /*
+                        * Start a new search - just in case we missed
+                        * some holes.
+                        */
+                       if (start_addr != TASK_UNMAPPED_BASE) {
+                               start_addr = TASK_UNMAPPED_BASE;
+                               goto new_search;
+                       }
+                       return 0;
+               }
+               /* skip ahead if we've aligned right over some vmas */
+               if (vma && vma->vm_end <= addr)
+                       continue;
+               /* space before the next vma? */
+               if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
+                           <= vma->vm_start)) {
+                       unsigned long end = addr + len;
+                       /* Remember a partially used huge PT block for next
+                        * time, or forget it once it's been filled exactly.
+                        */
+                       if (end & HUGEPT_MASK)
+                               mm->context.part_huge = end;
+                       else if (addr == mm->context.part_huge)
+                               mm->context.part_huge = 0;
+                       return addr;
+               }
+               if (vma && (vma->vm_flags & MAP_HUGETLB)) {
+                       /* space after a huge vma in 2nd level page table? */
+                       if (vma->vm_end & HUGEPT_MASK) {
+                               after_huge = 1;
+                               /* no need to align to the next PT block */
+                               addr = vma->vm_end;
+                               continue;
+                       }
+               }
+               after_huge = 0;
+               addr = ALIGN_HUGEPT(vma->vm_end);
+       }
+}
+#endif
+
+/* Do a full search to find an area without any nearby normal pages. */
+static unsigned long
+hugetlb_get_unmapped_area_new_pmd(unsigned long len)
+{
+       struct vm_unmapped_area_info info;
+
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = TASK_UNMAPPED_BASE;
+       info.high_limit = TASK_SIZE;
+       /* Align the result to a whole huge page-table block */
+       info.align_mask = PAGE_MASK & HUGEPT_MASK;
+       info.align_offset = 0;
+       return vm_unmapped_area(&info);
+}
+
+/*
+ * hugetlb_get_unmapped_area() - pick a huge-page-aligned free range.
+ *
+ * Tries, in order: the caller's MAP_FIXED/hinted address, a gap next to
+ * an existing hugetlb vma (to limit fragmentation), then a fresh
+ * naturally-aligned block of huge page-table entries.
+ */
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct hstate *h = hstate_file(file);
+
+       if (len & ~huge_page_mask(h))
+               return -EINVAL;
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (flags & MAP_FIXED) {
+               if (prepare_hugepage_range(file, addr, len))
+                       return -EINVAL;
+               return addr;
+       }
+
+       if (addr) {
+               addr = ALIGN(addr, huge_page_size(h));
+               if (!prepare_hugepage_range(file, addr, len))
+                       return addr;
+       }
+
+       /*
+        * Look for an existing hugetlb vma with space after it (this is to
+        * minimise fragmentation caused by huge pages).
+        */
+       addr = hugetlb_get_unmapped_area_existing(len);
+       if (addr)
+               return addr;
+
+       /*
+        * Find an unmapped naturally aligned set of 4MB blocks that we can use
+        * for huge pages.
+        */
+       return hugetlb_get_unmapped_area_new_pmd(len);
+}
+
+#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
+
+/* necessary for boot time 4MB huge page allocation */
+/*
+ * setup_hugepagesz() - parse the "hugepagesz=" kernel parameter.
+ * Only the single HPAGE_SHIFT size is supported on metag.
+ */
+static __init int setup_hugepagesz(char *opt)
+{
+       unsigned long ps = memparse(opt, &opt);
+       if (ps == (1 << HPAGE_SHIFT)) {
+               hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
+       } else {
+               pr_err("hugepagesz: Unsupported page size %lu M\n",
+                      ps >> 20);
+               return 0;
+       }
+       return 1;
+}
+__setup("hugepagesz=", setup_hugepagesz);
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
new file mode 100644 (file)
index 0000000..504a398
--- /dev/null
@@ -0,0 +1,451 @@
+/*
+ *  Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/pagemap.h>
+#include <linux/percpu.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/of_fdt.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/tlb.h>
+#include <asm/user_gateway.h>
+#include <asm/mmzone.h>
+#include <asm/fixmap.h>
+
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
+
+unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+extern char __user_gateway_start;
+extern char __user_gateway_end;
+
+void *gateway_page;
+
+/*
+ * Insert the gateway page into a set of page tables, creating the
+ * page tables if necessary.
+ *
+ * The pgd/pud entries must already be present; only the pmd level is
+ * allocated on demand (from bootmem), and the final pte maps the
+ * gateway page read-only.
+ */
+static void insert_gateway_page(pgd_t *pgd, unsigned long address)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       BUG_ON(!pgd_present(*pgd));
+
+       pud = pud_offset(pgd, address);
+       BUG_ON(!pud_present(*pud));
+
+       pmd = pmd_offset(pud, address);
+       if (!pmd_present(*pmd)) {
+               pte = alloc_bootmem_pages(PAGE_SIZE);
+               set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
+       }
+
+       pte = pte_offset_kernel(pmd, address);
+       set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
+}
+
+/* Alloc and map a page in a known location accessible to userspace. */
+/*
+ * Copies the gateway code (between __user_gateway_start/_end, at most one
+ * page) into a freshly allocated bootmem page and maps it read-only at
+ * USER_GATEWAY_PAGE in the reference page tables (and, on Meta 1, the
+ * live MMU tables too).
+ */
+static void __init user_gateway_init(void)
+{
+       unsigned long address = USER_GATEWAY_PAGE;
+       int offset = pgd_index(address);
+       pgd_t *pgd;
+
+       gateway_page = alloc_bootmem_pages(PAGE_SIZE);
+
+       pgd = swapper_pg_dir + offset;
+       insert_gateway_page(pgd, address);
+
+#ifdef CONFIG_METAG_META12
+       /*
+        * Insert the gateway page into our current page tables even
+        * though we've already inserted it into our reference page
+        * table (swapper_pg_dir). This is because with a META1 mmu we
+        * copy just the user address range and not the gateway page
+        * entry on context switch, see switch_mmu().
+        */
+       pgd = (pgd_t *)mmu_get_base() + offset;
+       insert_gateway_page(pgd, address);
+#endif /* CONFIG_METAG_META12 */
+
+       BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);
+
+       /* Honour any sub-page offset of USER_GATEWAY_PAGE in the copy */
+       gateway_page += (address & ~PAGE_MASK);
+
+       memcpy(gateway_page, &__user_gateway_start,
+              &__user_gateway_end - &__user_gateway_start);
+
+       /*
+        * We don't need to flush the TLB here, there should be no mapping
+        * present at boot for this address and only valid mappings are in
+        * the TLB (apart from on Meta 1.x, but those cached invalid
+        * mappings should be impossible to hit here).
+        *
+        * We don't flush the code cache here even though we have written
+        * code through the data cache and they may not be coherent. At
+        * this point we assume there is no stale data in the code cache
+        * for this address so there is no need to flush.
+        */
+}
+
+/*
+ * allocate_pgdat() - set up the pglist_data for node @nid.
+ *
+ * With CONFIG_NEED_MULTIPLE_NODES the structure itself is carved out of
+ * memblock (preferably from the node's own memory); otherwise the static
+ * NODE_DATA is just filled in with the node's pfn span.
+ */
+static void __init allocate_pgdat(unsigned int nid)
+{
+       unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       unsigned long phys;
+#endif
+
+       get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       phys = __memblock_alloc_base(sizeof(struct pglist_data),
+                               SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
+       /* Retry with all of system memory */
+       if (!phys)
+               phys = __memblock_alloc_base(sizeof(struct pglist_data),
+                                            SMP_CACHE_BYTES,
+                                            memblock_end_of_DRAM());
+       if (!phys)
+               panic("Can't allocate pgdat for node %d\n", nid);
+
+       NODE_DATA(nid) = __va(phys);
+       memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+       NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
+
+       NODE_DATA(nid)->node_start_pfn = start_pfn;
+       NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+/*
+ * bootmem_init_one_node() - create the bootmem bitmap for node @nid,
+ * hand it the node's free lowmem pages, and re-reserve everything
+ * memblock already holds (node 0 only for now).  Highmem pfns are
+ * excluded from bootmem entirely.
+ */
+static void __init bootmem_init_one_node(unsigned int nid)
+{
+       unsigned long total_pages, paddr;
+       unsigned long end_pfn;
+       struct pglist_data *p;
+
+       p = NODE_DATA(nid);
+
+       /* Nothing to do.. */
+       if (!p->node_spanned_pages)
+               return;
+
+       end_pfn = p->node_start_pfn + p->node_spanned_pages;
+#ifdef CONFIG_HIGHMEM
+       if (end_pfn > max_low_pfn)
+               end_pfn = max_low_pfn;
+#endif
+
+       total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);
+
+       paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+       if (!paddr)
+               panic("Can't allocate bootmap for nid[%d]\n", nid);
+
+       init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+
+       free_bootmem_with_active_regions(nid, end_pfn);
+
+       /*
+        * XXX Handle initial reservations for the system memory node
+        * only for the moment, we'll refactor this later for handling
+        * reservations in other nodes.
+        */
+       if (nid == 0) {
+               struct memblock_region *reg;
+
+               /* Reserve the sections we're already using. */
+               for_each_memblock(reserved, reg) {
+                       unsigned long size = reg->size;
+
+#ifdef CONFIG_HIGHMEM
+                       /* ...but not highmem */
+                       if (PFN_DOWN(reg->base) >= highstart_pfn)
+                               continue;
+
+                       /* Clip a region straddling the highmem boundary */
+                       if (PFN_UP(reg->base + size) > highstart_pfn)
+                               size = (highstart_pfn - PFN_DOWN(reg->base))
+                                      << PAGE_SHIFT;
+#endif
+
+                       reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
+               }
+       }
+
+       sparse_memory_present_with_active_regions(nid);
+}
+
+/*
+ * do_init_bootmem() - register memblock memory regions with node 0,
+ * bring the node online, and initialise bootmem and sparsemem for every
+ * online node.  soc_mem_setup() gives the platform a hook in between.
+ */
+static void __init do_init_bootmem(void)
+{
+       struct memblock_region *reg;
+       int i;
+
+       /* Add active regions with valid PFNs. */
+       for_each_memblock(memory, reg) {
+               unsigned long start_pfn, end_pfn;
+               start_pfn = memblock_region_memory_base_pfn(reg);
+               end_pfn = memblock_region_memory_end_pfn(reg);
+               memblock_set_node(PFN_PHYS(start_pfn),
+                                 PFN_PHYS(end_pfn - start_pfn), 0);
+       }
+
+       /* All of system RAM sits in node 0 for the non-NUMA case */
+       allocate_pgdat(0);
+       node_set_online(0);
+
+       soc_mem_setup();
+
+       for_each_online_node(i)
+               bootmem_init_one_node(i);
+
+       sparse_init();
+}
+
+extern char _heap_start[];
+
+/*
+ * init_and_reserve_mem() - register lowmem (and highmem) with memblock
+ * and reserve everything from the start of RAM up to the kernel heap
+ * (i.e. the kernel text/data image) so bootmem never hands it out.
+ */
+static void __init init_and_reserve_mem(void)
+{
+       unsigned long start_pfn, heap_start;
+       u64 base = min_low_pfn << PAGE_SHIFT;
+       u64 size = (max_low_pfn << PAGE_SHIFT) - base;
+
+       heap_start = (unsigned long) &_heap_start;
+
+       memblock_add(base, size);
+
+       /*
+        * Partially used pages are not usable - thus
+        * we are rounding upwards:
+        */
+       start_pfn = PFN_UP(__pa(heap_start));
+
+       /*
+        * Reserve the kernel text.
+        */
+       memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);
+
+#ifdef CONFIG_HIGHMEM
+       /*
+        * Add & reserve highmem, so page structures are initialised.
+        */
+       base = highstart_pfn << PAGE_SHIFT;
+       size = (highend_pfn << PAGE_SHIFT) - base;
+       if (size) {
+               memblock_add(base, size);
+               memblock_reserve(base, size);
+       }
+#endif
+}
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * Ensure we have allocated page tables in swapper_pg_dir for the
+ * fixed mappings range from 'start' to 'end'.
+ */
+static void __init allocate_pgtables(unsigned long start, unsigned long end)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       int i, j;
+       unsigned long vaddr;
+
+       vaddr = start;
+       i = pgd_index(vaddr);
+       j = pmd_index(vaddr);
+       pgd = swapper_pg_dir + i;
+
+       for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+               pmd = (pmd_t *)pgd; /* pmd level is folded into the pgd here */
+               for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
+                       vaddr += PMD_SIZE; /* callers pass PMD-aligned ranges, so vaddr lands exactly on end */
+
+                       if (!pmd_none(*pmd))
+                               continue; /* table already present */
+
+                       pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); /* bootmem pages come back zeroed */
+                       pmd_populate_kernel(&init_mm, pmd, pte);
+               }
+               j = 0;
+       }
+}
+
+static void __init fixedrange_init(void) /* pre-allocate page tables for fixmap and PKMAP ranges */
+{
+       unsigned long vaddr, end;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       /*
+        * Fixed mappings:
+        */
+       vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; /* lowest fixmap addr, PMD-aligned down */
+       end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
+       allocate_pgtables(vaddr, end);
+
+       /*
+        * Permanent kmaps:
+        */
+       vaddr = PKMAP_BASE;
+       allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);
+
+       pgd = swapper_pg_dir + pgd_index(vaddr);
+       pud = pud_offset(pgd, vaddr);
+       pmd = pmd_offset(pud, vaddr);
+       pte = pte_offset_kernel(pmd, vaddr);
+       pkmap_page_table = pte; /* remember the pte table backing the PKMAP window */
+}
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/metag/kernel/setup.c.
+ */
+void __init paging_init(unsigned long mem_end)
+{
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       int nid;
+
+       init_and_reserve_mem();
+
+       memblock_allow_resize();
+
+       memblock_dump_all();
+
+       nodes_clear(node_online_map); /* nodes are re-onlined in do_init_bootmem()/soc_mem_setup() */
+
+       init_new_context(&init_task, &init_mm);
+
+       memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+       do_init_bootmem();
+       mmu_init(mem_end); /* copies existing hardware MMU entries into swapper_pg_dir */
+
+#ifdef CONFIG_HIGHMEM
+       fixedrange_init();
+       kmap_init();
+#endif
+
+       /* Initialize the zero page to a bootmem page, already zeroed. */
+       empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+
+       user_gateway_init();
+
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+       for_each_online_node(nid) {
+               pg_data_t *pgdat = NODE_DATA(nid);
+               unsigned long low, start_pfn;
+
+               start_pfn = pgdat->bdata->node_min_pfn;
+               low = pgdat->bdata->node_low_pfn;
+
+               if (max_zone_pfns[ZONE_NORMAL] < low)
+                       max_zone_pfns[ZONE_NORMAL] = low; /* zone limit = highest low pfn over all nodes */
+
+#ifdef CONFIG_HIGHMEM
+               max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+#endif
+               pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
+                       nid, start_pfn, low);
+       }
+
+       free_area_init_nodes(max_zone_pfns);
+}
+
+void __init mem_init(void) /* release bootmem (and highmem) pages to the page allocator */
+{
+       int nid;
+
+#ifdef CONFIG_HIGHMEM
+       unsigned long tmp;
+       for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { /* highmem was memblock_reserved; hand each page over now */
+               struct page *page = pfn_to_page(tmp);
+               ClearPageReserved(page);
+               init_page_count(page);
+               __free_page(page);
+               totalhigh_pages++;
+       }
+       totalram_pages += totalhigh_pages;
+       num_physpages += totalhigh_pages;
+#endif /* CONFIG_HIGHMEM */
+
+       for_each_online_node(nid) {
+               pg_data_t *pgdat = NODE_DATA(nid);
+               unsigned long node_pages = 0;
+
+               num_physpages += pgdat->node_present_pages;
+
+               if (pgdat->node_spanned_pages)
+                       node_pages = free_all_bootmem_node(pgdat); /* returns pages released to the buddy allocator */
+
+               totalram_pages += node_pages;
+       }
+
+       pr_info("Memory: %luk/%luk available\n",
+               (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
+               num_physpages << (PAGE_SHIFT - 10));
+
+       show_mem(0);
+
+       return; /* NOTE(review): redundant return in a void function */
+}
+
+static void free_init_pages(char *what, unsigned long begin, unsigned long end) /* free [begin,end) page by page and log what was released */
+{
+       unsigned long addr;
+
+       for (addr = begin; addr < end; addr += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(addr));
+               init_page_count(virt_to_page(addr));
+               memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); /* poison to catch late users of init memory */
+               free_page(addr);
+               totalram_pages++;
+       }
+       pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+}
+
+void free_initmem(void) /* discard the __init sections once boot is complete */
+{
+       free_init_pages("unused kernel memory",
+                       (unsigned long)(&__init_begin),
+                       (unsigned long)(&__init_end));
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+       end = end & PAGE_MASK; /* round down so a partially-used final page is kept */
+       free_init_pages("initrd memory", start, end);
+}
+#endif
+
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+                                           unsigned long end)
+{
+       pr_err("%s(%lx, %lx)\n",
+              __func__, start, end); /* DT-supplied initrd is not wired up; only log the range */
+}
+#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/metag/mm/ioremap.c b/arch/metag/mm/ioremap.c
new file mode 100644 (file)
index 0000000..a136a43
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Re-map IO memory to kernel address space so that we can access it.
+ * Needed for memory-mapped I/O devices mapped outside our normal DRAM
+ * window (that is, all memory-mapped I/O devices).
+ *
+ * Copyright (C) 1995,1996 Linus Torvalds
+ *
+ * Meta port based on CRIS-port by Axis Communications AB
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *__ioremap(unsigned long phys_addr, size_t size,
+                       unsigned long flags)
+{
+       unsigned long addr;
+       struct vm_struct *area;
+       unsigned long offset, last_addr;
+       pgprot_t prot;
+
+       /* Don't allow wraparound or zero size */
+       last_addr = phys_addr + size - 1;
+       if (!size || last_addr < phys_addr)
+               return NULL;
+
+       /* Custom region addresses are accessible and uncached by default. */
+       if (phys_addr >= LINSYSCUSTOM_BASE &&
+           phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT))
+               return (__force void __iomem *) phys_addr; /* no mapping created; __iounmap knows to skip these */
+
+       /*
+        * Mappings have to be page-aligned
+        */
+       offset = phys_addr & ~PAGE_MASK; /* sub-page offset, re-applied on return */
+       phys_addr &= PAGE_MASK;
+       size = PAGE_ALIGN(last_addr+1) - phys_addr;
+       prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY |
+                       _PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 |
+                       flags); /* caller's flags (e.g. cacheability) OR'd on top of the kernel defaults */
+
+       /*
+        * Ok, go for it..
+        */
+       area = get_vm_area(size, VM_IOREMAP);
+       if (!area)
+               return NULL;
+       area->phys_addr = phys_addr;
+       addr = (unsigned long) area->addr;
+       if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
+               vunmap((void *) addr); /* releases the vm area grabbed above */
+               return NULL;
+       }
+       return (__force void __iomem *) (offset + (char *)addr);
+}
+EXPORT_SYMBOL(__ioremap);
+
+void __iounmap(void __iomem *addr) /* undo __ioremap(): tear down the vm area, if one was created */
+{
+       struct vm_struct *p;
+
+       if ((__force unsigned long)addr >= LINSYSCUSTOM_BASE &&
+           (__force unsigned long)addr < (LINSYSCUSTOM_BASE +
+                                          LINSYSCUSTOM_LIMIT))
+               return; /* custom-region addrs were returned unmapped by __ioremap */
+
+       p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr)); /* mask off the sub-page offset first */
+       if (unlikely(!p)) {
+               pr_err("iounmap: bad address %p\n", addr);
+               return;
+       }
+
+       kfree(p); /* remove_vm_area unmapped the pages; free the descriptor */
+}
+EXPORT_SYMBOL(__iounmap);
diff --git a/arch/metag/mm/l2cache.c b/arch/metag/mm/l2cache.c
new file mode 100644 (file)
index 0000000..c64ee61
--- /dev/null
@@ -0,0 +1,192 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include <asm/l2cache.h>
+#include <asm/metag_isa.h>
+
+/* If non-0, then initialise the L2 cache */
+static int l2cache_init = 1;
+/* If non-0, then initialise the L2 cache prefetch */
+static int l2cache_init_pf = 1;
+
+int l2c_pfenable;
+
+static volatile u32 l2c_testdata[16] __initdata __aligned(64);
+
+static int __init parse_l2cache(char *p) /* "l2cache=" cmdline: 0 disables L2 init */
+{
+       char *cp = p;
+
+       if (get_option(&cp, &l2cache_init) != 1) {
+               pr_err("Bad l2cache parameter (%s)\n", p);
+               return 1; /* non-zero flags the argument as bad to early_param handling */
+       }
+       return 0;
+}
+early_param("l2cache", parse_l2cache);
+
+static int __init parse_l2cache_pf(char *p) /* "l2cache_pf=" cmdline: 0 disables L2 prefetch init */
+{
+       char *cp = p;
+
+       if (get_option(&cp, &l2cache_init_pf) != 1) {
+               pr_err("Bad l2cache_pf parameter (%s)\n", p);
+               return 1;
+       }
+       return 0;
+}
+early_param("l2cache_pf", parse_l2cache_pf);
+
+static int __init meta_l2c_setup(void) /* boot-time L2 cache bring-up, honouring the l2cache/l2cache_pf params */
+{
+       /*
+        * If the L2 cache isn't even present, don't do anything, but say so in
+        * the log.
+        */
+       if (!meta_l2c_is_present()) {
+               pr_info("L2 Cache: Not present\n");
+               return 0;
+       }
+
+       /*
+        * Check whether the line size is recognised.
+        */
+       if (!meta_l2c_linesize()) {
+               pr_warn_once("L2 Cache: unknown line size id (config=0x%08x)\n",
+                            meta_l2c_config());
+       }
+
+       /*
+        * Initialise state.
+        */
+       l2c_pfenable = _meta_l2c_pf_is_enabled(); /* seed from whatever state the bootloader left */
+
+       /*
+        * Enable the L2 cache and print to log whether it was already enabled
+        * by the bootloader.
+        */
+       if (l2cache_init) {
+               pr_info("L2 Cache: Enabling... ");
+               if (meta_l2c_enable())
+                       pr_cont("already enabled\n");
+               else
+                       pr_cont("done\n");
+       } else {
+               pr_info("L2 Cache: Not enabling\n");
+       }
+
+       /*
+        * Enable L2 cache prefetch.
+        */
+       if (l2cache_init_pf) {
+               pr_info("L2 Cache: Enabling prefetch... ");
+               if (meta_l2c_pf_enable(1))
+                       pr_cont("already enabled\n");
+               else
+                       pr_cont("done\n");
+       } else {
+               pr_info("L2 Cache: Not enabling prefetch\n");
+       }
+
+       return 0;
+}
+core_initcall(meta_l2c_setup);
+
+int meta_l2c_disable(void) /* writeback+purge then disable the L2 */
+{
+       unsigned long flags;
+       int en;
+
+       if (!meta_l2c_is_present())
+               return 1;
+
+       /*
+        * Prevent other threads writing during the writeback, otherwise the
+        * writes will get "lost" when the L2 is disabled.
+        */
+       __global_lock2(flags);
+       en = meta_l2c_is_enabled();
+       if (likely(en)) {
+               _meta_l2c_pf_enable(0); /* stop prefetch before purging */
+               wr_fence();
+               _meta_l2c_purge();
+               _meta_l2c_enable(0);
+       }
+       __global_unlock2(flags);
+
+       return !en; /* 1 if it was already disabled, 0 if we just disabled it */
+}
+
+int meta_l2c_enable(void) /* initialise and enable the L2, restoring the saved prefetch setting */
+{
+       unsigned long flags;
+       int en;
+
+       if (!meta_l2c_is_present())
+               return 0;
+
+       /*
+        * Init (clearing the L2) can happen while the L2 is disabled, so other
+        * threads are safe to continue executing, however we must not init the
+        * cache if it's already enabled (dirty lines would be discarded), so
+        * this operation should still be atomic with other threads.
+        */
+       __global_lock1(flags);
+       en = meta_l2c_is_enabled();
+       if (likely(!en)) {
+               _meta_l2c_init();
+               _meta_l2c_enable(1);
+               _meta_l2c_pf_enable(l2c_pfenable);
+       }
+       __global_unlock1(flags);
+
+       return en; /* non-zero if it was already enabled */
+}
+
+int meta_l2c_pf_enable(int pfenable) /* set prefetch policy; applied to hw only if the L2 is enabled */
+{
+       unsigned long flags;
+       int en = l2c_pfenable; /* NOTE(review): initializer is overwritten under the lock below */
+
+       if (!meta_l2c_is_present())
+               return 0;
+
+       /*
+        * We read modify write the enable register, so this operation must be
+        * atomic with other threads.
+        */
+       __global_lock1(flags);
+       en = l2c_pfenable;
+       l2c_pfenable = pfenable;
+       if (meta_l2c_is_enabled())
+               _meta_l2c_pf_enable(pfenable);
+       __global_unlock1(flags);
+
+       return en; /* previous prefetch setting */
+}
+
+int meta_l2c_flush(void) /* full flush: purge while disabled, then re-init and re-enable */
+{
+       unsigned long flags;
+       int en;
+
+       /*
+        * Prevent other threads writing during the writeback. This also
+        * involves read modify writes.
+        */
+       __global_lock2(flags);
+       en = meta_l2c_is_enabled();
+       if (likely(en)) {
+               _meta_l2c_pf_enable(0);
+               wr_fence();
+               _meta_l2c_purge();
+               _meta_l2c_enable(0);
+               _meta_l2c_init(); /* clear contents while disabled, as in meta_l2c_enable() */
+               _meta_l2c_enable(1);
+               _meta_l2c_pf_enable(l2c_pfenable);
+       }
+       __global_unlock2(flags);
+
+       return !en; /* 1 if nothing was flushed (cache disabled), 0 on success */
+}
diff --git a/arch/metag/mm/maccess.c b/arch/metag/mm/maccess.c
new file mode 100644 (file)
index 0000000..eba2cfc
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * safe read and write memory routines callable while atomic
+ *
+ * Copyright 2012 Imagination Technologies
+ */
+
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+/*
+ * The generic probe_kernel_write() uses the user copy code which can split the
+ * writes if the source is unaligned, and repeats writes to make exceptions
+ * precise. We override it here to avoid these things happening to memory mapped
+ * IO memory where they could have undesired effects.
+ * Due to the use of CACHERD instruction this only works on Meta2 onwards.
+ */
+#ifdef CONFIG_METAG_META21
+long probe_kernel_write(void *dst, const void *src, size_t size)
+{
+       unsigned long ldst = (unsigned long)dst;
+       void __iomem *iodst = (void __iomem *)dst;
+       unsigned long lsrc = (unsigned long)src;
+       const u8 *psrc = (u8 *)src;
+       unsigned int pte, i;
+       u8 bounce[8] __aligned(8);
+
+       if (!size)
+               return 0;
+
+       /* Use the write combine bit to decide if the destination is MMIO. */
+       pte = __builtin_meta2_cacherd(dst); /* CACHERD reads the pte mapping dst (Meta2 only) */
+
+       /* Check the mapping is valid and writeable. */
+       if ((pte & (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
+           != (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
+               return -EFAULT;
+
+       /* Fall back to generic version for cases we're not interested in. */
+       if (pte & MMCU_ENTRY_WRC_BIT    || /* write combined memory */
+           (ldst & (size - 1))         || /* destination unaligned */
+           size > 8                    || /* more than max write size */
+           (size & (size - 1)))           /* non power of 2 size */
+               return __probe_kernel_write(dst, src, size);
+
+       /* If src is unaligned, copy to the aligned bounce buffer first. */
+       if (lsrc & (size - 1)) {
+               for (i = 0; i < size; ++i)
+                       bounce[i] = psrc[i]; /* byte copy avoids an unaligned wide read */
+               psrc = bounce;
+       }
+
+       switch (size) { /* single, naturally-aligned MMIO-safe write */
+       case 1:
+               writeb(*psrc, iodst);
+               break;
+       case 2:
+               writew(*(const u16 *)psrc, iodst);
+               break;
+       case 4:
+               writel(*(const u32 *)psrc, iodst);
+               break;
+       case 8:
+               writeq(*(const u64 *)psrc, iodst);
+               break;
+       }
+       return 0;
+}
+#endif
diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c
new file mode 100644 (file)
index 0000000..91f4255
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ *  Copyright (C) 2005,2006,2007,2008,2009 Imagination Technologies
+ *
+ * Meta 1 MMU handling code.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+
+#include <asm/mmu.h>
+
+#define DM3_BASE (LINSYSDIRECT_BASE + (MMCU_DIRECTMAPn_ADDR_SCALE * 3))
+
+/*
+ * This contains the physical address of the top level 2k pgd table.
+ */
+static unsigned long mmu_base_phys;
+
+/*
+ * Given a physical address, return a mapped virtual address that can be used
+ * to access that location.
+ * In practice, we use the DirectMap region to make this happen.
+ */
+static unsigned long map_addr(unsigned long phys)
+{
+       static unsigned long dm_base = 0xFFFFFFFF; /* impossible base: forces a remap on first call */
+       int offset;
+
+       offset = phys - dm_base;
+
+       /* Are we in the current map range ? */
+       if ((offset < 0) || (offset >= MMCU_DIRECTMAPn_ADDR_SCALE)) {
+               /* Calculate new DM area */
+               dm_base = phys & ~(MMCU_DIRECTMAPn_ADDR_SCALE - 1);
+
+               /* Actually map it in! */
+               metag_out32(dm_base, MMCU_DIRECTMAP3_ADDR); /* repoint DirectMap window 3 */
+
+               /* And calculate how far into that area our reference is */
+               offset = phys - dm_base;
+       }
+
+       return DM3_BASE + offset;
+}
+
+/*
+ * Return the physical address of the base of our pgd table.
+ */
+static inline unsigned long __get_mmu_base(void)
+{
+       unsigned long base_phys;
+       unsigned int stride;
+
+       if (is_global_space(PAGE_OFFSET))
+               stride = 4;     /* global table follows the four 0x800-byte per-thread tables */
+       else
+               stride = hard_processor_id();   /* [0..3] */
+
+       base_phys = metag_in32(MMCU_TABLE_PHYS_ADDR);
+       base_phys += (0x800 * stride); /* each table is 0x800 bytes */
+
+       return base_phys;
+}
+
+/* Given a virtual address, return the virtual address of the relevant pgd */
+static unsigned long pgd_entry_addr(unsigned long virt)
+{
+       unsigned long pgd_phys;
+       unsigned long pgd_virt;
+
+       if (!mmu_base_phys)
+               mmu_base_phys = __get_mmu_base(); /* lazily cache the table base */
+
+       /*
+        * Are we trying to map a global address.  If so, then index
+        * the global pgd table instead of our local one.
+        */
+       if (is_global_space(virt)) {
+               /* Scale into 2gig map */
+               virt &= ~0x80000000;
+       }
+
+       /* Base of the pgd table plus our 4Meg entry, 4bytes each */
+       pgd_phys = mmu_base_phys + ((virt >> PGDIR_SHIFT) * 4);
+
+       pgd_virt = map_addr(pgd_phys); /* reach the table through the DirectMap window */
+
+       return pgd_virt;
+}
+
+/* Given a virtual address, return the virtual address of the relevant pte */
+static unsigned long pgtable_entry_addr(unsigned long virt)
+{
+       unsigned long pgtable_phys;
+       unsigned long pgtable_virt, pte_virt;
+
+       /* Find the physical address of the 4MB page table*/
+       pgtable_phys = metag_in32(pgd_entry_addr(virt)) & MMCU_ENTRY_ADDR_BITS;
+
+       /* Map it to a virtual address */
+       pgtable_virt = map_addr(pgtable_phys);
+
+       /* And index into it for our pte */
+       pte_virt = pgtable_virt + ((virt >> PAGE_SHIFT) & 0x3FF) * 4; /* 1024 4-byte ptes per table */
+
+       return pte_virt;
+}
+
+unsigned long mmu_read_first_level_page(unsigned long vaddr)
+{
+       return metag_in32(pgd_entry_addr(vaddr)); /* raw hardware pgd entry for vaddr */
+}
+
+unsigned long mmu_read_second_level_page(unsigned long vaddr)
+{
+       return metag_in32(pgtable_entry_addr(vaddr)); /* raw hardware pte for vaddr */
+}
+
+unsigned long mmu_get_base(void)
+{
+       static unsigned long __base; /* cached after first call */
+
+       /* Find the base of our MMU pgd table */
+       if (!__base)
+               __base = pgd_entry_addr(0);
+
+       return __base;
+}
+
+void __init mmu_init(unsigned long mem_end)
+{
+       unsigned long entry, addr;
+       pgd_t *p_swapper_pg_dir;
+
+       /*
+        * Now copy over any MMU pgd entries already in the mmu page tables
+        * over to our root init process (swapper_pg_dir) map.  This map is
+        * then inherited by all other processes, which means all processes
+        * inherit a map of the kernel space.
+        */
+       addr = PAGE_OFFSET;
+       entry = pgd_index(PAGE_OFFSET);
+       p_swapper_pg_dir = pgd_offset_k(0) + entry;
+
+       while (addr <= META_MEMORY_LIMIT) {
+               unsigned long pgd_entry;
+               /* copy over the current MMU value */
+               pgd_entry = mmu_read_first_level_page(addr);
+               pgd_val(*p_swapper_pg_dir) = pgd_entry;
+
+               p_swapper_pg_dir++;
+               addr += PGDIR_SIZE;
+               entry++; /* NOTE(review): entry is never read after this point */
+       }
+}
diff --git a/arch/metag/mm/mmu-meta2.c b/arch/metag/mm/mmu-meta2.c
new file mode 100644 (file)
index 0000000..81dcbb0
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
+ *
+ * Meta 2 enhanced mode MMU handling code.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/bootmem.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+
+unsigned long mmu_read_first_level_page(unsigned long vaddr)
+{
+       unsigned int cpu = hard_processor_id();
+       unsigned long offset, linear_base, linear_limit;
+       unsigned int phys0;
+       pgd_t *pgd, entry;
+
+       if (is_global_space(vaddr))
+               vaddr &= ~0x80000000; /* fold global addresses into the lower 2GB */
+
+       offset = vaddr >> PGDIR_SHIFT;
+
+       phys0 = metag_in32(mmu_phys0_addr(cpu));
+
+       /* Top bit of linear base is always zero. */
+       linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;
+
+       /* Limit in the range 0 (4MB) to 9 (2GB). */
+       linear_limit = 1 << ((phys0 >> 8) & 0xf);
+       linear_limit += linear_base;
+
+       /*
+        * If offset is below linear base or above the limit then no
+        * mapping exists.
+        */
+       if (offset < linear_base || offset > linear_limit) /* NOTE(review): offset == linear_limit passes — confirm this isn't off by one */
+               return 0;
+
+       offset -= linear_base;
+       pgd = (pgd_t *)mmu_get_base();
+       entry = pgd[offset];
+
+       return pgd_val(entry);
+}
+
+unsigned long mmu_read_second_level_page(unsigned long vaddr)
+{
+       return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK)); /* CACHERD on the page base yields its pte */
+}
+
+unsigned long mmu_get_base(void)
+{
+       unsigned int cpu = hard_processor_id();
+       unsigned long stride;
+
+       stride = cpu * LINSYSMEMTnX_STRIDE; /* per-thread table spacing */
+
+       /*
+        * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
+        * used as an offset to the start of the top-level pgd table.
+        */
+       stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);
+
+       if (is_global_space(PAGE_OFFSET))
+               stride += LINSYSMEMTXG_OFFSET; /* kernel in global space: use the global tables */
+
+       return LINSYSMEMT0L_BASE + stride;
+}
+
+#define FIRST_LEVEL_MASK       0xffffffc0
+#define SECOND_LEVEL_MASK      0xfffff000
+#define SECOND_LEVEL_ALIGN     64
+
+static void repriv_mmu_tables(void)
+{
+       unsigned long phys0_addr;
+       unsigned int g;
+
+       /*
+        * Check that all the mmu table regions are priv protected, and if not
+        * fix them and emit a warning. If we left them without priv protection
+        * then userland processes would have access to a 2M window into
+        * physical memory near where the page tables are.
+        */
+       phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;
+       for (g = 0; g < 2; ++g) { /* pass 0: local tables, pass 1: global tables */
+               unsigned int t, phys0;
+               unsigned long flags;
+               for (t = 0; t < 4; ++t) { /* one table region per hardware thread */
+                       __global_lock2(flags); /* read-modify-write of phys0 must be atomic across threads */
+                       phys0 = metag_in32(phys0_addr);
+                       if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
+                               pr_warn("Fixing priv protection on T%d %s MMU table region\n",
+                                       t,
+                                       g ? "global" : "local");
+                               phys0 |= _PAGE_PRIV;
+                               metag_out32(phys0, phys0_addr);
+                       }
+                       __global_unlock2(flags);
+
+                       phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
+               }
+
+               phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
+                           - 4*MMCU_TnX_TABLE_PHYSX_STRIDE; /* rewind the 4 thread steps, jump to global regs */
+       }
+}
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+static void mmu_resume(void)
+{
+       /*
+        * If a full suspend to RAM has happened then the original bad MMU table
+        * priv may have been restored, so repriv them again.
+        */
+       repriv_mmu_tables();
+}
+#else
+#define mmu_resume NULL
+#endif /* CONFIG_METAG_SUSPEND_MEM */
+
+static struct syscore_ops mmu_syscore_ops = {
+       .resume  = mmu_resume, /* NULL unless CONFIG_METAG_SUSPEND_MEM */
+};
+
+void __init mmu_init(unsigned long mem_end)
+{
+       unsigned long entry, addr;
+       pgd_t *p_swapper_pg_dir;
+#ifdef CONFIG_KERNEL_4M_PAGES
+       unsigned long mem_size = mem_end - PAGE_OFFSET;
+       unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22); /* number of 4MB pages covering the kernel */
+       unsigned int second_level_entry = 0;
+       unsigned long *second_level_table;
+#endif
+
+       /*
+        * Now copy over any MMU pgd entries already in the mmu page tables
+        * over to our root init process (swapper_pg_dir) map.  This map is
+        * then inherited by all other processes, which means all processes
+        * inherit a map of the kernel space.
+        */
+       addr = META_MEMORY_BASE;
+       entry = pgd_index(META_MEMORY_BASE);
+       p_swapper_pg_dir = pgd_offset_k(0) + entry;
+
+       while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) { /* NOTE(review): confirm bound shouldn't simply be PTRS_PER_PGD */
+               unsigned long pgd_entry;
+               /* copy over the current MMU value */
+               pgd_entry = mmu_read_first_level_page(addr);
+               pgd_val(*p_swapper_pg_dir) = pgd_entry;
+
+               p_swapper_pg_dir++;
+               addr += PGDIR_SIZE;
+               entry++;
+       }
+
+#ifdef CONFIG_KERNEL_4M_PAGES
+       /*
+        * At this point we can also map the kernel with 4MB pages to
+        * reduce TLB pressure.
+        */
+       second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages); /* one 64-byte-aligned pte slot per 4MB page */
+
+       addr = PAGE_OFFSET;
+       entry = pgd_index(PAGE_OFFSET);
+       p_swapper_pg_dir = pgd_offset_k(0) + entry;
+
+       while (pages > 0) {
+               unsigned long phys_addr, second_level_phys;
+               pte_t *pte = (pte_t *)&second_level_table[second_level_entry];
+
+               phys_addr = __pa(addr);
+
+               second_level_phys = __pa(pte);
+
+               pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
+                                              FIRST_LEVEL_MASK) |
+                                             _PAGE_SZ_4M |
+                                             _PAGE_PRESENT);
+
+               pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
+                                _PAGE_PRESENT | _PAGE_DIRTY |
+                                _PAGE_ACCESSED | _PAGE_WRITE |
+                                _PAGE_CACHEABLE | _PAGE_KERNEL);
+
+               p_swapper_pg_dir++;
+               addr += PGDIR_SIZE;
+               /* Second level pages must be 64byte aligned. */
+               second_level_entry += (SECOND_LEVEL_ALIGN /
+                                      sizeof(unsigned long));
+               pages--;
+       }
+       load_pgd(swapper_pg_dir, hard_processor_id()); /* switch to the new tables... */
+       flush_tlb_all(); /* ...and drop any stale TLB entries */
+#endif
+
+       repriv_mmu_tables();
+       register_syscore_ops(&mmu_syscore_ops); /* re-apply priv fix after resume */
+}
diff --git a/arch/metag/mm/numa.c b/arch/metag/mm/numa.c
new file mode 100644 (file)
index 0000000..9ae578c
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ *  Multiple memory node support for Meta machines
+ *
+ *  Copyright (C) 2007  Paul Mundt
+ *  Copyright (C) 2010  Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/pfn.h>
+#include <asm/sections.h>
+
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL_GPL(node_data);
+
+extern char _heap_start[];
+
+/*
+ * On Meta machines the conventional approach is to stash system RAM
+ * in node 0, and other memory blocks in to node 1 and up, ordered by
+ * latency. Each node's pgdat is node-local at the beginning of the node,
+ * immediately followed by the node mem map.
+ */
+void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
+{
+       unsigned long bootmap_pages, bootmem_paddr;
+       unsigned long start_pfn, end_pfn;
+       unsigned long pgdat_paddr;
+
+       /* Don't allow bogus node assignment */
+       BUG_ON(nid > MAX_NUMNODES || nid <= 0); /* NOTE(review): nid == MAX_NUMNODES passes — should this be >= ? */
+
+       start_pfn = start >> PAGE_SHIFT;
+       end_pfn = end >> PAGE_SHIFT;
+
+       memblock_add(start, end - start);
+
+       memblock_set_node(PFN_PHYS(start_pfn),
+                         PFN_PHYS(end_pfn - start_pfn), nid);
+
+       /* Node-local pgdat */
+       pgdat_paddr = memblock_alloc_base(sizeof(struct pglist_data),
+                                         SMP_CACHE_BYTES, end); /* allocate below 'end' so it stays node-local */
+       NODE_DATA(nid) = __va(pgdat_paddr);
+       memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+       NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+       NODE_DATA(nid)->node_start_pfn = start_pfn;
+       NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+
+       /* Node-local bootmap */
+       bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+       bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
+                                           PAGE_SIZE, end);
+       init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+                         start_pfn, end_pfn);
+
+       free_bootmem_with_active_regions(nid, end_pfn);
+
+       /* Reserve the pgdat and bootmap space with the bootmem allocator */
+       reserve_bootmem_node(NODE_DATA(nid), pgdat_paddr & PAGE_MASK,
+                            sizeof(struct pglist_data), BOOTMEM_DEFAULT);
+       reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
+                            bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
+
+       /* It's up */
+       node_set_online(nid);
+
+       /* Kick sparsemem */
+       sparse_memory_present_with_active_regions(nid);
+}
+
+void __init __weak soc_mem_setup(void) /* weak default: no SoC-specific memory nodes to add */
+{
+}
diff --git a/arch/metag/tbx/Makefile b/arch/metag/tbx/Makefile
new file mode 100644 (file)
index 0000000..e994239
--- /dev/null
@@ -0,0 +1,21 @@
+#
+# Makefile for TBX library files.
+#
+
+asflags-y              += -mmetac=2.1 -Wa,-mfpu=metac21 -mdsp
+asflags-$(CONFIG_SMP)  += -DTBX_PERCPU_SP_SAVE
+
+ccflags-y              += -mmetac=2.1
+
+lib-y                  += tbicore.o
+lib-y                  += tbictx.o
+lib-y                  += tbidefr.o
+lib-y                  += tbilogf.o
+lib-y                  += tbipcx.o
+lib-y                  += tbiroot.o
+lib-y                  += tbisoft.o
+lib-y                  += tbistring.o
+lib-y                  += tbitimer.o
+
+lib-$(CONFIG_METAG_DSP)        += tbidspram.o
+lib-$(CONFIG_METAG_FPU)        += tbictxfpu.o
diff --git a/arch/metag/tbx/tbicore.S b/arch/metag/tbx/tbicore.S
new file mode 100644 (file)
index 0000000..a0838eb
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * tbicore.S
+ *
+ * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Core functions needed to support use of the thread binary interface for META
+ * processors
+ */
+
+       .file   "tbicore.S"
+/* Get data structures and defines from the TBI C header */
+#include <asm/metag_mem.h>
+#include <asm/metag_regs.h>
+#include <asm/tbx.h>
+
+       .data
+       .balign 8
+       .global ___pTBISegs
+       .type   ___pTBISegs,object
+___pTBISegs:
+       .quad   0               /* Segment list pointer with its */
+       .size   ___pTBISegs,.-___pTBISegs
+                                       /* own id or spin-lock location */
+/*
+ * Return ___pTBISegs value specific to privilege level - not very complicated
+ * at the moment
+ *
+ * Register Usage: D0Re0 is the result, D1Re0 is used as a scratch
+ */
+       .text
+       .balign 4
+       .global ___TBISegList
+       .type   ___TBISegList,function
+___TBISegList:
+       MOVT    A1LbP,#HI(___pTBISegs)
+       ADD     A1LbP,A1LbP,#LO(___pTBISegs)
+       GETL    D0Re0,D1Re0,[A1LbP]
+       MOV     PC,D1RtP
+       .size   ___TBISegList,.-___TBISegList
+
+/*
+ * Search the segment list for a match given Id, pStart can be NULL
+ *
+ * Register Usage: D1Ar1 is pSeg, D0Ar2 is Id, D0Re0 is the result
+ *                 D0Ar4, D1Ar3 are used as a scratch
+ *                 NB: The PSTAT bit of Id in D0Ar2 may be toggled
+ */
+       .text
+       .balign 4
+       .global ___TBIFindSeg
+       .type   ___TBIFindSeg,function
+___TBIFindSeg:
+       MOVT    A1LbP,#HI(___pTBISegs)
+       ADD     A1LbP,A1LbP,#LO(___pTBISegs)
+       GETL    D1Ar3,D0Ar4,[A1LbP]     /* Read segment list head */
+       MOV     D0Re0,TXSTATUS          /* What priv level are we at? */
+       CMP     D1Ar1,#0                /* Is pStart provided? */
+/* Disable privilege adaption for now */
+       ANDT    D0Re0,D0Re0,#0  /*HI(TXSTATUS_PSTAT_BIT)  ; Is PSTAT set? Zero if not */
+       LSL     D0Re0,D0Re0,#(TBID_PSTAT_S-TXSTATUS_PSTAT_S)
+       XOR     D0Ar2,D0Ar2,D0Re0       /* Toggle Id PSTAT if privileged */
+       MOVNZ   D1Ar3,D1Ar1             /* Use pStart if provided */
+$LFindSegLoop:                 
+       ADDS    D0Re0,D1Ar3,#0          /* End of list? Load result into D0Re0 */
+       MOVZ    PC,D1RtP                /* If result is NULL we leave */
+       GETL    D1Ar3,D0Ar4,[D1Ar3]     /* Read pLink and Id */
+       CMP     D0Ar4,D0Ar2             /* Does it match? */
+       BNZ     $LFindSegLoop           /* Loop if there is no match */
+       TST     D0Re0,D0Re0             /* Clear zero flag - we found it! */
+       MOV     PC,D1RtP                /* Return */
+       .size   ___TBIFindSeg,.-___TBIFindSeg
+
+/* Useful offsets to encode the lower bits of the lock/unlock addresses */
+#define UON  (LINSYSEVENT_WR_ATOMIC_LOCK   & 0xFFF8)
+#define UOFF (LINSYSEVENT_WR_ATOMIC_UNLOCK & 0xFFF8)
+
+/*
+ * Perform a whole spin-lock sequence as used by the TBISignal routine
+ *
+ * Register Usage: D1Ar1 is pLock, D0Ar2 is Mask, D0Re0 is the result
+ *                 (All other usage due to ___TBIPoll - D0Ar6, D1Re0)
+ */
+       .text
+       .balign 4
+       .global ___TBISpin
+       .type   ___TBISpin,function
+___TBISpin:
+       SETL    [A0StP++],D0FrT,D1RtP   /* Save our return address */
+       ORS     D0Re0,D0Re0,#1          /* Clear zero flag */
+       MOV     D1RtP,PC                /* Setup return address to form loop */
+$LSpinLoop:
+       BNZ     ___TBIPoll              /* Keep repeating if fail to set */
+       GETL    D0FrT,D1RtP,[--A0StP]   /* Restore return address */
+       MOV     PC,D1RtP                /* Return */
+       .size   ___TBISpin,.-___TBISpin
+
+/*
+ * Perform an attempt to gain access to a spin-lock and set some bits
+ * 
+ * Register Usage: D1Ar1 is pLock, D0Ar2 is Mask, D0Re0 is the result
+ *                 !!On return Zero flag is SET if we are successful!!
+ *                 A0.3 is used to hold base address of system event region
+ *                 D1Re0 use to hold TXMASKI while interrupts are off
+ */
+       .text
+       .balign 4
+       .global ___TBIPoll
+       .type   ___TBIPoll,function
+___TBIPoll:
+       MOV     D1Re0,#0                /* Prepare to disable ints */
+       MOVT    A0.3,#HI(LINSYSEVENT_WR_ATOMIC_LOCK)
+       SWAP    D1Re0,TXMASKI           /* Really stop ints */
+       LOCK2                           /* Gain all locks */
+       SET     [A0.3+#UON],D1RtP       /* Stop shared memory access too */
+       DCACHE  [D1Ar1],A0.3            /* Flush Cache line */
+       GETD    D0Re0,[D1Ar1]           /* Get new state from memory or hit */
+       DCACHE  [D1Ar1],A0.3            /* Flush Cache line */
+       GETD    D0Re0,[D1Ar1]           /* Get current state */
+       TST     D0Re0,D0Ar2             /* Are we clear to send? */
+       ORZ     D0Re0,D0Re0,D0Ar2       /* Yes: So set bits and */
+       SETDZ   [D1Ar1],D0Re0           /*      transmit new state */
+       SET     [A0.3+#UOFF],D1RtP      /* Allow shared memory access */
+       LOCK0                           /* Release all locks */
+       MOV     TXMASKI,D1Re0           /* Allow ints */
+$LPollEnd:
+       XORNZ   D0Re0,D0Re0,D0Re0       /* No: Generate zero result */
+       MOV     PC,D1RtP                /* Return (NZ indicates failure) */
+       .size   ___TBIPoll,.-___TBIPoll
+
+/*
+ * End of tbicore.S
+ */
diff --git a/arch/metag/tbx/tbictx.S b/arch/metag/tbx/tbictx.S
new file mode 100644 (file)
index 0000000..19af983
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * tbictx.S
+ *
+ * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Explicit state save and restore routines forming part of the thread binary
+ * interface for META processors
+ */
+
+       .file   "tbictx.S"
+#include <asm/metag_regs.h>
+#include <asm/tbx.h>
+
+#ifdef METAC_1_0
+/* Ax.4 is NOT saved in XAX3 */
+#define A0_4
+#else
+/* Ax.4 is saved in XAX4 */
+#define A0_4 A0.4,
+#endif
+
+
+/* Size of the TBICTX structure */
+#define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX)
+
+/*
+ * TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask )
+ */
+       .text
+       .balign 4
+       .global ___TBINestInts
+       .type   ___TBINestInts,function
+___TBINestInts:
+       XOR     D0Ar4,D0Ar4,#-1                 /* D0Ar4 = ~TrigBit */
+       AND     D0Ar4,D0Ar4,#0xFFFF             /* D0Ar4 &= 0xFFFF */
+       MOV     D0Ar6,TXMASKI                   /* BGNDHALT currently enabled? */
+       TSTT    D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XCBF_BIT
+       AND     D0Ar4,D0Ar2,D0Ar4               /* D0Ar4 = Ints to allow */
+       XOR     D0Ar2,D0Ar2,D0Ar4               /* Less Ints in TrigMask */
+       BNZ     ___TBINestInts2                 /* Jump if ctx save required! */
+       TSTT    D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT  /* Is catch state dirty? */
+       OR      D0Ar4,D0Ar4,D0Ar6               /* Or in TXMASKI BGNDHALT if set */
+       TSTNZ   D0Ar4,D0Ar4                     /* Yes: AND triggers enabled */
+       MOV     D0Re0,D0Ar2                     /* Update State argument */
+       MOV     D1Re0,D1Ar1                     /*  with less Ints in TrigMask */
+       MOVZ    TXMASKI,D0Ar4                   /* Early return: Enable Ints */
+       MOVZ    PC,D1RtP                        /* Early return */
+       .size   ___TBINestInts,.-___TBINestInts
+/*
+ * Fall through into sub-function:
+ */
+       .global ___TBINestInts2
+       .type   ___TBINestInts2,function
+___TBINestInts2:
+       MOV     D0FrT,A0FrP                     /* Full entry sequence so we */
+       ADD     A0FrP,A0StP,#0                  /*     can make sub-calls */
+       MSETL   [A0StP],D0FrT,D0.5,D0.6         /*     and preserve our result */
+       ORT     D0Ar2,D0Ar2,#TBICTX_XCBF_BIT    /* Add in XCBF save request */
+       MOV     D0.5,D0Ar2                      /* Save State in DX.5 */
+       MOV     D1.5,D1Ar1
+       OR      D0.6,D0Ar4,D0Ar6                /* Save TrigMask in D0.6 */
+       MOVT    D1RtP,#HI(___TBICtxSave)        /* Save catch buffer */
+       CALL    D1RtP,#LO(___TBICtxSave)
+       MOV     TXMASKI,D0.6                    /* Allow Ints */
+       MOV     D0Re0,D0.5                      /* Return State */
+       MOV     D1Re0,D1.5
+       MGETL   D0FrT,D0.5,D0.6,[A0FrP]         /* Full exit sequence */
+       SUB     A0StP,A0FrP,#(8*3)
+       MOV     A0FrP,D0FrT
+       MOV     PC,D1RtP
+       .size   ___TBINestInts2,.-___TBINestInts2
+
+/*
+ * void *__TBICtxSave( TBIRES State, void *pExt )
+ *
+ *       D0Ar2 contains TBICTX_*_BIT values that control what
+ *          extended data is to be saved beyond the end of D1Ar1.
+ *       These bits must be ored into the SaveMask of this structure.
+ *
+ *       Virtually all possible scratch registers are used.
+ *
+ *       The D1Ar1 parameter is only used as the basis for saving
+ *       CBUF state.
+ */
+/*
+ *       If TBICTX_XEXT_BIT is specified in State, then State.pCtx->Ext is
+ *       utilised to save the base address of the context save area and
+ *       the extended states saved. The XEXT flag then indicates that the
+ *       original state of the A0.2 and A1.2 registers from TBICTX.Ext.AX2
+ *       are stored as the first part of the extended state structure.
+ */
+       .balign 4
+       .global ___TBICtxSave
+       .type   ___TBICtxSave,function
+___TBICtxSave:
+       GETD    D0Re0,[D1Ar1+#TBICTX_SaveMask-2]        /* Get SaveMask */
+       TSTT    D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT
+                                               /* Just XCBF to save? */
+       MOV     A0.2,D1Ar3                      /* Save pointer into A0.2 */
+       MOV     A1.2,D1RtP                      /* Free off D0FrT:D1RtP pair */
+       BZ      $LCtxSaveCBUF                   /* Yes: Only XCBF may be saved */
+       TSTT    D0Ar2,#TBICTX_XEXT_BIT          /* Extended base-state model? */
+       BZ      $LCtxSaveXDX8
+       GETL    D0Ar6,D1Ar5,[D1Ar1+#TBICTX_Ext_AX2]     /* Get A0.2, A1.2 state */
+       MOV     D0Ar4,D0Ar2                     /* Extract Ctx.SaveFlags value */
+       ANDMT   D0Ar4,D0Ar4,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT
+       SETD    [D1Ar1+#TBICTX_Ext_Ctx_pExt],A0.2
+       SETD    [D1Ar1+#TBICTX_Ext_Ctx_SaveMask-2],D0Ar4
+       SETL    [A0.2++],D0Ar6,D1Ar5            /* Save A0.2, A1.2 state */
+$LCtxSaveXDX8:
+       TSTT    D0Ar2,#TBICTX_XDX8_BIT          /* Save extended DX regs? */
+       BZ      $LCtxSaveXAXX
+/*
+ * Save 8 extra DX registers
+ */
+       MSETL   [A0.2],D0.8,D0.9,D0.10,D0.11,D0.12,D0.13,D0.14,D0.15
+$LCtxSaveXAXX:
+       TSTT    D0Ar2,#TBICTX_XAXX_BIT          /* Save extended AX regs? */
+       SWAP    D0Re0,A0.2                      /* pDst into D0Re0 */
+       BZ      $LCtxSaveXHL2
+/*
+ * Save 4 extra AX registers
+ */
+       MSETL   [D0Re0], A0_4 A0.5,A0.6,A0.7    /* Save 8*3 bytes */
+$LCtxSaveXHL2:
+       TSTT    D0Ar2,#TBICTX_XHL2_BIT          /* Save hardware-loop regs? */
+       SWAP    D0Re0,A0.2                      /* pDst back into A0.2 */
+       MOV     D0Ar6,TXL1START
+       MOV     D1Ar5,TXL2START
+       BZ      $LCtxSaveXTDP
+/*
+ * Save hardware loop registers
+ */
+       SETL    [A0.2++],D0Ar6,D1Ar5            /* Save 8*1 bytes */
+       MOV     D0Ar6,TXL1END
+       MOV     D1Ar5,TXL2END
+       MOV     D0FrT,TXL1COUNT
+       MOV     D1RtP,TXL2COUNT
+       MSETL   [A0.2],D0Ar6,D0FrT              /* Save 8*2 bytes */
+/*
+ * Clear loop counters to disable any current loops
+ */
+       XOR     TXL1COUNT,D0FrT,D0FrT
+       XOR     TXL2COUNT,D1RtP,D1RtP
+$LCtxSaveXTDP:
+       TSTT    D0Ar2,#TBICTX_XTDP_BIT          /* Save per-thread DSP regs? */
+       BZ      $LCtxSaveCBUF
+/*
+ * Save per-thread DSP registers; ACC.0, PR.0, PI.1-3 (PI.0 is zero)
+ */
+#ifndef CTX_NO_DSP
+D      SETL    [A0.2++],AC0.0,AC1.0            /* Save ACx.0 lower 32-bits */
+DH     SETL    [A0.2++],AC0.0,AC1.0            /* Save ACx.0 upper 32-bits */
+D      SETL    [A0.2++],D0AR.0,D1AR.0          /* Save DSP RAM registers */
+D      SETL    [A0.2++],D0AR.1,D1AR.1
+D      SETL    [A0.2++],D0AW.0,D1AW.0
+D      SETL    [A0.2++],D0AW.1,D1AW.1
+D      SETL    [A0.2++],D0BR.0,D1BR.0
+D      SETL    [A0.2++],D0BR.1,D1BR.1
+D      SETL    [A0.2++],D0BW.0,D1BW.0
+D      SETL    [A0.2++],D0BW.1,D1BW.1
+D      SETL    [A0.2++],D0ARI.0,D1ARI.0
+D      SETL    [A0.2++],D0ARI.1,D1ARI.1
+D      SETL    [A0.2++],D0AWI.0,D1AWI.0
+D      SETL    [A0.2++],D0AWI.1,D1AWI.1
+D      SETL    [A0.2++],D0BRI.0,D1BRI.0
+D      SETL    [A0.2++],D0BRI.1,D1BRI.1
+D      SETL    [A0.2++],D0BWI.0,D1BWI.0
+D      SETL    [A0.2++],D0BWI.1,D1BWI.1
+D      SETD    [A0.2++],T0
+D      SETD    [A0.2++],T1
+D      SETD    [A0.2++],T2
+D      SETD    [A0.2++],T3
+D      SETD    [A0.2++],T4
+D      SETD    [A0.2++],T5
+D      SETD    [A0.2++],T6
+D      SETD    [A0.2++],T7
+D      SETD    [A0.2++],T8
+D      SETD    [A0.2++],T9
+D      SETD    [A0.2++],TA
+D      SETD    [A0.2++],TB
+D      SETD    [A0.2++],TC
+D      SETD    [A0.2++],TD
+D      SETD    [A0.2++],TE
+D      SETD    [A0.2++],TF
+#else
+       ADD     A0.2,A0.2,#(8*18+4*16)
+#endif
+       MOV     D0Ar6,TXMRSIZE
+       MOV     D1Ar5,TXDRSIZE
+       SETL    [A0.2++],D0Ar6,D1Ar5            /* Save 8*1 bytes */
+       
+$LCtxSaveCBUF:
+#ifdef TBI_1_3
+       MOV     D0Ar4,D0Re0                     /* Copy Ctx Flags */
+       ANDT    D0Ar4,D0Ar4,#TBICTX_XCBF_BIT    /*   mask XCBF if already set */
+       XOR     D0Ar4,D0Ar4,#-1
+       AND     D0Ar2,D0Ar2,D0Ar4               /*   remove XCBF if already set */
+#endif
+       TSTT    D0Ar2,#TBICTX_XCBF_BIT          /* Want to save CBUF? */
+       ANDT    D0Ar2,D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT
+       OR      D0Ar2,D0Ar2,D0Re0               /* Generate new SaveMask */
+       SETD    [D1Ar1+#TBICTX_SaveMask-2],D0Ar2/* Add in bits saved to TBICTX */
+       MOV     D0Re0,A0.2                      /* Return end of save area */
+       MOV     D0Ar4,TXDIVTIME                 /* Get TXDIVTIME */
+       MOVZ    PC,A1.2                         /* No: Early return */
+       TSTT    D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT  /* Need to save CBUF? */
+       MOVZ    PC,A1.2                         /* No: Early return */
+       ORT     D0Ar2,D0Ar2,#TBICTX_XCBF_BIT
+       SETD    [D1Ar1+#TBICTX_SaveMask-2],D0Ar2/* Add in XCBF bit to TBICTX */
+       ADD     A0.2,D1Ar1,#TBICTX_BYTES        /* Dump CBUF state after TBICTX */
+/*
+ * Save CBUF
+ */
+       SETD    [A0.2+# 0],TXCATCH0             /* Restore TXCATCHn */
+       SETD    [A0.2+# 4],TXCATCH1
+       TSTT    D0Ar2,#TBICTX_CBRP_BIT          /* ... RDDIRTY was/is set */
+       SETD    [A0.2+# 8],TXCATCH2
+       SETD    [A0.2+#12],TXCATCH3
+       BZ      $LCtxSaveComplete
+       SETL    [A0.2+#(2*8)],RD                /* Save read pipeline */
+       SETL    [A0.2+#(3*8)],RD                /* Save read pipeline */
+       SETL    [A0.2+#(4*8)],RD                /* Save read pipeline */
+       SETL    [A0.2+#(5*8)],RD                /* Save read pipeline */
+       SETL    [A0.2+#(6*8)],RD                /* Save read pipeline */
+       SETL    [A0.2+#(7*8)],RD                /* Save read pipeline */
+       AND     TXDIVTIME,D0Ar4,#TXDIVTIME_DIV_BITS /* Clear RPDIRTY */
+$LCtxSaveComplete:
+       MOV     PC,A1.2                         /* Return */
+       .size   ___TBICtxSave,.-___TBICtxSave
+
+/*
+ * void *__TBICtxRestore( TBIRES State, void *pExt )
+ *
+ *                 D0Ar2 contains TBICTX_*_BIT values that control what
+ *                    extended data is to be recovered from D1Ar3 (pExt).
+ *
+ *                 Virtually all possible scratch registers are used.
+ */
+/*
+ *     If TBICTX_XEXT_BIT is specified in State, then the saved state of
+ *       the original A0.2 and A1.2 is restored from pExt and the XEXT
+ *       related flags are removed from State.pCtx->SaveMask.
+ *
+ */
+       .balign 4
+       .global ___TBICtxRestore
+       .type   ___TBICtxRestore,function
+___TBICtxRestore:
+       GETD    D0Ar6,[D1Ar1+#TBICTX_CurrMODE]  /* Get TXMODE Value */
+       ANDST   D0Ar2,D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT
+       MOV     D1Re0,D0Ar2                     /* Keep flags in D1Re0 */
+       MOV     D0Re0,D1Ar3                     /* D1Ar3 is default result */
+       MOVZ    PC,D1RtP                        /* Early return, nothing to do */
+       ANDT    D0Ar6,D0Ar6,#0xE000             /* Top bits of TXMODE required */
+       MOV     A0.3,D0Ar6                      /* Save TXMODE for later */
+       TSTT    D1Re0,#TBICTX_XEXT_BIT          /* Check for XEXT bit */
+       BZ      $LCtxRestXDX8
+       GETD    D0Ar4,[D1Ar1+#TBICTX_SaveMask-2]/* Get current SaveMask */
+       GETL    D0Ar6,D1Ar5,[D0Re0++]           /* Restore A0.2, A1.2 state */
+       ANDMT   D0Ar4,D0Ar4,#(0xFFFF-(TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT))
+       SETD    [D1Ar1+#TBICTX_SaveMask-2],D0Ar4/* New SaveMask */
+#ifdef METAC_1_0
+       SETD    [D1Ar1+#TBICTX_Ext_AX2_U0],D0Ar6
+       MOV     D0Ar6,D1Ar1
+       SETD    [D0Ar6+#TBICTX_Ext_AX2_U1],D1Ar5
+#else
+       SETL    [D1Ar1+#TBICTX_Ext_AX2],D0Ar6,D1Ar5
+#endif
+$LCtxRestXDX8:
+       TSTT    D1Re0,#TBICTX_XDX8_BIT          /* Get extended DX regs? */
+       MOV     A1.2,D1RtP                      /* Free off D1RtP register */
+       BZ      $LCtxRestXAXX
+/*
+ * Restore 8 extra DX registers
+ */
+       MGETL   D0.8,D0.9,D0.10,D0.11,D0.12,D0.13,D0.14,D0.15,[D0Re0]
+$LCtxRestXAXX:
+       TSTT    D1Re0,#TBICTX_XAXX_BIT          /* Get extended AX regs? */
+       BZ      $LCtxRestXHL2
+/*
+ * Restore 3 extra AX registers
+ */
+       MGETL   A0_4 A0.5,A0.6,A0.7,[D0Re0]     /* Get 8*3 bytes */
+$LCtxRestXHL2:
+       TSTT    D1Re0,#TBICTX_XHL2_BIT          /* Get hardware-loop regs? */
+       BZ      $LCtxRestXTDP
+/*
+ * Get hardware loop registers
+ */
+       MGETL   D0Ar6,D0Ar4,D0Ar2,[D0Re0]       /* Get 8*3 bytes */
+       MOV     TXL1START,D0Ar6
+       MOV     TXL2START,D1Ar5
+       MOV     TXL1END,D0Ar4
+       MOV     TXL2END,D1Ar3
+       MOV     TXL1COUNT,D0Ar2
+       MOV     TXL2COUNT,D1Ar1
+$LCtxRestXTDP:
+       TSTT    D1Re0,#TBICTX_XTDP_BIT          /* Get per-thread DSP regs? */
+       MOVZ    PC,A1.2                         /* No: Early return */
+/*
+ * Get per-thread DSP registers; ACC.0, PR.0, PI.1-3 (PI.0 is zero)
+ */
+       MOV     A0.2,D0Re0
+       GETL    D0Ar6,D1Ar5,[D0Re0++#((16*4)+(18*8))]
+#ifndef CTX_NO_DSP
+D      GETL    AC0.0,AC1.0,[A0.2++]            /* Restore ACx.0 lower 32-bits */
+DH     GETL    AC0.0,AC1.0,[A0.2++]            /* Restore ACx.0 upper 32-bits */
+#else
+       ADD     A0.2,A0.2,#(2*8)
+#endif
+       ADD     D0Re0,D0Re0,#(2*4)
+       MOV     TXMODE,A0.3                     /* Some TXMODE bits needed */
+       MOV     TXMRSIZE,D0Ar6
+       MOV     TXDRSIZE,D1Ar5
+#ifndef CTX_NO_DSP
+D      GETL    D0AR.0,D1AR.0,[A0.2++]          /* Restore DSP RAM registers */
+D      GETL    D0AR.1,D1AR.1,[A0.2++]
+D      GETL    D0AW.0,D1AW.0,[A0.2++]
+D      GETL    D0AW.1,D1AW.1,[A0.2++]
+D      GETL    D0BR.0,D1BR.0,[A0.2++]
+D      GETL    D0BR.1,D1BR.1,[A0.2++]
+D      GETL    D0BW.0,D1BW.0,[A0.2++]
+D      GETL    D0BW.1,D1BW.1,[A0.2++]
+#else
+       ADD     A0.2,A0.2,#(8*8)
+#endif
+       MOV     TXMODE,#0                       /* Restore TXMODE */
+#ifndef CTX_NO_DSP
+D      GETL    D0ARI.0,D1ARI.0,[A0.2++]
+D      GETL    D0ARI.1,D1ARI.1,[A0.2++]
+D      GETL    D0AWI.0,D1AWI.0,[A0.2++]
+D      GETL    D0AWI.1,D1AWI.1,[A0.2++]
+D      GETL    D0BRI.0,D1BRI.0,[A0.2++]
+D      GETL    D0BRI.1,D1BRI.1,[A0.2++]
+D      GETL    D0BWI.0,D1BWI.0,[A0.2++]
+D      GETL    D0BWI.1,D1BWI.1,[A0.2++]
+D      GETD    T0,[A0.2++]
+D      GETD    T1,[A0.2++]
+D      GETD    T2,[A0.2++]
+D      GETD    T3,[A0.2++]
+D      GETD    T4,[A0.2++]
+D      GETD    T5,[A0.2++]
+D      GETD    T6,[A0.2++]
+D      GETD    T7,[A0.2++]
+D      GETD    T8,[A0.2++]
+D      GETD    T9,[A0.2++]
+D      GETD    TA,[A0.2++]
+D      GETD    TB,[A0.2++]
+D      GETD    TC,[A0.2++]
+D      GETD    TD,[A0.2++]
+D      GETD    TE,[A0.2++]
+D      GETD    TF,[A0.2++]
+#else
+       ADD     A0.2,A0.2,#(8*8+4*16)
+#endif
+       MOV     PC,A1.2                         /* Return */
+       .size   ___TBICtxRestore,.-___TBICtxRestore
+
+/*
+ * End of tbictx.S
+ */
diff --git a/arch/metag/tbx/tbictxfpu.S b/arch/metag/tbx/tbictxfpu.S
new file mode 100644 (file)
index 0000000..e773bea
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * tbictxfpu.S
+ *
+ * Copyright (C) 2009, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Explicit state save and restore routines forming part of the thread binary
+ * interface for META processors
+ */
+
+       .file   "tbifpuctx.S"
+
+#include <asm/metag_regs.h>
+#include <asm/tbx.h>
+
+#ifdef TBI_1_4
+/*
+ * void *__TBICtxFPUSave( TBIRES State, void *pExt )
+ *
+ *                 D0Ar2 contains TBICTX_*_BIT values that control what
+ *                    extended data is to be saved.
+ *                 These bits must be ored into the SaveMask of this structure.
+ *
+ *                 Virtually all possible scratch registers are used.
+ */
+       .text
+       .balign 4
+       .global ___TBICtxFPUSave
+       .type   ___TBICtxFPUSave,function
+___TBICtxFPUSave:
+
+       /* D1Ar1:D0Ar2 - State
+        * D1Ar3       - pExt
+        * D0Ar4       - Value of METAC_CORE_ID
+        * D1Ar5       - Scratch
+        * D0Ar6       - Scratch
+        */
+       
+       /* If the FPAC bit isn't set then there is nothing to do */
+       TSTT    D0Ar2,#TBICTX_FPAC_BIT
+       MOVZ    PC, D1RtP
+
+       /* Obtain the Core config */
+       MOVT    D0Ar4,        #HI(METAC_CORE_ID)
+       ADD     D0Ar4, D0Ar4, #LO(METAC_CORE_ID)
+       GETD    D0Ar4, [D0Ar4]
+
+       /* Detect FX.8 - FX.15 and add to core config */
+       MOV     D0Ar6, TXENABLE
+       AND     D0Ar6, D0Ar6, #(TXENABLE_CLASSALT_FPUR8 << TXENABLE_CLASS_S)
+       AND     D0Ar4, D0Ar4, #LO(0x0000FFFF)
+       ORT     D0Ar4, D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT)
+       XOR     D0Ar4, D0Ar4, D0Ar6
+
+       /* Save the relevant bits to the buffer */
+       SETD    [D1Ar3++], D0Ar4
+
+       /* Save the relevant bits of TXDEFR (Assumes TXDEFR is coherent) ... */
+       MOV     D0Ar6, TXDEFR
+       LSR     D0Re0, D0Ar6, #8
+       AND     D0Re0, D0Re0, #LO(TXDEFR_FPE_FE_BITS>>8)
+       AND     D0Ar6, D0Ar6, #LO(TXDEFR_FPE_ICTRL_BITS)
+       OR      D0Re0, D0Re0, D0Ar6
+
+       /* ... along with relevant bits of TXMODE to buffer */
+       MOV     D0Ar6, TXMODE
+       ANDT    D0Ar6, D0Ar6, #HI(TXMODE_FPURMODE_BITS)
+       ORT     D0Ar6, D0Ar6, #HI(TXMODE_FPURMODEWRITE_BIT)
+       OR      D0Ar6, D0Ar6, D0Re0
+       SETD    [D1Ar3++], D0Ar6
+
+       GETD    D0Ar6,[D1Ar1+#TBICTX_SaveMask-2] /* Get the current SaveMask */
+       /* D0Ar6       - pCtx->SaveMask */
+
+       TSTT    D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT) /* Perform test here for extended FPU registers
+                                                   * to avoid stalls
+                                                   */
+       /* Save the standard FPU registers */
+F      MSETL   [D1Ar3++], FX.0, FX.2, FX.4, FX.6
+
+       /* Save the extended FPU registers if they are present */
+       BZ      $Lskip_save_fx8_fx16
+F      MSETL   [D1Ar3++], FX.8, FX.10, FX.12, FX.14
+$Lskip_save_fx8_fx16:
+
+       /* Save the FPU Accumulator if it is present */
+       TST     D0Ar4, #METAC_COREID_NOFPACC_BIT
+       BNZ     $Lskip_save_fpacc
+F      SETL    [D1Ar3++], ACF.0
+F      SETL    [D1Ar3++], ACF.1
+F      SETL    [D1Ar3++], ACF.2
+$Lskip_save_fpacc:
+
+       /* Update pCtx->SaveMask */
+       ANDT    D0Ar2, D0Ar2, #TBICTX_FPAC_BIT
+       OR      D0Ar6, D0Ar6, D0Ar2
+       SETD    [D1Ar1+#TBICTX_SaveMask-2],D0Ar6/* Add in XCBF bit to TBICTX */
+
+       MOV     D0Re0, D1Ar3 /* Return end of save area */
+       MOV     PC, D1RtP
+
+       .size   ___TBICtxFPUSave,.-___TBICtxFPUSave
+
+/*
+ * void *__TBICtxFPURestore( TBIRES State, void *pExt )
+ *
+ *                 D0Ar2 contains TBICTX_*_BIT values that control what
+ *                    extended data is to be recovered from D1Ar3 (pExt).
+ *
+ *                 Virtually all possible scratch registers are used.
+ */
+/*
+ * If TBICTX_XEXT_BIT is specified in State, then the saved state of
+ *       the original A0.2 and A1.2 is restored from pExt and the XEXT
+ *       related flags are removed from State.pCtx->SaveMask.
+ *
+ */
+       .balign 4
+       .global ___TBICtxFPURestore
+       .type   ___TBICtxFPURestore,function
+___TBICtxFPURestore:
+
+       /* D1Ar1:D0Ar2 - State
+        * D1Ar3       - pExt
+        * D0Ar4       - Value of METAC_CORE_ID
+        * D1Ar5       - Scratch
+        * D0Ar6       - Scratch
+        * D1Re0       - Scratch
+        */
+
+       /* If the FPAC bit isn't set then there is nothing to do */
+       TSTT    D0Ar2,#TBICTX_FPAC_BIT
+       MOVZ    PC, D1RtP
+
+       /* Obtain the relevant bits of the Core config */
+       GETD    D0Ar4, [D1Ar3++]
+
+       /* Restore FPU related parts of TXDEFR. Assumes TXDEFR is coherent */
+       GETD    D1Ar5, [D1Ar3++]
+       MOV     D0Ar6, D1Ar5
+       LSL     D1Re0, D1Ar5, #8
+       ANDT    D1Re0, D1Re0, #HI(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS)
+       AND     D1Ar5, D1Ar5, #LO(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS)
+       OR      D1Re0, D1Re0, D1Ar5
+
+       MOV     D1Ar5, TXDEFR
+       ANDMT   D1Ar5, D1Ar5, #HI(~(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS))
+       ANDMB   D1Ar5, D1Ar5, #LO(~(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS))
+       OR      D1Re0, D1Re0, D1Ar5
+       MOV     TXDEFR, D1Re0
+
+       /* Restore relevant bits of TXMODE */
+       MOV     D1Ar5, TXMODE
+       ANDMT   D1Ar5, D1Ar5, #HI(~TXMODE_FPURMODE_BITS)
+       ANDT    D0Ar6, D0Ar6, #HI(TXMODE_FPURMODE_BITS|TXMODE_FPURMODEWRITE_BIT)
+       OR      D0Ar6, D0Ar6, D1Ar5
+       MOV     TXMODE, D0Ar6
+
+       TSTT    D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT) /* Perform test here for extended FPU registers
+                                                   * to avoid stalls
+                                                   */
+       /* Restore the standard FPU registers */
+F      MGETL   FX.0, FX.2, FX.4, FX.6, [D1Ar3++]
+
+       /* Restore the extended FPU registers if they are present */
+       BZ      $Lskip_restore_fx8_fx16
+F      MGETL   FX.8, FX.10, FX.12, FX.14, [D1Ar3++]
+$Lskip_restore_fx8_fx16:
+
+       /* Restore the FPU Accumulator if it is present */
+       TST     D0Ar4, #METAC_COREID_NOFPACC_BIT
+       BNZ     $Lskip_restore_fpacc
+F      GETL    ACF.0, [D1Ar3++]
+F      GETL    ACF.1, [D1Ar3++]
+F      GETL    ACF.2, [D1Ar3++]
+$Lskip_restore_fpacc:
+
+       MOV     D0Re0, D1Ar3 /* Return end of save area */
+       MOV     PC, D1RtP
+
+       .size   ___TBICtxFPURestore,.-___TBICtxFPURestore
+
+#endif /* TBI_1_4 */
+
+/*
+ * End of tbictxfpu.S
+ */
diff --git a/arch/metag/tbx/tbidefr.S b/arch/metag/tbx/tbidefr.S
new file mode 100644 (file)
index 0000000..3eb165e
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * tbidefr.S
+ *
+ * Copyright (C) 2009, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Routing deferred exceptions
+ */
+
+#include <asm/metag_regs.h>
+#include <asm/tbx.h>
+
+       .text
+       .balign 4
+       .global ___TBIHandleDFR
+       .type   ___TBIHandleDFR,function
+/* D1Ar1:D0Ar2 -- State
+ * D0Ar3       -- SigNum
+ * D0Ar4       -- Triggers
+ * D1Ar5       -- InstOrSWSId
+ * D0Ar6       -- pTBI (volatile)
+ */
+___TBIHandleDFR:
+#ifdef META_BUG_MBN100212
+       MSETL   [A0StP++], D0FrT, D0.5
+
+       /* D1Ar1,D0Ar2,D1Ar5,D0Ar6 -- Arguments to handler, must be preserved
+        * D0Ar4       -- The deferred exceptions
+        * D1Ar3       -- As per D0Ar4 but just the trigger bits
+        * D0.5        -- The bgnd deferred exceptions
+        * D1.5        -- TXDEFR with bgnd re-added
+        */
+
+       /* - Collect the pending deferred exceptions using TXSTAT,
+        *   (ack's the bgnd exceptions as a side-effect)
+        * - Manually collect remaining (interrupt) deferred exceptions
+        *   using TXDEFR
+        * - Replace the triggers (from TXSTATI) with the int deferred
+        *   exceptions DEFR ..., TXSTATI would have returned if it was valid
+        *   from bgnd code
+        * - Reconstruct TXDEFR by or'ing bgnd deferred exceptions (except
+        *   the DEFER bit) and the int deferred exceptions. This will be
+        *   restored later
+        */
+       DEFR    D0.5,  TXSTAT
+       MOV     D1.5,  TXDEFR
+       ANDT    D0.5,  D0.5, #HI(0xFFFF0000)
+       MOV     D1Ar3, D1.5
+       ANDT    D1Ar3, D1Ar3, #HI(0xFFFF0000)
+       OR      D0Ar4, D1Ar3, #TXSTAT_DEFER_BIT
+       OR      D1.5, D1.5, D0.5
+
+       /* Mask off anything unrelated to the deferred exception triggers */
+       ANDT    D1Ar3, D1Ar3, #HI(TXSTAT_BUSERR_BIT | TXSTAT_FPE_BITS)
+
+       /* Can assume that at least one exception happened since this
+        * handler wouldn't have been called otherwise.
+        *
+        * Replace the signal number and at the same time, prepare
+        * the mask to acknowledge the exception
+        *
+        * D1Re0 -- The bits to acknowledge
+        * D1Ar3 -- The signal number
+        * D1RtP -- Scratch to deal with non-conditional insns
+        */
+       MOVT    D1Re0, #HI(TXSTAT_FPE_BITS & ~TXSTAT_FPE_DENORMAL_BIT)
+       MOV     D1RtP, #TXSTAT_FPE_INVALID_S
+       FFB     D1Ar3, D1Ar3
+       CMP     D1Ar3, #TXSTAT_FPE_INVALID_S
+       MOVLE   D1Ar3, D1RtP /* Collapse FPE triggers to a single signal */
+       MOV     D1RtP, #1
+       LSLGT   D1Re0, D1RtP, D1Ar3
+
+       /* Get the handler using the signal number
+        *
+        * D1Ar3 -- The signal number
+        * D0Re0 -- Offset into TBI struct containing handler address
+        * D1Re0 -- Mask of triggers to keep
+        * D1RtP -- Address of handler
+        */
+       SUB     D1Ar3, D1Ar3, #(TXSTAT_FPE_INVALID_S - TBID_SIGNUM_FPE)
+       LSL     D0Re0, D1Ar3, #2
+       XOR     D1Re0, D1Re0, #-1   /* Prepare mask for acknowledge (avoids stall) */
+       ADD     D0Re0,D0Re0,#TBI_fnSigs
+       GETD    D1RtP, [D0Ar6+D0Re0]
+
+       /* Acknowledge triggers */
+       AND     D1.5, D1.5, D1Re0
+
+       /* Restore remaining exceptions
+        * Do this here in case the handler enables nested interrupts
+        *
+        * D1.5 -- TXDEFR with this exception ack'd
+        */
+       MOV     TXDEFR, D1.5
+
+       /* Call the handler */
+       SWAP    D1RtP, PC
+
+       GETL    D0.5,  D1.5,  [--A0StP]
+       GETL    D0FrT, D1RtP, [--A0StP]
+       MOV     PC,D1RtP
+#else  /* META_BUG_MBN100212 */
+
+       /* D1Ar1,D0Ar2,D1Ar5,D0Ar6 -- Arguments to handler, must be preserved
+        * D0Ar4       -- The deferred exceptions
+        * D1Ar3       -- As per D0Ar4 but just the trigger bits
+        */
+
+       /* - Collect the pending deferred exceptions using TXSTAT,
+        *   (ack's the interrupt exceptions as a side-effect)
+        */
+       DEFR    D0Ar4, TXSTATI
+
+       /* Mask off anything unrelated to the deferred exception triggers */
+       MOV     D1Ar3, D0Ar4
+       ANDT    D1Ar3, D1Ar3, #HI(TXSTAT_BUSERR_BIT | TXSTAT_FPE_BITS)
+
+       /* Can assume that at least one exception happened since this
+        * handler wouldn't have been called otherwise.
+        *
+        * Replace the signal number and at the same time, prepare
+        * the mask to acknowledge the exception
+        *
+        * The unusual code for 1<<D1Ar3 may need explanation.
+        * Normally this would be done using 'MOV rs,#1' and 'LSL rd,rs,D1Ar3'
+        * but only D1Re0 is available in D1 and no crossunit insns are available
+        * Even worse, there is no conditional 'MOV r,#uimm8'.
+        * Since the CMP proves that D1Ar3 >= 20, we can reuse the bottom 12-bits
+        * of D1Re0 (using 'ORGT r,#1') in the knowledge that the top 20-bits will
+        * be discarded without affecting the result.
+        *
+        * D1Re0 -- The bits to acknowledge
+        * D1Ar3 -- The signal number
+        */
+       MOVT    D1Re0, #HI(TXSTAT_FPE_BITS & ~TXSTAT_FPE_DENORMAL_BIT)
+       MOV     D0Re0, #TXSTAT_FPE_INVALID_S
+       FFB     D1Ar3, D1Ar3
+       CMP     D1Ar3, #TXSTAT_FPE_INVALID_S
+       MOVLE   D1Ar3, D0Re0 /* Collapse FPE triggers to a single signal */
+       ORGT    D1Re0, D1Re0, #1
+       LSLGT   D1Re0, D1Re0, D1Ar3
+
+       SUB     D1Ar3, D1Ar3, #(TXSTAT_FPE_INVALID_S - TBID_SIGNUM_FPE)
+
+       /* Acknowledge triggers and restore remaining exceptions
+        * Do this here in case the handler enables nested interrupts
+        *
+        * (x | y) ^ y == x & ~y. It avoids the restrictive XOR ...,#-1 insn
+        * and is the same length
+        */
+       MOV     D0Re0, TXDEFR
+       OR      D0Re0, D0Re0, D1Re0
+       XOR     TXDEFR, D0Re0, D1Re0
+
+       /* Get the handler using the signal number
+        *
+        * D1Ar3 -- The signal number
+        * D0Re0 -- Address of handler
+        */
+       LSL     D0Re0, D1Ar3, #2
+       ADD     D0Re0,D0Re0,#TBI_fnSigs
+       GETD    D0Re0, [D0Ar6+D0Re0]
+
+       /* Tailcall the handler */
+       MOV     PC,D0Re0
+
+#endif /* META_BUG_MBN100212 */
+       .size   ___TBIHandleDFR,.-___TBIHandleDFR
+/*
+ * End of tbidefr.S
+ */
diff --git a/arch/metag/tbx/tbidspram.S b/arch/metag/tbx/tbidspram.S
new file mode 100644 (file)
index 0000000..2f27c03
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * tbidspram.S
+ *
+ * Copyright (C) 2009, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Explicit state save and restore routines forming part of the thread binary
+ * interface for META processors
+ */
+
+       .file   "tbidspram.S"
+
+/* These aren't generally useful to a user so for now, they aren't publicly available */
+#define _TBIECH_DSPRAM_DUA_S    8
+#define _TBIECH_DSPRAM_DUA_BITS 0x7f00
+#define _TBIECH_DSPRAM_DUB_S    0
+#define _TBIECH_DSPRAM_DUB_BITS 0x007f
+
+/*
+ * void *__TBIDspramSaveA( short DspramSizes, void *pExt )
+ */
+       .text
+       .balign 4
+       .global ___TBIDspramSaveA
+       .type   ___TBIDspramSaveA,function
+___TBIDspramSaveA:
+
+       SETL    [A0StP++], D0.5, D1.5           /* Preserve D0.5:D1.5 on the stack */
+       MOV     A0.3, D0Ar2                     /* A0.3 = pExt save-buffer pointer */
+
+       /* D1Ar1 - Dspram Sizes
+        * A0.3  - Pointer to buffer
+        */
+
+       /* Save the specified amount of dspram DUA */
+DL     MOV     D0AR.0, #0                      /* DSP RAM A read pointer = 0 */
+       LSR     D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUA_S
+       AND     D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUA_BITS >> _TBIECH_DSPRAM_DUA_S) /* Extract DUA size field */
+       SUB     TXRPT, D1Ar1, #1                /* Set hardware loop count */
+$L1:
+DL     MOV     D0Re0, [D0AR.0++]
+DL     MOV     D0Ar6, [D0AR.0++]
+DL     MOV     D0Ar4, [D0AR.0++]
+DL     MOV     D0.5,  [D0AR.0++]
+       MSETL   [A0.3++], D0Re0, D0Ar6, D0Ar4, D0.5 /* Flush 4 units to buffer */
+
+       BR      $L1                             /* Repeat under TXRPT control */
+
+       GETL    D0.5, D1.5, [--A0StP]           /* Restore saved D0.5:D1.5 */
+       MOV     PC, D1RtP                       /* Return to caller */
+
+       .size   ___TBIDspramSaveA,.-___TBIDspramSaveA
+
+/*
+ * void *__TBIDspramSaveB( short DspramSizes, void *pExt )
+ */
+       .balign 4
+       .global ___TBIDspramSaveB
+       .type   ___TBIDspramSaveB,function
+___TBIDspramSaveB:
+
+       SETL    [A0StP++], D0.5, D1.5           /* Preserve D0.5:D1.5 on the stack */
+       MOV     A0.3, D0Ar2                     /* A0.3 = pExt save-buffer pointer */
+
+       /* D1Ar1 - Dspram Sizes
+        * A0.3  - Pointer to buffer
+        */
+
+       /* Save the specified amount of dspram DUB */
+DL     MOV     D0BR.0, #0                      /* DSP RAM B read pointer = 0 */
+       LSR     D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUB_S
+       AND     D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUB_BITS >> _TBIECH_DSPRAM_DUB_S) /* Extract DUB size field */
+       SUB     TXRPT, D1Ar1, #1                /* Set hardware loop count */
+$L2:
+DL     MOV     D0Re0, [D0BR.0++]
+DL     MOV     D0Ar6, [D0BR.0++]
+DL     MOV     D0Ar4, [D0BR.0++]
+DL     MOV     D0.5,  [D0BR.0++]
+       MSETL   [A0.3++], D0Re0, D0Ar6, D0Ar4, D0.5 /* Flush 4 units to buffer */
+
+       BR      $L2                             /* Repeat under TXRPT control */
+
+       GETL    D0.5, D1.5, [--A0StP]           /* Restore saved D0.5:D1.5 */
+       MOV     PC, D1RtP                       /* Return to caller */
+
+       .size   ___TBIDspramSaveB,.-___TBIDspramSaveB
+
+/*
+ * void *__TBIDspramRestoreA( short DspramSizes, void *pExt )
+ */
+       .balign 4
+       .global ___TBIDspramRestoreA
+       .type   ___TBIDspramRestoreA,function
+___TBIDspramRestoreA:
+
+       SETL    [A0StP++], D0.5, D1.5           /* Preserve D0.5:D1.5 on the stack */
+       MOV     A0.3, D0Ar2                     /* A0.3 = pExt restore-buffer pointer */
+
+       /* D1Ar1 - Dspram Sizes
+        * A0.3 - Pointer to buffer
+        */
+
+       /* Restore the specified amount of dspram DUA */
+DL     MOV     D0AW.0, #0                      /* DSP RAM A write pointer = 0 */
+       LSR     D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUA_S
+       AND     D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUA_BITS >> _TBIECH_DSPRAM_DUA_S) /* Extract DUA size field */
+       SUB     TXRPT, D1Ar1, #1                /* Set hardware loop count */
+$L3:
+       MGETL   D0Re0, D0Ar6, D0Ar4, D0.5, [A0.3++] /* Fetch 4 units from buffer */
+DL     MOV     [D0AW.0++], D0Re0
+DL     MOV     [D0AW.0++], D0Ar6
+DL     MOV     [D0AW.0++], D0Ar4
+DL     MOV     [D0AW.0++], D0.5
+
+       BR      $L3                             /* Repeat under TXRPT control */
+
+       GETL    D0.5, D1.5, [--A0StP]           /* Restore saved D0.5:D1.5 */
+       MOV     PC, D1RtP                       /* Return to caller */
+
+       .size   ___TBIDspramRestoreA,.-___TBIDspramRestoreA
+
+/*
+ * void *__TBIDspramRestoreB( short DspramSizes, void *pExt )
+ */
+       .balign 4
+       .global ___TBIDspramRestoreB
+       .type   ___TBIDspramRestoreB,function
+___TBIDspramRestoreB:
+
+       SETL    [A0StP++], D0.5, D1.5           /* Preserve D0.5:D1.5 on the stack */
+       MOV     A0.3, D0Ar2                     /* A0.3 = pExt restore-buffer pointer */
+
+       /* D1Ar1 - Dspram Sizes
+        * A0.3 - Pointer to buffer
+        */
+
+       /* Restore the specified amount of dspram DUB */
+DL     MOV     D0BW.0, #0                      /* DSP RAM B write pointer = 0 */
+       LSR     D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUB_S
+       AND     D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUB_BITS >> _TBIECH_DSPRAM_DUB_S) /* Extract DUB size field */
+       SUB     TXRPT, D1Ar1, #1                /* Set hardware loop count */
+$L4:
+       MGETL   D0Re0, D0Ar6, D0Ar4, D0.5, [A0.3++] /* Fetch 4 units from buffer */
+DL     MOV     [D0BW.0++], D0Re0
+DL     MOV     [D0BW.0++], D0Ar6
+DL     MOV     [D0BW.0++], D0Ar4
+DL     MOV     [D0BW.0++], D0.5
+
+       BR      $L4                             /* Repeat under TXRPT control */
+
+       GETL    D0.5, D1.5, [--A0StP]           /* Restore saved D0.5:D1.5 */
+       MOV     PC, D1RtP                       /* Return to caller */
+
+       .size   ___TBIDspramRestoreB,.-___TBIDspramRestoreB
+
+/*
+ * End of tbidspram.S
+ */
diff --git a/arch/metag/tbx/tbilogf.S b/arch/metag/tbx/tbilogf.S
new file mode 100644 (file)
index 0000000..4a34d80
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * tbilogf.S
+ *
+ * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Defines __TBILogF trap code for debugging messages and __TBICont for debug
+ * assert to be implemented on.
+ */
+
+       .file   "tbilogf.S"
+
+/*
+ * Perform console printf using external debugger or host support
+ */
+       .text
+       .balign 4
+       .global ___TBILogF
+       .type   ___TBILogF,function
+___TBILogF:
+       MSETL   [A0StP],D0Ar6,D0Ar4,D0Ar2       /* Push 3 argument pairs (24 bytes) */
+       SWITCH  #0xC10020                       /* Trap to external debugger/host logging service */
+       MOV     D0Re0,#0                        /* Result: 0 */
+       SUB     A0StP,A0StP,#24                 /* Pop the pushed arguments */
+       MOV     PC,D1RtP                        /* Return to caller */
+       .size   ___TBILogF,.-___TBILogF
+
+/*
+ * Perform wait for continue under control of the debugger
+ */
+       .text
+       .balign 4
+       .global ___TBICont
+       .type   ___TBICont,function
+___TBICont:
+       MOV     D0Ar6,#1                        /* First word pushed below (presumably a continue-request flag for the debugger) */
+       MSETL   [A0StP],D0Ar6,D0Ar4,D0Ar2       /* Push 3 argument pairs (24 bytes) */
+       SWITCH  #0xC30006       /* Returns if we are to continue */
+       SUB     A0StP,A0StP,#(8*3)              /* Pop the pushed arguments */
+       MOV     PC,D1RtP        /* Return */
+       .size   ___TBICont,.-___TBICont
+
+/*
+ * End of tbilogf.S
+ */
diff --git a/arch/metag/tbx/tbipcx.S b/arch/metag/tbx/tbipcx.S
new file mode 100644 (file)
index 0000000..de0626f
--- /dev/null
@@ -0,0 +1,451 @@
+/*
+ * tbipcx.S
+ *
+ * Copyright (C) 2001, 2002, 2007, 2009, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Asynchronous trigger handling including exceptions
+ */
+
+       .file   "tbipcx.S"
+#include <asm/metag_regs.h>
+#include <asm/tbx.h>
+
+/* BEGIN HACK */
+/* define these for now while doing initial conversion to GAS
+   will fix properly later */
+
+/* Signal identifiers always have the TBID_SIGNAL_BIT set and contain the
+   following related bit-fields */
+#define TBID_SIGNUM_S       2
+
+/* END HACK */
+
+#ifdef METAC_1_0
+/* Ax.4 is saved in TBICTX */
+#define A0_4  ,A0.4
+#else
+/* Ax.4 is NOT saved in TBICTX */
+#define A0_4
+#endif
+
+/* Size of the TBICTX structure */
+#define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX)
+
+#ifdef METAC_1_1
+#ifndef BOOTROM
+#ifndef SPECIAL_BUILD
+/* Jump straight into the boot ROM version of this code */
+#define CODE_USES_BOOTROM
+#endif
+#endif
+#endif
+
+/* Define space needed for CATCH buffer state in traditional units */
+#define CATCH_ENTRIES      5
+#define CATCH_ENTRY_BYTES 16
+
+#ifndef CODE_USES_BOOTROM
+#define A0GblIStP      A0.15  /* PTBICTX for current thread in PRIV system */
+#define A1GblIGbP      A1.15  /* Interrupt A1GbP value in PRIV system */
+#endif
+
+/*
+ * TBIRES __TBIASyncTrigger( TBIRES State )
+ */
+       .text
+       .balign 4
+       .global ___TBIASyncTrigger
+       .type   ___TBIASyncTrigger,function
+___TBIASyncTrigger:
+#ifdef CODE_USES_BOOTROM
+       MOVT    D0Re0,#HI(LINCORE_BASE)
+       JUMP    D0Re0,#0xA0
+#else
+       MOV     D0FrT,A0FrP                     /* Boing entry sequence */
+       ADD     A0FrP,A0StP,#0
+       SETL    [A0StP++],D0FrT,D1RtP
+       MOV     D0Re0,PCX                       /* Check for repeat call */
+       MOVT    D0FrT,#HI(___TBIBoingRTI+4)
+       ADD     D0FrT,D0FrT,#LO(___TBIBoingRTI+4)
+       CMP     D0Re0,D0FrT
+       BEQ     ___TBIBoingExit                 /* Already set up - come out */
+       ADD     D1Ar1,D1Ar1,#7                  /* PRIV system stack here */
+       MOV     A0.2,A0StP                      /*  else push context here */
+       MOVS    D0Re0,D0Ar2                     /* Return in user mode? */
+       ANDMB   D1Ar1,D1Ar1,#0xfff8             /*  align priv stack to 64-bit */
+       MOV     D1Re0,D1Ar1                     /*   and set result to arg */
+       MOVMI   A0.2,D1Ar1                      /*  use priv stack if PRIV set                   */
+/*
+ * Generate an initial TBICTX to return to our own current call context
+ */
+       MOVT    D1Ar5,#HI(___TBIBoingExit)      /* Go here to return */
+       ADD     D1Ar5,D1Ar5,#LO(___TBIBoingExit)
+       ADD     A0.3,A0.2,#TBICTX_DX            /* DX Save area */
+       ANDT    D0Ar2,D0Ar2,#TBICTX_PRIV_BIT    /* Extract PRIV bit */
+       MOVT    D0Ar6,#TBICTX_SOFT_BIT          /* Only soft thread state */
+       ADD     D0Ar6,D0Ar6,D0Ar2               /* Add in PRIV bit if requested */
+       SETL    [A0.2],D0Ar6,D1Ar5              /* Push header fields */
+       ADD     D0FrT,A0.2,#TBICTX_AX           /* Address AX save area */
+       MSETL   [A0.3],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
+       MOV     D0Ar6,#0
+       MOV     D1Ar5,#0
+       SETL    [A0.3++],D0Ar6,D1Ar5            /* Zero CT register states */
+       SETL    [A0.3++],D0Ar6,D1Ar5
+       MSETL   [D0FrT],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX regs */
+       MOV     A0FrP,A0.2                      /* Restore me! */
+       B       ___TBIResume
+       .size   ___TBIASyncTrigger,.-___TBIASyncTrigger
+
+/*
+ * Optimised return to handler for META Core
+ */
+___TBIBoingRTH:
+       RTH                                     /* Go to background level */
+       MOVT    A0.2,     #HI($Lpcx_target)
+       ADD     A0.2,A0.2,#LO($Lpcx_target)
+       MOV     PCX,A0.2                        /* Setup PCX for interrupts */
+       MOV     PC,D1Re0                        /* Jump to handler */
+/*
+ * This is where the code below needs to jump to wait for outermost interrupt
+ * event in a non-privilege mode system (single shared interrupt stack).
+ */
+___TBIBoingPCX:
+       MGETL   A0StP,A0FrP,A0.2,A0.3 A0_4,[D1Re0] /* Restore AX regs */
+       MOV     TXSTATUS,D0Re0                  /* Restore flags */
+       GETL    D0Re0,D1Re0,[D1Re0+#TBICTX_DX-TBICTX_BYTES]
+___TBIBoingRTI:
+       RTI                                     /* Wait for interrupt */
+$Lpcx_target:
+/*
+ * Save initial interrupt state on current stack
+ */
+       SETL    [A0StP+#TBICTX_DX],D0Re0,D1Re0  /* Save key registers */
+       ADD     D1Re0,A0StP,#TBICTX_AX          /* Address AX save area */
+       MOV     D0Re0,TXSTATUS                  /* Read TXSTATUS into D0Re0 */
+       MOV     TXSTATUS,#0                     /* Clear TXSTATUS */
+       MSETL   [D1Re0],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX critical regs */
+/*
+ * Register state at this point is-
+ *
+ *     D0Re0 - Old TXSTATUS with PRIV and CBUF bits set if appropriate
+ *     A0StP - Is call stack frame and base of TBICTX being generated
+ *     A1GbP - Is valid static access link
+ */
+___TBIBoing:
+       LOCK0                                   /* Make sure we have no locks! */
+       ADD     A1.2,A0StP,#TBICTX_DX+(8*1)     /* Address DX.1 save area */
+       MOV     A0FrP,A0StP                     /* Setup frame pointer */
+       MSETL   [A1.2],D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
+       MOV     D0Ar4,TXRPT                     /* Save critical CT regs */
+       MOV     D1Ar3,TXBPOBITS
+       MOV     D1Ar1,TXDIVTIME                 /* Calc catch buffer pSrc */
+       MOV     D0Ar2,TXMODE
+       MOV     TXMODE,#0                       /* Clear TXMODE */
+#ifdef TXDIVTIME_RPDIRTY_BIT
+       TSTT    D1Ar1,#HI(TXDIVTIME_RPDIRTY_BIT)/* NZ = RPDIRTY */
+       MOVT    D0Ar6,#TBICTX_CBRP_BIT
+       ORNZ    D0Re0,D0Re0,D0Ar6               /* Set CBRP if RPDIRTY set */
+#endif
+       MSETL   [A1.2],D0Ar4,D0Ar2              /* Save CT regs state */
+       MOV     D0Ar2,D0Re0                     /* Copy TXSTATUS */
+       ANDMT   D0Ar2,D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT
+#ifdef TBI_1_4
+       MOVT    D1Ar1,#TBICTX_FPAC_BIT          /* Copy FPActive into FPAC */
+       TSTT    D0Re0,#HI(TXSTATUS_FPACTIVE_BIT)
+       ORNZ    D0Ar2,D0Ar2,D1Ar1
+#endif
+       MOV     D1Ar1,PCX                       /* Read CurrPC */
+       ORT     D0Ar2,D0Ar2,#TBICTX_CRIT_BIT    /* SaveMask + CRIT bit */
+       SETL    [A0FrP+#TBICTX_Flags],D0Ar2,D1Ar1 /* Set pCtx header fields */
+/*
+ * Completed context save, now we need to make a call to an interrupt handler
+ *
+ *     D0Re0 - holds PRIV, WAIT, CBUF flags, HALT reason if appropriate
+ *     A0FrP - interrupt stack frame and base of TBICTX being generated
+ *     A0StP - same as A0FrP
+ */
+___TBIBoingWait:
+                               /* Reserve space for TBICTX and CBUF */
+       ADD     A0StP,A0StP,#TBICTX_BYTES+(CATCH_ENTRY_BYTES*CATCH_ENTRIES)
+       MOV     D0Ar4,TXSTATI                   /* Read the Triggers data */
+       MOV     D1Ar3,TXDIVTIME                 /* Read IRQEnc bits */
+       MOV     D0Ar2,D0Re0                     /* Copy PRIV and WAIT flags */
+       ANDT    D0Ar2,D0Ar2,#TBICTX_PRIV_BIT+TBICTX_WAIT_BIT+TBICTX_CBUF_BIT
+#ifdef TBI_1_4
+       MOVT    D1Ar5,#TBICTX_FPAC_BIT          /* Copy FPActive into FPAC */
+       TSTT    D0Re0,#HI(TXSTATUS_FPACTIVE_BIT)
+       ORNZ    D0Ar2,D0Ar2,D1Ar5
+#endif
+       ANDT    D1Ar3,D1Ar3,#HI(TXDIVTIME_IRQENC_BITS)
+       LSR     D1Ar3,D1Ar3,#TXDIVTIME_IRQENC_S
+       AND     TXSTATI,D0Ar4,#TXSTATI_BGNDHALT_BIT/* Ack any HALT seen */
+       ANDS    D0Ar4,D0Ar4,#0xFFFF-TXSTATI_BGNDHALT_BIT /* Only seen HALT? */
+       ORT     D0Ar2,D0Ar2,#TBICTX_CRIT_BIT    /* Set CRIT */
+#ifndef BOOTROM
+       MOVT    A1LbP,#HI(___pTBIs)
+       ADD     A1LbP,A1LbP,#LO(___pTBIs)
+       GETL    D1Ar5,D0Ar6,[A1LbP]             /* D0Ar6 = ___pTBIs[1] */
+#else
+/*
+ * For BOOTROM support ___pTBIs must be allocated at offset 0 vs A1GbP
+ */
+       GETL    D1Ar5,D0Ar6,[A1GbP]                     /* D0Ar6 = ___pTBIs[1] */
+#endif
+       BZ      ___TBIBoingHalt                 /* Yes: Service HALT */
+/*
+ * Encode interrupt as signal vector, strip away same/lower TXMASKI bits
+ */
+       MOV     D1Ar1,#1                        /* Generate mask for this bit */
+       MOV     D0Re0,TXMASKI                   /* Get interrupt mask */
+       LSL     TXSTATI,D1Ar1,D1Ar3             /* Acknowledge trigger */
+       AND     TXMASKI,D0Re0,#TXSTATI_BGNDHALT_BIT     /* Only allow HALTs */
+       OR      D0Ar2,D0Ar2,D0Re0               /* Set TBIRES.Sig.TrigMask */
+       ADD     D1Ar3,D1Ar3,#TBID_SIGNUM_TRT    /* Offset into interrupt sigs */
+       LSL     D0Re0,D1Ar3,#TBID_SIGNUM_S      /* Generate offset from SigNum */
+/*
+ * This is a key moment we are about to call the handler, register state is
+ * as follows-
+ *
+ *     D0Re0 - Handler vector (SigNum<<TBID_SIGNUM_S)
+ *     D0Ar2 - TXMASKI:TBICTX_CRIT_BIT with optional CBUF and PRIV bits
+ *     D1Ar3 - SigNum
+ *     D0Ar4 - State read from TXSTATI
+ *     D1Ar5 - Inst for SWITCH trigger case only, otherwise undefined
+ *     D0Ar6 - pTBI
+ */
+___TBIBoingVec:
+       ADD     D0Re0,D0Re0,#TBI_fnSigs         /* Offset into signal table */
+       GETD    D1Re0,[D0Ar6+D0Re0]             /* Get address for Handler */
+/*
+ * Call handler at interrupt level, when it returns simply resume execution
+ * of state indicated by D1Re0.
+ */
+       MOV     D1Ar1,A0FrP                     /* Pass in pCtx */
+       CALLR   D1RtP,___TBIBoingRTH            /* Use RTH to invoke handler */
+       
+/*
+ * Perform critical state restore and execute background thread.
+ *
+ *     A0FrP - is pointer to TBICTX structure to resume
+ *     D0Re0 - contains additional TXMASKI triggers
+ */
+       .text
+       .balign 4
+#ifdef BOOTROM
+       .global ___TBIResume
+#endif
+___TBIResume:
+/*
+ * New META IP method
+ */
+       RTH                                     /* Go to interrupt level */
+       MOV     D0Ar4,TXMASKI                   /* Read TXMASKI */
+       OR      TXMASKI,D0Ar4,D0Re0             /* -Write-Modify TXMASKI */
+       GETL    D0Re0,D1Re0,[A0FrP+#TBICTX_Flags]/* Get Flags:SaveMask, CurrPC */
+       MOV     A0StP,A0FrP                     /* Position stack pointer */
+       MOV     D0Ar2,TXPOLLI                   /* Read pending triggers */
+       MOV     PCX,D1Re0                       /* Set resumption PC */
+       TST     D0Ar2,#0xFFFF                   /* Any pending triggers? */
+       BNZ     ___TBIBoingWait                 /* Yes: Go for triggers */
+       TSTT    D0Re0,#TBICTX_WAIT_BIT          /* Do we WAIT anyway? */
+       BNZ     ___TBIBoingWait                 /* Yes: Go for triggers */
+       LSLS    D1Ar5,D0Re0,#1                  /* Test XCBF (MI) & PRIV (CS)? */
+       ADD     D1Re0,A0FrP,#TBICTX_CurrRPT     /* Address CT save area */
+       ADD     A0StP,A0FrP,#TBICTX_DX+(8*1)    /* Address DX.1 save area */
+       MGETL   A0.2,A0.3,[D1Re0]               /* Get CT reg states */
+       MOV     D1Ar3,A1.3                      /* Copy old TXDIVTIME */
+       BPL     ___TBIResCrit                   /* No: Skip logic */
+       ADD     D0Ar4,A0FrP,#TBICTX_BYTES       /* Source is after TBICTX */
+       ANDST   D1Ar3,D1Ar3,#HI(TXDIVTIME_RPMASK_BITS)/* !Z if RPDIRTY */
+       MGETL   D0.5,D0.6,[D0Ar4]               /* Read Catch state */
+       MOV     TXCATCH0,D0.5                   /* Restore TXCATCHn */
+       MOV     TXCATCH1,D1.5
+       MOV     TXCATCH2,D0.6
+       MOV     TXCATCH3,D1.6
+       BZ      ___TBIResCrit
+       MOV     D0Ar2,#(1*8)
+       LSRS    D1Ar3,D1Ar3,#TXDIVTIME_RPMASK_S+1 /* 2nd RPMASK bit -> bit 0 */
+       ADD     RA,D0Ar4,#(0*8)                 /* Re-read read pipeline */
+       ADDNZ   RA,D0Ar4,D0Ar2                  /* If Bit 0 set issue RA */
+       LSRS    D1Ar3,D1Ar3,#2                  /* Bit 1 -> C, Bit 2 -> Bit 0 */
+       ADD     D0Ar2,D0Ar2,#8
+       ADDCS   RA,D0Ar4,D0Ar2                  /* If C issue RA */
+       ADD     D0Ar2,D0Ar2,#8
+       ADDNZ   RA,D0Ar4,D0Ar2                  /* If Bit 0 set issue RA */
+       LSRS    D1Ar3,D1Ar3,#2                  /* Bit 1 -> C, Bit 2 -> Bit 0 */
+       ADD     D0Ar2,D0Ar2,#8
+       ADDCS   RA,D0Ar4,D0Ar2                  /* If C issue RA */
+       ADD     D0Ar2,D0Ar2,#8
+       ADDNZ   RA,D0Ar4,D0Ar2                  /* If Bit 0 set issue RA */
+       MOV     TXDIVTIME,A1.3                  /* Set RPDIRTY again */
+___TBIResCrit:
+       LSLS    D1Ar5,D0Re0,#1                  /* Test XCBF (MI) & PRIV (CS)? */
+#ifdef TBI_1_4
+       ANDT    D1Ar5,D1Ar5,#(TBICTX_FPAC_BIT*2)
+       LSL     D0Ar6,D1Ar5,#3                  /* Convert FPAC into FPACTIVE */
+#endif
+       ANDMT   D0Re0,D0Re0,#TBICTX_CBUF_BIT    /* Keep CBUF bit from SaveMask */
+#ifdef TBI_1_4
+       OR      D0Re0,D0Re0,D0Ar6               /* Combine FPACTIVE with others */
+#endif
+       MGETL   D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7,[A0StP] /* Restore DX */
+       MOV     TXRPT,A0.2                      /* Restore CT regs */
+       MOV     TXBPOBITS,A1.2
+       MOV     TXMODE,A0.3
+       BCC     ___TBIBoingPCX                  /* Do non-PRIV wait! */
+       MOV     A1GblIGbP,A1GbP                 /* Save A1GbP too */
+       MGETL   A0StP,A0FrP,A0.2,A0.3 A0_4,[D1Re0] /* Restore AX regs */
+/*
+ * Wait for the first interrupt/exception trigger in a privilege mode system
+ * (interrupt stack area for current TASK to be pointed to by A0GblIStP
+ * or per_cpu__stack_save[hwthread_id]).
+ */
+       MOV     TXSTATUS,D0Re0                  /* Restore flags */
+       MOV     D0Re0,TXPRIVEXT                 /* Set TXPRIVEXT_TXTOGGLEI_BIT */
+       SUB     D1Re0,D1Re0,#TBICTX_BYTES       /* TBICTX is top of int stack */
+#ifdef TBX_PERCPU_SP_SAVE
+       SWAP    D1Ar3,A1GbP
+       MOV     D1Ar3,TXENABLE                  /* Which thread are we? */
+       AND     D1Ar3,D1Ar3,#TXENABLE_THREAD_BITS
+       LSR     D1Ar3,D1Ar3,#TXENABLE_THREAD_S-2
+       ADDT    D1Ar3,D1Ar3,#HI(_per_cpu__stack_save)
+       ADD     D1Ar3,D1Ar3,#LO(_per_cpu__stack_save)
+       SETD    [D1Ar3],D1Re0
+       SWAP    D1Ar3,A1GbP
+#else
+       MOV     A0GblIStP, D1Re0
+#endif
+       OR      D0Re0,D0Re0,#TXPRIVEXT_TXTOGGLEI_BIT
+       MOV     TXPRIVEXT,D0Re0                 /* Cannot set TXPRIVEXT if !priv */
+       GETL    D0Re0,D1Re0,[D1Re0+#TBICTX_DX]
+       RTI                                     /* Wait for interrupt */
+/*
+ * Save initial interrupt state on A0GblIStP, switch to A0GblIStP if
+ * BOOTROM code, save and switch to [A1GbP] otherwise.
+ */
+___TBIBoingPCXP:
+#ifdef TBX_PERCPU_SP_SAVE
+       SWAP    D1Ar3,A1GbP                     /* Get PRIV stack base */
+       MOV     D1Ar3,TXENABLE                  /* Which thread are we? */
+       AND     D1Ar3,D1Ar3,#TXENABLE_THREAD_BITS
+       LSR     D1Ar3,D1Ar3,#TXENABLE_THREAD_S-2
+       ADDT    D1Ar3,D1Ar3,#HI(_per_cpu__stack_save)
+       ADD     D1Ar3,D1Ar3,#LO(_per_cpu__stack_save)
+       GETD    D1Ar3,[D1Ar3]
+#else
+       SWAP    D1Ar3,A0GblIStP                 /* Get PRIV stack base */
+#endif
+       SETL    [D1Ar3+#TBICTX_DX],D0Re0,D1Re0 /* Save key registers */
+       MOV     D0Re0,TXPRIVEXT                 /* Clear TXPRIVEXT_TXTOGGLEI_BIT */
+       ADD     D1Re0,D1Ar3,#TBICTX_AX  /* Address AX save area */
+       ANDMB   D0Re0,D0Re0,#0xFFFF-TXPRIVEXT_TXTOGGLEI_BIT
+       MOV     TXPRIVEXT,D0Re0                 /* Cannot set TXPRIVEXT if !priv */
+       MOV     D0Re0,TXSTATUS                  /* Read TXSTATUS into D0Re0 */
+       MOV     TXSTATUS,#0                     /* Clear TXSTATUS */
+       MSETL   [D1Re0],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX critical regs */
+       MOV     A0StP,D1Ar3                     /* Switch stacks */
+#ifdef TBX_PERCPU_SP_SAVE
+       MOV     D1Ar3,A1GbP                     /* Get D1Ar2 back */
+#else
+       MOV     D1Ar3,A0GblIStP                 /* Get D1Ar2 back */
+#endif
+       ORT     D0Re0,D0Re0,#TBICTX_PRIV_BIT    /* Add PRIV to TXSTATUS */
+       MOV     A1GbP,A1GblIGbP                 /* Restore A1GbP */
+       B       ___TBIBoing                     /* Enter common handler code */
+/*
+ * At this point we know it's a background HALT case we are handling.
+ * The restored TXSTATUS always needs to have zero in the reason bits.
+ */
+___TBIBoingHalt:
+       MOV     D0Ar4,TXMASKI                   /* Get interrupt mask */
+       ANDST   D0Re0,D0Re0,#HI(TXSTATUS_MAJOR_HALT_BITS+TXSTATUS_MEM_FAULT_BITS)
+       AND     TXMASKI,D0Ar4,#TXSTATI_BGNDHALT_BIT /* Only allow HALTs */
+       AND     D0Ar4,D0Ar4,#0xFFFF-TXSTATI_BGNDHALT_BIT /* What ints are off? */
+       OR      D0Ar2,D0Ar2,D0Ar4               /* Set TBIRES.Sig.TrigMask */
+       MOV     D0Ar4,#TXSTATI_BGNDHALT_BIT     /* This was the trigger state */
+       LSR     D1Ar3,D0Re0,#TXSTATUS_MAJOR_HALT_S
+       MOV     D0Re0,#TBID_SIGNUM_XXF<<TBID_SIGNUM_S
+       BNZ     ___TBIBoingVec                  /* Jump to XXF exception handler */
+/*
+ * Only the SWITCH cases are left, PCX must be valid
+ */
+#ifdef TBI_1_4
+       MOV     D1Ar5,TXPRIVEXT
+       TST     D1Ar5,#TXPRIVEXT_MINIMON_BIT
+       LSR     D1Ar3,D1Ar1,#1                  /* Shift needed for MINIM paths (fill stall) */
+       BZ      $Lmeta                          /* If META only, skip */
+       TSTT    D1Ar1,#HI(0x00800000)
+       ANDMT   D1Ar3,D1Ar3,#HI(0x007FFFFF >> 1)/* Shifted mask for large MINIM */
+       ANDT    D1Ar1,D1Ar1,#HI(0xFFE00000)     /* Static mask for small MINIM */
+       BZ      $Llarge_minim                   /* If large MINIM */
+$Lsmall_minim:
+       TSTT    D1Ar3,#HI(0x00100000 >> 1)
+       ANDMT   D1Ar3,D1Ar3,#HI(0x001FFFFF >> 1)/* Correct shifted mask for large MINIM */
+       ADDZ    D1Ar1,D1Ar1,D1Ar3               /* If META rgn, add twice to undo LSR #1 */
+       B       $Lrecombine
+$Llarge_minim:
+       ANDST   D1Ar1,D1Ar1,#HI(0xFF800000)     /* Correct static mask for small MINIM */
+                                               /* Z=0 (Cannot place code at NULL) */
+$Lrecombine:
+       ADD     D1Ar1,D1Ar1,D1Ar3               /* Combine static and shifted parts */
+$Lmeta:
+       GETW    D1Ar5,[D1Ar1++]                 /* META: lo-16, MINIM: lo-16 (all-16 if short) */
+       GETW    D1Ar3,[D1Ar1]                   /* META: hi-16, MINIM: hi-16 (only if long) */
+       MOV     D1Re0,D1Ar5
+       XOR     D1Re0,D1Re0,#0x4000
+       LSLSNZ  D1Re0,D1Re0,#(32-14)            /* MINIM: If long C=0, if short C=1 */
+       LSLCC   D1Ar3,D1Ar3,#16                 /* META/MINIM long: Move hi-16 up */
+       LSLCS   D1Ar3,D1Ar5,#16                 /* MINIM short: Dup all-16 */
+       ADD     D1Ar5,D1Ar5,D1Ar3               /* ALL: Combine both 16-bit parts */
+#else
+       GETD    D1Ar5,[D1Ar1]                   /* Read instruction for switch */
+#endif
+       LSR     D1Ar3,D1Ar5,#22                 /* Convert into signal number */
+       AND     D1Ar3,D1Ar3,#TBID_SIGNUM_SW3-TBID_SIGNUM_SW0
+       LSL     D0Re0,D1Ar3,#TBID_SIGNUM_S      /* Generate offset from SigNum */
+       B       ___TBIBoingVec                  /* Jump to switch handler */
+/*
+ * Exit from TBIASyncTrigger call
+ */
+___TBIBoingExit:
+       GETL    D0FrT,D1RtP,[A0FrP++]           /* Restore state from frame */
+       SUB     A0StP,A0FrP,#8                  /* Unwind stack */
+       MOV     A0FrP,D0FrT                     /* Last memory read completes */
+       MOV     PC,D1RtP                        /* Return to caller */
+#endif /* ifdef CODE_USES_BOOTROM */
+       .size   ___TBIResume,.-___TBIResume
+
+#ifndef BOOTROM
+/*
+ * void __TBIASyncResume( TBIRES State )
+ */
+       .text
+       .balign 4
+       .global ___TBIASyncResume
+       .type   ___TBIASyncResume,function
+___TBIASyncResume:
+/*
+ * Perform CRIT|SOFT state restore and execute background thread.
+ */
+       MOV     D1Ar3,D1Ar1                     /* Restore this context */
+       MOV     D0Re0,D0Ar2                     /* Carry in additional triggers */
+                                               /* Reserve space for TBICTX */
+       ADD     D1Ar3,D1Ar3,#TBICTX_BYTES+(CATCH_ENTRY_BYTES*CATCH_ENTRIES)
+       MOV     A0StP,D1Ar3                     /* Enter with protection of */
+       MOV     A0FrP,D1Ar1                     /*   TBICTX on our stack */
+#ifdef CODE_USES_BOOTROM
+       MOVT    D1Ar1,#HI(LINCORE_BASE)
+       JUMP    D1Ar1,#0xA4
+#else
+       B       ___TBIResume
+#endif
+       .size   ___TBIASyncResume,.-___TBIASyncResume
+#endif /* ifndef BOOTROM */
+
+/*
+ * End of tbipcx.S
+ */
diff --git a/arch/metag/tbx/tbiroot.S b/arch/metag/tbx/tbiroot.S
new file mode 100644 (file)
index 0000000..7d84daf
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * tbiroot.S
+ *
+ * Copyright (C) 2001, 2002, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Module that creates and via ___TBI function returns a TBI Root Block for
+ * interrupt and background processing on the current thread.
+ */
+
+       .file   "tbiroot.S"
+#include <asm/metag_regs.h>
+
+/*
+ * Get data structures and defines from the TBI C header
+ */
+#include <asm/tbx.h>
+
+
+/* If signals need to be exchanged we must create a TBI Root Block */
+
+       .data
+       .balign 8
+       .global ___pTBIs
+       .type   ___pTBIs,object
+___pTBIs:
+       .long   0 /* Bgnd+Int root block ptrs */
+       .long   0
+       .size   ___pTBIs,.-___pTBIs
+
+
+/*
+ * Return ___pTBIs value specific to execution level with promotion/demotion
+ *
+ * Register Usage: D1Ar1 is Id, D0Re0 is the primary result
+ *                 D1Re0 is secondary result (___pTBIs for other exec level)
+ */
+       .text
+       .balign 4
+       .global ___TBI
+       .type   ___TBI,function
+___TBI:
+       TSTT    D1Ar1,#HI(TBID_ISTAT_BIT)       /* Bgnd or Int level? */
+       MOVT    A1LbP,#HI(___pTBIs)
+       ADD     A1LbP,A1LbP,#LO(___pTBIs)
+       GETL    D0Re0,D1Re0,[A1LbP] /* Base of root block table */
+       SWAPNZ  D0Re0,D1Re0                     /* Swap if asked */
+       MOV     PC,D1RtP
+       .size   ___TBI,.-___TBI
+
+
+/*
+ * Return identifier of the current thread in TBI segment or signal format with
+ * secondary mask to indicate privilege and interrupt level of thread
+ */
+       .text
+       .balign 4
+       .global ___TBIThrdPrivId
+       .type   ___TBIThrdPrivId,function
+___TBIThrdPrivId:
+       .global ___TBIThreadId
+       .type   ___TBIThreadId,function
+___TBIThreadId:
+#ifndef METAC_0_1
+       MOV     D1Re0,TXSTATUS                  /* Are we privileged or int? */
+       MOV     D0Re0,TXENABLE                  /* Which thread are we? */
+/* Disable privilege adaption for now */
+       ANDT    D1Re0,D1Re0,#HI(TXSTATUS_ISTAT_BIT) /* +TXSTATUS_PSTAT_BIT) */
+       LSL     D1Re0,D1Re0,#TBID_ISTAT_S-TXSTATUS_ISTAT_S
+       AND     D0Re0,D0Re0,#TXENABLE_THREAD_BITS
+       LSL     D0Re0,D0Re0,#TBID_THREAD_S-TXENABLE_THREAD_S
+#else
+/* Thread 0 only */
+       XOR     D0Re0,D0Re0,D0Re0
+       XOR     D1Re0,D1Re0,D1Re0
+#endif
+       MOV     PC,D1RtP                        /* Return */
+       .size   ___TBIThrdPrivId,.-___TBIThrdPrivId
+       .size   ___TBIThreadId,.-___TBIThreadId 
+
+
+/*
+ * End of tbiroot.S
+ */
diff --git a/arch/metag/tbx/tbisoft.S b/arch/metag/tbx/tbisoft.S
new file mode 100644 (file)
index 0000000..0346fe8
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * tbisoft.S
+ *
+ * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Support for soft threads and soft context switches
+ */
+
+       .file   "tbisoft.S"
+
+#include <asm/tbx.h>
+
+#ifdef METAC_1_0
+/* Ax.4 is saved in TBICTX */
+#define A0_4  ,A0.4
+#define D0_5  ,D0.5
+#else
+/* Ax.4 is NOT saved in TBICTX */
+#define A0_4
+#define D0_5
+#endif
+
+/* Size of the TBICTX structure */
+#define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX)
+
+       .text
+       .balign 4
+       .global ___TBISwitchTail
+       .type   ___TBISwitchTail,function
+___TBISwitchTail:
+       B       $LSwitchTail
+       .size   ___TBISwitchTail,.-___TBISwitchTail
+
+/* 
+ * TBIRES __TBIJumpX( TBIX64 ArgsA, PTBICTX *rpSaveCtx, int TrigsMask,
+ *                                    void (*fnMain)(), void *pStack );
+ *
+ * This is a combination of __TBISwitch and __TBIJump with the context of
+ * the calling thread being saved in the rpSaveCtx location with a drop-thru
+ *  effect into the __TBIJump logic. ArgsB passes via __TBIJump to the
+ *  routine eventually invoked will reflect the rpSaveCtx value specified.
+ */
+       .text
+       .balign 4
+       .global ___TBIJumpX
+       .type   ___TBIJumpX,function
+___TBIJumpX:
+       CMP     D1RtP,#-1
+       B       $LSwitchStart
+       .size   ___TBIJumpX,.-___TBIJumpX
+
+/*
+ * TBIRES __TBISwitch( TBIRES Switch, PTBICTX *rpSaveCtx )
+ *
+ * Software synchronous context switch between soft threads, save only the
+ * registers which are actually valid on call entry.
+ *
+ *     A0FrP, D0RtP, D0.5, D0.6, D0.7      - Saved on stack
+ *     A1GbP is global to all soft threads so not virtualised
+ *     A0StP is then saved as the base of the TBICTX of the thread
+ *     
+ */
+       .text
+       .balign 4
+       .global ___TBISwitch
+       .type   ___TBISwitch,function
+___TBISwitch:
+       XORS    D0Re0,D0Re0,D0Re0               /* Set ZERO flag */
+$LSwitchStart:
+       MOV     D0FrT,A0FrP                     /* Boing entry sequence */
+       ADD     A0FrP,A0StP,#0                  
+       SETL    [A0StP+#8++],D0FrT,D1RtP
+/*
+ * Save current frame state - we save all regs because we don't want
+ * uninitialised crap in the TBICTX structure that the asynchronous resumption
+ * of a thread will restore.
+ */
+       MOVT    D1Re0,#HI($LSwitchExit)         /* ASync resume point here */
+       ADD     D1Re0,D1Re0,#LO($LSwitchExit)
+       SETD    [D1Ar3],A0StP                   /* Record pCtx of this thread */
+       MOVT    D0Re0,#TBICTX_SOFT_BIT          /* Only soft thread state */
+       SETL    [A0StP++],D0Re0,D1Re0           /* Push header fields */
+       ADD     D0FrT,A0StP,#TBICTX_AX-TBICTX_DX /* Address AX save area */
+       MOV     D0Re0,#0                        /* Setup 0:0 result for ASync */
+       MOV     D1Re0,#0                        /* resume of the thread */
+       MSETL   [A0StP],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
+       SETL    [A0StP++],D0Re0,D1Re0           /* Zero CurrRPT, CurrBPOBITS, */
+       SETL    [A0StP++],D0Re0,D1Re0           /* Zero CurrMODE, CurrDIVTIME */
+       ADD     A0StP,A0StP,#(TBICTX_AX_REGS*8) /* Reserve AX save space */
+       MSETL   [D0FrT],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX regs */
+       BNZ     ___TBIJump
+/*
+ * NextThread MUST be in TBICTX_SOFT_BIT state!
+ */
+$LSwitchTail:
+       MOV     D0Re0,D0Ar2                     /* Result from args */
+       MOV     D1Re0,D1Ar1
+       ADD     D1RtP,D1Ar1,#TBICTX_AX
+       MGETL   A0StP,A0FrP,[D1RtP]             /* Get frame values */
+$LSwitchCmn:
+       ADD     A0.2,D1Ar1,#TBICTX_DX+(8*5)
+       MGETL   D0.5,D0.6,D0.7,[A0.2]           /* Get caller-saved DX regs */
+$LSwitchExit:
+       GETL    D0FrT,D1RtP,[A0FrP++]           /* Restore state from frame */
+       SUB     A0StP,A0FrP,#8                  /* Unwind stack */
+       MOV     A0FrP,D0FrT                     /* Last memory read completes */
+       MOV     PC,D1RtP                        /* Return to caller */
+       .size   ___TBISwitch,.-___TBISwitch
+
+/*
+ * void __TBISyncResume( TBIRES State, int TrigMask );
+ *
+ * This routine causes the TBICTX structure specified in State.Sig.pCtx to
+ * be restored. This implies that execution will not return to the caller.
+ * The State.Sig.TrigMask field will be ored into TXMASKI during the
+ * context switch such that any immediately occurring interrupts occur in
+ * the context of the newly specified task. The State.Sig.SaveMask parameter
+ * is ignored.
+ */
+       .text
+       .balign 4
+       .global ___TBISyncResume
+       .type   ___TBISyncResume,function
+___TBISyncResume:
+       MOV     D0Re0,D0Ar2                     /* Result from args */
+       MOV     D1Re0,D1Ar1
+       XOR     D1Ar5,D1Ar5,D1Ar5               /* D1Ar5 = 0 */
+       ADD     D1RtP,D1Ar1,#TBICTX_AX
+       SWAP    D1Ar5,TXMASKI                   /* D1Ar5 <-> TXMASKI */
+       MGETL   A0StP,A0FrP,[D1RtP]             /* Get frame values */
+       OR      TXMASKI,D1Ar5,D1Ar3             /* New TXMASKI */
+       B       $LSwitchCmn
+       .size   ___TBISyncResume,.-___TBISyncResume
+
+/*
+ * void __TBIJump( TBIX64 ArgsA, TBIX32 ArgsB, int TrigsMask,
+ *                               void (*fnMain)(), void *pStack );
+ *
+ * Jump directly to a new routine on an arbitrary stack with arbitrary args
+ * oring bits back into TXMASKI on route.
+ */
+       .text
+       .balign 4
+       .global ___TBIJump
+       .type   ___TBIJump,function
+___TBIJump:
+       XOR     D0Re0,D0Re0,D0Re0               /* D0Re0 = 0 */
+       MOV     A0StP,D0Ar6                     /* Stack = Frame */
+       SWAP    D0Re0,TXMASKI                   /* D0Re0 <-> TXMASKI */
+       MOV     A0FrP,D0Ar6                     
+       MOVT    A1LbP,#HI(__exit)
+       ADD     A1LbP,A1LbP,#LO(__exit)
+       MOV     D1RtP,A1LbP                     /* D1RtP = __exit */
+       OR      TXMASKI,D0Re0,D0Ar4             /* New TXMASKI */
+       MOV     PC,D1Ar5                        /* Jump to fnMain */
+       .size   ___TBIJump,.-___TBIJump
+
+/*
+ *     PTBICTX __TBISwitchInit( void *pStack, int (*fnMain)(),
+ *                             .... 4 extra 32-bit args .... );
+ *                             
+ * Generate a new soft thread context ready for its first outing.
+ *
+ *     D1Ar1 - Region of memory to be used as the new soft thread stack
+ *     D0Ar2 - Main line routine for new soft thread
+ *     D1Ar3, D0Ar4, D1Ar5, D0Ar6 - arguments to be passed on stack
+ *     The routine returns the initial PTBICTX value for the new thread
+ */
+       .text
+       .balign 4
+       .global ___TBISwitchInit
+       .type   ___TBISwitchInit,function
+___TBISwitchInit:
+       MOV     D0FrT,A0FrP                     /* Need save return point */
+       ADD     A0FrP,A0StP,#0
+       SETL    [A0StP++],D0FrT,D1RtP           /* Save return to caller */
+       MOVT    A1LbP,#HI(__exit)
+       ADD     A1LbP,A1LbP,#LO(__exit)
+       MOV     D1RtP,A1LbP                     /* Get address of __exit */
+       ADD     D1Ar1,D1Ar1,#7                  /* Align stack to 64-bits */
+       ANDMB   D1Ar1,D1Ar1,#0xfff8             /*   by rounding base up */
+       MOV     A0.2,D1Ar1                      /* A0.2 is new stack */
+       MOV     D0FrT,D1Ar1                     /* Initial pseudo-frame pointer */
+       SETL    [A0.2++],D0FrT,D1RtP            /* Save return to __exit */
+       MOV     D1RtP,D0Ar2
+       SETL    [A0.2++],D0FrT,D1RtP            /* Save return to fnMain */
+       ADD     D0FrT,D0FrT,#8                  /* Advance pseudo-frame pointer */
+       MSETL   [A0.2],D0Ar6,D0Ar4              /* Save extra initial args */
+       MOVT    D1RtP,#HI(___TBIStart)          /* Start up code for new stack */
+       ADD     D1RtP,D1RtP,#LO(___TBIStart)
+       SETL    [A0.2++],D0FrT,D1RtP            /* Save return to ___TBIStart */
+       ADD     D0FrT,D0FrT,#(8*3)              /* Advance pseudo-frame pointer */
+       MOV     D0Re0,A0.2                      /* Return pCtx for new thread */
+       MOV     D1Re0,#0                        /* pCtx:0 is default Arg1:Arg2 */
+/*
+ * Generate initial TBICTX state
+ */
+       MOVT    D1Ar1,#HI($LSwitchExit)         /* Async restore code */
+       ADD     D1Ar1,D1Ar1,#LO($LSwitchExit)
+       MOVT    D0Ar2,#TBICTX_SOFT_BIT          /* Only soft thread state */
+       ADD     D0Ar6,A0.2,#TBICTX_BYTES        /* New A0StP */
+       MOV     D1Ar5,A1GbP                     /* Same A1GbP */
+       MOV     D0Ar4,D0FrT                     /* Initial A0FrP */
+       MOV     D1Ar3,A1LbP                     /* Same A1LbP */
+       SETL    [A0.2++],D0Ar2,D1Ar1            /* Set header fields */
+       MSETL   [A0.2],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7
+       MOV     D0Ar2,#0                        /* Zero values */
+       MOV     D1Ar1,#0
+       SETL    [A0.2++],D0Ar2,D1Ar1            /* Zero CurrRPT, CurrBPOBITS, */
+       SETL    [A0.2++],D0Ar2,D1Ar1            /*      CurrMODE, and pCurrCBuf */
+       MSETL   [A0.2],D0Ar6,D0Ar4,D0Ar2,D0FrT D0_5 /* Set DX and then AX regs */
+       B       $LSwitchExit                    /* All done! */
+       .size   ___TBISwitchInit,.-___TBISwitchInit
+
+       .text
+       .balign 4
+       .global ___TBIStart
+       .type   ___TBIStart,function
+___TBIStart:
+       MOV     D1Ar1,D1Re0                     /* Pass TBIRES args to call */
+       MOV     D0Ar2,D0Re0
+       MGETL   D0Re0,D0Ar6,D0Ar4,[A0FrP]       /* Get hidden args */
+       SUB     A0StP,A0FrP,#(8*3)              /* Entry stack pointer */
+       MOV     A0FrP,D0Re0                     /* Entry frame pointer */
+       MOVT    A1LbP,#HI(__exit)
+       ADD     A1LbP,A1LbP,#LO(__exit)
+       MOV     D1RtP,A1LbP                     /* D1RtP = __exit */
+       MOV     PC,D1Re0                        /* Jump into fnMain */
+       .size   ___TBIStart,.-___TBIStart
+
+/*
+ * End of tbisoft.S
+ */
diff --git a/arch/metag/tbx/tbistring.c b/arch/metag/tbx/tbistring.c
new file mode 100644 (file)
index 0000000..f90cd08
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * tbistring.c
+ *
+ * Copyright (C) 2001, 2002, 2003, 2005, 2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * String table functions provided as part of the thread binary interface for
+ * Meta processors
+ */
+
+#include <linux/export.h>
+#include <linux/string.h>
+#include <asm/tbx.h>
+
+/*
+ * There are not any functions to modify the string table currently, if these
+ * are required at some later point I suggest having a separate module and
+ * ensuring that creating new entries does not interfere with reading old
+ * entries in any way.
+ */
+
+const TBISTR *__TBIFindStr(const TBISTR *start,
+                          const char *str, int match_len)
+{
+       const TBISTR *search = start;
+       bool exact = true;
+       const TBISEG *seg;
+
+       if (match_len < 0) {
+               /* Make match_len always positive for the inner loop */
+               match_len = -match_len;
+               exact = false;
+       } else {
+               /*
+                * Also support historic behaviour, which expected match_len to
+                * include null terminator
+                */
+               if (match_len && str[match_len-1] == '\0')
+                       match_len--;
+       }
+
+       if (!search) {
+               /* Find global string table segment */
+               seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
+                                                 TBID_SEGSCOPE_GLOBAL,
+                                                 TBID_SEGTYPE_STRING));
+
+               if (!seg || seg->Bytes < sizeof(TBISTR))
+                       /* No string table! */
+                       return NULL;
+
+               /* Start of string table */
+               search = seg->pGAddr;
+       }
+
+       for (;;) {
+               while (!search->Tag)
+                       /* Allow simple gaps which are just zero initialised */
+                       search = (const TBISTR *)((const char *)search + 8);
+
+               if (search->Tag == METAG_TBI_STRE) {
+                       /* Reached the end of the table */
+                       search = NULL;
+                       break;
+               }
+
+               if ((search->Len >= match_len) &&
+                   (!exact || (search->Len == match_len + 1)) &&
+                   (search->Tag != METAG_TBI_STRG)) {
+                       /* Worth searching */
+                       if (!strncmp(str, (const char *)search->String,
+                                    match_len))
+                               break;
+               }
+
+               /* Next entry */
+               search = (const TBISTR *)((const char *)search + search->Bytes);
+       }
+
+       return search;
+}
+
+const void *__TBITransStr(const char *str, int len)
+{
+       const TBISTR *search = NULL;
+       const void *res = NULL;
+
+       for (;;) {
+               /* Search onwards */
+               search = __TBIFindStr(search, str, len);
+
+               /* No translation returns NULL */
+               if (!search)
+                       break;
+
+               /* Skip matching entries with no translation data */
+               if (search->TransLen != METAG_TBI_STRX) {
+                       /* Calculate base of translation string */
+                       res = (const char *)search->String +
+                               ((search->Len + 7) & ~7);
+                       break;
+               }
+
+               /* Next entry */
+               search = (const TBISTR *)((const char *)search + search->Bytes);
+       }
+
+       /* Return base address of translation data or NULL */
+       return res;
+}
+EXPORT_SYMBOL(__TBITransStr);
diff --git a/arch/metag/tbx/tbitimer.S b/arch/metag/tbx/tbitimer.S
new file mode 100644 (file)
index 0000000..5dbedde
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * tbitimer.S
+ *
+ * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * TBI timer support routines and data values
+ */
+
+       .file   "tbitimer.S"
+/*
+ * Get data structures and defines from the main C header
+ */
+#include <asm/tbx.h>
+
+       .data
+       .balign 8
+       .global ___TBITimeB
+       .type   ___TBITimeB,object
+___TBITimeB:
+       .quad   0               /* Background 'lost' ticks */
+       .size   ___TBITimeB,.-___TBITimeB
+
+       .data
+       .balign 8
+       .global ___TBITimeI
+       .type   ___TBITimeI,object
+___TBITimeI:
+       .quad   0               /* Interrupt 'lost' ticks */
+       .size   ___TBITimeI,.-___TBITimeI
+
+       .data
+       .balign 8
+       .global ___TBITimes
+       .type   ___TBITimes,object
+___TBITimes:
+       .long   ___TBITimeB     /* Table of 'lost' tick values */
+       .long   ___TBITimeI
+       .size   ___TBITimes,.-___TBITimes
+
+/*
+ * Flag bits for control of ___TBITimeCore
+ */
+#define TIMER_SET_BIT  1
+#define TIMER_ADD_BIT  2
+
+/*
+ * Initialise or stop timer support
+ *
+ * Register Usage: D1Ar1 holds Id, D1Ar2 is initial delay or 0
+ *                 D0FrT is used to call ___TBITimeCore
+ *                 D0Re0 is used for the result which is TXSTAT_TIMER_BIT
+ *                 D0Ar4, D1Ar5, D0Ar6 are all used as scratch
+ *               Other registers are those set by ___TBITimeCore
+ *                     A0.3 is assumed to point at ___TBITime(I/B)
+ */
+       .text
+       .balign 4
+       .global ___TBITimerCtrl
+       .type   ___TBITimerCtrl,function
+___TBITimerCtrl:
+       MOV     D1Ar5,#TIMER_SET_BIT            /* Timer SET request */
+       MOVT    D0FrT,#HI(___TBITimeCore)       /* Get timer core reg values */
+       CALL    D0FrT,#LO(___TBITimeCore)       /* and perform register update */
+       NEGS    D0Ar6,D0Ar2                     /* Set flags from time-stamp */
+       ASR     D1Ar5,D0Ar6,#31                 /* Sign extend D0Ar6 into D1Ar5 */
+       SETLNZ  [A0.3],D0Ar6,D1Ar5              /* ___TBITime(B/I)=-Start if enable */
+       MOV     PC,D1RtP                        /* Return */
+       .size   ___TBITimerCtrl,.-___TBITimerCtrl
+       
+/*
+ * Return ___TBITimeStamp value
+ *
+ * Register Usage: D1Ar1 holds Id
+ *                 D0FrT is used to call ___TBITimeCore
+ *                 D0Re0, D1Re0 is used for the result
+ *                 D1Ar3, D0Ar4, D1Ar5
+ *               Other registers are those set by ___TBITimeCore
+ *                     D0Ar6 is assumed to be the timer value read
+ *                     A0.3 is assumed to point at ___TBITime(I/B)
+ */
+       .text
+       .balign 4
+       .global ___TBITimeStamp
+       .type   ___TBITimeStamp,function
+___TBITimeStamp:
+       MOV     D1Ar5,#0                        /* Timer GET request */
+       MOVT    D0FrT,#HI(___TBITimeCore)       /* Get timer core reg values */
+       CALL    D0FrT,#LO(___TBITimeCore)       /* with no register update */
+       ADDS    D0Re0,D0Ar4,D0Ar6               /* Add current time value */
+       ADD     D1Re0,D1Ar3,D1Ar5               /*  to 64-bit signed extend time */
+       ADDCS   D1Re0,D1Re0,#1                  /* Support borrow too */
+       MOV     PC,D1RtP                        /* Return */
+       .size   ___TBITimeStamp,.-___TBITimeStamp
+
+/*
+ * Perform ___TBITimerAdd logic
+ *
+ * Register Usage: D1Ar1 holds Id, D0Ar2 holds value to be added to the timer
+ *                 D0Re0 is used for the result - new TIMER value
+ *                 D1Ar5, D0Ar6 are used as scratch
+ *               Other registers are those set by ___TBITimeCore
+ *                     D0Ar6 is assumed to be the timer value read
+ *                     D0Ar4, D1Ar3 is the current value of ___TBITime(B/I)
+ */
+       .text
+       .balign 4
+       .global ___TBITimerAdd
+       .type   ___TBITimerAdd,function
+___TBITimerAdd:
+       MOV     D1Ar5,#TIMER_ADD_BIT            /* Timer ADD request */
+       MOVT    D0FrT,#HI(___TBITimeCore)       /* Get timer core reg values */
+       CALL    D0FrT,#LO(___TBITimeCore)       /* with no register update */
+       ADD     D0Re0,D0Ar2,D0Ar6               /* Regenerate new value = result */
+       NEG     D0Ar2,D0Ar2                     /* Negate delta */
+       ASR     D1Re0,D0Ar2,#31                 /* Sign extend negated delta */
+       ADDS    D0Ar4,D0Ar4,D0Ar2               /* Add time added to ... */
+       ADD     D1Ar3,D1Ar3,D1Re0               /* ... real timer ... */
+       ADDCS   D1Ar3,D1Ar3,#1                  /* ... with carry */
+       SETL    [A0.3],D0Ar4,D1Ar3              /* Update ___TBITime(B/I) */
+       MOV     PC,D1RtP                        /* Return */
+       .size   ___TBITimerAdd,.-___TBITimerAdd
+
+#ifdef TBI_1_4
+/*
+ * Perform ___TBITimerDeadline logic
+ *    NB: Delays are positive compared to the Wait values which are -ive
+ *
+ * Register Usage: D1Ar1 holds Id
+ *                 D0Ar2 holds Delay requested
+ *                 D0Re0 is used for the result - old TIMER Delay value
+ *                 D1Ar5, D0Ar6 are used as scratch
+ *                 Other registers are those set by ___TBITimeCore
+ *                 D0Ar6 is assumed to be the timer value read
+ *                 D0Ar4, D1Ar3 is the current value of ___TBITime(B/I)
+ *
+ */
+        .text
+        .type   ___TBITimerDeadline,function
+        .global ___TBITimerDeadline
+        .align  2
+___TBITimerDeadline:
+       MOV     D1Ar5,#TIMER_SET_BIT            /* Timer SET request */
+       MOVT    D0FrT,#HI(___TBITimeCore)       /* Get timer core reg values */
+       CALL    D0FrT,#LO(___TBITimeCore)       /* with no register update */
+       MOV     D0Re0,D0Ar6                     /* Old value read = result */
+       SUB     D0Ar2,D0Ar6,D0Ar2               /* Delta from (old - new) */
+       ASR     D1Re0,D0Ar2,#31                 /* Sign extend delta */
+       ADDS    D0Ar4,D0Ar4,D0Ar2               /* Add time added to ... */
+       ADD     D1Ar3,D1Ar3,D1Re0               /* ... real timer ... */
+       ADDCS   D1Ar3,D1Ar3,#1                  /* ... with carry */
+       SETL    [A0.3],D0Ar4,D1Ar3              /* Update ___TBITime(B/I) */
+       MOV     PC,D1RtP                        /* Return */
+        .size   ___TBITimerDeadline,.-___TBITimerDeadline
+#endif /* TBI_1_4 */
+
+/*
+ * Perform core timer access logic
+ *
+ * Register Usage: D1Ar1 holds Id, D0Ar2 holds input value for SET and
+ *                                             input value for ADD
+ *                 D1Ar5 controls op as SET or ADD as bit values
+ *                 On return D0Ar6, D1Ar5 holds the old 64-bit timer value
+ *                 A0.3 is setup to point at ___TBITime(I/B)
+ *                 A1.3 is setup to point at ___TBITimes
+ *                 D0Ar4, D1Ar3 is setup to value of ___TBITime(I/B)
+ */
+       .text
+       .balign 4
+       .global ___TBITimeCore
+       .type   ___TBITimeCore,function
+___TBITimeCore:
+#ifndef METAC_0_1
+       TSTT    D1Ar1,#HI(TBID_ISTAT_BIT)       /* Interrupt level timer? */
+#endif
+       MOVT    A1LbP,#HI(___TBITimes)
+       ADD     A1LbP,A1LbP,#LO(___TBITimes)
+       MOV     A1.3,A1LbP                      /* Get ___TBITimes address */
+#ifndef METAC_0_1
+       BNZ     $LTimeCoreI                     /* Yes: Service TXTIMERI! */
+#endif
+       LSRS    D1Ar5,D1Ar5,#1                  /* Carry = SET, Zero = !ADD */
+       GETD    A0.3,[A1.3+#0]                  /* A0.3 == &___TBITimeB */
+       MOV     D0Ar6,TXTIMER                   /* Always GET old value */
+       MOVCS   TXTIMER,D0Ar2                   /* Conditional SET operation */
+       ADDNZ   TXTIMER,D0Ar2,D0Ar6             /* Conditional ADD operation */
+#ifndef METAC_0_1
+       B       $LTimeCoreEnd
+$LTimeCoreI:
+       LSRS    D1Ar5,D1Ar5,#1                  /* Carry = SET, Zero = !ADD */
+       GETD    A0.3,[A1.3+#4]                  /* A0.3 == &___TBITimeI */
+       MOV     D0Ar6,TXTIMERI                  /* Always GET old value */
+       MOVCS   TXTIMERI,D0Ar2                  /* Conditional SET operation */
+       ADDNZ   TXTIMERI,D0Ar2,D0Ar6            /* Conditional ADD operation */
+$LTimeCoreEnd:
+#endif
+       ASR     D1Ar5,D0Ar6,#31                 /* Sign extend D0Ar6 into D1Ar5 */
+       GETL    D0Ar4,D1Ar3,[A0.3]              /* Read ___TBITime(B/I) */
+       MOV     PC,D0FrT                        /* Return quickly */
+       .size   ___TBITimeCore,.-___TBITimeCore
+
+/*
+ * End of tbitimer.S
+ */
index ba3b7c8..7843d11 100644 (file)
@@ -19,6 +19,7 @@ config MICROBLAZE
        select HAVE_DEBUG_KMEMLEAK
        select IRQ_DOMAIN
        select HAVE_GENERIC_HARDIRQS
+       select HAVE_VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select GENERIC_PCI_IOMAP
index d26fb90..0a603d3 100644 (file)
@@ -69,16 +69,13 @@ export MMU DTB
 
 all: linux.bin
 
-# With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 = linux.bin linux.bin.gz
-BOOT_TARGETS2 = simpleImage.%
-
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
 
-$(BOOT_TARGETS1): vmlinux
+linux.bin linux.bin.gz: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-$(BOOT_TARGETS2): vmlinux
+
+simpleImage.%: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 define archhelp
diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore
new file mode 100644 (file)
index 0000000..bf04591
--- /dev/null
@@ -0,0 +1,3 @@
+*.dtb
+linux.bin*
+simpleImage.*
index 4fbfdc1..8cb8a85 100644 (file)
@@ -150,7 +150,7 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define page_to_bus(page)      (page_to_phys(page))
 #define bus_to_virt(addr)      (phys_to_virt(addr))
 
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
 /*extern void *__ioremap(phys_addr_t address, unsigned long size,
                unsigned long flags);*/
 extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
diff --git a/arch/microblaze/kernel/.gitignore b/arch/microblaze/kernel/.gitignore
new file mode 100644 (file)
index 0000000..c5f676c
--- /dev/null
@@ -0,0 +1 @@
+vmlinux.lds
index 4b7d8a3..4254514 100644 (file)
 
 static inline void __enable_icache_msr(void)
 {
-       __asm__ __volatile__ (" msrset  r0, %0;         \
-                               nop; "                  \
+       __asm__ __volatile__ ("  msrset r0, %0;"        \
+                               "nop;"                  \
                        : : "i" (MSR_ICE) : "memory");
 }
 
 static inline void __disable_icache_msr(void)
 {
-       __asm__ __volatile__ (" msrclr  r0, %0;         \
-                               nop; "                  \
+       __asm__ __volatile__ ("  msrclr r0, %0;"        \
+                               "nop;"                  \
                        : : "i" (MSR_ICE) : "memory");
 }
 
 static inline void __enable_dcache_msr(void)
 {
-       __asm__ __volatile__ (" msrset  r0, %0;         \
-                               nop; "                  \
-                               :                       \
-                               : "i" (MSR_DCE)         \
-                               : "memory");
+       __asm__ __volatile__ ("  msrset r0, %0;"        \
+                               "nop;"                  \
+                       : : "i" (MSR_DCE) : "memory");
 }
 
 static inline void __disable_dcache_msr(void)
 {
-       __asm__ __volatile__ (" msrclr  r0, %0;         \
-                               nop; "                  \
-                               :                       \
-                               : "i" (MSR_DCE)         \
-                               : "memory");
+       __asm__ __volatile__ ("  msrclr r0, %0;"        \
+                               "nop; "                 \
+                       : : "i" (MSR_DCE) : "memory");
 }
 
 static inline void __enable_icache_nomsr(void)
 {
-       __asm__ __volatile__ (" mfs     r12, rmsr;      \
-                               nop;                    \
-                               ori     r12, r12, %0;   \
-                               mts     rmsr, r12;      \
-                               nop; "                  \
-                               :                       \
-                               : "i" (MSR_ICE)         \
-                               : "memory", "r12");
+       __asm__ __volatile__ ("  mfs    r12, rmsr;"     \
+                               "nop;"                  \
+                               "ori    r12, r12, %0;"  \
+                               "mts    rmsr, r12;"     \
+                               "nop;"                  \
+                       : : "i" (MSR_ICE) : "memory", "r12");
 }
 
 static inline void __disable_icache_nomsr(void)
 {
-       __asm__ __volatile__ (" mfs     r12, rmsr;      \
-                               nop;                    \
-                               andi    r12, r12, ~%0;  \
-                               mts     rmsr, r12;      \
-                               nop; "                  \
-                               :                       \
-                               : "i" (MSR_ICE)         \
-                               : "memory", "r12");
+       __asm__ __volatile__ ("  mfs    r12, rmsr;"     \
+                               "nop;"                  \
+                               "andi   r12, r12, ~%0;" \
+                               "mts    rmsr, r12;"     \
+                               "nop;"                  \
+                       : : "i" (MSR_ICE) : "memory", "r12");
 }
 
 static inline void __enable_dcache_nomsr(void)
 {
-       __asm__ __volatile__ (" mfs     r12, rmsr;      \
-                               nop;                    \
-                               ori     r12, r12, %0;   \
-                               mts     rmsr, r12;      \
-                               nop; "                  \
-                               :                       \
-                               : "i" (MSR_DCE)         \
-                               : "memory", "r12");
+       __asm__ __volatile__ ("  mfs    r12, rmsr;"     \
+                               "nop;"                  \
+                               "ori    r12, r12, %0;"  \
+                               "mts    rmsr, r12;"     \
+                               "nop;"                  \
+                       : : "i" (MSR_DCE) : "memory", "r12");
 }
 
 static inline void __disable_dcache_nomsr(void)
 {
-       __asm__ __volatile__ (" mfs     r12, rmsr;      \
-                               nop;                    \
-                               andi    r12, r12, ~%0;  \
-                               mts     rmsr, r12;      \
-                               nop; "                  \
-                               :                       \
-                               : "i" (MSR_DCE)         \
-                               : "memory", "r12");
+       __asm__ __volatile__ ("  mfs    r12, rmsr;"     \
+                               "nop;"                  \
+                               "andi   r12, r12, ~%0;" \
+                               "mts    rmsr, r12;"     \
+                               "nop;"                  \
+                       : : "i" (MSR_DCE) : "memory", "r12");
 }
 
 
@@ -106,7 +94,7 @@ do {                                                                 \
        int align = ~(cache_line_length - 1);                           \
        end = min(start + cache_size, end);                             \
        start &= align;                                                 \
-} while (0);
+} while (0)
 
 /*
  * Helper macro to loop over the specified cache_size/line_length and
@@ -118,12 +106,12 @@ do {                                                                      \
        int step = -line_length;                                        \
        WARN_ON(step >= 0);                                             \
                                                                        \
-       __asm__ __volatile__ (" 1:      " #op " %0, r0;                 \
-                                       bgtid   %0, 1b;                 \
-                                       addk    %0, %0, %1;             \
-                                       " : : "r" (len), "r" (step)     \
+       __asm__ __volatile__ (" 1:      " #op " %0, r0;"                \
+                                       "bgtid   %0, 1b;"               \
+                                       "addk    %0, %0, %1;"           \
+                                       : : "r" (len), "r" (step)       \
                                        : "memory");                    \
-} while (0);
+} while (0)
 
 /* Used for wdc.flush/clear which can use rB for offset which is not possible
  * to use for simple wdc or wic.
@@ -142,12 +130,12 @@ do {                                                                      \
        count = end - start;                                            \
        WARN_ON(count < 0);                                             \
                                                                        \
-       __asm__ __volatile__ (" 1:      " #op " %0, %1;                 \
-                                       bgtid   %1, 1b;                 \
-                                       addk    %1, %1, %2;             \
-                                       " : : "r" (start), "r" (count), \
+       __asm__ __volatile__ (" 1:      " #op " %0, %1;"                \
+                                       "bgtid  %1, 1b;"                \
+                                       "addk   %1, %1, %2;"            \
+                                       : : "r" (start), "r" (count),   \
                                        "r" (step) : "memory");         \
-} while (0);
+} while (0)
 
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op)                        \
@@ -157,13 +145,13 @@ do {                                                                      \
        end = ((end & align) == end) ? end - line_length : end & align; \
        WARN_ON(end - start < 0);                                       \
                                                                        \
-       __asm__ __volatile__ (" 1:      " #op " %1, r0;                 \
-                                       cmpu    %0, %1, %2;             \
-                                       bgtid   %0, 1b;                 \
-                                       addk    %1, %1, %3;             \
-                               " : : "r" (temp), "r" (start), "r" (end),\
+       __asm__ __volatile__ (" 1:      " #op " %1, r0;"                \
+                                       "cmpu   %0, %1, %2;"            \
+                                       "bgtid  %0, 1b;"                \
+                                       "addk   %1, %1, %3;"            \
+                               : : "r" (temp), "r" (start), "r" (end), \
                                        "r" (line_length) : "memory");  \
-} while (0);
+} while (0)
 
 #define ASM_LOOP
 
@@ -352,7 +340,7 @@ static void __invalidate_dcache_all_noirq_wt(void)
 #endif
        pr_debug("%s\n", __func__);
 #ifdef ASM_LOOP
-       CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
+       CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
 #else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
@@ -361,7 +349,8 @@ static void __invalidate_dcache_all_noirq_wt(void)
 #endif
 }
 
-/* FIXME It is blindly invalidation as is expected
+/*
+ * FIXME It is blindly invalidation as is expected
  * but can't be called on noMMU in microblaze_cache_init below
  *
  * MS: noMMU kernel won't boot if simple wdc is used
@@ -375,7 +364,7 @@ static void __invalidate_dcache_all_wb(void)
        pr_debug("%s\n", __func__);
 #ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
-                                       wdc)
+                                       wdc);
 #else
        for (i = 0; i < cpuinfo.dcache_size;
                 i += cpuinfo.dcache_line_length)
@@ -616,49 +605,48 @@ static const struct scache wt_nomsr_noirq = {
 #define CPUVER_7_20_A  0x0c
 #define CPUVER_7_20_D  0x0f
 
-#define INFO(s)        printk(KERN_INFO "cache: " s "\n");
-
 void microblaze_cache_init(void)
 {
        if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
                if (cpuinfo.dcache_wb) {
-                       INFO("wb_msr");
+                       pr_info("wb_msr\n");
                        mbc = (struct scache *)&wb_msr;
                        if (cpuinfo.ver_code <= CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
-                               INFO("WB won't work properly");
+                               pr_info("WB won't work properly\n");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
-                               INFO("wt_msr_noirq");
+                               pr_info("wt_msr_noirq\n");
                                mbc = (struct scache *)&wt_msr_noirq;
                        } else {
-                               INFO("wt_msr");
+                               pr_info("wt_msr\n");
                                mbc = (struct scache *)&wt_msr;
                        }
                }
        } else {
                if (cpuinfo.dcache_wb) {
-                       INFO("wb_nomsr");
+                       pr_info("wb_nomsr\n");
                        mbc = (struct scache *)&wb_nomsr;
                        if (cpuinfo.ver_code <= CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
-                               INFO("WB won't work properly");
+                               pr_info("WB won't work properly\n");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
-                               INFO("wt_nomsr_noirq");
+                               pr_info("wt_nomsr_noirq\n");
                                mbc = (struct scache *)&wt_nomsr_noirq;
                        } else {
-                               INFO("wt_nomsr");
+                               pr_info("wt_nomsr\n");
                                mbc = (struct scache *)&wt_nomsr;
                        }
                }
        }
-/* FIXME Invalidation is done in U-BOOT
- * WT cache: Data is already written to main memory
- * WB cache: Discard data on noMMU which caused that kernel doesn't boot
- */
+       /*
+        * FIXME Invalidation is done in U-BOOT
+        * WT cache: Data is already written to main memory
+        * WB cache: Discard data on noMMU which caused that kernel doesn't boot
+        */
        /* invalidate_dcache(); */
        enable_dcache();
 
index 916aaed..ee46894 100644 (file)
@@ -27,7 +27,7 @@
        early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
 #else
 #define err_printk(x) \
-       printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n");
+       pr_info("ERROR: Microblaze " x "-different for PVR and DTS\n");
 #endif
 
 void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
@@ -38,12 +38,11 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
 
        CI(ver_code, VERSION);
        if (!ci->ver_code) {
-               printk(KERN_ERR "ERROR: MB has broken PVR regs "
-                                               "-> use DTS setting\n");
+               pr_err("ERROR: MB has broken PVR regs -> use DTS setting\n");
                return;
        }
 
-       temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |\
+       temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |
                PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr);
        if (ci->use_instr != temp)
                err_printk("BARREL, MSR, PCMP or DIV");
@@ -59,13 +58,13 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
                err_printk("HW_FPU");
        ci->use_fpu = temp;
 
-       ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |\
-                       PVR_UNALIGNED_EXCEPTION(pvr) |\
-                       PVR_ILL_OPCODE_EXCEPTION(pvr) |\
-                       PVR_IOPB_BUS_EXCEPTION(pvr) |\
-                       PVR_DOPB_BUS_EXCEPTION(pvr) |\
-                       PVR_DIV_ZERO_EXCEPTION(pvr) |\
-                       PVR_FPU_EXCEPTION(pvr) |\
+       ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |
+                       PVR_UNALIGNED_EXCEPTION(pvr) |
+                       PVR_ILL_OPCODE_EXCEPTION(pvr) |
+                       PVR_IOPB_BUS_EXCEPTION(pvr) |
+                       PVR_DOPB_BUS_EXCEPTION(pvr) |
+                       PVR_DIV_ZERO_EXCEPTION(pvr) |
+                       PVR_FPU_EXCEPTION(pvr) |
                        PVR_FSL_EXCEPTION(pvr);
 
        CI(pvr_user1, USER1);
index eab6abf..0b2299b 100644 (file)
@@ -68,31 +68,30 @@ void __init setup_cpuinfo(void)
 
        cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu");
        if (!cpu)
-               printk(KERN_ERR "You don't have cpu!!!\n");
+               pr_err("You don't have cpu!!!\n");
 
-       printk(KERN_INFO "%s: initialising\n", __func__);
+       pr_info("%s: initialising\n", __func__);
 
        switch (cpu_has_pvr()) {
        case 0:
-               printk(KERN_WARNING
-                       "%s: No PVR support. Using static CPU info from FDT\n",
+               pr_warn("%s: No PVR support. Using static CPU info from FDT\n",
                        __func__);
                set_cpuinfo_static(&cpuinfo, cpu);
                break;
 /* FIXME I found weird behavior with MB 7.00.a/b 7.10.a
  * please do not use FULL PVR with MMU */
        case 1:
-               printk(KERN_INFO "%s: Using full CPU PVR support\n",
+               pr_info("%s: Using full CPU PVR support\n",
                        __func__);
                set_cpuinfo_static(&cpuinfo, cpu);
                set_cpuinfo_pvr_full(&cpuinfo, cpu);
                break;
        default:
-               printk(KERN_WARNING "%s: Unsupported PVR setting\n", __func__);
+               pr_warn("%s: Unsupported PVR setting\n", __func__);
                set_cpuinfo_static(&cpuinfo, cpu);
        }
 
        if (cpuinfo.mmu_privins)
-               printk(KERN_WARNING "%s: Stream instructions enabled"
+               pr_warn("%s: Stream instructions enabled"
                        " - USERSPACE CAN LOCK THIS KERNEL!\n", __func__);
 }
index 3a749d5..8d0dc6d 100644 (file)
@@ -27,7 +27,7 @@
        tmp = 0x0;      /* Prevent warning about unused */      \
        __asm__ __volatile__ (                                  \
                        "mfs    %0, rpvr" #pvrid ";"            \
-                       : "=r" (tmp) : : "memory");             \
+                       : "=r" (tmp) : : "memory");             \
        val = tmp;                                              \
 }
 
index a2bfa2c..da68d00 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/gfp.h>
 #include <linux/dma-debug.h>
 #include <linux/export.h>
-#include <asm/bug.h>
+#include <linux/bug.h>
 
 /*
  * Generic direct DMA implementation
@@ -197,8 +197,8 @@ EXPORT_SYMBOL(dma_direct_ops);
 
 static int __init dma_init(void)
 {
-       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 
-       return 0;
+       return 0;
 }
 fs_initcall(dma_init);
index aba1f9a..60dcacc 100644 (file)
@@ -140,20 +140,20 @@ int __init setup_early_printk(char *opt)
                switch (version) {
 #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
                case UARTLITE:
-                       printk(KERN_INFO "Early console on uartlite "
-                                               "at 0x%08x\n", base_addr);
+                       pr_info("Early console on uartlite at 0x%08x\n",
+                                                               base_addr);
                        early_console = &early_serial_uartlite_console;
                        break;
 #endif
 #ifdef CONFIG_SERIAL_8250_CONSOLE
                case UART16550:
-                       printk(KERN_INFO "Early console on uart16650 "
-                                               "at 0x%08x\n", base_addr);
+                       pr_info("Early console on uart16650 at 0x%08x\n",
+                                                               base_addr);
                        early_console = &early_serial_uart16550_console;
                        break;
 #endif
                default:
-                       printk(KERN_INFO  "Unsupported early console %d\n",
+                       pr_info("Unsupported early console %d\n",
                                                                version);
                        return 1;
                }
@@ -171,10 +171,9 @@ void __init remap_early_printk(void)
 {
        if (!early_console_initialized || !early_console)
                return;
-       printk(KERN_INFO "early_printk_console remapping from 0x%x to ",
-                                                               base_addr);
+       pr_info("early_printk_console remapping from 0x%x to ", base_addr);
        base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
-       printk(KERN_CONT "0x%x\n", base_addr);
+       pr_cont("0x%x\n", base_addr);
 
 #ifdef CONFIG_MMU
        /*
@@ -197,7 +196,7 @@ void __init disable_early_printk(void)
 {
        if (!early_console_initialized || !early_console)
                return;
-       printk(KERN_WARNING "disabling early console\n");
+       pr_warn("disabling early console\n");
        unregister_console(early_console);
        early_console_initialized = 0;
 }
index 6348dc8..42dd12a 100644 (file)
  * This file handles the architecture-dependent parts of hardware exceptions
  */
 
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kallsyms.h>
-#include <linux/module.h>
 
 #include <asm/exceptions.h>
 #include <asm/entry.h>         /* For KM CPU var */
@@ -40,7 +40,7 @@ void die(const char *str, struct pt_regs *fp, long err)
 {
        console_verbose();
        spin_lock_irq(&die_lock);
-       printk(KERN_WARNING "Oops: %s, sig: %ld\n", str, err);
+       pr_warn("Oops: %s, sig: %ld\n", str, err);
        show_regs(fp);
        spin_unlock_irq(&die_lock);
        /* do_exit() should take care of panic'ing from an interrupt
@@ -61,9 +61,9 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 {
        siginfo_t info;
 
-       if (kernel_mode(regs)) {
+       if (kernel_mode(regs))
                die("Exception in kernel mode", regs, signr);
-       }
+
        info.si_signo = signr;
        info.si_errno = 0;
        info.si_code = code;
@@ -79,8 +79,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 #endif
 
 #if 0
-       printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x " \
-                                                       "ESR=%08x\n",
+       pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
                        type, user_mode(regs) ? "user" : "kernel", fsr,
                        (unsigned int) regs->pc, (unsigned int) regs->esr);
 #endif
@@ -92,8 +91,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
                        _exception(SIGILL, regs, ILL_ILLOPC, addr);
                        return;
                }
-               printk(KERN_WARNING "Illegal opcode exception " \
-                                                       "in kernel mode.\n");
+               pr_warn("Illegal opcode exception in kernel mode.\n");
                die("opcode exception", regs, SIGBUS);
                break;
        case MICROBLAZE_IBUS_EXCEPTION:
@@ -102,8 +100,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
                        _exception(SIGBUS, regs, BUS_ADRERR, addr);
                        return;
                }
-               printk(KERN_WARNING "Instruction bus error exception " \
-                                                       "in kernel mode.\n");
+               pr_warn("Instruction bus error exception in kernel mode.\n");
                die("bus exception", regs, SIGBUS);
                break;
        case MICROBLAZE_DBUS_EXCEPTION:
@@ -112,8 +109,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
                        _exception(SIGBUS, regs, BUS_ADRERR, addr);
                        return;
                }
-               printk(KERN_WARNING "Data bus error exception " \
-                                                       "in kernel mode.\n");
+               pr_warn("Data bus error exception in kernel mode.\n");
                die("bus exception", regs, SIGBUS);
                break;
        case MICROBLAZE_DIV_ZERO_EXCEPTION:
@@ -122,8 +118,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
                        _exception(SIGFPE, regs, FPE_INTDIV, addr);
                        return;
                }
-               printk(KERN_WARNING "Divide by zero exception " \
-                                                       "in kernel mode.\n");
+               pr_warn("Divide by zero exception in kernel mode.\n");
                die("Divide by zero exception", regs, SIGBUS);
                break;
        case MICROBLAZE_FPU_EXCEPTION:
@@ -151,8 +146,8 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 #endif
        default:
        /* FIXME what to do in unexpected exception */
-               printk(KERN_WARNING "Unexpected exception %02x "
-                       "PC=%08x in %s mode\n", type, (unsigned int) addr,
+               pr_warn("Unexpected exception %02x PC=%08x in %s mode\n",
+                       type, (unsigned int) addr,
                        kernel_mode(regs) ? "kernel" : "user");
        }
        return;
index 357d56a..e8a5e9c 100644 (file)
@@ -35,18 +35,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
         * happen. This tool is too much intrusive to
         * ignore such a protection.
         */
-       asm volatile("  1:      lwi     %0, %2, 0;              \
-                       2:      swi     %3, %2, 0;              \
-                               addik   %1, r0, 0;              \
-                       3:                                      \
-                               .section .fixup, \"ax\";        \
-                       4:      brid    3b;                     \
-                               addik   %1, r0, 1;              \
-                               .previous;                      \
-                               .section __ex_table,\"a\";      \
-                               .word   1b,4b;                  \
-                               .word   2b,4b;                  \
-                               .previous;"                     \
+       asm volatile("  1:      lwi     %0, %2, 0;"             \
+                       "2:     swi     %3, %2, 0;"             \
+                       "       addik   %1, r0, 0;"             \
+                       "3:"                                    \
+                       "       .section .fixup, \"ax\";"       \
+                       "4:     brid    3b;"                    \
+                       "       addik   %1, r0, 1;"             \
+                       "       .previous;"                     \
+                       "       .section __ex_table,\"a\";"     \
+                       "       .word   1b,4b;"                 \
+                       "       .word   2b,4b;"                 \
+                       "       .previous;"                     \
                        : "=&r" (old), "=r" (faulted)
                        : "r" (parent), "r" (return_hooker)
        );
@@ -81,16 +81,16 @@ static int ftrace_modify_code(unsigned long addr, unsigned int value)
 {
        int faulted = 0;
 
-       __asm__ __volatile__("  1:      swi     %2, %1, 0;              \
-                                       addik   %0, r0, 0;              \
-                               2:                                      \
-                                       .section .fixup, \"ax\";        \
-                               3:      brid    2b;                     \
-                                       addik   %0, r0, 1;              \
-                                       .previous;                      \
-                                       .section __ex_table,\"a\";      \
-                                       .word   1b,3b;                  \
-                                       .previous;"                     \
+       __asm__ __volatile__("  1:      swi     %2, %1, 0;"             \
+                               "       addik   %0, r0, 0;"             \
+                               "2:"                                    \
+                               "       .section .fixup, \"ax\";"       \
+                               "3:     brid    2b;"                    \
+                               "       addik   %0, r0, 1;"             \
+                               "       .previous;"                     \
+                               "       .section __ex_table,\"a\";"     \
+                               "       .word   1b,3b;"                 \
+                               "       .previous;"                     \
                                : "=r" (faulted)
                                : "r" (addr), "r" (value)
        );
index 154756f..1879a05 100644 (file)
@@ -61,7 +61,7 @@ void setup_heartbeat(void)
        if (gpio) {
                base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
                base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
-               printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr);
+               pr_notice("Heartbeat GPIO at 0x%x\n", base_addr);
 
                /* GPIO is configured as output */
                prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
index 7a1a8d4..8778adf 100644 (file)
@@ -147,12 +147,12 @@ void __init init_IRQ(void)
        intr_mask =
                be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL));
        if (intr_mask > (u32)((1ULL << nr_irq) - 1))
-               printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
+               pr_info(" ERROR: Mismatch in kind-of-intr param\n");
 
 #ifdef CONFIG_SELFMOD_INTC
        selfmod_function((int *) arr_func, intc_baseaddr);
 #endif
-       printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
+       pr_info("%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
                intc->name, intc_baseaddr, nr_irq, intr_mask);
 
        /*
index 09a5e82..8adc924 100644 (file)
@@ -141,7 +141,7 @@ void kgdb_arch_exit(void)
 /*
  * Global data
  */
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 #ifdef __MICROBLAZEEL__
        .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
 #else
index 2b25bcf..9f1d02c 100644 (file)
@@ -7,7 +7,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/string.h>
 #include <linux/cryptohash.h>
 #include <linux/delay.h>
index f39257a..182e6be 100644 (file)
@@ -7,7 +7,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/moduleloader.h>
 #include <linux/kernel.h>
 #include <linux/elf.h>
@@ -108,8 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
                        break;
 
                default:
-                       printk(KERN_ERR "module %s: "
-                               "Unknown relocation: %u\n",
+                       pr_err("module %s: Unknown relocation: %u\n",
                                module->name,
                                ELF32_R_TYPE(rela[i].r_info));
                        return -ENOEXEC;
index 08f8734..fa0ea60 100644 (file)
@@ -8,36 +8,36 @@
  * for more details.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/pm.h>
 #include <linux/tick.h>
 #include <linux/bitops.h>
 #include <linux/ptrace.h>
 #include <asm/pgalloc.h>
-#include <asm/uaccess.h> /* for USER_DS macros */
+#include <linux/uaccess.h> /* for USER_DS macros */
 #include <asm/cacheflush.h>
 
 void show_regs(struct pt_regs *regs)
 {
-       printk(KERN_INFO " Registers dump: mode=%X\r\n", regs->pt_mode);
-       printk(KERN_INFO " r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
+       pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode);
+       pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
                                regs->r1, regs->r2, regs->r3, regs->r4);
-       printk(KERN_INFO " r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
+       pr_info(" r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
                                regs->r5, regs->r6, regs->r7, regs->r8);
-       printk(KERN_INFO " r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
+       pr_info(" r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
                                regs->r9, regs->r10, regs->r11, regs->r12);
-       printk(KERN_INFO " r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
+       pr_info(" r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
                                regs->r13, regs->r14, regs->r15, regs->r16);
-       printk(KERN_INFO " r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
+       pr_info(" r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
                                regs->r17, regs->r18, regs->r19, regs->r20);
-       printk(KERN_INFO " r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
+       pr_info(" r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
                                regs->r21, regs->r22, regs->r23, regs->r24);
-       printk(KERN_INFO " r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
+       pr_info(" r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
                                regs->r25, regs->r26, regs->r27, regs->r28);
-       printk(KERN_INFO " r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
+       pr_info(" r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
                                regs->r29, regs->r30, regs->r31, regs->pc);
-       printk(KERN_INFO " msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
+       pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
                                regs->msr, regs->ear, regs->esr, regs->fsr);
 }
 
index a744e3f..0a2c68f 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <stdarg.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -25,7 +26,6 @@
 #include <linux/delay.h>
 #include <linux/initrd.h>
 #include <linux/bitops.h>
-#include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/debugfs.h>
 #include <linux/irq.h>
index 47187cc..068762f 100644 (file)
@@ -1,8 +1,8 @@
 #undef DEBUG
 
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/etherdevice.h>
 #include <linux/of_address.h>
index b050219..39cf508 100644 (file)
@@ -40,7 +40,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cacheflush.h>
 #include <asm/syscall.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* Returns the address where the register at REG_OFFS in P is stashed away. */
 static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
index 954348f..0263da7 100644 (file)
@@ -150,33 +150,35 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
        /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0,
                                                        tlb1, kernel_tlb); */
 
-       printk("Ramdisk addr 0x%08x, ", ram);
+       pr_info("Ramdisk addr 0x%08x, ", ram);
        if (fdt)
-               printk("FDT at 0x%08x\n", fdt);
+               pr_info("FDT at 0x%08x\n", fdt);
        else
-               printk("Compiled-in FDT at 0x%08x\n",
+               pr_info("Compiled-in FDT at 0x%08x\n",
                                        (unsigned int)_fdt_start);
 
 #ifdef CONFIG_MTD_UCLINUX
-       printk("Found romfs @ 0x%08x (0x%08x)\n",
+       pr_info("Found romfs @ 0x%08x (0x%08x)\n",
                        romfs_base, romfs_size);
-       printk("#### klimit %p ####\n", old_klimit);
+       pr_info("#### klimit %p ####\n", old_klimit);
        BUG_ON(romfs_size < 0); /* What else can we do? */
 
-       printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+       pr_info("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
                        romfs_size, romfs_base, (unsigned)&__bss_stop);
 
-       printk("New klimit: 0x%08x\n", (unsigned)klimit);
+       pr_info("New klimit: 0x%08x\n", (unsigned)klimit);
 #endif
 
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-       if (msr)
-               printk("!!!Your kernel has setup MSR instruction but "
-                               "CPU don't have it %x\n", msr);
+       if (msr) {
+               pr_info("!!!Your kernel has setup MSR instruction but ");
+               pr_cont("CPU don't have it %x\n", msr);
+       }
 #else
-       if (!msr)
-               printk("!!!Your kernel not setup MSR instruction but "
-                               "CPU have it %x\n", msr);
+       if (!msr) {
+               pr_info("!!!Your kernel not setup MSR instruction but ");
+               pr_cont("CPU have it %x\n", msr);
+       }
 #endif
 
        /* Do not copy reset vectors. offset = 0x2 means skip the first
@@ -216,6 +218,8 @@ static int __init debugfs_tlb(void)
        d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
        if (!d)
                return -ENOMEM;
+
+       return 0;
 }
 device_initcall(debugfs_tlb);
 # endif
index 9f7a8bd..d26d7e7 100644 (file)
@@ -242,7 +242,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        set_fs(USER_DS);
 
 #ifdef DEBUG_SIG
-       printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
+       pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
                current->comm, current->pid, frame, regs->pc);
 #endif
 
@@ -317,8 +317,8 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
        int signr;
        struct k_sigaction ka;
 #ifdef DEBUG_SIG
-       printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall);
-       printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
+       pr_info("do signal: %p %d\n", regs, in_syscall);
+       pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
                        regs->r12, current_thread_info()->flags);
 #endif
 
index 84bc668..b4debe2 100644 (file)
@@ -9,11 +9,11 @@
  * for more details.
  */
 
+#include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 #include <linux/thread_info.h>
 #include <linux/ptrace.h>
-#include <linux/module.h>
 #include <asm/unwind.h>
 
 void save_stack_trace(struct stack_trace *trace)
index 63647c5..f905b3a 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/syscalls.h>
 #include <linux/sys.h>
 #include <linux/ipc.h>
 #include <linux/file.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/semaphore.h>
 #include <linux/uaccess.h>
 #include <linux/unistd.h>
 #include <linux/slab.h>
-
 #include <asm/syscalls.h>
 
 asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
index 5541ac5..30e6b50 100644 (file)
@@ -8,9 +8,9 @@
  * for more details.
  */
 
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/kallsyms.h>
-#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/debug_locks.h>
 
@@ -26,7 +26,7 @@ static unsigned long kstack_depth_to_print;   /* 0 == entire stack */
 
 static int __init kstack_setup(char *s)
 {
-       return !strict_strtoul(s, 0, &kstack_depth_to_print);
+       return !kstrtoul(s, 0, &kstack_depth_to_print);
 }
 __setup("kstack=", kstack_setup);
 
@@ -66,9 +66,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
        }
        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
                       words_to_show << 2, 0);
-       printk(KERN_INFO "\n\n");
-
-       pr_info("Call Trace:\n");
+       pr_info("\n\nCall Trace:\n");
        microblaze_unwind(task, NULL);
        pr_info("\n");
 
index 6be4ae3..1f7b8d4 100644 (file)
  */
 
 /* #define DEBUG 1 */
+#include <linux/export.h>
 #include <linux/kallsyms.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/module.h>
 #include <linux/io.h>
 #include <asm/sections.h>
 #include <asm/exceptions.h>
index beb80f3..1af904c 100644 (file)
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
@@ -25,5 +25,4 @@ long long __ashldi3(long long u, word_type b)
 
        return w.ll;
 }
-
 EXPORT_SYMBOL(__ashldi3);
index c884a91..32c334c 100644 (file)
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
@@ -27,5 +27,4 @@ long long __ashrdi3(long long u, word_type b)
 
        return w.ll;
 }
-
 EXPORT_SYMBOL(__ashrdi3);
index a708400..67abc9a 100644 (file)
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
index dcf8d68..adcb253 100644 (file)
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
@@ -25,5 +25,4 @@ long long __lshrdi3(long long u, word_type b)
 
        return w.ll;
 }
-
 EXPORT_SYMBOL(__lshrdi3);
index fe9c53f..f536e81 100644 (file)
  * not any responsibility to update it.
  */
 
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/compiler.h>
-#include <linux/module.h>
 
 #include <linux/string.h>
 
@@ -103,12 +103,12 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
                        }
 #else
                        /* Load the holding buffer */
-                       buf_hold = (*i_src++ & 0xFFFFFF00) >>8;
+                       buf_hold = (*i_src++ & 0xFFFFFF00) >> 8;
 
                        for (; c >= 4; c -= 4) {
                                value = *i_src++;
                                *i_dst++ = buf_hold | ((value & 0xFF) << 24);
-                               buf_hold = (value & 0xFFFFFF00) >>8;
+                               buf_hold = (value & 0xFFFFFF00) >> 8;
                        }
 #endif
                        /* Realign the source */
@@ -129,12 +129,12 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
                        }
 #else
                        /* Load the holding buffer */
-                       buf_hold = (*i_src++ & 0xFFFF0000 )>>16;
+                       buf_hold = (*i_src++ & 0xFFFF0000) >> 16;
 
                        for (; c >= 4; c -= 4) {
                                value = *i_src++;
-                               *i_dst++ = buf_hold | ((value & 0xFFFF)<<16);
-                               buf_hold = (value & 0xFFFF0000) >>16;
+                               *i_dst++ = buf_hold | ((value & 0xFFFF) << 16);
+                               buf_hold = (value & 0xFFFF0000) >> 16;
                        }
 #endif
                        /* Realign the source */
index 2146c37..3611ce7 100644 (file)
  * not any responsibility to update it.
  */
 
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/compiler.h>
-#include <linux/module.h>
 #include <linux/string.h>
 
 #ifdef __HAVE_ARCH_MEMMOVE
@@ -129,7 +129,8 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 
                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
-                               *--i_dst = buf_hold | ((value & 0xFFFFFF00)>>8);
+                               *--i_dst = buf_hold |
+                                               ((value & 0xFFFFFF00) >> 8);
                                buf_hold = (value  & 0xFF) << 24;
                        }
 #endif
@@ -155,7 +156,8 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 
                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
-                               *--i_dst = buf_hold | ((value & 0xFFFF0000)>>16);
+                               *--i_dst = buf_hold |
+                                               ((value & 0xFFFF0000) >> 16);
                                buf_hold = (value & 0xFFFF) << 16;
                        }
 #endif
@@ -181,7 +183,8 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
 
                        for (; c >= 4; c -= 4) {
                                value = *--i_src;
-                               *--i_dst = buf_hold | ((value & 0xFF000000)>> 24);
+                               *--i_dst = buf_hold |
+                                               ((value & 0xFF000000) >> 24);
                                buf_hold = (value & 0xFFFFFF) << 8;
                        }
 #endif
index ddf6793..04ea72c 100644 (file)
  * not any responsibility to update it.
  */
 
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/compiler.h>
-#include <linux/module.h>
 #include <linux/string.h>
 
 #ifdef __HAVE_ARCH_MEMSET
index d365924..a3f9a03 100644 (file)
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
index f085995..0e8cc27 100644 (file)
@@ -38,15 +38,14 @@ __strncpy_user:
        addik   r3,r7,0         /* temp_count = len */
 1:
        lbu     r4,r6,r0
+       beqid   r4,2f
        sb      r4,r5,r0
 
-       addik   r3,r3,-1
-       beqi    r3,2f           /* break on len */
-
        addik   r5,r5,1
-       bneid   r4,1b
        addik   r6,r6,1         /* delay slot */
-       addik   r3,r3,1         /* undo "temp_count--" */
+
+       addik   r3,r3,-1
+       bnei    r3,1b           /* break on len */
 2:
        rsubk   r3,r3,r7        /* temp_count = len - temp_count */
 3:
index 63ca105..d05f158 100644 (file)
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
 
 #include "libgcc.h"
 
index a1e2e18..5226b09 100644 (file)
@@ -13,7 +13,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -37,7 +37,7 @@
 #include <asm/pgalloc.h>
 #include <linux/io.h>
 #include <linux/hardirq.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
 #include <asm/mmu.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -102,8 +102,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 # endif
        if ((unsigned int)ret > cpuinfo.dcache_base &&
                                (unsigned int)ret < cpuinfo.dcache_high)
-               printk(KERN_WARNING
-                       "ERROR: Your cache coherent area is CACHED!!!\n");
+               pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
 
        /* dma_handle is same as physical (shadowed) address */
        *dma_handle = (dma_addr_t)ret;
index 714b35a..731f739 100644 (file)
@@ -32,7 +32,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
 #include <linux/uaccess.h>
 #include <asm/exceptions.h>
 
@@ -100,7 +100,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 
        /* On a kernel SLB miss we can only check for a valid exception entry */
        if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
-               printk(KERN_WARNING "kernel task_size exceed");
+               pr_warn("kernel task_size exceed");
                _exception(SIGSEGV, regs, code, address);
        }
 
@@ -114,9 +114,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 
                /* in_atomic() in user mode is really bad,
                   as is current->mm == NULL. */
-               printk(KERN_EMERG "Page fault in user mode with "
-                      "in_atomic(), mm = %p\n", mm);
-               printk(KERN_EMERG "r15 = %lx  MSR = %lx\n",
+               pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
+                                                                       mm);
+               pr_emerg("r15 = %lx  MSR = %lx\n",
                       regs->r15, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
        }
index 7d78838..5a92576 100644 (file)
@@ -20,8 +20,8 @@
  * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
  */
 
+#include <linux/export.h>
 #include <linux/highmem.h>
-#include <linux/module.h>
 
 /*
  * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
index ce80823..8f8b367 100644 (file)
@@ -89,7 +89,7 @@ static unsigned long highmem_setup(void)
                reservedpages++;
        }
        totalram_pages += totalhigh_pages;
-       printk(KERN_INFO "High memory: %luk\n",
+       pr_info("High memory: %luk\n",
                                        totalhigh_pages << (PAGE_SHIFT-10));
 
        return reservedpages;
@@ -142,8 +142,8 @@ void __init setup_memory(void)
                        ((u32)_text <= (memory_start + lowmem_size - 1))) {
                        memory_size = lowmem_size;
                        PAGE_OFFSET = memory_start;
-                       printk(KERN_INFO "%s: Main mem: 0x%x, "
-                               "size 0x%08x\n", __func__, (u32) memory_start,
+                       pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
+                               __func__, (u32) memory_start,
                                        (u32) memory_size);
                        break;
                }
@@ -158,7 +158,7 @@ void __init setup_memory(void)
        kernel_align_start = PAGE_DOWN((u32)_text);
        /* ALIGN can be remove because _end in vmlinux.lds.S is align */
        kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
-       printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
+       pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
                __func__, kernel_align_start, kernel_align_start
                        + kernel_align_size, kernel_align_size);
        memblock_reserve(kernel_align_start, kernel_align_size);
@@ -181,10 +181,10 @@ void __init setup_memory(void)
        max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
        max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
 
-       printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
-       printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
-       printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
-       printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);
+       pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
+       pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
+       pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
+       pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
        /*
         * Find an area to use for the bootmem bitmap.
@@ -246,7 +246,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
                free_page(addr);
                totalram_pages++;
        }
-       printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+       pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -260,7 +260,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
                totalram_pages++;
                pages++;
        }
-       printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
+       pr_notice("Freeing initrd memory: %dk freed\n",
                                        (int)(pages * (PAGE_SIZE / 1024)));
 }
 #endif
@@ -304,11 +304,11 @@ void __init mem_init(void)
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
 
-       pr_info("Memory: %luk/%luk available (%luk kernel code, "
-               "%luk reserved, %luk data, %luk bss, %luk init)\n",
+       pr_info("Memory: %luk/%luk available (%luk kernel code, ",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
-               codesize >> 10,
+               codesize >> 10);
+       pr_cont("%luk reserved, %luk data, %luk bss, %luk init)\n",
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                bsssize >> 10,
@@ -394,17 +394,17 @@ asmlinkage void __init mmu_init(void)
        unsigned int kstart, ksize;
 
        if (!memblock.reserved.cnt) {
-               printk(KERN_EMERG "Error memory count\n");
+               pr_emerg("Error memory count\n");
                machine_restart(NULL);
        }
 
        if ((u32) memblock.memory.regions[0].size < 0x400000) {
-               printk(KERN_EMERG "Memory must be greater than 4MB\n");
+               pr_emerg("Memory must be greater than 4MB\n");
                machine_restart(NULL);
        }
 
        if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
-               printk(KERN_EMERG "Kernel size is greater than memory node\n");
+               pr_emerg("Kernel size is greater than memory node\n");
                machine_restart(NULL);
        }
 
index d1c06d0..10b3bd0 100644 (file)
@@ -26,8 +26,8 @@
  *
  */
 
+#include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
@@ -39,8 +39,6 @@
 #include <asm/sections.h>
 #include <asm/fixmap.h>
 
-#define flush_HPTE(X, va, pg)  _tlbie(va)
-
 unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);
@@ -75,9 +73,8 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
                p >= memory_start && p < virt_to_phys(high_memory) &&
                !(p >= virt_to_phys((unsigned long)&__bss_stop) &&
                p < virt_to_phys((unsigned long)__bss_stop))) {
-               printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
-                       " is RAM lr %pf\n", (unsigned long)p,
-                       __builtin_return_address(0));
+               pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
+                       (unsigned long)p, __builtin_return_address(0));
                return NULL;
        }
 
@@ -128,9 +125,10 @@ void __iomem *ioremap(phys_addr_t addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap);
 
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
-       if (addr > high_memory && (unsigned long) addr < ioremap_bot)
+       if ((__force void *)addr > high_memory &&
+                                       (unsigned long) addr < ioremap_bot)
                vfree((void *) (PAGE_MASK & (unsigned long) addr));
 }
 EXPORT_SYMBOL(iounmap);
@@ -152,8 +150,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
                set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
                                __pgprot(flags)));
                if (unlikely(mem_init_done))
-                       flush_HPTE(0, va, pmd_val(*pd));
-                       /* flush_HPTE(0, va, pg); */
+                       _tlbie(va);
        }
        return err;
 }
index 4196eb6..ae4fca4 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/prom.h>
 #include <asm/pci-bridge.h>
 
index b07abba..94149f5 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/pci.h>
 #include <linux/mm.h>
 #include <linux/export.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/pci-bridge.h>
 
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
index 9641655..9ea521e 100644 (file)
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
+#include <linux/pci.h>
 #include <linux/export.h>
 
 #include <asm/processor.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/pci-bridge.h>
 #include <asm/byteorder.h>
 
@@ -552,11 +553,10 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
                 */
                if ((offset + size) > hose->isa_mem_size) {
 #ifdef CONFIG_MMU
-                       printk(KERN_DEBUG
-                               "Process %s (pid:%d) mapped non-existing PCI"
-                               "legacy memory for 0%04x:%02x\n",
-                               current->comm, current->pid, pci_domain_nr(bus),
-                                                               bus->number);
+                       pr_debug("Process %s (pid:%d) mapped non-existing PCI",
+                               current->comm, current->pid);
+                       pr_debug("legacy memory for 0%04x:%02x\n",
+                               pci_domain_nr(bus), bus->number);
 #endif
                        if (vma->vm_flags & VM_SHARED)
                                return shmem_zero_setup(vma);
@@ -564,7 +564,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
                }
                offset += hose->isa_mem_phys;
        } else {
-               unsigned long io_offset = (unsigned long)hose->io_base_virt - \
+               unsigned long io_offset = (unsigned long)hose->io_base_virt -
                                                                _IO_BASE;
                unsigned long roffset = offset + io_offset;
                rp = &hose->io_resource;
@@ -668,7 +668,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
        unsigned long long isa_mb = 0;
        struct resource *res;
 
-       printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
+       pr_info("PCI host bridge %s %s ranges:\n",
               dev->full_name, primary ? "(primary)" : "");
 
        /* Get ranges property */
@@ -685,9 +685,10 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                cpu_addr = of_translate_address(dev, ranges + 3);
                size = of_read_number(ranges + pna + 3, 2);
 
-               pr_debug("pci_space: 0x%08x pci_addr:0x%016llx "
-                               "cpu_addr:0x%016llx size:0x%016llx\n",
-                                       pci_space, pci_addr, cpu_addr, size);
+               pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
+                               pci_space, pci_addr);
+               pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
+                                       cpu_addr, size);
 
                ranges += np;
 
@@ -716,14 +717,12 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                res = NULL;
                switch ((pci_space >> 24) & 0x3) {
                case 1:         /* PCI IO space */
-                       printk(KERN_INFO
-                              "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
+                       pr_info("  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
                               cpu_addr, cpu_addr + size - 1, pci_addr);
 
                        /* We support only one IO range */
                        if (hose->pci_io_size) {
-                               printk(KERN_INFO
-                                      " \\--> Skipped (too many) !\n");
+                               pr_info(" \\--> Skipped (too many) !\n");
                                continue;
                        }
                        /* On 32 bits, limit I/O space to 16MB */
@@ -750,15 +749,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                        break;
                case 2:         /* PCI Memory space */
                case 3:         /* PCI 64 bits Memory space */
-                       printk(KERN_INFO
-                              " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
+                       pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
                               cpu_addr, cpu_addr + size - 1, pci_addr,
                               (pci_space & 0x40000000) ? "Prefetch" : "");
 
                        /* We support only 3 memory ranges */
                        if (memno >= 3) {
-                               printk(KERN_INFO
-                                      " \\--> Skipped (too many) !\n");
+                               pr_info(" \\--> Skipped (too many) !\n");
                                continue;
                        }
                        /* Handles ISA memory hole space here */
@@ -781,8 +778,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                hose->pci_mem_offset = cpu_addr - pci_addr;
                        else if (pci_addr != 0 &&
                                 hose->pci_mem_offset != cpu_addr - pci_addr) {
-                               printk(KERN_INFO
-                                      " \\--> Skipped (offset mismatch) !\n");
+                               pr_info(" \\--> Skipped (offset mismatch) !\n");
                                continue;
                        }
 
@@ -809,7 +805,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
         */
        if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
                unsigned int next = isa_hole + 1;
-               printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
+               pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
                if (next < memno)
                        memmove(&hose->mem_resources[isa_hole],
                                &hose->mem_resources[next],
@@ -833,7 +829,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
        int i;
 
        if (!hose) {
-               printk(KERN_ERR "No host bridge for PCI dev %s !\n",
+               pr_err("No host bridge for PCI dev %s !\n",
                       pci_name(dev));
                return;
        }
@@ -842,12 +838,12 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
                if (!res->flags)
                        continue;
                if (res->start == 0) {
-                       pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \
-                                                       "is unassigned\n",
+                       pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
                                 pci_name(dev), i,
                                 (unsigned long long)res->start,
                                 (unsigned long long)res->end,
                                 (unsigned int)res->flags);
+                       pr_debug("is unassigned\n");
                        res->end -= res->start;
                        res->start = 0;
                        res->flags |= IORESOURCE_UNSET;
@@ -856,7 +852,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
 
                pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
                         pci_name(dev), i,
-                        (unsigned long long)res->start,\
+                        (unsigned long long)res->start,
                         (unsigned long long)res->end,
                         (unsigned int)res->flags);
        }
@@ -947,7 +943,7 @@ static void pcibios_fixup_bridge(struct pci_bus *bus)
 
                pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
                         pci_name(dev), i,
-                        (unsigned long long)res->start,\
+                        (unsigned long long)res->start,
                         (unsigned long long)res->end,
                         (unsigned int)res->flags);
 
@@ -1154,12 +1150,12 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                        }
                }
 
-               pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
-                        "[0x%x], parent %p (%s)\n",
+               pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
                         bus->self ? pci_name(bus->self) : "PHB",
                         bus->number, i,
                         (unsigned long long)res->start,
-                        (unsigned long long)res->end,
+                        (unsigned long long)res->end);
+               pr_debug("[0x%x], parent %p (%s)\n",
                         (unsigned int)res->flags,
                         pr, (pr && pr->name) ? pr->name : "nil");
 
@@ -1174,9 +1170,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                        if (reparent_resources(pr, res) == 0)
                                continue;
                }
-               printk(KERN_WARNING "PCI: Cannot allocate resource region "
-                      "%d of PCI bridge %d, will remap\n", i, bus->number);
-
+               pr_warn("PCI: Cannot allocate resource region ");
+               pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
                res->start = res->end = 0;
                res->flags = 0;
        }
@@ -1198,8 +1193,8 @@ static inline void alloc_resource(struct pci_dev *dev, int idx)
        pr = pci_find_parent_resource(dev, r);
        if (!pr || (pr->flags & IORESOURCE_UNSET) ||
            request_resource(pr, r) < 0) {
-               printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
-                      " of device %s, will remap\n", idx, pci_name(dev));
+               pr_warn("PCI: Cannot allocate resource region %d ", idx);
+               pr_cont("of device %s, will remap\n", pci_name(dev));
                if (pr)
                        pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
                                 pr,
@@ -1282,8 +1277,7 @@ static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
        res->end = (offset + 0xfff) & 0xfffffffful;
        pr_debug("Candidate legacy IO: %pR\n", res);
        if (request_resource(&hose->io_resource, res)) {
-               printk(KERN_DEBUG
-                      "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
+               pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
                       pci_domain_nr(bus), bus->number, res);
                kfree(res);
        }
@@ -1311,8 +1305,7 @@ static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
        res->end = 0xbffff + offset;
        pr_debug("Candidate VGA memory: %pR\n", res);
        if (request_resource(pres, res)) {
-               printk(KERN_DEBUG
-                      "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
+               pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
                       pci_domain_nr(bus), bus->number, res);
                kfree(res);
        }
@@ -1362,10 +1355,9 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
                        if (r->parent || !r->start || !r->flags)
                                continue;
 
-                       pr_debug("PCI: Claiming %s: "
-                                "Resource %d: %016llx..%016llx [%x]\n",
-                                pci_name(dev), i,
-                                (unsigned long long)r->start,
+                       pr_debug("PCI: Claiming %s: ", pci_name(dev));
+                       pr_debug("Resource %d: %016llx..%016llx [%x]\n",
+                                i, (unsigned long long)r->start,
                                 (unsigned long long)r->end,
                                 (unsigned int)r->flags);
 
@@ -1423,9 +1415,9 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
        res->end = (res->end + io_offset) & 0xffffffffu;
 
        if (!res->flags) {
-               printk(KERN_WARNING "PCI: I/O resource not set for host"
-                      " bridge %s (domain %d)\n",
-                      hose->dn->full_name, hose->global_number);
+               pr_warn("PCI: I/O resource not set for host ");
+               pr_cont("bridge %s (domain %d)\n",
+                       hose->dn->full_name, hose->global_number);
                /* Workaround for lack of IO resource only on 32-bit */
                res->start = (unsigned long)hose->io_base_virt - isa_io_base;
                res->end = res->start + IO_SPACE_LIMIT;
@@ -1445,9 +1437,9 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
                if (!res->flags) {
                        if (i > 0)
                                continue;
-                       printk(KERN_ERR "PCI: Memory resource 0 not set for "
-                              "host bridge %s (domain %d)\n",
-                              hose->dn->full_name, hose->global_number);
+                       pr_err("PCI: Memory resource 0 not set for ");
+                       pr_cont("host bridge %s (domain %d)\n",
+                               hose->dn->full_name, hose->global_number);
 
                        /* Workaround for lack of MEM resource only on 32-bit */
                        res->start = hose->pci_mem_offset;
@@ -1489,7 +1481,7 @@ static void pcibios_scan_phb(struct pci_controller *hose)
        bus = pci_scan_root_bus(hose->parent, hose->first_busno,
                                hose->ops, hose, &resources);
        if (bus == NULL) {
-               printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+               pr_err("Failed to create bus for PCI domain %04x\n",
                       hose->global_number);
                pci_free_resource_list(&resources);
                return;
@@ -1505,7 +1497,7 @@ static int __init pcibios_init(void)
        struct pci_controller *hose, *tmp;
        int next_busno = 0;
 
-       printk(KERN_INFO "PCI: Probing PCI hardware\n");
+       pr_info("PCI: Probing PCI hardware\n");
 
        /* Scan all of the recorded PCI controllers.  */
        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
@@ -1605,7 +1597,7 @@ fake_pci_bus(struct pci_controller *hose, int busnr)
        static struct pci_bus bus;
 
        if (!hose)
-               printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
+               pr_err("Can't find hose for PCI bus %d!\n", busnr);
 
        bus.number = busnr;
        bus.sysdata = hose;
index 0687a42..14c7da5 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #define XPLB_PCI_ADDR 0x10c
 #define XPLB_PCI_DATA 0x110
@@ -82,7 +82,7 @@ xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
  *
  * List pci devices in very early phase.
  */
-void __init xilinx_early_pci_scan(struct pci_controller *hose)
+static void __init xilinx_early_pci_scan(struct pci_controller *hose)
 {
        u32 bus = 0;
        u32 val, dev, func, offset;
@@ -91,27 +91,27 @@ void __init xilinx_early_pci_scan(struct pci_controller *hose)
        for (dev = 0; dev < 2; dev++) {
                /* List only first function number - up-to 8 functions */
                for (func = 0; func < 1; func++) {
-                       printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
+                       pr_info("%02x:%02x:%02x", bus, dev, func);
                        /* read the first 64 standardized bytes */
                        /* Up-to 192 bytes can be list of capabilities */
                        for (offset = 0; offset < 64; offset += 4) {
                                early_read_config_dword(hose, bus,
                                        PCI_DEVFN(dev, func), offset, &val);
                                if (offset == 0 && val == 0xFFFFFFFF) {
-                                       printk(KERN_CONT "\nABSENT");
+                                       pr_cont("\nABSENT");
                                        break;
                                }
                                if (!(offset % 0x10))
-                                       printk(KERN_CONT "\n%04x:    ", offset);
+                                       pr_cont("\n%04x:    ", offset);
 
-                               printk(KERN_CONT "%08x  ", val);
+                               pr_cont("%08x  ", val);
                        }
-                       printk(KERN_INFO "\n");
+                       pr_info("\n");
                }
        }
 }
 #else
-void __init xilinx_early_pci_scan(struct pci_controller *hose)
+static void __init xilinx_early_pci_scan(struct pci_controller *hose)
 {
 }
 #endif
index 91b9d69..4b597d9 100644 (file)
@@ -18,10 +18,10 @@ platforms += loongson1
 platforms += mti-malta
 platforms += mti-sead3
 platforms += netlogic
-platforms += pmc-sierra
+platforms += pmcs-msp71xx
 platforms += pnx833x
-platforms += pnx8550
 platforms += powertv
+platforms += ralink
 platforms += rb532
 platforms += sgi-ip22
 platforms += sgi-ip27
index 1986415..ae9c716 100644 (file)
@@ -38,6 +38,7 @@ config MIPS
        select GENERIC_CLOCKEVENTS
        select GENERIC_CMOS_UPDATE
        select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_VIRT_TO_BUS
        select MODULES_USE_ELF_REL if MODULES
        select MODULES_USE_ELF_RELA if MODULES && 64BIT
        select CLONE_BACKWARDS
@@ -107,12 +108,14 @@ config ATH79
 config BCM47XX
        bool "Broadcom BCM47XX based boards"
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select BOOT_RAW
        select CEVT_R4K
        select CSRC_R4K
        select DMA_NONCOHERENT
        select FW_CFE
        select HW_HAS_PCI
        select IRQ_CPU
+       select NO_EXCEPT_FILL
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
@@ -294,6 +297,7 @@ config MIPS_MALTA
        select BOOT_RAW
        select CEVT_R4K
        select CSRC_R4K
+       select CSRC_GIC
        select DMA_NONCOHERENT
        select GENERIC_ISA_DMA
        select HAVE_PCSPKR_PLATFORM
@@ -353,6 +357,7 @@ config MIPS_SEAD3
        select USB_ARCH_HAS_EHCI
        select USB_EHCI_BIG_ENDIAN_DESC
        select USB_EHCI_BIG_ENDIAN_MMIO
+       select USE_OF
        help
          This enables support for the MIPS Technologies SEAD3 evaluation
          board.
@@ -384,16 +389,6 @@ config NXP_STB225
        help
         Support for NXP Semiconductors STB225 Development Board.
 
-config PNX8550_JBS
-       bool "NXP PNX8550 based JBS board"
-       select PNX8550
-       select SYS_SUPPORTS_LITTLE_ENDIAN
-
-config PNX8550_STB810
-       bool "NXP PNX8550 based STB810 board"
-       select PNX8550
-       select SYS_SUPPORTS_LITTLE_ENDIAN
-
 config PMC_MSP
        bool "PMC-Sierra MSP chipsets"
        select CEVT_R4K
@@ -433,6 +428,22 @@ config POWERTV
        help
          This enables support for the Cisco PowerTV Platform.
 
+config RALINK
+       bool "Ralink based machines"
+       select CEVT_R4K
+       select CSRC_R4K
+       select BOOT_RAW
+       select DMA_NONCOHERENT
+       select IRQ_CPU
+       select USE_OF
+       select SYS_HAS_CPU_MIPS32_R1
+       select SYS_HAS_CPU_MIPS32_R2
+       select SYS_SUPPORTS_32BIT_KERNEL
+       select SYS_SUPPORTS_LITTLE_ENDIAN
+       select SYS_HAS_EARLY_PRINTK
+       select HAVE_MACH_CLKDEV
+       select CLKDEV_LOOKUP
+
 config SGI_IP22
        bool "SGI IP22 (Indy/Indigo2)"
        select FW_ARC
@@ -834,8 +845,9 @@ source "arch/mips/jazz/Kconfig"
 source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lantiq/Kconfig"
 source "arch/mips/lasat/Kconfig"
-source "arch/mips/pmc-sierra/Kconfig"
+source "arch/mips/pmcs-msp71xx/Kconfig"
 source "arch/mips/powertv/Kconfig"
+source "arch/mips/ralink/Kconfig"
 source "arch/mips/sgi-ip27/Kconfig"
 source "arch/mips/sibyte/Kconfig"
 source "arch/mips/txx9/Kconfig"
@@ -916,6 +928,9 @@ config CSRC_POWERTV
 config CSRC_R4K
        bool
 
+config CSRC_GIC
+       bool
+
 config CSRC_SB1250
        bool
 
@@ -1102,19 +1117,6 @@ config SOC_PNX8335
        bool
        select SOC_PNX833X
 
-config PNX8550
-       bool
-       select SOC_PNX8550
-
-config SOC_PNX8550
-       bool
-       select DMA_NONCOHERENT
-       select HW_HAS_PCI
-       select SYS_HAS_CPU_MIPS32_R1
-       select SYS_HAS_EARLY_PRINTK
-       select SYS_SUPPORTS_32BIT_KERNEL
-       select GENERIC_GPIO
-
 config SWAP_IO_SPACE
        bool
 
index f2dfd40..6f7978f 100644 (file)
@@ -191,7 +191,7 @@ endif
 include $(srctree)/arch/mips/Kbuild.platforms
 
 ifdef CONFIG_PHYSICAL_START
-load-y                                  = $(CONFIG_PHYSICAL_START)
+load-y                                 = $(CONFIG_PHYSICAL_START)
 endif
 
 cflags-y                       += -I$(srctree)/arch/mips/include/asm/mach-generic
index 942c580..fa1bdd1 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Core Alchemy code
 #
-platform-$(CONFIG_MIPS_ALCHEMY)        += alchemy/common/
+platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
 
 
 #
@@ -45,7 +45,7 @@ load-$(CONFIG_MIPS_MTX1)      += 0xffffffff80100000
 #
 # MyCable eval board
 #
-platform-$(CONFIG_MIPS_XXS1500)        += alchemy/
+platform-$(CONFIG_MIPS_XXS1500) += alchemy/
 load-$(CONFIG_MIPS_XXS1500)    += 0xffffffff80100000
 
 #
@@ -56,7 +56,7 @@ load-$(CONFIG_MIPS_GPR)               += 0xffffffff80100000
 
 # boards can specify their own <gpio.h> in one of their include dirs.
 # If they do, placing this line here at the end will make sure the
-# compiler picks the board one.  If they don't, it will make sure
+# compiler picks the board one.         If they don't, it will make sure
 # the alchemy generic gpio header is picked up.
 
 cflags-$(CONFIG_MIPS_ALCHEMY)  += -I$(srctree)/arch/mips/include/asm/mach-au1x00
index ba32590..cb0f6af 100644 (file)
@@ -135,33 +135,33 @@ static struct mtd_partition gpr_mtd_partitions[] = {
        {
                .name   = "kernel",
                .size   = 0x00200000,
-               .offset = 0,
+               .offset = 0,
        },
        {
                .name   = "rootfs",
                .size   = 0x00800000,
-               .offset = MTDPART_OFS_APPEND,
+               .offset = MTDPART_OFS_APPEND,
                .mask_flags = MTD_WRITEABLE,
        },
        {
                .name   = "config",
                .size   = 0x00200000,
-               .offset = 0x01d00000,
+               .offset = 0x01d00000,
        },
        {
                .name   = "yamon",
                .size   = 0x00100000,
-               .offset = 0x01c00000,
+               .offset = 0x01c00000,
        },
        {
                .name   = "yamon env vars",
                .size   = 0x00040000,
-               .offset = MTDPART_OFS_APPEND,
+               .offset = MTDPART_OFS_APPEND,
        },
        {
                .name   = "kernel+rootfs",
                .size   = 0x00a00000,
-               .offset = 0,
+               .offset = 0,
        },
 };
 
index a124c25..4a9baa9 100644 (file)
@@ -173,23 +173,23 @@ static struct mtd_partition mtx1_mtd_partitions[] = {
        {
                .name   = "filesystem",
                .size   = 0x01C00000,
-               .offset = 0,
+               .offset = 0,
        },
        {
                .name   = "yamon",
                .size   = 0x00100000,
-               .offset = MTDPART_OFS_APPEND,
+               .offset = MTDPART_OFS_APPEND,
                .mask_flags = MTD_WRITEABLE,
        },
        {
                .name   = "kernel",
                .size   = 0x002c0000,
-               .offset = MTDPART_OFS_APPEND,
+               .offset = MTDPART_OFS_APPEND,
        },
        {
                .name   = "yamon env",
                .size   = 0x00040000,
-               .offset = MTDPART_OFS_APPEND,
+               .offset = MTDPART_OFS_APPEND,
        },
 };
 
index cf02d7d..19d5642 100644 (file)
@@ -252,7 +252,7 @@ EXPORT_SYMBOL(au1xxx_ddma_del_device);
 u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
        void (*callback)(int, void *), void *callparam)
 {
-       unsigned long   flags;
+       unsigned long   flags;
        u32             used, chan;
        u32             dcp;
        int             i;
@@ -512,7 +512,7 @@ u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
                break;
        }
 
-       /* If source input is FIFO, set static address. */
+       /* If source input is FIFO, set static address. */
        if (stp->dev_flags & DEV_FLAGS_IN) {
                if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
                        src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
@@ -635,7 +635,7 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
        dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
        ctp->chan_ptr->ddma_dbell = 0;
 
-       /* Get next descriptor pointer. */
+       /* Get next descriptor pointer. */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
        /* Return something non-zero. */
@@ -697,7 +697,7 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
        dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
        ctp->chan_ptr->ddma_dbell = 0;
 
-       /* Get next descriptor pointer. */
+       /* Get next descriptor pointer. */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
        /* Return something non-zero. */
@@ -742,7 +742,7 @@ u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
        *nbytes = dp->dscr_cmd1;
        rv = dp->dscr_stat;
 
-       /* Get next descriptor pointer. */
+       /* Get next descriptor pointer. */
        ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
        /* Return something non-zero. */
@@ -891,7 +891,7 @@ void au1xxx_dbdma_dump(u32 chanid)
        chan_tab_t       *ctp;
        au1x_ddma_desc_t *dp;
        dbdev_tab_t      *stp, *dtp;
-       au1x_dma_chan_t  *cp;
+       au1x_dma_chan_t  *cp;
        u32 i            = 0;
 
        ctp = *((chan_tab_t **)chanid);
@@ -969,7 +969,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
        dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
        ctp->chan_ptr->ddma_dbell = 0;
 
-       /* Get next descriptor pointer. */
+       /* Get next descriptor pointer. */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
        /* Return something non-zero. */
index f1b50f0..f9bc4f5 100644 (file)
@@ -106,14 +106,14 @@ struct gpio_chip alchemy_gpio_chip[] = {
                .ngpio                  = ALCHEMY_GPIO1_NUM,
        },
        [1] = {
-               .label                  = "alchemy-gpio2",
-               .direction_input        = gpio2_direction_input,
-               .direction_output       = gpio2_direction_output,
-               .get                    = gpio2_get,
-               .set                    = gpio2_set,
+               .label                  = "alchemy-gpio2",
+               .direction_input        = gpio2_direction_input,
+               .direction_output       = gpio2_direction_output,
+               .get                    = gpio2_get,
+               .set                    = gpio2_set,
                .to_irq                 = gpio2_to_irq,
-               .base                   = ALCHEMY_GPIO2_BASE,
-               .ngpio                  = ALCHEMY_GPIO2_NUM,
+               .base                   = ALCHEMY_GPIO2_BASE,
+               .ngpio                  = ALCHEMY_GPIO2_NUM,
        },
 };
 
index 94fbcd1..63a7181 100644 (file)
@@ -84,20 +84,20 @@ static int au1300_gpic_settype(struct irq_data *d, unsigned int type);
  * needs the highest priority.
  */
 struct alchemy_irqmap au1000_irqmap[] __initdata = {
-       { AU1000_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_UART1_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_UART2_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_UART3_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_SSI0_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_SSI1_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_DMA_INT_BASE,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_DMA_INT_BASE+1,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_DMA_INT_BASE+2,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_DMA_INT_BASE+3,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_DMA_INT_BASE+4,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_DMA_INT_BASE+5,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_DMA_INT_BASE+6,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_DMA_INT_BASE+7,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_UART1_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_UART2_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_UART3_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_SSI0_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_SSI1_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_DMA_INT_BASE,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_DMA_INT_BASE+1,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_DMA_INT_BASE+2,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_DMA_INT_BASE+3,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_DMA_INT_BASE+4,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_DMA_INT_BASE+5,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_DMA_INT_BASE+6,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_DMA_INT_BASE+7,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1000_TOY_INT,         IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1000_TOY_MATCH0_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1000_TOY_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -106,33 +106,33 @@ struct alchemy_irqmap au1000_irqmap[] __initdata = {
        { AU1000_RTC_MATCH0_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1000_RTC_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1000_RTC_MATCH2_INT,  IRQ_TYPE_EDGE_RISING, 0, 0 },
-       { AU1000_IRDA_TX_INT,     IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_IRDA_RX_INT,     IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH,  0, 0 },
+       { AU1000_IRDA_TX_INT,     IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_IRDA_RX_INT,     IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH,  0, 0 },
        { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1000_USB_HOST_INT,    IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1000_USB_HOST_INT,    IRQ_TYPE_LEVEL_LOW,   1, 0 },
        { AU1000_ACSYNC_INT,      IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1000_MAC0_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1000_MAC1_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_MAC0_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1000_MAC1_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1000_AC97C_INT,       IRQ_TYPE_EDGE_RISING, 1, 0 },
        { -1, },
 };
 
 struct alchemy_irqmap au1500_irqmap[] __initdata = {
-       { AU1500_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_PCI_INTA,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1500_PCI_INTB,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1500_UART3_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_PCI_INTC,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1500_PCI_INTD,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1500_DMA_INT_BASE,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_DMA_INT_BASE+1,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_DMA_INT_BASE+2,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_DMA_INT_BASE+3,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_DMA_INT_BASE+4,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_DMA_INT_BASE+5,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_DMA_INT_BASE+6,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_DMA_INT_BASE+7,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_PCI_INTA,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1500_PCI_INTB,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1500_UART3_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_PCI_INTC,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1500_PCI_INTD,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1500_DMA_INT_BASE,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_DMA_INT_BASE+1,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_DMA_INT_BASE+2,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_DMA_INT_BASE+3,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_DMA_INT_BASE+4,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_DMA_INT_BASE+5,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_DMA_INT_BASE+6,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_DMA_INT_BASE+7,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1500_TOY_INT,         IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1500_TOY_MATCH0_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1500_TOY_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -141,31 +141,31 @@ struct alchemy_irqmap au1500_irqmap[] __initdata = {
        { AU1500_RTC_MATCH0_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1500_RTC_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1500_RTC_MATCH2_INT,  IRQ_TYPE_EDGE_RISING, 0, 0 },
-       { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH,  0, 0 },
+       { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH,  0, 0 },
        { AU1500_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1500_USB_HOST_INT,    IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1500_USB_HOST_INT,    IRQ_TYPE_LEVEL_LOW,   1, 0 },
        { AU1500_ACSYNC_INT,      IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1500_MAC0_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1500_MAC1_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_MAC0_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1500_MAC1_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1500_AC97C_INT,       IRQ_TYPE_EDGE_RISING, 1, 0 },
        { -1, },
 };
 
 struct alchemy_irqmap au1100_irqmap[] __initdata = {
-       { AU1100_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_UART1_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_SD_INT,          IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_UART3_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_SSI0_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_SSI1_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_DMA_INT_BASE,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_DMA_INT_BASE+1,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_DMA_INT_BASE+2,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_DMA_INT_BASE+3,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_DMA_INT_BASE+4,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_DMA_INT_BASE+5,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_DMA_INT_BASE+6,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_DMA_INT_BASE+7,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_UART1_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_SD_INT,          IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_UART3_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_SSI0_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_SSI1_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_DMA_INT_BASE,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_DMA_INT_BASE+1,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_DMA_INT_BASE+2,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_DMA_INT_BASE+3,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_DMA_INT_BASE+4,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_DMA_INT_BASE+5,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_DMA_INT_BASE+6,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_DMA_INT_BASE+7,  IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1100_TOY_INT,         IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1100_TOY_MATCH0_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1100_TOY_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -174,33 +174,33 @@ struct alchemy_irqmap au1100_irqmap[] __initdata = {
        { AU1100_RTC_MATCH0_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1100_RTC_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1100_RTC_MATCH2_INT,  IRQ_TYPE_EDGE_RISING, 0, 0 },
-       { AU1100_IRDA_TX_INT,     IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_IRDA_RX_INT,     IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH,  0, 0 },
+       { AU1100_IRDA_TX_INT,     IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_IRDA_RX_INT,     IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH,  0, 0 },
        { AU1100_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1100_USB_HOST_INT,    IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1100_USB_HOST_INT,    IRQ_TYPE_LEVEL_LOW,   1, 0 },
        { AU1100_ACSYNC_INT,      IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1100_MAC0_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1100_LCD_INT,         IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_MAC0_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1100_LCD_INT,         IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1100_AC97C_INT,       IRQ_TYPE_EDGE_RISING, 1, 0 },
        { -1, },
 };
 
 struct alchemy_irqmap au1550_irqmap[] __initdata = {
-       { AU1550_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_PCI_INTA,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1550_PCI_INTB,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1550_DDMA_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_CRYPTO_INT,      IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_PCI_INTC,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1550_PCI_INTD,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1550_PCI_RST_INT,     IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1550_UART1_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_UART3_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_PSC0_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_PSC1_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_PSC2_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_PSC3_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_PCI_INTA,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1550_PCI_INTB,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1550_DDMA_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_CRYPTO_INT,      IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_PCI_INTC,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1550_PCI_INTD,        IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1550_PCI_RST_INT,     IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1550_UART1_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_UART3_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_PSC0_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_PSC1_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_PSC2_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_PSC3_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1550_TOY_INT,         IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1550_TOY_MATCH0_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1550_TOY_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -210,26 +210,26 @@ struct alchemy_irqmap au1550_irqmap[] __initdata = {
        { AU1550_RTC_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1550_RTC_MATCH2_INT,  IRQ_TYPE_EDGE_RISING, 0, 0 },
        { AU1550_NAND_INT,        IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH,  0, 0 },
+       { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH,  0, 0 },
        { AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1550_USB_HOST_INT,    IRQ_TYPE_LEVEL_LOW,   1, 0 },
-       { AU1550_MAC0_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1550_MAC1_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_USB_HOST_INT,    IRQ_TYPE_LEVEL_LOW,   1, 0 },
+       { AU1550_MAC0_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1550_MAC1_DMA_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { -1, },
 };
 
 struct alchemy_irqmap au1200_irqmap[] __initdata = {
-       { AU1200_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_UART0_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1200_SWT_INT,         IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1200_SD_INT,          IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_DDMA_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_MAE_BE_INT,      IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_UART1_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_MAE_FE_INT,      IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_PSC0_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_PSC1_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_AES_INT,         IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_CAMERA_INT,      IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_SD_INT,          IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_DDMA_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_MAE_BE_INT,      IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_UART1_INT,       IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_MAE_FE_INT,      IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_PSC0_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_PSC1_INT,        IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_AES_INT,         IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_CAMERA_INT,      IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { AU1200_TOY_INT,         IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1200_TOY_MATCH0_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1200_TOY_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -239,9 +239,9 @@ struct alchemy_irqmap au1200_irqmap[] __initdata = {
        { AU1200_RTC_MATCH1_INT,  IRQ_TYPE_EDGE_RISING, 1, 0 },
        { AU1200_RTC_MATCH2_INT,  IRQ_TYPE_EDGE_RISING, 0, 0 },
        { AU1200_NAND_INT,        IRQ_TYPE_EDGE_RISING, 1, 0 },
-       { AU1200_USB_INT,         IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_LCD_INT,         IRQ_TYPE_LEVEL_HIGH,  1, 0 },
-       { AU1200_MAE_BOTH_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_USB_INT,         IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_LCD_INT,         IRQ_TYPE_LEVEL_HIGH,  1, 0 },
+       { AU1200_MAE_BOTH_INT,    IRQ_TYPE_LEVEL_HIGH,  1, 0 },
        { -1, },
 };
 
index 7af941d..9837a13 100644 (file)
@@ -53,7 +53,7 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
                .irq            = _irq,                         \
                .regshift       = 2,                            \
                .iotype         = UPIO_AU,                      \
-               .flags          = UPF_SKIP_TEST | UPF_IOREMAP | \
+               .flags          = UPF_SKIP_TEST | UPF_IOREMAP | \
                                  UPF_FIXED_TYPE,               \
                .type           = PORT_16550A,                  \
                .pm             = alchemy_8250_pm,              \
@@ -137,7 +137,7 @@ static void alchemy_ehci_power_off(struct platform_device *pdev)
 }
 
 static struct usb_ehci_pdata alchemy_ehci_pdata = {
-       .no_io_watchdog = 1,
+       .no_io_watchdog = 1,
        .power_on       = alchemy_ehci_power_on,
        .power_off      = alchemy_ehci_power_off,
        .power_suspend  = alchemy_ehci_power_off,
index 37ffd99..62b4e7b 100644 (file)
@@ -59,7 +59,7 @@ void __init plat_mem_setup(void)
                /* Clear to obtain best system bus performance */
                clear_c0_config(1 << 19); /* Clear Config[OD] */
 
-       board_setup();  /* board specific setup */
+       board_setup();  /* board specific setup */
 
        /* IO/MEM resources. */
        set_io_port_base(0);
index c7bcc7e..706d933 100644 (file)
@@ -102,12 +102,12 @@ LEAF(alchemy_sleep_au1000)
        cache   0x14, 96(t0)
        .set    mips0
 
-1:     lui     a0, 0xb400              /* mem_xxx */
-       sw      zero, 0x001c(a0)        /* Precharge */
+1:     lui     a0, 0xb400              /* mem_xxx */
+       sw      zero, 0x001c(a0)        /* Precharge */
        sync
        sw      zero, 0x0020(a0)        /* Auto Refresh */
        sync
-       sw      zero, 0x0030(a0)        /* Sleep */
+       sw      zero, 0x0030(a0)        /* Sleep */
        sync
 
        DO_SLEEP
@@ -128,15 +128,15 @@ LEAF(alchemy_sleep_au1550)
        cache   0x14, 96(t0)
        .set    mips0
 
-1:     lui     a0, 0xb400              /* mem_xxx */
-       sw      zero, 0x08c0(a0)        /* Precharge */
+1:     lui     a0, 0xb400              /* mem_xxx */
+       sw      zero, 0x08c0(a0)        /* Precharge */
        sync
        sw      zero, 0x08d0(a0)        /* Self Refresh */
        sync
 
        /* wait for sdram to enter self-refresh mode */
-       lui     t0, 0x0100
-2:     lw      t1, 0x0850(a0)          /* mem_sdstat */
+       lui     t0, 0x0100
+2:     lw      t1, 0x0850(a0)          /* mem_sdstat */
        and     t2, t1, t0
        beq     t2, zero, 2b
         nop
@@ -144,9 +144,9 @@ LEAF(alchemy_sleep_au1550)
        /* disable SDRAM clocks */
        lui     t0, 0xcfff
        ori     t0, t0, 0xffff
-       lw      t1, 0x0840(a0)          /* mem_sdconfiga */
-       and     t1, t0, t1              /* clear CE[1:0] */
-       sw      t1, 0x0840(a0)          /* mem_sdconfiga */
+       lw      t1, 0x0840(a0)          /* mem_sdconfiga */
+       and     t1, t0, t1              /* clear CE[1:0] */
+       sw      t1, 0x0840(a0)          /* mem_sdconfiga */
        sync
 
        DO_SLEEP
index b67930d..38afb11 100644 (file)
@@ -85,7 +85,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
        .name           = "rtcmatch2",
        .features       = CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 1500,
-       .set_next_event = au1x_rtcmatch2_set_next_event,
+       .set_next_event = au1x_rtcmatch2_set_next_event,
        .set_mode       = au1x_rtcmatch2_set_mode,
        .cpumask        = cpu_all_mask,
 };
index 936af83..fcc6956 100644 (file)
@@ -122,7 +122,7 @@ static inline void __au1300_ohci_control(void __iomem *base, int enable, int id)
        unsigned long r;
 
        if (enable) {
-               __raw_writel(1, base + USB_DWC_CTRL7);  /* start OHCI clock */
+               __raw_writel(1, base + USB_DWC_CTRL7);  /* start OHCI clock */
                wmb();
 
                r = __raw_readl(base + USB_DWC_CTRL3);  /* enable OHCI block */
index f2039ef..c98c9ea 100644 (file)
@@ -20,7 +20,7 @@ static struct bcsr_reg {
        spinlock_t lock;
 } bcsr_regs[BCSR_CNT];
 
-static void __iomem *bcsr_virt;        /* KSEG1 addr of BCSR base */
+static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
 static int bcsr_csc_base;      /* linux-irq of first cascaded irq */
 
 void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
index 8187845..11f3ad2 100644 (file)
@@ -276,7 +276,7 @@ static void db1100_mmcled_set(struct led_classdev *led, enum led_brightness b)
 }
 
 static struct led_classdev db1100_mmc_led = {
-       .brightness_set = db1100_mmcled_set,
+       .brightness_set = db1100_mmcled_set,
 };
 
 static int db1100_mmc1_card_readonly(void *mmc_host)
@@ -314,7 +314,7 @@ static void db1100_mmc1led_set(struct led_classdev *led, enum led_brightness b)
 }
 
 static struct led_classdev db1100_mmc1_led = {
-       .brightness_set = db1100_mmc1led_set,
+       .brightness_set = db1100_mmc1led_set,
 };
 
 static struct au1xmmc_platform_data db1100_mmc_platdata[2] = {
@@ -357,7 +357,7 @@ static struct resource au1100_mmc0_resources[] = {
        }
 };
 
-static u64 au1xxx_mmc_dmamask =  DMA_BIT_MASK(32);
+static u64 au1xxx_mmc_dmamask =         DMA_BIT_MASK(32);
 
 static struct platform_device db1100_mmc0_dev = {
        .name           = "au1xxx-mmc",
@@ -482,7 +482,7 @@ static struct spi_board_info db1100_spi_info[] __initdata = {
                .mode            = 0,
                .irq             = AU1100_GPIO21_INT,
                .platform_data   = &db1100_touch_pd,
-               .controller_data = (void *)210, /* for spi_gpio: CS# GPIO210 */
+               .controller_data = (void *)210, /* for spi_gpio: CS# GPIO210 */
        },
 };
 
@@ -572,7 +572,7 @@ static int __init db1000_dev_init(void)
                irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_LOW);
                irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
                /* EPSON S1D13806 0x1b000000
-                * SRAM 1MB/2MB   0x1a000000
+                * SRAM 1MB/2MB   0x1a000000
                 * DS1693 RTC     0x0c000000
                 */
        } else if (board == BCSR_WHOAMI_PB1100) {
@@ -586,7 +586,7 @@ static int __init db1000_dev_init(void)
                irq_set_irq_type(AU1100_GPIO12_INT, IRQ_TYPE_LEVEL_LOW);
                irq_set_irq_type(AU1100_GPIO13_INT, IRQ_TYPE_LEVEL_LOW);
                /* EPSON S1D13806 0x1b000000
-                * SRAM 1MB/2MB   0x1a000000
+                * SRAM 1MB/2MB   0x1a000000
                 * DiskOnChip     0x0d000000
                 * DS1693 RTC     0x0c000000
                 */
@@ -605,7 +605,7 @@ static int __init db1000_dev_init(void)
                AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x000400000 - 1,
                AU1000_PCMCIA_IO_PHYS_ADDR,
                AU1000_PCMCIA_IO_PHYS_ADDR   + 0x000010000 - 1,
-               c0, d0, /*s0*/0, 0, 0);
+               c0, d0, /*s0*/0, 0, 0);
 
        if (twosocks) {
                irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
@@ -619,7 +619,7 @@ static int __init db1000_dev_init(void)
                        AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x004400000 - 1,
                        AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004000000,
                        AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004010000 - 1,
-                       c1, d1, /*s1*/0, 0, 1);
+                       c1, d1, /*s1*/0, 0, 1);
        }
 
        platform_add_devices(db1x00_devs, ARRAY_SIZE(db1x00_devs));
index 299b7d2..a84d98b 100644 (file)
@@ -90,14 +90,14 @@ int __init db1200_board_setup(void)
 
        whoami = bcsr_read(BCSR_WHOAMI);
        printk(KERN_INFO "Alchemy/AMD/RMI %s Board, CPLD Rev %d"
-               "  Board-ID %d  Daughtercard ID %d\n", get_system_type(),
+               "  Board-ID %d  Daughtercard ID %d\n", get_system_type(),
                (whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
 
        /* SMBus/SPI on PSC0, Audio on PSC1 */
        pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
        pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
        pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3);
-       pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
+       pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
        __raw_writel(pfc, (void __iomem *)SYS_PINFUNC);
        wmb();
 
@@ -129,7 +129,7 @@ int __init db1200_board_setup(void)
 static struct mtd_partition db1200_spiflash_parts[] = {
        {
                .name   = "spi_flash",
-               .offset = 0,
+               .offset = 0,
                .size   = MTDPART_SIZ_FULL,
        },
 };
@@ -200,12 +200,12 @@ static int au1200_nand_device_ready(struct mtd_info *mtd)
 static struct mtd_partition db1200_nand_parts[] = {
        {
                .name   = "NAND FS 0",
-               .offset = 0,
+               .offset = 0,
                .size   = 8 * 1024 * 1024,
        },
        {
                .name   = "NAND FS 1",
-               .offset = MTDPART_OFS_APPEND,
+               .offset = MTDPART_OFS_APPEND,
                .size   = MTDPART_SIZ_FULL
        },
 };
@@ -395,7 +395,7 @@ static void db1200_mmcled_set(struct led_classdev *led,
 }
 
 static struct led_classdev db1200_mmc_led = {
-       .brightness_set = db1200_mmcled_set,
+       .brightness_set = db1200_mmcled_set,
 };
 
 /* -- */
@@ -463,7 +463,7 @@ static void pb1200_mmc1led_set(struct led_classdev *led,
 }
 
 static struct led_classdev pb1200_mmc1_led = {
-       .brightness_set = pb1200_mmc1led_set,
+       .brightness_set = pb1200_mmc1led_set,
 };
 
 static void pb1200_mmc1_set_power(void *mmc_host, int state)
@@ -526,7 +526,7 @@ static struct resource au1200_mmc0_resources[] = {
        }
 };
 
-static u64 au1xxx_mmc_dmamask =  DMA_BIT_MASK(32);
+static u64 au1xxx_mmc_dmamask =         DMA_BIT_MASK(32);
 
 static struct platform_device db1200_mmc0_dev = {
        .name           = "au1xxx-mmc",
@@ -601,7 +601,7 @@ static int db1200fb_panel_shutdown(void)
 static struct au1200fb_platdata db1200fb_pd = {
        .panel_index    = db1200fb_panel_index,
        .panel_init     = db1200fb_panel_init,
-       .panel_shutdown = db1200fb_panel_shutdown,
+       .panel_shutdown = db1200fb_panel_shutdown,
 };
 
 static struct resource au1200_lcd_res[] = {
@@ -772,11 +772,11 @@ static int __init pb1200_res_fixup(void)
        }
 
        db1200_nand_res[0].start = PB1200_NAND_PHYS_ADDR;
-       db1200_nand_res[0].end   = PB1200_NAND_PHYS_ADDR + 0xff;
+       db1200_nand_res[0].end   = PB1200_NAND_PHYS_ADDR + 0xff;
        db1200_ide_res[0].start = PB1200_IDE_PHYS_ADDR;
-       db1200_ide_res[0].end   = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
+       db1200_ide_res[0].end   = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
        db1200_eth_res[0].start = PB1200_ETH_PHYS_ADDR;
-       db1200_eth_res[0].end   = PB1200_ETH_PHYS_ADDR + 0xff;
+       db1200_eth_res[0].end   = PB1200_ETH_PHYS_ADDR + 0xff;
        return 0;
 }
 
@@ -797,7 +797,7 @@ int __init db1200_dev_setup(void)
        irq_set_irq_type(AU1200_GPIO7_INT, IRQ_TYPE_LEVEL_LOW);
        bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
 
-       /* insert/eject pairs: one of both is always screaming.  To avoid
+       /* insert/eject pairs: one of both is always screaming.  To avoid
         * issues they must not be automatically enabled when initially
         * requested.
         */
@@ -813,7 +813,7 @@ int __init db1200_dev_setup(void)
        spi_register_board_info(db1200_spi_devs,
                                ARRAY_SIZE(db1200_i2c_devs));
 
-       /* SWITCHES:    S6.8 I2C/SPI selector  (OFF=I2C  ON=SPI)
+       /* SWITCHES:    S6.8 I2C/SPI selector  (OFF=I2C  ON=SPI)
         *              S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
         *              or S12 on the PB1200.
         */
index cdf37cb..6167e73 100644 (file)
@@ -80,7 +80,7 @@ static int db1300_dev_pins[] __initdata = {
        AU1300_PIN_PSC0D1,
        AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0,
        AU1300_PIN_PSC1D1,
-       AU1300_PIN_PSC2SYNC0,                       AU1300_PIN_PSC2D0,
+       AU1300_PIN_PSC2SYNC0,                       AU1300_PIN_PSC2D0,
        AU1300_PIN_PSC2D1,
        AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0,
        AU1300_PIN_PSC3D1,
@@ -143,12 +143,12 @@ static int au1300_nand_device_ready(struct mtd_info *mtd)
 static struct mtd_partition db1300_nand_parts[] = {
        {
                .name   = "NAND FS 0",
-               .offset = 0,
+               .offset = 0,
                .size   = 8 * 1024 * 1024,
        },
        {
                .name   = "NAND FS 1",
-               .offset = MTDPART_OFS_APPEND,
+               .offset = MTDPART_OFS_APPEND,
                .size   = MTDPART_SIZ_FULL
        },
 };
@@ -487,7 +487,7 @@ static void db1300_mmcled_set(struct led_classdev *led,
 }
 
 static struct led_classdev db1300_mmc_led = {
-       .brightness_set = db1300_mmcled_set,
+       .brightness_set = db1300_mmcled_set,
 };
 
 struct au1xmmc_platform_data db1300_sd1_platdata = {
@@ -646,7 +646,7 @@ static int db1300fb_panel_shutdown(void)
 static struct au1200fb_platdata db1300fb_pd = {
        .panel_index    = db1300fb_panel_index,
        .panel_init     = db1300fb_panel_init,
-       .panel_shutdown = db1300fb_panel_shutdown,
+       .panel_shutdown = db1300fb_panel_shutdown,
 };
 
 static struct resource au1300_lcd_res[] = {
index 5a9ae60..016cdda 100644 (file)
@@ -67,7 +67,7 @@ int __init db1550_board_setup(void)
                bcsr_init(PB1550_BCSR_PHYS_ADDR,
                          PB1550_BCSR_PHYS_ADDR + PB1550_BCSR_HEXLED_OFS);
 
-       pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d  "       \
+       pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d  "       \
                "Daughtercard ID %d\n", get_system_type(),
                (whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
 
@@ -80,7 +80,7 @@ int __init db1550_board_setup(void)
 static struct mtd_partition db1550_spiflash_parts[] = {
        {
                .name   = "spi_flash",
-               .offset = 0,
+               .offset = 0,
                .size   = MTDPART_SIZ_FULL,
        },
 };
@@ -151,12 +151,12 @@ static int au1550_nand_device_ready(struct mtd_info *mtd)
 static struct mtd_partition db1550_nand_parts[] = {
        {
                .name   = "NAND FS 0",
-               .offset = 0,
+               .offset = 0,
                .size   = 8 * 1024 * 1024,
        },
        {
                .name   = "NAND FS 1",
-               .offset = MTDPART_OFS_APPEND,
+               .offset = MTDPART_OFS_APPEND,
                .size   = MTDPART_SIZ_FULL
        },
 };
@@ -495,10 +495,10 @@ static void __init db1550_devices(void)
 {
        alchemy_gpio_direction_output(203, 0);  /* red led on */
 
-       irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_EDGE_BOTH);  /* CD0# */
-       irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_EDGE_BOTH);  /* CD1# */
-       irq_set_irq_type(AU1550_GPIO3_INT, IRQ_TYPE_LEVEL_LOW);  /* CARD0# */
-       irq_set_irq_type(AU1550_GPIO5_INT, IRQ_TYPE_LEVEL_LOW);  /* CARD1# */
+       irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_EDGE_BOTH);  /* CD0# */
+       irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_EDGE_BOTH);  /* CD1# */
+       irq_set_irq_type(AU1550_GPIO3_INT, IRQ_TYPE_LEVEL_LOW);  /* CARD0# */
+       irq_set_irq_type(AU1550_GPIO5_INT, IRQ_TYPE_LEVEL_LOW);  /* CARD1# */
        irq_set_irq_type(AU1550_GPIO21_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG0# */
        irq_set_irq_type(AU1550_GPIO22_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG1# */
 
@@ -539,7 +539,7 @@ static void __init pb1550_devices(void)
 
        /* Pb1550, like all others, also has statuschange irqs; however they're
        * wired up on one of the Au1550's shared GPIO201_205 line, which also
-       * services the PCMCIA card interrupts.  So we ignore statuschange and
+       * services the PCMCIA card interrupts.  So we ignore statuschange and
        * use the GPIO201_205 exclusively for card interrupts, since a) pcmcia
        * drivers are used to shared irqs and b) statuschange isn't really use-
        * ful anyway.
index acaf91b..b86bff3 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t db1x_pmattr_store(struct kobject *kobj,
 }
 
 #define ATTR(x)                                                        \
-       static struct kobj_attribute x##_attribute =            \
+       static struct kobj_attribute x##_attribute =            \
                __ATTR(x, 0664, db1x_pmattr_show,               \
                                db1x_pmattr_store);
 
index 0bf85c4..21f9102 100644 (file)
@@ -1,6 +1,6 @@
 #
 # Texas Instruments AR7
 #
-platform-$(CONFIG_AR7)          += ar7/
-cflags-$(CONFIG_AR7)            += -I$(srctree)/arch/mips/include/asm/mach-ar7
-load-$(CONFIG_AR7)              += 0xffffffff94100000
+platform-$(CONFIG_AR7)         += ar7/
+cflags-$(CONFIG_AR7)           += -I$(srctree)/arch/mips/include/asm/mach-ar7
+load-$(CONFIG_AR7)             += 0xffffffff94100000
index 7477fd2..7e2356f 100644 (file)
@@ -492,11 +492,11 @@ static struct gpio_led gt701_leds[] = {
                .active_low             = 1,
                .default_trigger        = "default-on",
        },
-        {
-                .name                   = "ethernet",
-                .gpio                   = 10,
-                .active_low             = 1,
-        },
+       {
+               .name                   = "ethernet",
+               .gpio                   = 10,
+               .active_low             = 1,
+       },
 };
 
 static struct gpio_led_platform_data ar7_led_data;
@@ -512,7 +512,7 @@ static void __init detect_leds(void)
 {
        char *prid, *usb_prod;
 
-       /* Default LEDs */
+       /* Default LEDs */
        ar7_led_data.num_leds = ARRAY_SIZE(default_leds);
        ar7_led_data.leds = default_leds;
 
index f44feee..3995e31 100644 (file)
@@ -14,6 +14,18 @@ config ATH79_MACH_AP121
          Say 'Y' here if you want your kernel to support the
          Atheros AP121 reference board.
 
+config ATH79_MACH_AP136
+       bool "Atheros AP136 reference board"
+       select SOC_QCA955X
+       select ATH79_DEV_GPIO_BUTTONS
+       select ATH79_DEV_LEDS_GPIO
+       select ATH79_DEV_SPI
+       select ATH79_DEV_USB
+       select ATH79_DEV_WMAC
+       help
+         Say 'Y' here if you want your kernel to support the
+         Atheros AP136 reference board.
+
 config ATH79_MACH_AP81
        bool "Atheros AP81 reference board"
        select SOC_AR913X
@@ -88,6 +100,12 @@ config SOC_AR934X
        select PCI_AR724X if PCI
        def_bool n
 
+config SOC_QCA955X
+       select USB_ARCH_HAS_EHCI
+       select HW_HAS_PCI
+       select PCI_AR724X if PCI
+       def_bool n
+
 config PCI_AR724X
        def_bool n
 
@@ -104,7 +122,7 @@ config ATH79_DEV_USB
        def_bool n
 
 config ATH79_DEV_WMAC
-       depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X)
+       depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X || SOC_QCA955X)
        def_bool n
 
 endif
index 2b54d98..5c9ff69 100644 (file)
@@ -27,6 +27,7 @@ obj-$(CONFIG_ATH79_DEV_WMAC)          += dev-wmac.o
 # Machines
 #
 obj-$(CONFIG_ATH79_MACH_AP121)         += mach-ap121.o
+obj-$(CONFIG_ATH79_MACH_AP136)         += mach-ap136.o
 obj-$(CONFIG_ATH79_MACH_AP81)          += mach-ap81.o
 obj-$(CONFIG_ATH79_MACH_DB120)         += mach-db120.o
 obj-$(CONFIG_ATH79_MACH_PB44)          += mach-pb44.o
index 579f452..765ef30 100644 (file)
@@ -198,7 +198,7 @@ static void __init ar934x_clocks_init(void)
        dpll_base = ioremap(AR934X_SRIF_BASE, AR934X_SRIF_SIZE);
 
        bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
-       if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+       if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
                ath79_ref_clk.rate = 40 * 1000 * 1000;
        else
                ath79_ref_clk.rate = 25 * 1000 * 1000;
@@ -295,6 +295,82 @@ static void __init ar934x_clocks_init(void)
        iounmap(dpll_base);
 }
 
+static void __init qca955x_clocks_init(void)
+{
+       u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+       u32 cpu_pll, ddr_pll;
+       u32 bootstrap;
+
+       bootstrap = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
+       if (bootstrap & QCA955X_BOOTSTRAP_REF_CLK_40)
+               ath79_ref_clk.rate = 40 * 1000 * 1000;
+       else
+               ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+       pll = ath79_pll_rr(QCA955X_PLL_CPU_CONFIG_REG);
+       out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+                 QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK;
+       ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+                 QCA955X_PLL_CPU_CONFIG_REFDIV_MASK;
+       nint = (pll >> QCA955X_PLL_CPU_CONFIG_NINT_SHIFT) &
+              QCA955X_PLL_CPU_CONFIG_NINT_MASK;
+       frac = (pll >> QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+              QCA955X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+       cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+       cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 6));
+       cpu_pll /= (1 << out_div);
+
+       pll = ath79_pll_rr(QCA955X_PLL_DDR_CONFIG_REG);
+       out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+                 QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK;
+       ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+                 QCA955X_PLL_DDR_CONFIG_REFDIV_MASK;
+       nint = (pll >> QCA955X_PLL_DDR_CONFIG_NINT_SHIFT) &
+              QCA955X_PLL_DDR_CONFIG_NINT_MASK;
+       frac = (pll >> QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+              QCA955X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+       ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+       ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 10));
+       ddr_pll /= (1 << out_div);
+
+       clk_ctrl = ath79_pll_rr(QCA955X_PLL_CLK_CTRL_REG);
+
+       postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+                 QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+               ath79_cpu_clk.rate = ath79_ref_clk.rate;
+       else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+               ath79_cpu_clk.rate = ddr_pll / (postdiv + 1);
+       else
+               ath79_cpu_clk.rate = cpu_pll / (postdiv + 1);
+
+       postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+                 QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+               ath79_ddr_clk.rate = ath79_ref_clk.rate;
+       else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+               ath79_ddr_clk.rate = cpu_pll / (postdiv + 1);
+       else
+               ath79_ddr_clk.rate = ddr_pll / (postdiv + 1);
+
+       postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+                 QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+       if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+               ath79_ahb_clk.rate = ath79_ref_clk.rate;
+       else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+               ath79_ahb_clk.rate = ddr_pll / (postdiv + 1);
+       else
+               ath79_ahb_clk.rate = cpu_pll / (postdiv + 1);
+
+       ath79_wdt_clk.rate = ath79_ref_clk.rate;
+       ath79_uart_clk.rate = ath79_ref_clk.rate;
+}
+
 void __init ath79_clocks_init(void)
 {
        if (soc_is_ar71xx())
@@ -307,6 +383,8 @@ void __init ath79_clocks_init(void)
                ar933x_clocks_init();
        else if (soc_is_ar934x())
                ar934x_clocks_init();
+       else if (soc_is_qca955x())
+               qca955x_clocks_init();
        else
                BUG();
 
index 5a4adfc..eb3966c 100644 (file)
@@ -72,6 +72,8 @@ void ath79_device_reset_set(u32 mask)
                reg = AR933X_RESET_REG_RESET_MODULE;
        else if (soc_is_ar934x())
                reg = AR934X_RESET_REG_RESET_MODULE;
+       else if (soc_is_qca955x())
+               reg = QCA955X_RESET_REG_RESET_MODULE;
        else
                BUG();
 
@@ -98,6 +100,8 @@ void ath79_device_reset_clear(u32 mask)
                reg = AR933X_RESET_REG_RESET_MODULE;
        else if (soc_is_ar934x())
                reg = AR934X_RESET_REG_RESET_MODULE;
+       else if (soc_is_qca955x())
+               reg = QCA955X_RESET_REG_RESET_MODULE;
        else
                BUG();
 
index 45efc63..a3a2741 100644 (file)
@@ -36,7 +36,7 @@ static struct resource ath79_uart_resources[] = {
 static struct plat_serial8250_port ath79_uart_data[] = {
        {
                .mapbase        = AR71XX_UART_BASE,
-               .irq            = ATH79_MISC_IRQ_UART,
+               .irq            = ATH79_MISC_IRQ(3),
                .flags          = AR71XX_UART_FLAGS,
                .iotype         = UPIO_MEM32,
                .regshift       = 2,
@@ -62,8 +62,8 @@ static struct resource ar933x_uart_resources[] = {
                .flags  = IORESOURCE_MEM,
        },
        {
-               .start  = ATH79_MISC_IRQ_UART,
-               .end    = ATH79_MISC_IRQ_UART,
+               .start  = ATH79_MISC_IRQ(3),
+               .end    = ATH79_MISC_IRQ(3),
                .flags  = IORESOURCE_IRQ,
        },
 };
@@ -90,7 +90,8 @@ void __init ath79_register_uart(void)
        if (soc_is_ar71xx() ||
            soc_is_ar724x() ||
            soc_is_ar913x() ||
-           soc_is_ar934x()) {
+           soc_is_ar934x() ||
+           soc_is_qca955x()) {
                ath79_uart_data[0].uartclk = clk_get_rate(clk);
                platform_device_register(&ath79_uart_device);
        } else if (soc_is_ar933x()) {
@@ -101,12 +102,15 @@ void __init ath79_register_uart(void)
        }
 }
 
-static struct platform_device ath79_wdt_device = {
-       .name           = "ath79-wdt",
-       .id             = -1,
-};
-
 void __init ath79_register_wdt(void)
 {
-       platform_device_register(&ath79_wdt_device);
+       struct resource res;
+
+       memset(&res, 0, sizeof(res));
+
+       res.flags = IORESOURCE_MEM;
+       res.start = AR71XX_RESET_BASE + AR71XX_RESET_REG_WDOG_CTRL;
+       res.end = res.start + 0x8 - 1;
+
+       platform_device_register_simple("ath79-wdt", -1, &res, 1);
 }
index bd2bc10..8227265 100644 (file)
 #include "common.h"
 #include "dev-usb.h"
 
-static struct resource ath79_ohci_resources[2];
-
-static u64 ath79_ohci_dmamask = DMA_BIT_MASK(32);
+static u64 ath79_usb_dmamask = DMA_BIT_MASK(32);
 
 static struct usb_ohci_pdata ath79_ohci_pdata = {
 };
 
-static struct platform_device ath79_ohci_device = {
-       .name           = "ohci-platform",
-       .id             = -1,
-       .resource       = ath79_ohci_resources,
-       .num_resources  = ARRAY_SIZE(ath79_ohci_resources),
-       .dev = {
-               .dma_mask               = &ath79_ohci_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(32),
-               .platform_data          = &ath79_ohci_pdata,
-       },
-};
-
-static struct resource ath79_ehci_resources[2];
-
-static u64 ath79_ehci_dmamask = DMA_BIT_MASK(32);
-
 static struct usb_ehci_pdata ath79_ehci_pdata_v1 = {
        .has_synopsys_hc_bug    = 1,
 };
@@ -57,22 +39,16 @@ static struct usb_ehci_pdata ath79_ehci_pdata_v2 = {
        .has_tt                 = 1,
 };
 
-static struct platform_device ath79_ehci_device = {
-       .name           = "ehci-platform",
-       .id             = -1,
-       .resource       = ath79_ehci_resources,
-       .num_resources  = ARRAY_SIZE(ath79_ehci_resources),
-       .dev = {
-               .dma_mask               = &ath79_ehci_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(32),
-       },
-};
-
-static void __init ath79_usb_init_resource(struct resource res[2],
-                                          unsigned long base,
-                                          unsigned long size,
-                                          int irq)
+static void __init ath79_usb_register(const char *name, int id,
+                                     unsigned long base, unsigned long size,
+                                     int irq, const void *data,
+                                     size_t data_size)
 {
+       struct resource res[2];
+       struct platform_device *pdev;
+
+       memset(res, 0, sizeof(res));
+
        res[0].flags = IORESOURCE_MEM;
        res[0].start = base;
        res[0].end = base + size - 1;
@@ -80,6 +56,19 @@ static void __init ath79_usb_init_resource(struct resource res[2],
        res[1].flags = IORESOURCE_IRQ;
        res[1].start = irq;
        res[1].end = irq;
+
+       pdev = platform_device_register_resndata(NULL, name, id,
+                                                res, ARRAY_SIZE(res),
+                                                data, data_size);
+
+       if (IS_ERR(pdev)) {
+               pr_err("ath79: unable to register USB at %08lx, err=%d\n",
+                      base, (int) PTR_ERR(pdev));
+               return;
+       }
+
+       pdev->dev.dma_mask = &ath79_usb_dmamask;
+       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 }
 
 #define AR71XX_USB_RESET_MASK  (AR71XX_RESET_USB_HOST | \
@@ -106,14 +95,15 @@ static void __init ath79_usb_setup(void)
 
        mdelay(900);
 
-       ath79_usb_init_resource(ath79_ohci_resources, AR71XX_OHCI_BASE,
-                               AR71XX_OHCI_SIZE, ATH79_MISC_IRQ_OHCI);
-       platform_device_register(&ath79_ohci_device);
+       ath79_usb_register("ohci-platform", -1,
+                          AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE,
+                          ATH79_MISC_IRQ(6),
+                          &ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
 
-       ath79_usb_init_resource(ath79_ehci_resources, AR71XX_EHCI_BASE,
-                               AR71XX_EHCI_SIZE, ATH79_CPU_IRQ_USB);
-       ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v1;
-       platform_device_register(&ath79_ehci_device);
+       ath79_usb_register("ehci-platform", -1,
+                          AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE,
+                          ATH79_CPU_IRQ(3),
+                          &ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1));
 }
 
 static void __init ar7240_usb_setup(void)
@@ -135,9 +125,10 @@ static void __init ar7240_usb_setup(void)
 
        iounmap(usb_ctrl_base);
 
-       ath79_usb_init_resource(ath79_ohci_resources, AR7240_OHCI_BASE,
-                               AR7240_OHCI_SIZE, ATH79_CPU_IRQ_USB);
-       platform_device_register(&ath79_ohci_device);
+       ath79_usb_register("ohci-platform", -1,
+                          AR7240_OHCI_BASE, AR7240_OHCI_SIZE,
+                          ATH79_CPU_IRQ(3),
+                          &ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
 }
 
 static void __init ar724x_usb_setup(void)
@@ -151,10 +142,10 @@ static void __init ar724x_usb_setup(void)
        ath79_device_reset_clear(AR724X_RESET_USB_PHY);
        mdelay(10);
 
-       ath79_usb_init_resource(ath79_ehci_resources, AR724X_EHCI_BASE,
-                               AR724X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
-       ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
-       platform_device_register(&ath79_ehci_device);
+       ath79_usb_register("ehci-platform", -1,
+                          AR724X_EHCI_BASE, AR724X_EHCI_SIZE,
+                          ATH79_CPU_IRQ(3),
+                          &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
 }
 
 static void __init ar913x_usb_setup(void)
@@ -168,10 +159,10 @@ static void __init ar913x_usb_setup(void)
        ath79_device_reset_clear(AR913X_RESET_USB_PHY);
        mdelay(10);
 
-       ath79_usb_init_resource(ath79_ehci_resources, AR913X_EHCI_BASE,
-                               AR913X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
-       ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
-       platform_device_register(&ath79_ehci_device);
+       ath79_usb_register("ehci-platform", -1,
+                          AR913X_EHCI_BASE, AR913X_EHCI_SIZE,
+                          ATH79_CPU_IRQ(3),
+                          &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
 }
 
 static void __init ar933x_usb_setup(void)
@@ -185,10 +176,10 @@ static void __init ar933x_usb_setup(void)
        ath79_device_reset_clear(AR933X_RESET_USB_PHY);
        mdelay(10);
 
-       ath79_usb_init_resource(ath79_ehci_resources, AR933X_EHCI_BASE,
-                               AR933X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
-       ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
-       platform_device_register(&ath79_ehci_device);
+       ath79_usb_register("ehci-platform", -1,
+                          AR933X_EHCI_BASE, AR933X_EHCI_SIZE,
+                          ATH79_CPU_IRQ(3),
+                          &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
 }
 
 static void __init ar934x_usb_setup(void)
@@ -211,10 +202,23 @@ static void __init ar934x_usb_setup(void)
        ath79_device_reset_clear(AR934X_RESET_USB_HOST);
        udelay(1000);
 
-       ath79_usb_init_resource(ath79_ehci_resources, AR934X_EHCI_BASE,
-                               AR934X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
-       ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
-       platform_device_register(&ath79_ehci_device);
+       ath79_usb_register("ehci-platform", -1,
+                          AR934X_EHCI_BASE, AR934X_EHCI_SIZE,
+                          ATH79_CPU_IRQ(3),
+                          &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
+}
+
+static void __init qca955x_usb_setup(void)
+{
+       ath79_usb_register("ehci-platform", 0,
+                          QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE,
+                          ATH79_IP3_IRQ(0),
+                          &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
+
+       ath79_usb_register("ehci-platform", 1,
+                          QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE,
+                          ATH79_IP3_IRQ(1),
+                          &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
 }
 
 void __init ath79_register_usb(void)
@@ -231,6 +235,8 @@ void __init ath79_register_usb(void)
                ar933x_usb_setup();
        else if (soc_is_ar934x())
                ar934x_usb_setup();
+       else if (soc_is_qca955x())
+               qca955x_usb_setup();
        else
                BUG();
 }
index d6d893c..da190b1 100644 (file)
@@ -55,8 +55,8 @@ static void __init ar913x_wmac_setup(void)
 
        ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
        ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
-       ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
-       ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
+       ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
+       ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
 }
 
 
@@ -83,8 +83,8 @@ static void __init ar933x_wmac_setup(void)
 
        ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
        ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
-       ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
-       ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
+       ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
+       ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
 
        t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
        if (t & AR933X_BOOTSTRAP_REF_CLK_40)
@@ -107,7 +107,7 @@ static void ar934x_wmac_setup(void)
        ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
        ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
        ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
-       ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+       ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
 
        t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
        if (t & AR934X_BOOTSTRAP_REF_CLK_40)
@@ -116,6 +116,24 @@ static void ar934x_wmac_setup(void)
                ath79_wmac_data.is_clk_25mhz = true;
 }
 
+static void qca955x_wmac_setup(void)
+{
+       u32 t;
+
+       ath79_wmac_device.name = "qca955x_wmac";
+
+       ath79_wmac_resources[0].start = QCA955X_WMAC_BASE;
+       ath79_wmac_resources[0].end = QCA955X_WMAC_BASE + QCA955X_WMAC_SIZE - 1;
+       ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+       ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
+
+       t = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
+       if (t & QCA955X_BOOTSTRAP_REF_CLK_40)
+               ath79_wmac_data.is_clk_25mhz = false;
+       else
+               ath79_wmac_data.is_clk_25mhz = true;
+}
+
 void __init ath79_register_wmac(u8 *cal_data)
 {
        if (soc_is_ar913x())
@@ -124,6 +142,8 @@ void __init ath79_register_wmac(u8 *cal_data)
                ar933x_wmac_setup();
        else if (soc_is_ar934x())
                ar934x_wmac_setup();
+       else if (soc_is_qca955x())
+               qca955x_wmac_setup();
        else
                BUG();
 
index dc938cb..b955faf 100644 (file)
@@ -74,6 +74,8 @@ static void prom_putchar_init(void)
        case REV_ID_MAJOR_AR9341:
        case REV_ID_MAJOR_AR9342:
        case REV_ID_MAJOR_AR9344:
+       case REV_ID_MAJOR_QCA9556:
+       case REV_ID_MAJOR_QCA9558:
                _prom_putchar = prom_putchar_ar71xx;
                break;
 
index 48fe762..8d025b0 100644 (file)
@@ -137,49 +137,45 @@ static struct gpio_chip ath79_gpio_chip = {
        .base                   = 0,
 };
 
-void ath79_gpio_function_enable(u32 mask)
+static void __iomem *ath79_gpio_get_function_reg(void)
 {
-       void __iomem *base = ath79_gpio_base;
-       unsigned long flags;
+       u32 reg = 0;
 
-       spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-       __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) | mask,
-                    base + AR71XX_GPIO_REG_FUNC);
-       /* flush write */
-       __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+       if (soc_is_ar71xx() ||
+           soc_is_ar724x() ||
+           soc_is_ar913x() ||
+           soc_is_ar933x())
+               reg = AR71XX_GPIO_REG_FUNC;
+       else if (soc_is_ar934x())
+               reg = AR934X_GPIO_REG_FUNC;
+       else
+               BUG();
 
-       spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+       return ath79_gpio_base + reg;
 }
 
-void ath79_gpio_function_disable(u32 mask)
+void ath79_gpio_function_setup(u32 set, u32 clear)
 {
-       void __iomem *base = ath79_gpio_base;
+       void __iomem *reg = ath79_gpio_get_function_reg();
        unsigned long flags;
 
        spin_lock_irqsave(&ath79_gpio_lock, flags);
 
-       __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~mask,
-                    base + AR71XX_GPIO_REG_FUNC);
+       __raw_writel((__raw_readl(reg) & ~clear) | set, reg);
        /* flush write */
-       __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+       __raw_readl(reg);
 
        spin_unlock_irqrestore(&ath79_gpio_lock, flags);
 }
 
-void ath79_gpio_function_setup(u32 set, u32 clear)
+void ath79_gpio_function_enable(u32 mask)
 {
-       void __iomem *base = ath79_gpio_base;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-       __raw_writel((__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~clear) | set,
-                    base + AR71XX_GPIO_REG_FUNC);
-       /* flush write */
-       __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+       ath79_gpio_function_setup(mask, 0);
+}
 
-       spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+void ath79_gpio_function_disable(u32 mask)
+{
+       ath79_gpio_function_setup(0, mask);
 }
 
 void __init ath79_gpio_init(void)
@@ -198,12 +194,14 @@ void __init ath79_gpio_init(void)
                ath79_gpio_count = AR933X_GPIO_COUNT;
        else if (soc_is_ar934x())
                ath79_gpio_count = AR934X_GPIO_COUNT;
+       else if (soc_is_qca955x())
+               ath79_gpio_count = QCA955X_GPIO_COUNT;
        else
                BUG();
 
        ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
        ath79_gpio_chip.ngpio = ath79_gpio_count;
-       if (soc_is_ar934x()) {
+       if (soc_is_ar934x() || soc_is_qca955x()) {
                ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
                ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
        }
index 90d09fc..9c0e176 100644 (file)
@@ -35,44 +35,17 @@ static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
        pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
                  __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
 
-       if (pending & MISC_INT_UART)
-               generic_handle_irq(ATH79_MISC_IRQ_UART);
-
-       else if (pending & MISC_INT_DMA)
-               generic_handle_irq(ATH79_MISC_IRQ_DMA);
-
-       else if (pending & MISC_INT_PERFC)
-               generic_handle_irq(ATH79_MISC_IRQ_PERFC);
-
-       else if (pending & MISC_INT_TIMER)
-               generic_handle_irq(ATH79_MISC_IRQ_TIMER);
-
-       else if (pending & MISC_INT_TIMER2)
-               generic_handle_irq(ATH79_MISC_IRQ_TIMER2);
-
-       else if (pending & MISC_INT_TIMER3)
-               generic_handle_irq(ATH79_MISC_IRQ_TIMER3);
-
-       else if (pending & MISC_INT_TIMER4)
-               generic_handle_irq(ATH79_MISC_IRQ_TIMER4);
-
-       else if (pending & MISC_INT_OHCI)
-               generic_handle_irq(ATH79_MISC_IRQ_OHCI);
-
-       else if (pending & MISC_INT_ERROR)
-               generic_handle_irq(ATH79_MISC_IRQ_ERROR);
-
-       else if (pending & MISC_INT_GPIO)
-               generic_handle_irq(ATH79_MISC_IRQ_GPIO);
-
-       else if (pending & MISC_INT_WDOG)
-               generic_handle_irq(ATH79_MISC_IRQ_WDOG);
+       if (!pending) {
+               spurious_interrupt();
+               return;
+       }
 
-       else if (pending & MISC_INT_ETHSW)
-               generic_handle_irq(ATH79_MISC_IRQ_ETHSW);
+       while (pending) {
+               int bit = __ffs(pending);
 
-       else
-               spurious_interrupt();
+               generic_handle_irq(ATH79_MISC_IRQ(bit));
+               pending &= ~BIT(bit);
+       }
 }
 
 static void ar71xx_misc_irq_unmask(struct irq_data *d)
@@ -130,7 +103,10 @@ static void __init ath79_misc_irq_init(void)
 
        if (soc_is_ar71xx() || soc_is_ar913x())
                ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
-       else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x())
+       else if (soc_is_ar724x() ||
+                soc_is_ar933x() ||
+                soc_is_ar934x() ||
+                soc_is_qca955x())
                ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
        else
                BUG();
@@ -141,7 +117,7 @@ static void __init ath79_misc_irq_init(void)
                                         handle_level_irq);
        }
 
-       irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
+       irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
 }
 
 static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
@@ -174,7 +150,89 @@ static void ar934x_ip2_irq_init(void)
                irq_set_chip_and_handler(i, &dummy_irq_chip,
                                         handle_level_irq);
 
-       irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch);
+       irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
+}
+
+static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+       u32 status;
+
+       disable_irq_nosync(irq);
+
+       status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
+       status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
+
+       if (status == 0) {
+               spurious_interrupt();
+               goto enable;
+       }
+
+       if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
+               /* TODO: flush DDR? */
+               generic_handle_irq(ATH79_IP2_IRQ(0));
+       }
+
+       if (status & QCA955X_EXT_INT_WMAC_ALL) {
+               /* TODO: flush DDR? */
+               generic_handle_irq(ATH79_IP2_IRQ(1));
+       }
+
+enable:
+       enable_irq(irq);
+}
+
+static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+       u32 status;
+
+       disable_irq_nosync(irq);
+
+       status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
+       status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
+                 QCA955X_EXT_INT_USB1 |
+                 QCA955X_EXT_INT_USB2;
+
+       if (status == 0) {
+               spurious_interrupt();
+               goto enable;
+       }
+
+       if (status & QCA955X_EXT_INT_USB1) {
+               /* TODO: flush DDR? */
+               generic_handle_irq(ATH79_IP3_IRQ(0));
+       }
+
+       if (status & QCA955X_EXT_INT_USB2) {
+               /* TODO: flush DDR? */
+               generic_handle_irq(ATH79_IP3_IRQ(1));
+       }
+
+       if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) {
+               /* TODO: flush DDR? */
+               generic_handle_irq(ATH79_IP3_IRQ(2));
+       }
+
+enable:
+       enable_irq(irq);
+}
+
+static void qca955x_irq_init(void)
+{
+       int i;
+
+       for (i = ATH79_IP2_IRQ_BASE;
+            i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
+               irq_set_chip_and_handler(i, &dummy_irq_chip,
+                                        handle_level_irq);
+
+       irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch);
+
+       for (i = ATH79_IP3_IRQ_BASE;
+            i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++)
+               irq_set_chip_and_handler(i, &dummy_irq_chip,
+                                        handle_level_irq);
+
+       irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
 }
 
 asmlinkage void plat_irq_dispatch(void)
@@ -184,22 +242,22 @@ asmlinkage void plat_irq_dispatch(void)
        pending = read_c0_status() & read_c0_cause() & ST0_IM;
 
        if (pending & STATUSF_IP7)
-               do_IRQ(ATH79_CPU_IRQ_TIMER);
+               do_IRQ(ATH79_CPU_IRQ(7));
 
        else if (pending & STATUSF_IP2)
                ath79_ip2_handler();
 
        else if (pending & STATUSF_IP4)
-               do_IRQ(ATH79_CPU_IRQ_GE0);
+               do_IRQ(ATH79_CPU_IRQ(4));
 
        else if (pending & STATUSF_IP5)
-               do_IRQ(ATH79_CPU_IRQ_GE1);
+               do_IRQ(ATH79_CPU_IRQ(5));
 
        else if (pending & STATUSF_IP3)
                ath79_ip3_handler();
 
        else if (pending & STATUSF_IP6)
-               do_IRQ(ATH79_CPU_IRQ_MISC);
+               do_IRQ(ATH79_CPU_IRQ(6));
 
        else
                spurious_interrupt();
@@ -212,63 +270,69 @@ asmlinkage void plat_irq_dispatch(void)
  * Issue a flush in the handlers to ensure that the driver sees
  * the update.
  */
+
+static void ath79_default_ip2_handler(void)
+{
+       do_IRQ(ATH79_CPU_IRQ(2));
+}
+
+static void ath79_default_ip3_handler(void)
+{
+       do_IRQ(ATH79_CPU_IRQ(3));
+}
+
 static void ar71xx_ip2_handler(void)
 {
        ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
-       do_IRQ(ATH79_CPU_IRQ_IP2);
+       do_IRQ(ATH79_CPU_IRQ(2));
 }
 
 static void ar724x_ip2_handler(void)
 {
        ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
-       do_IRQ(ATH79_CPU_IRQ_IP2);
+       do_IRQ(ATH79_CPU_IRQ(2));
 }
 
 static void ar913x_ip2_handler(void)
 {
        ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
-       do_IRQ(ATH79_CPU_IRQ_IP2);
+       do_IRQ(ATH79_CPU_IRQ(2));
 }
 
 static void ar933x_ip2_handler(void)
 {
        ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
-       do_IRQ(ATH79_CPU_IRQ_IP2);
-}
-
-static void ar934x_ip2_handler(void)
-{
-       do_IRQ(ATH79_CPU_IRQ_IP2);
+       do_IRQ(ATH79_CPU_IRQ(2));
 }
 
 static void ar71xx_ip3_handler(void)
 {
        ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
-       do_IRQ(ATH79_CPU_IRQ_USB);
+       do_IRQ(ATH79_CPU_IRQ(3));
 }
 
 static void ar724x_ip3_handler(void)
 {
        ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
-       do_IRQ(ATH79_CPU_IRQ_USB);
+       do_IRQ(ATH79_CPU_IRQ(3));
 }
 
 static void ar913x_ip3_handler(void)
 {
        ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
-       do_IRQ(ATH79_CPU_IRQ_USB);
+       do_IRQ(ATH79_CPU_IRQ(3));
 }
 
 static void ar933x_ip3_handler(void)
 {
        ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
-       do_IRQ(ATH79_CPU_IRQ_USB);
+       do_IRQ(ATH79_CPU_IRQ(3));
 }
 
 static void ar934x_ip3_handler(void)
 {
        ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
-       do_IRQ(ATH79_CPU_IRQ_USB);
+       do_IRQ(ATH79_CPU_IRQ(3));
 }
 
 void __init arch_init_irq(void)
@@ -286,16 +350,21 @@ void __init arch_init_irq(void)
                ath79_ip2_handler = ar933x_ip2_handler;
                ath79_ip3_handler = ar933x_ip3_handler;
        } else if (soc_is_ar934x()) {
-               ath79_ip2_handler = ar934x_ip2_handler;
+               ath79_ip2_handler = ath79_default_ip2_handler;
                ath79_ip3_handler = ar934x_ip3_handler;
+       } else if (soc_is_qca955x()) {
+               ath79_ip2_handler = ath79_default_ip2_handler;
+               ath79_ip3_handler = ath79_default_ip3_handler;
        } else {
                BUG();
        }
 
-       cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
+       cp0_perfcount_irq = ATH79_MISC_IRQ(5);
        mips_cpu_irq_init();
        ath79_misc_irq_init();
 
        if (soc_is_ar934x())
                ar934x_ip2_irq_init();
+       else if (soc_is_qca955x())
+               qca955x_irq_init();
 }
index 4c20200..1bf73f2 100644 (file)
@@ -69,7 +69,7 @@ static struct spi_board_info ap121_spi_info[] = {
 
 static struct ath79_spi_platform_data ap121_spi_data = {
        .bus_num        = 0,
-       .num_chipselect = 1,
+       .num_chipselect = 1,
 };
 
 static void __init ap121_setup(void)
diff --git a/arch/mips/ath79/mach-ap136.c b/arch/mips/ath79/mach-ap136.c
new file mode 100644 (file)
index 0000000..479dd4b
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Qualcomm Atheros AP136 reference board support
+ *
+ * Copyright (c) 2012 Qualcomm Atheros
+ * Copyright (c) 2012-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/ath9k_platform.h>
+
+#include "machtypes.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+#include "dev-usb.h"
+#include "dev-wmac.h"
+#include "pci.h"
+
+#define AP136_GPIO_LED_STATUS_RED      14
+#define AP136_GPIO_LED_STATUS_GREEN    19
+#define AP136_GPIO_LED_USB             4
+#define AP136_GPIO_LED_WLAN_2G         13
+#define AP136_GPIO_LED_WLAN_5G         12
+#define AP136_GPIO_LED_WPS_RED         15
+#define AP136_GPIO_LED_WPS_GREEN       20
+
+#define AP136_GPIO_BTN_WPS             16
+#define AP136_GPIO_BTN_RFKILL          21
+
+#define AP136_KEYS_POLL_INTERVAL       20      /* msecs */
+#define AP136_KEYS_DEBOUNCE_INTERVAL   (3 * AP136_KEYS_POLL_INTERVAL)
+
+#define AP136_WMAC_CALDATA_OFFSET 0x1000
+#define AP136_PCIE_CALDATA_OFFSET 0x5000
+
+static struct gpio_led ap136_leds_gpio[] __initdata = {
+       {
+               .name           = "qca:green:status",
+               .gpio           = AP136_GPIO_LED_STATUS_GREEN,
+               .active_low     = 1,
+       },
+       {
+               .name           = "qca:red:status",
+               .gpio           = AP136_GPIO_LED_STATUS_RED,
+               .active_low     = 1,
+       },
+       {
+               .name           = "qca:green:wps",
+               .gpio           = AP136_GPIO_LED_WPS_GREEN,
+               .active_low     = 1,
+       },
+       {
+               .name           = "qca:red:wps",
+               .gpio           = AP136_GPIO_LED_WPS_RED,
+               .active_low     = 1,
+       },
+       {
+               .name           = "qca:red:wlan-2g",
+               .gpio           = AP136_GPIO_LED_WLAN_2G,
+               .active_low     = 1,
+       },
+       {
+               .name           = "qca:red:usb",
+               .gpio           = AP136_GPIO_LED_USB,
+               .active_low     = 1,
+       }
+};
+
+static struct gpio_keys_button ap136_gpio_keys[] __initdata = {
+       {
+               .desc           = "WPS button",
+               .type           = EV_KEY,
+               .code           = KEY_WPS_BUTTON,
+               .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
+               .gpio           = AP136_GPIO_BTN_WPS,
+               .active_low     = 1,
+       },
+       {
+               .desc           = "RFKILL button",
+               .type           = EV_KEY,
+               .code           = KEY_RFKILL,
+               .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
+               .gpio           = AP136_GPIO_BTN_RFKILL,
+               .active_low     = 1,
+       },
+};
+
+static struct spi_board_info ap136_spi_info[] = {
+       {
+               .bus_num        = 0,
+               .chip_select    = 0,
+               .max_speed_hz   = 25000000,
+               .modalias       = "mx25l6405d",
+       }
+};
+
+static struct ath79_spi_platform_data ap136_spi_data = {
+       .bus_num        = 0,
+       .num_chipselect = 1,
+};
+
+#ifdef CONFIG_PCI
+static struct ath9k_platform_data ap136_ath9k_data;
+
+static int ap136_pci_plat_dev_init(struct pci_dev *dev)
+{
+       if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0)
+               dev->dev.platform_data = &ap136_ath9k_data;
+
+       return 0;
+}
+
+static void __init ap136_pci_init(u8 *eeprom)
+{
+       memcpy(ap136_ath9k_data.eeprom_data, eeprom,
+              sizeof(ap136_ath9k_data.eeprom_data));
+
+       ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init);
+       ath79_register_pci();
+}
+#else
+static inline void ap136_pci_init(void) {}
+#endif /* CONFIG_PCI */
+
+static void __init ap136_setup(void)
+{
+       u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
+
+       ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio),
+                                ap136_leds_gpio);
+       ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL,
+                                       ARRAY_SIZE(ap136_gpio_keys),
+                                       ap136_gpio_keys);
+       ath79_register_spi(&ap136_spi_data, ap136_spi_info,
+                          ARRAY_SIZE(ap136_spi_info));
+       ath79_register_usb();
+       ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET);
+       ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET);
+}
+
+MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010",
+            "Atheros AP136-010 reference board",
+            ap136_setup);
index abe1983..1c78d49 100644 (file)
@@ -78,7 +78,7 @@ static struct spi_board_info ap81_spi_info[] = {
 
 static struct ath79_spi_platform_data ap81_spi_data = {
        .bus_num        = 0,
-       .num_chipselect = 1,
+       .num_chipselect = 1,
 };
 
 static void __init ap81_setup(void)
index 42f540a..4d661a1 100644 (file)
@@ -87,7 +87,7 @@ static struct spi_board_info db120_spi_info[] = {
 
 static struct ath79_spi_platform_data db120_spi_data = {
        .bus_num        = 0,
-       .num_chipselect = 1,
+       .num_chipselect = 1,
 };
 
 #ifdef CONFIG_PCI
index c5f0ea5..67b980d 100644 (file)
@@ -34,8 +34,8 @@
 #define PB44_KEYS_DEBOUNCE_INTERVAL    (3 * PB44_KEYS_POLL_INTERVAL)
 
 static struct i2c_gpio_platform_data pb44_i2c_gpio_data = {
-       .sda_pin        = PB44_GPIO_I2C_SDA,
-       .scl_pin        = PB44_GPIO_I2C_SCL,
+       .sda_pin        = PB44_GPIO_I2C_SDA,
+       .scl_pin        = PB44_GPIO_I2C_SCL,
 };
 
 static struct platform_device pb44_i2c_gpio_device = {
@@ -53,7 +53,7 @@ static struct pcf857x_platform_data pb44_pcf857x_data = {
 static struct i2c_board_info pb44_i2c_board_info[] __initdata = {
        {
                I2C_BOARD_INFO("pcf8575", 0x20),
-               .platform_data  = &pb44_pcf857x_data,
+               .platform_data  = &pb44_pcf857x_data,
        },
 };
 
index af92e5c..2625405 100644 (file)
@@ -17,6 +17,7 @@
 enum ath79_mach_type {
        ATH79_MACH_GENERIC = 0,
        ATH79_MACH_AP121,               /* Atheros AP121 reference board */
+       ATH79_MACH_AP136_010,           /* Atheros AP136-010 reference board */
        ATH79_MACH_AP81,                /* Atheros AP81 reference board */
        ATH79_MACH_DB120,               /* Atheros DB120 reference board */
        ATH79_MACH_PB44,                /* Atheros PB44 reference board */
index ca83abd..730c0b0 100644 (file)
 
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/resource.h>
+#include <linux/platform_device.h>
 #include <asm/mach-ath79/ar71xx_regs.h>
 #include <asm/mach-ath79/ath79.h>
 #include <asm/mach-ath79/irq.h>
-#include <asm/mach-ath79/pci.h>
 #include "pci.h"
 
 static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
@@ -48,6 +49,21 @@ static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
        }
 };
 
+static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = {
+       {
+               .bus    = 0,
+               .slot   = 0,
+               .pin    = 1,
+               .irq    = ATH79_PCI_IRQ(0),
+       },
+       {
+               .bus    = 1,
+               .slot   = 0,
+               .pin    = 1,
+               .irq    = ATH79_PCI_IRQ(1),
+       },
+};
+
 int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
 {
        int irq = -1;
@@ -63,6 +79,9 @@ int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
                           soc_is_ar9344()) {
                        ath79_pci_irq_map = ar724x_pci_irq_map;
                        ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
+               } else if (soc_is_qca955x()) {
+                       ath79_pci_irq_map = qca955x_pci_irq_map;
+                       ath79_pci_nr_irqs = ARRAY_SIZE(qca955x_pci_irq_map);
                } else {
                        pr_crit("pci %s: invalid irq map\n",
                                pci_name((struct pci_dev *) dev));
@@ -74,7 +93,9 @@ int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
                const struct ath79_pci_irq *entry;
 
                entry = &ath79_pci_irq_map[i];
-               if (entry->slot == slot && entry->pin == pin) {
+               if (entry->bus == dev->bus->number &&
+                   entry->slot == slot &&
+                   entry->pin == pin) {
                        irq = entry->irq;
                        break;
                }
@@ -110,21 +131,143 @@ void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
        ath79_pci_plat_dev_init = func;
 }
 
-int __init ath79_register_pci(void)
+static struct platform_device *
+ath79_register_pci_ar71xx(void)
+{
+       struct platform_device *pdev;
+       struct resource res[4];
+
+       memset(res, 0, sizeof(res));
+
+       res[0].name = "cfg_base";
+       res[0].flags = IORESOURCE_MEM;
+       res[0].start = AR71XX_PCI_CFG_BASE;
+       res[0].end = AR71XX_PCI_CFG_BASE + AR71XX_PCI_CFG_SIZE - 1;
+
+       res[1].flags = IORESOURCE_IRQ;
+       res[1].start = ATH79_CPU_IRQ(2);
+       res[1].end = ATH79_CPU_IRQ(2);
+
+       res[2].name = "io_base";
+       res[2].flags = IORESOURCE_IO;
+       res[2].start = 0;
+       res[2].end = 0;
+
+       res[3].name = "mem_base";
+       res[3].flags = IORESOURCE_MEM;
+       res[3].start = AR71XX_PCI_MEM_BASE;
+       res[3].end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1;
+
+       pdev = platform_device_register_simple("ar71xx-pci", -1,
+                                              res, ARRAY_SIZE(res));
+       return pdev;
+}
+
+static struct platform_device *
+ath79_register_pci_ar724x(int id,
+                         unsigned long cfg_base,
+                         unsigned long ctrl_base,
+                         unsigned long crp_base,
+                         unsigned long mem_base,
+                         unsigned long mem_size,
+                         unsigned long io_base,
+                         int irq)
 {
-       if (soc_is_ar71xx())
-               return ar71xx_pcibios_init();
+       struct platform_device *pdev;
+       struct resource res[6];
+
+       memset(res, 0, sizeof(res));
+
+       res[0].name = "cfg_base";
+       res[0].flags = IORESOURCE_MEM;
+       res[0].start = cfg_base;
+       res[0].end = cfg_base + AR724X_PCI_CFG_SIZE - 1;
+
+       res[1].name = "ctrl_base";
+       res[1].flags = IORESOURCE_MEM;
+       res[1].start = ctrl_base;
+       res[1].end = ctrl_base + AR724X_PCI_CTRL_SIZE - 1;
+
+       res[2].flags = IORESOURCE_IRQ;
+       res[2].start = irq;
+       res[2].end = irq;
+
+       res[3].name = "mem_base";
+       res[3].flags = IORESOURCE_MEM;
+       res[3].start = mem_base;
+       res[3].end = mem_base + mem_size - 1;
+
+       res[4].name = "io_base";
+       res[4].flags = IORESOURCE_IO;
+       res[4].start = io_base;
+       res[4].end = io_base;
 
-       if (soc_is_ar724x())
-               return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
+       res[5].name = "crp_base";
+       res[5].flags = IORESOURCE_MEM;
+       res[5].start = crp_base;
+       res[5].end = crp_base + AR724X_PCI_CRP_SIZE - 1;
 
-       if (soc_is_ar9342() || soc_is_ar9344()) {
+       pdev = platform_device_register_simple("ar724x-pci", id,
+                                              res, ARRAY_SIZE(res));
+       return pdev;
+}
+
+int __init ath79_register_pci(void)
+{
+       struct platform_device *pdev = NULL;
+
+       if (soc_is_ar71xx()) {
+               pdev = ath79_register_pci_ar71xx();
+       } else if (soc_is_ar724x()) {
+               pdev = ath79_register_pci_ar724x(-1,
+                                                AR724X_PCI_CFG_BASE,
+                                                AR724X_PCI_CTRL_BASE,
+                                                AR724X_PCI_CRP_BASE,
+                                                AR724X_PCI_MEM_BASE,
+                                                AR724X_PCI_MEM_SIZE,
+                                                0,
+                                                ATH79_CPU_IRQ(2));
+       } else if (soc_is_ar9342() ||
+                  soc_is_ar9344()) {
                u32 bootstrap;
 
                bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
-               if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC)
-                       return ar724x_pcibios_init(ATH79_IP2_IRQ(0));
+               if ((bootstrap & AR934X_BOOTSTRAP_PCIE_RC) == 0)
+                       return -ENODEV;
+
+               pdev = ath79_register_pci_ar724x(-1,
+                                                AR724X_PCI_CFG_BASE,
+                                                AR724X_PCI_CTRL_BASE,
+                                                AR724X_PCI_CRP_BASE,
+                                                AR724X_PCI_MEM_BASE,
+                                                AR724X_PCI_MEM_SIZE,
+                                                0,
+                                                ATH79_IP2_IRQ(0));
+       } else if (soc_is_qca9558()) {
+               pdev = ath79_register_pci_ar724x(0,
+                                                QCA955X_PCI_CFG_BASE0,
+                                                QCA955X_PCI_CTRL_BASE0,
+                                                QCA955X_PCI_CRP_BASE0,
+                                                QCA955X_PCI_MEM_BASE0,
+                                                QCA955X_PCI_MEM_SIZE,
+                                                0,
+                                                ATH79_IP2_IRQ(0));
+
+               pdev = ath79_register_pci_ar724x(1,
+                                                QCA955X_PCI_CFG_BASE1,
+                                                QCA955X_PCI_CTRL_BASE1,
+                                                QCA955X_PCI_CRP_BASE1,
+                                                QCA955X_PCI_MEM_BASE1,
+                                                QCA955X_PCI_MEM_SIZE,
+                                                1,
+                                                ATH79_IP3_IRQ(2));
+       } else {
+               /* No PCI support */
+               return -ENODEV;
        }
 
-       return -ENODEV;
+       if (!pdev)
+               pr_err("unable to register PCI controller device\n");
+
+       return pdev ? 0 : -ENODEV;
 }
index 51c6625..1d00a38 100644 (file)
@@ -14,6 +14,7 @@
 #define _ATH79_PCI_H
 
 struct ath79_pci_irq {
+       int     bus;
        u8      slot;
        u8      pin;
        int     irq;
index 60d212e..d5b3c90 100644 (file)
@@ -164,13 +164,29 @@ static void __init ath79_detect_sys_type(void)
                rev = id & AR934X_REV_ID_REVISION_MASK;
                break;
 
+       case REV_ID_MAJOR_QCA9556:
+               ath79_soc = ATH79_SOC_QCA9556;
+               chip = "9556";
+               rev = id & QCA955X_REV_ID_REVISION_MASK;
+               break;
+
+       case REV_ID_MAJOR_QCA9558:
+               ath79_soc = ATH79_SOC_QCA9558;
+               chip = "9558";
+               rev = id & QCA955X_REV_ID_REVISION_MASK;
+               break;
+
        default:
                panic("ath79: unknown SoC, id:0x%08x", id);
        }
 
        ath79_soc_rev = rev;
 
-       sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
+       if (soc_is_qca955x())
+               sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s rev %u",
+                       chip, rev);
+       else
+               sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
        pr_info("SoC: %s\n", ath79_sys_type);
 }
 
index 1a3567f..f3bf6d5 100644 (file)
@@ -3,5 +3,5 @@
 # under Linux.
 #
 
-obj-y                          += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
+obj-y                          += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
 obj-$(CONFIG_BCM47XX_SSB)      += wgt634u.o
index 48a4c70..cc40b74 100644 (file)
@@ -3,10 +3,10 @@
  *
  * Copyright (C) 2005 Broadcom Corporation
  * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <asm/addrspace.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
 #include <asm/mach-bcm47xx/bcm47xx.h>
 
 static char nvram_buf[NVRAM_SPACE];
 
+static u32 find_nvram_size(u32 end)
+{
+       struct nvram_header *header;
+       u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000};
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
+               header = (struct nvram_header *)KSEG1ADDR(end - nvram_sizes[i]);
+               if (header->magic == NVRAM_HEADER)
+                       return nvram_sizes[i];
+       }
+
+       return 0;
+}
+
 /* Probe for NVRAM header */
-static void early_nvram_init(void)
+static int nvram_find_and_copy(u32 base, u32 lim)
 {
-#ifdef CONFIG_BCM47XX_SSB
-       struct ssb_mipscore *mcore_ssb;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
-       struct bcma_drv_cc *bcma_cc;
-#endif
        struct nvram_header *header;
        int i;
-       u32 base = 0;
-       u32 lim = 0;
        u32 off;
        u32 *src, *dst;
+       u32 size;
 
-       switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
-       case BCM47XX_BUS_TYPE_SSB:
-               mcore_ssb = &bcm47xx_bus.ssb.mipscore;
-               base = mcore_ssb->pflash.window;
-               lim = mcore_ssb->pflash.window_size;
-               break;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
-       case BCM47XX_BUS_TYPE_BCMA:
-               bcma_cc = &bcm47xx_bus.bcma.bus.drv_cc;
-               base = bcma_cc->pflash.window;
-               lim = bcma_cc->pflash.window_size;
-               break;
-#endif
-       }
-
+       /* TODO: when nvram is on nand flash check for bad blocks first. */
        off = FLASH_MIN;
        while (off <= lim) {
                /* Windowed flash access */
-               header = (struct nvram_header *)
-                       KSEG1ADDR(base + off - NVRAM_SPACE);
-               if (header->magic == NVRAM_HEADER)
+               size = find_nvram_size(base + off);
+               if (size) {
+                       header = (struct nvram_header *)KSEG1ADDR(base + off -
+                                                                 size);
                        goto found;
+               }
                off <<= 1;
        }
 
        /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
        header = (struct nvram_header *) KSEG1ADDR(base + 4096);
-       if (header->magic == NVRAM_HEADER)
+       if (header->magic == NVRAM_HEADER) {
+               size = NVRAM_SPACE;
                goto found;
+       }
 
        header = (struct nvram_header *) KSEG1ADDR(base + 1024);
-       if (header->magic == NVRAM_HEADER)
+       if (header->magic == NVRAM_HEADER) {
+               size = NVRAM_SPACE;
                goto found;
+       }
 
-       return;
+       pr_err("no nvram found\n");
+       return -ENXIO;
 
 found:
+
+       if (header->len > size)
+               pr_err("The nvram size accoridng to the header seems to be bigger than the partition on flash\n");
+       if (header->len > NVRAM_SPACE)
+               pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+                      header->len, NVRAM_SPACE);
+
        src = (u32 *) header;
        dst = (u32 *) nvram_buf;
        for (i = 0; i < sizeof(struct nvram_header); i += 4)
                *dst++ = *src++;
-       for (; i < header->len && i < NVRAM_SPACE; i += 4)
+       for (; i < header->len && i < NVRAM_SPACE && i < size; i += 4)
                *dst++ = le32_to_cpu(*src++);
+       memset(dst, 0x0, NVRAM_SPACE - i);
+
+       return 0;
 }
 
-int nvram_getenv(char *name, char *val, size_t val_len)
+#ifdef CONFIG_BCM47XX_SSB
+static int nvram_init_ssb(void)
+{
+       struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore;
+       u32 base;
+       u32 lim;
+
+       if (mcore->pflash.present) {
+               base = mcore->pflash.window;
+               lim = mcore->pflash.window_size;
+       } else {
+               pr_err("Couldn't find supported flash memory\n");
+               return -ENXIO;
+       }
+
+       return nvram_find_and_copy(base, lim);
+}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+static int nvram_init_bcma(void)
+{
+       struct bcma_drv_cc *cc = &bcm47xx_bus.bcma.bus.drv_cc;
+       u32 base;
+       u32 lim;
+
+#ifdef CONFIG_BCMA_NFLASH
+       if (cc->nflash.boot) {
+               base = BCMA_SOC_FLASH1;
+               lim = BCMA_SOC_FLASH1_SZ;
+       } else
+#endif
+       if (cc->pflash.present) {
+               base = cc->pflash.window;
+               lim = cc->pflash.window_size;
+#ifdef CONFIG_BCMA_SFLASH
+       } else if (cc->sflash.present) {
+               base = cc->sflash.window;
+               lim = cc->sflash.size;
+#endif
+       } else {
+               pr_err("Couldn't find supported flash memory\n");
+               return -ENXIO;
+       }
+
+       return nvram_find_and_copy(base, lim);
+}
+#endif
+
+static int nvram_init(void)
+{
+       switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+       case BCM47XX_BUS_TYPE_SSB:
+               return nvram_init_ssb();
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+       case BCM47XX_BUS_TYPE_BCMA:
+               return nvram_init_bcma();
+#endif
+       }
+       return -ENXIO;
+}
+
+int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len)
 {
        char *var, *value, *end, *eq;
+       int err;
 
        if (!name)
-               return NVRAM_ERR_INV_PARAM;
+               return -EINVAL;
 
-       if (!nvram_buf[0])
-               early_nvram_init();
+       if (!nvram_buf[0]) {
+               err = nvram_init();
+               if (err)
+                       return err;
+       }
 
        /* Look for name=value and return value */
        var = &nvram_buf[sizeof(struct nvram_header)];
@@ -110,6 +187,6 @@ int nvram_getenv(char *name, char *val, size_t val_len)
                        return snprintf(val, val_len, "%s", value);
                }
        }
-       return NVRAM_ERR_ENVNOTFOUND;
+       return -ENOENT;
 }
-EXPORT_SYMBOL(nvram_getenv);
+EXPORT_SYMBOL(bcm47xx_nvram_getenv);
index 4d54b58..b2246cd 100644 (file)
@@ -35,7 +35,7 @@
 #include <asm/reboot.h>
 #include <asm/time.h>
 #include <bcm47xx.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
 
 union bcm47xx_bus bcm47xx_bus;
 EXPORT_SYMBOL(bcm47xx_bus);
@@ -115,7 +115,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
        memset(&iv->sprom, 0, sizeof(struct ssb_sprom));
        bcm47xx_fill_sprom(&iv->sprom, NULL, false);
 
-       if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
+       if (bcm47xx_nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
                iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10);
 
        return 0;
@@ -138,7 +138,7 @@ static void __init bcm47xx_register_ssb(void)
                panic("Failed to initialize SSB bus (err %d)", err);
 
        mcore = &bcm47xx_bus.ssb.mipscore;
-       if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
+       if (bcm47xx_nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
                if (strstr(buf, "console=ttyS1")) {
                        struct ssb_serial_port port;
 
index 289cc0a..ad03c93 100644 (file)
@@ -27,7 +27,7 @@
  */
 
 #include <bcm47xx.h>
-#include <nvram.h>
+#include <bcm47xx_nvram.h>
 
 static void create_key(const char *prefix, const char *postfix,
                       const char *name, char *buf, int len)
@@ -50,18 +50,18 @@ static int get_nvram_var(const char *prefix, const char *postfix,
 
        create_key(prefix, postfix, name, key, sizeof(key));
 
-       err = nvram_getenv(key, buf, len);
-       if (fallback && err == NVRAM_ERR_ENVNOTFOUND && prefix) {
+       err = bcm47xx_nvram_getenv(key, buf, len);
+       if (fallback && err == -ENOENT && prefix) {
                create_key(NULL, postfix, name, key, sizeof(key));
-               err = nvram_getenv(key, buf, len);
+               err = bcm47xx_nvram_getenv(key, buf, len);
        }
        return err;
 }
 
 #define NVRAM_READ_VAL(type)                                           \
 static void nvram_read_ ## type (const char *prefix,                   \
-                                const char *postfix, const char *name, \
-                                type *val, type allset, bool fallback) \
+                                const char *postfix, const char *name, \
+                                type *val, type allset, bool fallback) \
 {                                                                      \
        char buf[100];                                                  \
        int err;                                                        \
@@ -71,7 +71,7 @@ static void nvram_read_ ## type (const char *prefix,                  \
                            fallback);                                  \
        if (err < 0)                                                    \
                return;                                                 \
-       err = kstrto ## type (buf, 0, &var);                            \
+       err = kstrto ## type(strim(buf), 0, &var);                      \
        if (err) {                                                      \
                pr_warn("can not parse nvram name %s%s%s with value %s got %i\n",       \
                        prefix, name, postfix, buf, err);               \
@@ -99,7 +99,7 @@ static void nvram_read_u32_2(const char *prefix, const char *name,
        err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
        if (err < 0)
                return;
-       err = kstrtou32(buf, 0, &val);
+       err = kstrtou32(strim(buf), 0, &val);
        if (err) {
                pr_warn("can not parse nvram name %s%s with value %s got %i\n",
                        prefix, name, buf, err);
@@ -120,7 +120,7 @@ static void nvram_read_leddc(const char *prefix, const char *name,
        err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
        if (err < 0)
                return;
-       err = kstrtou32(buf, 0, &val);
+       err = kstrtou32(strim(buf), 0, &val);
        if (err) {
                pr_warn("can not parse nvram name %s%s with value %s got %i\n",
                        prefix, name, buf, err);
@@ -144,7 +144,7 @@ static void nvram_read_macaddr(const char *prefix, const char *name,
        if (err < 0)
                return;
 
-       nvram_parse_macaddr(buf, *val);
+       bcm47xx_nvram_parse_macaddr(buf, *val);
 }
 
 static void nvram_read_alpha2(const char *prefix, const char *name,
@@ -652,12 +652,10 @@ static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
 static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
                                    bool fallback)
 {
-       nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0,
-                      fallback);
+       nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0, true);
        nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0,
                       fallback);
-       nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0,
-                      fallback);
+       nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0, true);
        nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
                         &sprom->boardflags_hi, fallback);
        nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
index 9d111e8..c63a4c2 100644 (file)
@@ -36,13 +36,13 @@ static struct gpio_led wgt634u_leds[] = {
 };
 
 static struct gpio_led_platform_data wgt634u_led_data = {
-       .num_leds =     ARRAY_SIZE(wgt634u_leds),
-       .leds =         wgt634u_leds,
+       .num_leds =     ARRAY_SIZE(wgt634u_leds),
+       .leds =         wgt634u_leds,
 };
 
 static struct platform_device wgt634u_gpio_leds = {
-       .name =         "leds-gpio",
-       .id =           -1,
+       .name =         "leds-gpio",
+       .id =           -1,
        .dev = {
                .platform_data = &wgt634u_led_data,
        }
@@ -53,35 +53,35 @@ static struct platform_device wgt634u_gpio_leds = {
    firmware. */
 static struct mtd_partition wgt634u_partitions[] = {
        {
-               .name       = "cfe",
-               .offset     = 0,
-               .size       = 0x60000,          /* 384k */
-               .mask_flags = MTD_WRITEABLE     /* force read-only */
+               .name       = "cfe",
+               .offset     = 0,
+               .size       = 0x60000,          /* 384k */
+               .mask_flags = MTD_WRITEABLE     /* force read-only */
        },
        {
-               .name   = "config",
+               .name   = "config",
                .offset = 0x60000,
-               .size   = 0x20000               /* 128k */
+               .size   = 0x20000               /* 128k */
        },
        {
-               .name   = "linux",
+               .name   = "linux",
                .offset = 0x80000,
-               .size   = 0x140000              /* 1280k */
+               .size   = 0x140000              /* 1280k */
        },
        {
-               .name   = "jffs",
+               .name   = "jffs",
                .offset = 0x1c0000,
-               .size   = 0x620000              /* 6272k */
+               .size   = 0x620000              /* 6272k */
        },
        {
-               .name   = "nvram",
+               .name   = "nvram",
                .offset = 0x7e0000,
-               .size   = 0x20000               /* 128k */
+               .size   = 0x20000               /* 128k */
        },
 };
 
 static struct physmap_flash_data wgt634u_flash_data = {
-       .parts    = wgt634u_partitions,
+       .parts    = wgt634u_partitions,
        .nr_parts = ARRAY_SIZE(wgt634u_partitions)
 };
 
@@ -90,9 +90,9 @@ static struct resource wgt634u_flash_resource = {
 };
 
 static struct platform_device wgt634u_flash = {
-       .name          = "physmap-flash",
-       .id            = 0,
-       .dev           = { .platform_data = &wgt634u_flash_data, },
+       .name          = "physmap-flash",
+       .id            = 0,
+       .dev           = { .platform_data = &wgt634u_flash_data, },
        .resource      = &wgt634u_flash_resource,
        .num_resources = 1,
 };
index 73be9b3..ed1949c 100644 (file)
@@ -406,9 +406,9 @@ static struct board_info __initdata board_FAST2404 = {
        .expected_cpu_id                = 0x6348,
 
        .has_uart0                      = 1,
-        .has_enet0                     = 1,
-        .has_enet1                     = 1,
-        .has_pci                       = 1,
+       .has_enet0                      = 1,
+       .has_enet1                      = 1,
+       .has_pci                        = 1,
 
        .enet0 = {
                .has_phy                = 1,
@@ -591,22 +591,22 @@ static struct board_info __initdata board_96358vw2 = {
 };
 
 static struct board_info __initdata board_AGPFS0 = {
-       .name                           = "AGPF-S0",
-       .expected_cpu_id                = 0x6358,
+       .name                           = "AGPF-S0",
+       .expected_cpu_id                = 0x6358,
 
        .has_uart0                      = 1,
-       .has_enet0                      = 1,
-       .has_enet1                      = 1,
-       .has_pci                        = 1,
+       .has_enet0                      = 1,
+       .has_enet1                      = 1,
+       .has_pci                        = 1,
 
        .enet0 = {
-               .has_phy                = 1,
-               .use_internal_phy       = 1,
+               .has_phy                = 1,
+               .use_internal_phy       = 1,
        },
 
        .enet1 = {
-               .force_speed_100        = 1,
-               .force_duplex_full      = 1,
+               .force_speed_100        = 1,
+               .force_duplex_full      = 1,
        },
 
        .has_ohci0 = 1,
@@ -677,7 +677,7 @@ static struct ssb_sprom bcm63xx_sprom = {
        .revision               = 0x02,
        .board_rev              = 0x17,
        .country_code           = 0x0,
-       .ant_available_bg       = 0x3,
+       .ant_available_bg       = 0x3,
        .pa0b0                  = 0x15ae,
        .pa0b1                  = 0xfa85,
        .pa0b2                  = 0xfe8d,
index bf353c9..aa8f7f9 100644 (file)
@@ -10,7 +10,7 @@
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
 
-static void __init wait_xfered(void)
+static void wait_xfered(void)
 {
        unsigned int val;
 
@@ -22,7 +22,7 @@ static void __init wait_xfered(void)
        } while (1);
 }
 
-void __init prom_putchar(char c)
+void prom_putchar(char c)
 {
        wait_xfered();
        bcm_uart0_writel(c, UART_FIFO_REG);
index 85bcb5a..851261e 100644 (file)
@@ -24,7 +24,7 @@ strip-flags   := $(addprefix --remove-section=,$(drop-sections))
 hostprogs-y := elf2ecoff
 
 targets := vmlinux.ecoff
-quiet_cmd_ecoff = ECOFF   $@
+quiet_cmd_ecoff = ECOFF          $@
       cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
 $(obj)/vmlinux.ecoff: $(obj)/elf2ecoff $(VMLINUX) FORCE
        $(call if_changed,ecoff)
index c2a3fb0..bbaa1d4 100644 (file)
@@ -51,7 +51,7 @@ $(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE
 
 targets += piggy.o
 OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
-                        --set-section-flags=.image=contents,alloc,load,readonly,data
+                       --set-section-flags=.image=contents,alloc,load,readonly,data
 $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
        $(call if_changed,objcopy)
 
@@ -67,9 +67,9 @@ endif
 
 vmlinuzobjs-y += $(obj)/piggy.o
 
-quiet_cmd_zld = LD      $@
+quiet_cmd_zld = LD     $@
       cmd_zld = $(LD) $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
-quiet_cmd_strip = STRIP   $@
+quiet_cmd_strip = STRIP          $@
       cmd_strip = $(STRIP) -s $@
 vmlinuz: $(src)/ld.script $(vmlinuzobjs-y) $(obj)/calc_vmlinuz_load_addr
        $(call cmd,zld)
@@ -96,7 +96,7 @@ quiet_cmd_32 = OBJCOPY $@
 vmlinuz.32: vmlinuz
        $(call cmd,32)
 
-quiet_cmd_ecoff = ECOFF   $@
+quiet_cmd_ecoff = ECOFF          $@
       cmd_ecoff = $< $(VMLINUZ) $@ $(e2eflag)
 vmlinuz.ecoff: $(obj)/../elf2ecoff $(VMLINUZ)
        $(call cmd,ecoff)
index 9a62436..37fe58c 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (C) 2010 "Wu Zhangjin" <wuzhangjin@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 5cad0fa..2c95730 100644 (file)
@@ -5,8 +5,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 4e65a84..409cb48 100644 (file)
@@ -32,8 +32,8 @@ start:
        bne     a2, a0, 1b
         addiu  a0, a0, 4
 
-       PTR_LA  a0, (.heap)          /* heap address */
-       PTR_LA  sp, (.stack + 8192)  /* stack address */
+       PTR_LA  a0, (.heap)          /* heap address */
+       PTR_LA  sp, (.stack + 8192)  /* stack address */
 
        PTR_LA  ra, 2f
        PTR_LA  k0, decompress_kernel
index 8c3eed2..83e5c38 100644 (file)
@@ -2,48 +2,48 @@
  * Some ECOFF definitions.
  */
 typedef struct filehdr {
-        unsigned short  f_magic;        /* magic number */
-        unsigned short  f_nscns;        /* number of sections */
-        long            f_timdat;       /* time & date stamp */
-        long            f_symptr;       /* file pointer to symbolic header */
-        long            f_nsyms;        /* sizeof(symbolic hdr) */
-        unsigned short  f_opthdr;       /* sizeof(optional hdr) */
-        unsigned short  f_flags;        /* flags */
+       unsigned short  f_magic;        /* magic number */
+       unsigned short  f_nscns;        /* number of sections */
+       long            f_timdat;       /* time & date stamp */
+       long            f_symptr;       /* file pointer to symbolic header */
+       long            f_nsyms;        /* sizeof(symbolic hdr) */
+       unsigned short  f_opthdr;       /* sizeof(optional hdr) */
+       unsigned short  f_flags;        /* flags */
 } FILHDR;
-#define FILHSZ  sizeof(FILHDR)
+#define FILHSZ sizeof(FILHDR)
 
 #define OMAGIC         0407
 #define MIPSEBMAGIC    0x160
 #define MIPSELMAGIC    0x162
 
 typedef struct scnhdr {
-        char            s_name[8];      /* section name */
-        long            s_paddr;        /* physical address, aliased s_nlib */
-        long            s_vaddr;        /* virtual address */
-        long            s_size;         /* section size */
-        long            s_scnptr;       /* file ptr to raw data for section */
-        long            s_relptr;       /* file ptr to relocation */
-        long            s_lnnoptr;      /* file ptr to gp histogram */
-        unsigned short  s_nreloc;       /* number of relocation entries */
-        unsigned short  s_nlnno;        /* number of gp histogram entries */
-        long            s_flags;        /* flags */
+       char            s_name[8];      /* section name */
+       long            s_paddr;        /* physical address, aliased s_nlib */
+       long            s_vaddr;        /* virtual address */
+       long            s_size;         /* section size */
+       long            s_scnptr;       /* file ptr to raw data for section */
+       long            s_relptr;       /* file ptr to relocation */
+       long            s_lnnoptr;      /* file ptr to gp histogram */
+       unsigned short  s_nreloc;       /* number of relocation entries */
+       unsigned short  s_nlnno;        /* number of gp histogram entries */
+       long            s_flags;        /* flags */
 } SCNHDR;
 #define SCNHSZ         sizeof(SCNHDR)
 #define SCNROUND       ((long)16)
 
 typedef struct aouthdr {
-        short   magic;          /* see above                            */
-        short   vstamp;         /* version stamp                        */
-        long    tsize;          /* text size in bytes, padded to DW bdry*/
-        long    dsize;          /* initialized data "  "                */
-        long    bsize;          /* uninitialized data "   "             */
-        long    entry;          /* entry pt.                            */
-        long    text_start;     /* base of text used for this file      */
-        long    data_start;     /* base of data used for this file      */
-        long    bss_start;      /* base of bss used for this file       */
-        long    gprmask;        /* general purpose register mask        */
-        long    cprmask[4];     /* co-processor register masks          */
-        long    gp_value;       /* the gp value used for this object    */
+       short   magic;          /* see above                            */
+       short   vstamp;         /* version stamp                        */
+       long    tsize;          /* text size in bytes, padded to DW bdry*/
+       long    dsize;          /* initialized data "  "                */
+       long    bsize;          /* uninitialized data "   "             */
+       long    entry;          /* entry pt.                            */
+       long    text_start;     /* base of text used for this file      */
+       long    data_start;     /* base of data used for this file      */
+       long    bss_start;      /* base of bss used for this file       */
+       long    gprmask;        /* general purpose register mask        */
+       long    cprmask[4];     /* co-processor register masks          */
+       long    gp_value;       /* the gp value used for this object    */
 } AOUTHDR;
 #define AOUTHSZ sizeof(AOUTHDR)
 
@@ -51,7 +51,7 @@ typedef struct aouthdr {
 #define NMAGIC         0410
 #define ZMAGIC         0413
 #define SMAGIC         0411
-#define LIBMAGIC        0443
+#define LIBMAGIC       0443
 
 #define N_TXTOFF(f, a) \
  ((a).magic == ZMAGIC || (a).magic == LIBMAGIC ? 0 : \
index e19d906..8585078 100644 (file)
@@ -29,7 +29,7 @@
 /* elf2ecoff.c
 
    This program converts an elf executable to an ECOFF executable.
-   No symbol table is retained.   This is useful primarily in building
+   No symbol table is retained.          This is useful primarily in building
    net-bootable kernels for machines (e.g., DECstation and Alpha) which
    only support the ECOFF object file format. */
 
@@ -341,7 +341,7 @@ int main(int argc, char *argv[])
 
        /* Figure out if we can cram the program header into an ECOFF
           header...  Basically, we can't handle anything but loadable
-          segments, but we can ignore some kinds of segments.  We can't
+          segments, but we can ignore some kinds of segments.  We can't
           handle holes in the address space.  Segments may be out of order,
           so we sort them first. */
 
@@ -514,7 +514,7 @@ int main(int argc, char *argv[])
 
                for (i = 0; i < nosecs; i++) {
                        printf
-                           ("Section %d: %s phys %lx  size %lx  file offset %lx\n",
+                           ("Section %d: %s phys %lx  size %lx  file offset %lx\n",
                             i, esecs[i].s_name, esecs[i].s_paddr,
                             esecs[i].s_size, esecs[i].s_scnptr);
                }
@@ -551,7 +551,7 @@ int main(int argc, char *argv[])
        }
 
        /*
-        * Copy the loadable sections.   Zero-fill any gaps less than 64k;
+        * Copy the loadable sections.   Zero-fill any gaps less than 64k;
         * complain about any zero-filling, and die if we're asked to zero-fill
         * more than 64k.
         */
index 2f4f6d5..75a6df7 100644 (file)
@@ -94,4 +94,13 @@ config SWIOTLB
        select NEED_SG_DMA_LENGTH
 
 
+config OCTEON_ILM
+       tristate "Module to measure interrupt latency using Octeon CIU Timer"
+       help
+         This driver is a module to measure interrupt latency using the
+         the CIU Timers on Octeon.
+
+         To compile this driver as a module, choose M here.  The module
+         will be called octeon-ilm
+
 endif # CPU_CAVIUM_OCTEON
index 6e927cf..3595aff 100644 (file)
@@ -17,7 +17,8 @@ obj-y += dma-octeon.o flash_setup.o
 obj-y += octeon-memcpy.o
 obj-y += executive/
 
-obj-$(CONFIG_SMP)                     += smp.o
+obj-$(CONFIG_SMP)                    += smp.o
+obj-$(CONFIG_OCTEON_ILM)             += oct_ilm.o
 
 DTS_FILES = octeon_3xxx.dts octeon_68xx.dts
 DTB_FILES = $(patsubst %.dts, %.dtb, $(DTS_FILES))
index 6d5ddbc..504ed61 100644 (file)
@@ -155,8 +155,8 @@ int cvmx_bootmem_init(void *mem_desc_ptr)
         *
         * Linux 64 bit: Set XKPHYS bit
         * Linux 32 bit: use mmap to create mapping, use virtual address
-        * CVMX 64 bit:  use physical address directly
-        * CVMX 32 bit:  use physical address directly
+        * CVMX 64 bit:  use physical address directly
+        * CVMX 32 bit:  use physical address directly
         *
         * Note that the CVMX environment assumes the use of 1-1 TLB
         * mappings so that the physical addresses can be used
@@ -398,7 +398,7 @@ error_out:
 int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
 {
        uint64_t cur_addr;
-       uint64_t prev_addr = 0; /* zero is invalid */
+       uint64_t prev_addr = 0; /* zero is invalid */
        int retval = 0;
 
 #ifdef DEBUG
@@ -424,7 +424,7 @@ int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
        if (cur_addr == 0 || phy_addr < cur_addr) {
                /* add at front of list - special case with changing head ptr */
                if (cur_addr && phy_addr + size > cur_addr)
-                       goto bootmem_free_done; /* error, overlapping section */
+                       goto bootmem_free_done; /* error, overlapping section */
                else if (phy_addr + size == cur_addr) {
                        /* Add to front of existing first block */
                        cvmx_bootmem_phy_set_next(phy_addr,
@@ -611,7 +611,7 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
        }
 
        cvmx_bootmem_unlock();
-       return named_block_ptr != NULL; /* 0 on failure, 1 on success */
+       return named_block_ptr != NULL; /* 0 on failure, 1 on success */
 }
 
 int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
index fd20153..7c64977 100644 (file)
@@ -203,10 +203,10 @@ int cvmx_helper_board_get_mii_address(int ipd_port)
  * enumeration from the bootloader.
  *
  * @ipd_port: IPD input port associated with the port we want to get link
- *                 status for.
+ *                status for.
  *
  * Returns The ports link status. If the link isn't fully resolved, this must
- *         return zero.
+ *        return zero.
  */
 cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
 {
@@ -357,16 +357,16 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
                                result.s.link_up = 1;
                                result.s.full_duplex = ((phy_status >> 13) & 1);
                                switch ((phy_status >> 14) & 3) {
-                               case 0: /* 10 Mbps */
+                               case 0: /* 10 Mbps */
                                        result.s.speed = 10;
                                        break;
-                               case 1: /* 100 Mbps */
+                               case 1: /* 100 Mbps */
                                        result.s.speed = 100;
                                        break;
-                               case 2: /* 1 Gbps */
+                               case 2: /* 1 Gbps */
                                        result.s.speed = 1000;
                                        break;
-                               case 3: /* Illegal */
+                               case 3: /* Illegal */
                                        result.u64 = 0;
                                        break;
                                }
@@ -391,16 +391,16 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
                result.s.link_up = inband_status.s.status;
                result.s.full_duplex = inband_status.s.duplex;
                switch (inband_status.s.speed) {
-               case 0: /* 10 Mbps */
+               case 0: /* 10 Mbps */
                        result.s.speed = 10;
                        break;
-               case 1: /* 100 Mbps */
+               case 1: /* 100 Mbps */
                        result.s.speed = 100;
                        break;
-               case 2: /* 1 Gbps */
+               case 2: /* 1 Gbps */
                        result.s.speed = 1000;
                        break;
-               case 3: /* Illegal */
+               case 3: /* Illegal */
                        result.u64 = 0;
                        break;
                }
@@ -429,9 +429,9 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
  *
  * @phy_addr:  The address of the PHY to program
  * @enable_autoneg:
- *                  Non zero if you want to enable auto-negotiation.
+ *                 Non zero if you want to enable auto-negotiation.
  * @link_info: Link speed to program. If the speed is zero and auto-negotiation
- *                  is enabled, all possible negotiation speeds are advertised.
+ *                 is enabled, all possible negotiation speeds are advertised.
  *
  * Returns Zero on success, negative on failure
  */
@@ -607,10 +607,10 @@ int cvmx_helper_board_link_set_phy(int phy_addr,
  *
  * @interface: Interface to probe
  * @supported_ports:
- *                  Number of ports Octeon supports.
+ *                 Number of ports Octeon supports.
  *
  * Returns Number of ports the actual board supports. Many times this will
- *         simple be "support_ports".
+ *        simply be "supported_ports".
  */
 int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
 {
index c1c5489..607b4e6 100644 (file)
@@ -79,10 +79,10 @@ void cvmx_helper_qlm_jtag_init(void)
  * @qlm:    QLM to shift value into
  * @bits:   Number of bits to shift in (1-32).
  * @data:   Data to shift in. Bit 0 enters the chain first, followed by
- *               bit 1, etc.
+ *              bit 1, etc.
  *
  * Returns The low order bits of the JTAG chain that shifted out of the
- *         circle.
+ *        circle.
  */
 uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
 {
index 82b2184..f59c88e 100644 (file)
@@ -131,7 +131,7 @@ void cvmx_helper_rgmii_internal_loopback(int port)
  * @interface: Interface to setup
  * @port:      Port to setup (0..3)
  * @cpu_clock_hz:
- *                  Chip frequency in Hertz
+ *                 Chip frequency in Hertz
  *
  * Returns Zero on success, negative on failure
  */
@@ -409,14 +409,14 @@ int __cvmx_helper_rgmii_link_set(int ipd_port,
                        mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
 
        /*
-        * Port  .en  .type  .p0mii  Configuration
-        * ----  ---  -----  ------  -----------------------------------------
-        *  X      0     X      X    All links are disabled.
-        *  0      1     X      0    Port 0 is RGMII
-        *  0      1     X      1    Port 0 is MII
-        *  1      1     0      X    Ports 1 and 2 are configured as RGMII ports.
-        *  1      1     1      X    Port 1: GMII/MII; Port 2: disabled. GMII or
-        *                           MII port is selected by GMX_PRT1_CFG[SPEED].
+        * Port  .en  .type  .p0mii  Configuration
+        * ----  ---  -----  ------  -----------------------------------------
+        *  X      0     X      X    All links are disabled.
+        *  0      1     X      0    Port 0 is RGMII
+        *  0      1     X      1    Port 0 is MII
+        *  1      1     0      X    Ports 1 and 2 are configured as RGMII ports.
+        *  1      1     1      X    Port 1: GMII/MII; Port 2: disabled. GMII or
+        *                           MII port is selected by GMX_PRT1_CFG[SPEED].
         */
 
                        /* In MII mode, CLK_CNT = 1. */
@@ -464,9 +464,9 @@ int __cvmx_helper_rgmii_link_set(int ipd_port,
  *
  * @ipd_port: IPD/PKO port to loopback.
  * @enable_internal:
- *                 Non zero if you want internal loopback
+ *                Non zero if you want internal loopback
  * @enable_external:
- *                 Non zero if you want external loopback
+ *                Non zero if you want external loopback
  *
  * Returns Zero on success, negative on failure.
  */
index 0c0bf5d..45f18cc 100644 (file)
@@ -523,9 +523,9 @@ int __cvmx_helper_sgmii_link_set(int ipd_port,
  *
  * @ipd_port: IPD/PKO port to loopback.
  * @enable_internal:
- *                 Non zero if you want internal loopback
+ *                Non zero if you want internal loopback
  * @enable_external:
- *                 Non zero if you want external loopback
+ *                Non zero if you want external loopback
  *
  * Returns Zero on success, negative on failure.
  */
index 2830e4b..1f3030c 100644 (file)
@@ -160,16 +160,16 @@ cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
                result.s.link_up = inband.s.status;
                result.s.full_duplex = inband.s.duplex;
                switch (inband.s.speed) {
-               case 0: /* 10 Mbps */
+               case 0: /* 10 Mbps */
                        result.s.speed = 10;
                        break;
-               case 1: /* 100 Mbps */
+               case 1: /* 100 Mbps */
                        result.s.speed = 100;
                        break;
-               case 2: /* 1 Gbps */
+               case 2: /* 1 Gbps */
                        result.s.speed = 1000;
                        break;
-               case 3: /* Illegal */
+               case 3: /* Illegal */
                        result.s.speed = 0;
                        result.s.link_up = 0;
                        break;
index dfdfe8b..65d2bc9 100644 (file)
@@ -96,9 +96,9 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
        uint8_t *end_of_data;
 
        cvmx_dprintf("Packet Length:   %u\n", work->len);
-       cvmx_dprintf("    Input Port:  %u\n", work->ipprt);
-       cvmx_dprintf("    QoS:         %u\n", work->qos);
-       cvmx_dprintf("    Buffers:     %u\n", work->word2.s.bufs);
+       cvmx_dprintf("    Input Port:  %u\n", work->ipprt);
+       cvmx_dprintf("    QoS:         %u\n", work->qos);
+       cvmx_dprintf("    Buffers:     %u\n", work->word2.s.bufs);
 
        if (work->word2.s.bufs == 0) {
                union cvmx_ipd_wqe_fpa_queue wqe_pool;
@@ -132,14 +132,14 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
        while (remaining_bytes) {
                start_of_buffer =
                    ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
-               cvmx_dprintf("    Buffer Start:%llx\n",
+               cvmx_dprintf("    Buffer Start:%llx\n",
                             (unsigned long long)start_of_buffer);
-               cvmx_dprintf("    Buffer I   : %u\n", buffer_ptr.s.i);
-               cvmx_dprintf("    Buffer Back: %u\n", buffer_ptr.s.back);
-               cvmx_dprintf("    Buffer Pool: %u\n", buffer_ptr.s.pool);
-               cvmx_dprintf("    Buffer Data: %llx\n",
+               cvmx_dprintf("    Buffer I   : %u\n", buffer_ptr.s.i);
+               cvmx_dprintf("    Buffer Back: %u\n", buffer_ptr.s.back);
+               cvmx_dprintf("    Buffer Pool: %u\n", buffer_ptr.s.pool);
+               cvmx_dprintf("    Buffer Data: %llx\n",
                             (unsigned long long)buffer_ptr.s.addr);
-               cvmx_dprintf("    Buffer Size: %u\n", buffer_ptr.s.size);
+               cvmx_dprintf("    Buffer Size: %u\n", buffer_ptr.s.size);
 
                cvmx_dprintf("\t\t");
                data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
@@ -172,11 +172,11 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
  *
  * @queue:  Input queue to setup RED on (0-7)
  * @pass_thresh:
- *               Packets will begin slowly dropping when there are less than
- *               this many packet buffers free in FPA 0.
+ *              Packets will begin slowly dropping when there are less than
+ *              this many packet buffers free in FPA 0.
  * @drop_thresh:
- *               All incoming packets will be dropped when there are less
- *               than this many free packet buffers in FPA 0.
+ *              All incoming packets will be dropped when there are less
+ *              than this many free packet buffers in FPA 0.
  * Returns Zero on success. Negative on failure
  */
 int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
@@ -207,11 +207,11 @@ int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
  * Setup Random Early Drop to automatically begin dropping packets.
  *
  * @pass_thresh:
- *               Packets will begin slowly dropping when there are less than
- *               this many packet buffers free in FPA 0.
+ *              Packets will begin slowly dropping when there are less than
+ *              this many packet buffers free in FPA 0.
  * @drop_thresh:
- *               All incoming packets will be dropped when there are less
- *               than this many free packet buffers in FPA 0.
+ *              All incoming packets will be dropped when there are less
+ *              than this many free packet buffers in FPA 0.
  * Returns Zero on success. Negative on failure
  */
 int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
index 1723248..7653b7e 100644 (file)
@@ -321,9 +321,9 @@ int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
  *
  * @ipd_port: IPD/PKO port to loopback.
  * @enable_internal:
- *                 Non zero if you want internal loopback
+ *                Non zero if you want internal loopback
  * @enable_external:
- *                 Non zero if you want external loopback
+ *                Non zero if you want external loopback
  *
  * Returns Zero on success, negative on failure.
  */
index fa49638..d63d20d 100644 (file)
@@ -111,7 +111,7 @@ int cvmx_helper_ports_on_interface(int interface)
  * @interface: Interface to probe
  *
  * Returns Mode of the interface. Unknown or unsupported interfaces return
- *         DISABLED.
+ *        DISABLED.
  */
 cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
 {
@@ -187,7 +187,7 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
  * the defines in executive-config.h.
  *
  * @ipd_port: Port to configure. This follows the IPD numbering, not the
- *                 per interface numbering
+ *                per interface numbering
  *
  * Returns Zero on success, negative on failure
  */
@@ -591,7 +591,7 @@ static int __cvmx_helper_packet_hardware_enable(int interface)
  * Function to adjust internal IPD pointer alignments
  *
  * Returns 0 on success
- *         !0 on failure
+ *        !0 on failure
  */
 int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
 {
@@ -1068,9 +1068,9 @@ int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
  *
  * @ipd_port: IPD/PKO port to loopback.
  * @enable_internal:
- *                 Non zero if you want internal loopback
+ *                Non zero if you want internal loopback
  * @enable_external:
- *                 Non zero if you want external loopback
+ *                Non zero if you want external loopback
  *
  * Returns Zero on success, negative on failure.
  */
index 560e034..fa327ec 100644 (file)
@@ -85,11 +85,11 @@ void __cvmx_interrupt_gmxx_enable(int interface)
        if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
                if (mode.s.en) {
                        switch (mode.cn56xx.mode) {
-                       case 1: /* XAUI */
+                       case 1: /* XAUI */
                                num_ports = 1;
                                break;
-                       case 2: /* SGMII */
-                       case 3: /* PICMG */
+                       case 2: /* SGMII */
+                       case 3: /* PICMG */
                                num_ports = 4;
                                break;
                        default:        /* Disabled */
index 33b7214..42e38c3 100644 (file)
@@ -147,7 +147,7 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask)
        mask &= valid_mask;
 
        /* A UMSK setting which blocks all L2C Ways is an error on some chips */
-       if (mask == valid_mask  && !OCTEON_IS_MODEL(OCTEON_CN63XX))
+       if (mask == valid_mask  && !OCTEON_IS_MODEL(OCTEON_CN63XX))
                return -1;
 
        if (OCTEON_IS_MODEL(OCTEON_CN63XX))
@@ -438,7 +438,7 @@ void cvmx_l2c_flush(void)
                for (set = 0; set < n_set; set++) {
                        for (assoc = 0; assoc < n_assoc; assoc++) {
                                address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-                                                      (assoc << assoc_shift) | (set << set_shift));
+                                                      (assoc << assoc_shift) | (set << set_shift));
                                CVMX_CACHE_WBIL2I(address, 0);
                        }
                }
@@ -573,8 +573,8 @@ union __cvmx_l2c_tag {
  * @index:  Index of the cacheline
  *
  * Returns The Octeon model specific tag structure.  This is
- *         translated by a wrapper function to a generic form that is
- *         easier for applications to use.
+ *        translated by a wrapper function to a generic form that is
+ *        easier for applications to use.
  */
 static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
 {
@@ -618,12 +618,12 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
                ".set push\n\t"
                ".set mips64\n\t"
                ".set noreorder\n\t"
-               "sd    %[dbg_val], 0(%[dbg_addr])\n\t"   /* Enter debug mode, wait for store */
+               "sd    %[dbg_val], 0(%[dbg_addr])\n\t"   /* Enter debug mode, wait for store */
                "ld    $0, 0(%[dbg_addr])\n\t"
-               "ld    %[tag_val], 0(%[tag_addr])\n\t"   /* Read L2C tag data */
-               "sd    $0, 0(%[dbg_addr])\n\t"          /* Exit debug mode, wait for store */
+               "ld    %[tag_val], 0(%[tag_addr])\n\t"   /* Read L2C tag data */
+               "sd    $0, 0(%[dbg_addr])\n\t"          /* Exit debug mode, wait for store */
                "ld    $0, 0(%[dbg_addr])\n\t"
-               "cache 9, 0($0)\n\t"             /* Invalidate dcache to discard debug data */
+               "cache 9, 0($0)\n\t"             /* Invalidate dcache to discard debug data */
                ".set pop"
                : [tag_val] "=r" (tag_val)
                : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
@@ -664,10 +664,10 @@ union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
                CVMX_SYNC;   /* make sure CVMX_L2C_TADX_TAG is updated */
                l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
 
-               tag.s.V     = l2c_tadx_tag.s.valid;
-               tag.s.D     = l2c_tadx_tag.s.dirty;
-               tag.s.L     = l2c_tadx_tag.s.lock;
-               tag.s.U     = l2c_tadx_tag.s.use;
+               tag.s.V     = l2c_tadx_tag.s.valid;
+               tag.s.D     = l2c_tadx_tag.s.dirty;
+               tag.s.L     = l2c_tadx_tag.s.lock;
+               tag.s.U     = l2c_tadx_tag.s.use;
                tag.s.addr  = l2c_tadx_tag.s.tag;
        } else {
                union __cvmx_l2c_tag tmp_tag;
@@ -679,34 +679,34 @@ union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
                 * as it can represent all models.
                 */
                if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
-                       tag.s.V    = tmp_tag.cn58xx.V;
-                       tag.s.D    = tmp_tag.cn58xx.D;
-                       tag.s.L    = tmp_tag.cn58xx.L;
-                       tag.s.U    = tmp_tag.cn58xx.U;
+                       tag.s.V    = tmp_tag.cn58xx.V;
+                       tag.s.D    = tmp_tag.cn58xx.D;
+                       tag.s.L    = tmp_tag.cn58xx.L;
+                       tag.s.U    = tmp_tag.cn58xx.U;
                        tag.s.addr = tmp_tag.cn58xx.addr;
                } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
-                       tag.s.V    = tmp_tag.cn38xx.V;
-                       tag.s.D    = tmp_tag.cn38xx.D;
-                       tag.s.L    = tmp_tag.cn38xx.L;
-                       tag.s.U    = tmp_tag.cn38xx.U;
+                       tag.s.V    = tmp_tag.cn38xx.V;
+                       tag.s.D    = tmp_tag.cn38xx.D;
+                       tag.s.L    = tmp_tag.cn38xx.L;
+                       tag.s.U    = tmp_tag.cn38xx.U;
                        tag.s.addr = tmp_tag.cn38xx.addr;
                } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
-                       tag.s.V    = tmp_tag.cn31xx.V;
-                       tag.s.D    = tmp_tag.cn31xx.D;
-                       tag.s.L    = tmp_tag.cn31xx.L;
-                       tag.s.U    = tmp_tag.cn31xx.U;
+                       tag.s.V    = tmp_tag.cn31xx.V;
+                       tag.s.D    = tmp_tag.cn31xx.D;
+                       tag.s.L    = tmp_tag.cn31xx.L;
+                       tag.s.U    = tmp_tag.cn31xx.U;
                        tag.s.addr = tmp_tag.cn31xx.addr;
                } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
-                       tag.s.V    = tmp_tag.cn30xx.V;
-                       tag.s.D    = tmp_tag.cn30xx.D;
-                       tag.s.L    = tmp_tag.cn30xx.L;
-                       tag.s.U    = tmp_tag.cn30xx.U;
+                       tag.s.V    = tmp_tag.cn30xx.V;
+                       tag.s.D    = tmp_tag.cn30xx.D;
+                       tag.s.L    = tmp_tag.cn30xx.L;
+                       tag.s.U    = tmp_tag.cn30xx.U;
                        tag.s.addr = tmp_tag.cn30xx.addr;
                } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
-                       tag.s.V    = tmp_tag.cn50xx.V;
-                       tag.s.D    = tmp_tag.cn50xx.D;
-                       tag.s.L    = tmp_tag.cn50xx.L;
-                       tag.s.U    = tmp_tag.cn50xx.U;
+                       tag.s.V    = tmp_tag.cn50xx.V;
+                       tag.s.D    = tmp_tag.cn50xx.D;
+                       tag.s.L    = tmp_tag.cn50xx.L;
+                       tag.s.U    = tmp_tag.cn50xx.U;
                        tag.s.addr = tmp_tag.cn50xx.addr;
                } else {
                        cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
@@ -865,7 +865,7 @@ void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
                uint64_t address;
                /* Create the address based on index and association.
                 * Bits<20:17> select the way of the cache block involved in
-                *             the operation
+                *             the operation
                 * Bits<16:7> of the effect address select the index
                 */
                address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
index f557084..f2c8775 100644 (file)
@@ -99,7 +99,7 @@ void cvmx_pko_initialize_global(void)
  * be called after the FPA has been initialized and filled with pages.
  *
  * Returns 0 on success
- *         !0 on failure
+ *        !0 on failure
  */
 int cvmx_pko_initialize_local(void)
 {
@@ -186,19 +186,19 @@ void cvmx_pko_shutdown(void)
 /**
  * Configure a output port and the associated queues for use.
  *
- * @port:       Port to configure.
+ * @port:      Port to configure.
  * @base_queue: First queue number to associate with this port.
  * @num_queues: Number of queues to associate with this port
- * @priority:   Array of priority levels for each queue. Values are
- *                   allowed to be 0-8. A value of 8 get 8 times the traffic
- *                   of a value of 1.  A value of 0 indicates that no rounds
- *                   will be participated in. These priorities can be changed
- *                   on the fly while the pko is enabled. A priority of 9
- *                   indicates that static priority should be used.  If static
- *                   priority is used all queues with static priority must be
- *                   contiguous starting at the base_queue, and lower numbered
- *                   queues have higher priority than higher numbered queues.
- *                   There must be num_queues elements in the array.
+ * @priority:  Array of priority levels for each queue. Values are
+ *                  allowed to be 0-8. A value of 8 get 8 times the traffic
+ *                  of a value of 1.  A value of 0 indicates that no rounds
+ *                  will be participated in. These priorities can be changed
+ *                  on the fly while the pko is enabled. A priority of 9
+ *                  indicates that static priority should be used.  If static
+ *                  priority is used all queues with static priority must be
+ *                  contiguous starting at the base_queue, and lower numbered
+ *                  queues have higher priority than higher numbered queues.
+ *                  There must be num_queues elements in the array.
  */
 cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
                                       uint64_t num_queues,
@@ -440,7 +440,7 @@ void cvmx_pko_show_queue_map()
  * @port:      Port to rate limit
  * @packets_s: Maximum packet/sec
  * @burst:     Maximum number of packets to burst in a row before rate
- *                  limiting cuts in.
+ *                 limiting cuts in.
  *
  * Returns Zero on success, negative on failure
  */
@@ -473,7 +473,7 @@ int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
  * @port:   Port to rate limit
  * @bits_s: PKO rate limit in bits/sec
  * @burst:  Maximum number of bits to burst before rate
- *               limiting cuts in.
+ *              limiting cuts in.
  *
  * Returns Zero on success, negative on failure
  */
index 74afb17..ef5198d 100644 (file)
@@ -69,7 +69,7 @@ static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
 /**
  * Get current SPI4 initialization callbacks
  *
- * @callbacks:  Pointer to the callbacks structure.to fill
+ * @callbacks: Pointer to the callbacks structure to fill
  *
  * Returns Pointer to cvmx_spi_callbacks_t structure.
  */
@@ -92,11 +92,11 @@ void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
  * Initialize and start the SPI interface.
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for clock synchronization in seconds
  * @num_ports: Number of SPI ports to configure
  *
@@ -138,11 +138,11 @@ int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
  * with its correspondent system.
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for clock synchronization in seconds
  *
  * Returns Zero on success, negative of failure.
@@ -160,7 +160,7 @@ int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
        INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
 
        /* NOTE: Calendar setup is not performed during restart */
-       /*       Refer to cvmx_spi_start_interface() for the full sequence */
+       /*       Refer to cvmx_spi_start_interface() for the full sequence */
 
        /* Callback to perform clock detection */
        INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
@@ -182,11 +182,11 @@ int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
  * Callback to perform SPI4 reset
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  *
  * Returns Zero on success, non-zero error code on failure (will cause
  * SPI initialization to abort)
@@ -297,11 +297,11 @@ int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
  * Callback to setup calendar and miscellaneous settings before clock detection
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @num_ports: Number of ports to configure on SPI
  *
  * Returns Zero on success, non-zero error code on failure (will cause
@@ -382,7 +382,7 @@ int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
                stxx_spi4_dat.u64 = 0;
                /*Minimum needed by dynamic alignment */
                stxx_spi4_dat.s.alpha = 32;
-               stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
+               stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
                cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
                               stxx_spi4_dat.u64);
 
@@ -416,11 +416,11 @@ int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
  * Callback to perform clock detection
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for clock synchronization in seconds
  *
  * Returns Zero on success, non-zero error code on failure (will cause
@@ -494,11 +494,11 @@ int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
  * Callback to perform link training
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for link to be trained (in seconds)
  *
  * Returns Zero on success, non-zero error code on failure (will cause
@@ -563,11 +563,11 @@ int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
  * Callback to perform calendar data synchronization
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for calendar data in seconds
  *
  * Returns Zero on success, non-zero error code on failure (will cause
@@ -620,11 +620,11 @@ int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
  * Callback to handle interface up
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  *
  * Returns Zero on success, non-zero error code on failure (will cause
  * SPI initialization to abort)
index 8b18a20..3d17fac 100644 (file)
@@ -74,26 +74,26 @@ EXPORT_SYMBOL(cvmx_sysinfo_get);
 
 /**
  * This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.)  to configure the minimal fields that
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
  * are required to use simple executive files directly.
  *
  * Locking (if required) must be handled outside of this
  * function
  *
  * @phy_mem_desc_ptr:
- *                   Pointer to global physical memory descriptor
- *                   (bootmem descriptor) @board_type: Octeon board
- *                   type enumeration
+ *                  Pointer to global physical memory descriptor
+ *                  (bootmem descriptor) @board_type: Octeon board
+ *                  type enumeration
  *
  * @board_rev_major:
- *                   Board major revision
+ *                  Board major revision
  * @board_rev_minor:
- *                   Board minor revision
+ *                  Board minor revision
  * @cpu_clock_hz:
- *                   CPU clock freqency in hertz
+ *                  CPU clock frequency in hertz
  *
  * Returns 0: Failure
- *         1: success
+ *        1: success
  */
 int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
                                    uint16_t board_type,
diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c
new file mode 100644 (file)
index 0000000..71b213d
--- /dev/null
@@ -0,0 +1,206 @@
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+#define TIMER_NUM 3
+
+static bool reset_stats;
+
+struct latency_info {
+       u64 io_interval;
+       u64 cpu_interval;
+       u64 timer_start1;
+       u64 timer_start2;
+       u64 max_latency;
+       u64 min_latency;
+       u64 latency_sum;
+       u64 average_latency;
+       u64 interrupt_cnt;
+};
+
+static struct latency_info li;
+static struct dentry *dir;
+
+static int show_latency(struct seq_file *m, void *v)
+{
+       u64 cpuclk, avg, max, min;
+       struct latency_info curr_li = li;
+
+       cpuclk = octeon_get_clock_rate();
+
+       max = (curr_li.max_latency * 1000000000) / cpuclk;
+       min = (curr_li.min_latency * 1000000000) / cpuclk;
+       avg = (curr_li.latency_sum * 1000000000) / (cpuclk * curr_li.interrupt_cnt);
+
+       seq_printf(m, "cnt: %10lld, avg: %7lld ns, max: %7lld ns, min: %7lld ns\n",
+                  curr_li.interrupt_cnt, avg, max, min);
+       return 0;
+}
+
+static int oct_ilm_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, show_latency, NULL);
+}
+
+static const struct file_operations oct_ilm_ops = {
+       .open = oct_ilm_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int reset_statistics(void *data, u64 value)
+{
+       reset_stats = true;
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
+
+static int init_debufs(void)
+{
+       struct dentry *show_dentry;
+       dir = debugfs_create_dir("oct_ilm", 0);
+       if (!dir) {
+               pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n");
+               return -1;
+       }
+
+       show_dentry = debugfs_create_file("statistics", 0222, dir, NULL,
+                                         &oct_ilm_ops);
+       if (!show_dentry) {
+               pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n");
+               return -1;
+       }
+
+       show_dentry = debugfs_create_file("reset", 0222, dir, NULL,
+                                         &reset_statistics_ops);
+       if (!show_dentry) {
+               pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n");
+               return -1;
+       }
+
+       return 0;
+
+}
+
+static void init_latency_info(struct latency_info *li, int startup)
+{
+       /* interval in milli seconds after which the interrupt will
+        * be triggered
+        */
+       int interval = 1;
+
+       if (startup) {
+               /* Calculating by the amounts io clock and cpu clock would
+                *  increment in interval amount of ms
+                */
+               li->io_interval = (octeon_get_io_clock_rate() * interval) / 1000;
+               li->cpu_interval = (octeon_get_clock_rate() * interval) / 1000;
+       }
+       li->timer_start1 = 0;
+       li->timer_start2 = 0;
+       li->max_latency = 0;
+       li->min_latency = (u64)-1;
+       li->latency_sum = 0;
+       li->interrupt_cnt = 0;
+}
+
+
+static void start_timer(int timer, u64 interval)
+{
+       union cvmx_ciu_timx timx;
+       unsigned long flags;
+
+       timx.u64 = 0;
+       timx.s.one_shot = 1;
+       timx.s.len = interval;
+       raw_local_irq_save(flags);
+       li.timer_start1 = read_c0_cvmcount();
+       cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+       /* Read it back to force wait until register is written. */
+       timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+       li.timer_start2 = read_c0_cvmcount();
+       raw_local_irq_restore(flags);
+}
+
+
+static irqreturn_t cvm_oct_ciu_timer_interrupt(int cpl, void *dev_id)
+{
+       u64 last_latency;
+       u64 last_int_cnt;
+
+       if (reset_stats) {
+               init_latency_info(&li, 0);
+               reset_stats = false;
+       } else {
+               last_int_cnt = read_c0_cvmcount();
+               last_latency = last_int_cnt - (li.timer_start1 + li.cpu_interval);
+               li.interrupt_cnt++;
+               li.latency_sum += last_latency;
+               if (last_latency > li.max_latency)
+                       li.max_latency = last_latency;
+               if (last_latency < li.min_latency)
+                       li.min_latency = last_latency;
+       }
+       start_timer(TIMER_NUM, li.io_interval);
+       return IRQ_HANDLED;
+}
+
+static void disable_timer(int timer)
+{
+       union cvmx_ciu_timx timx;
+
+       timx.s.one_shot = 0;
+       timx.s.len = 0;
+       cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+       /* Read it back to force immediate write of timer register*/
+       timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+}
+
+static __init int oct_ilm_module_init(void)
+{
+       int rc;
+       int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM;
+
+       rc = init_debufs();
+       if (rc) {
+               WARN(1, "Could not create debugfs entries");
+               return rc;
+       }
+
+       rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD,
+                        "oct_ilm", 0);
+       if (rc) {
+               WARN(1, "Could not acquire IRQ %d", irq);
+               goto err_irq;
+       }
+
+       init_latency_info(&li, 1);
+       start_timer(TIMER_NUM, li.io_interval);
+
+       return 0;
+err_irq:
+       debugfs_remove_recursive(dir);
+       return rc;
+}
+
+static __exit void oct_ilm_module_exit(void)
+{
+       disable_timer(TIMER_NUM);
+       if (dir)
+               debugfs_remove_recursive(dir);
+       free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, 0);
+}
+
+module_exit(oct_ilm_module_exit);
+module_init(oct_ilm_module_init);
+MODULE_AUTHOR("Venkat Subbiah, Cavium");
+MODULE_DESCRIPTION("Measures interrupt latency on Octeon chips.");
+MODULE_LICENSE("GPL");
index 46f5dbc..156aa61 100644 (file)
@@ -1542,7 +1542,7 @@ static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
 
        if (line == 3) /* MIO */
                switch (bit) {
-               case 2:  /* IPD_DRP */
+               case 2:  /* IPD_DRP */
                case 8 ... 11: /* Timers */
                case 48: /* PTP */
                        edge = true;
@@ -1553,7 +1553,7 @@ static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
        else if (line == 6) /* PKT */
                switch (bit) {
                case 52 ... 53: /* ILK_DRP */
-               case 8 ... 12:  /* GMX_DRP */
+               case 8 ... 12:  /* GMX_DRP */
                        edge = true;
                        break;
                default:
index 0ba0eb9..64e08df 100644 (file)
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define LDFIRST LOADR
-#define LDREST  LOADL
+#define LDREST LOADL
 #define STFIRST STORER
-#define STREST  STOREL
+#define STREST STOREL
 #define SHIFT_DISCARD SLLV
 #else
 #define LDFIRST LOADL
-#define LDREST  LOADR
+#define LDREST LOADR
 #define STFIRST STOREL
-#define STREST  STORER
+#define STREST STORER
 #define SHIFT_DISCARD SRLV
 #endif
 
@@ -316,9 +316,9 @@ EXC(         STORE  t0, -8(dst),            s_exc_p1u)
 
 src_unaligned:
 #define rem t8
-       SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
+       SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
        beqz    t0, cleanup_src_unaligned
-        and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
+        and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
 1:
 /*
  * Avoid consecutive LD*'s to the same register since some mips
@@ -326,13 +326,13 @@ src_unaligned:
  * It's OK to load FIRST(N+1) before REST(N) because the two addresses
  * are to the same unit (unless src is aligned, but it's not).
  */
-EXC(   LDFIRST t0, FIRST(0)(src),      l_exc)
-EXC(   LDFIRST t1, FIRST(1)(src),      l_exc_copy)
-       SUB     len, len, 4*NBYTES
+EXC(   LDFIRST t0, FIRST(0)(src),      l_exc)
+EXC(   LDFIRST t1, FIRST(1)(src),      l_exc_copy)
+       SUB     len, len, 4*NBYTES
 EXC(   LDREST  t0, REST(0)(src),       l_exc_copy)
 EXC(   LDREST  t1, REST(1)(src),       l_exc_copy)
-EXC(   LDFIRST t2, FIRST(2)(src),      l_exc_copy)
-EXC(   LDFIRST t3, FIRST(3)(src),      l_exc_copy)
+EXC(   LDFIRST t2, FIRST(2)(src),      l_exc_copy)
+EXC(   LDFIRST t3, FIRST(3)(src),      l_exc_copy)
 EXC(   LDREST  t2, REST(2)(src),       l_exc_copy)
 EXC(   LDREST  t3, REST(3)(src),       l_exc_copy)
        ADD     src, src, 4*NBYTES
index 3c1b625..389512e 100644 (file)
@@ -410,7 +410,7 @@ int __init octeon_prune_device_tree(void)
        pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
        if (pip_path) {
                int pip = fdt_path_offset(initial_boot_params, pip_path);
-               if (pip  >= 0)
+               if (pip  >= 0)
                        for (i = 0; i <= 4; i++)
                                octeon_fdt_pip_iface(pip, i, &mac_addr_base);
        }
index f28b2d0..88cb42d 100644 (file)
@@ -3,7 +3,7 @@
  * OCTEON 3XXX, 5XXX, 63XX device tree skeleton.
  *
  * This device tree is pruned and patched by early boot code before
- * use.  Because of this, it contains a super-set of the available
+ * use.         Because of this, it contains a super-set of the available
  * devices and properties.
  */
 / {
                                cavium,t-we   = <45>;
                                cavium,t-rd-hld = <35>;
                                cavium,t-wr-hld = <45>;
-                               cavium,t-pause  = <0>;
-                               cavium,t-wait   = <0>;
-                               cavium,t-page   = <35>;
+                               cavium,t-pause  = <0>;
+                               cavium,t-wait   = <0>;
+                               cavium,t-page   = <35>;
                                cavium,t-rd-dly = <0>;
 
-                               cavium,pages     = <0>;
+                               cavium,pages     = <0>;
                                cavium,bus-width = <8>;
                        };
                        cavium,cs-config@4 {
                                cavium,t-we   = <320>;
                                cavium,t-rd-hld = <320>;
                                cavium,t-wr-hld = <320>;
-                               cavium,t-pause  = <320>;
-                               cavium,t-wait   = <320>;
-                               cavium,t-page   = <320>;
+                               cavium,t-pause  = <320>;
+                               cavium,t-wait   = <320>;
+                               cavium,t-page   = <320>;
                                cavium,t-rd-dly = <0>;
 
-                               cavium,pages     = <0>;
+                               cavium,pages     = <0>;
                                cavium,bus-width = <8>;
                        };
                        cavium,cs-config@5 {
                                cavium,t-we   = <150>;
                                cavium,t-rd-hld = <100>;
                                cavium,t-wr-hld = <30>;
-                               cavium,t-pause  = <0>;
-                               cavium,t-wait   = <30>;
-                               cavium,t-page   = <320>;
+                               cavium,t-pause  = <0>;
+                               cavium,t-wait   = <30>;
+                               cavium,t-page   = <320>;
                                cavium,t-rd-dly = <0>;
 
-                               cavium,pages     = <0>;
+                               cavium,pages     = <0>;
                                cavium,bus-width = <16>;
                        };
                        cavium,cs-config@6 {
                                cavium,t-we   = <150>;
                                cavium,t-rd-hld = <100>;
                                cavium,t-wr-hld = <70>;
-                               cavium,t-pause  = <0>;
-                               cavium,t-wait   = <0>;
-                               cavium,t-page   = <320>;
+                               cavium,t-pause  = <0>;
+                               cavium,t-wait   = <0>;
+                               cavium,t-page   = <320>;
                                cavium,t-rd-dly = <0>;
 
-                               cavium,pages     = <0>;
+                               cavium,pages     = <0>;
                                cavium,wait-mode;
                                cavium,bus-width = <16>;
                        };
index 1839468..79b46fc 100644 (file)
@@ -3,7 +3,7 @@
  * OCTEON 68XX device tree skeleton.
  *
  * This device tree is pruned and patched by early boot code before
- * use.  Because of this, it contains a super-set of the available
+ * use.         Because of this, it contains a super-set of the available
  * devices and properties.
  */
 / {
                                cavium,t-we   = <35>;
                                cavium,t-rd-hld = <25>;
                                cavium,t-wr-hld = <35>;
-                               cavium,t-pause  = <0>;
-                               cavium,t-wait   = <300>;
-                               cavium,t-page   = <25>;
+                               cavium,t-pause  = <0>;
+                               cavium,t-wait   = <300>;
+                               cavium,t-page   = <25>;
                                cavium,t-rd-dly = <0>;
 
-                               cavium,pages     = <0>;
+                               cavium,pages     = <0>;
                                cavium,bus-width = <8>;
                        };
                        cavium,cs-config@4 {
                                cavium,t-we   = <320>;
                                cavium,t-rd-hld = <320>;
                                cavium,t-wr-hld = <320>;
-                               cavium,t-pause  = <320>;
-                               cavium,t-wait   = <320>;
-                               cavium,t-page   = <320>;
+                               cavium,t-pause  = <320>;
+                               cavium,t-wait   = <320>;
+                               cavium,t-page   = <320>;
                                cavium,t-rd-dly = <0>;
 
-                               cavium,pages     = <0>;
+                               cavium,pages     = <0>;
                                cavium,bus-width = <8>;
                        };
                        cavium,cs-config@5 {
                                cavium,t-we   = <150>;
                                cavium,t-rd-hld = <100>;
                                cavium,t-wr-hld = <300>;
-                               cavium,t-pause  = <0>;
-                               cavium,t-wait   = <300>;
-                               cavium,t-page   = <310>;
+                               cavium,t-pause  = <0>;
+                               cavium,t-wait   = <300>;
+                               cavium,t-page   = <310>;
                                cavium,t-rd-dly = <0>;
 
-                               cavium,pages     = <0>;
+                               cavium,pages     = <0>;
                                cavium,bus-width = <16>;
                        };
                        cavium,cs-config@6 {
                                cavium,t-we   = <150>;
                                cavium,t-rd-hld = <100>;
                                cavium,t-wr-hld = <30>;
-                               cavium,t-pause  = <0>;
-                               cavium,t-wait   = <30>;
-                               cavium,t-page   = <310>;
+                               cavium,t-pause  = <0>;
+                               cavium,t-wait   = <30>;
+                               cavium,t-page   = <310>;
                                cavium,t-rd-dly = <0>;
 
-                               cavium,pages     = <0>;
+                               cavium,pages     = <0>;
                                cavium,wait-mode;
                                cavium,bus-width = <16>;
                        };
index 428864b..7b066bb 100644 (file)
@@ -31,7 +31,7 @@ struct boot_init_vector {
        uint32_t k0_val;
        /* Address of boot info block structure */
        uint64_t boot_info_addr;
-       uint32_t flags;         /* flags */
+       uint32_t flags;         /* flags */
        uint32_t pad;
 };
 
@@ -53,20 +53,20 @@ struct linux_app_boot_info {
 
 /* If not to copy a lot of bootloader's structures
    here is only offset of requested member */
-#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK    0x765c
+#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK   0x765c
 
 /* hardcoded in bootloader */
-#define  LABI_ADDR_IN_BOOTLOADER                         0x700
+#define         LABI_ADDR_IN_BOOTLOADER                         0x700
 
 #define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
 
 #define LABI_SIGNATURE 0xAABBCC01
 
 /*  from uboot-headers/octeon_mem_map.h */
-#define EXCEPTION_BASE_INCR     (4 * 1024)
+#define EXCEPTION_BASE_INCR    (4 * 1024)
                               /* Increment size for exception base addresses (4k minimum) */
-#define EXCEPTION_BASE_BASE     0
-#define BOOTLOADER_PRIV_DATA_BASE       (EXCEPTION_BASE_BASE + 0x800)
-#define BOOTLOADER_BOOT_VECTOR          (BOOTLOADER_PRIV_DATA_BASE)
+#define EXCEPTION_BASE_BASE    0
+#define BOOTLOADER_PRIV_DATA_BASE      (EXCEPTION_BASE_BASE + 0x800)
+#define BOOTLOADER_BOOT_VECTOR         (BOOTLOADER_PRIV_DATA_BASE)
 
 #endif /* __OCTEON_BOOT_H__ */
index d7e0a09..c594a3d 100644 (file)
@@ -319,7 +319,7 @@ EXPORT_SYMBOL(octeon_get_io_clock_rate);
  * exists on most Cavium evaluation boards. If it doesn't exist, then
  * this function doesn't do anything.
  *
- * @s:      String to write
+ * @s:     String to write
  */
 void octeon_write_lcd(const char *s)
 {
@@ -341,7 +341,7 @@ void octeon_write_lcd(const char *s)
 /**
  * Return the console uart passed by the bootloader
  *
- * Returns uart   (0 or 1)
+ * Returns uart          (0 or 1)
  */
 int octeon_get_boot_uart(void)
 {
@@ -805,7 +805,7 @@ void __init prom_init(void)
                        /*
                         * To do: switch parsing to new style, something like:
                         * parse_crashkernel(arg, sysinfo->system_dram_size,
-                        *                &crashk_size, &crashk_base);
+                        *                &crashk_size, &crashk_base);
                         */
 #endif
                } else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
@@ -1013,7 +1013,7 @@ void __init plat_mem_setup(void)
 }
 
 /*
- * Emit one character to the boot UART.  Exported for use by the
+ * Emit one character to the boot UART.         Exported for use by the
  * watchdog timer.
  */
 int prom_putchar(char c)
index ee1fb9f..295137d 100644 (file)
@@ -55,7 +55,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
 
 /**
  * Cause the function described by call_data to be executed on the passed
- * cpu.  When the function has finished, increment the finished field of
+ * cpu.         When the function has finished, increment the finished field of
  * call_data.
  */
 void octeon_send_ipi_single(int cpu, unsigned int action)
@@ -126,8 +126,8 @@ static void octeon_smp_setup(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
        /*
-        * The possible CPUs are all those present on the chip.  We
-        * will assign CPU numbers for possible cores as well.  Cores
+        * The possible CPUs are all those present on the chip.  We
+        * will assign CPU numbers for possible cores as well.  Cores
         * are always consecutively numberd from 0.
         */
        for (id = 0; id < num_cores && id < NR_CPUS; id++) {
@@ -332,7 +332,7 @@ extern void kernel_entry(unsigned long arg1, ...);
 
 static void start_after_reset(void)
 {
-       kernel_entry(0, 0, 0);  /* set a2 = 0 for secondary core */
+       kernel_entry(0, 0, 0);  /* set a2 = 0 for secondary core */
 }
 
 static int octeon_update_boot_vector(unsigned int cpu)
@@ -401,7 +401,7 @@ static int __cpuinit register_cavium_notifier(void)
 }
 late_initcall(register_cavium_notifier);
 
-#endif  /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU */
 
 struct plat_smp_ops octeon_smp_ops = {
        .send_ipi_single        = octeon_send_ipi_single,
index d3ce6fa..32265f5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  Registration of Cobalt LED platform device.
  *
- *  Copyright (C) 2007  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 691d620..8db7b5d 100644 (file)
@@ -25,7 +25,7 @@
 static struct mtd_partition cobalt_mtd_partitions[] = {
        {
                .name   = "firmware",
-               .offset = 0x0,
+               .offset = 0x0,
                .size   = 0x80000,
        },
 };
index 3ab3989..a6bc75a 100644 (file)
@@ -46,7 +46,7 @@ static __init int cobalt_rtc_add(void)
                return -ENOMEM;
 
        retval = platform_device_add_resources(pdev, cobalt_rtc_resource,
-                                              ARRAY_SIZE(cobalt_rtc_resource));
+                                              ARRAY_SIZE(cobalt_rtc_resource));
        if (retval)
                goto err_free_device;
 
index ea87d43..e3a3836 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_ATH79=y
 CONFIG_ATH79_MACH_AP121=y
+CONFIG_ATH79_MACH_AP136=y
 CONFIG_ATH79_MACH_AP81=y
 CONFIG_ATH79_MACH_DB120=y
 CONFIG_ATH79_MACH_PB44=y
diff --git a/arch/mips/configs/pnx8550_jbs_defconfig b/arch/mips/configs/pnx8550_jbs_defconfig
deleted file mode 100644 (file)
index 1d1f206..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-CONFIG_PNX8550_JBS=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_PCI=y
-CONFIG_PM=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_TCP_MD5SIG=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SGI_IOC4=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_HPT366=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_ISCSI_TCP=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_8139TOO=y
-# CONFIG_8139TOO_PIO is not set
-CONFIG_8139TOO_TUNE_TWISTER=y
-CONFIG_8139TOO_8129=y
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_PNX8XXX=y
-CONFIG_SERIAL_PNX8XXX_CONSOLE=y
-CONFIG_HW_RANDOM=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_DLM=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS1,38400n8 root=/dev/nfs ip=bootp"
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRC_CCITT=m
diff --git a/arch/mips/configs/pnx8550_stb810_defconfig b/arch/mips/configs/pnx8550_stb810_defconfig
deleted file mode 100644 (file)
index 15c66a5..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-CONFIG_PNX8550_STB810=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_PCI=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_HPT366=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_ISCSI_TCP=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_NET_PCI=y
-CONFIG_NATSEMI=y
-CONFIG_CHELSIO_T3=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_HW_RANDOM=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_DLM=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS1,38400n8 root=/dev/nfs ip=bootp"
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRC_CCITT=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
new file mode 100644 (file)
index 0000000..d1741bc
--- /dev/null
@@ -0,0 +1,167 @@
+CONFIG_RALINK=y
+CONFIG_DTB_RT305X_EVAL=y
+CONFIG_CPU_MIPS32_R2=y
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_INITRAMFS_ROOT_UID=1000
+CONFIG_INITRAMFS_ROOT_GID=1000
+# CONFIG_RD_GZIP is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_COREDUMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_HAMRADIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_EEPROM_93CX6=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_PPP=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_ISDN=y
+CONFIG_INPUT=m
+CONFIG_INPUT_POLLDEV=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SPI=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_STAGING=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32_SARWATE=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_AVERAGE=y
index 82c8528..22afed1 100644 (file)
  * DS2100/3100's, aka kn01, aka Pmax:
  *
  *     MIPS IRQ        Source
- *      --------        ------
- *             0       Software (ignored)
- *             1        Software (ignored)
- *             2        SCSI
- *             3        Lance Ethernet
- *             4        DZ11 serial
- *             5        RTC
- *             6        Memory Controller & Video
- *             7        FPU
+ *     --------        ------
+ *                   Software (ignored)
+ *            1        Software (ignored)
+ *            2        SCSI
+ *            3        Lance Ethernet
+ *            4        DZ11 serial
+ *            5        RTC
+ *            6        Memory Controller & Video
+ *            7        FPU
  *
  * DS5000/200, aka kn02, aka 3max:
  *
  *     MIPS IRQ        Source
- *      --------        ------
- *             0       Software (ignored)
- *             1        Software (ignored)
- *             2        TurboChannel
- *             3        RTC
- *             4        Reserved
- *             5        Memory Controller
- *             6        Reserved
- *             7        FPU
+ *     --------        ------
+ *                   Software (ignored)
+ *            1        Software (ignored)
+ *            2        TurboChannel
+ *            3        RTC
+ *            4        Reserved
+ *            5        Memory Controller
+ *            6        Reserved
+ *            7        FPU
  *
  * DS5000/1xx's, aka kn02ba, aka 3min:
  *
  *     MIPS IRQ        Source
- *      --------        ------
- *             0       Software (ignored)
- *             1        Software (ignored)
- *             2        TurboChannel Slot 0
- *             3        TurboChannel Slot 1
- *             4        TurboChannel Slot 2
- *             5        TurboChannel Slot 3 (ASIC)
- *             6        Halt button
- *             7        FPU/R4k timer
+ *     --------        ------
+ *                   Software (ignored)
+ *            1        Software (ignored)
+ *            2        TurboChannel Slot 0
+ *            3        TurboChannel Slot 1
+ *            4        TurboChannel Slot 2
+ *            5        TurboChannel Slot 3 (ASIC)
+ *            6        Halt button
+ *            7        FPU/R4k timer
  *
  * DS5000/2x's, aka kn02ca, aka maxine:
  *
  *     MIPS IRQ        Source
- *      --------        ------
- *             0       Software (ignored)
- *             1        Software (ignored)
- *             2        Periodic Interrupt (100usec)
- *             3        RTC
- *             4        I/O write timeout
- *             5        TurboChannel (ASIC)
- *             6        Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER)
- *             7        FPU/R4k timer
+ *     --------        ------
+ *                   Software (ignored)
+ *            1        Software (ignored)
+ *            2        Periodic Interrupt (100usec)
+ *            3        RTC
+ *            4        I/O write timeout
+ *            5        TurboChannel (ASIC)
+ *            6        Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER)
+ *            7        FPU/R4k timer
  *
  * DS5000/2xx's, aka kn03, aka 3maxplus:
  *
  *     MIPS IRQ        Source
- *      --------        ------
- *             0       Software (ignored)
- *             1        Software (ignored)
- *             2        System Board (ASIC)
- *             3        RTC
- *             4        Reserved
- *             5        Memory
- *             6        Halt Button
- *             7        FPU/R4k timer
+ *     --------        ------
+ *                   Software (ignored)
+ *            1        Software (ignored)
+ *            2        System Board (ASIC)
+ *            3        RTC
+ *            4        Reserved
+ *            5        Memory
+ *            6        Halt Button
+ *            7        FPU/R4k timer
  *
  * We handle the IRQ according to _our_ priority (see setup.c),
- * then we just return.  If multiple IRQs are pending then we will
+ * then we just return.         If multiple IRQs are pending then we will
  * just take another exception, big deal.
  */
                .align  5
                /*
                 * Find irq with highest priority
                 */
-                PTR_LA t1,cpu_mask_nr_tbl
+                PTR_LA t1,cpu_mask_nr_tbl
 1:             lw      t2,(t1)
                nop
                and     t2,t0
                /*
                 * Find irq with highest priority
                 */
-                PTR_LA t1,asic_mask_nr_tbl
+                PTR_LA t1,asic_mask_nr_tbl
 2:             lw      t2,(t1)
                nop
                and     t2,t0
                FEXPORT(cpu_all_int)            # HALT, timers, software junk
                li      a0,DEC_CPU_IRQ_BASE
                srl     t0,CAUSEB_IP
-               li      t1,CAUSEF_IP>>CAUSEB_IP # mask
+               li      t1,CAUSEF_IP>>CAUSEB_IP # mask
                b       1f
                 li     t2,4                    # nr of bits / 2
 
index ebb73c5..f434b75 100644 (file)
@@ -128,8 +128,8 @@ void __init dec_kn02xa_be_init(void)
 {
        volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);
 
-        /* For KN04 we need to make sure EE (?) is enabled in the MB.  */
-        if (current_cpu_type() == CPU_R4000SC)
+       /* For KN04 we need to make sure EE (?) is enabled in the MB.  */
+       if (current_cpu_type() == CPU_R4000SC)
                *mbcs |= KN4K_MB_CSR_EE;
        fast_iob();
 
index 8c84981..c0d1522 100644 (file)
@@ -14,7 +14,7 @@
 
 /* Maximum number of arguments supported.  Must be even!  */
 #define O32_ARGC       32
-/* Number of static registers we save.  */
+/* Number of static registers we save. */
 #define O32_STATC      11
 /* Frame size for both of the above.  */
 #define O32_FRAMESZ    (4 * O32_ARGC + SZREG * O32_STATC)
index 707b6f1..69ea5b9 100644 (file)
@@ -1,5 +1,5 @@
 #ifndef DECTYPES
-#define        DECTYPES
+#define DECTYPES
 
 #define DS2100_3100    1       /* DS2100/3100  Pmax            */
 #define DS5000_200     2       /* DS5000/200   3max            */
index 93f1239..ab16904 100644 (file)
@@ -103,7 +103,7 @@ void __init prom_init(void)
        if (prom_is_rex(magic))
                rex_clear_cache();
 
-       /* Register the early console.  */
+       /* Register the early console.  */
        register_prom_console();
 
        /* Were we compiled with the right CPU option? */
index 8c62316..0aadac7 100644 (file)
@@ -22,7 +22,7 @@ volatile unsigned long mem_err;               /* So we know an error occurred */
 
 /*
  * Probe memory in 4MB chunks, waiting for an error to tell us we've fallen
- * off the end of real memory.  Only suitable for the 2100/3100's (PMAX).
+ * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
  */
 
 #define CHUNK_SIZE 0x400000
index b874acc..741cb42 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(ioasic_base);
 /*
  * IRQ routing and priority tables.  Priorites are set as follows:
  *
- *             KN01    KN230   KN02    KN02-BA KN02-CA KN03
+ *             KN01    KN230   KN02    KN02-BA KN02-CA KN03
  *
  * MEMORY      CPU     CPU     CPU     ASIC    CPU     CPU
  * RTC         CPU     CPU     CPU     ASIC    CPU     CPU
@@ -413,7 +413,7 @@ static void __init dec_init_kn02(void)
 
 /*
  * Machine-specific initialisation for KN02-BA, aka DS5000/1xx
- * (xx = 20, 25, 33), aka 3min.  Also applies to KN04(-BA), aka
+ * (xx = 20, 25, 33), aka 3min.         Also applies to KN04(-BA), aka
  * DS5000/150, aka 4min.
  */
 static int kn02ba_interrupt[DEC_NR_INTS] __initdata = {
index 43feddd..56bda4a 100644 (file)
@@ -2,9 +2,9 @@
  * Setup the right wbflush routine for the different DECstations.
  *
  * Created with information from:
- *      DECstation 3100 Desktop Workstation Functional Specification
- *      DECstation 5000/200 KN02 System Module Functional Specification
- *      mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
+ *     DECstation 3100 Desktop Workstation Functional Specification
+ *     DECstation 5000/200 KN02 System Module Functional Specification
+ *     mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
index b5f0825..b880a83 100644 (file)
@@ -292,7 +292,7 @@ void __init arch_init_irq(void)
 
 asmlinkage void plat_irq_dispatch(void)
 {
-        unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+       unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
 
        if (pending & STATUSF_IP7)
                do_IRQ(MIPS_CPU_IRQ_BASE + 7);
index b05b08b..99ea004 100644 (file)
@@ -190,7 +190,7 @@ static struct platform_device markeins_flash_device = {
        .name           = "physmap-flash",
        .id             = 0,
        .dev            = {
-               .platform_data  = &markeins_flash_data,
+               .platform_data  = &markeins_flash_data,
        },
        .num_resources  = 1,
        .resource       = &markeins_flash_resource,
index feceebc..d710058 100644 (file)
@@ -28,7 +28,7 @@
 
 #include <asm/emma/emma2rh.h>
 
-#define        USE_CPU_COUNTER_TIMER   /* whether we use cpu counter */
+#define USE_CPU_COUNTER_TIMER  /* whether we use cpu counter */
 
 extern void markeins_led(const char *);
 
index 3033534..a8b0803 100644 (file)
@@ -15,7 +15,7 @@
 
 LONG
 ArcGetDirectoryEntry(ULONG FileID, struct linux_vdirent *Buffer,
-                     ULONG N, ULONG *Count)
+                    ULONG N, ULONG *Count)
 {
        return ARC_CALL4(get_vdirent, FileID, Buffer, N, Count);
 }
@@ -69,7 +69,7 @@ ArcGetFileInformation(ULONG FileID, struct linux_finfo *Information)
 }
 
 LONG ArcSetFileInformation(ULONG FileID, ULONG AttributeFlags,
-                           ULONG AttributeMask)
+                          ULONG AttributeMask)
 {
        return ARC_CALL3(set_finfo, FileID, AttributeFlags, AttributeMask);
 }
index 54a33c7..f90266c 100644 (file)
@@ -100,7 +100,7 @@ void __init prom_identify_arch(void)
        if (p == NULL) {
 #ifdef CONFIG_SGI_IP27
                /* IP27 PROM misbehaves, seems to not implement ARC
-                  GetChild().  So we just assume it's an IP27.  */
+                  GetChild().  So we just assume it's an IP27.  */
                iname = "SGI-IP27";
 #else
                iname = "Unknown";
index 8b8eea2..5537b94 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * memory.c: PROM library functions for acquiring/using memory descriptors
- *           given to us from the ARCS firmware.
+ *          given to us from the ARCS firmware.
  *
  * Copyright (C) 1996 by David S. Miller
  * Copyright (C) 1999, 2000, 2001 by Ralf Baechle
index b7f9dd3..7e8ba5c 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/bcache.h>
 
 /*
- * IP22 boardcache is not compatible with board caches.  Thus we disable it
+ * IP22 boardcache is not compatible with board caches.         Thus we disable it
  * during romvec action.  Since r4xx0.c is always compiled and linked with your
  * kernel, this shouldn't cause any harm regardless what MIPS processor you
  * have.
index e0a6871..b308b2a 100644 (file)
@@ -14,7 +14,7 @@
 
 /* Maximum number of arguments supported.  Must be even!  */
 #define O32_ARGC       32
-/* Number of static registers we save.  */
+/* Number of static registers we save. */
 #define O32_STATC      11
 /* Frame size for static register  */
 #define O32_FRAMESZ    (SZREG * O32_STATC)
index 96ba992..2c2cb18 100644 (file)
  * registers
  */
 #define PROM_GET_MEMCONF       58
-#define PROM_GET_HWCONF         61
+#define PROM_GET_HWCONF                61
 
 #define PROM_VEC               (u64 *)CKSEG1ADDR(0x1fc00000)
 #define PROM_ENTRY(x)          (PROM_VEC + (x))
 
-#define ___prom_putchar         ((int *(*)(int))PROM_ENTRY(PROM_PUTCHAR))
-#define ___prom_getenv          ((char *(*)(char *))PROM_ENTRY(PROM_GETENV))
-#define ___prom_get_memconf     ((void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF))
-#define ___prom_get_hwconf      ((u32 (*)(void))PROM_ENTRY(PROM_GET_HWCONF))
+#define ___prom_putchar                ((int *(*)(int))PROM_ENTRY(PROM_PUTCHAR))
+#define ___prom_getenv         ((char *(*)(char *))PROM_ENTRY(PROM_GETENV))
+#define ___prom_get_memconf    ((void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF))
+#define ___prom_get_hwconf     ((u32 (*)(void))PROM_ENTRY(PROM_GET_HWCONF))
 
 #ifdef CONFIG_64BIT
 
 static u8 o32_stk[16384];
-#define O32_STK   &o32_stk[sizeof(o32_stk)]
+#define O32_STK          &o32_stk[sizeof(o32_stk)]
 
 #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
                                     __asm__(#fun " = call_o32")
@@ -52,13 +52,13 @@ void  __PROM_O32(__prom_get_memconf, (void (*)(void *), void *, void *));
 u32   __PROM_O32(__prom_get_hwconf, (u32 (*)(void), void *));
 
 #define _prom_putchar(x)     __prom_putchar(___prom_putchar, O32_STK, x)
-#define _prom_getenv(x)      __prom_getenv(___prom_getenv, O32_STK, x)
+#define _prom_getenv(x)             __prom_getenv(___prom_getenv, O32_STK, x)
 #define _prom_get_memconf(x) __prom_get_memconf(___prom_get_memconf, O32_STK, x)
 #define _prom_get_hwconf()   __prom_get_hwconf(___prom_get_hwconf, O32_STK)
 
 #else
 #define _prom_putchar(x)     ___prom_putchar(x)
-#define _prom_getenv(x)      ___prom_getenv(x)
+#define _prom_getenv(x)             ___prom_getenv(x)
 #define _prom_get_memconf(x) ___prom_get_memconf(x)
 #define _prom_get_hwconf(x)  ___prom_get_hwconf(x)
 #endif
index 9252d9b..909bb69 100644 (file)
 
 struct mips_abi {
        int (* const setup_frame)(void *sig_return, struct k_sigaction *ka,
-                                 struct pt_regs *regs, int signr,
-                                 sigset_t *set);
+                                 struct pt_regs *regs, int signr,
+                                 sigset_t *set);
        const unsigned long     signal_return_offset;
        int (* const setup_rt_frame)(void *sig_return, struct k_sigaction *ka,
-                              struct pt_regs *regs, int signr,
-                              sigset_t *set, siginfo_t *info);
+                              struct pt_regs *regs, int signr,
+                              sigset_t *set, siginfo_t *info);
        const unsigned long     rt_signal_return_offset;
        const unsigned long     restart;
 };
index 569f80a..13d61c0 100644 (file)
  * Returns the physical address of a CKSEGx / XKPHYS address
  */
 #define CPHYSADDR(a)           ((_ACAST32_(a)) & 0x1fffffff)
-#define XPHYSADDR(a)            ((_ACAST64_(a)) &                      \
+#define XPHYSADDR(a)           ((_ACAST64_(a)) &                       \
                                 _CONST64_(0x000000ffffffffff))
 
 #ifdef CONFIG_64BIT
 
 /*
  * Memory segments (64bit kernel mode addresses)
- * The compatibility segments use the full 64-bit sign extended value.  Note
+ * The compatibility segments use the full 64-bit sign extended value. Note
  * the R8000 doesn't have them so don't reference these in generic MIPS code.
  */
 #define XKUSEG                 _CONST64_(0x0000000000000000)
 
 /*
  * The ultimate limited of the 64-bit MIPS architecture:  2 bits for selecting
- * the region, 3 bits for the CCA mode.  This leaves 59 bits of which the
+ * the region, 3 bits for the CCA mode.         This leaves 59 bits of which the
  * R8000 implements most with its 48-bit physical address space.
  */
 #define TO_PHYS_MASK   _CONST64_(0x07ffffffffffffff)   /* 2^^59 - 1 */
index 608cfcf..164a21e 100644 (file)
  * Not used for the kernel but here seems to be the right place.
  */
 #ifdef __PIC__
-#define CPRESTORE(register)                             \
+#define CPRESTORE(register)                            \
                .cprestore register
-#define CPADD(register)                                 \
+#define CPADD(register)                                        \
                .cpadd  register
-#define CPLOAD(register)                                \
-               .cpload register
+#define CPLOAD(register)                               \
+               .cpload register
 #else
 #define CPRESTORE(register)
 #define CPADD(register)
 /*
  * LEAF - declare leaf routine
  */
-#define        LEAF(symbol)                                    \
-               .globl  symbol;                         \
-               .align  2;                              \
-               .type   symbol, @function;              \
-               .ent    symbol, 0;                      \
+#define LEAF(symbol)                                   \
+               .globl  symbol;                         \
+               .align  2;                              \
+               .type   symbol, @function;              \
+               .ent    symbol, 0;                      \
 symbol:                .frame  sp, 0, ra
 
 /*
  * NESTED - declare nested routine entry point
  */
-#define        NESTED(symbol, framesize, rpc)                  \
-               .globl  symbol;                         \
-               .align  2;                              \
-               .type   symbol, @function;              \
-               .ent    symbol, 0;                       \
+#define NESTED(symbol, framesize, rpc)                 \
+               .globl  symbol;                         \
+               .align  2;                              \
+               .type   symbol, @function;              \
+               .ent    symbol, 0;                       \
 symbol:                .frame  sp, framesize, rpc
 
 /*
  * END - mark end of function
  */
-#define        END(function)                                   \
-               .end    function;                       \
+#define END(function)                                  \
+               .end    function;                       \
                .size   function, .-function
 
 /*
  * EXPORT - export definition of symbol
  */
 #define EXPORT(symbol)                                 \
-               .globl  symbol;                         \
+               .globl  symbol;                         \
 symbol:
 
 /*
@@ -90,16 +90,16 @@ symbol:
 /*
  * ABS - export absolute symbol
  */
-#define        ABS(symbol,value)                               \
-               .globl  symbol;                         \
+#define ABS(symbol,value)                              \
+               .globl  symbol;                         \
 symbol         =       value
 
-#define        PANIC(msg)                                      \
+#define PANIC(msg)                                     \
                .set    push;                           \
-               .set    reorder;                        \
-               PTR_LA  a0, 8f;                          \
-               jal     panic;                          \
-9:             b       9b;                             \
+               .set    reorder;                        \
+               PTR_LA  a0, 8f;                          \
+               jal     panic;                          \
+9:             b       9b;                             \
                .set    pop;                            \
                TEXT(msg)
 
@@ -107,31 +107,31 @@ symbol            =       value
  * Print formatted string
  */
 #ifdef CONFIG_PRINTK
-#define PRINT(string)                                   \
+#define PRINT(string)                                  \
                .set    push;                           \
-               .set    reorder;                        \
-               PTR_LA  a0, 8f;                          \
-               jal     printk;                         \
+               .set    reorder;                        \
+               PTR_LA  a0, 8f;                          \
+               jal     printk;                         \
                .set    pop;                            \
                TEXT(string)
 #else
 #define PRINT(string)
 #endif
 
-#define        TEXT(msg)                                       \
+#define TEXT(msg)                                      \
                .pushsection .data;                     \
-8:             .asciiz msg;                            \
+8:             .asciiz msg;                            \
                .popsection;
 
 /*
  * Build text tables
  */
-#define TTABLE(string)                                  \
+#define TTABLE(string)                                 \
                .pushsection .text;                     \
-               .word   1f;                             \
+               .word   1f;                             \
                .popsection                             \
                .pushsection .data;                     \
-1:             .asciiz string;                         \
+1:             .asciiz string;                         \
                .popsection
 
 /*
@@ -143,13 +143,13 @@ symbol            =       value
  */
 #ifdef CONFIG_CPU_HAS_PREFETCH
 
-#define PREF(hint,addr)                                 \
+#define PREF(hint,addr)                                        \
                .set    push;                           \
                .set    mips4;                          \
                pref    hint, addr;                     \
                .set    pop
 
-#define PREFX(hint,addr)                                \
+#define PREFX(hint,addr)                               \
                .set    push;                           \
                .set    mips4;                          \
                prefx   hint, addr;                     \
@@ -166,42 +166,42 @@ symbol            =       value
  * MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
  */
 #if (_MIPS_ISA == _MIPS_ISA_MIPS1)
-#define MOVN(rd, rs, rt)                                \
+#define MOVN(rd, rs, rt)                               \
                .set    push;                           \
                .set    reorder;                        \
-               beqz    rt, 9f;                         \
-               move    rd, rs;                         \
+               beqz    rt, 9f;                         \
+               move    rd, rs;                         \
                .set    pop;                            \
 9:
-#define MOVZ(rd, rs, rt)                                \
+#define MOVZ(rd, rs, rt)                               \
                .set    push;                           \
                .set    reorder;                        \
-               bnez    rt, 9f;                         \
-               move    rd, rs;                         \
+               bnez    rt, 9f;                         \
+               move    rd, rs;                         \
                .set    pop;                            \
 9:
 #endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
 #if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
-#define MOVN(rd, rs, rt)                                \
+#define MOVN(rd, rs, rt)                               \
                .set    push;                           \
                .set    noreorder;                      \
-               bnezl   rt, 9f;                         \
-                move   rd, rs;                         \
+               bnezl   rt, 9f;                         \
+                move   rd, rs;                         \
                .set    pop;                            \
 9:
-#define MOVZ(rd, rs, rt)                                \
+#define MOVZ(rd, rs, rt)                               \
                .set    push;                           \
                .set    noreorder;                      \
-               beqzl   rt, 9f;                         \
-                move   rd, rs;                         \
+               beqzl   rt, 9f;                         \
+                move   rd, rs;                         \
                .set    pop;                            \
 9:
 #endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
 #if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
     (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
-#define MOVN(rd, rs, rt)                                \
+#define MOVN(rd, rs, rt)                               \
                movn    rd, rs, rt
-#define MOVZ(rd, rs, rt)                                \
+#define MOVZ(rd, rs, rt)                               \
                movz    rd, rs, rt
 #endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
 
index 01cc6ba..08b6079 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Atomic operations that C can't guarantee us.  Useful for
+ * Atomic operations that C can't guarantee us.         Useful for
  * resource counting etc..
  *
  * But use these as seldom as possible since they are much more slower
@@ -21,7 +21,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
 
-#define ATOMIC_INIT(i)    { (i) }
+#define ATOMIC_INIT(i)   { (i) }
 
 /*
  * atomic_read - read atomic variable
index f7fdc24..314ab55 100644 (file)
@@ -18,7 +18,7 @@
  * over this barrier.  All reads preceding this primitive are guaranteed
  * to access memory (but not necessarily other CPUs' caches) before any
  * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
+ * any of the preceding reads. This primitive is much lighter weight than
  * rmb() on most CPUs, and is never heavier weight than is
  * rmb().
  *
@@ -43,7 +43,7 @@
  * </programlisting>
  *
  * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
+ * two reads are separated by a read_barrier_depends().         However,
  * the following code, with the same initial values for "a" and "b":
  *
  * <programlisting>
@@ -57,7 +57,7 @@
  * </programlisting>
  *
  * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * the read of "a" and the read of "b".         Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
  * in cases like this where there are no data dependencies.
  */
@@ -92,7 +92,7 @@
                : "memory")
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 # define OCTEON_SYNCW_STR      ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
-# define __syncw()     __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
+# define __syncw()     __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
 
 # define fast_wmb()    __syncw()
 # define fast_rmb()    barrier()
 #endif
 
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
-#define __WEAK_LLSC_MB         "       sync    \n"
+#define __WEAK_LLSC_MB         "       sync    \n"
 #else
 #define __WEAK_LLSC_MB         "               \n"
 #endif
index 0ba9d6e..8c34484 100644 (file)
@@ -11,7 +11,7 @@
 
 
 /* Some R4000 / R4400 / R4600 / R5000 machines may have a non-dma-coherent,
-   chipset implemented caches.  On machines with other CPUs the CPU does the
+   chipset implemented caches. On machines with other CPUs the CPU does the
    cache thing itself. */
 struct bcache_ops {
        void (*bc_enable)(void);
index 46ac73a..71305a8 100644 (file)
 #define SZLONG_MASK 31UL
 #define __LL           "ll     "
 #define __SC           "sc     "
-#define __INS          "ins    "
-#define __EXT          "ext    "
+#define __INS          "ins    "
+#define __EXT          "ext    "
 #elif _MIPS_SZLONG == 64
 #define SZLONG_LOG 6
 #define SZLONG_MASK 63UL
 #define __LL           "lld    "
 #define __SC           "scd    "
-#define __INS          "dins    "
-#define __EXT          "dext    "
+#define __INS          "dins    "
+#define __EXT          "dext    "
 #endif
 
 /*
@@ -357,7 +357,7 @@ static inline int test_and_clear_bit(unsigned long nr,
                "1:     " __LL  "%0, %1         # test_and_clear_bit    \n"
                "       or      %2, %0, %3                              \n"
                "       xor     %2, %3                                  \n"
-               "       " __SC  "%2, %1                                 \n"
+               "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
@@ -371,10 +371,10 @@ static inline int test_and_clear_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       " __LL  "%0, %1 # test_and_clear_bit    \n"
+                       "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       " __EXT "%2, %0, %3, 1                  \n"
-                       "       " __INS "%0, $0, %3, 1                  \n"
-                       "       " __SC  "%0, %1                         \n"
+                       "       " __INS "%0, $0, %3, 1                  \n"
+                       "       " __SC  "%0, %1                         \n"
                        : "=&r" (temp), "+m" (*m), "=&r" (res)
                        : "ir" (bit)
                        : "memory");
@@ -387,10 +387,10 @@ static inline int test_and_clear_bit(unsigned long nr,
                do {
                        __asm__ __volatile__(
                        "       .set    mips3                           \n"
-                       "       " __LL  "%0, %1 # test_and_clear_bit    \n"
+                       "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       or      %2, %0, %3                      \n"
                        "       xor     %2, %3                          \n"
-                       "       " __SC  "%2, %1                         \n"
+                       "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+m" (*m), "=&r" (res)
                        : "r" (1UL << bit)
@@ -444,7 +444,7 @@ static inline int test_and_change_bit(unsigned long nr,
                do {
                        __asm__ __volatile__(
                        "       .set    mips3                           \n"
-                       "       " __LL  "%0, %1 # test_and_change_bit   \n"
+                       "       " __LL  "%0, %1 # test_and_change_bit   \n"
                        "       xor     %2, %0, %3                      \n"
                        "       " __SC  "\t%2, %1                       \n"
                        "       .set    mips0                           \n"
index 7a51d87..b71dd5b 100644 (file)
 /*
  * Valid machtype for group PMC-MSP
  */
-#define MACH_MSP4200_EVAL       0      /* PMC-Sierra MSP4200 Evaluation */
-#define MACH_MSP4200_GW         1      /* PMC-Sierra MSP4200 Gateway demo */
-#define MACH_MSP4200_FPGA       2      /* PMC-Sierra MSP4200 Emulation */
-#define MACH_MSP7120_EVAL       3      /* PMC-Sierra MSP7120 Evaluation */
-#define MACH_MSP7120_GW         4      /* PMC-Sierra MSP7120 Residential GW */
-#define MACH_MSP7120_FPGA       5      /* PMC-Sierra MSP7120 Emulation */
-#define MACH_MSP_OTHER        255      /* PMC-Sierra unknown board type */
+#define MACH_MSP4200_EVAL            /* PMC-Sierra MSP4200 Evaluation */
+#define MACH_MSP4200_GW                1       /* PMC-Sierra MSP4200 Gateway demo */
+#define MACH_MSP4200_FPGA            /* PMC-Sierra MSP4200 Emulation */
+#define MACH_MSP7120_EVAL            /* PMC-Sierra MSP7120 Evaluation */
+#define MACH_MSP7120_GW                4       /* PMC-Sierra MSP7120 Residential GW */
+#define MACH_MSP7120_FPGA            /* PMC-Sierra MSP7120 Emulation */
+#define MACH_MSP_OTHER       255       /* PMC-Sierra unknown board type */
 
 /*
  * Valid machtype for group Mikrotik
  */
-#define        MACH_MIKROTIK_RB532     0       /* Mikrotik RouterBoard 532     */
-#define MACH_MIKROTIK_RB532A   1       /* Mikrotik RouterBoard 532A    */
+#define MACH_MIKROTIK_RB532    0       /* Mikrotik RouterBoard 532     */
+#define MACH_MIKROTIK_RB532A   1       /* Mikrotik RouterBoard 532A    */
 
 /*
  * Valid machtype for Loongson family
@@ -67,7 +67,7 @@
 #define MACH_LEMOTE_ML2F7      3
 #define MACH_LEMOTE_YL2F89     4
 #define MACH_DEXXON_GDIUM2F10  5
-#define MACH_LEMOTE_NAS        6
+#define MACH_LEMOTE_NAS               6
 #define MACH_LEMOTE_LL2F       7
 #define MACH_LOONGSON_END      8
 
diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/asm/break.h
new file mode 100644 (file)
index 0000000..0ef1142
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 2003 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_BREAK_H
+#define __ASM_BREAK_H
+
+#ifdef __UAPI_ASM_BREAK_H
+#error "Error: Do not directly include <uapi/asm/break.h>"
+#endif
+#include <uapi/asm/break.h>
+
+/*
+ * Break codes used internally to the kernel.
+ */
+#define BRK_KDB                513     /* Used in KDB_ENTER() */
+#define BRK_MEMU       514     /* Used by FPU emulator */
+#define BRK_KPROBE_BP  515     /* Kprobe break */
+#define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
+#define BRK_MULOVF     1023    /* Multiply overflow */
+
+#endif /* __ASM_BREAK_H */
index 8f99c11..68f37e3 100644 (file)
@@ -8,20 +8,20 @@
  * (C) Copyright 1996, 97, 99, 2002, 03 Ralf Baechle
  * (C) Copyright 1999 Silicon Graphics, Inc.
  */
-#ifndef        __ASM_CACHEOPS_H
-#define        __ASM_CACHEOPS_H
+#ifndef __ASM_CACHEOPS_H
+#define __ASM_CACHEOPS_H
 
 /*
  * Cache Operations available on all MIPS processors with R4000-style caches
  */
-#define Index_Invalidate_I      0x00
-#define Index_Writeback_Inv_D   0x01
+#define Index_Invalidate_I     0x00
+#define Index_Writeback_Inv_D  0x01
 #define Index_Load_Tag_I       0x04
 #define Index_Load_Tag_D       0x05
 #define Index_Store_Tag_I      0x08
 #define Index_Store_Tag_D      0x09
 #if defined(CONFIG_CPU_LOONGSON2)
-#define Hit_Invalidate_I       0x00
+#define Hit_Invalidate_I       0x00
 #else
 #define Hit_Invalidate_I       0x10
 #endif
@@ -39,8 +39,8 @@
 /*
  * R4000SC and R4400SC-specific cacheops
  */
-#define Index_Invalidate_SI     0x02
-#define Index_Writeback_Inv_SD  0x03
+#define Index_Invalidate_SI    0x02
+#define Index_Writeback_Inv_SD 0x03
 #define Index_Load_Tag_SI      0x06
 #define Index_Load_Tag_SD      0x07
 #define Index_Store_Tag_SI     0x0A
index f2f7c6c..ac3d2b8 100644 (file)
@@ -194,7 +194,7 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
 
 #define _HAVE_ARCH_IPV6_CSUM
 static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
-                                         const struct in6_addr *daddr,
+                                         const struct in6_addr *daddr,
                                          __u32 len, unsigned short proto,
                                          __wsum sum)
 {
index eee10dc..466069b 100644 (file)
@@ -146,7 +146,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
                "       .set    mips3                           \n"     \
-               "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
+               "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
@@ -163,7 +163,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
                "       .set    mips3                           \n"     \
-               "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
+               "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
@@ -205,7 +205,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
                                                                        \
        switch (sizeof(*(__ptr))) {                                     \
        case 4:                                                         \
-               __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
+               __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
                break;                                                  \
        case 8:                                                         \
                if (sizeof(long) == 8) {                                \
index 6599a90..64e0b93 100644 (file)
@@ -18,9 +18,9 @@ static inline int __copy_conv_sigset_to_user(compat_sigset_t __user *d,
        BUG_ON(sizeof(*d) != sizeof(*s));
        BUG_ON(_NSIG_WORDS != 2);
 
-       err  = __put_user(s->sig[0],       &d->sig[0]);
+       err  = __put_user(s->sig[0],       &d->sig[0]);
        err |= __put_user(s->sig[0] >> 32, &d->sig[1]);
-       err |= __put_user(s->sig[1],       &d->sig[2]);
+       err |= __put_user(s->sig[1],       &d->sig[2]);
        err |= __put_user(s->sig[1] >> 32, &d->sig[3]);
 
        return err;
index ebaae96..c4bd54a 100644 (file)
@@ -120,7 +120,7 @@ struct compat_statfs {
 
 typedef u32            compat_old_sigset_t;    /* at least 32 bits */
 
-#define _COMPAT_NSIG           128             /* Don't ask !$@#% ...  */
+#define _COMPAT_NSIG           128             /* Don't ask !$@#% ...  */
 #define _COMPAT_NSIG_BPW       32
 
 typedef u32            compat_sigset_word;
@@ -168,7 +168,7 @@ typedef struct compat_siginfo {
                        s32 _addr; /* faulting insn/memory ref. */
                } _sigfault;
 
-               /* SIGPOLL, SIGXFSZ (To do ...)  */
+               /* SIGPOLL, SIGXFSZ (To do ...)  */
                struct {
                        int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
@@ -179,7 +179,7 @@ typedef struct compat_siginfo {
                        timer_t _tid;           /* timer id */
                        int _overrun;           /* overrun count */
                        compat_sigval_t _sigval;/* same as below */
-                       int _sys_private;       /* not to be passed to user */
+                       int _sys_private;       /* not to be passed to user */
                } _timer;
 
                /* POSIX.1b signals */
index c507b93..1a57e8b 100644 (file)
@@ -14,7 +14,7 @@
 #include <cpu-feature-overrides.h>
 
 #ifndef current_cpu_type
-#define current_cpu_type()      current_cpu_data.cputype
+#define current_cpu_type()     current_cpu_data.cputype
 #endif
 
 /*
 #define cpu_has_mips16         (cpu_data[0].ases & MIPS_ASE_MIPS16)
 #endif
 #ifndef cpu_has_mdmx
-#define cpu_has_mdmx           (cpu_data[0].ases & MIPS_ASE_MDMX)
+#define cpu_has_mdmx          (cpu_data[0].ases & MIPS_ASE_MDMX)
 #endif
 #ifndef cpu_has_mips3d
-#define cpu_has_mips3d         (cpu_data[0].ases & MIPS_ASE_MIPS3D)
+#define cpu_has_mips3d        (cpu_data[0].ases & MIPS_ASE_MIPS3D)
 #endif
 #ifndef cpu_has_smartmips
 #define cpu_has_smartmips      (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
@@ -98,6 +98,9 @@
 #ifndef cpu_has_rixi
 #define cpu_has_rixi           (cpu_data[0].options & MIPS_CPU_RIXI)
 #endif
+#ifndef cpu_has_mmips
+#define cpu_has_mmips          (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+#endif
 #ifndef cpu_has_vtag_icache
 #define cpu_has_vtag_icache    (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
 #endif
 #define cpu_has_ic_fills_f_dc  (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
 #endif
 #ifndef cpu_has_pindexed_dcache
-#define cpu_has_pindexed_dcache        (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
+#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
 #endif
 
 /*
- * I-Cache snoops remote store.  This only matters on SMP.  Some multiprocessors
+ * I-Cache snoops remote store.         This only matters on SMP.  Some multiprocessors
  * such as the R10000 have I-Caches that snoop local stores; the embedded ones
  * don't.  For maintaining I-cache coherency this means we need to flush the
  * D-cache all the way back to whever the I-cache does refills from, so the
 #endif
 #endif
 
+# define cpu_has_mips_1                (cpu_data[0].isa_level & MIPS_CPU_ISA_I)
+#ifndef cpu_has_mips_2
+# define cpu_has_mips_2                (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
+#endif
+#ifndef cpu_has_mips_3
+# define cpu_has_mips_3                (cpu_data[0].isa_level & MIPS_CPU_ISA_III)
+#endif
+#ifndef cpu_has_mips_4
+# define cpu_has_mips_4                (cpu_data[0].isa_level & MIPS_CPU_ISA_IV)
+#endif
+#ifndef cpu_has_mips_5
+# define cpu_has_mips_5                (cpu_data[0].isa_level & MIPS_CPU_ISA_V)
+#endif
 # ifndef cpu_has_mips32r1
 # define cpu_has_mips32r1      (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1)
 # endif
  */
 #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
 #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
-#define cpu_has_mips_r1        (cpu_has_mips32r1 | cpu_has_mips64r1)
-#define cpu_has_mips_r2        (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
+#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
 #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
                         cpu_has_mips64r1 | cpu_has_mips64r2)
 
 
 /*
  * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ.  The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
  * has CLO and CLZ but not DCLO nor DCLZ.  For 64-bit kernels
  * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
  */
 # define cpu_has_64bits                (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
 # endif
 # ifndef cpu_has_64bit_zero_reg
-# define cpu_has_64bit_zero_reg        (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
+# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
 # endif
 # ifndef cpu_has_64bit_gp_regs
 # define cpu_has_64bit_gp_regs         0
 #define cpu_has_perf_cntr_intr_bit     (cpu_data[0].options & MIPS_CPU_PCI)
 #endif
 
+#ifndef cpu_has_vz
+#define cpu_has_vz             (cpu_data[0].ases & MIPS_ASE_VZ)
+#endif
+
 #endif /* __ASM_CPU_FEATURES_H */
index c454550..41401d8 100644 (file)
@@ -52,14 +52,14 @@ struct cpuinfo_mips {
        unsigned int            cputype;
        int                     isa_level;
        int                     tlbsize;
-       struct cache_desc       icache; /* Primary I-cache */
-       struct cache_desc       dcache; /* Primary D or combined I/D cache */
-       struct cache_desc       scache; /* Secondary cache */
-       struct cache_desc       tcache; /* Tertiary/split secondary cache */
-       int                     srsets; /* Shadow register sets */
+       struct cache_desc       icache; /* Primary I-cache */
+       struct cache_desc       dcache; /* Primary D or combined I/D cache */
+       struct cache_desc       scache; /* Secondary cache */
+       struct cache_desc       tcache; /* Tertiary/split secondary cache */
+       int                     srsets; /* Shadow register sets */
        int                     core;   /* physical core number */
 #ifdef CONFIG_64BIT
-       int                     vmbits; /* Virtual memory size in bits */
+       int                     vmbits; /* Virtual memory size in bits */
 #endif
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
        /*
@@ -68,12 +68,12 @@ struct cpuinfo_mips {
         * exception resources, ASID spaces, etc, are common
         * to all TCs within the same VPE.
         */
-       int                     vpe_id;  /* Virtual Processor number */
+       int                     vpe_id;  /* Virtual Processor number */
 #endif
 #ifdef CONFIG_MIPS_MT_SMTC
-       int                     tc_id;   /* Thread Context number */
+       int                     tc_id;   /* Thread Context number */
 #endif
-       void                    *data;  /* Additional data */
+       void                    *data;  /* Additional data */
        unsigned int            watch_reg_count;   /* Number that exist */
        unsigned int            watch_reg_use_cnt; /* Usable by ptrace */
 #define NUM_WATCH_REGS 4
index 90112ad..dd86ab2 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * cpu.h: Values of the PRId register used to match up
- *        various MIPS cpu types.
+ *       various MIPS cpu types.
  *
  * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  * Copyright (C) 2004  Maciej W. Rozycki
@@ -9,14 +9,14 @@
 #define _ASM_CPU_H
 
 /* Assigned Company values for bits 23:16 of the PRId Register
-   (CP0 register 15, select 0).  As of the MIPS32 and MIPS64 specs from
+   (CP0 register 15, select 0).         As of the MIPS32 and MIPS64 specs from
    MTI, the PRId register is defined in this (backwards compatible)
    way:
 
   +----------------+----------------+----------------+----------------+
-  | Company Options| Company ID     | Processor ID   | Revision       |
+  | Company Options| Company ID            | Processor ID   | Revision       |
   +----------------+----------------+----------------+----------------+
-   31            24 23            16 15             8 7
+   31           24 23            16 15             8 7
 
    I don't have docs for all the previous processors, but my impression is
    that bits 16-23 have been 0 for all MIPS processors before the MIPS32/64
@@ -29,7 +29,7 @@
 #define PRID_COMP_ALCHEMY      0x030000
 #define PRID_COMP_SIBYTE       0x040000
 #define PRID_COMP_SANDCRAFT    0x050000
-#define PRID_COMP_NXP          0x060000
+#define PRID_COMP_NXP          0x060000
 #define PRID_COMP_TOSHIBA      0x070000
 #define PRID_COMP_LSI          0x080000
 #define PRID_COMP_LEXRA                0x0b0000
@@ -38,9 +38,9 @@
 #define PRID_COMP_INGENIC      0xd00000
 
 /*
- * Assigned values for the product ID register.  In order to detect a
+ * Assigned values for the product ID register.         In order to detect a
  * certain CPU type exactly eventually additional registers may need to
- * be examined.  These are valid when 23:16 == PRID_COMP_LEGACY
+ * be examined.         These are valid when 23:16 == PRID_COMP_LEGACY
  */
 #define PRID_IMP_R2000         0x0100
 #define PRID_IMP_AU1_REV1      0x0100
 #define PRID_IMP_1004K         0x9900
 #define PRID_IMP_1074K         0x9a00
 #define PRID_IMP_M14KC         0x9c00
+#define PRID_IMP_M14KEC                0x9e00
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
  */
 
-#define PRID_IMP_SB1            0x0100
-#define PRID_IMP_SB1A           0x1100
+#define PRID_IMP_SB1           0x0100
+#define PRID_IMP_SB1A          0x1100
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SANDCRAFT
  */
 
-#define PRID_IMP_SR71000        0x0400
+#define PRID_IMP_SR71000       0x0400
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
  * These are the PRID's for when 23:16 == PRID_COMP_INGENIC
  */
 
-#define PRID_IMP_JZRISC        0x0200
+#define PRID_IMP_JZRISC               0x0200
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC
 #define PRID_REV_R3000A                0x0030
 #define PRID_REV_R3000         0x0020
 #define PRID_REV_R2000A                0x0010
-#define PRID_REV_TX3912        0x0010
-#define PRID_REV_TX3922        0x0030
-#define PRID_REV_TX3927        0x0040
+#define PRID_REV_TX3912                0x0010
+#define PRID_REV_TX3922                0x0030
+#define PRID_REV_TX3927                0x0040
 #define PRID_REV_VR4111                0x0050
 #define PRID_REV_VR4181                0x0050  /* Same as VR4111 */
 #define PRID_REV_VR4121                0x0060
  * FPU implementation/revision register (CP1 control register 0).
  *
  * +---------------------------------+----------------+----------------+
- * | 0                               | Implementation | Revision       |
+ * | 0                              | Implementation | Revision       |
  * +---------------------------------+----------------+----------------+
- *  31                             16 15             8 7              0
+ *  31                            16 15             8 7              0
  */
 
 #define FPIR_IMP_NONE          0x0000
@@ -264,6 +265,7 @@ enum cpu_type_enum {
        CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
        CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
        CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
+       CPU_M14KEC,
 
        /*
         * MIPS64 class processors
@@ -322,6 +324,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_ULRI          0x00200000 /* CPU has ULRI feature */
 #define MIPS_CPU_PCI           0x00400000 /* CPU has Perf Ctr Int indicator */
 #define MIPS_CPU_RIXI          0x00800000 /* CPU has TLB Read/eXec Inhibit */
+#define MIPS_CPU_MICROMIPS     0x01000000 /* CPU has microMIPS capability */
 
 /*
  * CPU ASE encodings
@@ -333,6 +336,6 @@ enum cpu_type_enum {
 #define MIPS_ASE_DSP           0x00000010 /* Signal Processing ASE */
 #define MIPS_ASE_MIPSMT                0x00000020 /* CPU supports MIPS MT */
 #define MIPS_ASE_DSP2P         0x00000040 /* Signal Processing ASE Rev 2 */
-
+#define MIPS_ASE_VZ            0x00000080 /* Virtualization ASE */
 
 #endif /* _ASM_CPU_H */
index 4cbc1f8..a8665a7 100644 (file)
  */
 #define IOASIC_SYS_ROM (0*IOASIC_SLOT_SIZE)    /* system board ROM */
 #define IOASIC_IOCTL   (1*IOASIC_SLOT_SIZE)    /* I/O ASIC */
-#define IOASIC_ESAR    (2*IOASIC_SLOT_SIZE)    /* LANCE MAC address chip */
-#define IOASIC_LANCE   (3*IOASIC_SLOT_SIZE)    /* LANCE Ethernet */
-#define IOASIC_SCC0    (4*IOASIC_SLOT_SIZE)    /* SCC #0 */
+#define IOASIC_ESAR    (2*IOASIC_SLOT_SIZE)    /* LANCE MAC address chip */
+#define IOASIC_LANCE   (3*IOASIC_SLOT_SIZE)    /* LANCE Ethernet */
+#define IOASIC_SCC0    (4*IOASIC_SLOT_SIZE)    /* SCC #0 */
 #define IOASIC_VDAC_HI (5*IOASIC_SLOT_SIZE)    /* VDAC (maxine) */
-#define IOASIC_SCC1    (6*IOASIC_SLOT_SIZE)    /* SCC #1 (3min, 3max+) */
+#define IOASIC_SCC1    (6*IOASIC_SLOT_SIZE)    /* SCC #1 (3min, 3max+) */
 #define IOASIC_VDAC_LO (7*IOASIC_SLOT_SIZE)    /* VDAC (maxine) */
-#define IOASIC_TOY     (8*IOASIC_SLOT_SIZE)    /* RTC */
-#define IOASIC_ISDN    (9*IOASIC_SLOT_SIZE)    /* ISDN (maxine) */
+#define IOASIC_TOY     (8*IOASIC_SLOT_SIZE)    /* RTC */
+#define IOASIC_ISDN    (9*IOASIC_SLOT_SIZE)    /* ISDN (maxine) */
 #define IOASIC_ERRADDR (9*IOASIC_SLOT_SIZE)    /* bus error address (3max+) */
-#define IOASIC_CHKSYN  (10*IOASIC_SLOT_SIZE)   /* ECC syndrome (3max+) */
+#define IOASIC_CHKSYN  (10*IOASIC_SLOT_SIZE)   /* ECC syndrome (3max+) */
 #define IOASIC_ACC_BUS (10*IOASIC_SLOT_SIZE)   /* ACCESS.bus (maxine) */
-#define IOASIC_MCR     (11*IOASIC_SLOT_SIZE)   /* memory control (3max+) */
-#define IOASIC_FLOPPY  (11*IOASIC_SLOT_SIZE)   /* FDC (maxine) */
-#define IOASIC_SCSI    (12*IOASIC_SLOT_SIZE)   /* ASC SCSI */
+#define IOASIC_MCR     (11*IOASIC_SLOT_SIZE)   /* memory control (3max+) */
+#define IOASIC_FLOPPY  (11*IOASIC_SLOT_SIZE)   /* FDC (maxine) */
+#define IOASIC_SCSI    (12*IOASIC_SLOT_SIZE)   /* ASC SCSI */
 #define IOASIC_FDC_DMA (13*IOASIC_SLOT_SIZE)   /* FDC DMA (maxine) */
-#define IOASIC_SCSI_DMA        (14*IOASIC_SLOT_SIZE)   /* ??? */
+#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE)  /* ??? */
 #define IOASIC_RES_15  (15*IOASIC_SLOT_SIZE)   /* unused? */
 
 
index 88d9ffd..0eb3241 100644 (file)
 /*
  * System Control & Status Register bits.
  */
-#define KN01_CSR_MNFMOD                (1<<15) /* MNFMOD manufacturing jumper */
-#define KN01_CSR_STATUS                (1<<14) /* self-test result status output */
-#define KN01_CSR_PARDIS                (1<<13) /* parity error disable */
-#define KN01_CSR_CRSRTST       (1<<12) /* PCC test output */
-#define KN01_CSR_MONO          (1<<11) /* mono/color fb SIMM installed */
-#define KN01_CSR_MEMERR                (1<<10) /* write timeout error status & ack*/
+#define KN01_CSR_MNFMOD                (1<<15) /* MNFMOD manufacturing jumper */
+#define KN01_CSR_STATUS                (1<<14) /* self-test result status output */
+#define KN01_CSR_PARDIS                (1<<13) /* parity error disable */
+#define KN01_CSR_CRSRTST       (1<<12) /* PCC test output */
+#define KN01_CSR_MONO          (1<<11) /* mono/color fb SIMM installed */
+#define KN01_CSR_MEMERR                (1<<10) /* write timeout error status & ack*/
 #define KN01_CSR_VINT          (1<<9)  /* PCC area detect #2 status & ack */
 #define KN01_CSR_TXDIS         (1<<8)  /* DZ11 transmit disable */
 #define KN01_CSR_VBGTRG                (1<<2)  /* blue DAC voltage over green (r/o) */
index 92c0fe2..69dc2a9 100644 (file)
@@ -68,7 +68,7 @@
 #define KN03CA_IO_SSR_ISDN_RST (1<<12)         /* ~ISDN (Am79C30A) reset */
 
 #define KN03CA_IO_SSR_FLOPPY_RST (1<<7)                /* ~FDC (82077) reset */
-#define KN03CA_IO_SSR_VIDEO_RST        (1<<6)          /* ~framebuffer reset */
+#define KN03CA_IO_SSR_VIDEO_RST (1<<6)         /* ~framebuffer reset */
 #define KN03CA_IO_SSR_AB_RST   (1<<5)          /* ACCESS.bus reset */
 #define KN03CA_IO_SSR_RES_4    (1<<4)          /* unused */
 #define KN03CA_IO_SSR_RES_3    (1<<4)          /* unused */
index c0ead63..4465777 100644 (file)
@@ -49,7 +49,7 @@
 
 #ifdef CONFIG_64BIT
 
-#define prom_is_rex(magic)     1       /* KN04 and KN05 are REX PROMs.  */
+#define prom_is_rex(magic)     1       /* KN04 and KN05 are REX PROMs.  */
 
 #else /* !CONFIG_64BIT */
 
index 006b43e..f8fc74b 100644 (file)
@@ -5,7 +5,7 @@
 #include <asm/cache.h>
 #include <asm-generic/dma-coherent.h>
 
-#ifndef CONFIG_SGI_IP27        /* Kludge to fix 2.6.39 build for IP27 */
+#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
 #include <dma-coherence.h>
 #endif
 
index f5097f6..5b9ed1b 100644 (file)
  *
  *  Address mapping for channels 0-3:
  *
- *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
- *    |  ...  |   |  ... |   |  ... |
- *    |  ...  |   |  ... |   |  ... |
- *    |  ...  |   |  ... |   |  ... |
- *   P7  ...  P0  A7 ... A0  A7 ... A0
- * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
+ *   A23 ... A16 A15 ... A8  A7 ... A0   (Physical addresses)
+ *    |         ...  |   |  ... |   |  ... |
+ *    |         ...  |   |  ... |   |  ... |
+ *    |         ...  |   |  ... |   |  ... |
+ *   P7         ...  P0  A7 ... A0  A7 ... A0
+ * |   Page    | Addr MSB | Addr LSB |   (DMA registers)
  *
  *  Address mapping for channels 5-7:
  *
- *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
- *    |  ...  |   \   \   ... \  \  \  ... \  \
- *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
- *    |  ...  |     \   \   ... \  \  \  ... \
- *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
- * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
+ *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0           (Physical addresses)
+ *    |         ...  |   \   \   ... \  \  \  ... \  \
+ *    |         ...  |    \   \   ... \  \  \  ... \  (not used)
+ *    |         ...  |     \   \   ... \  \  \  ... \
+ *   P7         ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
+ * |     Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
  *
  * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
  * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
 /* DMA controller registers */
 #define DMA1_CMD_REG           0x08    /* command register (w) */
 #define DMA1_STAT_REG          0x08    /* status register (r) */
-#define DMA1_REQ_REG            0x09    /* request register (w) */
+#define DMA1_REQ_REG           0x09    /* request register (w) */
 #define DMA1_MASK_REG          0x0A    /* single-channel mask (w) */
 #define DMA1_MODE_REG          0x0B    /* mode register (w) */
 #define DMA1_CLEAR_FF_REG      0x0C    /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG           0x0D    /* Temporary Register (r) */
+#define DMA1_TEMP_REG          0x0D    /* Temporary Register (r) */
 #define DMA1_RESET_REG         0x0D    /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
-#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */
+#define DMA1_CLR_MASK_REG      0x0E    /* Clear Mask */
+#define DMA1_MASK_ALL_REG      0x0F    /* all-channels mask (w) */
 
 #define DMA2_CMD_REG           0xD0    /* command register (w) */
 #define DMA2_STAT_REG          0xD0    /* status register (r) */
-#define DMA2_REQ_REG            0xD2    /* request register (w) */
+#define DMA2_REQ_REG           0xD2    /* request register (w) */
 #define DMA2_MASK_REG          0xD4    /* single-channel mask (w) */
 #define DMA2_MODE_REG          0xD6    /* mode register (w) */
 #define DMA2_CLEAR_FF_REG      0xD8    /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG           0xDA    /* Temporary Register (r) */
+#define DMA2_TEMP_REG          0xDA    /* Temporary Register (r) */
 #define DMA2_RESET_REG         0xDA    /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
-#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */
-
-#define DMA_ADDR_0              0x00    /* DMA address registers */
-#define DMA_ADDR_1              0x02
-#define DMA_ADDR_2              0x04
-#define DMA_ADDR_3              0x06
-#define DMA_ADDR_4              0xC0
-#define DMA_ADDR_5              0xC4
-#define DMA_ADDR_6              0xC8
-#define DMA_ADDR_7              0xCC
-
-#define DMA_CNT_0               0x01    /* DMA count registers */
-#define DMA_CNT_1               0x03
-#define DMA_CNT_2               0x05
-#define DMA_CNT_3               0x07
-#define DMA_CNT_4               0xC2
-#define DMA_CNT_5               0xC6
-#define DMA_CNT_6               0xCA
-#define DMA_CNT_7               0xCE
-
-#define DMA_PAGE_0              0x87    /* DMA page registers */
-#define DMA_PAGE_1              0x83
-#define DMA_PAGE_2              0x81
-#define DMA_PAGE_3              0x82
-#define DMA_PAGE_5              0x8B
-#define DMA_PAGE_6              0x89
-#define DMA_PAGE_7              0x8A
+#define DMA2_CLR_MASK_REG      0xDC    /* Clear Mask */
+#define DMA2_MASK_ALL_REG      0xDE    /* all-channels mask (w) */
+
+#define DMA_ADDR_0             0x00    /* DMA address registers */
+#define DMA_ADDR_1             0x02
+#define DMA_ADDR_2             0x04
+#define DMA_ADDR_3             0x06
+#define DMA_ADDR_4             0xC0
+#define DMA_ADDR_5             0xC4
+#define DMA_ADDR_6             0xC8
+#define DMA_ADDR_7             0xCC
+
+#define DMA_CNT_0              0x01    /* DMA count registers */
+#define DMA_CNT_1              0x03
+#define DMA_CNT_2              0x05
+#define DMA_CNT_3              0x07
+#define DMA_CNT_4              0xC2
+#define DMA_CNT_5              0xC6
+#define DMA_CNT_6              0xCA
+#define DMA_CNT_7              0xCE
+
+#define DMA_PAGE_0             0x87    /* DMA page registers */
+#define DMA_PAGE_1             0x83
+#define DMA_PAGE_2             0x81
+#define DMA_PAGE_3             0x82
+#define DMA_PAGE_5             0x8B
+#define DMA_PAGE_6             0x89
+#define DMA_PAGE_7             0x8A
 
 #define DMA_MODE_READ  0x44    /* I/O to memory, no autoinit, increment, single mode */
 #define DMA_MODE_WRITE 0x48    /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0   /* pass thru DREQ->HRQ, DACK<-HLDA only */
+#define DMA_MODE_CASCADE 0xC0  /* pass thru DREQ->HRQ, DACK<-HLDA only */
 
 #define DMA_AUTOINIT   0x10
 
@@ -172,7 +172,7 @@ static __inline__ void release_dma_lock(unsigned long flags)
 static __inline__ void enable_dma(unsigned int dmanr)
 {
        if (dmanr<=3)
-               dma_outb(dmanr,  DMA1_MASK_REG);
+               dma_outb(dmanr,  DMA1_MASK_REG);
        else
                dma_outb(dmanr & 3,  DMA2_MASK_REG);
 }
@@ -204,7 +204,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr)
 static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
 {
        if (dmanr<=3)
-               dma_outb(mode | dmanr,  DMA1_MODE_REG);
+               dma_outb(mode | dmanr,  DMA1_MODE_REG);
        else
                dma_outb(mode | (dmanr&3),  DMA2_MODE_REG);
 }
@@ -248,10 +248,10 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
 static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
 {
        set_dma_page(dmanr, a>>16);
-       if (dmanr <= 3)  {
+       if (dmanr <= 3)  {
            dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
-            dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
-       }  else  {
+           dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+       }  else  {
            dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
            dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
        }
@@ -268,14 +268,14 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
  */
 static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
 {
-        count--;
-       if (dmanr <= 3)  {
+       count--;
+       if (dmanr <= 3)  {
            dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
            dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
-        } else {
+       } else {
            dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
            dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
-        }
+       }
 }
 
 
index 455c0ac..cf3ae24 100644 (file)
 
 /* ELF header e_flags defines. */
 /* MIPS architecture level. */
-#define EF_MIPS_ARCH_1         0x00000000      /* -mips1 code.  */
-#define EF_MIPS_ARCH_2         0x10000000      /* -mips2 code.  */
-#define EF_MIPS_ARCH_3         0x20000000      /* -mips3 code.  */
-#define EF_MIPS_ARCH_4         0x30000000      /* -mips4 code.  */
-#define EF_MIPS_ARCH_5         0x40000000      /* -mips5 code.  */
-#define EF_MIPS_ARCH_32                0x50000000      /* MIPS32 code.  */
-#define EF_MIPS_ARCH_64                0x60000000      /* MIPS64 code.  */
+#define EF_MIPS_ARCH_1         0x00000000      /* -mips1 code.  */
+#define EF_MIPS_ARCH_2         0x10000000      /* -mips2 code.  */
+#define EF_MIPS_ARCH_3         0x20000000      /* -mips3 code.  */
+#define EF_MIPS_ARCH_4         0x30000000      /* -mips4 code.  */
+#define EF_MIPS_ARCH_5         0x40000000      /* -mips5 code.  */
+#define EF_MIPS_ARCH_32                0x50000000      /* MIPS32 code.  */
+#define EF_MIPS_ARCH_64                0x60000000      /* MIPS64 code.  */
 #define EF_MIPS_ARCH_32R2      0x70000000      /* MIPS32 R2 code.  */
 #define EF_MIPS_ARCH_64R2      0x80000000      /* MIPS64 R2 code.  */
 
@@ -74,7 +74,7 @@
 #define R_MIPS_CALL16          11
 #define R_MIPS_GPREL32         12
 /* The remaining relocs are defined on Irix, although they are not
-   in the MIPS ELF ABI.  */
+   in the MIPS ELF ABI.         */
 #define R_MIPS_UNUSED1         13
 #define R_MIPS_UNUSED2         14
 #define R_MIPS_UNUSED3         15
@@ -214,7 +214,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
                                                                        \
        if (__h->e_machine != EM_MIPS)                                  \
                __res = 0;                                              \
-       if (__h->e_ident[EI_CLASS] != ELFCLASS64)                       \
+       if (__h->e_ident[EI_CLASS] != ELFCLASS64)                       \
                __res = 0;                                              \
                                                                        \
        __res;                                                          \
@@ -292,7 +292,7 @@ do {                                                                        \
                __SET_PERSONALITY32_O32();                              \
 } while (0)
 #else
-#define __SET_PERSONALITY32(ex)        do { } while (0)
+#define __SET_PERSONALITY32(ex) do { } while (0)
 #endif
 
 #define SET_PERSONALITY(ex)                                            \
@@ -337,11 +337,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
    instruction set this cpu supports.  This could be done in userspace,
    but it's not easy, and we've already done it here.  */
 
-#define ELF_HWCAP       (0)
+#define ELF_HWCAP      (0)
 
 /*
  * This yields a string that ld.so will use to load implementation
- * specific libraries for optimization.  This is more specific in
+ * specific libraries for optimization.         This is more specific in
  * intent than poking at uname or /proc/cpuinfo.
  */
 
@@ -365,11 +365,11 @@ extern const char *__elf_platform;
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
    use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
+   the loader. We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. */
 
 #ifndef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
 #endif
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
index c1449d2..ecf0596 100644 (file)
@@ -2,7 +2,7 @@
  *  Copyright (C) NEC Electronics Corporation 2005-2006
  *
  *  This file based on include/asm-mips/ddb5xxx/ddb5xxx.h
- *          Copyright 2001 MontaVista Software Inc.
+ *         Copyright 2001 MontaVista Software Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -40,7 +40,7 @@
 #define EMMA2RH_BHIF_INT1_EN_2 (0x000058+REGBASE)
 #define EMMA2RH_BHIF_SW_INT    (0x000070+REGBASE)
 #define EMMA2RH_BHIF_SW_INT_EN (0x000080+REGBASE)
-#define EMMA2RH_BHIF_SW_INT_CLR        (0x000090+REGBASE)
+#define EMMA2RH_BHIF_SW_INT_CLR (0x000090+REGBASE)
 #define EMMA2RH_BHIF_MAIN_CTRL (0x0000b4+REGBASE)
 #define EMMA2RH_BHIF_EXCEPT_VECT_BASE_ADDRESS  (0x0000c0+REGBASE)
 #define EMMA2RH_GPIO_DIR       (0x110d20+REGBASE)
@@ -73,7 +73,7 @@
  *  Memory map (physical address)
  *
  *  Note most of the following address must be properly aligned by the
- *  corresponding size.  For example, if PCI_IO_SIZE is 16MB, then
+ *  corresponding size.         For example, if PCI_IO_SIZE is 16MB, then
  *  PCI_IO_BASE must be aligned along 16MB boundary.
  */
 
@@ -96,8 +96,8 @@
 #define EMMA2RH_ROM_BASE       0x1c000000
 #define EMMA2RH_ROM_SIZE       0x04000000      /* 64 MB */
 
-#define EMMA2RH_PCI_CONFIG_BASE        EMMA2RH_PCI_IO_BASE
-#define EMMA2RH_PCI_CONFIG_SIZE        EMMA2RH_PCI_IO_SIZE
+#define EMMA2RH_PCI_CONFIG_BASE EMMA2RH_PCI_IO_BASE
+#define EMMA2RH_PCI_CONFIG_SIZE EMMA2RH_PCI_IO_SIZE
 
 #define NUM_EMMA2RH_IRQ                96
 
@@ -169,51 +169,51 @@ static inline u8 emma2rh_in8(u32 offset)
  **/
 
 /*---------------------------------------------------------------------------*/
-/* CNT - Control register (00H R/W)                                          */
+/* CNT - Control register (00H R/W)                                         */
 /*---------------------------------------------------------------------------*/
-#define SPT         0x00000001
-#define STT         0x00000002
-#define ACKE        0x00000004
-#define WTIM        0x00000008
-#define SPIE        0x00000010
-#define WREL        0x00000020
-#define LREL        0x00000040
-#define IICE        0x00000080
-#define CNT_RESERVED    0x000000ff     /* reserved bit 0 */
-
-#define I2C_EMMA_START      (IICE | STT)
-#define I2C_EMMA_STOP       (IICE | SPT)
+#define SPT        0x00000001
+#define STT        0x00000002
+#define ACKE       0x00000004
+#define WTIM       0x00000008
+#define SPIE       0x00000010
+#define WREL       0x00000020
+#define LREL       0x00000040
+#define IICE       0x00000080
+#define CNT_RESERVED   0x000000ff      /* reserved bit 0 */
+
+#define I2C_EMMA_START     (IICE | STT)
+#define I2C_EMMA_STOP      (IICE | SPT)
 #define I2C_EMMA_REPSTART   I2C_EMMA_START
 
 /*---------------------------------------------------------------------------*/
-/* STA - Status register (10H Read)                                          */
+/* STA - Status register (10H Read)                                         */
 /*---------------------------------------------------------------------------*/
-#define MSTS        0x00000080
-#define ALD         0x00000040
-#define EXC         0x00000020
-#define COI         0x00000010
-#define TRC         0x00000008
-#define ACKD        0x00000004
-#define STD         0x00000002
-#define SPD         0x00000001
+#define MSTS       0x00000080
+#define ALD        0x00000040
+#define EXC        0x00000020
+#define COI        0x00000010
+#define TRC        0x00000008
+#define ACKD       0x00000004
+#define STD        0x00000002
+#define SPD        0x00000001
 
 /*---------------------------------------------------------------------------*/
-/* CSEL - Clock select register (20H R/W)                                    */
+/* CSEL - Clock select register (20H R/W)                                   */
 /*---------------------------------------------------------------------------*/
-#define FCL         0x00000080
-#define ND50        0x00000040
-#define CLD         0x00000020
-#define DAD         0x00000010
-#define SMC         0x00000008
-#define DFC         0x00000004
-#define CL          0x00000003
-#define CSEL_RESERVED   0x000000ff     /* reserved bit 0 */
-
-#define FAST397     0x0000008b
-#define FAST297     0x0000008a
-#define FAST347     0x0000000b
-#define FAST260     0x0000000a
-#define FAST130     0x00000008
+#define FCL        0x00000080
+#define ND50       0x00000040
+#define CLD        0x00000020
+#define DAD        0x00000010
+#define SMC        0x00000008
+#define DFC        0x00000004
+#define CL         0x00000003
+#define CSEL_RESERVED  0x000000ff      /* reserved bit 0 */
+
+#define FAST397            0x0000008b
+#define FAST297            0x0000008a
+#define FAST347            0x0000000b
+#define FAST260            0x0000000a
+#define FAST130            0x00000008
 #define STANDARD108 0x00000083
 #define STANDARD83  0x00000082
 #define STANDARD95  0x00000003
@@ -222,32 +222,32 @@ static inline u8 emma2rh_in8(u32 offset)
 #define STANDARD71  0x00000000
 
 /*---------------------------------------------------------------------------*/
-/* SVA - Slave address register (30H R/W)                                    */
+/* SVA - Slave address register (30H R/W)                                   */
 /*---------------------------------------------------------------------------*/
-#define SVA         0x000000fe
+#define SVA        0x000000fe
 
 /*---------------------------------------------------------------------------*/
-/* SHR - Shift register (40H R/W)                                            */
+/* SHR - Shift register (40H R/W)                                           */
 /*---------------------------------------------------------------------------*/
-#define SR          0x000000ff
+#define SR         0x000000ff
 
 /*---------------------------------------------------------------------------*/
-/* INT - Interrupt register (50H R/W)                                        */
-/* INTM - Interrupt mask register (60H R/W)                                  */
+/* INT - Interrupt register (50H R/W)                                       */
+/* INTM - Interrupt mask register (60H R/W)                                 */
 /*---------------------------------------------------------------------------*/
-#define INTE0       0x00000001
+#define INTE0      0x00000001
 
 /***********************************************************************
  * I2C registers
  ***********************************************************************
  */
-#define I2C_EMMA_CNT            0x00
-#define I2C_EMMA_STA            0x10
-#define I2C_EMMA_CSEL           0x20
-#define I2C_EMMA_SVA            0x30
-#define I2C_EMMA_SHR            0x40
-#define I2C_EMMA_INT            0x50
-#define I2C_EMMA_INTM           0x60
+#define I2C_EMMA_CNT           0x00
+#define I2C_EMMA_STA           0x10
+#define I2C_EMMA_CSEL          0x20
+#define I2C_EMMA_SVA           0x30
+#define I2C_EMMA_SHR           0x40
+#define I2C_EMMA_INT           0x50
+#define I2C_EMMA_INTM          0x60
 
 /*
  * include the board dependent part
index bf2d229..e55a674 100644 (file)
@@ -2,7 +2,7 @@
  *  Copyright (C) NEC Electronics Corporation 2005-2006
  *
  *  This file based on include/asm-mips/ddb5xxx/ddb5xxx.h
- *          Copyright 2001 MontaVista Software Inc.
+ *         Copyright 2001 MontaVista Software Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 98bcc98..dfaaf49 100644 (file)
@@ -95,7 +95,7 @@ static inline unsigned long fix_to_virt(const unsigned int idx)
        if (idx >= __end_of_fixed_addresses)
                __this_fixmap_does_not_exist();
 
-        return __fix_to_virt(idx);
+       return __fix_to_virt(idx);
 }
 
 static inline unsigned long virt_to_fix(const unsigned long vaddr)
@@ -111,7 +111,7 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
  * Called from pgtable_init()
  */
 extern void fixrange_init(unsigned long start, unsigned long end,
-        pgd_t *pgd_base);
+       pgd_t *pgd_base);
 
 
 #endif
index 4456c9c..d75aed3 100644 (file)
@@ -24,9 +24,9 @@ static inline void fd_cacheflush(char * addr, long size)
  * And on Mips's the CMOS info fails also ...
  *
  * FIXME: This information should come from the ARC configuration tree
- *        or wherever a particular machine has stored this ...
+ *       or wherever a particular machine has stored this ...
  */
-#define FLOPPY0_TYPE           fd_drive_type(0)
+#define FLOPPY0_TYPE           fd_drive_type(0)
 #define FLOPPY1_TYPE           fd_drive_type(1)
 
 #define FDC1                   fd_getfdaddr1()
index 2b5fddc..429481f 100644 (file)
  * These definitions only cover the R3000-ish 16/32 register model.
  * But we're trying to be R3000 friendly anyway ...
  */
-#define fv0    $f0      /* return value */
+#define fv0    $f0      /* return value */
 #define fv0f   $f1
 #define fv1    $f2
 #define fv1f   $f3
-#define fa0    $f12     /* argument registers */
+#define fa0    $f12     /* argument registers */
 #define fa0f   $f13
 #define fa1    $f14
 #define fa1f   $f15
-#define ft0    $f4      /* caller saved */
+#define ft0    $f4      /* caller saved */
 #define ft0f   $f5
 #define ft1    $f6
 #define ft1f   $f7
@@ -40,7 +40,7 @@
 #define ft4f   $f17
 #define ft5    $f18
 #define ft5f   $f19
-#define fs0    $f20     /* callee saved */
+#define fs0    $f20     /* callee saved */
 #define fs0f   $f21
 #define fs1    $f22
 #define fs1f   $f23
@@ -53,7 +53,7 @@
 #define fs5    $f30
 #define fs5f   $f31
 
-#define fcr31  $31      /* FPU status register */
+#define fcr31  $31      /* FPU status register */
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
index 7fcef8e..d088e5d 100644 (file)
@@ -35,14 +35,14 @@ extern void _restore_fp(struct task_struct *);
 
 #define __enable_fpu()                                                 \
 do {                                                                   \
-        set_c0_status(ST0_CU1);                                                \
-        enable_fpu_hazard();                                           \
+       set_c0_status(ST0_CU1);                                         \
+       enable_fpu_hazard();                                            \
 } while (0)
 
 #define __disable_fpu()                                                        \
 do {                                                                   \
        clear_c0_status(ST0_CU1);                                       \
-        disable_fpu_hazard();                                          \
+       disable_fpu_hazard();                                           \
 } while (0)
 
 #define enable_fpu()                                                   \
index 6ebf173..6ea1581 100644 (file)
@@ -92,24 +92,24 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 
        switch (op) {
        case FUTEX_OP_SET:
-               __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
+               __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
                break;
 
        case FUTEX_OP_ADD:
-               __futex_atomic_op("addu $1, %1, %z5",
-                                 ret, oldval, uaddr, oparg);
+               __futex_atomic_op("addu $1, %1, %z5",
+                                 ret, oldval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("or   $1, %1, %z5",
-                                 ret, oldval, uaddr, oparg);
+                                 ret, oldval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                __futex_atomic_op("and  $1, %1, %z5",
-                                 ret, oldval, uaddr, ~oparg);
+                                 ret, oldval, uaddr, ~oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("xor  $1, %1, %z5",
-                                 ret, oldval, uaddr, oparg);
+                                 ret, oldval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
index e6ff4ad..f8d37d1 100644 (file)
@@ -12,7 +12,7 @@ typedef enum configclass {
        SystemClass,
        ProcessorClass,
        CacheClass,
-#ifndef        _NT_PROM
+#ifndef _NT_PROM
        MemoryClass,
        AdapterClass,
        ControllerClass,
@@ -34,7 +34,7 @@ typedef enum configtype {
        SecondaryICache,
        SecondaryDCache,
        SecondaryCache,
-#ifndef        _NT_PROM
+#ifndef _NT_PROM
        Memory,
 #endif
        EISAAdapter,
@@ -93,7 +93,7 @@ typedef enum {
 } IDENTIFIERFLAG;
 
 #ifndef NULL                   /* for GetChild(NULL); */
-#define        NULL    0
+#define NULL   0
 #endif
 
 union key_u {
@@ -125,7 +125,7 @@ typedef struct component {
        IDENTIFIERFLAG  Flags;
        USHORT          Version;
        USHORT          Revision;
-       ULONG           Key;
+       ULONG           Key;
        ULONG           AffinityMask;
        ULONG           ConfigurationDataSize;
        ULONG           IdentifierLength;
@@ -149,7 +149,7 @@ typedef struct systemid {
 typedef enum memorytype {
        ExceptionBlock,
        SPBPage,                        /* ARCS == SystemParameterBlock */
-#ifndef        _NT_PROM
+#ifndef _NT_PROM
        FreeContiguous,
        FreeMemory,
        BadMemory,
index 2b11f87..ad16380 100644 (file)
@@ -15,7 +15,7 @@
 typedef char           CHAR;
 typedef short          SHORT;
 typedef long           LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
-typedef        long            LONG __attribute__ ((__mode__ (__SI__)));
+typedef long           LONG __attribute__ ((__mode__ (__SI__)));
 typedef unsigned char  UCHAR;
 typedef unsigned short USHORT;
 typedef unsigned long  ULONG __attribute__ ((__mode__ (__SI__)));
@@ -23,11 +23,11 @@ typedef void                VOID;
 
 /* The pointer types.  Note that we're using a 64-bit compiler but all
    pointer in the ARC structures are only 32-bit, so we need some disgusting
-   workarounds.  Keep your vomit bag handy.  */
+   workarounds.         Keep your vomit bag handy.  */
 typedef LONG           _PCHAR;
 typedef LONG           _PSHORT;
 typedef LONG           _PLARGE_INTEGER;
-typedef        LONG            _PLONG;
+typedef LONG           _PLONG;
 typedef LONG           _PUCHAR;
 typedef LONG           _PUSHORT;
 typedef LONG           _PULONG;
@@ -40,7 +40,7 @@ typedef LONG          _PVOID;
 typedef char           CHAR;
 typedef short          SHORT;
 typedef long           LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
-typedef        long            LONG __attribute__ ((__mode__ (__DI__)));
+typedef long           LONG __attribute__ ((__mode__ (__DI__)));
 typedef unsigned char  UCHAR;
 typedef unsigned short USHORT;
 typedef unsigned long  ULONG __attribute__ ((__mode__ (__DI__)));
@@ -51,7 +51,7 @@ typedef void          VOID;
 typedef CHAR           *_PCHAR;
 typedef SHORT          *_PSHORT;
 typedef LARGE_INTEGER  *_PLARGE_INTEGER;
-typedef        LONG            *_PLONG;
+typedef LONG           *_PLONG;
 typedef UCHAR          *_PUCHAR;
 typedef USHORT         *_PUSHORT;
 typedef ULONG          *_PULONG;
@@ -62,7 +62,7 @@ typedef VOID          *_PVOID;
 typedef CHAR           *PCHAR;
 typedef SHORT          *PSHORT;
 typedef LARGE_INTEGER  *PLARGE_INTEGER;
-typedef        LONG            *PLONG;
+typedef LONG           *PLONG;
 typedef UCHAR          *PUCHAR;
 typedef USHORT         *PUSHORT;
 typedef ULONG          *PULONG;
index 0995575..1734755 100644 (file)
@@ -40,7 +40,7 @@ typedef long intptr_t;
 /* Seal indicating CFE's presence, passed to user program. */
 #define CFE_EPTSEAL 0x43464531
 
-#define CFE_MI_RESERVED        0       /* memory is reserved, do not use */
+#define CFE_MI_RESERVED 0      /* memory is reserved, do not use */
 #define CFE_MI_AVAILABLE 1     /* memory is available */
 
 #define CFE_FLG_WARMSTART     0x00000001
@@ -52,13 +52,13 @@ typedef long intptr_t;
 
 #define CFE_STDHANDLE_CONSOLE  0
 
-#define CFE_DEV_NETWORK        1
+#define CFE_DEV_NETWORK                1
 #define CFE_DEV_DISK           2
 #define CFE_DEV_FLASH          3
 #define CFE_DEV_SERIAL         4
 #define CFE_DEV_CPU            5
 #define CFE_DEV_NVRAM          6
-#define CFE_DEV_CLOCK           7
+#define CFE_DEV_CLOCK          7
 #define CFE_DEV_OTHER          8
 #define CFE_DEV_MASK           0x0F
 
index b803746..fc0e91f 100644 (file)
@@ -25,7 +25,7 @@
  */
 
 #define CFE_OK                  0
-#define CFE_ERR                 -1     /* generic error */
+#define CFE_ERR                        -1      /* generic error */
 #define CFE_ERR_INV_COMMAND    -2
 #define CFE_ERR_EOF            -3
 #define CFE_ERR_IOERR          -4
 #define CFE_ERR_ENVREADONLY    -10
 
 #define CFE_ERR_NOTELF         -11
-#define CFE_ERR_NOT32BIT       -12
-#define CFE_ERR_WRONGENDIAN    -13
-#define CFE_ERR_BADELFVERS     -14
-#define CFE_ERR_NOTMIPS        -15
-#define CFE_ERR_BADELFFMT      -16
-#define CFE_ERR_BADADDR        -17
+#define CFE_ERR_NOT32BIT       -12
+#define CFE_ERR_WRONGENDIAN    -13
+#define CFE_ERR_BADELFVERS     -14
+#define CFE_ERR_NOTMIPS                -15
+#define CFE_ERR_BADELFFMT      -16
+#define CFE_ERR_BADADDR                -17
 
 #define CFE_ERR_FILENOTFOUND   -18
 #define CFE_ERR_UNSUPPORTED    -19
@@ -73,8 +73,8 @@
 
 #define CFE_ERR_NOTREADY       -36
 
-#define CFE_ERR_GETMEM          -37
-#define CFE_ERR_SETMEM          -38
+#define CFE_ERR_GETMEM         -37
+#define CFE_ERR_SETMEM         -38
 
 #define CFE_ERR_NOTCONN                -39
 #define CFE_ERR_ADDRINUSE      -40
index c0cf76a..a7359f7 100644 (file)
@@ -32,7 +32,7 @@
 
 /* GCMP register access */
 #define GCMPGCB(reg)                   REGP(_gcmp_base, GCMPGCBOFS(reg))
-#define GCMPGCBn(reg, n)               REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
+#define GCMPGCBn(reg, n)              REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
 #define GCMPCLCB(reg)                  REGP(_gcmp_base, GCMPCLCBOFS(reg))
 #define GCMPCOCB(reg)                  REGP(_gcmp_base, GCMPCOCBOFS(reg))
 #define GCMPGDB(reg)                   REGP(_gcmp_base, GCMPGDBOFS(reg))
 
 /* GCB registers */
 #define GCMP_GCB_GC_OFS                        0x0000  /* Global Config Register */
-#define  GCMP_GCB_GC_NUMIOCU_SHF       8
-#define  GCMP_GCB_GC_NUMIOCU_MSK       GCMPGCBMSK(GC_NUMIOCU, 4)
-#define  GCMP_GCB_GC_NUMCORES_SHF      0
-#define  GCMP_GCB_GC_NUMCORES_MSK      GCMPGCBMSK(GC_NUMCORES, 8)
+#define         GCMP_GCB_GC_NUMIOCU_SHF        8
+#define         GCMP_GCB_GC_NUMIOCU_MSK        GCMPGCBMSK(GC_NUMIOCU, 4)
+#define         GCMP_GCB_GC_NUMCORES_SHF       0
+#define         GCMP_GCB_GC_NUMCORES_MSK       GCMPGCBMSK(GC_NUMCORES, 8)
 #define GCMP_GCB_GCMPB_OFS             0x0008          /* Global GCMP Base */
-#define  GCMP_GCB_GCMPB_GCMPBASE_SHF   15
-#define  GCMP_GCB_GCMPB_GCMPBASE_MSK   GCMPGCBMSK(GCMPB_GCMPBASE, 17)
-#define  GCMP_GCB_GCMPB_CMDEFTGT_SHF   0
-#define  GCMP_GCB_GCMPB_CMDEFTGT_MSK   GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
-#define  GCMP_GCB_GCMPB_CMDEFTGT_DISABLED      0
-#define  GCMP_GCB_GCMPB_CMDEFTGT_MEM           1
-#define  GCMP_GCB_GCMPB_CMDEFTGT_IOCU1         2
-#define  GCMP_GCB_GCMPB_CMDEFTGT_IOCU2         3
+#define         GCMP_GCB_GCMPB_GCMPBASE_SHF    15
+#define         GCMP_GCB_GCMPB_GCMPBASE_MSK    GCMPGCBMSK(GCMPB_GCMPBASE, 17)
+#define         GCMP_GCB_GCMPB_CMDEFTGT_SHF    0
+#define         GCMP_GCB_GCMPB_CMDEFTGT_MSK    GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
+#define         GCMP_GCB_GCMPB_CMDEFTGT_DISABLED       0
+#define         GCMP_GCB_GCMPB_CMDEFTGT_MEM            1
+#define         GCMP_GCB_GCMPB_CMDEFTGT_IOCU1          2
+#define         GCMP_GCB_GCMPB_CMDEFTGT_IOCU2          3
 #define GCMP_GCB_CCMC_OFS              0x0010  /* Global CM Control */
 #define GCMP_GCB_GCSRAP_OFS            0x0020  /* Global CSR Access Privilege */
-#define  GCMP_GCB_GCSRAP_CMACCESS_SHF  0
-#define  GCMP_GCB_GCSRAP_CMACCESS_MSK  GCMPGCBMSK(GCSRAP_CMACCESS, 8)
+#define         GCMP_GCB_GCSRAP_CMACCESS_SHF   0
+#define         GCMP_GCB_GCSRAP_CMACCESS_MSK   GCMPGCBMSK(GCSRAP_CMACCESS, 8)
 #define GCMP_GCB_GCMPREV_OFS           0x0030  /* GCMP Revision Register */
 #define GCMP_GCB_GCMEM_OFS             0x0040  /* Global CM Error Mask */
 #define GCMP_GCB_GCMEC_OFS             0x0048  /* Global CM Error Cause */
-#define  GCMP_GCB_GMEC_ERROR_TYPE_SHF  27
-#define  GCMP_GCB_GMEC_ERROR_TYPE_MSK  GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
-#define  GCMP_GCB_GMEC_ERROR_INFO_SHF  0
-#define  GCMP_GCB_GMEC_ERROR_INFO_MSK  GCMPGCBMSK(GMEC_ERROR_INFO, 27)
+#define         GCMP_GCB_GMEC_ERROR_TYPE_SHF   27
+#define         GCMP_GCB_GMEC_ERROR_TYPE_MSK   GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
+#define         GCMP_GCB_GMEC_ERROR_INFO_SHF   0
+#define         GCMP_GCB_GMEC_ERROR_INFO_MSK   GCMPGCBMSK(GMEC_ERROR_INFO, 27)
 #define GCMP_GCB_GCMEA_OFS             0x0050  /* Global CM Error Address */
 #define GCMP_GCB_GCMEO_OFS             0x0058  /* Global CM Error Multiple */
-#define  GCMP_GCB_GMEO_ERROR_2ND_SHF   0
-#define  GCMP_GCB_GMEO_ERROR_2ND_MSK   GCMPGCBMSK(GMEO_ERROR_2ND, 5)
+#define         GCMP_GCB_GMEO_ERROR_2ND_SHF    0
+#define         GCMP_GCB_GMEO_ERROR_2ND_MSK    GCMPGCBMSK(GMEO_ERROR_2ND, 5)
 #define GCMP_GCB_GICBA_OFS             0x0080  /* Global Interrupt Controller Base Address */
-#define  GCMP_GCB_GICBA_BASE_SHF       17
-#define  GCMP_GCB_GICBA_BASE_MSK       GCMPGCBMSK(GICBA_BASE, 15)
-#define  GCMP_GCB_GICBA_EN_SHF         0
-#define  GCMP_GCB_GICBA_EN_MSK         GCMPGCBMSK(GICBA_EN, 1)
+#define         GCMP_GCB_GICBA_BASE_SHF        17
+#define         GCMP_GCB_GICBA_BASE_MSK        GCMPGCBMSK(GICBA_BASE, 15)
+#define         GCMP_GCB_GICBA_EN_SHF          0
+#define         GCMP_GCB_GICBA_EN_MSK          GCMPGCBMSK(GICBA_EN, 1)
 
 /* GCB Regions */
 #define GCMP_GCB_CMxBASE_OFS(n)                (0x0090+16*(n))         /* Global Region[0-3] Base Address */
-#define  GCMP_GCB_CMxBASE_BASE_SHF     16
-#define  GCMP_GCB_CMxBASE_BASE_MSK     GCMPGCBMSK(CMxBASE_BASE, 16)
+#define         GCMP_GCB_CMxBASE_BASE_SHF      16
+#define         GCMP_GCB_CMxBASE_BASE_MSK      GCMPGCBMSK(CMxBASE_BASE, 16)
 #define GCMP_GCB_CMxMASK_OFS(n)                (0x0098+16*(n))         /* Global Region[0-3] Address Mask */
-#define  GCMP_GCB_CMxMASK_MASK_SHF     16
-#define  GCMP_GCB_CMxMASK_MASK_MSK     GCMPGCBMSK(CMxMASK_MASK, 16)
-#define  GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
-#define  GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
-#define  GCMP_GCB_CMxMASK_CMREGTGT_MEM  0
-#define  GCMP_GCB_CMxMASK_CMREGTGT_MEM1  1
-#define  GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
-#define  GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
+#define         GCMP_GCB_CMxMASK_MASK_SHF      16
+#define         GCMP_GCB_CMxMASK_MASK_MSK      GCMPGCBMSK(CMxMASK_MASK, 16)
+#define         GCMP_GCB_CMxMASK_CMREGTGT_SHF  0
+#define         GCMP_GCB_CMxMASK_CMREGTGT_MSK  GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
+#define         GCMP_GCB_CMxMASK_CMREGTGT_MEM   0
+#define         GCMP_GCB_CMxMASK_CMREGTGT_MEM1  1
+#define         GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
+#define         GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
 
 
 /* Core local/Core other control block registers */
 #define GCMP_CCB_RESETR_OFS            0x0000                  /* Reset Release */
-#define  GCMP_CCB_RESETR_INRESET_SHF   0
-#define  GCMP_CCB_RESETR_INRESET_MSK   GCMPCCBMSK(RESETR_INRESET, 16)
+#define         GCMP_CCB_RESETR_INRESET_SHF    0
+#define         GCMP_CCB_RESETR_INRESET_MSK    GCMPCCBMSK(RESETR_INRESET, 16)
 #define GCMP_CCB_COHCTL_OFS            0x0008                  /* Coherence Control */
-#define  GCMP_CCB_COHCTL_DOMAIN_SHF    0
-#define  GCMP_CCB_COHCTL_DOMAIN_MSK    GCMPCCBMSK(COHCTL_DOMAIN, 8)
+#define         GCMP_CCB_COHCTL_DOMAIN_SHF     0
+#define         GCMP_CCB_COHCTL_DOMAIN_MSK     GCMPCCBMSK(COHCTL_DOMAIN, 8)
 #define GCMP_CCB_CFG_OFS               0x0010                  /* Config */
-#define  GCMP_CCB_CFG_IOCUTYPE_SHF     10
-#define  GCMP_CCB_CFG_IOCUTYPE_MSK     GCMPCCBMSK(CFG_IOCUTYPE, 2)
-#define   GCMP_CCB_CFG_IOCUTYPE_CPU    0
-#define   GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
-#define   GCMP_CCB_CFG_IOCUTYPE_CIOCU  2
-#define  GCMP_CCB_CFG_NUMVPE_SHF       0
-#define  GCMP_CCB_CFG_NUMVPE_MSK       GCMPCCBMSK(CFG_NUMVPE, 10)
+#define         GCMP_CCB_CFG_IOCUTYPE_SHF      10
+#define         GCMP_CCB_CFG_IOCUTYPE_MSK      GCMPCCBMSK(CFG_IOCUTYPE, 2)
+#define          GCMP_CCB_CFG_IOCUTYPE_CPU     0
+#define          GCMP_CCB_CFG_IOCUTYPE_NCIOCU  1
+#define          GCMP_CCB_CFG_IOCUTYPE_CIOCU   2
+#define         GCMP_CCB_CFG_NUMVPE_SHF        0
+#define         GCMP_CCB_CFG_NUMVPE_MSK        GCMPCCBMSK(CFG_NUMVPE, 10)
 #define GCMP_CCB_OTHER_OFS             0x0018          /* Other Address */
-#define  GCMP_CCB_OTHER_CORENUM_SHF    16
-#define  GCMP_CCB_OTHER_CORENUM_MSK    GCMPCCBMSK(OTHER_CORENUM, 16)
+#define         GCMP_CCB_OTHER_CORENUM_SHF     16
+#define         GCMP_CCB_OTHER_CORENUM_MSK     GCMPCCBMSK(OTHER_CORENUM, 16)
 #define GCMP_CCB_RESETBASE_OFS         0x0020          /* Reset Exception Base */
-#define  GCMP_CCB_RESETBASE_BEV_SHF    12
-#define  GCMP_CCB_RESETBASE_BEV_MSK    GCMPCCBMSK(RESETBASE_BEV, 20)
+#define         GCMP_CCB_RESETBASE_BEV_SHF     12
+#define         GCMP_CCB_RESETBASE_BEV_MSK     GCMPCCBMSK(RESETBASE_BEV, 20)
 #define GCMP_CCB_ID_OFS                        0x0028          /* Identification */
 #define GCMP_CCB_DINTGROUP_OFS         0x0030          /* DINT Group Participate */
 #define GCMP_CCB_DBGGROUP_OFS          0x0100          /* DebugBreak Group */
index 37620db..bdc9786 100644 (file)
@@ -66,7 +66,7 @@
 
 /* Register Map for Shared Section */
 
-#define        GIC_SH_CONFIG_OFS               0x0000
+#define GIC_SH_CONFIG_OFS              0x0000
 
 /* Shared Global Counter */
 #define GIC_SH_COUNTER_31_00_OFS       0x0010
 #define GIC_SH_PEND_223_192_OFS                0x0498
 #define GIC_SH_PEND_255_224_OFS                0x049c
 
-#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS        0x0500
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
 
 /* Maps Interrupt X to a Pin */
 #define GIC_SH_MAP_TO_PIN(intr) \
        (GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
 
-#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS        0x2000
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
 
 /* Maps Interrupt X to a VPE */
 #define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
@@ -326,7 +326,7 @@ struct gic_intr_map {
        unsigned int polarity;  /* Polarity : +/-       */
        unsigned int trigtype;  /* Trigger  : Edge/Levl */
        unsigned int flags;     /* Misc flags   */
-#define GIC_FLAG_IPI           0x01
+#define GIC_FLAG_IPI          0x01
 #define GIC_FLAG_TRANSPARENT   0x02
 };
 
@@ -343,10 +343,10 @@ struct gic_shared_intr_map {
 
 /* GIC nomenclature for Core Interrupt Pins. */
 #define GIC_CPU_INT0           0 /* Core Interrupt 2 */
-#define GIC_CPU_INT1           1 /* .                */
-#define GIC_CPU_INT2           2 /* .                */
-#define GIC_CPU_INT3           3 /* .                */
-#define GIC_CPU_INT4           4 /* .                */
+#define GIC_CPU_INT1           1 /* .                */
+#define GIC_CPU_INT2           2 /* .                */
+#define GIC_CPU_INT3           3 /* .                */
+#define GIC_CPU_INT4           4 /* .                */
 #define GIC_CPU_INT5           5 /* Core Interrupt 5 */
 
 /* Local GIC interrupts. */
@@ -359,6 +359,7 @@ struct gic_shared_intr_map {
 /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
 #define GIC_PIN_TO_VEC_OFFSET  (1)
 
+extern int gic_present;
 extern unsigned long _gic_base;
 extern unsigned int gic_irq_base;
 extern unsigned int gic_irq_flags[];
index 5437c84..0878701 100644 (file)
@@ -6,15 +6,15 @@ struct gio_device_id {
 };
 
 struct gio_device {
-       struct device   dev;
+       struct device   dev;
        struct resource resource;
-       unsigned int    irq;
-       unsigned int    slotno;
+       unsigned int    irq;
+       unsigned int    slotno;
 
-       const char      *name;
+       const char      *name;
        struct gio_device_id id;
-       unsigned        id32:1;
-       unsigned        gio64:1;
+       unsigned        id32:1;
+       unsigned        gio64:1;
 };
 #define to_gio_device(d) container_of(d, struct gio_device, dev)
 
@@ -50,7 +50,7 @@ static inline void gio_device_free(struct gio_device *dev)
 extern int gio_register_driver(struct gio_driver *);
 extern void gio_unregister_driver(struct gio_driver *);
 
-#define gio_get_drvdata(_dev)        drv_get_drvdata(&(_dev)->dev)
+#define gio_get_drvdata(_dev)       drv_get_drvdata(&(_dev)->dev)
 #define gio_set_drvdata(_dev, data)  drv_set_drvdata(&(_dev)->dev, (data))
 
 extern void gio_set_master(struct gio_device *);
index 0aa44ab..2e72abb 100644 (file)
@@ -34,7 +34,7 @@
 
 #define GT_MULTI_OFS           0x120
 
-/* CPU Address Decode.  */
+/* CPU Address Decode. */
 #define GT_SCS10LD_OFS         0x008
 #define GT_SCS10HD_OFS         0x010
 #define GT_SCS32LD_OFS         0x018
 
 #define GT_ADERR_OFS           0x470
 
-/* SDRAM Configuration.  */
+/* SDRAM Configuration.         */
 #define GT_SDRAM_CFG_OFS       0x448
 
 #define GT_SDRAM_OPMODE_OFS    0x474
 #define GT_SDRAM_BM_OFS                0x478
-#define GT_SDRAM_ADDRDECODE_OFS        0x47c
+#define GT_SDRAM_ADDRDECODE_OFS 0x47c
 
 /* SDRAM Parameters.  */
 #define GT_SDRAM_B0_OFS                0x44c
 #define GT_DEV_B3_OFS          0x468
 #define GT_DEV_BOOT_OFS                0x46c
 
-/* ECC.  */
+/* ECC.         */
 #define GT_ECC_ERRDATALO       0x480                   /* GT-64120A only  */
 #define GT_ECC_ERRDATAHI       0x484                   /* GT-64120A only  */
 #define GT_ECC_MEM             0x488                   /* GT-64120A only  */
 #define GT_ECC_CALC            0x48c                   /* GT-64120A only  */
 #define GT_ECC_ERRADDR         0x490                   /* GT-64120A only  */
 
-/* DMA Record.  */
+/* DMA Record. */
 #define GT_DMA0_CNT_OFS                0x800
 #define GT_DMA1_CNT_OFS                0x804
 #define GT_DMA2_CNT_OFS                0x808
 #define GT_DMA2_CUR_OFS                0x878
 #define GT_DMA3_CUR_OFS                0x87c
 
-/* DMA Channel Control.  */
+/* DMA Channel Control.         */
 #define GT_DMA0_CTRL_OFS       0x840
 #define GT_DMA1_CTRL_OFS       0x844
 #define GT_DMA2_CTRL_OFS       0x848
 #define GT_DMA3_CTRL_OFS       0x84c
 
-/* DMA Arbiter.  */
+/* DMA Arbiter.         */
 #define GT_DMA_ARB_OFS         0x860
 
 /* Timer/Counter.  */
 #define GT_PCI0_CFGADDR_OFS    0xcf8
 #define GT_PCI0_CFGDATA_OFS    0xcfc
 
-/* Interrupts.  */
+/* Interrupts. */
 #define GT_INTRCAUSE_OFS       0xc18
 #define GT_INTRMASK_OFS                0xc1c
 
 #define GT_DEF_BASE            0x14000000UL
 
 #define GT_MAX_BANKSIZE                (256 * 1024 * 1024)     /* Max 256MB bank  */
-#define GT_LATTIM_MIN          6                       /* Minimum lat  */
+#define GT_LATTIM_MIN          6                       /* Minimum lat  */
 
 /*
  * The gt64120_dep.h file must define the following macros
  *
  *   GT_READ(ofs, data_pointer)
- *   GT_WRITE(ofs, data)           - read/write GT64120 registers in 32bit
+ *   GT_WRITE(ofs, data)          - read/write GT64120 registers in 32bit
  *
- *   TIMER     - gt64120 timer irq, temporary solution until
+ *   TIMER     - gt64120 timer irq, temporary solution until
  *               full gt64120 cascade interrupt support is in place
  */
 
index f0324e9..44d6a5b 100644 (file)
@@ -25,7 +25,7 @@ static inline void name(void)                                         \
 }
 
 /*
- * MIPS R2 instruction hazard barrier.   Needs to be called as a subroutine.
+ * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
  */
 extern void mips_ihb(void);
 
@@ -68,7 +68,7 @@ ASMMACRO(back_to_back_c0_hazard,
        )
 /*
  * gcc has a tradition of misscompiling the previous construct using the
- * address of a label as argument to inline assembler.  Gas otoh has the
+ * address of a label as argument to inline assembler. Gas otoh has the
  * annoying difference between la and dla which are only usable for 32-bit
  * rsp. 64-bit code, so can't be used without conditional compilation.
  * The alterantive is switching the assembler to 64-bit code which happens
@@ -114,7 +114,7 @@ ASMMACRO(back_to_back_c0_hazard,
        )
 /*
  * gcc has a tradition of misscompiling the previous construct using the
- * address of a label as argument to inline assembler.  Gas otoh has the
+ * address of a label as argument to inline assembler. Gas otoh has the
  * annoying difference between la and dla which are only usable for 32-bit
  * rsp. 64-bit code, so can't be used without conditional compilation.
  * The alterantive is switching the assembler to 64-bit code which happens
@@ -141,7 +141,7 @@ do {                                                                        \
 
 #elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
        defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
-       defined(CONFIG_CPU_R5500)
+       defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
 
 /*
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
index 2d91888..b0dd0c8 100644 (file)
@@ -39,8 +39,8 @@ extern pte_t *pkmap_page_table;
  */
 #define LAST_PKMAP 1024
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
index 33c34ad..f1eadf7 100644 (file)
 #ifndef _ASM_INST_H
 #define _ASM_INST_H
 
-/*
- * Major opcodes; before MIPS IV cop1x was called cop3.
- */
-enum major_op {
-       spec_op, bcond_op, j_op, jal_op,
-       beq_op, bne_op, blez_op, bgtz_op,
-       addi_op, addiu_op, slti_op, sltiu_op,
-       andi_op, ori_op, xori_op, lui_op,
-       cop0_op, cop1_op, cop2_op, cop1x_op,
-       beql_op, bnel_op, blezl_op, bgtzl_op,
-       daddi_op, daddiu_op, ldl_op, ldr_op,
-       spec2_op, jalx_op, mdmx_op, spec3_op,
-       lb_op, lh_op, lwl_op, lw_op,
-       lbu_op, lhu_op, lwr_op, lwu_op,
-       sb_op, sh_op, swl_op, sw_op,
-       sdl_op, sdr_op, swr_op, cache_op,
-       ll_op, lwc1_op, lwc2_op, pref_op,
-       lld_op, ldc1_op, ldc2_op, ld_op,
-       sc_op, swc1_op, swc2_op, major_3b_op,
-       scd_op, sdc1_op, sdc2_op, sd_op
-};
-
-/*
- * func field of spec opcode.
- */
-enum spec_op {
-       sll_op, movc_op, srl_op, sra_op,
-       sllv_op, pmon_op, srlv_op, srav_op,
-       jr_op, jalr_op, movz_op, movn_op,
-       syscall_op, break_op, spim_op, sync_op,
-       mfhi_op, mthi_op, mflo_op, mtlo_op,
-       dsllv_op, spec2_unused_op, dsrlv_op, dsrav_op,
-       mult_op, multu_op, div_op, divu_op,
-       dmult_op, dmultu_op, ddiv_op, ddivu_op,
-       add_op, addu_op, sub_op, subu_op,
-       and_op, or_op, xor_op, nor_op,
-       spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
-       dadd_op, daddu_op, dsub_op, dsubu_op,
-       tge_op, tgeu_op, tlt_op, tltu_op,
-       teq_op, spec5_unused_op, tne_op, spec6_unused_op,
-       dsll_op, spec7_unused_op, dsrl_op, dsra_op,
-       dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
-};
-
-/*
- * func field of spec2 opcode.
- */
-enum spec2_op {
-       madd_op, maddu_op, mul_op, spec2_3_unused_op,
-       msub_op, msubu_op, /* more unused ops */
-       clz_op = 0x20, clo_op,
-       dclz_op = 0x24, dclo_op,
-       sdbpp_op = 0x3f
-};
-
-/*
- * func field of spec3 opcode.
- */
-enum spec3_op {
-       ext_op, dextm_op, dextu_op, dext_op,
-       ins_op, dinsm_op, dinsu_op, dins_op,
-       lx_op = 0x0a,
-       bshfl_op = 0x20,
-       dbshfl_op = 0x24,
-       rdhwr_op = 0x3b
-};
-
-/*
- * rt field of bcond opcodes.
- */
-enum rt_op {
-       bltz_op, bgez_op, bltzl_op, bgezl_op,
-       spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
-       tgei_op, tgeiu_op, tlti_op, tltiu_op,
-       teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
-       bltzal_op, bgezal_op, bltzall_op, bgezall_op,
-       rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
-       rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
-       bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
-};
-
-/*
- * rs field of cop opcodes.
- */
-enum cop_op {
-       mfc_op        = 0x00, dmfc_op       = 0x01,
-       cfc_op        = 0x02, mtc_op        = 0x04,
-       dmtc_op       = 0x05, ctc_op        = 0x06,
-       bc_op         = 0x08, cop_op        = 0x10,
-       copm_op       = 0x18
-};
-
-/*
- * rt field of cop.bc_op opcodes
- */
-enum bcop_op {
-       bcf_op, bct_op, bcfl_op, bctl_op
-};
-
-/*
- * func field of cop0 coi opcodes.
- */
-enum cop0_coi_func {
-       tlbr_op       = 0x01, tlbwi_op      = 0x02,
-       tlbwr_op      = 0x06, tlbp_op       = 0x08,
-       rfe_op        = 0x10, eret_op       = 0x18
-};
-
-/*
- * func field of cop0 com opcodes.
- */
-enum cop0_com_func {
-       tlbr1_op      = 0x01, tlbw_op       = 0x02,
-       tlbp1_op      = 0x08, dctr_op       = 0x09,
-       dctw_op       = 0x0a
-};
-
-/*
- * fmt field of cop1 opcodes.
- */
-enum cop1_fmt {
-       s_fmt, d_fmt, e_fmt, q_fmt,
-       w_fmt, l_fmt
-};
-
-/*
- * func field of cop1 instructions using d, s or w format.
- */
-enum cop1_sdw_func {
-       fadd_op      =  0x00, fsub_op      =  0x01,
-       fmul_op      =  0x02, fdiv_op      =  0x03,
-       fsqrt_op     =  0x04, fabs_op      =  0x05,
-       fmov_op      =  0x06, fneg_op      =  0x07,
-       froundl_op   =  0x08, ftruncl_op   =  0x09,
-       fceill_op    =  0x0a, ffloorl_op   =  0x0b,
-       fround_op    =  0x0c, ftrunc_op    =  0x0d,
-       fceil_op     =  0x0e, ffloor_op    =  0x0f,
-       fmovc_op     =  0x11, fmovz_op     =  0x12,
-       fmovn_op     =  0x13, frecip_op    =  0x15,
-       frsqrt_op    =  0x16, fcvts_op     =  0x20,
-       fcvtd_op     =  0x21, fcvte_op     =  0x22,
-       fcvtw_op     =  0x24, fcvtl_op     =  0x25,
-       fcmp_op      =  0x30
-};
-
-/*
- * func field of cop1x opcodes (MIPS IV).
- */
-enum cop1x_func {
-       lwxc1_op     =  0x00, ldxc1_op     =  0x01,
-       pfetch_op    =  0x07, swxc1_op     =  0x08,
-       sdxc1_op     =  0x09, madd_s_op    =  0x20,
-       madd_d_op    =  0x21, madd_e_op    =  0x22,
-       msub_s_op    =  0x28, msub_d_op    =  0x29,
-       msub_e_op    =  0x2a, nmadd_s_op   =  0x30,
-       nmadd_d_op   =  0x31, nmadd_e_op   =  0x32,
-       nmsub_s_op   =  0x38, nmsub_d_op   =  0x39,
-       nmsub_e_op   =  0x3a
-};
-
-/*
- * func field for mad opcodes (MIPS IV).
- */
-enum mad_func {
-       madd_fp_op      = 0x08, msub_fp_op      = 0x0a,
-       nmadd_fp_op     = 0x0c, nmsub_fp_op     = 0x0e
-};
-
-/*
- * func field for special3 lx opcodes (Cavium Octeon).
- */
-enum lx_func {
-       lwx_op  = 0x00,
-       lhx_op  = 0x04,
-       lbux_op = 0x06,
-       ldx_op  = 0x08,
-       lwux_op = 0x10,
-       lhux_op = 0x14,
-       lbx_op  = 0x16,
-};
-
-/*
- * Damn ...  bitfields depend from byteorder :-(
- */
-#ifdef __MIPSEB__
-struct j_format {      /* Jump format */
-       unsigned int opcode : 6;
-       unsigned int target : 26;
-};
-
-struct i_format {      /* Immediate format (addi, lw, ...) */
-       unsigned int opcode : 6;
-       unsigned int rs : 5;
-       unsigned int rt : 5;
-       signed int simmediate : 16;
-};
-
-struct u_format {      /* Unsigned immediate format (ori, xori, ...) */
-       unsigned int opcode : 6;
-       unsigned int rs : 5;
-       unsigned int rt : 5;
-       unsigned int uimmediate : 16;
-};
-
-struct c_format {      /* Cache (>= R6000) format */
-       unsigned int opcode : 6;
-       unsigned int rs : 5;
-       unsigned int c_op : 3;
-       unsigned int cache : 2;
-       unsigned int simmediate : 16;
-};
-
-struct r_format {      /* Register format */
-       unsigned int opcode : 6;
-       unsigned int rs : 5;
-       unsigned int rt : 5;
-       unsigned int rd : 5;
-       unsigned int re : 5;
-       unsigned int func : 6;
-};
-
-struct p_format {      /* Performance counter format (R10000) */
-       unsigned int opcode : 6;
-       unsigned int rs : 5;
-       unsigned int rt : 5;
-       unsigned int rd : 5;
-       unsigned int re : 5;
-       unsigned int func : 6;
-};
-
-struct f_format {      /* FPU register format */
-       unsigned int opcode : 6;
-       unsigned int : 1;
-       unsigned int fmt : 4;
-       unsigned int rt : 5;
-       unsigned int rd : 5;
-       unsigned int re : 5;
-       unsigned int func : 6;
-};
-
-struct ma_format {     /* FPU multiply and add format (MIPS IV) */
-       unsigned int opcode : 6;
-       unsigned int fr : 5;
-       unsigned int ft : 5;
-       unsigned int fs : 5;
-       unsigned int fd : 5;
-       unsigned int func : 4;
-       unsigned int fmt : 2;
-};
-
-struct b_format { /* BREAK and SYSCALL */
-       unsigned int opcode:6;
-       unsigned int code:20;
-       unsigned int func:6;
-};
-
-#elif defined(__MIPSEL__)
-
-struct j_format {      /* Jump format */
-       unsigned int target : 26;
-       unsigned int opcode : 6;
-};
-
-struct i_format {      /* Immediate format */
-       signed int simmediate : 16;
-       unsigned int rt : 5;
-       unsigned int rs : 5;
-       unsigned int opcode : 6;
-};
-
-struct u_format {      /* Unsigned immediate format */
-       unsigned int uimmediate : 16;
-       unsigned int rt : 5;
-       unsigned int rs : 5;
-       unsigned int opcode : 6;
-};
-
-struct c_format {      /* Cache (>= R6000) format */
-       unsigned int simmediate : 16;
-       unsigned int cache : 2;
-       unsigned int c_op : 3;
-       unsigned int rs : 5;
-       unsigned int opcode : 6;
-};
-
-struct r_format {      /* Register format */
-       unsigned int func : 6;
-       unsigned int re : 5;
-       unsigned int rd : 5;
-       unsigned int rt : 5;
-       unsigned int rs : 5;
-       unsigned int opcode : 6;
-};
-
-struct p_format {      /* Performance counter format (R10000) */
-       unsigned int func : 6;
-       unsigned int re : 5;
-       unsigned int rd : 5;
-       unsigned int rt : 5;
-       unsigned int rs : 5;
-       unsigned int opcode : 6;
-};
-
-struct f_format {      /* FPU register format */
-       unsigned int func : 6;
-       unsigned int re : 5;
-       unsigned int rd : 5;
-       unsigned int rt : 5;
-       unsigned int fmt : 4;
-       unsigned int : 1;
-       unsigned int opcode : 6;
-};
-
-struct ma_format {     /* FPU multiply and add format (MIPS IV) */
-       unsigned int fmt : 2;
-       unsigned int func : 4;
-       unsigned int fd : 5;
-       unsigned int fs : 5;
-       unsigned int ft : 5;
-       unsigned int fr : 5;
-       unsigned int opcode : 6;
-};
-
-struct b_format { /* BREAK and SYSCALL */
-       unsigned int func:6;
-       unsigned int code:20;
-       unsigned int opcode:6;
-};
-
-#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
-#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
-#endif
-
-union mips_instruction {
-       unsigned int word;
-       unsigned short halfword[2];
-       unsigned char byte[4];
-       struct j_format j_format;
-       struct i_format i_format;
-       struct u_format u_format;
-       struct c_format c_format;
-       struct r_format r_format;
-       struct p_format p_format;
-       struct f_format f_format;
-       struct ma_format ma_format;
-       struct b_format b_format;
-};
+#include <uapi/asm/inst.h>
 
 /* HACHACHAHCAHC ...  */
 
index ff2e034..1be1372 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright (C) 1994 - 2000, 06 Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2004, 2005  MIPS Technologies, Inc.  All rights reserved.
- *     Author: Maciej W. Rozycki <macro@mips.com>
+ *     Author: Maciej W. Rozycki <macro@mips.com>
  */
 #ifndef _ASM_IO_H
 #define _ASM_IO_H
@@ -253,9 +253,9 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
        __ioremap_mode((offset), (size), _CACHE_UNCACHED)
 
 /*
- * ioremap_cachable -   map bus memory into CPU space
- * @offset:         bus address of the memory
- * @size:           size of the resource to map
+ * ioremap_cachable -  map bus memory into CPU space
+ * @offset:        bus address of the memory
+ * @size:          size of the resource to map
  *
  * ioremap_nocache performs a platform specific sequence of operations to
  * make bus memory CPU accessible via the readb/readw/readl/writeb/
@@ -264,14 +264,14 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
  * address.
  *
  * This version of ioremap ensures that the memory is marked cachable by
- * the CPU.  Also enables full write-combining.  Useful for some
+ * the CPU.  Also enables full write-combining.         Useful for some
  * memory-like regions on I/O busses.
  */
 #define ioremap_cachable(offset, size)                                 \
        __ioremap_mode((offset), (size), _page_cachable_default)
 
 /*
- * These two are MIPS specific ioremap variant.  ioremap_cacheable_cow
+ * These two are MIPS specific ioremap variant.         ioremap_cacheable_cow
  * requests a cachable mapping, ioremap_uncached_accelerated requests a
  * mapping using the uncached accelerated mode which isn't supported on
  * all processors.
@@ -298,7 +298,7 @@ static inline void iounmap(const volatile void __iomem *addr)
 }
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-#define war_octeon_io_reorder_wmb()            wmb()
+#define war_octeon_io_reorder_wmb()            wmb()
 #else
 #define war_octeon_io_reorder_wmb()            do { } while (0)
 #endif
@@ -317,7 +317,7 @@ static inline void pfx##write##bwlq(type val,                               \
                                                                        \
        __val = pfx##ioswab##bwlq(__mem, val);                          \
                                                                        \
-       if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+       if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
                *__mem = __val;                                         \
        else if (cpu_has_64bits) {                                      \
                unsigned long __flags;                                  \
@@ -327,9 +327,9 @@ static inline void pfx##write##bwlq(type val,                               \
                        local_irq_save(__flags);                        \
                __asm__ __volatile__(                                   \
                        ".set   mips3"          "\t\t# __writeq""\n\t"  \
-                       "dsll32 %L0, %L0, 0"                    "\n\t"  \
-                       "dsrl32 %L0, %L0, 0"                    "\n\t"  \
-                       "dsll32 %M0, %M0, 0"                    "\n\t"  \
+                       "dsll32 %L0, %L0, 0"                    "\n\t"  \
+                       "dsrl32 %L0, %L0, 0"                    "\n\t"  \
+                       "dsll32 %M0, %M0, 0"                    "\n\t"  \
                        "or     %L0, %L0, %M0"                  "\n\t"  \
                        "sd     %L0, %2"                        "\n\t"  \
                        ".set   mips0"                          "\n"    \
@@ -348,7 +348,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem)        \
                                                                        \
        __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));    \
                                                                        \
-       if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+       if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
                __val = *__mem;                                         \
        else if (cpu_has_64bits) {                                      \
                unsigned long __flags;                                  \
@@ -356,9 +356,9 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem)        \
                if (irq)                                                \
                        local_irq_save(__flags);                        \
                __asm__ __volatile__(                                   \
-                       ".set   mips3"          "\t\t# __readq" "\n\t"  \
+                       ".set   mips3"          "\t\t# __readq" "\n\t"  \
                        "ld     %L0, %1"                        "\n\t"  \
-                       "dsra32 %M0, %L0, 0"                    "\n\t"  \
+                       "dsra32 %M0, %L0, 0"                    "\n\t"  \
                        "sll    %L0, %L0, 0"                    "\n\t"  \
                        ".set   mips0"                          "\n"    \
                        : "=r" (__val)                                  \
@@ -586,7 +586,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 
 #else /* Sane hardware */
 
-#define dma_cache_wback_inv(start,size)        \
+#define dma_cache_wback_inv(start,size) \
        do { (void) (start); (void) (size); } while (0)
 #define dma_cache_wback(start,size)    \
        do { (void) (start); (void) (size); } while (0)
index 7c36b0e..16c94a2 100644 (file)
@@ -74,7 +74,7 @@ struct sgi_crime {
 #define CRIME_RE_IDLE_E_INT            BIT(24)
 #define CRIME_RE_EMPTY_L_INT           BIT(25)
 #define CRIME_RE_FULL_L_INT            BIT(26)
-#define CRIME_RE_IDLE_L_INT                    BIT(27)
+#define CRIME_RE_IDLE_L_INT            BIT(27)
 #define CRIME_SOFT0_INT                        BIT(28)
 #define CRIME_SOFT1_INT                        BIT(29)
 #define CRIME_SOFT2_INT                        BIT(30)
@@ -118,7 +118,7 @@ struct sgi_crime {
 #define CRIME_MEM_REF_COUNTER_MASK     0x3ff           /* 10bit */
 
        volatile unsigned long mem_error_stat;
-#define CRIME_MEM_ERROR_STAT_MASK       0x0ff7ffff     /* 28-bit register */
+#define CRIME_MEM_ERROR_STAT_MASK      0x0ff7ffff      /* 28-bit register */
 #define CRIME_MEM_ERROR_MACE_ID                0x0000007f
 #define CRIME_MEM_ERROR_MACE_ACCESS    0x00000080
 #define CRIME_MEM_ERROR_RE_ID          0x00007f00
@@ -134,8 +134,8 @@ struct sgi_crime {
 #define CRIME_MEM_ERROR_MEM_ECC_RD     0x00800000
 #define CRIME_MEM_ERROR_MEM_ECC_RMW    0x01000000
 #define CRIME_MEM_ERROR_INV            0x0e000000
-#define CRIME_MEM_ERROR_INV_MEM_ADDR_RD        0x02000000
-#define CRIME_MEM_ERROR_INV_MEM_ADDR_WR        0x04000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_RD 0x02000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_WR 0x04000000
 #define CRIME_MEM_ERROR_INV_MEM_ADDR_RMW 0x08000000
 
        volatile unsigned long mem_error_addr;
index 85bc530..72e3368 100644 (file)
@@ -13,7 +13,7 @@
 
 /*
  * This list reflects the assignment of interrupt numbers to
- * interrupting events.  Order is fairly irrelevant to handling
+ * interrupting events.         Order is fairly irrelevant to handling
  * priority.  This differs from irix.
  */
 
index c523123..253ed7e 100644 (file)
@@ -250,12 +250,12 @@ struct mace_ps2 {
  * -> drivers/i2c/algos/i2c-algo-sgi.c */
 struct mace_i2c {
        volatile unsigned long config;
-#define MACEI2C_RESET           BIT(0)
-#define MACEI2C_FAST            BIT(1)
-#define MACEI2C_DATA_OVERRIDE   BIT(2)
-#define MACEI2C_CLOCK_OVERRIDE  BIT(3)
-#define MACEI2C_DATA_STATUS     BIT(4)
-#define MACEI2C_CLOCK_STATUS    BIT(5)
+#define MACEI2C_RESET          BIT(0)
+#define MACEI2C_FAST           BIT(1)
+#define MACEI2C_DATA_OVERRIDE  BIT(2)
+#define MACEI2C_CLOCK_OVERRIDE BIT(3)
+#define MACEI2C_DATA_STATUS    BIT(4)
+#define MACEI2C_CLOCK_STATUS   BIT(5)
        volatile unsigned long control;
        volatile unsigned long data;
 };
index 78dbb8a..7bc2cdb 100644 (file)
@@ -32,7 +32,7 @@ struct irqaction;
 
 extern unsigned long irq_hwmask[];
 extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
-                          unsigned long hwmask);
+                         unsigned long hwmask);
 
 static inline void smtc_im_ack_irq(unsigned int irq)
 {
@@ -60,7 +60,7 @@ extern void smtc_forward_irq(struct irq_data *d);
  * if option is enabled.
  *
  * Up through Linux 2.6.22 (at least) cpumask operations are very
- * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
+ * inefficient on MIPS.         Initial prototypes of SMTC IRQ affinity
  * used a "fast path" per-IRQ-descriptor cache of affinity information
  * to reduce latency.  As there is a project afoot to optimize the
  * cpumask implementations, this version is optimistically assuming
@@ -133,7 +133,7 @@ extern void free_irqno(unsigned int irq);
 
 /*
  * Before R2 the timer and performance counter interrupts were both fixed to
- * IE7.  Since R2 their number has to be read from the c0_intctl register.
+ * IE7.         Since R2 their number has to be read from the c0_intctl register.
  */
 #define CP0_LEGACY_COMPARE_IRQ 7
 #define CP0_LEGACY_PERFCNT_IRQ 7
index ef6a07c..3f11fdb 100644 (file)
@@ -17,4 +17,10 @@ extern void mips_cpu_irq_init(void);
 extern void rm7k_cpu_irq_init(void);
 extern void rm9k_cpu_irq_init(void);
 
+#ifdef CONFIG_IRQ_DOMAIN
+struct device_node;
+extern int mips_cpu_intc_init(struct device_node *of_node,
+                             struct device_node *parent);
+#endif
+
 #endif /* _ASM_IRQ_CPU_H */
index 24c6cda..b4af6eb 100644 (file)
@@ -18,7 +18,7 @@
  * kernel or user mode? (CP0_STATUS)
  */
 #define KU_MASK 0x08
-#define        KU_USER 0x08
+#define KU_USER 0x08
 #define KU_KERN 0x00
 
 #else
@@ -26,7 +26,7 @@
  * kernel or user mode?
  */
 #define KU_MASK 0x18
-#define        KU_USER 0x10
+#define KU_USER 0x10
 #define KU_KERN 0x00
 
 #endif
index 83f449d..a61970d 100644 (file)
@@ -16,7 +16,7 @@
  * instead of 0xe0000000.
  */
 
-#define JAZZ_LOCAL_IO_SPACE     0xe0000000
+#define JAZZ_LOCAL_IO_SPACE    0xe0000000
 
 /*
  * Revision numbers in PICA_ASIC_REVISION
  * 0xf0000001 - Rev2
  * 0xf0000002 - Rev3
  */
-#define PICA_ASIC_REVISION      0xe0000008
+#define PICA_ASIC_REVISION     0xe0000008
 
 /*
  * The segments of the seven segment LED are mapped
  * to the control bits as follows:
  *
- *         (7)
- *      ---------
- *      |       |
- *  (2) |       | (6)
- *      |  (1)  |
- *      ---------
- *      |       |
- *  (3) |       | (5)
- *      |  (4)  |
- *      --------- . (0)
+ *        (7)
+ *     ---------
+ *     |       |
+ *  (2) |      | (6)
+ *     |  (1)  |
+ *     ---------
+ *     |       |
+ *  (3) |      | (5)
+ *     |  (4)  |
+ *     --------- . (0)
  */
-#define PICA_LED                0xe000f000
+#define PICA_LED               0xe000f000
 
 /*
  * Some characters for the LED control registers
  * control each of the seven segments and the dot independently.
  * It's only a toy, anyway...
  */
-#define LED_DOT                 0x01
-#define LED_SPACE               0x00
-#define LED_0                   0xfc
-#define LED_1                   0x60
-#define LED_2                   0xda
-#define LED_3                   0xf2
-#define LED_4                   0x66
-#define LED_5                   0xb6
-#define LED_6                   0xbe
-#define LED_7                   0xe0
-#define LED_8                   0xfe
-#define LED_9                   0xf6
-#define LED_A                   0xee
-#define LED_b                   0x3e
-#define LED_C                   0x9c
-#define LED_d                   0x7a
-#define LED_E                   0x9e
-#define LED_F                   0x8e
+#define LED_DOT                        0x01
+#define LED_SPACE              0x00
+#define LED_0                  0xfc
+#define LED_1                  0x60
+#define LED_2                  0xda
+#define LED_3                  0xf2
+#define LED_4                  0x66
+#define LED_5                  0xb6
+#define LED_6                  0xbe
+#define LED_7                  0xe0
+#define LED_8                  0xfe
+#define LED_9                  0xf6
+#define LED_A                  0xee
+#define LED_b                  0x3e
+#define LED_C                  0x9c
+#define LED_d                  0x7a
+#define LED_E                  0x9e
+#define LED_F                  0x8e
 
 #ifndef __ASSEMBLY__
 
@@ -96,9 +96,9 @@ static __inline__ void pica_set_led(unsigned int bits)
  * This address is just a guess and seems to differ from
  * other mips machines such as RC3xxx...
  */
-#define JAZZ_KEYBOARD_ADDRESS   0xe0005000
-#define JAZZ_KEYBOARD_DATA      0xe0005000
-#define JAZZ_KEYBOARD_COMMAND   0xe0005001
+#define JAZZ_KEYBOARD_ADDRESS  0xe0005000
+#define JAZZ_KEYBOARD_DATA     0xe0005000
+#define JAZZ_KEYBOARD_COMMAND  0xe0005001
 
 #ifndef __ASSEMBLY__
 
@@ -119,28 +119,28 @@ typedef struct {
 /*
  * For now. Needs to be changed for RC3xxx support. See below.
  */
-#define keyboard_hardware       jazz_keyboard_hardware
+#define keyboard_hardware      jazz_keyboard_hardware
 
 #endif /* !__ASSEMBLY__ */
 
 /*
  * i8042 keyboard controller for most other Mips machines.
  */
-#define MIPS_KEYBOARD_ADDRESS   0xb9005000
-#define MIPS_KEYBOARD_DATA      0xb9005003
-#define MIPS_KEYBOARD_COMMAND   0xb9005007
+#define MIPS_KEYBOARD_ADDRESS  0xb9005000
+#define MIPS_KEYBOARD_DATA     0xb9005003
+#define MIPS_KEYBOARD_COMMAND  0xb9005007
 
 /*
  * Serial and parallel ports (WD 16C552) on the Mips JAZZ
  */
-#define JAZZ_SERIAL1_BASE       (unsigned int)0xe0006000
-#define JAZZ_SERIAL2_BASE       (unsigned int)0xe0007000
-#define JAZZ_PARALLEL_BASE      (unsigned int)0xe0008000
+#define JAZZ_SERIAL1_BASE      (unsigned int)0xe0006000
+#define JAZZ_SERIAL2_BASE      (unsigned int)0xe0007000
+#define JAZZ_PARALLEL_BASE     (unsigned int)0xe0008000
 
 /*
  * Dummy Device Address. Used in jazzdma.c
  */
-#define JAZZ_DUMMY_DEVICE       0xe000d000
+#define JAZZ_DUMMY_DEVICE      0xe000d000
 
 /*
  * JAZZ timer registers and interrupt no.
@@ -148,8 +148,8 @@ typedef struct {
  * cpu level 6, but to keep compatibility with PC stuff
  * it is remapped to vector 0. See arch/mips/kernel/entry.S.
  */
-#define JAZZ_TIMER_INTERVAL     0xe0000228
-#define JAZZ_TIMER_REGISTER     0xe0000230
+#define JAZZ_TIMER_INTERVAL    0xe0000228
+#define JAZZ_TIMER_REGISTER    0xe0000230
 
 /*
  * DRAM configuration register
@@ -176,13 +176,13 @@ typedef struct {
 #endif
 #endif /* !__ASSEMBLY__ */
 
-#define PICA_DRAM_CONFIG        0xe00fffe0
+#define PICA_DRAM_CONFIG       0xe00fffe0
 
 /*
  * JAZZ interrupt control registers
  */
-#define JAZZ_IO_IRQ_SOURCE      0xe0010000
-#define JAZZ_IO_IRQ_ENABLE      0xe0010002
+#define JAZZ_IO_IRQ_SOURCE     0xe0010000
+#define JAZZ_IO_IRQ_ENABLE     0xe0010002
 
 /*
  * JAZZ Interrupt Level definitions
@@ -190,20 +190,20 @@ typedef struct {
  * This is somewhat broken.  For reasons which nobody can remember anymore
  * we remap the Jazz interrupts to the usual ISA style interrupt numbers.
  */
-#define JAZZ_IRQ_START          24
-#define JAZZ_IRQ_END            (24 + 9)
-#define JAZZ_PARALLEL_IRQ       (JAZZ_IRQ_START + 0)
-#define JAZZ_FLOPPY_IRQ         (JAZZ_IRQ_START + 1)
-#define JAZZ_SOUND_IRQ          (JAZZ_IRQ_START + 2)
-#define JAZZ_VIDEO_IRQ          (JAZZ_IRQ_START + 3)
-#define JAZZ_ETHERNET_IRQ       (JAZZ_IRQ_START + 4)
-#define JAZZ_SCSI_IRQ           (JAZZ_IRQ_START + 5)
-#define JAZZ_KEYBOARD_IRQ       (JAZZ_IRQ_START + 6)
-#define JAZZ_MOUSE_IRQ          (JAZZ_IRQ_START + 7)
-#define JAZZ_SERIAL1_IRQ        (JAZZ_IRQ_START + 8)
-#define JAZZ_SERIAL2_IRQ        (JAZZ_IRQ_START + 9)
-
-#define JAZZ_TIMER_IRQ          (MIPS_CPU_IRQ_BASE+6)
+#define JAZZ_IRQ_START         24
+#define JAZZ_IRQ_END           (24 + 9)
+#define JAZZ_PARALLEL_IRQ      (JAZZ_IRQ_START + 0)
+#define JAZZ_FLOPPY_IRQ                (JAZZ_IRQ_START + 1)
+#define JAZZ_SOUND_IRQ         (JAZZ_IRQ_START + 2)
+#define JAZZ_VIDEO_IRQ         (JAZZ_IRQ_START + 3)
+#define JAZZ_ETHERNET_IRQ      (JAZZ_IRQ_START + 4)
+#define JAZZ_SCSI_IRQ          (JAZZ_IRQ_START + 5)
+#define JAZZ_KEYBOARD_IRQ      (JAZZ_IRQ_START + 6)
+#define JAZZ_MOUSE_IRQ         (JAZZ_IRQ_START + 7)
+#define JAZZ_SERIAL1_IRQ       (JAZZ_IRQ_START + 8)
+#define JAZZ_SERIAL2_IRQ       (JAZZ_IRQ_START + 9)
+
+#define JAZZ_TIMER_IRQ         (MIPS_CPU_IRQ_BASE+6)
 
 
 /*
@@ -211,46 +211,46 @@ typedef struct {
  * Note: Channels 4...7 are not used with respect to the Acer PICA-61
  * chipset which does not provide these DMA channels.
  */
-#define JAZZ_SCSI_DMA           0              /* SCSI */
-#define JAZZ_FLOPPY_DMA         1              /* FLOPPY */
-#define JAZZ_AUDIOL_DMA         2              /* AUDIO L */
-#define JAZZ_AUDIOR_DMA         3              /* AUDIO R */
+#define JAZZ_SCSI_DMA          0              /* SCSI */
+#define JAZZ_FLOPPY_DMA                1              /* FLOPPY */
+#define JAZZ_AUDIOL_DMA                2              /* AUDIO L */
+#define JAZZ_AUDIOR_DMA                3              /* AUDIO R */
 
 /*
  * JAZZ R4030 MCT_ADR chip (DMA controller)
  * Note: Virtual Addresses !
  */
 #define JAZZ_R4030_CONFIG      0xE0000000      /* R4030 config register */
-#define JAZZ_R4030_REVISION     0xE0000008     /* same as PICA_ASIC_REVISION */
+#define JAZZ_R4030_REVISION    0xE0000008      /* same as PICA_ASIC_REVISION */
 #define JAZZ_R4030_INV_ADDR    0xE0000010      /* Invalid Address register */
 
-#define JAZZ_R4030_TRSTBL_BASE  0xE0000018     /* Translation Table Base */
-#define JAZZ_R4030_TRSTBL_LIM   0xE0000020     /* Translation Table Limit */
-#define JAZZ_R4030_TRSTBL_INV   0xE0000028     /* Translation Table Invalidate */
+#define JAZZ_R4030_TRSTBL_BASE 0xE0000018      /* Translation Table Base */
+#define JAZZ_R4030_TRSTBL_LIM  0xE0000020      /* Translation Table Limit */
+#define JAZZ_R4030_TRSTBL_INV  0xE0000028      /* Translation Table Invalidate */
 
-#define JAZZ_R4030_CACHE_MTNC   0xE0000030     /* Cache Maintenance */
-#define JAZZ_R4030_R_FAIL_ADDR  0xE0000038     /* Remote Failed Address */
-#define JAZZ_R4030_M_FAIL_ADDR  0xE0000040     /* Memory Failed Address */
+#define JAZZ_R4030_CACHE_MTNC  0xE0000030      /* Cache Maintenance */
+#define JAZZ_R4030_R_FAIL_ADDR 0xE0000038      /* Remote Failed Address */
+#define JAZZ_R4030_M_FAIL_ADDR 0xE0000040      /* Memory Failed Address */
 
-#define JAZZ_R4030_CACHE_PTAG   0xE0000048     /* I/O Cache Physical Tag */
-#define JAZZ_R4030_CACHE_LTAG   0xE0000050     /* I/O Cache Logical Tag */
-#define JAZZ_R4030_CACHE_BMASK  0xE0000058     /* I/O Cache Byte Mask */
-#define JAZZ_R4030_CACHE_BWIN   0xE0000060     /* I/O Cache Buffer Window */
+#define JAZZ_R4030_CACHE_PTAG  0xE0000048      /* I/O Cache Physical Tag */
+#define JAZZ_R4030_CACHE_LTAG  0xE0000050      /* I/O Cache Logical Tag */
+#define JAZZ_R4030_CACHE_BMASK 0xE0000058      /* I/O Cache Byte Mask */
+#define JAZZ_R4030_CACHE_BWIN  0xE0000060      /* I/O Cache Buffer Window */
 
 /*
  * Remote Speed Registers.
  *
- *  0: free,      1: Ethernet,  2: SCSI,      3: Floppy,
- *  4: RTC,       5: Kb./Mouse  6: serial 1,  7: serial 2,
- *  8: parallel,  9: NVRAM,    10: CPU,      11: PROM,
+ *  0: free,     1: Ethernet,  2: SCSI,      3: Floppy,
+ *  4: RTC,      5: Kb./Mouse  6: serial 1,  7: serial 2,
+ *  8: parallel,  9: NVRAM,    10: CPU,             11: PROM,
  * 12: reserved, 13: free,     14: 7seg LED, 15: ???
  */
 #define JAZZ_R4030_REM_SPEED   0xE0000070      /* 16 Remote Speed Registers */
                                                /* 0xE0000070,78,80... 0xE00000E8 */
-#define JAZZ_R4030_IRQ_ENABLE   0xE00000E8     /* Internal Interrupt Enable */
-#define JAZZ_R4030_INVAL_ADDR   0xE0000010     /* Invalid address Register */
-#define JAZZ_R4030_IRQ_SOURCE   0xE0000200     /* Interrupt Source Register */
-#define JAZZ_R4030_I386_ERROR   0xE0000208     /* i386/EISA Bus Error */
+#define JAZZ_R4030_IRQ_ENABLE  0xE00000E8      /* Internal Interrupt Enable */
+#define JAZZ_R4030_INVAL_ADDR  0xE0000010      /* Invalid address Register */
+#define JAZZ_R4030_IRQ_SOURCE  0xE0000200      /* Interrupt Source Register */
+#define JAZZ_R4030_I386_ERROR  0xE0000208      /* i386/EISA Bus Error */
 
 /*
  * Virtual (E)ISA controller address
index 8bb37bb..2cefc3c 100644 (file)
@@ -10,7 +10,7 @@
 extern unsigned long vdma_alloc(unsigned long paddr, unsigned long size);
 extern int vdma_free(unsigned long laddr);
 extern int vdma_remap(unsigned long laddr, unsigned long paddr,
-                      unsigned long size);
+                     unsigned long size);
 extern unsigned long vdma_phys2log(unsigned long paddr);
 extern unsigned long vdma_log2phys(unsigned long laddr);
 extern void vdma_stats(void);          /* for debugging only */
@@ -35,14 +35,14 @@ extern int vdma_get_enable(int channel);
  * Macros to get page no. and offset of a given address
  * Note that VDMA_PAGE() works for physical addresses only
  */
-#define VDMA_PAGE(a)            ((unsigned int)(a) >> 12)
-#define VDMA_OFFSET(a)          ((unsigned int)(a) & (VDMA_PAGESIZE-1))
+#define VDMA_PAGE(a)           ((unsigned int)(a) >> 12)
+#define VDMA_OFFSET(a)         ((unsigned int)(a) & (VDMA_PAGESIZE-1))
 
 /*
  * error code returned by vdma_alloc()
  * (See also arch/mips/kernel/jazzdma.c)
  */
-#define VDMA_ERROR              0xffffffff
+#define VDMA_ERROR             0xffffffff
 
 /*
  * VDMA pagetable entry description
@@ -59,37 +59,37 @@ typedef volatile struct VDMA_PGTBL_ENTRY {
  */
 #define JAZZ_R4030_CHNL_MODE   0xE0000100      /* 8 DMA Channel Mode Registers, */
                                                /* 0xE0000100,120,140... */
-#define JAZZ_R4030_CHNL_ENABLE  0xE0000108     /* 8 DMA Channel Enable Regs, */
+#define JAZZ_R4030_CHNL_ENABLE 0xE0000108      /* 8 DMA Channel Enable Regs, */
                                                /* 0xE0000108,128,148... */
-#define JAZZ_R4030_CHNL_COUNT   0xE0000110     /* 8 DMA Channel Byte Cnt Regs, */
+#define JAZZ_R4030_CHNL_COUNT  0xE0000110      /* 8 DMA Channel Byte Cnt Regs, */
                                                /* 0xE0000110,130,150... */
 #define JAZZ_R4030_CHNL_ADDR   0xE0000118      /* 8 DMA Channel Address Regs, */
                                                /* 0xE0000118,138,158... */
 
 /* channel enable register bits */
 
-#define R4030_CHNL_ENABLE        (1<<0)
-#define R4030_CHNL_WRITE         (1<<1)
-#define R4030_TC_INTR            (1<<8)
-#define R4030_MEM_INTR           (1<<9)
-#define R4030_ADDR_INTR          (1<<10)
+#define R4030_CHNL_ENABLE       (1<<0)
+#define R4030_CHNL_WRITE        (1<<1)
+#define R4030_TC_INTR           (1<<8)
+#define R4030_MEM_INTR          (1<<9)
+#define R4030_ADDR_INTR                 (1<<10)
 
 /*
  * Channel mode register bits
  */
-#define R4030_MODE_ATIME_40      (0) /* device access time on remote bus */
-#define R4030_MODE_ATIME_80      (1)
-#define R4030_MODE_ATIME_120     (2)
-#define R4030_MODE_ATIME_160     (3)
-#define R4030_MODE_ATIME_200     (4)
-#define R4030_MODE_ATIME_240     (5)
-#define R4030_MODE_ATIME_280     (6)
-#define R4030_MODE_ATIME_320     (7)
-#define R4030_MODE_WIDTH_8       (1<<3)        /* device data bus width */
-#define R4030_MODE_WIDTH_16      (2<<3)
-#define R4030_MODE_WIDTH_32      (3<<3)
-#define R4030_MODE_INTR_EN       (1<<5)
-#define R4030_MODE_BURST         (1<<6)        /* Rev. 2 only */
-#define R4030_MODE_FAST_ACK      (1<<7)        /* Rev. 2 only */
+#define R4030_MODE_ATIME_40     (0) /* device access time on remote bus */
+#define R4030_MODE_ATIME_80     (1)
+#define R4030_MODE_ATIME_120    (2)
+#define R4030_MODE_ATIME_160    (3)
+#define R4030_MODE_ATIME_200    (4)
+#define R4030_MODE_ATIME_240    (5)
+#define R4030_MODE_ATIME_280    (6)
+#define R4030_MODE_ATIME_320    (7)
+#define R4030_MODE_WIDTH_8      (1<<3) /* device data bus width */
+#define R4030_MODE_WIDTH_16     (2<<3)
+#define R4030_MODE_WIDTH_32     (3<<3)
+#define R4030_MODE_INTR_EN      (1<<5)
+#define R4030_MODE_BURST        (1<<6) /* Rev. 2 only */
+#define R4030_MODE_FAST_ACK     (1<<7) /* Rev. 2 only */
 
 #endif /* _ASM_JAZZDMA_H */
index 58e91ed..c1909dc 100644 (file)
@@ -2,7 +2,7 @@
 #define _ASM_KMAP_TYPES_H
 
 #ifdef CONFIG_DEBUG_HIGHMEM
-#define  __WITH_KM_FENCE
+#define         __WITH_KM_FENCE
 #endif
 
 #include <asm-generic/kmap_types.h>
index 1fbbca0..daba1f9 100644 (file)
@@ -29,7 +29,7 @@
 #include <asm/kdebug.h>
 #include <asm/inst.h>
 
-#define  __ARCH_WANT_KPROBES_INSN_SLOT
+#define         __ARCH_WANT_KPROBES_INSN_SLOT
 
 struct kprobe;
 struct pt_regs;
index 3dac203..d918b82 100644 (file)
@@ -1,12 +1,12 @@
 #include <asm/addrspace.h>
 
 /* lasat 100 */
-#define AT93C_REG_100               KSEG1ADDR(0x1c810000)
-#define AT93C_RDATA_REG_100         AT93C_REG_100
-#define AT93C_RDATA_SHIFT_100       4
-#define AT93C_WDATA_SHIFT_100       4
-#define AT93C_CS_M_100              (1 << 5)
-#define AT93C_CLK_M_100             (1 << 3)
+#define AT93C_REG_100              KSEG1ADDR(0x1c810000)
+#define AT93C_RDATA_REG_100        AT93C_REG_100
+#define AT93C_RDATA_SHIFT_100      4
+#define AT93C_WDATA_SHIFT_100      4
+#define AT93C_CS_M_100             (1 << 5)
+#define AT93C_CLK_M_100                    (1 << 3)
 
 /* lasat 200 */
 #define AT93C_REG_200          KSEG1ADDR(0x11000000)
index e8ff70f..9e32b4d 100644 (file)
@@ -100,7 +100,7 @@ struct lasat_eeprom_struct_pre7 {
 
 /* Configuration descriptor encoding - see the doc for details */
 
-#define LASAT_W0_DSCTYPE(v)            (((v))         & 0xf)
+#define LASAT_W0_DSCTYPE(v)            (((v))         & 0xf)
 #define LASAT_W0_BMID(v)               (((v) >> 0x04) & 0xf)
 #define LASAT_W0_CPUTYPE(v)            (((v) >> 0x08) & 0xf)
 #define LASAT_W0_BUSSPEED(v)           (((v) >> 0x0c) & 0xf)
@@ -109,7 +109,7 @@ struct lasat_eeprom_struct_pre7 {
 #define LASAT_W0_SDRAMBANKS(v)         (((v) >> 0x18) & 0xf)
 #define LASAT_W0_L2CACHE(v)            (((v) >> 0x1c) & 0xf)
 
-#define LASAT_W1_EDHAC(v)              (((v))         & 0xf)
+#define LASAT_W1_EDHAC(v)              (((v))         & 0xf)
 #define LASAT_W1_HIFN(v)               (((v) >> 0x04) & 0x1)
 #define LASAT_W1_ISDN(v)               (((v) >> 0x05) & 0x1)
 #define LASAT_W1_IDE(v)                        (((v) >> 0x06) & 0x1)
@@ -239,7 +239,7 @@ static inline void lasat_ndelay(unsigned int ns)
        __delay(ns / lasat_ndelay_divider);
 }
 
-#define IS_LASAT_200()     (current_cpu_data.cputype == CPU_R5000)
+#define IS_LASAT_200()    (current_cpu_data.cputype == CPU_R5000)
 
 #endif /* !defined (_LANGUAGE_ASSEMBLY) */
 
@@ -247,11 +247,11 @@ static inline void lasat_ndelay(unsigned int ns)
 #define LASAT_SERVICEMODE_MAGIC_2     0xfedeabba
 
 /* Lasat 100 boards */
-#define LASAT_GT_BASE           (KSEG1ADDR(0x14000000))
+#define LASAT_GT_BASE          (KSEG1ADDR(0x14000000))
 
 /* Lasat 200 boards */
-#define Vrc5074_PHYS_BASE       0x1fa00000
-#define Vrc5074_BASE            (KSEG1ADDR(Vrc5074_PHYS_BASE))
-#define PCI_WINDOW1             0x1a000000
+#define Vrc5074_PHYS_BASE      0x1fa00000
+#define Vrc5074_BASE           (KSEG1ADDR(Vrc5074_PHYS_BASE))
+#define PCI_WINDOW1            0x1a000000
 
 #endif /* _LASAT_H */
index 1c37d70..a2f6c7a 100644 (file)
@@ -1,7 +1,7 @@
 #include <asm/lasat/lasat.h>
 
 /* Lasat 100 boards serial configuration */
-#define LASAT_BASE_BAUD_100            (7372800 / 16)
+#define LASAT_BASE_BAUD_100            (7372800 / 16)
 #define LASAT_UART_REGS_BASE_100       0x1c8b0000
 #define LASAT_UART_REGS_SHIFT_100      2
 #define LASATINT_UART_100              16
index 94fde8d..d44622c 100644 (file)
@@ -15,10 +15,10 @@ typedef struct
 #define LOCAL_INIT(i)  { ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)  atomic_long_read(&(l)->a)
-#define local_set(l, i)        atomic_long_set(&(l)->a, (i))
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
 
-#define local_add(i, l)        atomic_long_add((i), (&(l)->a))
-#define local_sub(i, l)        atomic_long_sub((i), (&(l)->a))
+#define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
 #define local_inc(l)   atomic_long_inc(&(l)->a)
 #define local_dec(l)   atomic_long_dec(&(l)->a)
 
index cabf862..e6eaf53 100644 (file)
@@ -9,7 +9,7 @@
 extern spinlock_t rtc_lock;
 
 struct m48t37_rtc {
-       volatile u8     pad[0x7ff0];    /* NVRAM */
+       volatile u8     pad[0x7ff0];    /* NVRAM */
        volatile u8     flags;
        volatile u8     century;
        volatile u8     alarm_sec;
index 07d3fad..a47ea0c 100644 (file)
@@ -40,9 +40,9 @@
 #define AR7_REGS_USB   (AR7_REGS_BASE + 0x1200)
 #define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
 #define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
-#define AR7_REGS_VLYNQ0        (AR7_REGS_BASE + 0x1800)
+#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
 #define AR7_REGS_DCL   (AR7_REGS_BASE + 0x1a00)
-#define AR7_REGS_VLYNQ1        (AR7_REGS_BASE + 0x1c00)
+#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
 #define AR7_REGS_MDIO  (AR7_REGS_BASE + 0x1e00)
 #define AR7_REGS_IRQ   (AR7_REGS_BASE + 0x2400)
 #define AR7_REGS_MAC1  (AR7_REGS_BASE + 0x2800)
@@ -52,7 +52,7 @@
 #define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
 
 /* Titan registers */
-#define TITAN_REGS_ESWITCH_BASE        (0x08640000)
+#define TITAN_REGS_ESWITCH_BASE (0x08640000)
 #define TITAN_REGS_MAC0                (TITAN_REGS_ESWITCH_BASE)
 #define TITAN_REGS_MAC1                (TITAN_REGS_ESWITCH_BASE + 0x0800)
 #define TITAN_REGS_MDIO                (TITAN_REGS_ESWITCH_BASE + 0x02000)
@@ -72,9 +72,9 @@
 
 /* GPIO control registers */
 #define AR7_GPIO_INPUT 0x0
-#define AR7_GPIO_OUTPUT        0x4
+#define AR7_GPIO_OUTPUT 0x4
 #define AR7_GPIO_DIR   0x8
-#define AR7_GPIO_ENABLE        0xc
+#define AR7_GPIO_ENABLE 0xc
 #define TITAN_GPIO_INPUT_0     0x0
 #define TITAN_GPIO_INPUT_1     0x4
 #define TITAN_GPIO_OUTPUT_0    0x8
 #define AR7_CHIP_7200  0x2b
 #define AR7_CHIP_7300  0x05
 #define AR7_CHIP_TITAN 0x07
-#define TITAN_CHIP_1050        0x0f
-#define TITAN_CHIP_1055        0x0e
-#define TITAN_CHIP_1056        0x0d
-#define TITAN_CHIP_1060        0x07
+#define TITAN_CHIP_1050 0x0f
+#define TITAN_CHIP_1055 0x0e
+#define TITAN_CHIP_1056 0x0d
+#define TITAN_CHIP_1060 0x07
 
 /* Interrupts */
 #define AR7_IRQ_UART0  15
index 39e9757..7ad10e3 100644 (file)
@@ -9,7 +9,7 @@
 #ifndef __ASM_AR7_IRQ_H
 #define __ASM_AR7_IRQ_H
 
-#define NR_IRQS        256
+#define NR_IRQS 256
 
 #include_next <irq.h>
 
index a5e0f17..b86a125 100644 (file)
 #define AR71XX_UART_SIZE       0x100
 #define AR71XX_USB_CTRL_BASE   (AR71XX_APB_BASE + 0x00030000)
 #define AR71XX_USB_CTRL_SIZE   0x100
-#define AR71XX_GPIO_BASE        (AR71XX_APB_BASE + 0x00040000)
-#define AR71XX_GPIO_SIZE        0x100
+#define AR71XX_GPIO_BASE       (AR71XX_APB_BASE + 0x00040000)
+#define AR71XX_GPIO_SIZE       0x100
 #define AR71XX_PLL_BASE                (AR71XX_APB_BASE + 0x00050000)
 #define AR71XX_PLL_SIZE                0x100
 #define AR71XX_RESET_BASE      (AR71XX_APB_BASE + 0x00060000)
 #define AR71XX_RESET_SIZE      0x100
 
+#define AR71XX_PCI_MEM_BASE    0x10000000
+#define AR71XX_PCI_MEM_SIZE    0x07000000
+
+#define AR71XX_PCI_WIN0_OFFS   0x10000000
+#define AR71XX_PCI_WIN1_OFFS   0x11000000
+#define AR71XX_PCI_WIN2_OFFS   0x12000000
+#define AR71XX_PCI_WIN3_OFFS   0x13000000
+#define AR71XX_PCI_WIN4_OFFS   0x14000000
+#define AR71XX_PCI_WIN5_OFFS   0x15000000
+#define AR71XX_PCI_WIN6_OFFS   0x16000000
+#define AR71XX_PCI_WIN7_OFFS   0x07000000
+
+#define AR71XX_PCI_CFG_BASE    \
+       (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
+#define AR71XX_PCI_CFG_SIZE    0x100
+
 #define AR7240_USB_CTRL_BASE   (AR71XX_APB_BASE + 0x00030000)
 #define AR7240_USB_CTRL_SIZE   0x100
 #define AR7240_OHCI_BASE       0x1b000000
 #define AR7240_OHCI_SIZE       0x1000
 
+#define AR724X_PCI_MEM_BASE    0x10000000
+#define AR724X_PCI_MEM_SIZE    0x04000000
+
+#define AR724X_PCI_CFG_BASE    0x14000000
+#define AR724X_PCI_CFG_SIZE    0x1000
+#define AR724X_PCI_CRP_BASE    (AR71XX_APB_BASE + 0x000c0000)
+#define AR724X_PCI_CRP_SIZE    0x1000
+#define AR724X_PCI_CTRL_BASE   (AR71XX_APB_BASE + 0x000f0000)
+#define AR724X_PCI_CTRL_SIZE   0x100
+
 #define AR724X_EHCI_BASE       0x1b000000
 #define AR724X_EHCI_SIZE       0x1000
 
 #define AR934X_SRIF_BASE       (AR71XX_APB_BASE + 0x00116000)
 #define AR934X_SRIF_SIZE       0x1000
 
+#define QCA955X_PCI_MEM_BASE0  0x10000000
+#define QCA955X_PCI_MEM_BASE1  0x12000000
+#define QCA955X_PCI_MEM_SIZE   0x02000000
+#define QCA955X_PCI_CFG_BASE0  0x14000000
+#define QCA955X_PCI_CFG_BASE1  0x16000000
+#define QCA955X_PCI_CFG_SIZE   0x1000
+#define QCA955X_PCI_CRP_BASE0  (AR71XX_APB_BASE + 0x000c0000)
+#define QCA955X_PCI_CRP_BASE1  (AR71XX_APB_BASE + 0x00250000)
+#define QCA955X_PCI_CRP_SIZE   0x1000
+#define QCA955X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA955X_PCI_CTRL_SIZE  0x100
+
+#define QCA955X_WMAC_BASE      (AR71XX_APB_BASE + 0x00100000)
+#define QCA955X_WMAC_SIZE      0x20000
+#define QCA955X_EHCI0_BASE     0x1b000000
+#define QCA955X_EHCI1_BASE     0x1b400000
+#define QCA955X_EHCI_SIZE      0x1000
+
 /*
  * DDR_CTRL block
  */
 #define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
 #define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
 
+#define QCA955X_PLL_CPU_CONFIG_REG             0x00
+#define QCA955X_PLL_DDR_CONFIG_REG             0x04
+#define QCA955X_PLL_CLK_CTRL_REG               0x08
+
+#define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT     0
+#define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK      0x3f
+#define QCA955X_PLL_CPU_CONFIG_NINT_SHIFT      6
+#define QCA955X_PLL_CPU_CONFIG_NINT_MASK       0x3f
+#define QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT    12
+#define QCA955X_PLL_CPU_CONFIG_REFDIV_MASK     0x1f
+#define QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT    19
+#define QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK     0x3
+
+#define QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT     0
+#define QCA955X_PLL_DDR_CONFIG_NFRAC_MASK      0x3ff
+#define QCA955X_PLL_DDR_CONFIG_NINT_SHIFT      10
+#define QCA955X_PLL_DDR_CONFIG_NINT_MASK       0x3f
+#define QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT    16
+#define QCA955X_PLL_DDR_CONFIG_REFDIV_MASK     0x1f
+#define QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT    23
+#define QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK     0x7
+
+#define QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS            BIT(2)
+#define QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS            BIT(3)
+#define QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS            BIT(4)
+#define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT                5
+#define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK         0x1f
+#define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT                10
+#define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK         0x1f
+#define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT                15
+#define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK         0x1f
+#define QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL                BIT(20)
+#define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL                BIT(21)
+#define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL                BIT(24)
+
 /*
  * USB_CONFIG block
  */
 #define AR934X_RESET_REG_BOOTSTRAP             0xb0
 #define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS  0xac
 
+#define QCA955X_RESET_REG_RESET_MODULE         0x1c
+#define QCA955X_RESET_REG_BOOTSTRAP            0xb0
+#define QCA955X_RESET_REG_EXT_INT_STATUS       0xac
+
 #define MISC_INT_ETHSW                 BIT(12)
 #define MISC_INT_TIMER4                        BIT(10)
 #define MISC_INT_TIMER3                        BIT(9)
 #define AR934X_BOOTSTRAP_EJTAG_MODE    BIT(5)
 #define AR934X_BOOTSTRAP_REF_CLK_40    BIT(4)
 #define AR934X_BOOTSTRAP_BOOT_FROM_SPI BIT(2)
-#define AR934X_BOOTSTRAP_SDRAM_DISABLED        BIT(1)
+#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
 #define AR934X_BOOTSTRAP_DDR1          BIT(0)
 
+#define QCA955X_BOOTSTRAP_REF_CLK_40   BIT(4)
+
 #define AR934X_PCIE_WMAC_INT_WMAC_MISC         BIT(0)
 #define AR934X_PCIE_WMAC_INT_WMAC_TX           BIT(1)
 #define AR934X_PCIE_WMAC_INT_WMAC_RXLP         BIT(2)
         AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
         AR934X_PCIE_WMAC_INT_PCIE_RC3)
 
+#define QCA955X_EXT_INT_WMAC_MISC              BIT(0)
+#define QCA955X_EXT_INT_WMAC_TX                        BIT(1)
+#define QCA955X_EXT_INT_WMAC_RXLP              BIT(2)
+#define QCA955X_EXT_INT_WMAC_RXHP              BIT(3)
+#define QCA955X_EXT_INT_PCIE_RC1               BIT(4)
+#define QCA955X_EXT_INT_PCIE_RC1_INT0          BIT(5)
+#define QCA955X_EXT_INT_PCIE_RC1_INT1          BIT(6)
+#define QCA955X_EXT_INT_PCIE_RC1_INT2          BIT(7)
+#define QCA955X_EXT_INT_PCIE_RC1_INT3          BIT(8)
+#define QCA955X_EXT_INT_PCIE_RC2               BIT(12)
+#define QCA955X_EXT_INT_PCIE_RC2_INT0          BIT(13)
+#define QCA955X_EXT_INT_PCIE_RC2_INT1          BIT(14)
+#define QCA955X_EXT_INT_PCIE_RC2_INT2          BIT(15)
+#define QCA955X_EXT_INT_PCIE_RC2_INT3          BIT(16)
+#define QCA955X_EXT_INT_USB1                   BIT(24)
+#define QCA955X_EXT_INT_USB2                   BIT(28)
+
+#define QCA955X_EXT_INT_WMAC_ALL \
+       (QCA955X_EXT_INT_WMAC_MISC | QCA955X_EXT_INT_WMAC_TX | \
+        QCA955X_EXT_INT_WMAC_RXLP | QCA955X_EXT_INT_WMAC_RXHP)
+
+#define QCA955X_EXT_INT_PCIE_RC1_ALL \
+       (QCA955X_EXT_INT_PCIE_RC1 | QCA955X_EXT_INT_PCIE_RC1_INT0 | \
+        QCA955X_EXT_INT_PCIE_RC1_INT1 | QCA955X_EXT_INT_PCIE_RC1_INT2 | \
+        QCA955X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA955X_EXT_INT_PCIE_RC2_ALL \
+       (QCA955X_EXT_INT_PCIE_RC2 | QCA955X_EXT_INT_PCIE_RC2_INT0 | \
+        QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \
+        QCA955X_EXT_INT_PCIE_RC2_INT3)
+
 #define REV_ID_MAJOR_MASK              0xfff0
 #define REV_ID_MAJOR_AR71XX            0x00a0
 #define REV_ID_MAJOR_AR913X            0x00b0
 #define REV_ID_MAJOR_AR9341            0x0120
 #define REV_ID_MAJOR_AR9342            0x1120
 #define REV_ID_MAJOR_AR9344            0x2120
+#define REV_ID_MAJOR_QCA9556           0x0130
+#define REV_ID_MAJOR_QCA9558           0x1130
 
 #define AR71XX_REV_ID_MINOR_MASK       0x3
 #define AR71XX_REV_ID_MINOR_AR7130     0x0
 
 #define AR724X_REV_ID_REVISION_MASK    0x3
 
-#define AR934X_REV_ID_REVISION_MASK     0xf
+#define AR934X_REV_ID_REVISION_MASK    0xf
+
+#define QCA955X_REV_ID_REVISION_MASK   0xf
 
 /*
  * SPI block
 #define AR71XX_GPIO_REG_INT_ENABLE     0x24
 #define AR71XX_GPIO_REG_FUNC           0x28
 
+#define AR934X_GPIO_REG_FUNC           0x6c
+
 #define AR71XX_GPIO_COUNT              16
 #define AR7240_GPIO_COUNT              18
 #define AR7241_GPIO_COUNT              20
 #define AR913X_GPIO_COUNT              22
 #define AR933X_GPIO_COUNT              30
 #define AR934X_GPIO_COUNT              23
+#define QCA955X_GPIO_COUNT             24
 
 /*
  * SRIF block
index 5273055..c2917b3 100644 (file)
 
 #define AR933X_UART_CS_PARITY_S                0
 #define AR933X_UART_CS_PARITY_M                0x3
-#define   AR933X_UART_CS_PARITY_NONE   0
-#define   AR933X_UART_CS_PARITY_ODD    1
-#define   AR933X_UART_CS_PARITY_EVEN   2
+#define          AR933X_UART_CS_PARITY_NONE    0
+#define          AR933X_UART_CS_PARITY_ODD     1
+#define          AR933X_UART_CS_PARITY_EVEN    2
 #define AR933X_UART_CS_IF_MODE_S       2
 #define AR933X_UART_CS_IF_MODE_M       0x3
-#define   AR933X_UART_CS_IF_MODE_NONE  0
-#define   AR933X_UART_CS_IF_MODE_DTE   1
-#define   AR933X_UART_CS_IF_MODE_DCE   2
+#define          AR933X_UART_CS_IF_MODE_NONE   0
+#define          AR933X_UART_CS_IF_MODE_DTE    1
+#define          AR933X_UART_CS_IF_MODE_DCE    2
 #define AR933X_UART_CS_FLOW_CTRL_S     4
 #define AR933X_UART_CS_FLOW_CTRL_M     0x3
 #define AR933X_UART_CS_DMA_EN          BIT(6)
index 4f248c3..1557934 100644 (file)
@@ -32,6 +32,8 @@ enum ath79_soc_type {
        ATH79_SOC_AR9341,
        ATH79_SOC_AR9342,
        ATH79_SOC_AR9344,
+       ATH79_SOC_QCA9556,
+       ATH79_SOC_QCA9558,
 };
 
 extern enum ath79_soc_type ath79_soc;
@@ -98,6 +100,21 @@ static inline int soc_is_ar934x(void)
        return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
 }
 
+static inline int soc_is_qca9556(void)
+{
+       return ath79_soc == ATH79_SOC_QCA9556;
+}
+
+static inline int soc_is_qca9558(void)
+{
+       return ath79_soc == ATH79_SOC_QCA9558;
+}
+
+static inline int soc_is_qca955x(void)
+{
+       return soc_is_qca9556() || soc_is_qca9558();
+}
+
 extern void __iomem *ath79_ddr_base;
 extern void __iomem *ath79_pll_base;
 extern void __iomem *ath79_reset_base;
index ea4b66d..ddb947e 100644 (file)
@@ -49,7 +49,7 @@
 #define cpu_has_64bits         0
 #define cpu_has_64bit_zero_reg 0
 #define cpu_has_64bit_gp_regs  0
-#define cpu_has_64bit_addresses        0
+#define cpu_has_64bit_addresses 0
 
 #define cpu_dcache_line_size() 32
 #define cpu_icache_line_size() 32
index 0968f69..5c9ca76 100644 (file)
 #define __ASM_MACH_ATH79_IRQ_H
 
 #define MIPS_CPU_IRQ_BASE      0
-#define NR_IRQS                        48
+#define NR_IRQS                        51
+
+#define ATH79_CPU_IRQ(_x)      (MIPS_CPU_IRQ_BASE + (_x))
 
 #define ATH79_MISC_IRQ_BASE    8
 #define ATH79_MISC_IRQ_COUNT   32
+#define ATH79_MISC_IRQ(_x)     (ATH79_MISC_IRQ_BASE + (_x))
 
 #define ATH79_PCI_IRQ_BASE     (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
 #define ATH79_PCI_IRQ_COUNT    6
 #define ATH79_IP2_IRQ_COUNT    2
 #define ATH79_IP2_IRQ(_x)      (ATH79_IP2_IRQ_BASE + (_x))
 
-#define ATH79_CPU_IRQ_IP2      (MIPS_CPU_IRQ_BASE + 2)
-#define ATH79_CPU_IRQ_USB      (MIPS_CPU_IRQ_BASE + 3)
-#define ATH79_CPU_IRQ_GE0      (MIPS_CPU_IRQ_BASE + 4)
-#define ATH79_CPU_IRQ_GE1      (MIPS_CPU_IRQ_BASE + 5)
-#define ATH79_CPU_IRQ_MISC     (MIPS_CPU_IRQ_BASE + 6)
-#define ATH79_CPU_IRQ_TIMER    (MIPS_CPU_IRQ_BASE + 7)
-
-#define ATH79_MISC_IRQ_TIMER   (ATH79_MISC_IRQ_BASE + 0)
-#define ATH79_MISC_IRQ_ERROR   (ATH79_MISC_IRQ_BASE + 1)
-#define ATH79_MISC_IRQ_GPIO    (ATH79_MISC_IRQ_BASE + 2)
-#define ATH79_MISC_IRQ_UART    (ATH79_MISC_IRQ_BASE + 3)
-#define ATH79_MISC_IRQ_WDOG    (ATH79_MISC_IRQ_BASE + 4)
-#define ATH79_MISC_IRQ_PERFC   (ATH79_MISC_IRQ_BASE + 5)
-#define ATH79_MISC_IRQ_OHCI    (ATH79_MISC_IRQ_BASE + 6)
-#define ATH79_MISC_IRQ_DMA     (ATH79_MISC_IRQ_BASE + 7)
-#define ATH79_MISC_IRQ_TIMER2  (ATH79_MISC_IRQ_BASE + 8)
-#define ATH79_MISC_IRQ_TIMER3  (ATH79_MISC_IRQ_BASE + 9)
-#define ATH79_MISC_IRQ_TIMER4  (ATH79_MISC_IRQ_BASE + 10)
-#define ATH79_MISC_IRQ_ETHSW   (ATH79_MISC_IRQ_BASE + 12)
+#define ATH79_IP3_IRQ_BASE     (ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT)
+#define ATH79_IP3_IRQ_COUNT     3
+#define ATH79_IP3_IRQ(_x)       (ATH79_IP3_IRQ_BASE + (_x))
 
 #include_next <irq.h>
 
diff --git a/arch/mips/include/asm/mach-ath79/pci.h b/arch/mips/include/asm/mach-ath79/pci.h
deleted file mode 100644 (file)
index 7868f7f..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- *  Atheros AR71XX/AR724X PCI support
- *
- *  Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_ATH79_PCI_H
-#define __ASM_MACH_ATH79_PCI_H
-
-#if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX)
-int ar71xx_pcibios_init(void);
-#else
-static inline int ar71xx_pcibios_init(void) { return 0; }
-#endif
-
-#if defined(CONFIG_PCI_AR724X)
-int ar724x_pcibios_init(int irq);
-#else
-static inline int ar724x_pcibios_init(int irq) { return 0; }
-#endif
-
-#endif /* __ASM_MACH_ATH79_PCI_H */
index 569828d..3e11a46 100644 (file)
@@ -349,7 +349,7 @@ extern void au1300_vss_block_control(int block, int enable);
 #define AU1000_INTC0_INT_LAST  (AU1000_INTC0_INT_BASE + 31)
 #define AU1000_INTC1_INT_BASE  (AU1000_INTC0_INT_LAST + 1)
 #define AU1000_INTC1_INT_LAST  (AU1000_INTC1_INT_BASE + 31)
-#define AU1000_MAX_INTR        AU1000_INTC1_INT_LAST
+#define AU1000_MAX_INTR                AU1000_INTC1_INT_LAST
 
 /* Au1300-style (GPIC): 1 controller with up to 128 sources */
 #define ALCHEMY_GPIC_INT_BASE  (MIPS_CPU_IRQ_BASE + 8)
@@ -589,7 +589,7 @@ enum soc_au1550_ints {
        AU1550_GPIO14_INT,
        AU1550_GPIO15_INT,
        AU1550_GPIO200_INT,
-       AU1550_GPIO201_205_INT, /* Logical or of GPIO201:205 */
+       AU1550_GPIO201_205_INT, /* Logical or of GPIO201:205 */
        AU1550_GPIO16_INT,
        AU1550_GPIO17_INT,
        AU1550_GPIO20_INT,
@@ -603,7 +603,7 @@ enum soc_au1550_ints {
        AU1550_GPIO28_INT,
        AU1550_GPIO206_INT,
        AU1550_GPIO207_INT,
-       AU1550_GPIO208_215_INT, /* Logical or of GPIO208:215 */
+       AU1550_GPIO208_215_INT, /* Logical or of GPIO208:215 */
 };
 
 enum soc_au1200_ints {
@@ -636,7 +636,7 @@ enum soc_au1200_ints {
        AU1200_GPIO205_INT,
        AU1200_GPIO206_INT,
        AU1200_GPIO207_INT,
-       AU1200_GPIO208_215_INT, /* Logical OR of 208:215 */
+       AU1200_GPIO208_215_INT, /* Logical OR of 208:215 */
        AU1200_USB_INT,
        AU1200_LCD_INT,
        AU1200_MAE_BOTH_INT,
@@ -823,7 +823,7 @@ enum soc_au1200_ints {
 #define GPIC_GPIO_TO_BIT(gpio) \
        (1 << ((gpio) & 0x1f))
 
-#define GPIC_GPIO_BANKOFF(gpio)        \
+#define GPIC_GPIO_BANKOFF(gpio) \
        (((gpio) >> 5) * 4)
 
 /* Pin Control bits: who owns the pin, what does it do */
@@ -958,32 +958,32 @@ enum soc_au1200_ints {
 #define MEM_STSTAT             0xB4001104
 
 #define MEM_STNAND_CMD         0x0
-#define MEM_STNAND_ADDR        0x4
-#define MEM_STNAND_DATA        0x20
+#define MEM_STNAND_ADDR                0x4
+#define MEM_STNAND_DATA                0x20
 
 
 /* Programmable Counters 0 and 1 */
 #define SYS_BASE               0xB1900000
 #define SYS_COUNTER_CNTRL      (SYS_BASE + 0x14)
-#  define SYS_CNTRL_E1S        (1 << 23)
-#  define SYS_CNTRL_T1S        (1 << 20)
-#  define SYS_CNTRL_M21        (1 << 19)
-#  define SYS_CNTRL_M11        (1 << 18)
-#  define SYS_CNTRL_M01        (1 << 17)
-#  define SYS_CNTRL_C1S        (1 << 16)
+#  define SYS_CNTRL_E1S                (1 << 23)
+#  define SYS_CNTRL_T1S                (1 << 20)
+#  define SYS_CNTRL_M21                (1 << 19)
+#  define SYS_CNTRL_M11                (1 << 18)
+#  define SYS_CNTRL_M01                (1 << 17)
+#  define SYS_CNTRL_C1S                (1 << 16)
 #  define SYS_CNTRL_BP         (1 << 14)
-#  define SYS_CNTRL_EN1        (1 << 13)
-#  define SYS_CNTRL_BT1        (1 << 12)
-#  define SYS_CNTRL_EN0        (1 << 11)
-#  define SYS_CNTRL_BT0        (1 << 10)
+#  define SYS_CNTRL_EN1                (1 << 13)
+#  define SYS_CNTRL_BT1                (1 << 12)
+#  define SYS_CNTRL_EN0                (1 << 11)
+#  define SYS_CNTRL_BT0                (1 << 10)
 #  define SYS_CNTRL_E0         (1 << 8)
-#  define SYS_CNTRL_E0S        (1 << 7)
-#  define SYS_CNTRL_32S        (1 << 5)
-#  define SYS_CNTRL_T0S        (1 << 4)
-#  define SYS_CNTRL_M20        (1 << 3)
-#  define SYS_CNTRL_M10        (1 << 2)
-#  define SYS_CNTRL_M00        (1 << 1)
-#  define SYS_CNTRL_C0S        (1 << 0)
+#  define SYS_CNTRL_E0S                (1 << 7)
+#  define SYS_CNTRL_32S                (1 << 5)
+#  define SYS_CNTRL_T0S                (1 << 4)
+#  define SYS_CNTRL_M20                (1 << 3)
+#  define SYS_CNTRL_M10                (1 << 2)
+#  define SYS_CNTRL_M00                (1 << 1)
+#  define SYS_CNTRL_C0S                (1 << 0)
 
 /* Programmable Counter 0 Registers */
 #define SYS_TOYTRIM            (SYS_BASE + 0)
@@ -1003,33 +1003,33 @@ enum soc_au1200_ints {
 
 /* I2S Controller */
 #define I2S_DATA               0xB1000000
-#  define I2S_DATA_MASK        0xffffff
+#  define I2S_DATA_MASK                0xffffff
 #define I2S_CONFIG             0xB1000004
-#  define I2S_CONFIG_XU        (1 << 25)
-#  define I2S_CONFIG_XO        (1 << 24)
-#  define I2S_CONFIG_RU        (1 << 23)
-#  define I2S_CONFIG_RO        (1 << 22)
-#  define I2S_CONFIG_TR        (1 << 21)
-#  define I2S_CONFIG_TE        (1 << 20)
-#  define I2S_CONFIG_TF        (1 << 19)
-#  define I2S_CONFIG_RR        (1 << 18)
-#  define I2S_CONFIG_RE        (1 << 17)
-#  define I2S_CONFIG_RF        (1 << 16)
-#  define I2S_CONFIG_PD        (1 << 11)
-#  define I2S_CONFIG_LB        (1 << 10)
-#  define I2S_CONFIG_IC        (1 << 9)
+#  define I2S_CONFIG_XU                (1 << 25)
+#  define I2S_CONFIG_XO                (1 << 24)
+#  define I2S_CONFIG_RU                (1 << 23)
+#  define I2S_CONFIG_RO                (1 << 22)
+#  define I2S_CONFIG_TR                (1 << 21)
+#  define I2S_CONFIG_TE                (1 << 20)
+#  define I2S_CONFIG_TF                (1 << 19)
+#  define I2S_CONFIG_RR                (1 << 18)
+#  define I2S_CONFIG_RE                (1 << 17)
+#  define I2S_CONFIG_RF                (1 << 16)
+#  define I2S_CONFIG_PD                (1 << 11)
+#  define I2S_CONFIG_LB                (1 << 10)
+#  define I2S_CONFIG_IC                (1 << 9)
 #  define I2S_CONFIG_FM_BIT    7
 #  define I2S_CONFIG_FM_MASK   (0x3 << I2S_CONFIG_FM_BIT)
 #    define I2S_CONFIG_FM_I2S  (0x0 << I2S_CONFIG_FM_BIT)
 #    define I2S_CONFIG_FM_LJ   (0x1 << I2S_CONFIG_FM_BIT)
 #    define I2S_CONFIG_FM_RJ   (0x2 << I2S_CONFIG_FM_BIT)
-#  define I2S_CONFIG_TN        (1 << 6)
-#  define I2S_CONFIG_RN        (1 << 5)
+#  define I2S_CONFIG_TN                (1 << 6)
+#  define I2S_CONFIG_RN                (1 << 5)
 #  define I2S_CONFIG_SZ_BIT    0
 #  define I2S_CONFIG_SZ_MASK   (0x1F << I2S_CONFIG_SZ_BIT)
 
 #define I2S_CONTROL            0xB1000008
-#  define I2S_CONTROL_D        (1 << 1)
+#  define I2S_CONTROL_D                (1 << 1)
 #  define I2S_CONTROL_CE       (1 << 0)
 
 
@@ -1037,16 +1037,16 @@ enum soc_au1200_ints {
 
 /* 4 byte offsets from AU1000_ETH_BASE */
 #define MAC_CONTROL            0x0
-#  define MAC_RX_ENABLE        (1 << 2)
-#  define MAC_TX_ENABLE        (1 << 3)
-#  define MAC_DEF_CHECK        (1 << 5)
-#  define MAC_SET_BL(X)        (((X) & 0x3) << 6)
+#  define MAC_RX_ENABLE                (1 << 2)
+#  define MAC_TX_ENABLE                (1 << 3)
+#  define MAC_DEF_CHECK                (1 << 5)
+#  define MAC_SET_BL(X)                (((X) & 0x3) << 6)
 #  define MAC_AUTO_PAD         (1 << 8)
 #  define MAC_DISABLE_RETRY    (1 << 10)
 #  define MAC_DISABLE_BCAST    (1 << 11)
 #  define MAC_LATE_COL         (1 << 12)
-#  define MAC_HASH_MODE        (1 << 13)
-#  define MAC_HASH_ONLY        (1 << 15)
+#  define MAC_HASH_MODE                (1 << 13)
+#  define MAC_HASH_ONLY                (1 << 15)
 #  define MAC_PASS_ALL         (1 << 16)
 #  define MAC_INVERSE_FILTER   (1 << 17)
 #  define MAC_PROMISCUOUS      (1 << 18)
@@ -1083,9 +1083,9 @@ enum soc_au1200_ints {
 #  define MAC_EN_RESET0                (1 << 1)
 #  define MAC_EN_TOSS          (0 << 2)
 #  define MAC_EN_CACHEABLE     (1 << 3)
-#  define MAC_EN_RESET1        (1 << 4)
-#  define MAC_EN_RESET2        (1 << 5)
-#  define MAC_DMA_RESET        (1 << 6)
+#  define MAC_EN_RESET1                (1 << 4)
+#  define MAC_EN_RESET2                (1 << 5)
+#  define MAC_DMA_RESET                (1 << 6)
 
 /* Ethernet Controller DMA Channels */
 
@@ -1095,7 +1095,7 @@ enum soc_au1200_ints {
 #define MAC_TX_BUFF0_STATUS    0x0
 #  define TX_FRAME_ABORTED     (1 << 0)
 #  define TX_JAB_TIMEOUT       (1 << 1)
-#  define TX_NO_CARRIER        (1 << 2)
+#  define TX_NO_CARRIER                (1 << 2)
 #  define TX_LOSS_CARRIER      (1 << 3)
 #  define TX_EXC_DEF           (1 << 4)
 #  define TX_LATE_COLL_ABORT   (1 << 5)
@@ -1106,7 +1106,7 @@ enum soc_au1200_ints {
 #  define TX_COLL_CNT_MASK     (0xF << 10)
 #  define TX_PKT_RETRY         (1 << 31)
 #define MAC_TX_BUFF0_ADDR      0x4
-#  define TX_DMA_ENABLE        (1 << 0)
+#  define TX_DMA_ENABLE                (1 << 0)
 #  define TX_T_DONE            (1 << 1)
 #  define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
 #define MAC_TX_BUFF0_LEN       0x8
@@ -1125,7 +1125,7 @@ enum soc_au1200_ints {
 /* offsets from MAC_RX_RING_ADDR */
 #define MAC_RX_BUFF0_STATUS    0x0
 #  define RX_FRAME_LEN_MASK    0x3fff
-#  define RX_WDOG_TIMER        (1 << 14)
+#  define RX_WDOG_TIMER                (1 << 14)
 #  define RX_RUNT              (1 << 15)
 #  define RX_OVERLEN           (1 << 16)
 #  define RX_COLL              (1 << 17)
@@ -1148,7 +1148,7 @@ enum soc_au1200_ints {
                    RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
                    RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
 #define MAC_RX_BUFF0_ADDR      0x4
-#  define RX_DMA_ENABLE        (1 << 0)
+#  define RX_DMA_ENABLE                (1 << 0)
 #  define RX_T_DONE            (1 << 1)
 #  define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
 #  define RX_SET_BUFF_ADDR(X)  ((X) & 0xffffffc0)
@@ -1173,34 +1173,34 @@ enum soc_au1200_ints {
 
 /* SSIO */
 #define SSI0_STATUS            0xB1600000
-#  define SSI_STATUS_BF        (1 << 4)
-#  define SSI_STATUS_OF        (1 << 3)
-#  define SSI_STATUS_UF        (1 << 2)
+#  define SSI_STATUS_BF                (1 << 4)
+#  define SSI_STATUS_OF                (1 << 3)
+#  define SSI_STATUS_UF                (1 << 2)
 #  define SSI_STATUS_D         (1 << 1)
 #  define SSI_STATUS_B         (1 << 0)
 #define SSI0_INT               0xB1600004
 #  define SSI_INT_OI           (1 << 3)
 #  define SSI_INT_UI           (1 << 2)
 #  define SSI_INT_DI           (1 << 1)
-#define SSI0_INT_ENABLE        0xB1600008
+#define SSI0_INT_ENABLE                0xB1600008
 #  define SSI_INTE_OIE         (1 << 3)
 #  define SSI_INTE_UIE         (1 << 2)
 #  define SSI_INTE_DIE         (1 << 1)
 #define SSI0_CONFIG            0xB1600020
-#  define SSI_CONFIG_AO        (1 << 24)
-#  define SSI_CONFIG_DO        (1 << 23)
+#  define SSI_CONFIG_AO                (1 << 24)
+#  define SSI_CONFIG_DO                (1 << 23)
 #  define SSI_CONFIG_ALEN_BIT  20
 #  define SSI_CONFIG_ALEN_MASK (0x7 << 20)
 #  define SSI_CONFIG_DLEN_BIT  16
 #  define SSI_CONFIG_DLEN_MASK (0x7 << 16)
-#  define SSI_CONFIG_DD        (1 << 11)
-#  define SSI_CONFIG_AD        (1 << 10)
+#  define SSI_CONFIG_DD                (1 << 11)
+#  define SSI_CONFIG_AD                (1 << 10)
 #  define SSI_CONFIG_BM_BIT    8
 #  define SSI_CONFIG_BM_MASK   (0x3 << 8)
-#  define SSI_CONFIG_CE        (1 << 7)
-#  define SSI_CONFIG_DP        (1 << 6)
-#  define SSI_CONFIG_DL        (1 << 5)
-#  define SSI_CONFIG_EP        (1 << 4)
+#  define SSI_CONFIG_CE                (1 << 7)
+#  define SSI_CONFIG_DP                (1 << 6)
+#  define SSI_CONFIG_DL                (1 << 5)
+#  define SSI_CONFIG_EP                (1 << 4)
 #define SSI0_ADATA             0xB1600024
 #  define SSI_AD_D             (1 << 24)
 #  define SSI_AD_ADDR_BIT      16
@@ -1210,12 +1210,12 @@ enum soc_au1200_ints {
 #define SSI0_CLKDIV            0xB1600028
 #define SSI0_CONTROL           0xB1600100
 #  define SSI_CONTROL_CD       (1 << 1)
-#  define SSI_CONTROL_E        (1 << 0)
+#  define SSI_CONTROL_E                (1 << 0)
 
 /* SSI1 */
 #define SSI1_STATUS            0xB1680000
 #define SSI1_INT               0xB1680004
-#define SSI1_INT_ENABLE        0xB1680008
+#define SSI1_INT_ENABLE                0xB1680008
 #define SSI1_CONFIG            0xB1680020
 #define SSI1_ADATA             0xB1680024
 #define SSI1_CLKDIV            0xB1680028
@@ -1242,8 +1242,8 @@ enum soc_au1200_ints {
 
 #define SSI_CONFIG_AO          (1 << 24)
 #define SSI_CONFIG_DO          (1 << 23)
-#define SSI_CONFIG_ALEN        (7 << 20)
-#define SSI_CONFIG_DLEN        (15 << 16)
+#define SSI_CONFIG_ALEN                (7 << 20)
+#define SSI_CONFIG_DLEN                (15 << 16)
 #define SSI_CONFIG_DD          (1 << 11)
 #define SSI_CONFIG_AD          (1 << 10)
 #define SSI_CONFIG_BM          (3 << 8)
@@ -1305,7 +1305,7 @@ struct au1k_irda_platform_data {
 #  define SYS_PF_CS            (1 << 16)       /* EXTCLK0/32KHz to gpio2 */
 #  define SYS_PF_EX0           (1 << 9)        /* GPIO2/clock */
 
-/* Au1550 only.  Redefines lots of pins */
+/* Au1550 only.         Redefines lots of pins */
 #  define SYS_PF_PSC2_MASK     (7 << 17)
 #  define SYS_PF_PSC2_AC97     0
 #  define SYS_PF_PSC2_SPI      0
@@ -1322,33 +1322,33 @@ struct au1k_irda_platform_data {
 #  define SYS_PF_MUST_BE_SET   ((1 << 5) | (1 << 2))
 
 /* Au1200 only */
-#define SYS_PINFUNC_DMA        (1 << 31)
-#define SYS_PINFUNC_S0A        (1 << 30)
-#define SYS_PINFUNC_S1A        (1 << 29)
-#define SYS_PINFUNC_LP0        (1 << 28)
-#define SYS_PINFUNC_LP1        (1 << 27)
-#define SYS_PINFUNC_LD16       (1 << 26)
-#define SYS_PINFUNC_LD8        (1 << 25)
-#define SYS_PINFUNC_LD1        (1 << 24)
-#define SYS_PINFUNC_LD0        (1 << 23)
-#define SYS_PINFUNC_P1A        (3 << 21)
-#define SYS_PINFUNC_P1B        (1 << 20)
-#define SYS_PINFUNC_FS3        (1 << 19)
-#define SYS_PINFUNC_P0A        (3 << 17)
+#define SYS_PINFUNC_DMA                (1 << 31)
+#define SYS_PINFUNC_S0A                (1 << 30)
+#define SYS_PINFUNC_S1A                (1 << 29)
+#define SYS_PINFUNC_LP0                (1 << 28)
+#define SYS_PINFUNC_LP1                (1 << 27)
+#define SYS_PINFUNC_LD16       (1 << 26)
+#define SYS_PINFUNC_LD8                (1 << 25)
+#define SYS_PINFUNC_LD1                (1 << 24)
+#define SYS_PINFUNC_LD0                (1 << 23)
+#define SYS_PINFUNC_P1A                (3 << 21)
+#define SYS_PINFUNC_P1B                (1 << 20)
+#define SYS_PINFUNC_FS3                (1 << 19)
+#define SYS_PINFUNC_P0A                (3 << 17)
 #define SYS_PINFUNC_CS         (1 << 16)
-#define SYS_PINFUNC_CIM        (1 << 15)
-#define SYS_PINFUNC_P1C        (1 << 14)
-#define SYS_PINFUNC_U1T        (1 << 12)
-#define SYS_PINFUNC_U1R        (1 << 11)
-#define SYS_PINFUNC_EX1        (1 << 10)
-#define SYS_PINFUNC_EX0        (1 << 9)
-#define SYS_PINFUNC_U0R        (1 << 8)
+#define SYS_PINFUNC_CIM                (1 << 15)
+#define SYS_PINFUNC_P1C                (1 << 14)
+#define SYS_PINFUNC_U1T                (1 << 12)
+#define SYS_PINFUNC_U1R                (1 << 11)
+#define SYS_PINFUNC_EX1                (1 << 10)
+#define SYS_PINFUNC_EX0                (1 << 9)
+#define SYS_PINFUNC_U0R                (1 << 8)
 #define SYS_PINFUNC_MC         (1 << 7)
-#define SYS_PINFUNC_S0B        (1 << 6)
-#define SYS_PINFUNC_S0C        (1 << 5)
-#define SYS_PINFUNC_P0B        (1 << 4)
-#define SYS_PINFUNC_U0T        (1 << 3)
-#define SYS_PINFUNC_S1B        (1 << 2)
+#define SYS_PINFUNC_S0B                (1 << 6)
+#define SYS_PINFUNC_S0C                (1 << 5)
+#define SYS_PINFUNC_P0B                (1 << 4)
+#define SYS_PINFUNC_U0T                (1 << 3)
+#define SYS_PINFUNC_S1B                (1 << 2)
 
 /* Power Management */
 #define SYS_SCRATCH0           0xB1900018
@@ -1405,7 +1405,7 @@ struct au1k_irda_platform_data {
 #  define SYS_CS_DI2           (1 << 16)
 #  define SYS_CS_CI2           (1 << 15)
 
-#  define SYS_CS_ML_BIT        7
+#  define SYS_CS_ML_BIT                7
 #  define SYS_CS_ML_MASK       (0x7 << SYS_CS_ML_BIT)
 #  define SYS_CS_DL            (1 << 6)
 #  define SYS_CS_CL            (1 << 5)
@@ -1554,8 +1554,8 @@ struct au1k_irda_platform_data {
 #define PCI_MWMASKDEV_MWMASK(x) (((x) & 0xffff) << 16)
 #define PCI_MWMASKDEV_DEVID(x) ((x) & 0xffff)
 #define PCI_MWBASEREVCCL_BASE(x) (((x) & 0xffff) << 16)
-#define PCI_MWBASEREVCCL_REV(x)  (((x) & 0xff) << 8)
-#define PCI_MWBASEREVCCL_CCL(x)  ((x) & 0xff)
+#define PCI_MWBASEREVCCL_REV(x)         (((x) & 0xff) << 8)
+#define PCI_MWBASEREVCCL_CCL(x)         ((x) & 0xff)
 #define PCI_ID_DID(x)          (((x) & 0xffff) << 16)
 #define PCI_ID_VID(x)          ((x) & 0xffff)
 #define PCI_STATCMD_STATUS(x)  (((x) & 0xffff) << 16)
index ba4cf0e..7cedca5 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/spinlock.h>    /* And spinlocks */
 #include <linux/delay.h>
 
-#define NUM_AU1000_DMA_CHANNELS        8
+#define NUM_AU1000_DMA_CHANNELS 8
 
 /* DMA Channel Register Offsets */
 #define DMA_MODE_SET           0x00000000
@@ -47,7 +47,7 @@
 #define DMA_DS                 (1 << 15)
 #define DMA_BE                 (1 << 13)
 #define DMA_DR                 (1 << 12)
-#define DMA_TS8                (1 << 11)
+#define DMA_TS8                        (1 << 11)
 #define DMA_DW_BIT             9
 #define DMA_DW_MASK            (0x03 << DMA_DW_BIT)
 #define DMA_DW8                        (0 << DMA_DW_BIT)
@@ -59,9 +59,9 @@
 #define DMA_GO                 (1 << 5)
 #define DMA_AB                 (1 << 4)
 #define DMA_D1                 (1 << 3)
-#define DMA_BE1                (1 << 2)
+#define DMA_BE1                        (1 << 2)
 #define DMA_D0                 (1 << 1)
-#define DMA_BE0                (1 << 0)
+#define DMA_BE0                        (1 << 0)
 
 #define DMA_PERIPHERAL_ADDR    0x00000008
 #define DMA_BUFFER0_START      0x0000000C
@@ -246,7 +246,7 @@ static inline void init_dma(unsigned int dmanr)
                mode |= DMA_IE;
 
        au_writel(~mode, chan->io + DMA_MODE_CLEAR);
-       au_writel(mode,  chan->io + DMA_MODE_SET);
+       au_writel(mode,  chan->io + DMA_MODE_SET);
 }
 
 /*
index e221659..cadab91 100644 (file)
@@ -148,7 +148,7 @@ struct au1xmmc_platform_data {
 /*
  *  SD_STATUS bit definitions.
  */
-#define SD_STATUS_DCRCW        (0x00000007)
+#define SD_STATUS_DCRCW (0x00000007)
 #define SD_STATUS_xx1  (0x00000008)
 #define SD_STATUS_CB   (0x00000010)
 #define SD_STATUS_DB   (0x00000020)
index 217810e..ca8077a 100644 (file)
@@ -103,7 +103,7 @@ typedef volatile struct au1xxx_ddma_desc {
         * Lets have some SW data following -- make sure it's 32 bytes.
         */
        u32     sw_status;
-       u32     sw_context;
+       u32     sw_context;
        u32     sw_reserved[6];
 } au1x_ddma_desc_t;
 
@@ -123,7 +123,7 @@ typedef volatile struct au1xxx_ddma_desc {
 #define DSCR_CMD0_CV           (0x1 << 2)      /* Clear Valid when done */
 #define DSCR_CMD0_ST_MASK      (0x3 << 0)      /* Status instruction */
 
-#define SW_STATUS_INUSE        (1 << 0)
+#define SW_STATUS_INUSE                (1 << 0)
 
 /* Command 0 device IDs. */
 #define AU1550_DSCR_CMD0_UART0_TX      0
@@ -195,8 +195,8 @@ typedef volatile struct au1xxx_ddma_desc {
 #define AU1300_DSCR_CMD0_SDMS_RX0      9
 #define AU1300_DSCR_CMD0_SDMS_TX1      10
 #define AU1300_DSCR_CMD0_SDMS_RX1      11
-#define AU1300_DSCR_CMD0_AES_TX        12
-#define AU1300_DSCR_CMD0_AES_RX        13
+#define AU1300_DSCR_CMD0_AES_TX               12
+#define AU1300_DSCR_CMD0_AES_RX               13
 #define AU1300_DSCR_CMD0_PSC0_TX       14
 #define AU1300_DSCR_CMD0_PSC0_RX       15
 #define AU1300_DSCR_CMD0_PSC1_TX       16
@@ -205,12 +205,12 @@ typedef volatile struct au1xxx_ddma_desc {
 #define AU1300_DSCR_CMD0_PSC2_RX       19
 #define AU1300_DSCR_CMD0_PSC3_TX       20
 #define AU1300_DSCR_CMD0_PSC3_RX       21
-#define AU1300_DSCR_CMD0_LCD           22
+#define AU1300_DSCR_CMD0_LCD          22
 #define AU1300_DSCR_CMD0_NAND_FLASH    23
 #define AU1300_DSCR_CMD0_SDMS_TX2      24
 #define AU1300_DSCR_CMD0_SDMS_RX2      25
 #define AU1300_DSCR_CMD0_CIM_SYNC      26
-#define AU1300_DSCR_CMD0_UDMA          27
+#define AU1300_DSCR_CMD0_UDMA         27
 #define AU1300_DSCR_CMD0_DMA_REQ0      28
 #define AU1300_DSCR_CMD0_DMA_REQ1      29
 
@@ -298,7 +298,7 @@ typedef volatile struct au1xxx_ddma_desc {
 #define DSCR_NXTPTR_MS         (1 << 27)
 
 /* The number of DBDMA channels. */
-#define NUM_DBDMA_CHANS        16
+#define NUM_DBDMA_CHANS 16
 
 /*
  * DDMA API definitions
@@ -316,7 +316,7 @@ typedef struct dbdma_device_table {
 
 
 typedef struct dbdma_chan_config {
-       spinlock_t      lock;
+       spinlock_t      lock;
 
        u32                     chan_flags;
        u32                     chan_index;
index e306384..bb91b89 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * include/asm-mips/mach-au1x00/au1xxx_ide.h  version 01.30.00   Aug. 02 2005
+ * include/asm-mips/mach-au1x00/au1xxx_ide.h  version 01.30.00  Aug. 02 2005
  *
  * BRIEF MODULE DESCRIPTION
  * AMD Alchemy Au1xxx IDE interface routines over the Static Bus
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  * Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
- *       Interface and Linux Device Driver" Application Note.
+ *      Interface and Linux Device Driver" Application Note.
  */
 
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 #define DMA_WAIT_TIMEOUT       100
-#define NUM_DESCRIPTORS        PRD_ENTRIES
+#define NUM_DESCRIPTORS                PRD_ENTRIES
 #else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */
-#define NUM_DESCRIPTORS        2
+#define NUM_DESCRIPTORS                2
 #endif
 
 #ifndef AU1XXX_ATA_RQSIZE
@@ -84,8 +84,8 @@ typedef struct {
 #define TWP_MASK               (0x3F << 14)
 #define TCSW_MASK              (0x0F << 10)
 #define TPM_MASK               (0x0F << 6)
-#define TA_MASK                (0x3F << 0)
-#define TS_MASK                (1 << 8)
+#define TA_MASK                        (0x3F << 0)
+#define TS_MASK                        (1 << 8)
 
 /* Timing parameters PIO mode 0 */
 #define SBC_IDE_PIO0_TCSOE     (0x04 << 29)
@@ -96,7 +96,7 @@ typedef struct {
 #define SBC_IDE_PIO0_TWP       (0x10 << 14)
 #define SBC_IDE_PIO0_TCSW      (0x04 << 10)
 #define SBC_IDE_PIO0_TPM       (0x00 << 6)
-#define SBC_IDE_PIO0_TA        (0x15 << 0)
+#define SBC_IDE_PIO0_TA                (0x15 << 0)
 /* Timing parameters PIO mode 1 */
 #define SBC_IDE_PIO1_TCSOE     (0x03 << 29)
 #define SBC_IDE_PIO1_TOECS     (0x01 << 26)
@@ -106,7 +106,7 @@ typedef struct {
 #define SBC_IDE_PIO1_TWP       (0x08 << 14)
 #define SBC_IDE_PIO1_TCSW      (0x03 << 10)
 #define SBC_IDE_PIO1_TPM       (0x00 << 6)
-#define SBC_IDE_PIO1_TA        (0x0B << 0)
+#define SBC_IDE_PIO1_TA                (0x0B << 0)
 /* Timing parameters PIO mode 2 */
 #define SBC_IDE_PIO2_TCSOE     (0x05 << 29)
 #define SBC_IDE_PIO2_TOECS     (0x01 << 26)
@@ -116,7 +116,7 @@ typedef struct {
 #define SBC_IDE_PIO2_TWP       (0x1F << 14)
 #define SBC_IDE_PIO2_TCSW      (0x05 << 10)
 #define SBC_IDE_PIO2_TPM       (0x00 << 6)
-#define SBC_IDE_PIO2_TA        (0x22 << 0)
+#define SBC_IDE_PIO2_TA                (0x22 << 0)
 /* Timing parameters PIO mode 3 */
 #define SBC_IDE_PIO3_TCSOE     (0x05 << 29)
 #define SBC_IDE_PIO3_TOECS     (0x01 << 26)
@@ -126,7 +126,7 @@ typedef struct {
 #define SBC_IDE_PIO3_TWP       (0x15 << 14)
 #define SBC_IDE_PIO3_TCSW      (0x05 << 10)
 #define SBC_IDE_PIO3_TPM       (0x00 << 6)
-#define SBC_IDE_PIO3_TA        (0x1A << 0)
+#define SBC_IDE_PIO3_TA                (0x1A << 0)
 /* Timing parameters PIO mode 4 */
 #define SBC_IDE_PIO4_TCSOE     (0x04 << 29)
 #define SBC_IDE_PIO4_TOECS     (0x01 << 26)
@@ -136,7 +136,7 @@ typedef struct {
 #define SBC_IDE_PIO4_TWP       (0x0D << 14)
 #define SBC_IDE_PIO4_TCSW      (0x03 << 10)
 #define SBC_IDE_PIO4_TPM       (0x00 << 6)
-#define SBC_IDE_PIO4_TA        (0x12 << 0)
+#define SBC_IDE_PIO4_TA                (0x12 << 0)
 /* Timing parameters MDMA mode 0 */
 #define SBC_IDE_MDMA0_TCSOE    (0x03 << 29)
 #define SBC_IDE_MDMA0_TOECS    (0x01 << 26)
index 4e3f3bc..8a9cd75 100644 (file)
@@ -53,7 +53,7 @@
 
 #define PSC_CTRL_DISABLE       0
 #define PSC_CTRL_SUSPEND       2
-#define PSC_CTRL_ENABLE        3
+#define PSC_CTRL_ENABLE                3
 
 /* AC97 Registers. */
 #define PSC_AC97CFG_OFFSET     0x00000008
@@ -85,8 +85,8 @@
 #define PSC_AC97CFG_SE_ENABLE  (1 << 25)
 
 #define PSC_AC97CFG_LEN_MASK   (0xf << 21)
-#define PSC_AC97CFG_TXSLOT_MASK        (0x3ff << 11)
-#define PSC_AC97CFG_RXSLOT_MASK        (0x3ff << 1)
+#define PSC_AC97CFG_TXSLOT_MASK (0x3ff << 11)
+#define PSC_AC97CFG_RXSLOT_MASK (0x3ff << 1)
 #define PSC_AC97CFG_GE_ENABLE  (1)
 
 /* Enable slots 3-12. */
@@ -95,7 +95,7 @@
 
 /*
  * The word length equation is ((x) * 2) + 2, so choose 'x' appropriately.
- * The only sensible numbers are 7, 9, or possibly 11.  Nah, just do the
+ * The only sensible numbers are 7, 9, or possibly 11. Nah, just do the
  * arithmetic in the macro.
  */
 #define PSC_AC97CFG_SET_LEN(x) (((((x) - 2) / 2) & 0xf) << 21)
index 73853b5..796afd0 100644 (file)
 #include <asm/mach-au1x00/au1000.h>
 
 /* The default GPIO numberspace as documented in the Alchemy manuals.
- * GPIO0-31 from GPIO1 block,   GPIO200-215 from GPIO2 block.
+ * GPIO0-31 from GPIO1 block,  GPIO200-215 from GPIO2 block.
  */
 #define ALCHEMY_GPIO1_BASE     0
 #define ALCHEMY_GPIO2_BASE     200
 
 #define ALCHEMY_GPIO1_NUM      32
 #define ALCHEMY_GPIO2_NUM      16
-#define ALCHEMY_GPIO1_MAX      (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1)
+#define ALCHEMY_GPIO1_MAX      (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1)
 #define ALCHEMY_GPIO2_MAX      (ALCHEMY_GPIO2_BASE + ALCHEMY_GPIO2_NUM - 1)
 
 #define MAKE_IRQ(intc, off)    (AU1000_INTC##intc##_INT_BASE + (off))
@@ -67,7 +67,7 @@ static inline int au1500_gpio1_to_irq(int gpio)
        switch (gpio) {
        case 0 ... 15:
        case 20:
-       case 23 ... 28: return MAKE_IRQ(1, gpio);
+       case 23 ... 28: return MAKE_IRQ(1, gpio);
        }
 
        return -ENXIO;
@@ -139,8 +139,8 @@ static inline int au1550_gpio1_to_irq(int gpio)
 
        switch (gpio) {
        case 0 ... 15:
-       case 20 ... 28: return MAKE_IRQ(1, gpio);
-       case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16);
+       case 20 ... 28: return MAKE_IRQ(1, gpio);
+       case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16);
        }
 
        return -ENXIO;
@@ -152,9 +152,9 @@ static inline int au1550_gpio2_to_irq(int gpio)
 
        switch (gpio) {
        case 0:         return MAKE_IRQ(1, 16);
-       case 1 ... 5:   return MAKE_IRQ(1, 17); /* shared GPIO201_205 */
+       case 1 ... 5:   return MAKE_IRQ(1, 17); /* shared GPIO201_205 */
        case 6 ... 7:   return MAKE_IRQ(1, 29 + gpio - 6);
-       case 8 ... 15:  return MAKE_IRQ(1, 31); /* shared GPIO208_215 */
+       case 8 ... 15:  return MAKE_IRQ(1, 31); /* shared GPIO208_215 */
        }
 
        return -ENXIO;
@@ -190,7 +190,7 @@ static inline int au1200_gpio2_to_irq(int gpio)
        case 0 ... 2:   return MAKE_IRQ(0, 5 + gpio - 0);
        case 3:         return MAKE_IRQ(0, 22);
        case 4 ... 7:   return MAKE_IRQ(0, 24 + gpio - 4);
-       case 8 ... 15:  return MAKE_IRQ(0, 28); /* shared GPIO208_215 */
+       case 8 ... 15:  return MAKE_IRQ(0, 28); /* shared GPIO208_215 */
        }
 
        return -ENXIO;
@@ -428,7 +428,7 @@ static inline void alchemy_gpio2_disable_int(int gpio2)
 /**
  * alchemy_gpio2_enable -  Activate GPIO2 block.
  *
- * The GPIO2 block must be enabled excplicitly to work.  On systems
+ * The GPIO2 block must be enabled excplicitly to work.         On systems
  * where this isn't done by the bootloader, this macro can be used.
  */
 static inline void alchemy_gpio2_enable(void)
@@ -533,7 +533,7 @@ static inline int alchemy_irq_to_gpio(int irq)
  *     2 (1 for Au1000) gpio_chips are registered.
  *
  *(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
- *     the boards' gpio.h must provide the linux gpio wrapper functions,
+ *     the boards' gpio.h must provide the linux gpio wrapper functions,
  *
  *(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
  *     inlinable gpio functions are provided which enable access to the
index fb9975c..ce02894 100644 (file)
@@ -130,7 +130,7 @@ static inline int au1300_gpio_getinitlvl(unsigned int gpio)
 *      A gpiochip for the 75 GPIOs is registered.
 *
 *(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
-*      the boards' gpio.h must provide the linux gpio wrapper functions,
+*      the boards' gpio.h must provide the linux gpio wrapper functions,
 *
 *(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
 *      inlinable gpio functions are provided which enable access to the
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
new file mode 100644 (file)
index 0000000..b8e7be8
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ *  Copyright (C) 2005, Broadcom Corporation
+ *  Copyright (C) 2006, Felix Fietkau <nbd@openwrt.org>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#ifndef __BCM47XX_NVRAM_H
+#define __BCM47XX_NVRAM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+struct nvram_header {
+       u32 magic;
+       u32 len;
+       u32 crc_ver_init;       /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+       u32 config_refresh;     /* 0:15 sdram_config, 16:31 sdram_refresh */
+       u32 config_ncdl;        /* ncdl values for memc */
+};
+
+#define NVRAM_HEADER           0x48534C46      /* 'FLSH' */
+#define NVRAM_VERSION          1
+#define NVRAM_HEADER_SIZE      20
+#define NVRAM_SPACE            0x8000
+
+#define FLASH_MIN              0x00020000      /* Minimum flash size */
+
+#define NVRAM_MAX_VALUE_LEN 255
+#define NVRAM_MAX_PARAM_LEN 64
+
+extern int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len);
+
+static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
+{
+       if (strchr(buf, ':'))
+               sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
+                       &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
+                       &macaddr[5]);
+       else if (strchr(buf, '-'))
+               sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
+                       &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
+                       &macaddr[5]);
+       else
+               printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
+}
+
+#endif /* __BCM47XX_NVRAM_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/nvram.h
deleted file mode 100644 (file)
index 69ef3ef..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *  Copyright (C) 2005, Broadcom Corporation
- *  Copyright (C) 2006, Felix Fietkau <nbd@openwrt.org>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- */
-
-#ifndef __NVRAM_H
-#define __NVRAM_H
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-struct nvram_header {
-       u32 magic;
-       u32 len;
-       u32 crc_ver_init;       /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
-       u32 config_refresh;     /* 0:15 sdram_config, 16:31 sdram_refresh */
-       u32 config_ncdl;        /* ncdl values for memc */
-};
-
-#define NVRAM_HEADER           0x48534C46      /* 'FLSH' */
-#define NVRAM_VERSION          1
-#define NVRAM_HEADER_SIZE      20
-#define NVRAM_SPACE            0x8000
-
-#define FLASH_MIN              0x00020000      /* Minimum flash size */
-
-#define NVRAM_MAX_VALUE_LEN 255
-#define NVRAM_MAX_PARAM_LEN 64
-
-#define NVRAM_ERR_INV_PARAM    -8
-#define NVRAM_ERR_ENVNOTFOUND  -9
-
-extern int nvram_getenv(char *name, char *val, size_t val_len);
-
-static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6])
-{
-       if (strchr(buf, ':'))
-               sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
-                       &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
-                       &macaddr[5]);
-       else if (strchr(buf, '-'))
-               sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
-                       &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
-                       &macaddr[5]);
-       else
-               printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
-}
-
-#endif
index dbd5b5a..cb922b9 100644 (file)
@@ -182,7 +182,7 @@ enum bcm63xx_regs_set {
 #define BCM_6328_PERF_BASE             (0xb0000000)
 #define BCM_6328_TIMER_BASE            (0xb0000040)
 #define BCM_6328_WDT_BASE              (0xb000005c)
-#define BCM_6328_UART0_BASE             (0xb0000100)
+#define BCM_6328_UART0_BASE            (0xb0000100)
 #define BCM_6328_UART1_BASE            (0xb0000120)
 #define BCM_6328_GPIO_BASE             (0xb0000080)
 #define BCM_6328_SPI_BASE              (0xdeadbeef)
index 03a54df..7033144 100644 (file)
@@ -88,7 +88,7 @@
 #define bcm_mpi_readl(o)       bcm_rset_readl(RSET_MPI, (o))
 #define bcm_mpi_writel(v, o)   bcm_rset_writel(RSET_MPI, (v), (o))
 #define bcm_pcmcia_readl(o)    bcm_rset_readl(RSET_PCMCIA, (o))
-#define bcm_pcmcia_writel(v, o)        bcm_rset_writel(RSET_PCMCIA, (v), (o))
+#define bcm_pcmcia_writel(v, o) bcm_rset_writel(RSET_PCMCIA, (v), (o))
 #define bcm_pcie_readl(o)      bcm_rset_readl(RSET_PCIE, (o))
 #define bcm_pcie_writel(v, o)  bcm_rset_writel(RSET_PCIE, (v), (o))
 #define bcm_sdram_readl(o)     bcm_rset_readl(RSET_SDRAM, (o))
index a5bbff3..1e89df7 100644 (file)
@@ -19,7 +19,7 @@ struct bcm_enet_desc {
 #define DMADESC_SOP_MASK       (1 << 13)
 #define DMADESC_ESOP_MASK      (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
 #define DMADESC_WRAP_MASK      (1 << 12)
-#define DMADESC_USB_NOZERO_MASK        (1 << 1)
+#define DMADESC_USB_NOZERO_MASK (1 << 1)
 #define DMADESC_USB_ZERO_MASK  (1 << 0)
 
 /* status */
index c3eeb90..81b4702 100644 (file)
                                        CKCTL_6368_NAND_EN |            \
                                        CKCTL_6368_IPSEC_EN)
 
-/* System PLL Control register  */
+/* System PLL Control register */
 #define PERF_SYS_PLL_CTL_REG           0x8
 #define SYS_PLL_SOFT_RESET             0x1
 
 #define SOFTRESET_6338_DMAMEM_MASK     (1 << 6)
 #define SOFTRESET_6338_SAR_MASK                (1 << 7)
 #define SOFTRESET_6338_ACLC_MASK       (1 << 8)
-#define SOFTRESET_6338_ADSLMIPSPLL_MASK        (1 << 10)
+#define SOFTRESET_6338_ADSLMIPSPLL_MASK (1 << 10)
 #define SOFTRESET_6338_ALL      (SOFTRESET_6338_SPI_MASK |             \
                                  SOFTRESET_6338_ENET_MASK |            \
                                  SOFTRESET_6338_USBH_MASK |            \
 #define SOFTRESET_6348_DMAMEM_MASK     (1 << 6)
 #define SOFTRESET_6348_SAR_MASK                (1 << 7)
 #define SOFTRESET_6348_ACLC_MASK       (1 << 8)
-#define SOFTRESET_6348_ADSLMIPSPLL_MASK        (1 << 10)
+#define SOFTRESET_6348_ADSLMIPSPLL_MASK (1 << 10)
 
 #define SOFTRESET_6348_ALL      (SOFTRESET_6348_SPI_MASK |             \
                                  SOFTRESET_6348_ENET_MASK |            \
 
 
 #define GPIO_PINMUX_OTHR_REG           0x24
-#define GPIO_PINMUX_OTHR_6328_USB_SHIFT        12
+#define GPIO_PINMUX_OTHR_6328_USB_SHIFT 12
 #define GPIO_PINMUX_OTHR_6328_USB_MASK (3 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
 #define GPIO_PINMUX_OTHR_6328_USB_HOST (1 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
 #define GPIO_PINMUX_OTHR_6328_USB_DEV  (2 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
 /* those bits must be kept as read in gpio basemode register*/
 
 #define GPIO_STRAPBUS_REG              0x40
-#define STRAPBUS_6358_BOOT_SEL_PARALLEL        (1 << 1)
+#define STRAPBUS_6358_BOOT_SEL_PARALLEL (1 << 1)
 #define STRAPBUS_6358_BOOT_SEL_SERIAL  (0 << 1)
 #define STRAPBUS_6368_BOOT_SEL_MASK    0x3
 #define STRAPBUS_6368_BOOT_SEL_NAND    0
 #define STRAPBUS_6368_BOOT_SEL_SERIAL  1
-#define STRAPBUS_6368_BOOT_SEL_PARALLEL        3
+#define STRAPBUS_6368_BOOT_SEL_PARALLEL 3
 
 
 /*************************************************************************
 #define USBH_PRIV_SWAP_OHCI_DATA_MASK  (1 << USBH_PRIV_SWAP_OHCI_DATA_SHIFT)
 
 #define USBH_PRIV_UTMI_CTL_6368_REG    0x10
-#define USBH_PRIV_UTMI_CTL_NODRIV_SHIFT        12
+#define USBH_PRIV_UTMI_CTL_NODRIV_SHIFT 12
 #define USBH_PRIV_UTMI_CTL_NODRIV_MASK (0xf << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT)
 #define USBH_PRIV_UTMI_CTL_HOSTB_SHIFT 0
 #define USBH_PRIV_UTMI_CTL_HOSTB_MASK  (0xf << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT)
 #define USBD_CONTROL_INIT_SEL_MASK     (0xf << USBD_CONTROL_INIT_SEL_SHIFT)
 #define USBD_CONTROL_FIFO_RESET_SHIFT  6
 #define USBD_CONTROL_FIFO_RESET_MASK   (3 << USBD_CONTROL_FIFO_RESET_SHIFT)
-#define USBD_CONTROL_SETUPERRLOCK_SHIFT        5
+#define USBD_CONTROL_SETUPERRLOCK_SHIFT 5
 #define USBD_CONTROL_SETUPERRLOCK_MASK (1 << USBD_CONTROL_SETUPERRLOCK_SHIFT)
 #define USBD_CONTROL_DONE_CSRS_SHIFT   0
 #define USBD_CONTROL_DONE_CSRS_MASK    (1 << USBD_CONTROL_DONE_CSRS_SHIFT)
 #define USBD_STRAPS_APP_SELF_PWR_MASK  (1 << USBD_STRAPS_APP_SELF_PWR_SHIFT)
 #define USBD_STRAPS_APP_DISCON_SHIFT   9
 #define USBD_STRAPS_APP_DISCON_MASK    (1 << USBD_STRAPS_APP_DISCON_SHIFT)
-#define USBD_STRAPS_APP_CSRPRGSUP_SHIFT        8
+#define USBD_STRAPS_APP_CSRPRGSUP_SHIFT 8
 #define USBD_STRAPS_APP_CSRPRGSUP_MASK (1 << USBD_STRAPS_APP_CSRPRGSUP_SHIFT)
 #define USBD_STRAPS_APP_RMTWKUP_SHIFT  6
 #define USBD_STRAPS_APP_RMTWKUP_MASK   (1 << USBD_STRAPS_APP_RMTWKUP_SHIFT)
 #define USBD_EPNUM_TYPEMAP_REG         0x50
 #define USBD_EPNUM_TYPEMAP_TYPE_SHIFT  8
 #define USBD_EPNUM_TYPEMAP_TYPE_MASK   (0x3 << USBD_EPNUM_TYPEMAP_TYPE_SHIFT)
-#define USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT        0
+#define USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT 0
 #define USBD_EPNUM_TYPEMAP_DMA_CH_MASK (0xf << USBD_EPNUM_TYPEMAP_DMACH_SHIFT)
 
 /* Misc per-endpoint settings */
 #define MPI_L2PREMAP_IS_CARDBUS_MASK   (1 << 2)
 
 #define MPI_PCIMODESEL_REG             0x144
-#define MPI_PCIMODESEL_BAR1_NOSWAP_MASK        (1 << 0)
-#define MPI_PCIMODESEL_BAR2_NOSWAP_MASK        (1 << 1)
+#define MPI_PCIMODESEL_BAR1_NOSWAP_MASK (1 << 0)
+#define MPI_PCIMODESEL_BAR2_NOSWAP_MASK (1 << 1)
 #define MPI_PCIMODESEL_EXT_ARB_MASK    (1 << 2)
 #define MPI_PCIMODESEL_PREFETCH_SHIFT  4
 #define MPI_PCIMODESEL_PREFETCH_MASK   (0xf << MPI_PCIMODESEL_PREFETCH_SHIFT)
index 9332e78..2bbfc8d 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ASM_MACH_BCM63XX_IRQ_H
 #define __ASM_MACH_BCM63XX_IRQ_H
 
-#define NR_IRQS        128
+#define NR_IRQS 128
 #define MIPS_CPU_IRQ_BASE 0
 
 #endif
index 502bb18..60fc4c3 100644 (file)
@@ -51,8 +51,8 @@ enum octeon_irq {
 /* 256 - 511 represent the MSI interrupts 0-255 */
 #define OCTEON_IRQ_MSI_BIT0    (256)
 
-#define OCTEON_IRQ_MSI_LAST      (OCTEON_IRQ_MSI_BIT0 + 255)
-#define OCTEON_IRQ_LAST          (OCTEON_IRQ_MSI_LAST + 1)
+#define OCTEON_IRQ_MSI_LAST     (OCTEON_IRQ_MSI_BIT0 + 255)
+#define OCTEON_IRQ_LAST                 (OCTEON_IRQ_MSI_LAST + 1)
 #endif
 
 #endif
index dedef7d..1e7dbb1 100644 (file)
@@ -16,7 +16,7 @@
 #define CP0_PRID_OCTEON_PASS1 0x000d0000
 #define CP0_PRID_OCTEON_CN30XX 0x000d0200
 
-.macro  kernel_entry_setup
+.macro kernel_entry_setup
        # Registers set by bootloader:
        # (only 32 bits set by bootloader, all addresses are physical
        # addresses, and need to have the appropriate memory region set
        .set push
        .set arch=octeon
        # Read the cavium mem control register
-       dmfc0   v0, CP0_CVMMEMCTL_REG
+       dmfc0   v0, CP0_CVMMEMCTL_REG
        # Clear the lower 6 bits, the CVMSEG size
-       dins    v0, $0, 0, 6
-       ori     v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
-       dmtc0   v0, CP0_CVMMEMCTL_REG   # Write the cavium mem control register
-       dmfc0   v0, CP0_CVMCTL_REG      # Read the cavium control register
+       dins    v0, $0, 0, 6
+       ori     v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+       dmtc0   v0, CP0_CVMMEMCTL_REG   # Write the cavium mem control register
+       dmfc0   v0, CP0_CVMCTL_REG      # Read the cavium control register
 #ifdef CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED
        # Disable unaligned load/store support but leave HW fixup enabled
        or  v0, v0, 0x5001
@@ -69,14 +69,14 @@ skip:
        and     v0, v0, v1
        ori     v0, v0, (6 << 7)
        # Write the cavium control register
-       dmtc0   v0, CP0_CVMCTL_REG
+       dmtc0   v0, CP0_CVMCTL_REG
        sync
        # Flush dcache after config change
-       cache   9, 0($0)
+       cache   9, 0($0)
        # Get my core id
-       rdhwr   v0, $0
+       rdhwr   v0, $0
        # Jump the master to kernel_entry
-       bne     a2, zero, octeon_main_processor
+       bne     a2, zero, octeon_main_processor
        nop
 
 #ifdef CONFIG_SMP
@@ -87,21 +87,21 @@ skip:
        #
 
        # This is the variable where the next core to boot os stored
-       PTR_LA  t0, octeon_processor_boot
+       PTR_LA  t0, octeon_processor_boot
 octeon_spin_wait_boot:
        # Get the core id of the next to be booted
-       LONG_L  t1, (t0)
+       LONG_L  t1, (t0)
        # Keep looping if it isn't me
        bne t1, v0, octeon_spin_wait_boot
        nop
        # Get my GP from the global variable
-       PTR_LA  t0, octeon_processor_gp
-       LONG_L  gp, (t0)
+       PTR_LA  t0, octeon_processor_gp
+       LONG_L  gp, (t0)
        # Get my SP from the global variable
-       PTR_LA  t0, octeon_processor_sp
-       LONG_L  sp, (t0)
+       PTR_LA  t0, octeon_processor_sp
+       LONG_L  sp, (t0)
        # Set the SP global variable to zero so the master knows we've started
-       LONG_S  zero, (t0)
+       LONG_S  zero, (t0)
 #ifdef __OCTEON__
        syncw
        syncw
@@ -130,7 +130,7 @@ octeon_main_processor:
 /*
  * Do SMP slave processor setup necessary before we can savely execute C code.
  */
-       .macro  smp_slave_setup
+       .macro  smp_slave_setup
        .endm
 
 #endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */
index babc837..71d4bfa 100644 (file)
@@ -32,9 +32,9 @@
 #define cpu_scache_line_size() 0
 
 #ifdef CONFIG_64BIT
-#define cpu_has_llsc            0
+#define cpu_has_llsc           0
 #else
-#define cpu_has_llsc            1
+#define cpu_has_llsc           1
 #endif
 
 #define cpu_has_mips16         0
index f8afec3..6fe475b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  Copyright (C) 2006  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 16f1cf5..3c3ed4a 100644 (file)
@@ -110,7 +110,7 @@ enum bcsr_whoami_boards {
        BCSR_WHOAMI_DB1300,
 };
 
-/* STATUS reg.  Unless otherwise noted, they're valid on all boards.
+/* STATUS reg. Unless otherwise noted, they're valid on all boards.
  * PB1200 = DB1200.
  */
 #define BCSR_STATUS_PC0VS              0x0003
@@ -190,7 +190,7 @@ enum bcsr_whoami_boards {
 #define BCSR_RESETS_OTPWRPROT          0x1000  /* DB1300 */
 #define BCSR_RESETS_OTPCSB             0x2000  /* DB1300 */
 #define BCSR_RESETS_OTGPWR             0x4000  /* DB1300 */
-#define BCSR_RESETS_USBHPWR            0x8000  /* DB1300 */
+#define BCSR_RESETS_USBHPWR            0x8000  /* DB1300 */
 
 #define BCSR_BOARD_LCDVEE              0x0001
 #define BCSR_BOARD_LCDVDD              0x0002
index b2a8319..d3cce73 100644 (file)
@@ -63,7 +63,7 @@
  * the interrupt define and subtracting the DB1200_INT_BEGIN value.
  *
  *   Example: IDE bis pos is  = 64 - 64
- *            ETH bit pos is  = 65 - 64
+ *           ETH bit pos is  = 65 - 64
  */
 enum external_db1200_ints {
        DB1200_INT_BEGIN        = AU1000_MAX_INTR + 1,
index 7fe5fb3..3d1ede4 100644 (file)
@@ -21,7 +21,7 @@
 #define DB1300_SD1_INSERT_INT  (DB1300_FIRST_INT + 12)
 #define DB1300_SD1_EJECT_INT   (DB1300_FIRST_INT + 13)
 #define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
-#define DB1300_HOST_VBUS_OC_INT        (DB1300_FIRST_INT + 15)
+#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
 #define DB1300_LAST_INT                (DB1300_FIRST_INT + 15)
 
 /* SMSC9210 CS */
index 5439eb8..2f7155d 100644 (file)
@@ -8,7 +8,7 @@
 #ifndef __ASM_MACH_EMMA2RH_IRQ_H
 #define __ASM_MACH_EMMA2RH_IRQ_H
 
-#define NR_IRQS        256
+#define NR_IRQS 256
 
 #include_next <irq.h>
 
index 7c185bb..42be9e9 100644 (file)
@@ -8,6 +8,6 @@
 #ifndef __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
 #define __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
 
-/* Intentionally empty file ...  */
+/* Intentionally empty file ...         */
 
 #endif /* __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H */
index a38f4d4..5b5cd68 100644 (file)
@@ -98,7 +98,7 @@ static inline void fd_disable_irq(void)
 static inline int fd_request_irq(void)
 {
        return request_irq(FLOPPY_IRQ, floppy_interrupt,
-                          0, "floppy", NULL);
+                          0, "floppy", NULL);
 }
 
 static inline void fd_free_irq(void)
@@ -106,7 +106,7 @@ static inline void fd_free_irq(void)
        free_irq(FLOPPY_IRQ, NULL);
 }
 
-#define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL);
+#define fd_free_irq()          free_irq(FLOPPY_IRQ, NULL);
 
 
 static inline unsigned long fd_getfdaddr1(void)
index 9c93a5b..affa66f 100644 (file)
@@ -51,7 +51,7 @@ static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long si
 /*
  * insw() and gang might be called with interrupts disabled, so we can't
  * send IPIs for flushing due to the potencial of deadlocks, see the comment
- * above smp_call_function() in arch/mips/kernel/smp.c.  We work around the
+ * above smp_call_function() in arch/mips/kernel/smp.c.         We work around the
  * problem by disabling preemption so we know we actually perform the flush
  * on the processor that actually has the lines to be flushed which hopefully
  * is even better for performance anyway.
@@ -123,7 +123,7 @@ static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
        __ide_flush_epilogue();
 }
 
-/* ide_insw calls insw, not __ide_insw.  Why? */
+/* ide_insw calls insw, not __ide_insw.         Why? */
 #undef insw
 #undef insl
 #undef outsw
index e014264..139cd20 100644 (file)
@@ -9,12 +9,12 @@
 #define __ASM_MACH_GENERIC_IRQ_H
 
 #ifndef NR_IRQS
-#define NR_IRQS        128
+#define NR_IRQS 128
 #endif
 
 #ifdef CONFIG_I8259
 #ifndef I8259A_IRQ_BASE
-#define I8259A_IRQ_BASE        0
+#define I8259A_IRQ_BASE 0
 #endif
 #endif
 
index d7a9efd..73d717a 100644 (file)
@@ -69,7 +69,7 @@
 #define HIGHMEM_START          (_AC(1, UL) << _AC(59, UL))
 #endif
 
-#define TO_PHYS(x)             (             ((x) & TO_PHYS_MASK))
+#define TO_PHYS(x)             (             ((x) & TO_PHYS_MASK))
 #define TO_CAC(x)              (CAC_BASE   | ((x) & TO_PHYS_MASK))
 #define TO_UNCAC(x)            (UNCAC_BASE | ((x) & TO_PHYS_MASK))
 
index 624d66c..a323efb 100644 (file)
@@ -51,8 +51,8 @@
         * We might not get launched at the address the kernel is linked to,
         * so we jump there.
         */
-       PTR_LA  t0, 0f
-       jr      t0
+       PTR_LA  t0, 0f
+       jr      t0
 0:
        .endm
 
index 986a3b9..ebc9377 100644 (file)
@@ -7,7 +7,7 @@
 
 #define pa_to_nid(addr)                NASID_TO_COMPACT_NODEID(NASID_GET(addr))
 
-#define LEVELS_PER_SLICE        128
+#define LEVELS_PER_SLICE       128
 
 struct slice_data {
        unsigned long irq_enable_mask[2];
index b2cf641..defd135 100644 (file)
@@ -34,7 +34,7 @@ extern int pcibus_to_node(struct pci_bus *);
 
 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
 
-#define node_distance(from, to)        (__node_distances[(from)][(to)])
+#define node_distance(from, to) (__node_distances[(from)][(to)])
 
 #include <asm-generic/topology.h>
 
index 50d344c..65e9c85 100644 (file)
@@ -28,7 +28,7 @@
 #define cpu_has_ic_fills_f_dc  0
 #define cpu_has_dsp            0
 #define cpu_has_dsp2           0
-#define cpu_icache_snoops_remote_store  1
+#define cpu_icache_snoops_remote_store 1
 #define cpu_has_mipsmt         0
 #define cpu_has_userlocal      0
 
index 05aabb2..5edf05d 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
  * Copyright (C) 2000, 2002  Maciej W. Rozycki
  * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
- * 2004        pf
+ * 2004 pf
  */
 #ifndef _ASM_MACH_IP28_SPACES_H
 #define _ASM_MACH_IP28_SPACES_H
index c8fb5aa..073f0c4 100644 (file)
@@ -50,7 +50,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
        return pa;
 }
 
-/* This is almost certainly wrong but it's what dma-ip32.c used to use  */
+/* This is almost certainly wrong but it's what dma-ip32.c used to use */
 static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
        dma_addr_t dma_addr)
 {
index 7237a93..9807ecd 100644 (file)
@@ -17,7 +17,7 @@
 #define MIPS4K_ICACHE_REFILL_WAR       0
 #define MIPS_CACHE_SYNC_WAR            0
 #define TX49XX_ICACHE_INDEX_INV_WAR    0
-#define ICACHE_REFILLS_WORKAROUND_WAR   1
+#define ICACHE_REFILLS_WORKAROUND_WAR  1
 #define R10000_LLSC_WAR                        0
 #define MIPS34K_MISSED_ITLB_WAR                0
 
index 88b5acb..62aa1e2 100644 (file)
@@ -90,7 +90,7 @@ static inline void fd_disable_irq(void)
 static inline int fd_request_irq(void)
 {
        return request_irq(FLOPPY_IRQ, floppy_interrupt,
-                          0, "floppy", NULL);
+                          0, "floppy", NULL);
 }
 
 static inline void fd_free_irq(void)
index 1b7408d..16659cd 100644 (file)
@@ -2,7 +2,7 @@
  *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index a3be121..98b4e7c 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ7420/JZ4740 DMA definitions
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
@@ -40,9 +40,9 @@ enum jz4740_dma_width {
 };
 
 enum jz4740_dma_transfer_size {
-       JZ4740_DMA_TRANSFER_SIZE_4BYTE  = 0,
-       JZ4740_DMA_TRANSFER_SIZE_1BYTE  = 1,
-       JZ4740_DMA_TRANSFER_SIZE_2BYTE  = 2,
+       JZ4740_DMA_TRANSFER_SIZE_4BYTE  = 0,
+       JZ4740_DMA_TRANSFER_SIZE_1BYTE  = 1,
+       JZ4740_DMA_TRANSFER_SIZE_2BYTE  = 2,
        JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
        JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
 };
@@ -87,4 +87,4 @@ uint32_t jz4740_dma_get_residue(const struct jz4740_dma_chan *dma);
 void jz4740_dma_set_complete_cb(struct jz4740_dma_chan *dma,
        jz4740_dma_complete_callback_t cb);
 
-#endif  /* __ASM_JZ4740_DMA_H__ */
+#endif /* __ASM_JZ4740_DMA_H__ */
index 1a6482e..eaacba7 100644 (file)
@@ -198,7 +198,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask);
 #define JZ_GPIO_FUNC_MEM_ADDR14                JZ_GPIO_FUNC1
 #define JZ_GPIO_FUNC_MEM_ADDR15                JZ_GPIO_FUNC1
 #define JZ_GPIO_FUNC_MEM_ADDR16                JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_CLS           JZ_GPIO_FUNC1
+#define JZ_GPIO_FUNC_LCD_CLS           JZ_GPIO_FUNC1
 #define JZ_GPIO_FUNC_LCD_SPL           JZ_GPIO_FUNC1
 #define JZ_GPIO_FUNC_MEM_DCS           JZ_GPIO_FUNC1
 #define JZ_GPIO_FUNC_MEM_RAS           JZ_GPIO_FUNC1
index 5ad1a9c..df50736 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 IRQ definitions
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index 163e81d..72cfebd 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 platform device definitions
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index a7759fb..8750a1d 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 platform timer support
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index fccac35..98d6a2f 100644 (file)
@@ -44,7 +44,7 @@
 
 /* BOOT_SEL - find what boot media we have */
 #define BS_FLASH               0x1
-#define BS_SPI                  0x4
+#define BS_SPI                 0x4
 
 /* global register ranges */
 extern __iomem void *ltq_ebu_membase;
index 5e8a6e9..f196cce 100644 (file)
@@ -34,6 +34,7 @@ extern spinlock_t ebu_lock;
 extern void ltq_disable_irq(struct irq_data *data);
 extern void ltq_mask_and_ack_irq(struct irq_data *data);
 extern void ltq_enable_irq(struct irq_data *data);
+extern int ltq_eiu_get_irq(int exin);
 
 /* clock handling */
 extern int clk_activate(struct clk *clk);
@@ -41,6 +42,7 @@ extern void clk_deactivate(struct clk *clk);
 extern struct clk *clk_get_cpu(void);
 extern struct clk *clk_get_fpi(void);
 extern struct clk *clk_get_io(void);
+extern struct clk *clk_get_ppe(void);
 
 /* find out what bootsource we have */
 extern unsigned char ltq_boot_select(void);
index b6c568c..358ca97 100644 (file)
@@ -7,17 +7,17 @@
 #ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H
 #define __ASM_MIPS_MACH_LANTIQ_WAR_H
 
-#define R4600_V1_INDEX_ICACHEOP_WAR     0
-#define R4600_V1_HIT_CACHEOP_WAR        0
-#define R4600_V2_HIT_CACHEOP_WAR        0
-#define R5432_CP0_INTERRUPT_WAR         0
-#define BCM1250_M3_WAR                  0
-#define SIBYTE_1956_WAR                 0
-#define MIPS4K_ICACHE_REFILL_WAR        0
-#define MIPS_CACHE_SYNC_WAR             0
-#define TX49XX_ICACHE_INDEX_INV_WAR     0
-#define ICACHE_REFILLS_WORKAROUND_WAR   0
-#define R10000_LLSC_WAR                 0
-#define MIPS34K_MISSED_ITLB_WAR         0
+#define R4600_V1_INDEX_ICACHEOP_WAR    0
+#define R4600_V1_HIT_CACHEOP_WAR       0
+#define R4600_V2_HIT_CACHEOP_WAR       0
+#define R5432_CP0_INTERRUPT_WAR                0
+#define BCM1250_M3_WAR                 0
+#define SIBYTE_1956_WAR                        0
+#define MIPS4K_ICACHE_REFILL_WAR       0
+#define MIPS_CACHE_SYNC_WAR            0
+#define TX49XX_ICACHE_INDEX_INV_WAR    0
+#define ICACHE_REFILLS_WORKAROUND_WAR  0
+#define R10000_LLSC_WAR                        0
+#define MIPS34K_MISSED_ITLB_WAR                0
 
 #endif
index 872943a..5f8693d 100644 (file)
@@ -21,7 +21,7 @@
 #define LTQ_DESC_SIZE          0x08    /* each descriptor is 64bit */
 #define LTQ_DESC_NUM           0x40    /* 64 descriptors / channel */
 
-#define LTQ_DMA_OWN            BIT(31) /* owner bit */
+#define LTQ_DMA_OWN            BIT(31) /* owner bit */
 #define LTQ_DMA_C              BIT(30) /* complete bit */
 #define LTQ_DMA_SOP            BIT(29) /* start of packet */
 #define LTQ_DMA_EOP            BIT(28) /* end of packet */
@@ -38,7 +38,7 @@ struct ltq_dma_channel {
        int nr;                         /* the channel number */
        int irq;                        /* the mapped irq */
        int desc;                       /* the current descriptor */
-       struct ltq_dma_desc *desc_base; /* the descriptor base */
+       struct ltq_dma_desc *desc_base; /* the descriptor base */
        int phys;                       /* physical addr */
 };
 
index 1a9ad45..c253d3f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  This is a direct copy of the ev96100.h file, with a global
- * search and replace.  The numbers are the same.
+ * search and replace. The numbers are the same.
  *
  *  The reason I'm duplicating this is so that the 64120/96100
  * defines won't be confusing in the source code.
@@ -18,8 +18,8 @@
  *
  *   (Guessing ...)
  */
-#define GT_PCI_MEM_BASE        0x12000000UL
-#define GT_PCI_MEM_SIZE        0x02000000UL
+#define GT_PCI_MEM_BASE 0x12000000UL
+#define GT_PCI_MEM_SIZE 0x02000000UL
 #define GT_PCI_IO_BASE 0x10000000UL
 #define GT_PCI_IO_SIZE 0x02000000UL
 #define GT_ISA_IO_BASE PCI_IO_BASE
index 1a05d85..75fd8c0 100644 (file)
@@ -8,9 +8,9 @@
  * Copyright (C) 2009 Zhang Le <r0bertz@gentoo.org>
  *
  * reference: /proc/cpuinfo,
- *     arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
- *     arch/mips/kernel/proc.c(show_cpuinfo),
- *      loongson2f user manual.
+ *     arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
+ *     arch/mips/kernel/proc.c(show_cpuinfo),
+ *     loongson2f user manual.
  */
 
 #ifndef __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H
@@ -37,7 +37,7 @@
 #define cpu_has_fpu            1
 #define cpu_has_ic_fills_f_dc  0
 #define cpu_has_inclusive_pcaches      1
-#define cpu_has_llsc           1
+#define cpu_has_llsc           1
 #define cpu_has_mcheck         0
 #define cpu_has_mdmx           0
 #define cpu_has_mips16         0
index 2a8e2bb..a0ee0cb 100644 (file)
@@ -5,8 +5,8 @@
  * Author : jlliu <liujl@lemote.com>
  */
 
-#ifndef        _CS5536_H
-#define        _CS5536_H
+#ifndef _CS5536_H
+#define _CS5536_H
 
 #include <linux/types.h>
 
@@ -16,237 +16,237 @@ extern void _wrmsr(u32 msr, u32 hi, u32 lo);
 /*
  * MSR module base
  */
-#define        CS5536_SB_MSR_BASE      (0x00000000)
-#define        CS5536_GLIU_MSR_BASE    (0x10000000)
-#define        CS5536_ILLEGAL_MSR_BASE (0x20000000)
-#define        CS5536_USB_MSR_BASE     (0x40000000)
-#define        CS5536_IDE_MSR_BASE     (0x60000000)
-#define        CS5536_DIVIL_MSR_BASE   (0x80000000)
-#define        CS5536_ACC_MSR_BASE     (0xa0000000)
-#define        CS5536_UNUSED_MSR_BASE  (0xc0000000)
-#define        CS5536_GLCP_MSR_BASE    (0xe0000000)
+#define CS5536_SB_MSR_BASE     (0x00000000)
+#define CS5536_GLIU_MSR_BASE   (0x10000000)
+#define CS5536_ILLEGAL_MSR_BASE (0x20000000)
+#define CS5536_USB_MSR_BASE    (0x40000000)
+#define CS5536_IDE_MSR_BASE    (0x60000000)
+#define CS5536_DIVIL_MSR_BASE  (0x80000000)
+#define CS5536_ACC_MSR_BASE    (0xa0000000)
+#define CS5536_UNUSED_MSR_BASE (0xc0000000)
+#define CS5536_GLCP_MSR_BASE   (0xe0000000)
 
-#define        SB_MSR_REG(offset)      (CS5536_SB_MSR_BASE     | (offset))
-#define        GLIU_MSR_REG(offset)    (CS5536_GLIU_MSR_BASE   | (offset))
-#define        ILLEGAL_MSR_REG(offset) (CS5536_ILLEGAL_MSR_BASE | (offset))
-#define        USB_MSR_REG(offset)     (CS5536_USB_MSR_BASE    | (offset))
-#define        IDE_MSR_REG(offset)     (CS5536_IDE_MSR_BASE    | (offset))
-#define        DIVIL_MSR_REG(offset)   (CS5536_DIVIL_MSR_BASE  | (offset))
-#define        ACC_MSR_REG(offset)     (CS5536_ACC_MSR_BASE    | (offset))
-#define        UNUSED_MSR_REG(offset)  (CS5536_UNUSED_MSR_BASE | (offset))
-#define        GLCP_MSR_REG(offset)    (CS5536_GLCP_MSR_BASE   | (offset))
+#define SB_MSR_REG(offset)     (CS5536_SB_MSR_BASE     | (offset))
+#define GLIU_MSR_REG(offset)   (CS5536_GLIU_MSR_BASE   | (offset))
+#define ILLEGAL_MSR_REG(offset) (CS5536_ILLEGAL_MSR_BASE | (offset))
+#define USB_MSR_REG(offset)    (CS5536_USB_MSR_BASE    | (offset))
+#define IDE_MSR_REG(offset)    (CS5536_IDE_MSR_BASE    | (offset))
+#define DIVIL_MSR_REG(offset)  (CS5536_DIVIL_MSR_BASE  | (offset))
+#define ACC_MSR_REG(offset)    (CS5536_ACC_MSR_BASE    | (offset))
+#define UNUSED_MSR_REG(offset) (CS5536_UNUSED_MSR_BASE | (offset))
+#define GLCP_MSR_REG(offset)   (CS5536_GLCP_MSR_BASE   | (offset))
 
 /*
  * BAR SPACE OF VIRTUAL PCI :
  * range for pci probe use, length is the actual size.
  */
 /* IO space for all DIVIL modules */
-#define        CS5536_IRQ_RANGE        0xffffffe0 /* USERD FOR PCI PROBE */
-#define        CS5536_IRQ_LENGTH       0x20    /* THE REGS ACTUAL LENGTH */
-#define        CS5536_SMB_RANGE        0xfffffff8
-#define        CS5536_SMB_LENGTH       0x08
-#define        CS5536_GPIO_RANGE       0xffffff00
-#define        CS5536_GPIO_LENGTH      0x100
-#define        CS5536_MFGPT_RANGE      0xffffffc0
-#define        CS5536_MFGPT_LENGTH     0x40
-#define        CS5536_ACPI_RANGE       0xffffffe0
-#define        CS5536_ACPI_LENGTH      0x20
-#define        CS5536_PMS_RANGE        0xffffff80
-#define        CS5536_PMS_LENGTH       0x80
+#define CS5536_IRQ_RANGE       0xffffffe0 /* USERD FOR PCI PROBE */
+#define CS5536_IRQ_LENGTH      0x20    /* THE REGS ACTUAL LENGTH */
+#define CS5536_SMB_RANGE       0xfffffff8
+#define CS5536_SMB_LENGTH      0x08
+#define CS5536_GPIO_RANGE      0xffffff00
+#define CS5536_GPIO_LENGTH     0x100
+#define CS5536_MFGPT_RANGE     0xffffffc0
+#define CS5536_MFGPT_LENGTH    0x40
+#define CS5536_ACPI_RANGE      0xffffffe0
+#define CS5536_ACPI_LENGTH     0x20
+#define CS5536_PMS_RANGE       0xffffff80
+#define CS5536_PMS_LENGTH      0x80
 /* IO space for IDE */
-#define        CS5536_IDE_RANGE        0xfffffff0
-#define        CS5536_IDE_LENGTH       0x10
+#define CS5536_IDE_RANGE       0xfffffff0
+#define CS5536_IDE_LENGTH      0x10
 /* IO space for ACC */
-#define        CS5536_ACC_RANGE        0xffffff80
-#define        CS5536_ACC_LENGTH       0x80
+#define CS5536_ACC_RANGE       0xffffff80
+#define CS5536_ACC_LENGTH      0x80
 /* MEM space for ALL USB modules */
-#define        CS5536_OHCI_RANGE       0xfffff000
-#define        CS5536_OHCI_LENGTH      0x1000
-#define        CS5536_EHCI_RANGE       0xfffff000
-#define        CS5536_EHCI_LENGTH      0x1000
+#define CS5536_OHCI_RANGE      0xfffff000
+#define CS5536_OHCI_LENGTH     0x1000
+#define CS5536_EHCI_RANGE      0xfffff000
+#define CS5536_EHCI_LENGTH     0x1000
 
 /*
  * PCI MSR ACCESS
  */
-#define        PCI_MSR_CTRL            0xF0
-#define        PCI_MSR_ADDR            0xF4
-#define        PCI_MSR_DATA_LO         0xF8
-#define        PCI_MSR_DATA_HI         0xFC
+#define PCI_MSR_CTRL           0xF0
+#define PCI_MSR_ADDR           0xF4
+#define PCI_MSR_DATA_LO                0xF8
+#define PCI_MSR_DATA_HI                0xFC
 
 /**************** MSR *****************************/
 
 /*
  * GLIU STANDARD MSR
  */
-#define        GLIU_CAP                0x00
-#define        GLIU_CONFIG             0x01
-#define        GLIU_SMI                0x02
-#define        GLIU_ERROR              0x03
-#define        GLIU_PM                 0x04
-#define        GLIU_DIAG               0x05
+#define GLIU_CAP               0x00
+#define GLIU_CONFIG            0x01
+#define GLIU_SMI               0x02
+#define GLIU_ERROR             0x03
+#define GLIU_PM                        0x04
+#define GLIU_DIAG              0x05
 
 /*
  * GLIU SPEC. MSR
  */
-#define        GLIU_P2D_BM0            0x20
-#define        GLIU_P2D_BM1            0x21
-#define        GLIU_P2D_BM2            0x22
-#define        GLIU_P2D_BMK0           0x23
-#define        GLIU_P2D_BMK1           0x24
-#define        GLIU_P2D_BM3            0x25
-#define        GLIU_P2D_BM4            0x26
-#define        GLIU_COH                0x80
-#define        GLIU_PAE                0x81
-#define        GLIU_ARB                0x82
-#define        GLIU_ASMI               0x83
-#define        GLIU_AERR               0x84
-#define        GLIU_DEBUG              0x85
-#define        GLIU_PHY_CAP            0x86
-#define        GLIU_NOUT_RESP          0x87
-#define        GLIU_NOUT_WDATA         0x88
-#define        GLIU_WHOAMI             0x8B
-#define        GLIU_SLV_DIS            0x8C
-#define        GLIU_IOD_BM0            0xE0
-#define        GLIU_IOD_BM1            0xE1
-#define        GLIU_IOD_BM2            0xE2
-#define        GLIU_IOD_BM3            0xE3
-#define        GLIU_IOD_BM4            0xE4
-#define        GLIU_IOD_BM5            0xE5
-#define        GLIU_IOD_BM6            0xE6
-#define        GLIU_IOD_BM7            0xE7
-#define        GLIU_IOD_BM8            0xE8
-#define        GLIU_IOD_BM9            0xE9
-#define        GLIU_IOD_SC0            0xEA
-#define        GLIU_IOD_SC1            0xEB
-#define        GLIU_IOD_SC2            0xEC
-#define        GLIU_IOD_SC3            0xED
-#define        GLIU_IOD_SC4            0xEE
-#define        GLIU_IOD_SC5            0xEF
-#define        GLIU_IOD_SC6            0xF0
-#define        GLIU_IOD_SC7            0xF1
+#define GLIU_P2D_BM0           0x20
+#define GLIU_P2D_BM1           0x21
+#define GLIU_P2D_BM2           0x22
+#define GLIU_P2D_BMK0          0x23
+#define GLIU_P2D_BMK1          0x24
+#define GLIU_P2D_BM3           0x25
+#define GLIU_P2D_BM4           0x26
+#define GLIU_COH               0x80
+#define GLIU_PAE               0x81
+#define GLIU_ARB               0x82
+#define GLIU_ASMI              0x83
+#define GLIU_AERR              0x84
+#define GLIU_DEBUG             0x85
+#define GLIU_PHY_CAP           0x86
+#define GLIU_NOUT_RESP         0x87
+#define GLIU_NOUT_WDATA                0x88
+#define GLIU_WHOAMI            0x8B
+#define GLIU_SLV_DIS           0x8C
+#define GLIU_IOD_BM0           0xE0
+#define GLIU_IOD_BM1           0xE1
+#define GLIU_IOD_BM2           0xE2
+#define GLIU_IOD_BM3           0xE3
+#define GLIU_IOD_BM4           0xE4
+#define GLIU_IOD_BM5           0xE5
+#define GLIU_IOD_BM6           0xE6
+#define GLIU_IOD_BM7           0xE7
+#define GLIU_IOD_BM8           0xE8
+#define GLIU_IOD_BM9           0xE9
+#define GLIU_IOD_SC0           0xEA
+#define GLIU_IOD_SC1           0xEB
+#define GLIU_IOD_SC2           0xEC
+#define GLIU_IOD_SC3           0xED
+#define GLIU_IOD_SC4           0xEE
+#define GLIU_IOD_SC5           0xEF
+#define GLIU_IOD_SC6           0xF0
+#define GLIU_IOD_SC7           0xF1
 
 /*
  * SB STANDARD
  */
-#define        SB_CAP          0x00
-#define        SB_CONFIG       0x01
-#define        SB_SMI          0x02
-#define        SB_ERROR        0x03
-#define        SB_MAR_ERR_EN           0x00000001
-#define        SB_TAR_ERR_EN           0x00000002
-#define        SB_RSVD_BIT1            0x00000004
-#define        SB_EXCEP_ERR_EN         0x00000008
-#define        SB_SYSE_ERR_EN          0x00000010
-#define        SB_PARE_ERR_EN          0x00000020
-#define        SB_TAS_ERR_EN           0x00000040
-#define        SB_MAR_ERR_FLAG         0x00010000
-#define        SB_TAR_ERR_FLAG         0x00020000
-#define        SB_RSVD_BIT2            0x00040000
-#define        SB_EXCEP_ERR_FLAG       0x00080000
-#define        SB_SYSE_ERR_FLAG        0x00100000
-#define        SB_PARE_ERR_FLAG        0x00200000
-#define        SB_TAS_ERR_FLAG         0x00400000
-#define        SB_PM           0x04
-#define        SB_DIAG         0x05
+#define SB_CAP         0x00
+#define SB_CONFIG      0x01
+#define SB_SMI         0x02
+#define SB_ERROR       0x03
+#define SB_MAR_ERR_EN          0x00000001
+#define SB_TAR_ERR_EN          0x00000002
+#define SB_RSVD_BIT1           0x00000004
+#define SB_EXCEP_ERR_EN                0x00000008
+#define SB_SYSE_ERR_EN         0x00000010
+#define SB_PARE_ERR_EN         0x00000020
+#define SB_TAS_ERR_EN          0x00000040
+#define SB_MAR_ERR_FLAG                0x00010000
+#define SB_TAR_ERR_FLAG                0x00020000
+#define SB_RSVD_BIT2           0x00040000
+#define SB_EXCEP_ERR_FLAG      0x00080000
+#define SB_SYSE_ERR_FLAG       0x00100000
+#define SB_PARE_ERR_FLAG       0x00200000
+#define SB_TAS_ERR_FLAG                0x00400000
+#define SB_PM          0x04
+#define SB_DIAG                0x05
 
 /*
  * SB SPEC.
  */
-#define        SB_CTRL         0x10
-#define        SB_R0           0x20
-#define        SB_R1           0x21
-#define        SB_R2           0x22
-#define        SB_R3           0x23
-#define        SB_R4           0x24
-#define        SB_R5           0x25
-#define        SB_R6           0x26
-#define        SB_R7           0x27
-#define        SB_R8           0x28
-#define        SB_R9           0x29
-#define        SB_R10          0x2A
-#define        SB_R11          0x2B
-#define        SB_R12          0x2C
-#define        SB_R13          0x2D
-#define        SB_R14          0x2E
-#define        SB_R15          0x2F
+#define SB_CTRL                0x10
+#define SB_R0          0x20
+#define SB_R1          0x21
+#define SB_R2          0x22
+#define SB_R3          0x23
+#define SB_R4          0x24
+#define SB_R5          0x25
+#define SB_R6          0x26
+#define SB_R7          0x27
+#define SB_R8          0x28
+#define SB_R9          0x29
+#define SB_R10         0x2A
+#define SB_R11         0x2B
+#define SB_R12         0x2C
+#define SB_R13         0x2D
+#define SB_R14         0x2E
+#define SB_R15         0x2F
 
 /*
  * GLCP STANDARD
  */
-#define        GLCP_CAP                0x00
-#define        GLCP_CONFIG             0x01
-#define        GLCP_SMI                0x02
-#define        GLCP_ERROR              0x03
-#define        GLCP_PM                 0x04
-#define        GLCP_DIAG               0x05
+#define GLCP_CAP               0x00
+#define GLCP_CONFIG            0x01
+#define GLCP_SMI               0x02
+#define GLCP_ERROR             0x03
+#define GLCP_PM                        0x04
+#define GLCP_DIAG              0x05
 
 /*
  * GLCP SPEC.
  */
-#define        GLCP_CLK_DIS_DELAY      0x08
-#define        GLCP_PM_CLK_DISABLE     0x09
-#define        GLCP_GLB_PM             0x0B
-#define        GLCP_DBG_OUT            0x0C
-#define        GLCP_RSVD1              0x0D
-#define        GLCP_SOFT_COM           0x0E
-#define        SOFT_BAR_SMB_FLAG       0x00000001
-#define        SOFT_BAR_GPIO_FLAG      0x00000002
-#define        SOFT_BAR_MFGPT_FLAG     0x00000004
-#define        SOFT_BAR_IRQ_FLAG       0x00000008
-#define        SOFT_BAR_PMS_FLAG       0x00000010
-#define        SOFT_BAR_ACPI_FLAG      0x00000020
-#define        SOFT_BAR_IDE_FLAG       0x00000400
-#define        SOFT_BAR_ACC_FLAG       0x00000800
-#define        SOFT_BAR_OHCI_FLAG      0x00001000
-#define        SOFT_BAR_EHCI_FLAG      0x00002000
-#define        GLCP_RSVD2              0x0F
-#define        GLCP_CLK_OFF            0x10
-#define        GLCP_CLK_ACTIVE         0x11
-#define        GLCP_CLK_DISABLE        0x12
-#define        GLCP_CLK4ACK            0x13
-#define        GLCP_SYS_RST            0x14
-#define        GLCP_RSVD3              0x15
-#define        GLCP_DBG_CLK_CTRL       0x16
-#define        GLCP_CHIP_REV_ID        0x17
+#define GLCP_CLK_DIS_DELAY     0x08
+#define GLCP_PM_CLK_DISABLE    0x09
+#define GLCP_GLB_PM            0x0B
+#define GLCP_DBG_OUT           0x0C
+#define GLCP_RSVD1             0x0D
+#define GLCP_SOFT_COM          0x0E
+#define SOFT_BAR_SMB_FLAG      0x00000001
+#define SOFT_BAR_GPIO_FLAG     0x00000002
+#define SOFT_BAR_MFGPT_FLAG    0x00000004
+#define SOFT_BAR_IRQ_FLAG      0x00000008
+#define SOFT_BAR_PMS_FLAG      0x00000010
+#define SOFT_BAR_ACPI_FLAG     0x00000020
+#define SOFT_BAR_IDE_FLAG      0x00000400
+#define SOFT_BAR_ACC_FLAG      0x00000800
+#define SOFT_BAR_OHCI_FLAG     0x00001000
+#define SOFT_BAR_EHCI_FLAG     0x00002000
+#define GLCP_RSVD2             0x0F
+#define GLCP_CLK_OFF           0x10
+#define GLCP_CLK_ACTIVE                0x11
+#define GLCP_CLK_DISABLE       0x12
+#define GLCP_CLK4ACK           0x13
+#define GLCP_SYS_RST           0x14
+#define GLCP_RSVD3             0x15
+#define GLCP_DBG_CLK_CTRL      0x16
+#define GLCP_CHIP_REV_ID       0x17
 
 /* PIC */
-#define        PIC_YSEL_LOW            0x20
-#define        PIC_YSEL_LOW_USB_SHIFT          8
-#define        PIC_YSEL_LOW_ACC_SHIFT          16
-#define        PIC_YSEL_LOW_FLASH_SHIFT        24
-#define        PIC_YSEL_HIGH           0x21
-#define        PIC_ZSEL_LOW            0x22
-#define        PIC_ZSEL_HIGH           0x23
-#define        PIC_IRQM_PRIM           0x24
-#define        PIC_IRQM_LPC            0x25
-#define        PIC_XIRR_STS_LOW        0x26
-#define        PIC_XIRR_STS_HIGH       0x27
-#define        PCI_SHDW                0x34
+#define PIC_YSEL_LOW           0x20
+#define PIC_YSEL_LOW_USB_SHIFT         8
+#define PIC_YSEL_LOW_ACC_SHIFT         16
+#define PIC_YSEL_LOW_FLASH_SHIFT       24
+#define PIC_YSEL_HIGH          0x21
+#define PIC_ZSEL_LOW           0x22
+#define PIC_ZSEL_HIGH          0x23
+#define PIC_IRQM_PRIM          0x24
+#define PIC_IRQM_LPC           0x25
+#define PIC_XIRR_STS_LOW       0x26
+#define PIC_XIRR_STS_HIGH      0x27
+#define PCI_SHDW               0x34
 
 /*
  * DIVIL STANDARD
  */
-#define        DIVIL_CAP               0x00
-#define        DIVIL_CONFIG            0x01
-#define        DIVIL_SMI               0x02
-#define        DIVIL_ERROR             0x03
-#define        DIVIL_PM                0x04
-#define        DIVIL_DIAG              0x05
+#define DIVIL_CAP              0x00
+#define DIVIL_CONFIG           0x01
+#define DIVIL_SMI              0x02
+#define DIVIL_ERROR            0x03
+#define DIVIL_PM               0x04
+#define DIVIL_DIAG             0x05
 
 /*
  * DIVIL SPEC.
  */
-#define        DIVIL_LBAR_IRQ          0x08
-#define        DIVIL_LBAR_KEL          0x09
-#define        DIVIL_LBAR_SMB          0x0B
-#define        DIVIL_LBAR_GPIO         0x0C
-#define        DIVIL_LBAR_MFGPT        0x0D
-#define        DIVIL_LBAR_ACPI         0x0E
-#define        DIVIL_LBAR_PMS          0x0F
-#define        DIVIL_LEG_IO            0x14
-#define        DIVIL_BALL_OPTS         0x15
-#define        DIVIL_SOFT_IRQ          0x16
-#define        DIVIL_SOFT_RESET        0x17
+#define DIVIL_LBAR_IRQ         0x08
+#define DIVIL_LBAR_KEL         0x09
+#define DIVIL_LBAR_SMB         0x0B
+#define DIVIL_LBAR_GPIO                0x0C
+#define DIVIL_LBAR_MFGPT       0x0D
+#define DIVIL_LBAR_ACPI                0x0E
+#define DIVIL_LBAR_PMS         0x0F
+#define DIVIL_LEG_IO           0x14
+#define DIVIL_BALL_OPTS                0x15
+#define DIVIL_SOFT_IRQ         0x16
+#define DIVIL_SOFT_RESET       0x17
 
 /* MFGPT */
 #define MFGPT_IRQ      0x28
@@ -254,52 +254,52 @@ extern void _wrmsr(u32 msr, u32 hi, u32 lo);
 /*
  * IDE STANDARD
  */
-#define        IDE_CAP         0x00
-#define        IDE_CONFIG      0x01
-#define        IDE_SMI         0x02
-#define        IDE_ERROR       0x03
-#define        IDE_PM          0x04
-#define        IDE_DIAG        0x05
+#define IDE_CAP                0x00
+#define IDE_CONFIG     0x01
+#define IDE_SMI                0x02
+#define IDE_ERROR      0x03
+#define IDE_PM         0x04
+#define IDE_DIAG       0x05
 
 /*
  * IDE SPEC.
  */
-#define        IDE_IO_BAR      0x08
-#define        IDE_CFG         0x10
-#define        IDE_DTC         0x12
-#define        IDE_CAST        0x13
-#define        IDE_ETC         0x14
-#define        IDE_INTERNAL_PM 0x15
+#define IDE_IO_BAR     0x08
+#define IDE_CFG                0x10
+#define IDE_DTC                0x12
+#define IDE_CAST       0x13
+#define IDE_ETC                0x14
+#define IDE_INTERNAL_PM 0x15
 
 /*
  * ACC STANDARD
  */
-#define        ACC_CAP         0x00
-#define        ACC_CONFIG      0x01
-#define        ACC_SMI         0x02
-#define        ACC_ERROR       0x03
-#define        ACC_PM          0x04
-#define        ACC_DIAG        0x05
+#define ACC_CAP                0x00
+#define ACC_CONFIG     0x01
+#define ACC_SMI                0x02
+#define ACC_ERROR      0x03
+#define ACC_PM         0x04
+#define ACC_DIAG       0x05
 
 /*
  * USB STANDARD
  */
-#define        USB_CAP         0x00
-#define        USB_CONFIG      0x01
-#define        USB_SMI         0x02
-#define        USB_ERROR       0x03
-#define        USB_PM          0x04
-#define        USB_DIAG        0x05
+#define USB_CAP                0x00
+#define USB_CONFIG     0x01
+#define USB_SMI                0x02
+#define USB_ERROR      0x03
+#define USB_PM         0x04
+#define USB_DIAG       0x05
 
 /*
  * USB SPEC.
  */
-#define        USB_OHCI        0x08
-#define        USB_EHCI        0x09
+#define USB_OHCI       0x08
+#define USB_EHCI       0x09
 
 /****************** NATIVE ***************************/
 /* GPIO : I/O SPACE; REG : 32BITS */
-#define        GPIOL_OUT_VAL           0x00
-#define        GPIOL_OUT_EN            0x04
+#define GPIOL_OUT_VAL          0x00
+#define GPIOL_OUT_EN           0x04
 
 #endif                         /* _CS5536_H */
index 4b493d6..021d017 100644 (file)
@@ -25,7 +25,7 @@ static inline void __maybe_unused enable_mfgpt0_counter(void)
 #endif
 
 #define MFGPT_TICK_RATE 14318000
-#define COMPARE  ((MFGPT_TICK_RATE + HZ/2) / HZ)
+#define COMPARE         ((MFGPT_TICK_RATE + HZ/2) / HZ)
 
 #define MFGPT_BASE     mfgpt_base
 #define MFGPT0_CMP2    (MFGPT_BASE + 2)
index 0dca9c8..8a7ecb4 100644 (file)
@@ -8,8 +8,8 @@
  * Author : jlliu, liujl@lemote.com
  */
 
-#ifndef        _CS5536_PCI_H
-#define        _CS5536_PCI_H
+#ifndef _CS5536_PCI_H
+#define _CS5536_PCI_H
 
 #include <linux/types.h>
 #include <linux/pci_regs.h>
 extern void cs5536_pci_conf_write4(int function, int reg, u32 value);
 extern u32 cs5536_pci_conf_read4(int function, int reg);
 
-#define        CS5536_ACC_INTR         9
-#define        CS5536_IDE_INTR         14
-#define        CS5536_USB_INTR         11
-#define        CS5536_MFGPT_INTR       5
-#define        CS5536_UART1_INTR       4
-#define        CS5536_UART2_INTR       3
+#define CS5536_ACC_INTR                9
+#define CS5536_IDE_INTR                14
+#define CS5536_USB_INTR                11
+#define CS5536_MFGPT_INTR      5
+#define CS5536_UART1_INTR      4
+#define CS5536_UART2_INTR      3
 
 /************** PCI BUS DEVICE FUNCTION ***************/
 
 /*
  * PCI bus device function
  */
-#define        PCI_BUS_CS5536          0
-#define        PCI_IDSEL_CS5536        14
+#define PCI_BUS_CS5536         0
+#define PCI_IDSEL_CS5536       14
 
 /********** STANDARD PCI-2.2 EXPANSION ****************/
 
@@ -45,21 +45,21 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
        (((mod_dev_id) << 16) | (sys_vendor_id))
 
 /* VENDOR ID */
-#define        CS5536_VENDOR_ID        0x1022
+#define CS5536_VENDOR_ID       0x1022
 
 /* DEVICE ID */
-#define        CS5536_ISA_DEVICE_ID            0x2090
-#define        CS5536_IDE_DEVICE_ID            0x209a
-#define        CS5536_ACC_DEVICE_ID            0x2093
-#define        CS5536_OHCI_DEVICE_ID           0x2094
-#define        CS5536_EHCI_DEVICE_ID           0x2095
+#define CS5536_ISA_DEVICE_ID           0x2090
+#define CS5536_IDE_DEVICE_ID           0x209a
+#define CS5536_ACC_DEVICE_ID           0x2093
+#define CS5536_OHCI_DEVICE_ID          0x2094
+#define CS5536_EHCI_DEVICE_ID          0x2095
 
 /* CLASS CODE : CLASS SUB-CLASS INTERFACE */
-#define        CS5536_ISA_CLASS_CODE           0x060100
+#define CS5536_ISA_CLASS_CODE          0x060100
 #define CS5536_IDE_CLASS_CODE          0x010180
-#define        CS5536_ACC_CLASS_CODE           0x040100
-#define        CS5536_OHCI_CLASS_CODE          0x0C0310
-#define        CS5536_EHCI_CLASS_CODE          0x0C0320
+#define CS5536_ACC_CLASS_CODE          0x040100
+#define CS5536_OHCI_CLASS_CODE         0x0C0310
+#define CS5536_EHCI_CLASS_CODE         0x0C0320
 
 /* BHLC : BIST HEADER-TYPE LATENCY-TIMER CACHE-LINE-SIZE */
 
@@ -67,40 +67,40 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
        ((PCI_NONE_BIST << 24) | ((header_type) << 16) \
                | ((latency_timer) << 8) | PCI_NORMAL_CACHE_LINE_SIZE);
 
-#define        PCI_NONE_BIST                   0x00    /* RO not implemented yet. */
-#define        PCI_BRIDGE_HEADER_TYPE          0x80    /* RO */
-#define        PCI_NORMAL_HEADER_TYPE          0x00
-#define        PCI_NORMAL_LATENCY_TIMER        0x00
-#define        PCI_NORMAL_CACHE_LINE_SIZE      0x08    /* RW */
+#define PCI_NONE_BIST                  0x00    /* RO not implemented yet. */
+#define PCI_BRIDGE_HEADER_TYPE         0x80    /* RO */
+#define PCI_NORMAL_HEADER_TYPE         0x00
+#define PCI_NORMAL_LATENCY_TIMER       0x00
+#define PCI_NORMAL_CACHE_LINE_SIZE     0x08    /* RW */
 
 /* BAR */
-#define        PCI_BAR0_REG                    0x10
-#define        PCI_BAR1_REG                    0x14
-#define        PCI_BAR2_REG                    0x18
-#define        PCI_BAR3_REG                    0x1c
-#define        PCI_BAR4_REG                    0x20
-#define        PCI_BAR5_REG                    0x24
-#define        PCI_BAR_COUNT                   6
-#define        PCI_BAR_RANGE_MASK              0xFFFFFFFF
+#define PCI_BAR0_REG                   0x10
+#define PCI_BAR1_REG                   0x14
+#define PCI_BAR2_REG                   0x18
+#define PCI_BAR3_REG                   0x1c
+#define PCI_BAR4_REG                   0x20
+#define PCI_BAR5_REG                   0x24
+#define PCI_BAR_COUNT                  6
+#define PCI_BAR_RANGE_MASK             0xFFFFFFFF
 
 /* CARDBUS CIS POINTER */
-#define        PCI_CARDBUS_CIS_POINTER         0x00000000
+#define PCI_CARDBUS_CIS_POINTER                0x00000000
 
-/* SUBSYSTEM VENDOR ID  */
-#define        CS5536_SUB_VENDOR_ID            CS5536_VENDOR_ID
+/* SUBSYSTEM VENDOR ID */
+#define CS5536_SUB_VENDOR_ID           CS5536_VENDOR_ID
 
 /* SUBSYSTEM ID */
-#define        CS5536_ISA_SUB_ID               CS5536_ISA_DEVICE_ID
-#define        CS5536_IDE_SUB_ID               CS5536_IDE_DEVICE_ID
-#define        CS5536_ACC_SUB_ID               CS5536_ACC_DEVICE_ID
-#define        CS5536_OHCI_SUB_ID              CS5536_OHCI_DEVICE_ID
-#define        CS5536_EHCI_SUB_ID              CS5536_EHCI_DEVICE_ID
+#define CS5536_ISA_SUB_ID              CS5536_ISA_DEVICE_ID
+#define CS5536_IDE_SUB_ID              CS5536_IDE_DEVICE_ID
+#define CS5536_ACC_SUB_ID              CS5536_ACC_DEVICE_ID
+#define CS5536_OHCI_SUB_ID             CS5536_OHCI_DEVICE_ID
+#define CS5536_EHCI_SUB_ID             CS5536_EHCI_DEVICE_ID
 
 /* EXPANSION ROM BAR */
-#define        PCI_EXPANSION_ROM_BAR           0x00000000
+#define PCI_EXPANSION_ROM_BAR          0x00000000
 
 /* CAPABILITIES POINTER */
-#define        PCI_CAPLIST_POINTER             0x00000000
+#define PCI_CAPLIST_POINTER            0x00000000
 #define PCI_CAPLIST_USB_POINTER                0x40
 /* INTERRUPT */
 
@@ -108,46 +108,46 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
        ((PCI_MAX_LATENCY << 24) | (PCI_MIN_GRANT << 16) | \
                ((pin) << 8) | (mod_intr))
 
-#define        PCI_MAX_LATENCY                 0x40
-#define        PCI_MIN_GRANT                   0x00
-#define        PCI_DEFAULT_PIN                 0x01
+#define PCI_MAX_LATENCY                        0x40
+#define PCI_MIN_GRANT                  0x00
+#define PCI_DEFAULT_PIN                        0x01
 
 /*********** EXPANSION PCI REG ************************/
 
 /*
  * ISA EXPANSION
  */
-#define        PCI_UART1_INT_REG       0x50
+#define PCI_UART1_INT_REG      0x50
 #define PCI_UART2_INT_REG      0x54
-#define        PCI_ISA_FIXUP_REG       0x58
+#define PCI_ISA_FIXUP_REG      0x58
 
 /*
  * IDE EXPANSION
  */
-#define        PCI_IDE_CFG_REG         0x40
-#define        CS5536_IDE_FLASH_SIGNATURE      0xDEADBEEF
-#define        PCI_IDE_DTC_REG         0x48
-#define        PCI_IDE_CAST_REG        0x4C
-#define        PCI_IDE_ETC_REG         0x50
-#define        PCI_IDE_PM_REG          0x54
-#define        PCI_IDE_INT_REG         0x60
+#define PCI_IDE_CFG_REG                0x40
+#define CS5536_IDE_FLASH_SIGNATURE     0xDEADBEEF
+#define PCI_IDE_DTC_REG                0x48
+#define PCI_IDE_CAST_REG       0x4C
+#define PCI_IDE_ETC_REG                0x50
+#define PCI_IDE_PM_REG         0x54
+#define PCI_IDE_INT_REG                0x60
 
 /*
  * ACC EXPANSION
  */
-#define        PCI_ACC_INT_REG         0x50
+#define PCI_ACC_INT_REG                0x50
 
 /*
  * OHCI EXPANSION : INTTERUPT IS IMPLEMENTED BY THE OHCI
  */
-#define        PCI_OHCI_PM_REG         0x40
-#define        PCI_OHCI_INT_REG        0x50
+#define PCI_OHCI_PM_REG                0x40
+#define PCI_OHCI_INT_REG       0x50
 
 /*
  * EHCI EXPANSION
  */
-#define        PCI_EHCI_LEGSMIEN_REG   0x50
-#define        PCI_EHCI_LEGSMISTS_REG  0x54
-#define        PCI_EHCI_FLADJ_REG      0x60
+#define PCI_EHCI_LEGSMIEN_REG  0x50
+#define PCI_EHCI_LEGSMISTS_REG 0x54
+#define PCI_EHCI_FLADJ_REG     0x60
 
 #endif                         /* _CS5536_PCI_H_ */
index 21c4ece..1f17c18 100644 (file)
@@ -5,8 +5,8 @@
  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
  */
 
-#ifndef        _CS5536_VSM_H
-#define        _CS5536_VSM_H
+#ifndef _CS5536_VSM_H
+#define _CS5536_VSM_H
 
 #include <linux/types.h>
 
index e30e73d..211a7b7 100644 (file)
@@ -10,8 +10,8 @@
  * (at your option) any later version.
  */
 
-#ifndef        __STLS2F_GPIO_H
-#define        __STLS2F_GPIO_H
+#ifndef __STLS2F_GPIO_H
+#define __STLS2F_GPIO_H
 
 #include <asm-generic/gpio.h>
 
index 5222a00..b286534 100644 (file)
@@ -2,8 +2,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
@@ -52,7 +52,7 @@ extern void mach_irq_dispatch(unsigned int pending);
 extern int mach_i8259_irq(void);
 
 /* We need this in some places... */
-#define delay()        ({              \
+#define delay() ({             \
        int x;                          \
        for (x = 0; x < 100000; x++)    \
                __asm__ __volatile__(""); \
@@ -82,13 +82,13 @@ static inline void do_perfcnt_IRQ(void)
 
 #define LOONGSON_BOOT_BASE     0x1fc00000
 #define LOONGSON_BOOT_SIZE     0x00100000      /* 1M */
-#define LOONGSON_BOOT_TOP      (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
-#define LOONGSON_REG_BASE      0x1fe00000
-#define LOONGSON_REG_SIZE      0x00100000      /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON_BOOT_TOP      (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+#define LOONGSON_REG_BASE      0x1fe00000
+#define LOONGSON_REG_SIZE      0x00100000      /* 256Bytes + 256Bytes + ??? */
 #define LOONGSON_REG_TOP       (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
 
-#define LOONGSON_LIO1_BASE     0x1ff00000
-#define LOONGSON_LIO1_SIZE     0x00100000      /* 1M */
+#define LOONGSON_LIO1_BASE     0x1ff00000
+#define LOONGSON_LIO1_SIZE     0x00100000      /* 1M */
 #define LOONGSON_LIO1_TOP      (LOONGSON_LIO1_BASE+LOONGSON_LIO1_SIZE-1)
 
 #define LOONGSON_PCILO0_BASE   0x10000000
@@ -115,13 +115,13 @@ static inline void do_perfcnt_IRQ(void)
 #define LOONGSON_PCI_REG(x)    LOONGSON_REG(LOONGSON_PCICONFIGBASE + (x))
 #define LOONGSON_PCIDID                LOONGSON_PCI_REG(0x00)
 #define LOONGSON_PCICMD                LOONGSON_PCI_REG(0x04)
-#define LOONGSON_PCICLASS      LOONGSON_PCI_REG(0x08)
+#define LOONGSON_PCICLASS      LOONGSON_PCI_REG(0x08)
 #define LOONGSON_PCILTIMER     LOONGSON_PCI_REG(0x0c)
-#define LOONGSON_PCIBASE0      LOONGSON_PCI_REG(0x10)
-#define LOONGSON_PCIBASE1      LOONGSON_PCI_REG(0x14)
-#define LOONGSON_PCIBASE2      LOONGSON_PCI_REG(0x18)
-#define LOONGSON_PCIBASE3      LOONGSON_PCI_REG(0x1c)
-#define LOONGSON_PCIBASE4      LOONGSON_PCI_REG(0x20)
+#define LOONGSON_PCIBASE0      LOONGSON_PCI_REG(0x10)
+#define LOONGSON_PCIBASE1      LOONGSON_PCI_REG(0x14)
+#define LOONGSON_PCIBASE2      LOONGSON_PCI_REG(0x18)
+#define LOONGSON_PCIBASE3      LOONGSON_PCI_REG(0x1c)
+#define LOONGSON_PCIBASE4      LOONGSON_PCI_REG(0x20)
 #define LOONGSON_PCIEXPRBASE   LOONGSON_PCI_REG(0x30)
 #define LOONGSON_PCIINT                LOONGSON_PCI_REG(0x3c)
 
@@ -132,7 +132,7 @@ static inline void do_perfcnt_IRQ(void)
 #define LOONGSON_PCICMD_MABORT_CLR     0x20000000
 #define LOONGSON_PCICMD_MTABORT_CLR    0x10000000
 #define LOONGSON_PCICMD_TABORT_CLR     0x08000000
-#define LOONGSON_PCICMD_MPERR_CLR      0x01000000
+#define LOONGSON_PCICMD_MPERR_CLR      0x01000000
 #define LOONGSON_PCICMD_PERRRESPEN     0x00000040
 #define LOONGSON_PCICMD_ASTEPEN                0x00000080
 #define LOONGSON_PCICMD_SERREN         0x00000100
@@ -142,7 +142,7 @@ static inline void do_perfcnt_IRQ(void)
 /* Loongson h/w Configuration */
 
 #define LOONGSON_GENCFG_OFFSET         0x4
-#define LOONGSON_GENCFG        LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
+#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
 
 #define LOONGSON_GENCFG_DEBUGMODE      0x00000001
 #define LOONGSON_GENCFG_SNOOPEN                0x00000002
@@ -173,25 +173,25 @@ static inline void do_perfcnt_IRQ(void)
 
 /* GPIO Regs - r/w */
 
-#define LOONGSON_GPIODATA              LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
+#define LOONGSON_GPIODATA              LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
 #define LOONGSON_GPIOIE                        LOONGSON_REG(LOONGSON_REGBASE + 0x20)
 
 /* ICU Configuration Regs - r/w */
 
 #define LOONGSON_INTEDGE               LOONGSON_REG(LOONGSON_REGBASE + 0x24)
-#define LOONGSON_INTSTEER              LOONGSON_REG(LOONGSON_REGBASE + 0x28)
+#define LOONGSON_INTSTEER              LOONGSON_REG(LOONGSON_REGBASE + 0x28)
 #define LOONGSON_INTPOL                        LOONGSON_REG(LOONGSON_REGBASE + 0x2c)
 
 /* ICU Enable Regs - IntEn & IntISR are r/o. */
 
-#define LOONGSON_INTENSET              LOONGSON_REG(LOONGSON_REGBASE + 0x30)
-#define LOONGSON_INTENCLR              LOONGSON_REG(LOONGSON_REGBASE + 0x34)
+#define LOONGSON_INTENSET              LOONGSON_REG(LOONGSON_REGBASE + 0x30)
+#define LOONGSON_INTENCLR              LOONGSON_REG(LOONGSON_REGBASE + 0x34)
 #define LOONGSON_INTEN                 LOONGSON_REG(LOONGSON_REGBASE + 0x38)
 #define LOONGSON_INTISR                        LOONGSON_REG(LOONGSON_REGBASE + 0x3c)
 
 /* ICU */
 #define LOONGSON_ICU_MBOXES            0x0000000f
-#define LOONGSON_ICU_MBOXES_SHIFT      0
+#define LOONGSON_ICU_MBOXES_SHIFT      0
 #define LOONGSON_ICU_DMARDY            0x00000010
 #define LOONGSON_ICU_DMAEMPTY          0x00000020
 #define LOONGSON_ICU_COPYRDY           0x00000040
@@ -212,10 +212,10 @@ static inline void do_perfcnt_IRQ(void)
 
 /* PCI prefetch window base & mask */
 
-#define LOONGSON_MEM_WIN_BASE_L        LOONGSON_REG(LOONGSON_REGBASE + 0x40)
-#define LOONGSON_MEM_WIN_BASE_H        LOONGSON_REG(LOONGSON_REGBASE + 0x44)
-#define LOONGSON_MEM_WIN_MASK_L        LOONGSON_REG(LOONGSON_REGBASE + 0x48)
-#define LOONGSON_MEM_WIN_MASK_H        LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
+#define LOONGSON_MEM_WIN_BASE_L                LOONGSON_REG(LOONGSON_REGBASE + 0x40)
+#define LOONGSON_MEM_WIN_BASE_H                LOONGSON_REG(LOONGSON_REGBASE + 0x44)
+#define LOONGSON_MEM_WIN_MASK_L                LOONGSON_REG(LOONGSON_REGBASE + 0x48)
+#define LOONGSON_MEM_WIN_MASK_H                LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
 
 /* PCI_Hit*_Sel_* */
 
index 4321338..3810d5c 100644 (file)
@@ -2,8 +2,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 3b23ee8..f4a36d7 100644 (file)
@@ -2,8 +2,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index da96ed4..96bfb1c 100644 (file)
@@ -3,8 +3,8 @@
  *
  * IRQ mappings for Loongson 1
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 4e18e88..5c437c2 100644 (file)
@@ -3,8 +3,8 @@
  *
  * Register mappings for Loongson 1
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 718a122..30c13e5 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index b871dc4..34859a4 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index a81fa3d..fb6a3ff 100644 (file)
@@ -3,8 +3,8 @@
  *
  * Loongson 1 Clock Register Definitions.
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index f897de6..6574568 100644 (file)
@@ -3,8 +3,8 @@
  *
  * Loongson 1 watchdog register definitions.
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 37e3583..de3b66a 100644 (file)
@@ -23,8 +23,8 @@
 /* #define cpu_has_watch       ? */
 #define cpu_has_divec          1
 #define cpu_has_vce            0
-/* #define cpu_has_cache_cdex_p        ? */
-/* #define cpu_has_cache_cdex_s        ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
@@ -50,8 +50,8 @@
 /* #define cpu_has_watch       ? */
 #define cpu_has_divec          1
 #define cpu_has_vce            0
-/* #define cpu_has_cache_cdex_p        ? */
-/* #define cpu_has_cache_cdex_s        ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
index 9b9da26..47cfe64 100644 (file)
@@ -2,7 +2,7 @@
 #define __ASM_MACH_MIPS_IRQ_H
 
 
-#define NR_IRQS        256
+#define NR_IRQS 256
 
 #include_next <irq.h>
 
index 0f86314..62a4b28 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  This is a direct copy of the ev96100.h file, with a global
- * search and replace.  The numbers are the same.
+ * search and replace. The numbers are the same.
  *
  *  The reason I'm duplicating this is so that the 64120/96100
  * defines won't be confusing in the source code.
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..016fa94
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 04, 07 Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_mips16         1
+#define cpu_has_dsp            1
+/* #define cpu_has_dsp2                ??? - do runtime detection */
+#define cpu_has_mipsmt         1
+#define cpu_has_fpu            0
+
+#define cpu_has_mips32r1       0
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#endif /* __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h b/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h
new file mode 100644 (file)
index 0000000..ebdbab9
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * include/asm-mips/pmc-sierra/msp71xx/gpio.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * @author Patrick Glass <patrickglass@gmail.com>
+ */
+
+#ifndef __PMC_MSP71XX_GPIO_H
+#define __PMC_MSP71XX_GPIO_H
+
+/* Max number of gpio's is 28 on chip plus 3 banks of I2C IO Expanders */
+#define ARCH_NR_GPIOS (28 + (3 * 8))
+
+/* new generic GPIO API - see Documentation/gpio.txt */
+#include <asm-generic/gpio.h>
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep  __gpio_cansleep
+
+/* Setup calls for the gpio and gpio extended */
+extern void msp71xx_init_gpio(void);
+extern void msp71xx_init_gpio_extended(void);
+extern int msp71xx_set_output_drive(unsigned gpio, int value);
+
+/* Custom output drive functionss */
+static inline int gpio_set_output_drive(unsigned gpio, int value)
+{
+       return msp71xx_set_output_drive(gpio, value);
+}
+
+/* IRQ's are not supported for gpio lines */
+static inline int gpio_to_irq(unsigned gpio)
+{
+       return -EINVAL;
+}
+
+static inline int irq_to_gpio(unsigned irq)
+{
+       return -EINVAL;
+}
+
+#endif /* __PMC_MSP71XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h
new file mode 100644 (file)
index 0000000..ac863e2
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Defines for the MSP interrupt controller.
+ *
+ * Copyright (C) 1999 MIPS Technologies, Inc.  All rights reserved.
+ * Author: Carsten Langgaard, carstenl@mips.com
+ *
+ * ########################################################################
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+#ifndef _MSP_CIC_INT_H
+#define _MSP_CIC_INT_H
+
+/*
+ * The PMC-Sierra CIC interrupts are all centrally managed by the
+ * CIC sub-system.
+ * We attempt to keep the interrupt numbers as consistent as possible
+ * across all of the MSP devices, but some differences will creep in ...
+ * The interrupts which are directly forwarded to the MIPS core interrupts
+ * are assigned interrupts in the range 0-7, interrupts cascaded through
+ * the CIC are assigned interrupts 8-39.  The cascade occurs on C_IRQ4
+ * (MSP_INT_CIC).  Currently we don't really distinguish between VPE1
+ * and VPE0 (or thread contexts for that matter).  Will have to fix.
+ * The PER interrupts are assigned interrupts in the range 40-71.
+*/
+
+
+/*
+ * IRQs directly forwarded to the CPU
+ */
+#define MSP_MIPS_INTBASE       0
+#define MSP_INT_SW0            0       /* IRQ for swint0,       C_SW0  */
+#define MSP_INT_SW1            1       /* IRQ for swint1,       C_SW1  */
+#define MSP_INT_MAC0           2       /* IRQ for MAC 0,        C_IRQ0 */
+#define MSP_INT_MAC1           3       /* IRQ for MAC 1,        C_IRQ1 */
+#define MSP_INT_USB            4       /* IRQ for USB,          C_IRQ2 */
+#define MSP_INT_SAR            5       /* IRQ for ADSL2+ SAR,   C_IRQ3 */
+#define MSP_INT_CIC            6       /* IRQ for CIC block,    C_IRQ4 */
+#define MSP_INT_SEC            7       /* IRQ for Sec engine,   C_IRQ5 */
+
+/*
+ * IRQs cascaded on CPU interrupt 4 (CAUSE bit 12, C_IRQ4)
+ * These defines should be tied to the register definitions for the CIC
+ * interrupt routine.  For now, just use hard-coded values.
+ */
+#define MSP_CIC_INTBASE                (MSP_MIPS_INTBASE + 8)
+#define MSP_INT_EXT0           (MSP_CIC_INTBASE + 0)
+                                       /* External interrupt 0         */
+#define MSP_INT_EXT1           (MSP_CIC_INTBASE + 1)
+                                       /* External interrupt 1         */
+#define MSP_INT_EXT2           (MSP_CIC_INTBASE + 2)
+                                       /* External interrupt 2         */
+#define MSP_INT_EXT3           (MSP_CIC_INTBASE + 3)
+                                       /* External interrupt 3         */
+#define MSP_INT_CPUIF          (MSP_CIC_INTBASE + 4)
+                                       /* CPU interface interrupt      */
+#define MSP_INT_EXT4           (MSP_CIC_INTBASE + 5)
+                                       /* External interrupt 4         */
+#define MSP_INT_CIC_USB                (MSP_CIC_INTBASE + 6)
+                                       /* Cascaded IRQ for USB         */
+#define MSP_INT_MBOX           (MSP_CIC_INTBASE + 7)
+                                       /* Sec engine mailbox IRQ       */
+#define MSP_INT_EXT5           (MSP_CIC_INTBASE + 8)
+                                       /* External interrupt 5         */
+#define MSP_INT_TDM            (MSP_CIC_INTBASE + 9)
+                                       /* TDM interrupt                */
+#define MSP_INT_CIC_MAC0       (MSP_CIC_INTBASE + 10)
+                                       /* Cascaded IRQ for MAC 0       */
+#define MSP_INT_CIC_MAC1       (MSP_CIC_INTBASE + 11)
+                                       /* Cascaded IRQ for MAC 1       */
+#define MSP_INT_CIC_SEC                (MSP_CIC_INTBASE + 12)
+                                       /* Cascaded IRQ for sec engine  */
+#define MSP_INT_PER            (MSP_CIC_INTBASE + 13)
+                                       /* Peripheral interrupt         */
+#define MSP_INT_TIMER0         (MSP_CIC_INTBASE + 14)
+                                       /* SLP timer 0                  */
+#define MSP_INT_TIMER1         (MSP_CIC_INTBASE + 15)
+                                       /* SLP timer 1                  */
+#define MSP_INT_TIMER2         (MSP_CIC_INTBASE + 16)
+                                       /* SLP timer 2                  */
+#define MSP_INT_VPE0_TIMER     (MSP_CIC_INTBASE + 17)
+                                       /* VPE0 MIPS timer              */
+#define MSP_INT_BLKCP          (MSP_CIC_INTBASE + 18)
+                                       /* Block Copy                   */
+#define MSP_INT_UART0          (MSP_CIC_INTBASE + 19)
+                                       /* UART 0                       */
+#define MSP_INT_PCI            (MSP_CIC_INTBASE + 20)
+                                       /* PCI subsystem                */
+#define MSP_INT_EXT6           (MSP_CIC_INTBASE + 21)
+                                       /* External interrupt 5         */
+#define MSP_INT_PCI_MSI                (MSP_CIC_INTBASE + 22)
+                                       /* PCI Message Signal           */
+#define MSP_INT_CIC_SAR                (MSP_CIC_INTBASE + 23)
+                                       /* Cascaded ADSL2+ SAR IRQ      */
+#define MSP_INT_DSL            (MSP_CIC_INTBASE + 24)
+                                       /* ADSL2+ IRQ                   */
+#define MSP_INT_CIC_ERR                (MSP_CIC_INTBASE + 25)
+                                       /* SLP error condition          */
+#define MSP_INT_VPE1_TIMER     (MSP_CIC_INTBASE + 26)
+                                       /* VPE1 MIPS timer              */
+#define MSP_INT_VPE0_PC                (MSP_CIC_INTBASE + 27)
+                                       /* VPE0 Performance counter     */
+#define MSP_INT_VPE1_PC                (MSP_CIC_INTBASE + 28)
+                                       /* VPE1 Performance counter     */
+#define MSP_INT_EXT7           (MSP_CIC_INTBASE + 29)
+                                       /* External interrupt 5         */
+#define MSP_INT_VPE0_SW                (MSP_CIC_INTBASE + 30)
+                                       /* VPE0 Software interrupt      */
+#define MSP_INT_VPE1_SW                (MSP_CIC_INTBASE + 31)
+                                       /* VPE0 Software interrupt      */
+
+/*
+ * IRQs cascaded on CIC PER interrupt (MSP_INT_PER)
+ */
+#define MSP_PER_INTBASE                (MSP_CIC_INTBASE + 32)
+/* Reserved                                       0-1                  */
+#define MSP_INT_UART1          (MSP_PER_INTBASE + 2)
+                                       /* UART 1                       */
+/* Reserved                                       3-5                  */
+#define MSP_INT_2WIRE          (MSP_PER_INTBASE + 6)
+                                       /* 2-wire                       */
+#define MSP_INT_TM0            (MSP_PER_INTBASE + 7)
+                                       /* Peripheral timer block out 0 */
+#define MSP_INT_TM1            (MSP_PER_INTBASE + 8)
+                                       /* Peripheral timer block out 1 */
+/* Reserved                                       9                    */
+#define MSP_INT_SPRX           (MSP_PER_INTBASE + 10)
+                                       /* SPI RX complete              */
+#define MSP_INT_SPTX           (MSP_PER_INTBASE + 11)
+                                       /* SPI TX complete              */
+#define MSP_INT_GPIO           (MSP_PER_INTBASE + 12)
+                                       /* GPIO                         */
+#define MSP_INT_PER_ERR                (MSP_PER_INTBASE + 13)
+                                       /* Peripheral error             */
+/* Reserved                                       14-31                */
+
+#endif /* !_MSP_CIC_INT_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h
new file mode 100644 (file)
index 0000000..daacebb
--- /dev/null
@@ -0,0 +1,343 @@
+/*
+ *
+ * Macros for external SMP-safe access to the PMC MSP71xx reference
+ * board GPIO pins
+ *
+ * Copyright 2010 PMC-Sierra, Inc.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MSP_GPIO_MACROS_H__
+#define __MSP_GPIO_MACROS_H__
+
+#include <msp_regops.h>
+#include <msp_regs.h>
+
+#ifdef CONFIG_PMC_MSP7120_GW
+#define MSP_NUM_GPIOS          20
+#else
+#define MSP_NUM_GPIOS          28
+#endif
+
+/* -- GPIO Enumerations -- */
+enum msp_gpio_data {
+       MSP_GPIO_LO = 0,
+       MSP_GPIO_HI = 1,
+       MSP_GPIO_NONE,          /* Special - Means pin is out of range */
+       MSP_GPIO_TOGGLE,        /* Special - Sets pin to opposite */
+};
+
+enum msp_gpio_mode {
+       MSP_GPIO_INPUT          = 0x0,
+       /* MSP_GPIO_ INTERRUPT  = 0x1,  Not supported yet */
+       MSP_GPIO_UART_INPUT     = 0x2,  /* Only GPIO 4 or 5 */
+       MSP_GPIO_OUTPUT         = 0x8,
+       MSP_GPIO_UART_OUTPUT    = 0x9,  /* Only GPIO 2 or 3 */
+       MSP_GPIO_PERIF_TIMERA   = 0x9,  /* Only GPIO 0 or 1 */
+       MSP_GPIO_PERIF_TIMERB   = 0xa,  /* Only GPIO 0 or 1 */
+       MSP_GPIO_UNKNOWN        = 0xb,  /* No such GPIO or mode */
+};
+
+/* -- Static Tables -- */
+
+/* Maps pins to data register */
+static volatile u32 * const MSP_GPIO_DATA_REGISTER[] = {
+       /* GPIO 0 and 1 on the first register */
+       GPIO_DATA1_REG, GPIO_DATA1_REG,
+       /* GPIO 2, 3, 4, and 5 on the second register */
+       GPIO_DATA2_REG, GPIO_DATA2_REG, GPIO_DATA2_REG, GPIO_DATA2_REG,
+       /* GPIO 6, 7, 8, and 9 on the third register */
+       GPIO_DATA3_REG, GPIO_DATA3_REG, GPIO_DATA3_REG, GPIO_DATA3_REG,
+       /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
+       GPIO_DATA4_REG, GPIO_DATA4_REG, GPIO_DATA4_REG, GPIO_DATA4_REG,
+       GPIO_DATA4_REG, GPIO_DATA4_REG,
+       /* GPIO 16 - 23 on the first strange EXTENDED register */
+       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
+       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
+       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
+       /* GPIO 24 - 27 on the second strange EXTENDED register */
+       EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG,
+       EXTENDED_GPIO2_REG,
+};
+
+/* Maps pins to mode register */
+static volatile u32 * const MSP_GPIO_MODE_REGISTER[] = {
+       /* GPIO 0 and 1 on the first register */
+       GPIO_CFG1_REG, GPIO_CFG1_REG,
+       /* GPIO 2, 3, 4, and 5 on the second register */
+       GPIO_CFG2_REG, GPIO_CFG2_REG, GPIO_CFG2_REG, GPIO_CFG2_REG,
+       /* GPIO 6, 7, 8, and 9 on the third register */
+       GPIO_CFG3_REG, GPIO_CFG3_REG, GPIO_CFG3_REG, GPIO_CFG3_REG,
+       /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
+       GPIO_CFG4_REG, GPIO_CFG4_REG, GPIO_CFG4_REG, GPIO_CFG4_REG,
+       GPIO_CFG4_REG, GPIO_CFG4_REG,
+       /* GPIO 16 - 23 on the first strange EXTENDED register */
+       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
+       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
+       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
+       /* GPIO 24 - 27 on the second strange EXTENDED register */
+       EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG,
+       EXTENDED_GPIO2_REG,
+};
+
+/* Maps 'basic' pins to relative offset from 0 per register */
+static int MSP_GPIO_OFFSET[] = {
+       /* GPIO 0 and 1 on the first register */
+       0, 0,
+       /* GPIO 2, 3, 4, and 5 on the second register */
+       2, 2, 2, 2,
+       /* GPIO 6, 7, 8, and 9 on the third register */
+       6, 6, 6, 6,
+       /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
+       10, 10, 10, 10, 10, 10,
+};
+
+/* Maps MODE to allowed pin mask */
+static unsigned int MSP_GPIO_MODE_ALLOWED[] = {
+       0xffffffff,     /* Mode 0 - INPUT */
+       0x00000,        /* Mode 1 - INTERRUPT */
+       0x00030,        /* Mode 2 - UART_INPUT (GPIO 4, 5)*/
+       0, 0, 0, 0, 0,  /* Modes 3, 4, 5, 6, and 7 are reserved */
+       0xffffffff,     /* Mode 8 - OUTPUT */
+       0x0000f,        /* Mode 9 - UART_OUTPUT/
+                               PERIF_TIMERA (GPIO 0, 1, 2, 3) */
+       0x00003,        /* Mode a - PERIF_TIMERB (GPIO 0, 1) */
+       0x00000,        /* Mode b - Not really a mode! */
+};
+
+/* -- Bit masks -- */
+
+/* This gives you the 'register relative offset gpio' number */
+#define OFFSET_GPIO_NUMBER(gpio)       (gpio - MSP_GPIO_OFFSET[gpio])
+
+/* These take the 'register relative offset gpio' number */
+#define BASIC_DATA_REG_MASK(ogpio)             (1 << ogpio)
+#define BASIC_MODE_REG_VALUE(mode, ogpio)      \
+       (mode << BASIC_MODE_REG_SHIFT(ogpio))
+#define BASIC_MODE_REG_MASK(ogpio)             \
+       BASIC_MODE_REG_VALUE(0xf, ogpio)
+#define BASIC_MODE_REG_SHIFT(ogpio)            (ogpio * 4)
+#define BASIC_MODE_REG_FROM_REG(data, ogpio)   \
+       ((data & BASIC_MODE_REG_MASK(ogpio)) >> BASIC_MODE_REG_SHIFT(ogpio))
+
+/* These take the actual GPIO number (0 through 15) */
+#define BASIC_DATA_MASK(gpio)  \
+       BASIC_DATA_REG_MASK(OFFSET_GPIO_NUMBER(gpio))
+#define BASIC_MODE_MASK(gpio)  \
+       BASIC_MODE_REG_MASK(OFFSET_GPIO_NUMBER(gpio))
+#define BASIC_MODE(mode, gpio) \
+       BASIC_MODE_REG_VALUE(mode, OFFSET_GPIO_NUMBER(gpio))
+#define BASIC_MODE_SHIFT(gpio) \
+       BASIC_MODE_REG_SHIFT(OFFSET_GPIO_NUMBER(gpio))
+#define BASIC_MODE_FROM_REG(data, gpio) \
+       BASIC_MODE_REG_FROM_REG(data, OFFSET_GPIO_NUMBER(gpio))
+
+/*
+ * Each extended GPIO register is 32 bits long and is responsible for up to
+ * eight GPIOs. The least significant 16 bits contain the set and clear bit
+ * pair for each of the GPIOs. The most significant 16 bits contain the
+ * disable and enable bit pair for each of the GPIOs. For example, the
+ * extended GPIO reg for GPIOs 16-23 is as follows:
+ *
+ *     31: GPIO23_DISABLE
+ *     ...
+ *     19: GPIO17_DISABLE
+ *     18: GPIO17_ENABLE
+ *     17: GPIO16_DISABLE
+ *     16: GPIO16_ENABLE
+ *     ...
+ *     3:  GPIO17_SET
+ *     2:  GPIO17_CLEAR
+ *     1:  GPIO16_SET
+ *     0:  GPIO16_CLEAR
+ */
+
+/* This gives the 'register relative offset gpio' number */
+#define EXTENDED_OFFSET_GPIO(gpio)     (gpio < 24 ? gpio - 16 : gpio - 24)
+
+/* These take the 'register relative offset gpio' number */
+#define EXTENDED_REG_DISABLE(ogpio)    (0x2 << ((ogpio * 2) + 16))
+#define EXTENDED_REG_ENABLE(ogpio)     (0x1 << ((ogpio * 2) + 16))
+#define EXTENDED_REG_SET(ogpio)                (0x2 << (ogpio * 2))
+#define EXTENDED_REG_CLR(ogpio)                (0x1 << (ogpio * 2))
+
+/* These take the actual GPIO number (16 through 27) */
+#define EXTENDED_DISABLE(gpio) \
+       EXTENDED_REG_DISABLE(EXTENDED_OFFSET_GPIO(gpio))
+#define EXTENDED_ENABLE(gpio)  \
+       EXTENDED_REG_ENABLE(EXTENDED_OFFSET_GPIO(gpio))
+#define EXTENDED_SET(gpio)     \
+       EXTENDED_REG_SET(EXTENDED_OFFSET_GPIO(gpio))
+#define EXTENDED_CLR(gpio)     \
+       EXTENDED_REG_CLR(EXTENDED_OFFSET_GPIO(gpio))
+
+#define EXTENDED_FULL_MASK             (0xffffffff)
+
+/* -- API inline-functions -- */
+
+/*
+ * Gets the current value of the specified pin
+ */
+static inline enum msp_gpio_data msp_gpio_pin_get(unsigned int gpio)
+{
+       u32 pinhi_mask = 0, pinhi_mask2 = 0;
+
+       if (gpio >= MSP_NUM_GPIOS)
+               return MSP_GPIO_NONE;
+
+       if (gpio < 16) {
+               pinhi_mask = BASIC_DATA_MASK(gpio);
+       } else {
+               /*
+                * Two cases are possible with the EXTENDED register:
+                *  - In output mode (ENABLED flag set), check the CLR bit
+                *  - In input mode (ENABLED flag not set), check the SET bit
+                */
+               pinhi_mask = EXTENDED_ENABLE(gpio) | EXTENDED_CLR(gpio);
+               pinhi_mask2 = EXTENDED_SET(gpio);
+       }
+       if (((*MSP_GPIO_DATA_REGISTER[gpio] & pinhi_mask) == pinhi_mask) ||
+           (*MSP_GPIO_DATA_REGISTER[gpio] & pinhi_mask2))
+               return MSP_GPIO_HI;
+       else
+               return MSP_GPIO_LO;
+}
+
+/* Sets the specified pin to the specified value */
+static inline void msp_gpio_pin_set(enum msp_gpio_data data, unsigned int gpio)
+{
+       if (gpio >= MSP_NUM_GPIOS)
+               return;
+
+       if (gpio < 16) {
+               if (data == MSP_GPIO_TOGGLE)
+                       toggle_reg32(MSP_GPIO_DATA_REGISTER[gpio],
+                                       BASIC_DATA_MASK(gpio));
+               else if (data == MSP_GPIO_HI)
+                       set_reg32(MSP_GPIO_DATA_REGISTER[gpio],
+                                       BASIC_DATA_MASK(gpio));
+               else
+                       clear_reg32(MSP_GPIO_DATA_REGISTER[gpio],
+                                       BASIC_DATA_MASK(gpio));
+       } else {
+               if (data == MSP_GPIO_TOGGLE) {
+                       /* Special ugly case:
+                        *   We have to read the CLR bit.
+                        *   If set, we write the CLR bit.
+                        *   If not, we write the SET bit.
+                        */
+                       u32 tmpdata;
+
+                       custom_read_reg32(MSP_GPIO_DATA_REGISTER[gpio],
+                                                               tmpdata);
+                       if (tmpdata & EXTENDED_CLR(gpio))
+                               tmpdata = EXTENDED_CLR(gpio);
+                       else
+                               tmpdata = EXTENDED_SET(gpio);
+                       custom_write_reg32(MSP_GPIO_DATA_REGISTER[gpio],
+                                                               tmpdata);
+               } else {
+                       u32 newdata;
+
+                       if (data == MSP_GPIO_HI)
+                               newdata = EXTENDED_SET(gpio);
+                       else
+                               newdata = EXTENDED_CLR(gpio);
+                       set_value_reg32(MSP_GPIO_DATA_REGISTER[gpio],
+                                               EXTENDED_FULL_MASK, newdata);
+               }
+       }
+}
+
+/* Sets the specified pin to the specified value */
+static inline void msp_gpio_pin_hi(unsigned int gpio)
+{
+       msp_gpio_pin_set(MSP_GPIO_HI, gpio);
+}
+
+/* Sets the specified pin to the specified value */
+static inline void msp_gpio_pin_lo(unsigned int gpio)
+{
+       msp_gpio_pin_set(MSP_GPIO_LO, gpio);
+}
+
+/* Sets the specified pin to the opposite value */
+static inline void msp_gpio_pin_toggle(unsigned int gpio)
+{
+       msp_gpio_pin_set(MSP_GPIO_TOGGLE, gpio);
+}
+
+/* Gets the mode of the specified pin */
+static inline enum msp_gpio_mode msp_gpio_pin_get_mode(unsigned int gpio)
+{
+       enum msp_gpio_mode retval = MSP_GPIO_UNKNOWN;
+       uint32_t data;
+
+       if (gpio >= MSP_NUM_GPIOS)
+               return retval;
+
+       data = *MSP_GPIO_MODE_REGISTER[gpio];
+
+       if (gpio < 16) {
+               retval = BASIC_MODE_FROM_REG(data, gpio);
+       } else {
+               /* Extended pins can only be either INPUT or OUTPUT */
+               if (data & EXTENDED_ENABLE(gpio))
+                       retval = MSP_GPIO_OUTPUT;
+               else
+                       retval = MSP_GPIO_INPUT;
+       }
+
+       return retval;
+}
+
+/*
+ * Sets the specified mode on the requested pin
+ * Returns 0 on success, or -1 if that mode is not allowed on this pin
+ */
+static inline int msp_gpio_pin_mode(enum msp_gpio_mode mode, unsigned int gpio)
+{
+       u32 modemask, newmode;
+
+       if ((1 << gpio) & ~MSP_GPIO_MODE_ALLOWED[mode])
+               return -1;
+
+       if (gpio >= MSP_NUM_GPIOS)
+               return -1;
+
+       if (gpio < 16) {
+               modemask = BASIC_MODE_MASK(gpio);
+               newmode =  BASIC_MODE(mode, gpio);
+       } else {
+               modemask = EXTENDED_FULL_MASK;
+               if (mode == MSP_GPIO_INPUT)
+                       newmode = EXTENDED_DISABLE(gpio);
+               else
+                       newmode = EXTENDED_ENABLE(gpio);
+       }
+       /* Do the set atomically */
+       set_value_reg32(MSP_GPIO_MODE_REGISTER[gpio], modemask, newmode);
+
+       return 0;
+}
+
+#endif /* __MSP_GPIO_MACROS_H__ */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h
new file mode 100644 (file)
index 0000000..29f8bf7
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Defines for the MSP interrupt handlers.
+ *
+ * Copyright (C) 2005, PMC-Sierra, Inc.         All rights reserved.
+ * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
+ *
+ * ########################################################################
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+#ifndef _MSP_INT_H
+#define _MSP_INT_H
+
+/*
+ * The PMC-Sierra MSP product line has at least two different interrupt
+ * controllers, the SLP register based scheme and the CIC interrupt
+ * controller block mechanism. This file distinguishes between them
+ * so that devices see a uniform interface.
+ */
+
+#if defined(CONFIG_IRQ_MSP_SLP)
+       #include "msp_slp_int.h"
+#elif defined(CONFIG_IRQ_MSP_CIC)
+       #include "msp_cic_int.h"
+#else
+       #error "What sort of interrupt controller does *your* MSP have?"
+#endif
+
+#endif /* !_MSP_INT_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h
new file mode 100644 (file)
index 0000000..24948cc
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2000-2006 PMC-Sierra INC.
+ *
+ *     This program is free software; you can redistribute it
+ *     and/or modify it under the terms of the GNU General
+ *     Public License as published by the Free Software
+ *     Foundation; either version 2 of the License, or (at your
+ *     option) any later version.
+ *
+ *     This program is distributed in the hope that it will be
+ *     useful, but WITHOUT ANY WARRANTY; without even the implied
+ *     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ *     PURPOSE.  See the GNU General Public License for more
+ *     details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this program; if not, write to the Free
+ *     Software Foundation, Inc., 675 Mass Ave, Cambridge, MA
+ *     02139, USA.
+ *
+ * PMC-SIERRA INC. DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS
+ * SOFTWARE.
+ */
+
+#ifndef _MSP_PCI_H_
+#define _MSP_PCI_H_
+
+#define MSP_HAS_PCI(ID) (((u32)(ID) <= 0x4236) && ((u32)(ID) >= 0x4220))
+
+/*
+ * It is convenient to program the OATRAN register so that
+ * Athena virtual address space and PCI address space are
+ * the same. This is not a requirement, just a convenience.
+ *
+ * The only hard restrictions on the value of OATRAN is that
+ * OATRAN must not be programmed to allow translated memory
+ * addresses to fall within the lowest 512MB of
+ * PCI address space. This region is hardcoded
+ * for use as Athena PCI Host Controller target
+ * access memory space to the Athena's SDRAM.
+ *
+ * Note that OATRAN applies only to memory accesses, not
+ * to I/O accesses.
+ *
+ * To program OATRAN to make Athena virtual address space
+ * and PCI address space have the same values, OATRAN
+ * is to be programmed to 0xB8000000. The top seven
+ * bits of the value mimic the seven bits clipped off
+ * by the PCI Host controller.
+ *
+ * With OATRAN at the said value, when the CPU does
+ * an access to its virtual address at, say 0xB900_5000,
+ * the address appearing on the PCI bus will be
+ * 0xB900_5000.
+ *    - Michael Penner
+ */
+#define MSP_PCI_OATRAN         0xB8000000UL
+
+#define MSP_PCI_SPACE_BASE     (MSP_PCI_OATRAN + 0x1002000UL)
+#define MSP_PCI_SPACE_SIZE     (0x3000000UL - 0x2000)
+#define MSP_PCI_SPACE_END \
+               (MSP_PCI_SPACE_BASE + MSP_PCI_SPACE_SIZE - 1)
+#define MSP_PCI_IOSPACE_BASE   (MSP_PCI_OATRAN + 0x1001000UL)
+#define MSP_PCI_IOSPACE_SIZE   0x1000
+#define MSP_PCI_IOSPACE_END  \
+               (MSP_PCI_IOSPACE_BASE + MSP_PCI_IOSPACE_SIZE - 1)
+
+/* IRQ for PCI status interrupts */
+#define PCI_STAT_IRQ   20
+
+#define QFLUSH_REG_1   0xB7F40000
+
+typedef volatile unsigned int pcireg;
+typedef void * volatile ppcireg;
+
+struct pci_block_copy
+{
+    pcireg   unused1; /* +0x00 */
+    pcireg   unused2; /* +0x04 */
+    ppcireg  unused3; /* +0x08 */
+    ppcireg  unused4; /* +0x0C */
+    pcireg   unused5; /* +0x10 */
+    pcireg   unused6; /* +0x14 */
+    pcireg   unused7; /* +0x18 */
+    ppcireg  unused8; /* +0x1C */
+    ppcireg  unused9; /* +0x20 */
+    pcireg   unusedA; /* +0x24 */
+    ppcireg  unusedB; /* +0x28 */
+    ppcireg  unusedC; /* +0x2C */
+};
+
+enum
+{
+    config_device_vendor,  /* 0 */
+    config_status_command, /* 1 */
+    config_class_revision, /* 2 */
+    config_BIST_header_latency_cache, /* 3 */
+    config_BAR0,          /* 4 */
+    config_BAR1,          /* 5 */
+    config_BAR2,          /* 6 */
+    config_not_used7,     /* 7 */
+    config_not_used8,     /* 8 */
+    config_not_used9,     /* 9 */
+    config_CIS,                   /* 10 */
+    config_subsystem,     /* 11 */
+    config_not_used12,    /* 12 */
+    config_capabilities,   /* 13 */
+    config_not_used14,    /* 14 */
+    config_lat_grant_irq,  /* 15 */
+    config_message_control,/* 16 */
+    config_message_addr,   /* 17 */
+    config_message_data,   /* 18 */
+    config_VPD_addr,      /* 19 */
+    config_VPD_data,      /* 20 */
+    config_maxregs        /* 21 - number of registers */
+};
+
+struct msp_pci_regs
+{
+    pcireg hop_unused_00; /* +0x00 */
+    pcireg hop_unused_04; /* +0x04 */
+    pcireg hop_unused_08; /* +0x08 */
+    pcireg hop_unused_0C; /* +0x0C */
+    pcireg hop_unused_10; /* +0x10 */
+    pcireg hop_unused_14; /* +0x14 */
+    pcireg hop_unused_18; /* +0x18 */
+    pcireg hop_unused_1C; /* +0x1C */
+    pcireg hop_unused_20; /* +0x20 */
+    pcireg hop_unused_24; /* +0x24 */
+    pcireg hop_unused_28; /* +0x28 */
+    pcireg hop_unused_2C; /* +0x2C */
+    pcireg hop_unused_30; /* +0x30 */
+    pcireg hop_unused_34; /* +0x34 */
+    pcireg if_control;   /* +0x38 */
+    pcireg oatran;       /* +0x3C */
+    pcireg reset_ctl;    /* +0x40 */
+    pcireg config_addr;          /* +0x44 */
+    pcireg hop_unused_48; /* +0x48 */
+    pcireg msg_signaled_int_status; /* +0x4C */
+    pcireg msg_signaled_int_mask;   /* +0x50 */
+    pcireg if_status;    /* +0x54 */
+    pcireg if_mask;      /* +0x58 */
+    pcireg hop_unused_5C; /* +0x5C */
+    pcireg hop_unused_60; /* +0x60 */
+    pcireg hop_unused_64; /* +0x64 */
+    pcireg hop_unused_68; /* +0x68 */
+    pcireg hop_unused_6C; /* +0x6C */
+    pcireg hop_unused_70; /* +0x70 */
+
+    struct pci_block_copy pci_bc[2] __attribute__((aligned(64)));
+
+    pcireg error_hdr1; /* +0xE0 */
+    pcireg error_hdr2; /* +0xE4 */
+
+    pcireg config[config_maxregs] __attribute__((aligned(256)));
+
+};
+
+#define BPCI_CFGADDR_BUSNUM_SHF 16
+#define BPCI_CFGADDR_FUNCTNUM_SHF 8
+#define BPCI_CFGADDR_REGNUM_SHF 2
+#define BPCI_CFGADDR_ENABLE (1<<31)
+
+#define BPCI_IFCONTROL_RTO (1<<20) /* Retry timeout */
+#define BPCI_IFCONTROL_HCE (1<<16) /* Host configuration enable */
+#define BPCI_IFCONTROL_CTO_SHF 12  /* Shift count for CTO bits */
+#define BPCI_IFCONTROL_SE  (1<<5)  /* Enable exceptions on errors */
+#define BPCI_IFCONTROL_BIST (1<<4) /* Use BIST in per. mode */
+#define BPCI_IFCONTROL_CAP (1<<3)  /* Enable capabilities */
+#define BPCI_IFCONTROL_MMC_SHF 0   /* Shift count for MMC bits */
+
+#define BPCI_IFSTATUS_MGT  (1<<8)  /* Master Grant timeout */
+#define BPCI_IFSTATUS_MTT  (1<<9)  /* Master TRDY timeout */
+#define BPCI_IFSTATUS_MRT  (1<<10) /* Master retry timeout */
+#define BPCI_IFSTATUS_BC0F (1<<13) /* Block copy 0 fault */
+#define BPCI_IFSTATUS_BC1F (1<<14) /* Block copy 1 fault */
+#define BPCI_IFSTATUS_PCIU (1<<15) /* PCI unable to respond */
+#define BPCI_IFSTATUS_BSIZ (1<<16) /* PCI access with illegal size */
+#define BPCI_IFSTATUS_BADD (1<<17) /* PCI access with illegal addr */
+#define BPCI_IFSTATUS_RTO  (1<<18) /* Retry time out */
+#define BPCI_IFSTATUS_SER  (1<<19) /* System error */
+#define BPCI_IFSTATUS_PER  (1<<20) /* Parity error */
+#define BPCI_IFSTATUS_LCA  (1<<21) /* Local CPU abort */
+#define BPCI_IFSTATUS_MEM  (1<<22) /* Memory prot. violation */
+#define BPCI_IFSTATUS_ARB  (1<<23) /* Arbiter timed out */
+#define BPCI_IFSTATUS_STA  (1<<27) /* Signaled target abort */
+#define BPCI_IFSTATUS_TA   (1<<28) /* Target abort */
+#define BPCI_IFSTATUS_MA   (1<<29) /* Master abort */
+#define BPCI_IFSTATUS_PEI  (1<<30) /* Parity error as initiator */
+#define BPCI_IFSTATUS_PET  (1<<31) /* Parity error as target */
+
+#define BPCI_RESETCTL_PR (1<<0)           /* True if reset asserted */
+#define BPCI_RESETCTL_RT (1<<4)           /* Release time */
+#define BPCI_RESETCTL_CT (1<<8)           /* Config time */
+#define BPCI_RESETCTL_PE (1<<12)   /* PCI enabled */
+#define BPCI_RESETCTL_HM (1<<13)   /* PCI host mode */
+#define BPCI_RESETCTL_RI (1<<14)   /* PCI reset in */
+
+extern struct msp_pci_regs msp_pci_regs
+                       __attribute__((section(".register")));
+extern unsigned long msp_pci_config_space
+                       __attribute__((section(".register")));
+
+#endif /* !_MSP_PCI_H_ */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h
new file mode 100644 (file)
index 0000000..4d3052a
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * MIPS boards bootprom interface for the Linux kernel.
+ *
+ * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ * Author: Carsten Langgaard, carstenl@mips.com
+ *
+ * ########################################################################
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+#ifndef _ASM_MSP_PROM_H
+#define _ASM_MSP_PROM_H
+
+#include <linux/types.h>
+
+#define DEVICEID                       "deviceid"
+#define FEATURES                       "features"
+#define PROM_ENV                       "prom_env"
+#define PROM_ENV_FILE                  "/proc/"PROM_ENV
+#define PROM_ENV_SIZE                  256
+
+#define CPU_DEVID_FAMILY               0x0000ff00
+#define CPU_DEVID_REVISION             0x000000ff
+
+#define FPGA_IS_POLO(revision) \
+               (((revision >= 0xb0) && (revision < 0xd0)))
+#define FPGA_IS_5000(revision) \
+               ((revision >= 0x80) && (revision <= 0x90))
+#define FPGA_IS_ZEUS(revision)         ((revision < 0x7f))
+#define FPGA_IS_DUET(revision) \
+               (((revision >= 0xa0) && (revision < 0xb0)))
+#define FPGA_IS_MSP4200(revision)      ((revision >= 0xd0))
+#define FPGA_IS_MSP7100(revision)      ((revision >= 0xd0))
+
+#define MACHINE_TYPE_POLO              "POLO"
+#define MACHINE_TYPE_DUET              "DUET"
+#define MACHINE_TYPE_ZEUS              "ZEUS"
+#define MACHINE_TYPE_MSP2000REVB       "MSP2000REVB"
+#define MACHINE_TYPE_MSP5000           "MSP5000"
+#define MACHINE_TYPE_MSP4200           "MSP4200"
+#define MACHINE_TYPE_MSP7120           "MSP7120"
+#define MACHINE_TYPE_MSP7130           "MSP7130"
+#define MACHINE_TYPE_OTHER             "OTHER"
+
+#define MACHINE_TYPE_POLO_FPGA         "POLO-FPGA"
+#define MACHINE_TYPE_DUET_FPGA         "DUET-FPGA"
+#define MACHINE_TYPE_ZEUS_FPGA         "ZEUS_FPGA"
+#define MACHINE_TYPE_MSP2000REVB_FPGA  "MSP2000REVB-FPGA"
+#define MACHINE_TYPE_MSP5000_FPGA      "MSP5000-FPGA"
+#define MACHINE_TYPE_MSP4200_FPGA      "MSP4200-FPGA"
+#define MACHINE_TYPE_MSP7100_FPGA      "MSP7100-FPGA"
+#define MACHINE_TYPE_OTHER_FPGA                "OTHER-FPGA"
+
+/* Device Family definitions */
+#define FAMILY_FPGA                    0x0000
+#define FAMILY_ZEUS                    0x1000
+#define FAMILY_POLO                    0x2000
+#define FAMILY_DUET                    0x4000
+#define FAMILY_TRIAD                   0x5000
+#define FAMILY_MSP4200                 0x4200
+#define FAMILY_MSP4200_FPGA            0x4f00
+#define FAMILY_MSP7100                 0x7100
+#define FAMILY_MSP7100_FPGA            0x7f00
+
+/* Device Type definitions */
+#define TYPE_MSP7120                   0x7120
+#define TYPE_MSP7130                   0x7130
+
+#define ENET_KEY               'E'
+#define ENETTXD_KEY            'e'
+#define PCI_KEY                        'P'
+#define PCIMUX_KEY             'p'
+#define SEC_KEY                        'S'
+#define SPAD_KEY               'D'
+#define TDM_KEY                        'T'
+#define ZSP_KEY                        'Z'
+
+#define FEATURE_NOEXIST                '-'
+#define FEATURE_EXIST          '+'
+
+#define ENET_MII               'M'
+#define ENET_RMII              'R'
+
+#define ENETTXD_FALLING                'F'
+#define ENETTXD_RISING         'R'
+
+#define PCI_HOST               'H'
+#define PCI_PERIPHERAL         'P'
+
+#define PCIMUX_FULL            'F'
+#define PCIMUX_SINGLE          'S'
+
+#define SEC_DUET               'D'
+#define SEC_POLO               'P'
+#define SEC_SLOW               'S'
+#define SEC_TRIAD              'T'
+
+#define SPAD_POLO              'P'
+
+#define TDM_DUET               'D'     /* DUET TDMs might exist */
+#define TDM_POLO               'P'     /* POLO TDMs might exist */
+#define TDM_TRIAD              'T'     /* TRIAD TDMs might exist */
+
+#define ZSP_DUET               'D'     /* one DUET zsp engine */
+#define ZSP_TRIAD              'T'     /* two TRIAD zsp engines */
+
+extern char *prom_getenv(char *name);
+extern void prom_init_cmdline(void);
+extern void prom_meminit(void);
+extern void prom_fixup_mem_map(unsigned long start_mem,
+                              unsigned long end_mem);
+
+extern int get_ethernet_addr(char *ethaddr_name, char *ethernet_addr);
+extern unsigned long get_deviceid(void);
+extern char identify_enet(unsigned long interface_num);
+extern char identify_enetTxD(unsigned long interface_num);
+extern char identify_pci(void);
+extern char identify_sec(void);
+extern char identify_spad(void);
+extern char identify_sec(void);
+extern char identify_tdm(void);
+extern char identify_zsp(void);
+extern unsigned long identify_family(void);
+extern unsigned long identify_revision(void);
+
+/*
+ * The following macro calls prom_printf and puts the format string
+ * into an init section so it can be reclaimed.
+ */
+#define ppfinit(f, x...) \
+       do { \
+               static char _f[] __initdata = KERN_INFO f; \
+               printk(_f, ## x); \
+       } while (0)
+
+/* Memory descriptor management. */
+#define PROM_MAX_PMEMBLOCKS    7       /* 6 used */
+
+enum yamon_memtypes {
+       yamon_dontuse,
+       yamon_prom,
+       yamon_free,
+};
+
+struct prom_pmemblock {
+       unsigned long base; /* Within KSEG0. */
+       unsigned int size;  /* In bytes. */
+       unsigned int type;  /* free or prom memory */
+};
+
+extern int prom_argc;
+extern char **prom_argv;
+extern char **prom_envp;
+extern int *prom_vec;
+extern struct prom_pmemblock *prom_getmdesc(void);
+
+#endif /* !_ASM_MSP_PROM_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
new file mode 100644 (file)
index 0000000..2dbc7a8
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ * SMP/VPE-safe functions to access "registers" (see note).
+ *
+ * NOTES:
+ * - These macros use ll/sc instructions, so it is your responsibility to
+ * ensure these are available on your platform before including this file.
+ * - The MIPS32 spec states that ll/sc results are undefined for uncached
+ * accesses. This means they can't be used on HW registers accessed
+ * through kseg1. Code which requires these macros for this purpose must
+ * front-end the registers with cached memory "registers" and have a single
+ * thread update the actual HW registers.
+ * - A maximum of 2k of code can be inserted between ll and sc. Every
+ * memory access between the instructions will increase the chance of
+ * sc failing and having to loop.
+ * - When using custom_read_reg32/custom_write_reg32 only perform the
+ * necessary logical operations on the register value in between these
+ * two calls. All other logic should be performed before the first call.
+ * - There is a bug on the R10000 chips which has a workaround. If you
+ * are affected by this bug, make sure to define the symbol 'R10000_LLSC_WAR'
+ * to be non-zero.  If you are using this header from within linux, you may
+ * include <asm/war.h> before including this file to have this defined
+ * appropriately for you.
+ *
+ * Copyright 2005-2007 PMC-Sierra, Inc.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
+ *  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF USE,
+ *  DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc., 675
+ *  Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ASM_REGOPS_H__
+#define __ASM_REGOPS_H__
+
+#include <linux/types.h>
+
+#include <asm/war.h>
+
+#ifndef R10000_LLSC_WAR
+#define R10000_LLSC_WAR 0
+#endif
+
+#if R10000_LLSC_WAR == 1
+#define __beqz "beqzl  "
+#else
+#define __beqz "beqz   "
+#endif
+
+#ifndef _LINUX_TYPES_H
+typedef unsigned int u32;
+#endif
+
+/*
+ * Sets all the masked bits to the corresponding value bits
+ */
+static inline void set_value_reg32(volatile u32 *const addr,
+                                       u32 const mask,
+                                       u32 const value)
+{
+       u32 temp;
+
+       __asm__ __volatile__(
+       "       .set    push                            \n"
+       "       .set    mips3                           \n"
+       "1:     ll      %0, %1  # set_value_reg32       \n"
+       "       and     %0, %2                          \n"
+       "       or      %0, %3                          \n"
+       "       sc      %0, %1                          \n"
+       "       "__beqz"%0, 1b                          \n"
+       "       nop                                     \n"
+       "       .set    pop                             \n"
+       : "=&r" (temp), "=m" (*addr)
+       : "ir" (~mask), "ir" (value), "m" (*addr));
+}
+
+/*
+ * Sets all the masked bits to '1'
+ */
+static inline void set_reg32(volatile u32 *const addr,
+                               u32 const mask)
+{
+       u32 temp;
+
+       __asm__ __volatile__(
+       "       .set    push                            \n"
+       "       .set    mips3                           \n"
+       "1:     ll      %0, %1          # set_reg32     \n"
+       "       or      %0, %2                          \n"
+       "       sc      %0, %1                          \n"
+       "       "__beqz"%0, 1b                          \n"
+       "       nop                                     \n"
+       "       .set    pop                             \n"
+       : "=&r" (temp), "=m" (*addr)
+       : "ir" (mask), "m" (*addr));
+}
+
+/*
+ * Sets all the masked bits to '0'
+ */
+static inline void clear_reg32(volatile u32 *const addr,
+                               u32 const mask)
+{
+       u32 temp;
+
+       __asm__ __volatile__(
+       "       .set    push                            \n"
+       "       .set    mips3                           \n"
+       "1:     ll      %0, %1          # clear_reg32   \n"
+       "       and     %0, %2                          \n"
+       "       sc      %0, %1                          \n"
+       "       "__beqz"%0, 1b                          \n"
+       "       nop                                     \n"
+       "       .set    pop                             \n"
+       : "=&r" (temp), "=m" (*addr)
+       : "ir" (~mask), "m" (*addr));
+}
+
+/*
+ * Toggles all masked bits from '0' to '1' and '1' to '0'
+ */
+static inline void toggle_reg32(volatile u32 *const addr,
+                               u32 const mask)
+{
+       u32 temp;
+
+       __asm__ __volatile__(
+       "       .set    push                            \n"
+       "       .set    mips3                           \n"
+       "1:     ll      %0, %1          # toggle_reg32  \n"
+       "       xor     %0, %2                          \n"
+       "       sc      %0, %1                          \n"
+       "       "__beqz"%0, 1b                          \n"
+       "       nop                                     \n"
+       "       .set    pop                             \n"
+       : "=&r" (temp), "=m" (*addr)
+       : "ir" (mask), "m" (*addr));
+}
+
+/*
+ * Read all masked bits others are returned as '0'
+ */
+static inline u32 read_reg32(volatile u32 *const addr,
+                               u32 const mask)
+{
+       u32 temp;
+
+       __asm__ __volatile__(
+       "       .set    push                            \n"
+       "       .set    noreorder                       \n"
+       "       lw      %0, %1          # read          \n"
+       "       and     %0, %2          # mask          \n"
+       "       .set    pop                             \n"
+       : "=&r" (temp)
+       : "m" (*addr), "ir" (mask));
+
+       return temp;
+}
+
+/*
+ * blocking_read_reg32 - Read address with blocking load
+ *
+ * Uncached writes need to be read back to ensure they reach RAM.
+ * The returned value must be 'used' to prevent from becoming a
+ * non-blocking load.
+ */
+static inline u32 blocking_read_reg32(volatile u32 *const addr)
+{
+       u32 temp;
+
+       __asm__ __volatile__(
+       "       .set    push                            \n"
+       "       .set    noreorder                       \n"
+       "       lw      %0, %1          # read          \n"
+       "       move    %0, %0          # block         \n"
+       "       .set    pop                             \n"
+       : "=&r" (temp)
+       : "m" (*addr));
+
+       return temp;
+}
+
+/*
+ * For special strange cases only:
+ *
+ * If you need custom processing within a ll/sc loop, use the following macros
+ * VERY CAREFULLY:
+ *
+ *   u32 tmp;                          <-- Define a variable to hold the data
+ *
+ *   custom_read_reg32(address, tmp);  <-- Reads the address and put the value
+ *                                             in the 'tmp' variable given
+ *
+ *     From here on out, you are (basically) atomic, so don't do anything too
+ *     fancy!
+ *     Also, this code may loop if the end of this block fails to write
+ *     everything back safely due to the other CPU, so do NOT do anything
+ *     with side-effects!
+ *
+ *   custom_write_reg32(address, tmp); <-- Writes back 'tmp' safely.
+ */
+#define custom_read_reg32(address, tmp)                                \
+       __asm__ __volatile__(                                   \
+       "       .set    push                            \n"     \
+       "       .set    mips3                           \n"     \
+       "1:     ll      %0, %1  #custom_read_reg32      \n"     \
+       "       .set    pop                             \n"     \
+       : "=r" (tmp), "=m" (*address)                           \
+       : "m" (*address))
+
+#define custom_write_reg32(address, tmp)                       \
+       __asm__ __volatile__(                                   \
+       "       .set    push                            \n"     \
+       "       .set    mips3                           \n"     \
+       "       sc      %0, %1  #custom_write_reg32     \n"     \
+       "       "__beqz"%0, 1b                          \n"     \
+       "       nop                                     \n"     \
+       "       .set    pop                             \n"     \
+       : "=&r" (tmp), "=m" (*address)                          \
+       : "0" (tmp), "m" (*address))
+
+#endif /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h
new file mode 100644 (file)
index 0000000..da3a8de
--- /dev/null
@@ -0,0 +1,664 @@
+/*
+ * Defines for the address space, registers and register configuration
+ * (bit masks, access macros etc) for the PMC-Sierra line of MSP products.
+ * This file contains address maps for all the devices in the line of
+ * products but only has register definitions and configuration masks for
+ * registers which aren't definitely associated with any device.  Things
+ * like clock settings, reset access, the ELB etc.  Individual device
+ * drivers will reference the appropriate XXX_BASE value defined here
+ * and have individual registers offset from that.
+ *
+ * Copyright (C) 2005-2007 PMC-Sierra, Inc.  All rights reserved.
+ * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
+ *
+ * ########################################################################
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+#include <asm/addrspace.h>
+#include <linux/types.h>
+
+#ifndef _ASM_MSP_REGS_H
+#define _ASM_MSP_REGS_H
+
+/*
+ ########################################################################
+ #  Address space and device base definitions                          #
+ ########################################################################
+ */
+
+/*
+ ***************************************************************************
+ * System Logic and Peripherals (ELB, UART0, etc) device address space    *
+ ***************************************************************************
+ */
+#define MSP_SLP_BASE           0x1c000000
+                                       /* System Logic and Peripherals */
+#define MSP_RST_BASE           (MSP_SLP_BASE + 0x10)
+                                       /* System reset register base   */
+#define MSP_RST_SIZE           0x0C    /* System reset register space  */
+
+#define MSP_WTIMER_BASE                (MSP_SLP_BASE + 0x04C)
+                                       /* watchdog timer base          */
+#define MSP_ITIMER_BASE                (MSP_SLP_BASE + 0x054)
+                                       /* internal timer base          */
+#define MSP_UART0_BASE         (MSP_SLP_BASE + 0x100)
+                                       /* UART0 controller base        */
+#define MSP_BCPY_CTRL_BASE     (MSP_SLP_BASE + 0x120)
+                                       /* Block Copy controller base   */
+#define MSP_BCPY_DESC_BASE     (MSP_SLP_BASE + 0x160)
+                                       /* Block Copy descriptor base   */
+
+/*
+ ***************************************************************************
+ * PCI address space                                                      *
+ ***************************************************************************
+ */
+#define MSP_PCI_BASE           0x19000000
+
+/*
+ ***************************************************************************
+ * MSbus device address space                                             *
+ ***************************************************************************
+ */
+#define MSP_MSB_BASE           0x18000000
+                                       /* MSbus address start          */
+#define MSP_PER_BASE           (MSP_MSB_BASE + 0x400000)
+                                       /* Peripheral device registers  */
+#define MSP_MAC0_BASE          (MSP_MSB_BASE + 0x600000)
+                                       /* MAC A device registers       */
+#define MSP_MAC1_BASE          (MSP_MSB_BASE + 0x700000)
+                                       /* MAC B device registers       */
+#define MSP_MAC_SIZE           0xE0    /* MAC register space           */
+
+#define MSP_SEC_BASE           (MSP_MSB_BASE + 0x800000)
+                                       /* Security Engine registers    */
+#define MSP_MAC2_BASE          (MSP_MSB_BASE + 0x900000)
+                                       /* MAC C device registers       */
+#define MSP_ADSL2_BASE         (MSP_MSB_BASE + 0xA80000)
+                                       /* ADSL2 device registers       */
+#define MSP_USB0_BASE          (MSP_MSB_BASE + 0xB00000)
+                                       /* USB0 device registers        */
+#define MSP_USB1_BASE          (MSP_MSB_BASE + 0x300000)
+                                       /* USB1 device registers        */
+#define MSP_CPUIF_BASE         (MSP_MSB_BASE + 0xC00000)
+                                       /* CPU interface registers      */
+
+/* Devices within the MSbus peripheral block */
+#define MSP_UART1_BASE         (MSP_PER_BASE + 0x030)
+                                       /* UART1 controller base        */
+#define MSP_SPI_BASE           (MSP_PER_BASE + 0x058)
+                                       /* SPI/MPI control registers    */
+#define MSP_TWI_BASE           (MSP_PER_BASE + 0x090)
+                                       /* Two-wire control registers   */
+#define MSP_PTIMER_BASE                (MSP_PER_BASE + 0x0F0)
+                                       /* Programmable timer control   */
+
+/*
+ ***************************************************************************
+ * Physical Memory configuration address space                            *
+ ***************************************************************************
+ */
+#define MSP_MEM_CFG_BASE       0x17f00000
+
+#define MSP_MEM_INDIRECT_CTL_10 0x10
+
+/*
+ * Notes:
+ *  1) The SPI registers are split into two blocks, one offset from the
+ *     MSP_SPI_BASE by 0x00 and the other offset from the MSP_SPI_BASE by
+ *     0x68.  The SPI driver definitions for the register must be aware
+ *     of this.
+ *  2) The block copy engine register are divided into two regions, one
+ *     for the control/configuration of the engine proper and one for the
+ *     values of the descriptors used in the copy process.  These have
+ *     different base defines (CTRL_BASE vs DESC_BASE)
+ *  3) These constants are for physical addresses which means that they
+ *     work correctly with "ioremap" and friends.  This means that device
+ *     drivers will need to remap these addresses using ioremap and perhaps
+ *     the readw/writew macros.         Or they could use the regptr() macro
+ *     defined below, but the readw/writew calls are the correct thing.
+ *  4) The UARTs have an additional status register offset from the base
+ *     address.         This register isn't used in the standard 8250 driver but
+ *     may be used in other software.  Consult the hardware datasheet for
+ *     offset details.
+ *  5) For some unknown reason the security engine (MSP_SEC_BASE) registers
+ *     start at an offset of 0x84 from the base address but the block of
+ *     registers before this is reserved for the security engine.  The
+ *     driver will have to be aware of this but it makes the register
+ *     definitions line up better with the documentation.
+ */
+
+/*
+ ########################################################################
+ #  System register definitions.  Not associated with a specific device #
+ ########################################################################
+ */
+
+/*
+ * This macro maps the physical register number into uncached space
+ * and (for C code) casts it into a u32 pointer so it can be dereferenced
+ * Normally these would be accessed with ioremap and readX/writeX, but
+ * these are convenient for a lot of internal kernel code.
+ */
+#ifdef __ASSEMBLER__
+       #define regptr(addr) (KSEG1ADDR(addr))
+#else
+       #define regptr(addr) ((volatile u32 *const)(KSEG1ADDR(addr)))
+#endif
+
+/*
+ ***************************************************************************
+ * System Logic and Peripherals (RESET, ELB, etc) registers               *
+ ***************************************************************************
+ */
+
+/* System Control register definitions */
+#define DEV_ID_REG     regptr(MSP_SLP_BASE + 0x00)
+                                       /* Device-ID                 RO */
+#define FWR_ID_REG     regptr(MSP_SLP_BASE + 0x04)
+                                       /* Firmware-ID Register      RW */
+#define SYS_ID_REG0    regptr(MSP_SLP_BASE + 0x08)
+                                       /* System-ID Register-0      RW */
+#define SYS_ID_REG1    regptr(MSP_SLP_BASE + 0x0C)
+                                       /* System-ID Register-1      RW */
+
+/* System Reset register definitions */
+#define RST_STS_REG    regptr(MSP_SLP_BASE + 0x10)
+                                       /* System Reset Status       RO */
+#define RST_SET_REG    regptr(MSP_SLP_BASE + 0x14)
+                                       /* System Set Reset          WO */
+#define RST_CLR_REG    regptr(MSP_SLP_BASE + 0x18)
+                                       /* System Clear Reset        WO */
+
+/* System Clock Registers */
+#define PCI_SLP_REG    regptr(MSP_SLP_BASE + 0x1C)
+                                       /* PCI clock generator       RW */
+#define URT_SLP_REG    regptr(MSP_SLP_BASE + 0x20)
+                                       /* UART clock generator      RW */
+/* reserved                  (MSP_SLP_BASE + 0x24)                     */
+/* reserved                  (MSP_SLP_BASE + 0x28)                     */
+#define PLL1_SLP_REG   regptr(MSP_SLP_BASE + 0x2C)
+                                       /* PLL1 clock generator      RW */
+#define PLL0_SLP_REG   regptr(MSP_SLP_BASE + 0x30)
+                                       /* PLL0 clock generator      RW */
+#define MIPS_SLP_REG   regptr(MSP_SLP_BASE + 0x34)
+                                       /* MIPS clock generator      RW */
+#define VE_SLP_REG     regptr(MSP_SLP_BASE + 0x38)
+                                       /* Voice Eng clock generator RW */
+/* reserved                  (MSP_SLP_BASE + 0x3C)                     */
+#define MSB_SLP_REG    regptr(MSP_SLP_BASE + 0x40)
+                                       /* MS-Bus clock generator    RW */
+#define SMAC_SLP_REG   regptr(MSP_SLP_BASE + 0x44)
+                                       /* Sec & MAC clock generator RW */
+#define PERF_SLP_REG   regptr(MSP_SLP_BASE + 0x48)
+                                       /* Per & TDM clock generator RW */
+
+/* Interrupt Controller Registers */
+#define SLP_INT_STS_REG regptr(MSP_SLP_BASE + 0x70)
+                                       /* Interrupt status register RW */
+#define SLP_INT_MSK_REG regptr(MSP_SLP_BASE + 0x74)
+                                       /* Interrupt enable/mask     RW */
+#define SE_MBOX_REG    regptr(MSP_SLP_BASE + 0x78)
+                                       /* Security Engine mailbox   RW */
+#define VE_MBOX_REG    regptr(MSP_SLP_BASE + 0x7C)
+                                       /* Voice Engine mailbox      RW */
+
+/* ELB Controller Registers */
+#define CS0_CNFG_REG   regptr(MSP_SLP_BASE + 0x80)
+                                       /* ELB CS0 Configuration Reg    */
+#define CS0_ADDR_REG   regptr(MSP_SLP_BASE + 0x84)
+                                       /* ELB CS0 Base Address Reg     */
+#define CS0_MASK_REG   regptr(MSP_SLP_BASE + 0x88)
+                                       /* ELB CS0 Mask Register        */
+#define CS0_ACCESS_REG regptr(MSP_SLP_BASE + 0x8C)
+                                       /* ELB CS0 access register      */
+
+#define CS1_CNFG_REG   regptr(MSP_SLP_BASE + 0x90)
+                                       /* ELB CS1 Configuration Reg    */
+#define CS1_ADDR_REG   regptr(MSP_SLP_BASE + 0x94)
+                                       /* ELB CS1 Base Address Reg     */
+#define CS1_MASK_REG   regptr(MSP_SLP_BASE + 0x98)
+                                       /* ELB CS1 Mask Register        */
+#define CS1_ACCESS_REG regptr(MSP_SLP_BASE + 0x9C)
+                                       /* ELB CS1 access register      */
+
+#define CS2_CNFG_REG   regptr(MSP_SLP_BASE + 0xA0)
+                                       /* ELB CS2 Configuration Reg    */
+#define CS2_ADDR_REG   regptr(MSP_SLP_BASE + 0xA4)
+                                       /* ELB CS2 Base Address Reg     */
+#define CS2_MASK_REG   regptr(MSP_SLP_BASE + 0xA8)
+                                       /* ELB CS2 Mask Register        */
+#define CS2_ACCESS_REG regptr(MSP_SLP_BASE + 0xAC)
+                                       /* ELB CS2 access register      */
+
+#define CS3_CNFG_REG   regptr(MSP_SLP_BASE + 0xB0)
+                                       /* ELB CS3 Configuration Reg    */
+#define CS3_ADDR_REG   regptr(MSP_SLP_BASE + 0xB4)
+                                       /* ELB CS3 Base Address Reg     */
+#define CS3_MASK_REG   regptr(MSP_SLP_BASE + 0xB8)
+                                       /* ELB CS3 Mask Register        */
+#define CS3_ACCESS_REG regptr(MSP_SLP_BASE + 0xBC)
+                                       /* ELB CS3 access register      */
+
+#define CS4_CNFG_REG   regptr(MSP_SLP_BASE + 0xC0)
+                                       /* ELB CS4 Configuration Reg    */
+#define CS4_ADDR_REG   regptr(MSP_SLP_BASE + 0xC4)
+                                       /* ELB CS4 Base Address Reg     */
+#define CS4_MASK_REG   regptr(MSP_SLP_BASE + 0xC8)
+                                       /* ELB CS4 Mask Register        */
+#define CS4_ACCESS_REG regptr(MSP_SLP_BASE + 0xCC)
+                                       /* ELB CS4 access register      */
+
+#define CS5_CNFG_REG   regptr(MSP_SLP_BASE + 0xD0)
+                                       /* ELB CS5 Configuration Reg    */
+#define CS5_ADDR_REG   regptr(MSP_SLP_BASE + 0xD4)
+                                       /* ELB CS5 Base Address Reg     */
+#define CS5_MASK_REG   regptr(MSP_SLP_BASE + 0xD8)
+                                       /* ELB CS5 Mask Register        */
+#define CS5_ACCESS_REG regptr(MSP_SLP_BASE + 0xDC)
+                                       /* ELB CS5 access register      */
+
+/* reserved                           0xE0 - 0xE8                      */
+#define ELB_1PC_EN_REG regptr(MSP_SLP_BASE + 0xEC)
+                                       /* ELB single PC card detect    */
+
+/* reserved                           0xF0 - 0xF8                      */
+#define ELB_CLK_CFG_REG regptr(MSP_SLP_BASE + 0xFC)
+                                       /* SDRAM read/ELB timing Reg    */
+
+/* Extended UART status registers */
+#define UART0_STATUS_REG       regptr(MSP_UART0_BASE + 0x0c0)
+                                       /* UART Status Register 0       */
+#define UART1_STATUS_REG       regptr(MSP_UART1_BASE + 0x170)
+                                       /* UART Status Register 1       */
+
+/* Performance monitoring registers */
+#define PERF_MON_CTRL_REG      regptr(MSP_SLP_BASE + 0x140)
+                                       /* Performance monitor control  */
+#define PERF_MON_CLR_REG       regptr(MSP_SLP_BASE + 0x144)
+                                       /* Performance monitor clear    */
+#define PERF_MON_CNTH_REG      regptr(MSP_SLP_BASE + 0x148)
+                                       /* Perf monitor counter high    */
+#define PERF_MON_CNTL_REG      regptr(MSP_SLP_BASE + 0x14C)
+                                       /* Perf monitor counter low     */
+
+/* System control registers */
+#define SYS_CTRL_REG           regptr(MSP_SLP_BASE + 0x150)
+                                       /* System control register      */
+#define SYS_ERR1_REG           regptr(MSP_SLP_BASE + 0x154)
+                                       /* System Error status 1        */
+#define SYS_ERR2_REG           regptr(MSP_SLP_BASE + 0x158)
+                                       /* System Error status 2        */
+#define SYS_INT_CFG_REG                regptr(MSP_SLP_BASE + 0x15C)
+                                       /* System Interrupt config      */
+
+/* Voice Engine Memory configuration */
+#define VE_MEM_REG             regptr(MSP_SLP_BASE + 0x17C)
+                                       /* Voice engine memory config   */
+
+/* CPU/SLP Error Status registers */
+#define CPU_ERR1_REG           regptr(MSP_SLP_BASE + 0x180)
+                                       /* CPU/SLP Error status 1       */
+#define CPU_ERR2_REG           regptr(MSP_SLP_BASE + 0x184)
+                                       /* CPU/SLP Error status 1       */
+
+/* Extended GPIO registers      */
+#define EXTENDED_GPIO1_REG     regptr(MSP_SLP_BASE + 0x188)
+#define EXTENDED_GPIO2_REG     regptr(MSP_SLP_BASE + 0x18c)
+#define EXTENDED_GPIO_REG      EXTENDED_GPIO1_REG
+                                       /* Backward-compatibility       */
+
+/* System Error registers */
+#define SLP_ERR_STS_REG                regptr(MSP_SLP_BASE + 0x190)
+                                       /* Int status for SLP errors    */
+#define SLP_ERR_MSK_REG                regptr(MSP_SLP_BASE + 0x194)
+                                       /* Int mask for SLP errors      */
+#define SLP_ELB_ERST_REG       regptr(MSP_SLP_BASE + 0x198)
+                                       /* External ELB reset           */
+#define SLP_BOOT_STS_REG       regptr(MSP_SLP_BASE + 0x19C)
+                                       /* Boot Status                  */
+
+/* Extended ELB addressing */
+#define CS0_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1A0)
+                                       /* CS0 Extended address         */
+#define CS1_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1A4)
+                                       /* CS1 Extended address         */
+#define CS2_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1A8)
+                                       /* CS2 Extended address         */
+#define CS3_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1AC)
+                                       /* CS3 Extended address         */
+/* reserved                                          0x1B0             */
+#define CS5_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1B4)
+                                       /* CS5 Extended address         */
+
+/* PLL Adjustment registers */
+#define PLL_LOCK_REG           regptr(MSP_SLP_BASE + 0x200)
+                                       /* PLL0 lock status             */
+#define PLL_ARST_REG           regptr(MSP_SLP_BASE + 0x204)
+                                       /* PLL Analog reset status      */
+#define PLL0_ADJ_REG           regptr(MSP_SLP_BASE + 0x208)
+                                       /* PLL0 Adjustment value        */
+#define PLL1_ADJ_REG           regptr(MSP_SLP_BASE + 0x20C)
+                                       /* PLL1 Adjustment value        */
+
+/*
+ ***************************************************************************
+ * Peripheral Register definitions                                        *
+ ***************************************************************************
+ */
+
+/* Peripheral status */
+#define PER_CTRL_REG           regptr(MSP_PER_BASE + 0x50)
+                                       /* Peripheral control register  */
+#define PER_STS_REG            regptr(MSP_PER_BASE + 0x54)
+                                       /* Peripheral status register   */
+
+/* SPI/MPI Registers */
+#define SMPI_TX_SZ_REG         regptr(MSP_PER_BASE + 0x58)
+                                       /* SPI/MPI Tx Size register     */
+#define SMPI_RX_SZ_REG         regptr(MSP_PER_BASE + 0x5C)
+                                       /* SPI/MPI Rx Size register     */
+#define SMPI_CTL_REG           regptr(MSP_PER_BASE + 0x60)
+                                       /* SPI/MPI Control register     */
+#define SMPI_MS_REG            regptr(MSP_PER_BASE + 0x64)
+                                       /* SPI/MPI Chip Select reg      */
+#define SMPI_CORE_DATA_REG     regptr(MSP_PER_BASE + 0xC0)
+                                       /* SPI/MPI Core Data reg        */
+#define SMPI_CORE_CTRL_REG     regptr(MSP_PER_BASE + 0xC4)
+                                       /* SPI/MPI Core Control reg     */
+#define SMPI_CORE_STAT_REG     regptr(MSP_PER_BASE + 0xC8)
+                                       /* SPI/MPI Core Status reg      */
+#define SMPI_CORE_SSEL_REG     regptr(MSP_PER_BASE + 0xCC)
+                                       /* SPI/MPI Core Ssel reg        */
+#define SMPI_FIFO_REG          regptr(MSP_PER_BASE + 0xD0)
+                                       /* SPI/MPI Data FIFO reg        */
+
+/* Peripheral Block Error Registers          */
+#define PER_ERR_STS_REG                regptr(MSP_PER_BASE + 0x70)
+                                       /* Error Bit Status Register    */
+#define PER_ERR_MSK_REG                regptr(MSP_PER_BASE + 0x74)
+                                       /* Error Bit Mask Register      */
+#define PER_HDR1_REG           regptr(MSP_PER_BASE + 0x78)
+                                       /* Error Header 1 Register      */
+#define PER_HDR2_REG           regptr(MSP_PER_BASE + 0x7C)
+                                       /* Error Header 2 Register      */
+
+/* Peripheral Block Interrupt Registers              */
+#define PER_INT_STS_REG                regptr(MSP_PER_BASE + 0x80)
+                                       /* Interrupt status register    */
+#define PER_INT_MSK_REG                regptr(MSP_PER_BASE + 0x84)
+                                       /* Interrupt Mask Register      */
+#define GPIO_INT_STS_REG       regptr(MSP_PER_BASE + 0x88)
+                                       /* GPIO interrupt status reg    */
+#define GPIO_INT_MSK_REG       regptr(MSP_PER_BASE + 0x8C)
+                                       /* GPIO interrupt MASK Reg      */
+
+/* POLO GPIO registers                       */
+#define POLO_GPIO_DAT1_REG     regptr(MSP_PER_BASE + 0x0E0)
+                                       /* Polo GPIO[8:0]  data reg     */
+#define POLO_GPIO_CFG1_REG     regptr(MSP_PER_BASE + 0x0E4)
+                                       /* Polo GPIO[7:0]  config reg   */
+#define POLO_GPIO_CFG2_REG     regptr(MSP_PER_BASE + 0x0E8)
+                                       /* Polo GPIO[15:8] config reg   */
+#define POLO_GPIO_OD1_REG      regptr(MSP_PER_BASE + 0x0EC)
+                                       /* Polo GPIO[31:0] output drive */
+#define POLO_GPIO_CFG3_REG     regptr(MSP_PER_BASE + 0x170)
+                                       /* Polo GPIO[23:16] config reg  */
+#define POLO_GPIO_DAT2_REG     regptr(MSP_PER_BASE + 0x174)
+                                       /* Polo GPIO[15:9]  data reg    */
+#define POLO_GPIO_DAT3_REG     regptr(MSP_PER_BASE + 0x178)
+                                       /* Polo GPIO[23:16]  data reg   */
+#define POLO_GPIO_DAT4_REG     regptr(MSP_PER_BASE + 0x17C)
+                                       /* Polo GPIO[31:24]  data reg   */
+#define POLO_GPIO_DAT5_REG     regptr(MSP_PER_BASE + 0x180)
+                                       /* Polo GPIO[39:32]  data reg   */
+#define POLO_GPIO_DAT6_REG     regptr(MSP_PER_BASE + 0x184)
+                                       /* Polo GPIO[47:40]  data reg   */
+#define POLO_GPIO_DAT7_REG     regptr(MSP_PER_BASE + 0x188)
+                                       /* Polo GPIO[54:48]  data reg   */
+#define POLO_GPIO_CFG4_REG     regptr(MSP_PER_BASE + 0x18C)
+                                       /* Polo GPIO[31:24] config reg  */
+#define POLO_GPIO_CFG5_REG     regptr(MSP_PER_BASE + 0x190)
+                                       /* Polo GPIO[39:32] config reg  */
+#define POLO_GPIO_CFG6_REG     regptr(MSP_PER_BASE + 0x194)
+                                       /* Polo GPIO[47:40] config reg  */
+#define POLO_GPIO_CFG7_REG     regptr(MSP_PER_BASE + 0x198)
+                                       /* Polo GPIO[54:48] config reg  */
+#define POLO_GPIO_OD2_REG      regptr(MSP_PER_BASE + 0x19C)
+                                       /* Polo GPIO[54:32] output drive */
+
+/* Generic GPIO registers                    */
+#define GPIO_DATA1_REG         regptr(MSP_PER_BASE + 0x170)
+                                       /* GPIO[1:0] data register      */
+#define GPIO_DATA2_REG         regptr(MSP_PER_BASE + 0x174)
+                                       /* GPIO[5:2] data register      */
+#define GPIO_DATA3_REG         regptr(MSP_PER_BASE + 0x178)
+                                       /* GPIO[9:6] data register      */
+#define GPIO_DATA4_REG         regptr(MSP_PER_BASE + 0x17C)
+                                       /* GPIO[15:10] data register    */
+#define GPIO_CFG1_REG          regptr(MSP_PER_BASE + 0x180)
+                                       /* GPIO[1:0] config register    */
+#define GPIO_CFG2_REG          regptr(MSP_PER_BASE + 0x184)
+                                       /* GPIO[5:2] config register    */
+#define GPIO_CFG3_REG          regptr(MSP_PER_BASE + 0x188)
+                                       /* GPIO[9:6] config register    */
+#define GPIO_CFG4_REG          regptr(MSP_PER_BASE + 0x18C)
+                                       /* GPIO[15:10] config register  */
+#define GPIO_OD_REG            regptr(MSP_PER_BASE + 0x190)
+                                       /* GPIO[15:0] output drive      */
+
+/*
+ ***************************************************************************
+ * CPU Interface register definitions                                     *
+ ***************************************************************************
+ */
+#define PCI_FLUSH_REG          regptr(MSP_CPUIF_BASE + 0x00)
+                                       /* PCI-SDRAM queue flush trigger */
+#define OCP_ERR1_REG           regptr(MSP_CPUIF_BASE + 0x04)
+                                       /* OCP Error Attribute 1        */
+#define OCP_ERR2_REG           regptr(MSP_CPUIF_BASE + 0x08)
+                                       /* OCP Error Attribute 2        */
+#define OCP_STS_REG            regptr(MSP_CPUIF_BASE + 0x0C)
+                                       /* OCP Error Status             */
+#define CPUIF_PM_REG           regptr(MSP_CPUIF_BASE + 0x10)
+                                       /* CPU policy configuration     */
+#define CPUIF_CFG_REG          regptr(MSP_CPUIF_BASE + 0x10)
+                                       /* Misc configuration options   */
+
+/* Central Interrupt Controller Registers */
+#define MSP_CIC_BASE           (MSP_CPUIF_BASE + 0x8000)
+                                       /* Central Interrupt registers  */
+#define CIC_EXT_CFG_REG                regptr(MSP_CIC_BASE + 0x00)
+                                       /* External interrupt config    */
+#define CIC_STS_REG            regptr(MSP_CIC_BASE + 0x04)
+                                       /* CIC Interrupt Status         */
+#define CIC_VPE0_MSK_REG       regptr(MSP_CIC_BASE + 0x08)
+                                       /* VPE0 Interrupt Mask          */
+#define CIC_VPE1_MSK_REG       regptr(MSP_CIC_BASE + 0x0C)
+                                       /* VPE1 Interrupt Mask          */
+#define CIC_TC0_MSK_REG                regptr(MSP_CIC_BASE + 0x10)
+                                       /* Thread Context 0 Int Mask    */
+#define CIC_TC1_MSK_REG                regptr(MSP_CIC_BASE + 0x14)
+                                       /* Thread Context 1 Int Mask    */
+#define CIC_TC2_MSK_REG                regptr(MSP_CIC_BASE + 0x18)
+                                       /* Thread Context 2 Int Mask    */
+#define CIC_TC3_MSK_REG                regptr(MSP_CIC_BASE + 0x18)
+                                       /* Thread Context 3 Int Mask    */
+#define CIC_TC4_MSK_REG                regptr(MSP_CIC_BASE + 0x18)
+                                       /* Thread Context 4 Int Mask    */
+#define CIC_PCIMSI_STS_REG     regptr(MSP_CIC_BASE + 0x18)
+#define CIC_PCIMSI_MSK_REG     regptr(MSP_CIC_BASE + 0x18)
+#define CIC_PCIFLSH_REG                regptr(MSP_CIC_BASE + 0x18)
+#define CIC_VPE0_SWINT_REG     regptr(MSP_CIC_BASE + 0x08)
+
+
+/*
+ ***************************************************************************
+ * Memory controller registers                                            *
+ ***************************************************************************
+ */
+#define MEM_CFG1_REG           regptr(MSP_MEM_CFG_BASE + 0x00)
+#define MEM_SS_ADDR            regptr(MSP_MEM_CFG_BASE + 0x00)
+#define MEM_SS_DATA            regptr(MSP_MEM_CFG_BASE + 0x04)
+#define MEM_SS_WRITE           regptr(MSP_MEM_CFG_BASE + 0x08)
+
+/*
+ ***************************************************************************
+ * PCI controller registers                                               *
+ ***************************************************************************
+ */
+#define PCI_BASE_REG           regptr(MSP_PCI_BASE + 0x00)
+#define PCI_CONFIG_SPACE_REG   regptr(MSP_PCI_BASE + 0x800)
+#define PCI_JTAG_DEVID_REG     regptr(MSP_SLP_BASE + 0x13c)
+
+/*
+ ########################################################################
+ #  Register content & macro definitions                               #
+ ########################################################################
+ */
+
+/*
+ ***************************************************************************
+ * DEV_ID defines                                                         *
+ ***************************************************************************
+ */
+#define DEV_ID_PCI_DIS         (1 << 26)       /* Set if PCI disabled */
+#define DEV_ID_PCI_HOST                (1 << 20)       /* Set if PCI host */
+#define DEV_ID_SINGLE_PC       (1 << 19)       /* Set if single PC Card */
+#define DEV_ID_FAMILY          (0xff << 8)     /* family ID code */
+#define POLO_ZEUS_SUB_FAMILY   (0x7  << 16)    /* sub family for Polo/Zeus */
+
+#define MSPFPGA_ID             (0x00  << 8)    /* you are on your own here */
+#define MSP5000_ID             (0x50  << 8)
+#define MSP4F00_ID             (0x4f  << 8)    /* FPGA version of MSP4200 */
+#define MSP4E00_ID             (0x4f  << 8)    /* FPGA version of MSP7120 */
+#define MSP4200_ID             (0x42  << 8)
+#define MSP4000_ID             (0x40  << 8)
+#define MSP2XXX_ID             (0x20  << 8)
+#define MSPZEUS_ID             (0x10  << 8)
+
+#define MSP2004_SUB_ID         (0x0   << 16)
+#define MSP2005_SUB_ID         (0x1   << 16)
+#define MSP2006_SUB_ID         (0x1   << 16)
+#define MSP2007_SUB_ID         (0x2   << 16)
+#define MSP2010_SUB_ID         (0x3   << 16)
+#define MSP2015_SUB_ID         (0x4   << 16)
+#define MSP2020_SUB_ID         (0x5   << 16)
+#define MSP2100_SUB_ID         (0x6   << 16)
+
+/*
+ ***************************************************************************
+ * RESET defines                                                          *
+ ***************************************************************************
+ */
+#define MSP_GR_RST             (0x01 << 0)     /* Global reset bit     */
+#define MSP_MR_RST             (0x01 << 1)     /* MIPS reset bit       */
+#define MSP_PD_RST             (0x01 << 2)     /* PVC DMA reset bit    */
+#define MSP_PP_RST             (0x01 << 3)     /* PVC reset bit        */
+/* reserved                                                            */
+#define MSP_EA_RST             (0x01 << 6)     /* Mac A reset bit      */
+#define MSP_EB_RST             (0x01 << 7)     /* Mac B reset bit      */
+#define MSP_SE_RST             (0x01 << 8)     /* Security Eng reset bit */
+#define MSP_PB_RST             (0x01 << 9)     /* Per block reset bit  */
+#define MSP_EC_RST             (0x01 << 10)    /* Mac C reset bit      */
+#define MSP_TW_RST             (0x01 << 11)    /* TWI reset bit        */
+#define MSP_SPI_RST            (0x01 << 12)    /* SPI/MPI reset bit    */
+#define MSP_U1_RST             (0x01 << 13)    /* UART1 reset bit      */
+#define MSP_U0_RST             (0x01 << 14)    /* UART0 reset bit      */
+
+/*
+ ***************************************************************************
+ * UART defines                                                                   *
+ ***************************************************************************
+ */
+#define MSP_BASE_BAUD          25000000
+#define MSP_UART_REG_LEN       0x20
+
+/*
+ ***************************************************************************
+ * ELB defines                                                            *
+ ***************************************************************************
+ */
+#define PCCARD_32              0x02    /* Set if is PCCARD 32 (Cardbus) */
+#define SINGLE_PCCARD          0x01    /* Set to enable single PC card */
+
+/*
+ ***************************************************************************
+ * CIC defines                                                            *
+ ***************************************************************************
+ */
+
+/* CIC_EXT_CFG_REG */
+#define EXT_INT_POL(eirq)                      (1 << (eirq + 8))
+#define EXT_INT_EDGE(eirq)                     (1 << eirq)
+
+#define CIC_EXT_SET_TRIGGER_LEVEL(reg, eirq)   (reg &= ~EXT_INT_EDGE(eirq))
+#define CIC_EXT_SET_TRIGGER_EDGE(reg, eirq)    (reg |= EXT_INT_EDGE(eirq))
+#define CIC_EXT_SET_ACTIVE_HI(reg, eirq)       (reg |= EXT_INT_POL(eirq))
+#define CIC_EXT_SET_ACTIVE_LO(reg, eirq)       (reg &= ~EXT_INT_POL(eirq))
+#define CIC_EXT_SET_ACTIVE_RISING              CIC_EXT_SET_ACTIVE_HI
+#define CIC_EXT_SET_ACTIVE_FALLING             CIC_EXT_SET_ACTIVE_LO
+
+#define CIC_EXT_IS_TRIGGER_LEVEL(reg, eirq) \
+                               ((reg & EXT_INT_EDGE(eirq)) == 0)
+#define CIC_EXT_IS_TRIGGER_EDGE(reg, eirq)     (reg & EXT_INT_EDGE(eirq))
+#define CIC_EXT_IS_ACTIVE_HI(reg, eirq)                (reg & EXT_INT_POL(eirq))
+#define CIC_EXT_IS_ACTIVE_LO(reg, eirq) \
+                               ((reg & EXT_INT_POL(eirq)) == 0)
+#define CIC_EXT_IS_ACTIVE_RISING               CIC_EXT_IS_ACTIVE_HI
+#define CIC_EXT_IS_ACTIVE_FALLING              CIC_EXT_IS_ACTIVE_LO
+
+/*
+ ***************************************************************************
+ * Memory Controller defines                                              *
+ ***************************************************************************
+ */
+
+/* Indirect memory controller registers */
+#define DDRC_CFG(n)            (n)
+#define DDRC_DEBUG(n)          (0x04 + n)
+#define DDRC_CTL(n)            (0x40 + n)
+
+/* Macro to perform DDRC indirect write */
+#define DDRC_INDIRECT_WRITE(reg, mask, value) \
+({ \
+       *MEM_SS_ADDR = (((mask) & 0xf) << 8) | ((reg) & 0xff); \
+       *MEM_SS_DATA = (value); \
+       *MEM_SS_WRITE = 1; \
+})
+
+/*
+ ***************************************************************************
+ * SPI/MPI Mode                                                                   *
+ ***************************************************************************
+ */
+#define SPI_MPI_RX_BUSY                0x00008000      /* SPI/MPI Receive Busy */
+#define SPI_MPI_FIFO_EMPTY     0x00004000      /* SPI/MPI Fifo Empty   */
+#define SPI_MPI_TX_BUSY                0x00002000      /* SPI/MPI Transmit Busy */
+#define SPI_MPI_FIFO_FULL      0x00001000      /* SPI/MPU FIFO full    */
+
+/*
+ ***************************************************************************
+ * SPI/MPI Control Register                                               *
+ ***************************************************************************
+ */
+#define SPI_MPI_RX_START       0x00000004      /* Start receive command */
+#define SPI_MPI_FLUSH_Q                0x00000002      /* Flush SPI/MPI Queue */
+#define SPI_MPI_TX_START       0x00000001      /* Start Transmit Command */
+
+#endif /* !_ASM_MSP_REGS_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h
new file mode 100644 (file)
index 0000000..51a66dc
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Defines for the MSP interrupt controller.
+ *
+ * Copyright (C) 1999 MIPS Technologies, Inc.  All rights reserved.
+ * Author: Carsten Langgaard, carstenl@mips.com
+ *
+ * ########################################################################
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+#ifndef _MSP_SLP_INT_H
+#define _MSP_SLP_INT_H
+
+/*
+ * The PMC-Sierra SLP interrupts are arranged in a 3 level cascaded
+ * hierarchical system.         The first level are the direct MIPS interrupts
+ * and are assigned the interrupt range 0-7.  The second level is the SLM
+ * interrupt controller and is assigned the range 8-39.         The third level
+ * comprises the Peripheral block, the PCI block, the PCI MSI block and
+ * the SLP.  The PCI interrupts and the SLP errors are handled by the
+ * relevant subsystems so the core interrupt code needs only concern
+ * itself with the Peripheral block.  These are assigned interrupts in
+ * the range 40-71.
+ */
+
+/*
+ * IRQs directly connected to CPU
+ */
+#define MSP_MIPS_INTBASE       0
+#define MSP_INT_SW0            0  /* IRQ for swint0,         C_SW0  */
+#define MSP_INT_SW1            1  /* IRQ for swint1,         C_SW1  */
+#define MSP_INT_MAC0           2  /* IRQ for MAC 0,          C_IRQ0 */
+#define MSP_INT_MAC1           3  /* IRQ for MAC 1,          C_IRQ1 */
+#define MSP_INT_C_IRQ2         4  /* Wired off,              C_IRQ2 */
+#define MSP_INT_VE             5  /* IRQ for Voice Engine,   C_IRQ3 */
+#define MSP_INT_SLP            6  /* IRQ for SLM block,      C_IRQ4 */
+#define MSP_INT_TIMER          7  /* IRQ for the MIPS timer, C_IRQ5 */
+
+/*
+ * IRQs cascaded on CPU interrupt 4 (CAUSE bit 12, C_IRQ4)
+ * These defines should be tied to the register definition for the SLM
+ * interrupt routine.  For now, just use hard-coded values.
+ */
+#define MSP_SLP_INTBASE                (MSP_MIPS_INTBASE + 8)
+#define MSP_INT_EXT0           (MSP_SLP_INTBASE + 0)
+                                       /* External interrupt 0         */
+#define MSP_INT_EXT1           (MSP_SLP_INTBASE + 1)
+                                       /* External interrupt 1         */
+#define MSP_INT_EXT2           (MSP_SLP_INTBASE + 2)
+                                       /* External interrupt 2         */
+#define MSP_INT_EXT3           (MSP_SLP_INTBASE + 3)
+                                       /* External interrupt 3         */
+/* Reserved                                       4-7                  */
+
+/*
+ *************************************************************************
+ * DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER *
+ * Some MSP products have this interrupt labelled as Voice and some are         *
+ * SEC mbox ...                                                                 *
+ *************************************************************************
+ */
+#define MSP_INT_SLP_VE         (MSP_SLP_INTBASE + 8)
+                                       /* Cascaded IRQ for Voice Engine*/
+#define MSP_INT_SLP_TDM                (MSP_SLP_INTBASE + 9)
+                                       /* TDM interrupt                */
+#define MSP_INT_SLP_MAC0       (MSP_SLP_INTBASE + 10)
+                                       /* Cascaded IRQ for MAC 0       */
+#define MSP_INT_SLP_MAC1       (MSP_SLP_INTBASE + 11)
+                                       /* Cascaded IRQ for MAC 1       */
+#define MSP_INT_SEC            (MSP_SLP_INTBASE + 12)
+                                       /* IRQ for security engine      */
+#define MSP_INT_PER            (MSP_SLP_INTBASE + 13)
+                                       /* Peripheral interrupt         */
+#define MSP_INT_TIMER0         (MSP_SLP_INTBASE + 14)
+                                       /* SLP timer 0                  */
+#define MSP_INT_TIMER1         (MSP_SLP_INTBASE + 15)
+                                       /* SLP timer 1                  */
+#define MSP_INT_TIMER2         (MSP_SLP_INTBASE + 16)
+                                       /* SLP timer 2                  */
+#define MSP_INT_SLP_TIMER      (MSP_SLP_INTBASE + 17)
+                                       /* Cascaded MIPS timer          */
+#define MSP_INT_BLKCP          (MSP_SLP_INTBASE + 18)
+                                       /* Block Copy                   */
+#define MSP_INT_UART0          (MSP_SLP_INTBASE + 19)
+                                       /* UART 0                       */
+#define MSP_INT_PCI            (MSP_SLP_INTBASE + 20)
+                                       /* PCI subsystem                */
+#define MSP_INT_PCI_DBELL      (MSP_SLP_INTBASE + 21)
+                                       /* PCI doorbell                 */
+#define MSP_INT_PCI_MSI                (MSP_SLP_INTBASE + 22)
+                                       /* PCI Message Signal           */
+#define MSP_INT_PCI_BC0                (MSP_SLP_INTBASE + 23)
+                                       /* PCI Block Copy 0             */
+#define MSP_INT_PCI_BC1                (MSP_SLP_INTBASE + 24)
+                                       /* PCI Block Copy 1             */
+#define MSP_INT_SLP_ERR                (MSP_SLP_INTBASE + 25)
+                                       /* SLP error condition          */
+#define MSP_INT_MAC2           (MSP_SLP_INTBASE + 26)
+                                       /* IRQ for MAC2                 */
+/* Reserved                                       27-31                */
+
+/*
+ * IRQs cascaded on SLP PER interrupt (MSP_INT_PER)
+ */
+#define MSP_PER_INTBASE                (MSP_SLP_INTBASE + 32)
+/* Reserved                                       0-1                  */
+#define MSP_INT_UART1          (MSP_PER_INTBASE + 2)
+                                       /* UART 1                       */
+/* Reserved                                       3-5                  */
+#define MSP_INT_2WIRE          (MSP_PER_INTBASE + 6)
+                                       /* 2-wire                       */
+#define MSP_INT_TM0            (MSP_PER_INTBASE + 7)
+                                       /* Peripheral timer block out 0 */
+#define MSP_INT_TM1            (MSP_PER_INTBASE + 8)
+                                       /* Peripheral timer block out 1 */
+/* Reserved                                       9                    */
+#define MSP_INT_SPRX           (MSP_PER_INTBASE + 10)
+                                       /* SPI RX complete              */
+#define MSP_INT_SPTX           (MSP_PER_INTBASE + 11)
+                                       /* SPI TX complete              */
+#define MSP_INT_GPIO           (MSP_PER_INTBASE + 12)
+                                       /* GPIO                         */
+#define MSP_INT_PER_ERR                (MSP_PER_INTBASE + 13)
+                                       /* Peripheral error             */
+/* Reserved                                       14-31                */
+
+#endif /* !_MSP_SLP_INT_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
new file mode 100644 (file)
index 0000000..aa45e6a
--- /dev/null
@@ -0,0 +1,144 @@
+/******************************************************************
+ * Copyright (c) 2000-2007 PMC-Sierra INC.
+ *
+ *     This program is free software; you can redistribute it
+ *     and/or modify it under the terms of the GNU General
+ *     Public License as published by the Free Software
+ *     Foundation; either version 2 of the License, or (at your
+ *     option) any later version.
+ *
+ *     This program is distributed in the hope that it will be
+ *     useful, but WITHOUT ANY WARRANTY; without even the implied
+ *     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ *     PURPOSE.  See the GNU General Public License for more
+ *     details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this program; if not, write to the Free
+ *     Software Foundation, Inc., 675 Mass Ave, Cambridge, MA
+ *     02139, USA.
+ *
+ * PMC-SIERRA INC. DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS
+ * SOFTWARE.
+ */
+#ifndef MSP_USB_H_
+#define MSP_USB_H_
+
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+#define NUM_USB_DEVS   2
+#else
+#define NUM_USB_DEVS   1
+#endif
+
+/* Register spaces for USB host 0 */
+#define MSP_USB0_MAB_START     (MSP_USB0_BASE + 0x0)
+#define MSP_USB0_MAB_END       (MSP_USB0_BASE + 0x17)
+#define MSP_USB0_ID_START      (MSP_USB0_BASE + 0x40000)
+#define MSP_USB0_ID_END                (MSP_USB0_BASE + 0x4008f)
+#define MSP_USB0_HS_START      (MSP_USB0_BASE + 0x40100)
+#define MSP_USB0_HS_END                (MSP_USB0_BASE + 0x401FF)
+
+/* Register spaces for USB host 1 */
+#define MSP_USB1_MAB_START     (MSP_USB1_BASE + 0x0)
+#define MSP_USB1_MAB_END       (MSP_USB1_BASE + 0x17)
+#define MSP_USB1_ID_START      (MSP_USB1_BASE + 0x40000)
+#define MSP_USB1_ID_END                (MSP_USB1_BASE + 0x4008f)
+#define MSP_USB1_HS_START      (MSP_USB1_BASE + 0x40100)
+#define MSP_USB1_HS_END                (MSP_USB1_BASE + 0x401ff)
+
+/* USB Identification registers */
+struct msp_usbid_regs {
+       u32 id;         /* 0x0: Identification register */
+       u32 hwgen;      /* 0x4: General HW params */
+       u32 hwhost;     /* 0x8: Host HW params */
+       u32 hwdev;      /* 0xc: Device HW params */
+       u32 hwtxbuf;    /* 0x10: Tx buffer HW params */
+       u32 hwrxbuf;    /* 0x14: Rx buffer HW params */
+       u32 reserved[26];
+       u32 timer0_load; /* 0x80: General-purpose timer 0 load*/
+       u32 timer0_ctrl; /* 0x84: General-purpose timer 0 control */
+       u32 timer1_load; /* 0x88: General-purpose timer 1 load*/
+       u32 timer1_ctrl; /* 0x8c: General-purpose timer 1 control */
+};
+
+/* MSBus to AMBA registers */
+struct msp_mab_regs {
+       u32 isr;        /* 0x0: Interrupt status */
+       u32 imr;        /* 0x4: Interrupt mask */
+       u32 thcr0;      /* 0x8: Transaction header capture 0 */
+       u32 thcr1;      /* 0xc: Transaction header capture 1 */
+       u32 int_stat;   /* 0x10: Interrupt status summary */
+       u32 phy_cfg;    /* 0x14: USB phy config */
+};
+
+/* EHCI registers */
+struct msp_usbhs_regs {
+       u32 hciver;     /* 0x0: Version and offset to operational regs */
+       u32 hcsparams;  /* 0x4: Host control structural parameters */
+       u32 hccparams;  /* 0x8: Host control capability parameters */
+       u32 reserved0[5];
+       u32 dciver;     /* 0x20: Device interface version */
+       u32 dccparams;  /* 0x24: Device control capability parameters */
+       u32 reserved1[6];
+       u32 cmd;        /* 0x40: USB command */
+       u32 sts;        /* 0x44: USB status */
+       u32 int_ena;    /* 0x48: USB interrupt enable */
+       u32 frindex;    /* 0x4c: Frame index */
+       u32 reserved3;
+       union {
+               struct {
+                       u32 flb_addr; /* 0x54: Frame list base address */
+                       u32 next_async_addr; /* 0x58: next asynchronous addr */
+                       u32 ttctrl; /* 0x5c: embedded transaction translator
+                                                       async buffer status */
+                       u32 burst_size; /* 0x60: Controller burst size */
+                       u32 tx_fifo_ctrl; /* 0x64: Tx latency FIFO tuning */
+                       u32 reserved0[4];
+                       u32 endpt_nak; /* 0x78: Endpoint NAK */
+                       u32 endpt_nak_ena; /* 0x7c: Endpoint NAK enable */
+                       u32 cfg_flag; /* 0x80: Config flag */
+                       u32 port_sc1; /* 0x84: Port status & control 1 */
+                       u32 reserved1[7];
+                       u32 otgsc;      /* 0xa4: OTG status & control */
+                       u32 mode;       /* 0xa8: USB controller mode */
+               } host;
+
+               struct {
+                       u32 dev_addr; /* 0x54: Device address */
+                       u32 endpt_list_addr; /* 0x58: Endpoint list address */
+                       u32 reserved0[7];
+                       u32 endpt_nak;  /* 0x74 */
+                       u32 endpt_nak_ctrl; /* 0x78 */
+                       u32 cfg_flag; /* 0x80 */
+                       u32 port_sc1; /* 0x84: Port status & control 1 */
+                       u32 reserved[7];
+                       u32 otgsc;      /* 0xa4: OTG status & control */
+                       u32 mode;       /* 0xa8: USB controller mode */
+                       u32 endpt_setup_stat; /* 0xac */
+                       u32 endpt_prime; /* 0xb0 */
+                       u32 endpt_flush; /* 0xb4 */
+                       u32 endpt_stat; /* 0xb8 */
+                       u32 endpt_complete; /* 0xbc */
+                       u32 endpt_ctrl0; /* 0xc0 */
+                       u32 endpt_ctrl1; /* 0xc4 */
+                       u32 endpt_ctrl2; /* 0xc8 */
+                       u32 endpt_ctrl3; /* 0xcc */
+               } device;
+       } u;
+};
+/*
+ * Container for the more-generic platform_device.
+ * This exists mainly as a way to map the non-standard register
+ * spaces and make them accessible to the USB ISR.
+ */
+struct mspusb_device {
+       struct msp_mab_regs   __iomem *mab_regs;
+       struct msp_usbid_regs __iomem *usbid_regs;
+       struct msp_usbhs_regs __iomem *usbhs_regs;
+       struct platform_device dev;
+};
+
+#define to_mspusb_device(x) container_of((x), struct mspusb_device, dev)
+#define TO_HOST_ID(x) ((x) & 0x3)
+#endif /*MSP_USB_H_*/
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/war.h b/arch/mips/include/asm/mach-pmcs-msp71xx/war.h
new file mode 100644 (file)
index 0000000..a60bf9d
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_PMC_SIERRA_WAR_H
+#define __ASM_MIPS_PMC_SIERRA_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR    0
+#define R4600_V1_HIT_CACHEOP_WAR       0
+#define R4600_V2_HIT_CACHEOP_WAR       0
+#define R5432_CP0_INTERRUPT_WAR                0
+#define BCM1250_M3_WAR                 0
+#define SIBYTE_1956_WAR                        0
+#define MIPS4K_ICACHE_REFILL_WAR       0
+#define MIPS_CACHE_SYNC_WAR            0
+#define TX49XX_ICACHE_INDEX_INV_WAR    0
+#define ICACHE_REFILLS_WORKAROUND_WAR  0
+#define R10000_LLSC_WAR                        0
+#if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \
+       defined(CONFIG_PMC_MSP7120_FPGA)
+#define MIPS34K_MISSED_ITLB_WAR                1
+#else
+#define MIPS34K_MISSED_ITLB_WAR                0
+#endif
+
+#endif /* __ASM_MIPS_PMC_SIERRA_WAR_H */
index 6d70264..daa85ce 100644 (file)
 #define PNX833X_TIMER_IRQ                              (MIPS_CPU_IRQ_BASE + 7)
 
 /* Interrupts supported by PIC */
-#define PNX833X_PIC_I2C0_INT                   (PNX833X_PIC_IRQ_BASE +  1)
-#define PNX833X_PIC_I2C1_INT                   (PNX833X_PIC_IRQ_BASE +  2)
-#define PNX833X_PIC_UART0_INT                  (PNX833X_PIC_IRQ_BASE +  3)
-#define PNX833X_PIC_UART1_INT                  (PNX833X_PIC_IRQ_BASE +  4)
-#define PNX833X_PIC_TS_IN0_DV_INT              (PNX833X_PIC_IRQ_BASE +  5)
-#define PNX833X_PIC_TS_IN0_DMA_INT             (PNX833X_PIC_IRQ_BASE +  6)
-#define PNX833X_PIC_GPIO_INT                   (PNX833X_PIC_IRQ_BASE +  7)
-#define PNX833X_PIC_AUDIO_DEC_INT              (PNX833X_PIC_IRQ_BASE +  8)
-#define PNX833X_PIC_VIDEO_DEC_INT              (PNX833X_PIC_IRQ_BASE +  9)
+#define PNX833X_PIC_I2C0_INT                   (PNX833X_PIC_IRQ_BASE +  1)
+#define PNX833X_PIC_I2C1_INT                   (PNX833X_PIC_IRQ_BASE +  2)
+#define PNX833X_PIC_UART0_INT                  (PNX833X_PIC_IRQ_BASE +  3)
+#define PNX833X_PIC_UART1_INT                  (PNX833X_PIC_IRQ_BASE +  4)
+#define PNX833X_PIC_TS_IN0_DV_INT              (PNX833X_PIC_IRQ_BASE +  5)
+#define PNX833X_PIC_TS_IN0_DMA_INT             (PNX833X_PIC_IRQ_BASE +  6)
+#define PNX833X_PIC_GPIO_INT                   (PNX833X_PIC_IRQ_BASE +  7)
+#define PNX833X_PIC_AUDIO_DEC_INT              (PNX833X_PIC_IRQ_BASE +  8)
+#define PNX833X_PIC_VIDEO_DEC_INT              (PNX833X_PIC_IRQ_BASE +  9)
 #define PNX833X_PIC_CONFIG_INT                 (PNX833X_PIC_IRQ_BASE + 10)
 #define PNX833X_PIC_AOI_INT                            (PNX833X_PIC_IRQ_BASE + 11)
 #define PNX833X_PIC_SYNC_INT                   (PNX833X_PIC_IRQ_BASE + 12)
index 100f528..e6fc3a9 100644 (file)
@@ -73,7 +73,7 @@
 
 
 #define PNX833X_RESET_CONTROL          PNX833X_REG(0x8004)
-#define PNX833X_RESET_CONTROL_2        PNX833X_REG(0x8014)
+#define PNX833X_RESET_CONTROL_2                PNX833X_REG(0x8014)
 
 #define PNX833X_PIC_REG(offs)          PNX833X_REG(0x01000 + (offs))
 #define PNX833X_PIC_INT_PRIORITY       PNX833X_PIC_REG(0x0)
 #define PNX833X_PIC_INT_SRC_INT_SRC_SHIFT      3
 #define PNX833X_PIC_INT_REG(irq)       PNX833X_PIC_REG(0x10 + 4*(irq))
 
-#define PNX833X_CLOCK_CPUCP_CTL        PNX833X_REG(0x9228)
+#define PNX833X_CLOCK_CPUCP_CTL PNX833X_REG(0x9228)
 #define PNX833X_CLOCK_CPUCP_CTL_EXIT_RESET     0x00000002ul    /* bit 1 */
 #define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_MASK 0x00000018ul    /* bits 4:3 */
-#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_SHIFT        3
+#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_SHIFT 3
 
 #define PNX8335_CLOCK_PLL_CPU_CTL              PNX833X_REG(0x9020)
 #define PNX8335_CLOCK_PLL_CPU_CTL_FREQ_MASK    0x1f
 #define PNX833X_MIU_SEL0_SPI_MODE_ENABLE_MASK  (1 << 14)
 #define PNX833X_MIU_SEL0_SPI_MODE_ENABLE_SHIFT 14
 
-#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_MASK        (1 << 7)
+#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_MASK (1 << 7)
 #define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_SHIFT       7
 
 #define PNX833X_MIU_SEL0_BURST_PAGE_LEN_MASK   (0xF << 9)
 #define PNX833X_MIU_CONFIG_SPI_OPCODE_MASK     (0xFF << 3)
 #define PNX833X_MIU_CONFIG_SPI_OPCODE_SHIFT    3
 
-#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_MASK        (1 << 2)
+#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_MASK (1 << 2)
 #define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_SHIFT       2
 
-#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_MASK        (1 << 1)
+#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_MASK (1 << 1)
 #define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_SHIFT       1
 
 #define PNX833X_MIU_CONFIG_SPI_SYNC_MASK       (1 << 0)
diff --git a/arch/mips/include/asm/mach-pnx8550/cm.h b/arch/mips/include/asm/mach-pnx8550/cm.h
deleted file mode 100644 (file)
index bb0a56c..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *   Clock module specific definitions
- *
- * Author: source@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_CM_H
-#define __PNX8550_CM_H
-
-#define PNX8550_CM_BASE        0xBBE47000
-
-#define PNX8550_CM_PLL0_CTL    *(volatile unsigned long *)(PNX8550_CM_BASE + 0x000)
-#define PNX8550_CM_PLL1_CTL    *(volatile unsigned long *)(PNX8550_CM_BASE + 0x004)
-#define PNX8550_CM_PLL2_CTL    *(volatile unsigned long *)(PNX8550_CM_BASE + 0x008)
-#define PNX8550_CM_PLL3_CTL    *(volatile unsigned long *)(PNX8550_CM_BASE + 0x00C)
-
-// Table not complete.....
-
-#define PNX8550_CM_PLL_BLOCKED_MASK     0x80000000
-#define PNX8550_CM_PLL_LOCK_MASK        0x40000000
-#define PNX8550_CM_PLL_CURRENT_ADJ_MASK 0x3c000000
-#define PNX8550_CM_PLL_N_MASK           0x01ff0000
-#define PNX8550_CM_PLL_M_MASK           0x00003f00
-#define PNX8550_CM_PLL_P_MASK           0x0000000c
-#define PNX8550_CM_PLL_PD_MASK          0x00000002
-
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/glb.h b/arch/mips/include/asm/mach-pnx8550/glb.h
deleted file mode 100644 (file)
index 07aa85e..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *   PNX8550 global definitions
- *
- * Author: source@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_GLB_H
-#define __PNX8550_GLB_H
-
-#define PNX8550_GLB1_BASE      0xBBE63000
-#define PNX8550_GLB2_BASE      0xBBE4d000
-#define PNX8550_RESET_BASE      0xBBE60000
-
-/* PCI Inta Output Enable Registers */
-#define PNX8550_GLB2_ENAB_INTA_O       *(volatile unsigned long *)(PNX8550_GLB2_BASE + 0x050)
-
-/* Bit 1:Enable DAC Powerdown
-     0:DACs are enabled and are working normally
-     1:DACs are powerdown
-*/
-#define PNX8550_GLB_DAC_PD      0x2
-/*   Bit 0:Enable of PCI inta output
-     0 = Disable PCI inta output
-     1 = Enable PCI inta output
-*/
-#define PNX8550_GLB_ENABLE_INTA_O 0x1
-
-/* PCI Direct Mappings */
-#define PNX8550_PCIMEM         0x12000000
-#define PNX8550_PCIMEM_SIZE    0x08000000
-#define PNX8550_PCIIO          0x1c000000
-#define PNX8550_PCIIO_SIZE     0x02000000      /* 32M */
-
-#define PNX8550_PORT_BASE      KSEG1
-
-// GPIO def
-#define PNX8550_GPIO_BASE      0x1Be00000
-
-#define PNX8550_GPIO_DIRQ0      (PNX8550_GPIO_BASE + 0x104500)
-#define PNX8550_GPIO_MC1         (PNX8550_GPIO_BASE + 0x104004)
-#define PNX8550_GPIO_MC_31_BIT   30
-#define PNX8550_GPIO_MC_30_BIT   28
-#define PNX8550_GPIO_MC_29_BIT   26
-#define PNX8550_GPIO_MC_28_BIT   24
-#define PNX8550_GPIO_MC_27_BIT   22
-#define PNX8550_GPIO_MC_26_BIT   20
-#define PNX8550_GPIO_MC_25_BIT   18
-#define PNX8550_GPIO_MC_24_BIT   16
-#define PNX8550_GPIO_MC_23_BIT   14
-#define PNX8550_GPIO_MC_22_BIT   12
-#define PNX8550_GPIO_MC_21_BIT   10
-#define PNX8550_GPIO_MC_20_BIT   8
-#define PNX8550_GPIO_MC_19_BIT   6
-#define PNX8550_GPIO_MC_18_BIT   4
-#define PNX8550_GPIO_MC_17_BIT   2
-#define PNX8550_GPIO_MC_16_BIT   0
-
-#define PNX8550_GPIO_MODE_PRIMOP    0x1
-#define PNX8550_GPIO_MODE_NO_OPENDR 0x2
-#define PNX8550_GPIO_MODE_OPENDR    0x3
-
-// RESET module
-#define PNX8550_RST_CTL             *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x0)
-#define PNX8550_RST_CAUSE           *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x4)
-#define PNX8550_RST_EN_WATCHDOG     *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x8)
-
-#define PNX8550_RST_REL_MIPS_RST_N     0x8
-#define PNX8550_RST_DO_SW_RST          0x4
-#define PNX8550_RST_REL_SYS_RST_OUT    0x2
-#define PNX8550_RST_ASSERT_SYS_RST_OUT 0x1
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/int.h b/arch/mips/include/asm/mach-pnx8550/int.h
deleted file mode 100644 (file)
index 0e0668b..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *   Interrupt specific definitions
- *
- * Author: source@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_INT_H
-#define __PNX8550_INT_H
-
-#define PNX8550_GIC_BASE       0xBBE3E000
-
-#define PNX8550_GIC_PRIMASK_0  *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x000)
-#define PNX8550_GIC_PRIMASK_1  *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x004)
-#define PNX8550_GIC_VECTOR_0   *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x100)
-#define PNX8550_GIC_VECTOR_1   *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x104)
-#define PNX8550_GIC_PEND_1_31  *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x200)
-#define PNX8550_GIC_PEND_32_63 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x204)
-#define PNX8550_GIC_PEND_64_70 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x208)
-#define PNX8550_GIC_FEATURES   *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x300)
-#define PNX8550_GIC_REQ(x)     *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x400 + (x)*4)
-#define PNX8550_GIC_MOD_ID     *(volatile unsigned long *)(PNX8550_GIC_BASE + 0xFFC)
-
-// cp0 is two software + six hw exceptions
-#define PNX8550_INT_CP0_TOTINT 8
-#define PNX8550_INT_CP0_MIN    0
-#define PNX8550_INT_CP0_MAX    (PNX8550_INT_CP0_MIN + PNX8550_INT_CP0_TOTINT - 1)
-
-#define MIPS_CPU_GIC_IRQ        2
-#define MIPS_CPU_TIMER_IRQ      7
-
-// GIC are 71 exceptions connected to cp0's first hardware exception
-#define PNX8550_INT_GIC_TOTINT 71
-#define PNX8550_INT_GIC_MIN    (PNX8550_INT_CP0_MAX+1)
-#define PNX8550_INT_GIC_MAX    (PNX8550_INT_GIC_MIN + PNX8550_INT_GIC_TOTINT - 1)
-
-#define PNX8550_INT_UNDEF              (PNX8550_INT_GIC_MIN+0)
-#define PNX8550_INT_IPC_TARGET0_MIPS   (PNX8550_INT_GIC_MIN+1)
-#define PNX8550_INT_IPC_TARGET1_TM32_1 (PNX8550_INT_GIC_MIN+2)
-#define PNX8550_INT_IPC_TARGET1_TM32_2 (PNX8550_INT_GIC_MIN+3)
-#define PNX8550_INT_RESERVED_4         (PNX8550_INT_GIC_MIN+4)
-#define PNX8550_INT_USB                (PNX8550_INT_GIC_MIN+5)
-#define PNX8550_INT_GPIO_EQ1           (PNX8550_INT_GIC_MIN+6)
-#define PNX8550_INT_GPIO_EQ2           (PNX8550_INT_GIC_MIN+7)
-#define PNX8550_INT_GPIO_EQ3           (PNX8550_INT_GIC_MIN+8)
-#define PNX8550_INT_GPIO_EQ4           (PNX8550_INT_GIC_MIN+9)
-
-#define PNX8550_INT_GPIO_EQ5           (PNX8550_INT_GIC_MIN+10)
-#define PNX8550_INT_GPIO_EQ6           (PNX8550_INT_GIC_MIN+11)
-#define PNX8550_INT_RESERVED_12        (PNX8550_INT_GIC_MIN+12)
-#define PNX8550_INT_QVCP1              (PNX8550_INT_GIC_MIN+13)
-#define PNX8550_INT_QVCP2              (PNX8550_INT_GIC_MIN+14)
-#define PNX8550_INT_I2C1               (PNX8550_INT_GIC_MIN+15)
-#define PNX8550_INT_I2C2               (PNX8550_INT_GIC_MIN+16)
-#define PNX8550_INT_ISO_UART1          (PNX8550_INT_GIC_MIN+17)
-#define PNX8550_INT_ISO_UART2          (PNX8550_INT_GIC_MIN+18)
-#define PNX8550_INT_UART1              (PNX8550_INT_GIC_MIN+19)
-
-#define PNX8550_INT_UART2              (PNX8550_INT_GIC_MIN+20)
-#define PNX8550_INT_QNTR               (PNX8550_INT_GIC_MIN+21)
-#define PNX8550_INT_RESERVED22         (PNX8550_INT_GIC_MIN+22)
-#define PNX8550_INT_T_DSC              (PNX8550_INT_GIC_MIN+23)
-#define PNX8550_INT_M_DSC              (PNX8550_INT_GIC_MIN+24)
-#define PNX8550_INT_RESERVED25         (PNX8550_INT_GIC_MIN+25)
-#define PNX8550_INT_2D_DRAW_ENG        (PNX8550_INT_GIC_MIN+26)
-#define PNX8550_INT_MEM_BASED_SCALAR1  (PNX8550_INT_GIC_MIN+27)
-#define PNX8550_INT_VIDEO_MPEG         (PNX8550_INT_GIC_MIN+28)
-#define PNX8550_INT_VIDEO_INPUT_P1     (PNX8550_INT_GIC_MIN+29)
-
-#define PNX8550_INT_VIDEO_INPUT_P2     (PNX8550_INT_GIC_MIN+30)
-#define PNX8550_INT_SPDI1              (PNX8550_INT_GIC_MIN+31)
-#define PNX8550_INT_SPDO               (PNX8550_INT_GIC_MIN+32)
-#define PNX8550_INT_AUDIO_INPUT1       (PNX8550_INT_GIC_MIN+33)
-#define PNX8550_INT_AUDIO_OUTPUT1      (PNX8550_INT_GIC_MIN+34)
-#define PNX8550_INT_AUDIO_INPUT2       (PNX8550_INT_GIC_MIN+35)
-#define PNX8550_INT_AUDIO_OUTPUT2      (PNX8550_INT_GIC_MIN+36)
-#define PNX8550_INT_MEMBASED_SCALAR2   (PNX8550_INT_GIC_MIN+37)
-#define PNX8550_INT_VPK                (PNX8550_INT_GIC_MIN+38)
-#define PNX8550_INT_MPEG1_MIPS         (PNX8550_INT_GIC_MIN+39)
-
-#define PNX8550_INT_MPEG1_TM           (PNX8550_INT_GIC_MIN+40)
-#define PNX8550_INT_MPEG2_MIPS         (PNX8550_INT_GIC_MIN+41)
-#define PNX8550_INT_MPEG2_TM           (PNX8550_INT_GIC_MIN+42)
-#define PNX8550_INT_TS_DMA             (PNX8550_INT_GIC_MIN+43)
-#define PNX8550_INT_EDMA               (PNX8550_INT_GIC_MIN+44)
-#define PNX8550_INT_TM_DEBUG1          (PNX8550_INT_GIC_MIN+45)
-#define PNX8550_INT_TM_DEBUG2          (PNX8550_INT_GIC_MIN+46)
-#define PNX8550_INT_PCI_INTA           (PNX8550_INT_GIC_MIN+47)
-#define PNX8550_INT_CLOCK_MODULE       (PNX8550_INT_GIC_MIN+48)
-#define PNX8550_INT_PCI_XIO_INTA_PCI   (PNX8550_INT_GIC_MIN+49)
-
-#define PNX8550_INT_PCI_XIO_INTB_DMA   (PNX8550_INT_GIC_MIN+50)
-#define PNX8550_INT_PCI_XIO_INTC_GPPM  (PNX8550_INT_GIC_MIN+51)
-#define PNX8550_INT_PCI_XIO_INTD_GPXIO (PNX8550_INT_GIC_MIN+52)
-#define PNX8550_INT_DVD_CSS            (PNX8550_INT_GIC_MIN+53)
-#define PNX8550_INT_VLD                (PNX8550_INT_GIC_MIN+54)
-#define PNX8550_INT_GPIO_TSU_7_0       (PNX8550_INT_GIC_MIN+55)
-#define PNX8550_INT_GPIO_TSU_15_8      (PNX8550_INT_GIC_MIN+56)
-#define PNX8550_INT_GPIO_CTU_IR        (PNX8550_INT_GIC_MIN+57)
-#define PNX8550_INT_GPIO0              (PNX8550_INT_GIC_MIN+58)
-#define PNX8550_INT_GPIO1              (PNX8550_INT_GIC_MIN+59)
-
-#define PNX8550_INT_GPIO2              (PNX8550_INT_GIC_MIN+60)
-#define PNX8550_INT_GPIO3              (PNX8550_INT_GIC_MIN+61)
-#define PNX8550_INT_GPIO4              (PNX8550_INT_GIC_MIN+62)
-#define PNX8550_INT_GPIO5              (PNX8550_INT_GIC_MIN+63)
-#define PNX8550_INT_GPIO6              (PNX8550_INT_GIC_MIN+64)
-#define PNX8550_INT_GPIO7              (PNX8550_INT_GIC_MIN+65)
-#define PNX8550_INT_PMAN_SECURITY      (PNX8550_INT_GIC_MIN+66)
-#define PNX8550_INT_I2C3               (PNX8550_INT_GIC_MIN+67)
-#define PNX8550_INT_RESERVED_68        (PNX8550_INT_GIC_MIN+68)
-#define PNX8550_INT_SPDI2              (PNX8550_INT_GIC_MIN+69)
-
-#define PNX8550_INT_I2C4               (PNX8550_INT_GIC_MIN+70)
-
-// Timer are 3 exceptions connected to cp0's 7th hardware exception
-#define PNX8550_INT_TIMER_TOTINT       3
-#define PNX8550_INT_TIMER_MIN         (PNX8550_INT_GIC_MAX+1)
-#define PNX8550_INT_TIMER_MAX          (PNX8550_INT_TIMER_MIN + PNX8550_INT_TIMER_TOTINT - 1)
-
-#define PNX8550_INT_TIMER1             (PNX8550_INT_TIMER_MIN+0)
-#define PNX8550_INT_TIMER2             (PNX8550_INT_TIMER_MIN+1)
-#define PNX8550_INT_TIMER3             (PNX8550_INT_TIMER_MIN+2)
-#define PNX8550_INT_WATCHDOG           PNX8550_INT_TIMER3
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h b/arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h
deleted file mode 100644 (file)
index bdde00c..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005 Embedded Alley Solutions, Inc
- */
-#ifndef __ASM_MACH_KERNEL_ENTRY_INIT_H
-#define __ASM_MACH_KERNEL_ENTRY_INIT_H
-
-#include <asm/cacheops.h>
-#include <asm/addrspace.h>
-
-#define CO_CONFIGPR_VALID  0x3F1F41FF    /* valid bits to write to ConfigPR */
-#define HAZARD_CP0 nop; nop; nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;
-#define CACHE_OPC      0xBC000000  /* MIPS cache instruction opcode */
-#define ICACHE_LINE_SIZE        32      /* Instruction cache line size bytes */
-#define DCACHE_LINE_SIZE        32      /* Data cache line size in bytes */
-
-#define ICACHE_SET_COUNT        256     /* Instruction cache set count */
-#define DCACHE_SET_COUNT        128     /* Data cache set count */
-
-#define ICACHE_SET_SIZE         (ICACHE_SET_COUNT * ICACHE_LINE_SIZE)
-#define DCACHE_SET_SIZE         (DCACHE_SET_COUNT * DCACHE_LINE_SIZE)
-
-       .macro  kernel_entry_setup
-       .set    push
-       .set    noreorder
-       /*
-        * PNX8550 entry point, when running a non compressed
-        * kernel. When loading a zImage, the head.S code in
-        * arch/mips/zboot/pnx8550 will init the caches and,
-        * decompress the kernel, and branch to kernel_entry.
-                */
-cache_begin:   li      t0, (1<<28)
-       mtc0    t0, CP0_STATUS /* cp0 usable */
-       HAZARD_CP0
-
-       mtc0    zero, CP0_CAUSE
-       HAZARD_CP0
-
-
-       /* Set static virtual to phys address translation and TLB disabled */
-       mfc0    t0, CP0_CONFIG, 7
-       HAZARD_CP0
-
-       and     t0, ~((1<<19) | (1<<20))     /* TLB/MAP cleared */
-       mtc0    t0, CP0_CONFIG, 7
-       HAZARD_CP0
-
-       /* CPU boots with kseg0 cache algo set to 0x2 -- uncached */
-
-       init_icache
-       nop
-       init_dcache
-       nop
-
-       cachePr4450ICReset
-       nop
-
-       cachePr4450DCReset
-       nop
-
-       /* read ConfigPR into t0 */
-       mfc0    t0, CP0_CONFIG, 7
-       HAZARD_CP0
-
-       /*  enable the TLB */
-       or      t0, (1<<19)
-
-       /* disable the ICACHE: at least 10x slower */
-       /* or      t0, (1<<26) */
-
-       /* disable the DCACHE; CONFIG_CPU_HAS_LLSC should not be set  */
-       /* or      t0, (1<<27) */
-
-       and     t0, CO_CONFIGPR_VALID
-
-       /* enable TLB. */
-       mtc0    t0, CP0_CONFIG, 7
-       HAZARD_CP0
-cache_end:
-       /* Setup CMEM_0 to MMIO address space, 2MB */
-       lui    t0, 0x1BE0
-       addi   t0, t0, 0x3
-       mtc0   $8, $22, 4
-       nop
-
-       /* Setup CMEM_1, 128MB */
-       lui    t0, 0x1000
-       addi   t0, t0, 0xf
-       mtc0   $8, $22, 5
-       nop
-
-
-       /* Setup CMEM_2, 32MB */
-       lui    t0, 0x1C00
-       addi   t0, t0, 0xb
-       mtc0   $8, $22, 6
-       nop
-
-       /* Setup CMEM_3, 0MB */
-       lui    t0, 0x0
-       addi   t0, t0, 0x0
-       mtc0   $8, $22, 7
-       nop
-
-       /* Enable cache */
-       mfc0    t0, CP0_CONFIG
-       HAZARD_CP0
-       and     t0, t0, 0xFFFFFFF8
-       or      t0, t0, 3
-       mtc0    t0, CP0_CONFIG
-       HAZARD_CP0
-       .set    pop
-       .endm
-
-       .macro  init_icache
-       .set    push
-       .set    noreorder
-
-       /* Get Cache Configuration */
-       mfc0    t3, CP0_CONFIG, 1
-       HAZARD_CP0
-
-       /* get cache Line size */
-
-       srl   t1, t3, 19   /* C0_CONFIGPR_IL_SHIFT */
-       andi  t1, t1, 0x7  /* C0_CONFIGPR_IL_MASK */
-       beq   t1, zero, pr4450_instr_cache_invalidated /* if zero instruction cache is absent */
-       nop
-       addiu t0, t1, 1
-       ori   t1, zero, 1
-       sllv  t1, t1, t0
-
-       /* get max cache Index */
-       srl   t2, t3, 22  /* C0_CONFIGPR_IS_SHIFT */
-       andi  t2, t2, 0x7 /* C0_CONFIGPR_IS_MASK */
-       addiu t0, t2, 6
-       ori   t2, zero, 1
-       sllv  t2, t2, t0
-
-       /* get max cache way */
-       srl   t3, t3, 16  /* C0_CONFIGPR_IA_SHIFT */
-       andi  t3, t3, 0x7 /* C0_CONFIGPR_IA_MASK */
-       addiu t3, t3, 1
-
-       /* total no of cache lines */
-       multu t2, t3             /* max index * max way */
-       mflo  t2
-       addiu t2, t2, -1
-
-       move  t0, zero
-pr4450_next_instruction_cache_set:
-       cache  Index_Invalidate_I, 0(t0)
-       addu  t0, t0, t1         /* add bytes in a line */
-       bne   t2, zero, pr4450_next_instruction_cache_set
-       addiu t2, t2, -1   /* reduce no of lines to invalidate by one */
-pr4450_instr_cache_invalidated:
-       .set    pop
-       .endm
-
-       .macro  init_dcache
-       .set    push
-       .set    noreorder
-       move t1, zero
-
-       /* Store Tag Information */
-       mtc0    zero, CP0_TAGLO, 0
-       HAZARD_CP0
-
-       mtc0    zero, CP0_TAGHI, 0
-       HAZARD_CP0
-
-       /* Cache size is 16384 = 512 lines x 32 bytes per line */
-       or       t2, zero, (128*4)-1  /* 512 lines  */
-       /* Invalidate all lines */
-2:
-       cache Index_Store_Tag_D, 0(t1)
-       addiu    t2, t2, -1
-       bne      t2, zero, 2b
-       addiu    t1, t1, 32        /* 32 bytes in a line */
-       .set pop
-       .endm
-
-       .macro  cachePr4450ICReset
-       .set    push
-       .set    noreorder
-
-       /* Save CP0 status reg on entry; */
-       /* disable interrupts during cache reset */
-       mfc0    t0, CP0_STATUS      /* T0 = interrupt status on entry */
-       HAZARD_CP0
-
-       mtc0    zero, CP0_STATUS   /* disable CPU interrupts */
-       HAZARD_CP0
-
-       or      t1, zero, zero              /* T1 = starting cache index (0) */
-       ori     t2, zero, (256 - 1) /* T2 = inst cache set cnt - 1 */
-
-       icache_invd_loop:
-       /* 9 == register t1 */
-       .word   CACHE_OPC | (9 << 21) | (Index_Invalidate_I << 16) | \
-               (0 * ICACHE_SET_SIZE)  /* invalidate inst cache WAY0 */
-       .word   CACHE_OPC | (9 << 21) | (Index_Invalidate_I << 16) | \
-               (1 * ICACHE_SET_SIZE)  /* invalidate inst cache WAY1 */
-
-       addiu   t1, t1, ICACHE_LINE_SIZE    /* T1 = next cache line index */
-       bne     t2, zero, icache_invd_loop /* T2 = 0 if all sets invalidated */
-       addiu   t2, t2, -1        /* decrement T2 set cnt (delay slot) */
-
-       /* Initialize the latches in the instruction cache tag */
-       /* that drive the way selection tri-state bus drivers, by doing a */
-       /* dummy load while the instruction cache is still disabled. */
-       /* TODO: Is this needed ? */
-       la      t1, KSEG0            /* T1 = cached memory base address */
-       lw      zero, 0x0000(t1)      /* (dummy read of first memory word) */
-
-       mtc0    t0, CP0_STATUS        /* restore interrupt status on entry */
-       HAZARD_CP0
-       .set    pop
-       .endm
-
-       .macro  cachePr4450DCReset
-       .set    push
-       .set    noreorder
-       mfc0    t0, CP0_STATUS           /* T0 = interrupt status on entry */
-       HAZARD_CP0
-       mtc0    zero, CP0_STATUS         /* disable CPU interrupts */
-       HAZARD_CP0
-
-       /* Writeback/invalidate entire data cache sets/ways/lines */
-       or      t1, zero, zero              /* T1 = starting cache index (0) */
-       ori     t2, zero, (DCACHE_SET_COUNT - 1) /* T2 = data cache set cnt - 1 */
-
-       dcache_wbinvd_loop:
-       /* 9 == register t1 */
-       .word   CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
-               (0 * DCACHE_SET_SIZE)  /* writeback/invalidate WAY0 */
-       .word   CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
-               (1 * DCACHE_SET_SIZE)  /* writeback/invalidate WAY1 */
-       .word   CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
-               (2 * DCACHE_SET_SIZE)  /* writeback/invalidate WAY2 */
-       .word   CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
-               (3 * DCACHE_SET_SIZE)  /* writeback/invalidate WAY3 */
-
-       addiu   t1, t1, DCACHE_LINE_SIZE  /* T1 = next data cache line index */
-       bne     t2, zero, dcache_wbinvd_loop /* T2 = 0 when wbinvd entire cache */
-       addiu   t2, t2, -1          /* decrement T2 set cnt (delay slot) */
-
-       /* Initialize the latches in the data cache tag that drive the way
-       selection tri-state bus drivers, by doing a dummy load while the
-       data cache is still in the disabled mode.  TODO: Is this needed ? */
-       la      t1, KSEG0            /* T1 = cached memory base address */
-       lw      zero, 0x0000(t1)      /* (dummy read of first memory word) */
-
-       mtc0    t0, CP0_STATUS       /* restore interrupt status on entry */
-       HAZARD_CP0
-       .set    pop
-       .endm
-
-#endif /* __ASM_MACH_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-pnx8550/nand.h b/arch/mips/include/asm/mach-pnx8550/nand.h
deleted file mode 100644 (file)
index aefbc51..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-#ifndef __PNX8550_NAND_H
-#define __PNX8550_NAND_H
-
-#define PNX8550_NAND_BASE_ADDR   0x10000000
-#define PNX8550_PCIXIO_BASE     0xBBE40000
-
-#define PNX8550_DMA_EXT_ADDR     *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x800)
-#define PNX8550_DMA_INT_ADDR     *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x804)
-#define PNX8550_DMA_TRANS_SIZE   *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x808)
-#define PNX8550_DMA_CTRL         *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x80c)
-#define PNX8550_XIO_SEL0         *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x814)
-#define PNX8550_GPXIO_ADDR       *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x820)
-#define PNX8550_GPXIO_WR         *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x824)
-#define PNX8550_GPXIO_RD         *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x828)
-#define PNX8550_GPXIO_CTRL       *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x82C)
-#define PNX8550_XIO_FLASH_CTRL   *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x830)
-#define PNX8550_GPXIO_INT_STATUS *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb0)
-#define PNX8550_GPXIO_INT_ENABLE *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb4)
-#define PNX8550_GPXIO_INT_CLEAR  *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb8)
-#define PNX8550_DMA_INT_STATUS   *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd0)
-#define PNX8550_DMA_INT_ENABLE   *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd4)
-#define PNX8550_DMA_INT_CLEAR    *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd8)
-
-#define PNX8550_XIO_SEL0_EN_16BIT    0x00800000
-#define PNX8550_XIO_SEL0_USE_ACK     0x00400000
-#define PNX8550_XIO_SEL0_REN_HIGH    0x00100000
-#define PNX8550_XIO_SEL0_REN_LOW     0x00040000
-#define PNX8550_XIO_SEL0_WEN_HIGH    0x00010000
-#define PNX8550_XIO_SEL0_WEN_LOW     0x00004000
-#define PNX8550_XIO_SEL0_WAIT        0x00000200
-#define PNX8550_XIO_SEL0_OFFSET      0x00000020
-#define PNX8550_XIO_SEL0_TYPE_68360  0x00000000
-#define PNX8550_XIO_SEL0_TYPE_NOR    0x00000008
-#define PNX8550_XIO_SEL0_TYPE_NAND   0x00000010
-#define PNX8550_XIO_SEL0_TYPE_IDE    0x00000018
-#define PNX8550_XIO_SEL0_SIZE_8MB    0x00000000
-#define PNX8550_XIO_SEL0_SIZE_16MB   0x00000002
-#define PNX8550_XIO_SEL0_SIZE_32MB   0x00000004
-#define PNX8550_XIO_SEL0_SIZE_64MB   0x00000006
-#define PNX8550_XIO_SEL0_ENAB        0x00000001
-
-#define PNX8550_SEL0_DEFAULT ((PNX8550_XIO_SEL0_EN_16BIT)  | \
-                              (PNX8550_XIO_SEL0_REN_HIGH*0)| \
-                             (PNX8550_XIO_SEL0_REN_LOW*2) | \
-                             (PNX8550_XIO_SEL0_WEN_HIGH*0)| \
-                              (PNX8550_XIO_SEL0_WEN_LOW*2) | \
-                             (PNX8550_XIO_SEL0_WAIT*4)    | \
-                             (PNX8550_XIO_SEL0_OFFSET*0)  | \
-                             (PNX8550_XIO_SEL0_TYPE_NAND) | \
-                             (PNX8550_XIO_SEL0_SIZE_32MB) | \
-                             (PNX8550_XIO_SEL0_ENAB))
-
-#define PNX8550_GPXIO_PENDING        0x00000200
-#define PNX8550_GPXIO_DONE           0x00000100
-#define PNX8550_GPXIO_CLR_DONE       0x00000080
-#define PNX8550_GPXIO_INIT           0x00000040
-#define PNX8550_GPXIO_READ_CMD       0x00000010
-#define PNX8550_GPXIO_BEN            0x0000000F
-
-#define PNX8550_XIO_FLASH_64MB       0x00200000
-#define PNX8550_XIO_FLASH_INC_DATA   0x00100000
-#define PNX8550_XIO_FLASH_CMD_PH     0x000C0000
-#define PNX8550_XIO_FLASH_CMD_PH2    0x00080000
-#define PNX8550_XIO_FLASH_CMD_PH1    0x00040000
-#define PNX8550_XIO_FLASH_CMD_PH0    0x00000000
-#define PNX8550_XIO_FLASH_ADR_PH     0x00030000
-#define PNX8550_XIO_FLASH_ADR_PH3    0x00030000
-#define PNX8550_XIO_FLASH_ADR_PH2    0x00020000
-#define PNX8550_XIO_FLASH_ADR_PH1    0x00010000
-#define PNX8550_XIO_FLASH_ADR_PH0    0x00000000
-#define PNX8550_XIO_FLASH_CMD_B(x)   ((x<<8) & 0x0000FF00)
-#define PNX8550_XIO_FLASH_CMD_A(x)   (x & 0x000000FF)
-
-#define PNX8550_XIO_INT_ACK          0x00004000
-#define PNX8550_XIO_INT_COMPL        0x00002000
-#define PNX8550_XIO_INT_NONSUP       0x00000200
-#define PNX8550_XIO_INT_ABORT        0x00000004
-
-#define PNX8550_DMA_CTRL_SINGLE_DATA 0x00000400
-#define PNX8550_DMA_CTRL_SND2XIO     0x00000200
-#define PNX8550_DMA_CTRL_FIX_ADDR    0x00000100
-#define PNX8550_DMA_CTRL_BURST_8     0x00000000
-#define PNX8550_DMA_CTRL_BURST_16    0x00000020
-#define PNX8550_DMA_CTRL_BURST_32    0x00000040
-#define PNX8550_DMA_CTRL_BURST_64    0x00000060
-#define PNX8550_DMA_CTRL_BURST_128   0x00000080
-#define PNX8550_DMA_CTRL_BURST_256   0x000000A0
-#define PNX8550_DMA_CTRL_BURST_512   0x000000C0
-#define PNX8550_DMA_CTRL_BURST_NORES 0x000000E0
-#define PNX8550_DMA_CTRL_INIT_DMA    0x00000010
-#define PNX8550_DMA_CTRL_CMD_TYPE    0x0000000F
-
-/* see PCI system arch, page 100 for the full list: */
-#define PNX8550_DMA_CTRL_PCI_CMD_READ    0x00000006
-#define PNX8550_DMA_CTRL_PCI_CMD_WRITE   0x00000007
-
-#define PNX8550_DMA_INT_STAT_ACK_DONE  (1<<14)
-#define PNX8550_DMA_INT_STAT_DMA_DONE  (1<<12)
-#define PNX8550_DMA_INT_STAT_DMA_ERR   (1<<9)
-#define PNX8550_DMA_INT_STAT_PERR5     (1<<5)
-#define PNX8550_DMA_INT_STAT_PERR4     (1<<4)
-#define PNX8550_DMA_INT_STAT_M_ABORT   (1<<2)
-#define PNX8550_DMA_INT_STAT_T_ABORT   (1<<1)
-
-#define PNX8550_DMA_INT_EN_ACK_DONE    (1<<14)
-#define PNX8550_DMA_INT_EN_DMA_DONE    (1<<12)
-#define PNX8550_DMA_INT_EN_DMA_ERR     (1<<9)
-#define PNX8550_DMA_INT_EN_PERR5       (1<<5)
-#define PNX8550_DMA_INT_EN_PERR4       (1<<4)
-#define PNX8550_DMA_INT_EN_M_ABORT     (1<<2)
-#define PNX8550_DMA_INT_EN_T_ABORT     (1<<1)
-
-#define PNX8550_DMA_INT_CLR_ACK_DONE   (1<<14)
-#define PNX8550_DMA_INT_CLR_DMA_DONE   (1<<12)
-#define PNX8550_DMA_INT_CLR_DMA_ERR    (1<<9)
-#define PNX8550_DMA_INT_CLR_PERR5      (1<<5)
-#define PNX8550_DMA_INT_CLR_PERR4      (1<<4)
-#define PNX8550_DMA_INT_CLR_M_ABORT    (1<<2)
-#define PNX8550_DMA_INT_CLR_T_ABORT    (1<<1)
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/pci.h b/arch/mips/include/asm/mach-pnx8550/pci.h
deleted file mode 100644 (file)
index b921508..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * PCI specific definitions
- *
- * Author: source@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_PCI_H
-#define __PNX8550_PCI_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#define PCI_ACCESS_READ  0
-#define PCI_ACCESS_WRITE 1
-
-#define PCI_CMD_IOR                     0x20
-#define PCI_CMD_IOW                     0x30
-#define PCI_CMD_CONFIG_READ             0xa0
-#define PCI_CMD_CONFIG_WRITE            0xb0
-
-#define PCI_IO_TIMEOUT                  1000
-#define PCI_IO_RETRY                   5
-/* Timeout for IO and CFG accesses.
-   This is in 1/1024 th of a jiffie(=10ms)
-   i.e. approx 10us */
-#define PCI_IO_JIFFIES_TIMEOUT          40
-#define PCI_IO_JIFFIES_SHIFT            10
-
-#define PCI_BYTE_ENABLE_MASK           0x0000000f
-#define PCI_CFG_BUS_SHIFT               16
-#define PCI_CFG_FUNC_SHIFT              8
-#define PCI_CFG_REG_SHIFT               2
-
-#define PCI_BASE                  0x1be00000
-#define PCI_SETUP                 0x00040010
-#define PCI_DIS_REQGNT           (1<<30)
-#define PCI_DIS_REQGNTA          (1<<29)
-#define PCI_DIS_REQGNTB          (1<<28)
-#define PCI_D2_SUPPORT           (1<<27)
-#define PCI_D1_SUPPORT           (1<<26)
-#define PCI_EN_TA                (1<<24)
-#define PCI_EN_PCI2MMI           (1<<23)
-#define PCI_EN_XIO               (1<<22)
-#define PCI_BASE18_PREF          (1<<21)
-#define SIZE_16M                 0x3
-#define SIZE_32M                 0x4
-#define SIZE_64M                 0x5
-#define SIZE_128M                0x6
-#define PCI_SETUP_BASE18_SIZE(X) (X<<18)
-#define PCI_SETUP_BASE18_EN      (1<<17)
-#define PCI_SETUP_BASE14_PREF    (1<<16)
-#define PCI_SETUP_BASE14_SIZE(X) (X<<12)
-#define PCI_SETUP_BASE14_EN      (1<<11)
-#define PCI_SETUP_BASE10_PREF    (1<<10)
-#define PCI_SETUP_BASE10_SIZE(X) (X<<7)
-#define PCI_SETUP_CFGMANAGE_EN   (1<<1)
-#define PCI_SETUP_PCIARB_EN      (1<<0)
-
-#define PCI_CTRL                  0x040014
-#define PCI_SWPB_DCS_PCI         (1<<16)
-#define PCI_SWPB_PCI_PCI         (1<<15)
-#define PCI_SWPB_PCI_DCS         (1<<14)
-#define PCI_REG_WR_POST          (1<<13)
-#define PCI_XIO_WR_POST          (1<<12)
-#define PCI_PCI2_WR_POST         (1<<13)
-#define PCI_PCI1_WR_POST         (1<<12)
-#define PCI_SERR_SEEN            (1<<11)
-#define PCI_B10_SPEC_RD          (1<<6)
-#define PCI_B14_SPEC_RD          (1<<5)
-#define PCI_B18_SPEC_RD          (1<<4)
-#define PCI_B10_NOSUBWORD        (1<<3)
-#define PCI_B14_NOSUBWORD        (1<<2)
-#define PCI_B18_NOSUBWORD        (1<<1)
-#define PCI_RETRY_TMREN          (1<<0)
-
-#define PCI_BASE1_LO              0x040018
-#define PCI_BASE1_HI              0x04001C
-#define PCI_BASE2_LO              0x040020
-#define PCI_BASE2_HI              0x040024
-#define PCI_RDLIFETIM             0x040028
-#define PCI_GPPM_ADDR             0x04002C
-#define PCI_GPPM_WDAT             0x040030
-#define PCI_GPPM_RDAT             0x040034
-#define PCI_GPPM_CTRL             0x040038
-#define GPPM_DONE                (1<<10)
-#define INIT_PCI_CYCLE           (1<<9)
-#define GPPM_CMD(X)              (((X)&0xf)<<4)
-#define GPPM_BYTEEN(X)           ((X)&0xf)
-#define PCI_UNLOCKREG             0x04003C
-#define UNLOCK_SSID(X)           (((X)&0xff)<<8)
-#define UNLOCK_SETUP(X)          (((X)&0xff)<<0)
-#define UNLOCK_MAGIC             0xCA
-#define PCI_DEV_VEND_ID           0x040040
-#define DEVICE_ID(X)             (((X)>>16)&0xffff)
-#define VENDOR_ID(X)             (((X)&0xffff))
-#define PCI_CFG_CMDSTAT           0x040044
-#define PCI_CFG_STATUS(X)            (((X)>>16)&0xffff)
-#define PCI_CFG_COMMAND(X)           ((X)&0xffff)
-#define PCI_CLASS_REV             0x040048
-#define PCI_CLASSCODE(X)         (((X)>>8)&0xffffff)
-#define PCI_REVID(X)             ((X)&0xff)
-#define PCI_LAT_TMR     0x04004c
-#define PCI_BASE10      0x040050
-#define PCI_BASE14      0x040054
-#define PCI_BASE18      0x040058
-#define PCI_SUBSYS_ID   0x04006c
-#define PCI_CAP_PTR     0x040074
-#define PCI_CFG_MISC    0x04007c
-#define PCI_PMC         0x040080
-#define PCI_PWR_STATE   0x040084
-#define PCI_IO          0x040088
-#define PCI_SLVTUNING   0x04008C
-#define PCI_DMATUNING   0x040090
-#define PCI_DMAEADDR    0x040800
-#define PCI_DMAIADDR    0x040804
-#define PCI_DMALEN      0x040808
-#define PCI_DMACTRL     0x04080C
-#define PCI_XIOCTRL     0x040810
-#define PCI_SEL0PROF    0x040814
-#define PCI_SEL1PROF    0x040818
-#define PCI_SEL2PROF    0x04081C
-#define PCI_GPXIOADDR   0x040820
-#define PCI_NANDCTRLS   0x400830
-#define PCI_SEL3PROF    0x040834
-#define PCI_SEL4PROF    0x040838
-#define PCI_GPXIO_STAT  0x040FB0
-#define PCI_GPXIO_IMASK 0x040FB4
-#define PCI_GPXIO_ICLR  0x040FB8
-#define PCI_GPXIO_ISET  0x040FBC
-#define PCI_GPPM_STATUS 0x040FC0
-#define GPPM_DONE      (1<<10)
-#define GPPM_ERR       (1<<9)
-#define GPPM_MPAR_ERR  (1<<8)
-#define GPPM_PAR_ERR   (1<<7)
-#define GPPM_R_MABORT  (1<<2)
-#define GPPM_R_TABORT  (1<<1)
-#define PCI_GPPM_IMASK  0x040FC4
-#define PCI_GPPM_ICLR   0x040FC8
-#define PCI_GPPM_ISET   0x040FCC
-#define PCI_DMA_STATUS  0x040FD0
-#define PCI_DMA_IMASK   0x040FD4
-#define PCI_DMA_ICLR    0x040FD8
-#define PCI_DMA_ISET    0x040FDC
-#define PCI_ISTATUS     0x040FE0
-#define PCI_IMASK       0x040FE4
-#define PCI_ICLR        0x040FE8
-#define PCI_ISET        0x040FEC
-#define PCI_MOD_ID      0x040FFC
-
-/*
- *  PCI configuration cycle AD bus definition
- */
-/* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF           0
-#define PCI_CFG_TYPE0_FUNC_SHF          8
-
-/* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF           0
-#define PCI_CFG_TYPE1_FUNC_SHF          8
-#define PCI_CFG_TYPE1_DEV_SHF           11
-#define PCI_CFG_TYPE1_BUS_SHF           16
-
-/*
- *  Ethernet device DP83816 definition
- */
-#define DP83816_IRQ_ETHER               66
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/uart.h b/arch/mips/include/asm/mach-pnx8550/uart.h
deleted file mode 100644 (file)
index ad7608d..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __IP3106_UART_H
-#define __IP3106_UART_H
-
-#include <int.h>
-
-/* early macros for kgdb use. fixme: clean this up */
-
-#define UART_BASE              0xbbe4a000      /* PNX8550 */
-
-#define PNX8550_UART_PORT0     (UART_BASE)
-#define PNX8550_UART_PORT1     (UART_BASE + 0x1000)
-
-#define PNX8550_UART_INT(x)            (PNX8550_INT_GIC_MIN+19+x)
-#define IRQ_TO_UART(x)                 (x-PNX8550_INT_GIC_MIN-19)
-
-/* early macros needed for prom/kgdb */
-
-#define ip3106_lcr(base, port)    *(volatile u32 *)(base+(port*0x1000) + 0x000)
-#define ip3106_mcr(base, port)   *(volatile u32 *)(base+(port*0x1000) + 0x004)
-#define ip3106_baud(base, port)  *(volatile u32 *)(base+(port*0x1000) + 0x008)
-#define ip3106_cfg(base, port)   *(volatile u32 *)(base+(port*0x1000) + 0x00C)
-#define ip3106_fifo(base, port)         *(volatile u32 *)(base+(port*0x1000) + 0x028)
-#define ip3106_istat(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFE0)
-#define ip3106_ien(base, port)   *(volatile u32 *)(base+(port*0x1000) + 0xFE4)
-#define ip3106_iclr(base, port)  *(volatile u32 *)(base+(port*0x1000) + 0xFE8)
-#define ip3106_iset(base, port)  *(volatile u32 *)(base+(port*0x1000) + 0xFEC)
-#define ip3106_pd(base, port)    *(volatile u32 *)(base+(port*0x1000) + 0xFF4)
-#define ip3106_mid(base, port)   *(volatile u32 *)(base+(port*0x1000) + 0xFFC)
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/usb.h b/arch/mips/include/asm/mach-pnx8550/usb.h
deleted file mode 100644 (file)
index 483b7fc..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *  USB specific definitions
- *
- * Author: source@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_USB_H
-#define __PNX8550_USB_H
-
-/*
- * USB Host controller
- */
-
-#define PNX8550_USB_OHCI_OP_BASE       0x1be48000
-#define PNX8550_USB_OHCI_OP_LEN                0x1000
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/war.h b/arch/mips/include/asm/mach-pnx8550/war.h
deleted file mode 100644 (file)
index de8894c..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_PNX8550_WAR_H
-#define __ASM_MIPS_MACH_PNX8550_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR    0
-#define R4600_V1_HIT_CACHEOP_WAR       0
-#define R4600_V2_HIT_CACHEOP_WAR       0
-#define R5432_CP0_INTERRUPT_WAR                0
-#define BCM1250_M3_WAR                 0
-#define SIBYTE_1956_WAR                        0
-#define MIPS4K_ICACHE_REFILL_WAR       0
-#define MIPS_CACHE_SYNC_WAR            0
-#define TX49XX_ICACHE_INDEX_INV_WAR    0
-#define ICACHE_REFILLS_WORKAROUND_WAR  0
-#define R10000_LLSC_WAR                        0
-#define MIPS34K_MISSED_ITLB_WAR                0
-
-#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
index c7077a6..b341108 100644 (file)
@@ -23,9 +23,9 @@
 #include <linux/platform_device.h>
 #include <asm/mach-powertv/asic_regs.h>
 
-#define DVR_CAPABLE     (1<<0)
-#define PCIE_CAPABLE    (1<<1)
-#define FFS_CAPABLE     (1<<2)
+#define DVR_CAPABLE    (1<<0)
+#define PCIE_CAPABLE   (1<<1)
+#define FFS_CAPABLE    (1<<2)
 #define DISPLAY_CAPABLE (1<<3)
 
 /* Platform Family types
@@ -111,7 +111,7 @@ enum sys_reboot_type {
                                         * Older drivers may report as
                                         * userReboot. */
        sys_hardware_reset = 0x09,      /* HW watchdog or front-panel
-                                        * reset button reset.  Older
+                                        * reset button reset.  Older
                                         * drivers may report as
                                         * userReboot. */
        sys_watchdogInterrupt = 0x0A    /* Pre-watchdog interrupt */
index deecb26..06712ab 100644 (file)
@@ -49,8 +49,8 @@ enum asic_type {
 #define UART1_INTEN    uart1_inten
 #define UART1_CONFIG1  uart1_config1
 #define UART1_CONFIG2  uart1_config2
-#define UART1_DIVISORHI        uart1_divisorhi
-#define UART1_DIVISORLO        uart1_divisorlo
+#define UART1_DIVISORHI uart1_divisorhi
+#define UART1_DIVISORLO uart1_divisorlo
 #define UART1_DATA     uart1_data
 #define UART1_STATUS   uart1_status
 
index 3537164..f831672 100644 (file)
@@ -4,7 +4,7 @@
  * for more details.
  *
  * Version from mach-generic modified to support PowerTV port
- * Portions Copyright (C) 2009  Cisco Systems, Inc.
+ * Portions Copyright (C) 2009 Cisco Systems, Inc.
  * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
  *
  */
index 4fd652c..6c463be 100644 (file)
@@ -16,7 +16,7 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
-#ifndef        _ASM_MACH_POWERTV_INTERRUPTS_H_
+#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
 #define _ASM_MACH_POWERTV_INTERRUPTS_H_
 
 /*
@@ -49,9 +49,9 @@
                                                 * glue logic inside SPARC ILC
                                                 * (see INT_SBAG_STAT, below,
                                                 * for individual interrupts) */
-#define irq_qam_b_fec          (ibase+116)     /* QAM  B FEC Interrupt */
+#define irq_qam_b_fec          (ibase+116)     /* QAM  B FEC Interrupt */
 #define irq_qam_a_fec          (ibase+115)     /* QAM A FEC Interrupt */
-/* 114 unused  (bit 18) */
+/* 114 unused  (bit 18) */
 #define irq_mailbox            (ibase+113)     /* Mailbox Debug Interrupt  --
                                                 * Ored by glue logic inside
                                                 * SPARC ILC (see
@@ -99,9 +99,9 @@
 #define irq_sata1              (ibase+87)      /* SATA 1 Interrupt */
 #define irq_dtcp               (ibase+86)      /* DTCP Interrupt */
 #define irq_pciexp1            (ibase+85)      /* PCI Express 1 Interrupt */
-/* 84 unused   (bit 20) */
-/* 83 unused   (bit 19) */
-/* 82 unused   (bit 18) */
+/* 84 unused   (bit 20) */
+/* 83 unused   (bit 19) */
+/* 82 unused   (bit 18) */
 #define irq_sata2              (ibase+81)      /* SATA2 Interrupt */
 #define irq_uart2              (ibase+80)      /* UART2 Interrupt */
 #define irq_legacy_usb         (ibase+79)      /* Legacy USB Host ISR (1.1
 #define irq_mod_dma            (ibase+70)      /* Modulator DMA Interrupt */
 #define irq_byte_eng1          (ibase+69)      /* Byte Engine Interrupt [1] */
 #define irq_byte_eng0          (ibase+68)      /* Byte Engine Interrupt [0] */
-/* 67 unused   (bit 03) */
-/* 66 unused   (bit 02) */
-/* 65 unused   (bit 01) */
-/* 64 unused   (bit 00) */
+/* 67 unused   (bit 03) */
+/* 66 unused   (bit 02) */
+/* 65 unused   (bit 01) */
+/* 64 unused   (bit 00) */
 /*------------- Register: int_stat_1 */
-/* 63 unused   (bit 31) */
-/* 62 unused   (bit 30) */
-/* 61 unused   (bit 29) */
-/* 60 unused   (bit 28) */
-/* 59 unused   (bit 27) */
-/* 58 unused   (bit 26) */
-/* 57 unused   (bit 25) */
-/* 56 unused   (bit 24) */
+/* 63 unused   (bit 31) */
+/* 62 unused   (bit 30) */
+/* 61 unused   (bit 29) */
+/* 60 unused   (bit 28) */
+/* 59 unused   (bit 27) */
+/* 58 unused   (bit 26) */
+/* 57 unused   (bit 25) */
+/* 56 unused   (bit 24) */
 #define irq_buf_dma_mem2mem    (ibase+55)      /* BufDMA Memory to Memory
                                                 * Interrupt */
-#define irq_buf_dma_usbtransmit        (ibase+54)      /* BufDMA USB Transmit
+#define irq_buf_dma_usbtransmit (ibase+54)     /* BufDMA USB Transmit
                                                 * Interrupt */
 #define irq_buf_dma_qpskpodtransmit (ibase+53) /* BufDMA QPSK/POD Tramsit
                                                 * Interrupt */
                                                 * Interrupt */
 #define irq_buf_dma_usbrecv    (ibase+51)      /* BufDMA USB Receive
                                                 * Interrupt */
-#define irq_buf_dma_qpskpodrecv        (ibase+50)      /* BufDMA QPSK/POD Receive
+#define irq_buf_dma_qpskpodrecv (ibase+50)     /* BufDMA QPSK/POD Receive
                                                 * Interrupt */
 #define irq_buf_dma_recv_error (ibase+49)      /* BufDMA Receive Error
                                                 * Interrupt */
                                                 * Module */
 #define irq_gpio2              (ibase+37)      /* GP I/O IRQ 2 - From GP I/O
                                                 * Module (ABE_intN) */
-#define irq_pcrcmplt1          (ibase+36)      /* PCR Capture Complete  or
+#define irq_pcrcmplt1          (ibase+36)      /* PCR Capture Complete  or
                                                 * Discontinuity 1 */
 #define irq_pcrcmplt2          (ibase+35)      /* PCR Capture Complete or
                                                 * Discontinuity 2 */
 #define irq_qpsk_hecerr                (ibase+11)      /* QPSK HEC Error Interrupt */
 #define irq_qpsk_crcerr                (ibase+10)      /* QPSK AAL-5 CRC Error
                                                 * Interrupt */
-/* 9 unused    (bit 09) */
-/* 8 unused    (bit 08) */
-#define irq_psicrcerr          (ibase+7)       /* QAM PSI CRC Error
+/* 9 unused    (bit 09) */
+/* 8 unused    (bit 08) */
+#define irq_psicrcerr          (ibase+7)       /* QAM PSI CRC Error
                                                 * Interrupt */
-#define irq_psilength_err      (ibase+6)       /* QAM PSI Length Error
+#define irq_psilength_err      (ibase+6)       /* QAM PSI Length Error
                                                 * Interrupt */
-#define irq_esfforward         (ibase+5)       /* ESF Interrupt Mark From
+#define irq_esfforward         (ibase+5)       /* ESF Interrupt Mark From
                                                 * Forward Path Reference -
                                                 * every 3ms when forward Mbits
                                                 * and forward slot control
                                                 * bytes are updated. */
-#define irq_esfreverse         (ibase+4)       /* ESF Interrupt Mark from
+#define irq_esfreverse         (ibase+4)       /* ESF Interrupt Mark from
                                                 * Reverse Path Reference -
                                                 * delayed from forward mark by
                                                 * the ranging delay plus a
                                                 * 1.554 M upstream rates and
                                                 * every 6 ms for 256K upstream
                                                 * rate. */
-#define irq_aloha_timeout      (ibase+3)       /* Slotted-Aloha timeout on
+#define irq_aloha_timeout      (ibase+3)       /* Slotted-Aloha timeout on
                                                 * Channel 1. */
-#define irq_reservation                (ibase+2)       /* Partial (or Incremental)
+#define irq_reservation                (ibase+2)       /* Partial (or Incremental)
                                                 * Reservation Message Completed
                                                 * or Slotted aloha verify for
                                                 * channel 1. */
-#define irq_aloha3             (ibase+1)       /* Slotted-Aloha Message Verify
+#define irq_aloha3             (ibase+1)       /* Slotted-Aloha Message Verify
                                                 * Interrupt or Reservation
                                                 * increment completed for
                                                 * channel 3. */
-#define irq_mpeg_d             (ibase+0)       /* MPEG Decoder Interrupt */
+#define irq_mpeg_d             (ibase+0)       /* MPEG Decoder Interrupt */
 #endif /* _ASM_MACH_POWERTV_INTERRUPTS_H_ */
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
new file mode 100644 (file)
index 0000000..5a508f9
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ *  Ralink SoC register definitions
+ *
+ *  Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#ifndef _RALINK_REGS_H_
+#define _RALINK_REGS_H_
+
+extern __iomem void *rt_sysc_membase;
+extern __iomem void *rt_memc_membase;
+
+static inline void rt_sysc_w32(u32 val, unsigned reg)
+{
+       __raw_writel(val, rt_sysc_membase + reg);
+}
+
+static inline u32 rt_sysc_r32(unsigned reg)
+{
+       return __raw_readl(rt_sysc_membase + reg);
+}
+
+static inline void rt_memc_w32(u32 val, unsigned reg)
+{
+       __raw_writel(val, rt_memc_membase + reg);
+}
+
+static inline u32 rt_memc_r32(unsigned reg)
+{
+       return __raw_readl(rt_memc_membase + reg);
+}
+
+#endif /* _RALINK_REGS_H_ */
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
new file mode 100644 (file)
index 0000000..7d344f2
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT305X_REGS_H_
+#define _RT305X_REGS_H_
+
+enum rt305x_soc_type {
+       RT305X_SOC_UNKNOWN = 0,
+       RT305X_SOC_RT3050,
+       RT305X_SOC_RT3052,
+       RT305X_SOC_RT3350,
+       RT305X_SOC_RT3352,
+       RT305X_SOC_RT5350,
+};
+
+extern enum rt305x_soc_type rt305x_soc;
+
+static inline int soc_is_rt3050(void)
+{
+       return rt305x_soc == RT305X_SOC_RT3050;
+}
+
+static inline int soc_is_rt3052(void)
+{
+       return rt305x_soc == RT305X_SOC_RT3052;
+}
+
+static inline int soc_is_rt305x(void)
+{
+       return soc_is_rt3050() || soc_is_rt3052();
+}
+
+static inline int soc_is_rt3350(void)
+{
+       return rt305x_soc == RT305X_SOC_RT3350;
+}
+
+static inline int soc_is_rt3352(void)
+{
+       return rt305x_soc == RT305X_SOC_RT3352;
+}
+
+static inline int soc_is_rt5350(void)
+{
+       return rt305x_soc == RT305X_SOC_RT5350;
+}
+
+#define RT305X_SYSC_BASE               0x10000000
+
+#define SYSC_REG_CHIP_NAME0            0x00
+#define SYSC_REG_CHIP_NAME1            0x04
+#define SYSC_REG_CHIP_ID               0x0c
+#define SYSC_REG_SYSTEM_CONFIG         0x10
+
+#define RT3052_CHIP_NAME0              0x30335452
+#define RT3052_CHIP_NAME1              0x20203235
+
+#define RT3350_CHIP_NAME0              0x33335452
+#define RT3350_CHIP_NAME1              0x20203035
+
+#define RT3352_CHIP_NAME0              0x33335452
+#define RT3352_CHIP_NAME1              0x20203235
+
+#define RT5350_CHIP_NAME0              0x33355452
+#define RT5350_CHIP_NAME1              0x20203035
+
+#define CHIP_ID_ID_MASK                        0xff
+#define CHIP_ID_ID_SHIFT               8
+#define CHIP_ID_REV_MASK               0xff
+
+#define RT305X_SYSCFG_CPUCLK_SHIFT             18
+#define RT305X_SYSCFG_CPUCLK_MASK              0x1
+#define RT305X_SYSCFG_CPUCLK_LOW               0x0
+#define RT305X_SYSCFG_CPUCLK_HIGH              0x1
+
+#define RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT      2
+#define RT305X_SYSCFG_CPUCLK_MASK              0x1
+#define RT305X_SYSCFG_SRAM_CS0_MODE_WDT                0x1
+
+#define RT3352_SYSCFG0_CPUCLK_SHIFT    8
+#define RT3352_SYSCFG0_CPUCLK_MASK     0x1
+#define RT3352_SYSCFG0_CPUCLK_LOW      0x0
+#define RT3352_SYSCFG0_CPUCLK_HIGH     0x1
+
+#define RT5350_SYSCFG0_CPUCLK_SHIFT    8
+#define RT5350_SYSCFG0_CPUCLK_MASK     0x3
+#define RT5350_SYSCFG0_CPUCLK_360      0x0
+#define RT5350_SYSCFG0_CPUCLK_320      0x2
+#define RT5350_SYSCFG0_CPUCLK_300      0x3
+
+/* multi function gpio pins */
+#define RT305X_GPIO_I2C_SD             1
+#define RT305X_GPIO_I2C_SCLK           2
+#define RT305X_GPIO_SPI_EN             3
+#define RT305X_GPIO_SPI_CLK            4
+/* GPIO 7-14 is shared between UART0, PCM  and I2S interfaces */
+#define RT305X_GPIO_7                  7
+#define RT305X_GPIO_10                 10
+#define RT305X_GPIO_14                 14
+#define RT305X_GPIO_UART1_TXD          15
+#define RT305X_GPIO_UART1_RXD          16
+#define RT305X_GPIO_JTAG_TDO           17
+#define RT305X_GPIO_JTAG_TDI           18
+#define RT305X_GPIO_MDIO_MDC           22
+#define RT305X_GPIO_MDIO_MDIO          23
+#define RT305X_GPIO_SDRAM_MD16         24
+#define RT305X_GPIO_SDRAM_MD31         39
+#define RT305X_GPIO_GE0_TXD0           40
+#define RT305X_GPIO_GE0_RXCLK          51
+
+#define RT305X_GPIO_MODE_I2C           BIT(0)
+#define RT305X_GPIO_MODE_SPI           BIT(1)
+#define RT305X_GPIO_MODE_UART0_SHIFT   2
+#define RT305X_GPIO_MODE_UART0_MASK    0x7
+#define RT305X_GPIO_MODE_UART0(x)      ((x) << RT305X_GPIO_MODE_UART0_SHIFT)
+#define RT305X_GPIO_MODE_UARTF         0x0
+#define RT305X_GPIO_MODE_PCM_UARTF     0x1
+#define RT305X_GPIO_MODE_PCM_I2S       0x2
+#define RT305X_GPIO_MODE_I2S_UARTF     0x3
+#define RT305X_GPIO_MODE_PCM_GPIO      0x4
+#define RT305X_GPIO_MODE_GPIO_UARTF    0x5
+#define RT305X_GPIO_MODE_GPIO_I2S      0x6
+#define RT305X_GPIO_MODE_GPIO          0x7
+#define RT305X_GPIO_MODE_UART1         BIT(5)
+#define RT305X_GPIO_MODE_JTAG          BIT(6)
+#define RT305X_GPIO_MODE_MDIO          BIT(7)
+#define RT305X_GPIO_MODE_SDRAM         BIT(8)
+#define RT305X_GPIO_MODE_RGMII         BIT(9)
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/war.h b/arch/mips/include/asm/mach-ralink/war.h
new file mode 100644 (file)
index 0000000..a7b712c
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MACH_RALINK_WAR_H
+#define __ASM_MACH_RALINK_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR    0
+#define R4600_V1_HIT_CACHEOP_WAR       0
+#define R4600_V2_HIT_CACHEOP_WAR       0
+#define R5432_CP0_INTERRUPT_WAR                0
+#define BCM1250_M3_WAR                 0
+#define SIBYTE_1956_WAR                        0
+#define MIPS4K_ICACHE_REFILL_WAR       0
+#define MIPS_CACHE_SYNC_WAR            0
+#define TX49XX_ICACHE_INDEX_INV_WAR    0
+#define RM9000_CDEX_SMP_WAR            0
+#define ICACHE_REFILLS_WORKAROUND_WAR  0
+#define R10000_LLSC_WAR                        0
+#define MIPS34K_MISSED_ITLB_WAR                0
+
+#endif /* __ASM_MACH_RALINK_WAR_H */
index 291e2cf..e1cad0c 100644 (file)
@@ -138,4 +138,4 @@ struct ddr_ram {
 #define RC32434_DLLED_DBE_BIT          0
 #define RC32434_DLLED_DTE_BIT          1
 
-#endif  /* _ASM_RC32434_DDR_H_ */
+#endif /* _ASM_RC32434_DDR_H_ */
index 5f898b5..4322191 100644 (file)
@@ -5,7 +5,7 @@
  * DMA register definition.
  *
  * Author : ryan.holmQVist@idt.com
- * Date   : 20011005
+ * Date          : 20011005
  */
 
 #ifndef __ASM_RC32434_DMA_H
@@ -71,10 +71,10 @@ struct dma_reg {
 #define DMA_CHAN_DONE_BIT              (1 << 1)
 #define DMA_CHAN_MODE_BIT              (1 << 2)
 #define DMA_CHAN_MODE_MSK              0x0000000c
-#define  DMA_CHAN_MODE_AUTO            0
-#define  DMA_CHAN_MODE_BURST           1
-#define  DMA_CHAN_MODE_XFRT            2
-#define  DMA_CHAN_MODE_RSVD            3
+#define         DMA_CHAN_MODE_AUTO             0
+#define         DMA_CHAN_MODE_BURST            1
+#define         DMA_CHAN_MODE_XFRT             2
+#define         DMA_CHAN_MODE_RSVD             3
 #define DMA_CHAN_ACT_BIT               (1 << 4)
 
 /* DMA status registers */
@@ -100,4 +100,4 @@ struct dma_channel {
        struct dma_reg ch[DMA_CHAN_COUNT];
 };
 
-#endif  /* __ASM_RC32434_DMA_H */
+#endif /* __ASM_RC32434_DMA_H */
index 173a9f9..28c5406 100644 (file)
@@ -5,7 +5,7 @@
  * DMA register definition.
  *
  * Author : ryan.holmQVist@idt.com
- * Date   : 20011005
+ * Date          : 20011005
  */
 
 #ifndef _ASM_RC32434_DMA_V_H_
@@ -49,4 +49,4 @@ static inline void rc32434_chain_dma(struct dma_reg *ch, u32 dma_addr)
        __raw_writel(dma_addr, &ch->dmandptr);
 }
 
-#endif  /* _ASM_RC32434_DMA_V_H_ */
+#endif /* _ASM_RC32434_DMA_V_H_ */
index a25cbc5..c2645fa 100644 (file)
@@ -26,8 +26,8 @@
  *
  */
 
-#ifndef        __ASM_RC32434_ETH_H
-#define        __ASM_RC32434_ETH_H
+#ifndef __ASM_RC32434_ETH_H
+#define __ASM_RC32434_ETH_H
 
 
 #define ETH0_BASE_ADDR         0x18060000
@@ -217,4 +217,4 @@ struct eth_regs {
 #define ETH_TX_LE              (1 << 16)
 #define ETH_TX_CC              0x001E0000
 
-#endif  /* __ASM_RC32434_ETH_H */
+#endif /* __ASM_RC32434_ETH_H */
index 12ee8d5..4dee0a3 100644 (file)
@@ -5,7 +5,7 @@
  * GPIO register definition.
  *
  * Author : ryan.holmQVist@idt.com
- * Date   : 20011005
+ * Date          : 20011005
  * Copyright (C) 2001, 2002 Ryan Holm <ryan.holmQVist@idt.com>
  * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
  */
@@ -26,9 +26,9 @@
 #define irq_to_gpio(irq)       (irq - (8 + 4 * 32))
 
 struct rb532_gpio_reg {
-       u32   gpiofunc;   /* GPIO Function Register
+       u32   gpiofunc;   /* GPIO Function Register
                           * gpiofunc[x]==0 bit = gpio
-                          * func[x]==1  bit = altfunc
+                          * func[x]==1  bit = altfunc
                           */
        u32   gpiocfg;    /* GPIO Configuration Register
                           * gpiocfg[x]==0 bit = input
index 023a5b1..b76dec9 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ASM_RC32434_IRQ_H
 #define __ASM_RC32434_IRQ_H
 
-#define NR_IRQS        256
+#define NR_IRQS 256
 
 #include <asm/mach-generic/irq.h>
 #include <asm/mach-rc32434/rb.h>
 
 #define UART0_IRQ              (GROUP3_IRQ_BASE + 0)
 
-#define ETH0_DMA_RX_IRQ        (GROUP1_IRQ_BASE + 0)
-#define ETH0_DMA_TX_IRQ        (GROUP1_IRQ_BASE + 1)
-#define ETH0_RX_OVR_IRQ        (GROUP3_IRQ_BASE + 9)
-#define ETH0_TX_UND_IRQ        (GROUP3_IRQ_BASE + 10)
+#define ETH0_DMA_RX_IRQ                (GROUP1_IRQ_BASE + 0)
+#define ETH0_DMA_TX_IRQ                (GROUP1_IRQ_BASE + 1)
+#define ETH0_RX_OVR_IRQ                (GROUP3_IRQ_BASE + 9)
+#define ETH0_TX_UND_IRQ                (GROUP3_IRQ_BASE + 10)
 
 #define GPIO_MAPPED_IRQ_BASE   GROUP4_IRQ_BASE
 #define GPIO_MAPPED_IRQ_GROUP  4
 
-#endif  /* __ASM_RC32434_IRQ_H */
+#endif /* __ASM_RC32434_IRQ_H */
index 410638f..6f40d15 100644 (file)
@@ -151,11 +151,11 @@ struct pci_msu {
 #define         PCI_CFGA_REG_PBA2      (0x18 >> 2)     /* use PCIPBA_ */
 #define         PCI_CFGA_REG_PBA3      (0x1c >> 2)     /* use PCIPBA_ */
 #define         PCI_CFGA_REG_SUBSYS    (0x2c >> 2)     /* use PCFGSS_ */
-#define  PCI_CFGA_REG_3C       (0x3C >> 2)     /* use PCFG3C_ */
+#define         PCI_CFGA_REG_3C        (0x3C >> 2)     /* use PCFG3C_ */
 #define         PCI_CFGA_REG_PBBA0C    (0x44 >> 2)     /* use PCIPBAC_ */
-#define  PCI_CFGA_REG_PBA0M    (0x48 >> 2)
+#define         PCI_CFGA_REG_PBA0M     (0x48 >> 2)
 #define         PCI_CFGA_REG_PBA1C     (0x4c >> 2)     /* use PCIPBAC_ */
-#define  PCI_CFGA_REG_PBA1M    (0x50 >> 2)
+#define         PCI_CFGA_REG_PBA1M     (0x50 >> 2)
 #define         PCI_CFGA_REG_PBA2C     (0x54 >> 2)     /* use PCIPBAC_ */
 #define         PCI_CFGA_REG_PBA2M     (0x58 >> 2)
 #define         PCI_CFGA_REG_PBA3C     (0x5c >> 2)     /* use PCIPBAC_ */
@@ -164,9 +164,9 @@ struct pci_msu {
 #define PCI_CFGA_FUNC_BIT      8
 #define PCI_CFGA_FUNC          0x00000700
 #define PCI_CFGA_DEV_BIT       11
-#define        PCI_CFGA_DEV            0x0000f800
-#define        PCI_CFGA_DEV_INTERN     0
-#define        PCI_CFGA_BUS_BIT        16
+#define PCI_CFGA_DEV           0x0000f800
+#define PCI_CFGA_DEV_INTERN    0
+#define PCI_CFGA_BUS_BIT       16
 #define PCI CFGA_BUS           0x00ff0000
 #define PCI_CFGA_BUS_TYPE0     0
 #define PCI_CFGA_EN            (1 << 31)
@@ -201,13 +201,13 @@ struct pci_msu {
 #define PCI_PBAC_P             (1 << 1)
 #define PCI_PBAC_SIZE_BIT      2
 #define PCI_PBAC_SIZE          0x0000007c
-#define        PCI_PBAC_SB             (1 << 7)
-#define        PCI_PBAC_PP             (1 << 8)
+#define PCI_PBAC_SB            (1 << 7)
+#define PCI_PBAC_PP            (1 << 8)
 #define PCI_PBAC_MR_BIT                9
 #define PCI_PBAC_MR            0x00000600
 #define         PCI_PBAC_MR_RD         0
 #define         PCI_PBAC_MR_RD_LINE    1
-#define  PCI_PBAC_MR_RD_MULT   2
+#define         PCI_PBAC_MR_RD_MULT    2
 #define PCI_PBAC_MRL           (1 << 11)
 #define PCI_PBAC_MRM           (1 << 12)
 #define PCI_PBAC_TRP           (1 << 13)
@@ -227,14 +227,14 @@ struct pci_msu {
  */
 
 #define PCI_LBAC_MSI           (1 << 0)
-#define  PCI_LBAC_MSI_MEM      0
-#define  PCI_LBAC_MSI_IO       1
+#define         PCI_LBAC_MSI_MEM       0
+#define         PCI_LBAC_MSI_IO        1
 #define PCI_LBAC_SIZE_BIT      2
 #define PCI_LBAC_SIZE          0x0000007c
 #define PCI_LBAC_SB            (1 << 7)
 #define PCI_LBAC_RT            (1 << 8)
-#define  PCI_LBAC_RT_NO_PREF   0
-#define  PCI_LBAC_RT_PREF      1
+#define         PCI_LBAC_RT_NO_PREF    0
+#define         PCI_LBAC_RT_PREF       1
 
 /*
  * PCI Local Base Address [0|1|2|3] Mapping Register
@@ -279,16 +279,16 @@ struct pci_msu {
 #define PCI_DMAD_PT            0x00c00000      /* preferred transaction field */
 /* These are for reads (DMA channel 8) */
 #define PCI_DMAD_DEVCMD_MR     0               /* memory read */
-#define        PCI_DMAD_DEVCMD_MRL     1               /* memory read line */
-#define        PCI_DMAD_DEVCMD_MRM     2               /* memory read multiple */
-#define        PCI_DMAD_DEVCMD_IOR     3               /* I/O read */
+#define PCI_DMAD_DEVCMD_MRL    1               /* memory read line */
+#define PCI_DMAD_DEVCMD_MRM    2               /* memory read multiple */
+#define PCI_DMAD_DEVCMD_IOR    3               /* I/O read */
 /* These are for writes (DMA channel 9) */
 #define PCI_DMAD_DEVCMD_MW     0               /* memory write */
-#define        PCI_DMAD_DEVCMD_MWI     1               /* memory write invalidate */
-#define        PCI_DMAD_DEVCMD_IOW     3               /* I/O write */
+#define PCI_DMAD_DEVCMD_MWI    1               /* memory write invalidate */
+#define PCI_DMAD_DEVCMD_IOW    3               /* I/O write */
 
 /* Swap byte field applies to both DMA channel 8 and 9 */
-#define        PCI_DMAD_SB             (1 << 24)       /* swap byte field */
+#define PCI_DMAD_SB            (1 << 24)       /* swap byte field */
 
 
 /*
@@ -309,7 +309,7 @@ struct pci_msu {
 #define PCI_MSU_M1             (1 << 1)
 #define PCI_MSU_DB             (1 << 2)
 
-#define PCI_MSG_ADDR           0xB8088010
+#define PCI_MSG_ADDR           0xB8088010
 #define PCI0_ADDR              0xB8080000
 #define rc32434_pci ((struct pci_reg *) PCI0_ADDR)
 #define rc32434_pci_msg ((struct pci_msu *) PCI_MSG_ADDR)
@@ -331,9 +331,9 @@ struct pci_msu {
 #define PCILBA_SIZE_MASK       0x1F
 #define SIZE_256MB             0x1C
 #define SIZE_128MB             0x1B
-#define SIZE_64MB               0x1A
+#define SIZE_64MB              0x1A
 #define SIZE_32MB              0x19
-#define SIZE_16MB               0x18
+#define SIZE_16MB              0x18
 #define SIZE_4MB               0x16
 #define SIZE_2MB               0x15
 #define SIZE_1MB               0x14
@@ -363,7 +363,7 @@ struct pci_msu {
 #define KORINA_CONFIG23_ADDR   0x8000005C
 #define KORINA_CONFIG24_ADDR   0x80000060
 #define KORINA_CONFIG25_ADDR   0x80000064
-#define KORINA_CMD             (PCI_CFG04_CMD_IO_ENA | \
+#define KORINA_CMD             (PCI_CFG04_CMD_IO_ENA | \
                                 PCI_CFG04_CMD_MEM_ENA | \
                                 PCI_CFG04_CMD_BM_ENA | \
                                 PCI_CFG04_CMD_MW_INV | \
@@ -401,8 +401,8 @@ struct pci_msu {
 #define KORINA_BAR3    0x48000008      /* Spare 128 MB Memory */
 
 #define KORINA_CNFG4   KORINA_BAR0
-#define KORINA_CNFG5    KORINA_BAR1
-#define KORINA_CNFG6   KORINA_BAR2
+#define KORINA_CNFG5   KORINA_BAR1
+#define KORINA_CNFG6   KORINA_BAR2
 #define KORINA_CNFG7   KORINA_BAR3
 
 #define KORINA_SUBSYS_VENDOR_ID 0x011d
@@ -410,20 +410,20 @@ struct pci_msu {
 #define KORINA_CNFG8           0
 #define KORINA_CNFG9           0
 #define KORINA_CNFG10          0
-#define KORINA_CNFG11  ((KORINA_SUBSYS_VENDOR_ID<<16) | \
+#define KORINA_CNFG11  ((KORINA_SUBSYS_VENDOR_ID<<16) | \
                          KORINA_SUBSYSTEM_ID)
 #define KORINA_INT_LINE                1
 #define KORINA_INT_PIN         1
 #define KORINA_MIN_GNT         8
 #define KORINA_MAX_LAT         0x38
 #define KORINA_CNFG12          0
-#define KORINA_CNFG13          0
+#define KORINA_CNFG13          0
 #define KORINA_CNFG14          0
 #define KORINA_CNFG15  ((KORINA_MAX_LAT<<24) | \
                         (KORINA_MIN_GNT<<16) | \
                         (KORINA_INT_PIN<<8)  | \
                          KORINA_INT_LINE)
-#define        KORINA_RETRY_LIMIT      0x80
+#define KORINA_RETRY_LIMIT     0x80
 #define KORINA_TRDY_LIMIT      0x80
 #define KORINA_CNFG16 ((KORINA_RETRY_LIMIT<<8) | \
                        KORINA_TRDY_LIMIT)
@@ -475,7 +475,7 @@ struct pci_msu {
 #define KORINA_PBA3M   0
 #define KORINA_CNFG24  KORINA_PBA3M
 
-#define        PCITC_DTIMER_VAL        8
+#define PCITC_DTIMER_VAL       8
 #define PCITC_RTIMER_VAL       0x10
 
-#endif  /* __ASM_RC32434_PCI_H */
+#endif /* __ASM_RC32434_PCI_H */
index 6dc5f8d..aac8ce8 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/genhd.h>
 
 #define REGBASE                0x18000000
-#define IDT434_REG_BASE        ((volatile void *) KSEG1ADDR(REGBASE))
+#define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
 #define UART0BASE      0x58000
 #define RST            (1 << 15)
 #define DEV0BASE       0x010000
@@ -80,10 +80,10 @@ struct cf_device {
 struct mpmc_device {
        unsigned char   state;
        spinlock_t      lock;
-       void __iomem    *base;
+       void __iomem    *base;
 };
 
 extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
 extern unsigned char get_latch_u5(void);
 
-#endif  /* __ASM_RC32434_RB_H */
+#endif /* __ASM_RC32434_RB_H */
index fce25d4..02fd32b 100644 (file)
@@ -16,4 +16,4 @@ static inline void rc32434_sync(void)
        __asm__ volatile ("sync");
 }
 
-#endif  /* _ASM_RC32434_RC32434_H_ */
+#endif /* _ASM_RC32434_RC32434_H_ */
index e49b1d5..cda26bb 100644 (file)
@@ -51,15 +51,15 @@ struct timer {
 #define RC32434_CTC_TO_BIT             1
 
 /* Real time clock registers */
-#define RC32434_RTC_MSK(x)              BIT_TO_MASK(x)
-#define RC32434_RTC_CE_BIT              0
-#define RC32434_RTC_TO_BIT              1
-#define RC32434_RTC_RQE_BIT             2
+#define RC32434_RTC_MSK(x)             BIT_TO_MASK(x)
+#define RC32434_RTC_CE_BIT             0
+#define RC32434_RTC_TO_BIT             1
+#define RC32434_RTC_RQE_BIT            2
 
 /* Counter registers */
-#define RC32434_RCOUNT_BIT              0
-#define RC32434_RCOUNT_MSK              0x0000ffff
-#define RC32434_RCOMP_BIT               0
-#define RC32434_RCOMP_MSK               0x0000ffff
+#define RC32434_RCOUNT_BIT             0
+#define RC32434_RCOUNT_MSK             0x0000ffff
+#define RC32434_RCOMP_BIT              0
+#define RC32434_RCOMP_MSK              0x0000ffff
 
-#endif  /* __ASM_RC32434_TIMER_H */
+#endif /* __ASM_RC32434_TIMER_H */
index 7f3e3f9..d9c8284 100644 (file)
@@ -23,8 +23,8 @@
 /* #define cpu_has_watch       ? */
 #define cpu_has_divec          1
 #define cpu_has_vce            0
-/* #define cpu_has_cache_cdex_p        ? */
-/* #define cpu_has_cache_cdex_s        ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
@@ -53,8 +53,8 @@
 /* #define cpu_has_watch       ? */
 #define cpu_has_divec          1
 #define cpu_has_vce            0
-/* #define cpu_has_cache_cdex_p        ? */
-/* #define cpu_has_cache_cdex_s        ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
index 652ea4c..5d154cf 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ASM_MACH_MIPS_IRQ_H
 #define __ASM_MACH_MIPS_IRQ_H
 
-#define NR_IRQS        256
+#define NR_IRQS 256
 
 
 #include_next <irq.h>
index 176f5b3..0a227d4 100644 (file)
@@ -21,12 +21,12 @@ extern int sb1250_m3_workaround_needed(void);
 #endif
 
 #define BCM1250_M3_WAR sb1250_m3_workaround_needed()
-#define SIBYTE_1956_WAR        1
+#define SIBYTE_1956_WAR 1
 
 #else
 
 #define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR        0
+#define SIBYTE_1956_WAR 0
 
 #endif
 
index 83746b8..00fa368 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * This is a direct copy of the ev96100.h file, with a global
- * search and replace.  The numbers are the same.
+ * search and replace. The numbers are the same.
  *
  * The reason I'm duplicating this is so that the 64120/96100
  * defines won't be confusing in the source code.
 /*
  * This is the CPU physical memory map of PPMC Board:
  *
- *    0x00000000-0x03FFFFFF      - 64MB SDRAM (SCS[0]#)
- *    0x1C000000-0x1C000000      - LED (CS0)
- *    0x1C800000-0x1C800007      - UART 16550 port (CS1)
- *    0x1F000000-0x1F000000      - MailBox (CS3)
- *    0x1FC00000-0x20000000      - 4MB Flash (BOOT CS)
+ *    0x00000000-0x03FFFFFF     - 64MB SDRAM (SCS[0]#)
+ *    0x1C000000-0x1C000000     - LED (CS0)
+ *    0x1C800000-0x1C800007     - UART 16550 port (CS1)
+ *    0x1F000000-0x1F000000     - MailBox (CS3)
+ *    0x1FC00000-0x20000000     - 4MB Flash (BOOT CS)
  */
 
 #define WRPPMC_SDRAM_SCS0_BASE 0x00000000
@@ -39,8 +39,8 @@
  *
  * NOTE: We only have PCI_0 hose interface
  */
-#define GT_PCI_MEM_BASE        0x13000000UL
-#define GT_PCI_MEM_SIZE        0x02000000UL
+#define GT_PCI_MEM_BASE 0x13000000UL
+#define GT_PCI_MEM_SIZE 0x02000000UL
 #define GT_PCI_IO_BASE 0x11000000UL
 #define GT_PCI_IO_SIZE 0x02000000UL
 
index 4a08dbe..9e1ad26 100644 (file)
@@ -26,7 +26,7 @@
  * MC146818A or Dallas DS12887 data sheet for details.
  *
  * BUG: This routine does not handle hour overflow properly; it just
- *      sets the minutes. Usually you'll only notice that after reboot!
+ *     sets the minutes. Usually you'll only notice that after reboot!
  */
 static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
 {
@@ -77,7 +77,7 @@ static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
         * battery and quartz) will not reset the oscillator and will not
         * update precisely 500 ms later. You won't find this mentioned in
         * the Dallas Semiconductor data sheets, but who believes data
-        * sheets anyway ...                           -- Markus Kuhn
+        * sheets anyway ...                           -- Markus Kuhn
         */
        CMOS_WRITE(save_control, RTC_CONTROL);
        CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
index d14e2ad..b2048d1 100644 (file)
@@ -41,18 +41,18 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 
 #define BONITO_BOOT_BASE               0x1fc00000
 #define BONITO_BOOT_SIZE               0x00100000
-#define BONITO_BOOT_TOP                (BONITO_BOOT_BASE+BONITO_BOOT_SIZE-1)
+#define BONITO_BOOT_TOP                        (BONITO_BOOT_BASE+BONITO_BOOT_SIZE-1)
 #define BONITO_FLASH_BASE              0x1c000000
 #define BONITO_FLASH_SIZE              0x03000000
 #define BONITO_FLASH_TOP               (BONITO_FLASH_BASE+BONITO_FLASH_SIZE-1)
 #define BONITO_SOCKET_BASE             0x1f800000
 #define BONITO_SOCKET_SIZE             0x00400000
 #define BONITO_SOCKET_TOP              (BONITO_SOCKET_BASE+BONITO_SOCKET_SIZE-1)
-#define BONITO_REG_BASE                0x1fe00000
-#define BONITO_REG_SIZE                0x00040000
+#define BONITO_REG_BASE                        0x1fe00000
+#define BONITO_REG_SIZE                        0x00040000
 #define BONITO_REG_TOP                 (BONITO_REG_BASE+BONITO_REG_SIZE-1)
-#define BONITO_DEV_BASE                0x1ff00000
-#define BONITO_DEV_SIZE                0x00100000
+#define BONITO_DEV_BASE                        0x1ff00000
+#define BONITO_DEV_SIZE                        0x00100000
 #define BONITO_DEV_TOP                 (BONITO_DEV_BASE+BONITO_DEV_SIZE-1)
 #define BONITO_PCILO_BASE              0x10000000
 #define BONITO_PCILO_SIZE              0x0c000000
@@ -79,14 +79,14 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 
 /* PCI Configuration  Registers */
 
-#define BONITO_PCI_REG(x)               BONITO(BONITO_PCICONFIGBASE + (x))
+#define BONITO_PCI_REG(x)              BONITO(BONITO_PCICONFIGBASE + (x))
 #define BONITO_PCIDID                  BONITO_PCI_REG(0x00)
 #define BONITO_PCICMD                  BONITO_PCI_REG(0x04)
-#define BONITO_PCICLASS                BONITO_PCI_REG(0x08)
+#define BONITO_PCICLASS                        BONITO_PCI_REG(0x08)
 #define BONITO_PCILTIMER               BONITO_PCI_REG(0x0c)
-#define BONITO_PCIBASE0                BONITO_PCI_REG(0x10)
-#define BONITO_PCIBASE1                BONITO_PCI_REG(0x14)
-#define BONITO_PCIBASE2                BONITO_PCI_REG(0x18)
+#define BONITO_PCIBASE0                        BONITO_PCI_REG(0x10)
+#define BONITO_PCIBASE1                        BONITO_PCI_REG(0x14)
+#define BONITO_PCIBASE2                        BONITO_PCI_REG(0x18)
 #define BONITO_PCIEXPRBASE             BONITO_PCI_REG(0x30)
 #define BONITO_PCIINT                  BONITO_PCI_REG(0x3c)
 
@@ -95,7 +95,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 #define BONITO_PCICMD_MABORT_CLR       0x20000000
 #define BONITO_PCICMD_MTABORT_CLR      0x10000000
 #define BONITO_PCICMD_TABORT_CLR       0x08000000
-#define BONITO_PCICMD_MPERR_CLR        0x01000000
+#define BONITO_PCICMD_MPERR_CLR                0x01000000
 #define BONITO_PCICMD_PERRRESPEN       0x00000040
 #define BONITO_PCICMD_ASTEPEN          0x00000080
 #define BONITO_PCICMD_SERREN           0x00000100
@@ -139,7 +139,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 
 /* Other Bonito configuration */
 
-#define BONITO_BONGENCFG_OFFSET         0x4
+#define BONITO_BONGENCFG_OFFSET                0x4
 #define BONITO_BONGENCFG               BONITO(BONITO_REGBASE + BONITO_BONGENCFG_OFFSET)
 
 #define BONITO_BONGENCFG_DEBUGMODE     0x00000001
@@ -165,7 +165,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 
 /* 2. IO & IDE configuration */
 
-#define BONITO_IODEVCFG                BONITO(BONITO_REGBASE + 0x08)
+#define BONITO_IODEVCFG                        BONITO(BONITO_REGBASE + 0x08)
 
 /* 3. IO & IDE configuration */
 
@@ -181,33 +181,33 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 
 /* GPIO Regs - r/w */
 
-#define BONITO_GPIODATA_OFFSET          0x1c
-#define BONITO_GPIODATA                BONITO(BONITO_REGBASE + BONITO_GPIODATA_OFFSET)
+#define BONITO_GPIODATA_OFFSET         0x1c
+#define BONITO_GPIODATA                        BONITO(BONITO_REGBASE + BONITO_GPIODATA_OFFSET)
 #define BONITO_GPIOIE                  BONITO(BONITO_REGBASE + 0x20)
 
 /* ICU Configuration Regs - r/w */
 
 #define BONITO_INTEDGE                 BONITO(BONITO_REGBASE + 0x24)
-#define BONITO_INTSTEER                BONITO(BONITO_REGBASE + 0x28)
+#define BONITO_INTSTEER                        BONITO(BONITO_REGBASE + 0x28)
 #define BONITO_INTPOL                  BONITO(BONITO_REGBASE + 0x2c)
 
 /* ICU Enable Regs - IntEn & IntISR are r/o. */
 
-#define BONITO_INTENSET                BONITO(BONITO_REGBASE + 0x30)
-#define BONITO_INTENCLR                BONITO(BONITO_REGBASE + 0x34)
+#define BONITO_INTENSET                        BONITO(BONITO_REGBASE + 0x30)
+#define BONITO_INTENCLR                        BONITO(BONITO_REGBASE + 0x34)
 #define BONITO_INTEN                   BONITO(BONITO_REGBASE + 0x38)
 #define BONITO_INTISR                  BONITO(BONITO_REGBASE + 0x3c)
 
 /* PCI mail boxes */
 
-#define BONITO_PCIMAIL0_OFFSET          0x40
-#define BONITO_PCIMAIL1_OFFSET          0x44
-#define BONITO_PCIMAIL2_OFFSET          0x48
-#define BONITO_PCIMAIL3_OFFSET          0x4c
-#define BONITO_PCIMAIL0                BONITO(BONITO_REGBASE + 0x40)
-#define BONITO_PCIMAIL1                BONITO(BONITO_REGBASE + 0x44)
-#define BONITO_PCIMAIL2                BONITO(BONITO_REGBASE + 0x48)
-#define BONITO_PCIMAIL3                BONITO(BONITO_REGBASE + 0x4c)
+#define BONITO_PCIMAIL0_OFFSET         0x40
+#define BONITO_PCIMAIL1_OFFSET         0x44
+#define BONITO_PCIMAIL2_OFFSET         0x48
+#define BONITO_PCIMAIL3_OFFSET         0x4c
+#define BONITO_PCIMAIL0                        BONITO(BONITO_REGBASE + 0x40)
+#define BONITO_PCIMAIL1                        BONITO(BONITO_REGBASE + 0x44)
+#define BONITO_PCIMAIL2                        BONITO(BONITO_REGBASE + 0x48)
+#define BONITO_PCIMAIL3                        BONITO(BONITO_REGBASE + 0x4c)
 
 
 /* 6. PCI cache */
@@ -216,7 +216,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 #define BONITO_PCICACHETAG             BONITO(BONITO_REGBASE + 0x54)
 
 #define BONITO_PCIBADADDR              BONITO(BONITO_REGBASE + 0x58)
-#define BONITO_PCIMSTAT                BONITO(BONITO_REGBASE + 0x5c)
+#define BONITO_PCIMSTAT                        BONITO(BONITO_REGBASE + 0x5c)
 
 
 /*
@@ -228,20 +228,20 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 
 #define BONITO_CONFIGBASE              0x000
 #define BONITO_BONITOBASE              0x100
-#define BONITO_LDMABASE                0x200
+#define BONITO_LDMABASE                        0x200
 #define BONITO_COPBASE                 0x300
 #define BONITO_REG_BLOCKMASK           0x300
 
-#define BONITO_LDMACTRL                BONITO(BONITO_LDMABASE + 0x0)
-#define BONITO_LDMASTAT                BONITO(BONITO_LDMABASE + 0x0)
-#define BONITO_LDMAADDR                BONITO(BONITO_LDMABASE + 0x4)
+#define BONITO_LDMACTRL                        BONITO(BONITO_LDMABASE + 0x0)
+#define BONITO_LDMASTAT                        BONITO(BONITO_LDMABASE + 0x0)
+#define BONITO_LDMAADDR                        BONITO(BONITO_LDMABASE + 0x4)
 #define BONITO_LDMAGO                  BONITO(BONITO_LDMABASE + 0x8)
-#define BONITO_LDMADATA                BONITO(BONITO_LDMABASE + 0xc)
+#define BONITO_LDMADATA                        BONITO(BONITO_LDMABASE + 0xc)
 
 #define BONITO_COPCTRL                 BONITO(BONITO_COPBASE + 0x0)
 #define BONITO_COPSTAT                 BONITO(BONITO_COPBASE + 0x0)
-#define BONITO_COPPADDR                BONITO(BONITO_COPBASE + 0x4)
-#define BONITO_COPDADDR                BONITO(BONITO_COPBASE + 0x8)
+#define BONITO_COPPADDR                        BONITO(BONITO_COPBASE + 0x4)
+#define BONITO_COPDADDR                        BONITO(BONITO_COPBASE + 0x8)
 #define BONITO_COPGO                   BONITO(BONITO_COPBASE + 0xc)
 
 
@@ -257,7 +257,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 #define BONITO_IDECOPGO_DMA_SIZE_SHIFT 0
 #define BONITO_IDECOPGO_DMA_WRITE      0x00010000
 #define BONITO_IDECOPGO_DMAWCOUNT      0x000f0000
-#define BONITO_IDECOPGO_DMAWCOUNT_SHIFT        16
+#define BONITO_IDECOPGO_DMAWCOUNT_SHIFT 16
 
 #define BONITO_IDECOPCTRL_DMA_STARTBIT 0x80000000
 #define BONITO_IDECOPCTRL_DMA_RSTBIT   0x40000000
@@ -291,11 +291,11 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 #define BONITO_SDCFG_DRAMMODESET       0x00200000
 /* --- */
 #define BONITO_SDCFG_DRAMEXTREGS       0x00400000
-#define BONITO_SDCFG_DRAMPARITY        0x00800000
+#define BONITO_SDCFG_DRAMPARITY                0x00800000
 /* Added by RPF 11-9-00 */
-#define BONITO_SDCFG_DRAMBURSTLEN      0x03000000
-#define BONITO_SDCFG_DRAMBURSTLEN_SHIFT        24
-#define BONITO_SDCFG_DRAMMODESET_DONE  0x80000000
+#define BONITO_SDCFG_DRAMBURSTLEN      0x03000000
+#define BONITO_SDCFG_DRAMBURSTLEN_SHIFT 24
+#define BONITO_SDCFG_DRAMMODESET_DONE  0x80000000
 /* --- */
 
 /* PCI Cache - pciCacheCtrl */
@@ -308,7 +308,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 
 #define BONITO_PCICACHECTRL_IOBCCOH_PRES       0x00000100
 #define BONITO_PCICACHECTRL_IOBCCOH_EN 0x00000200
-#define BONITO_PCICACHECTRL_CPUCOH_PRES        0x00000400
+#define BONITO_PCICACHECTRL_CPUCOH_PRES 0x00000400
 #define BONITO_PCICACHECTRL_CPUCOH_EN  0x00000800
 
 #define BONITO_IODEVCFG_BUFFBIT_CS0    0x00000001
@@ -343,18 +343,18 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 
 /* gpio */
 #define BONITO_GPIO_GPIOW              0x000003ff
-#define BONITO_GPIO_GPIOW_SHIFT        0
+#define BONITO_GPIO_GPIOW_SHIFT                0
 #define BONITO_GPIO_GPIOR              0x01ff0000
-#define BONITO_GPIO_GPIOR_SHIFT        16
+#define BONITO_GPIO_GPIOR_SHIFT                16
 #define BONITO_GPIO_GPINR              0xfe000000
-#define BONITO_GPIO_GPINR_SHIFT        25
+#define BONITO_GPIO_GPINR_SHIFT                25
 #define BONITO_GPIO_IOW(N)             (1<<(BONITO_GPIO_GPIOW_SHIFT+(N)))
 #define BONITO_GPIO_IOR(N)             (1<<(BONITO_GPIO_GPIOR_SHIFT+(N)))
 #define BONITO_GPIO_INR(N)             (1<<(BONITO_GPIO_GPINR_SHIFT+(N)))
 
 /* ICU */
 #define BONITO_ICU_MBOXES              0x0000000f
-#define BONITO_ICU_MBOXES_SHIFT        0
+#define BONITO_ICU_MBOXES_SHIFT                0
 #define BONITO_ICU_DMARDY              0x00000010
 #define BONITO_ICU_DMAEMPTY            0x00000020
 #define BONITO_ICU_COPYRDY             0x00000040
@@ -384,13 +384,13 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 #define BONITO_PCIMAP_PCIMAP_2         0x00040000
 #define BONITO_PCIMAP_WIN(WIN, ADDR)   ((((ADDR)>>26) & BONITO_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
 
-#define BONITO_PCIMAP_WINSIZE           (1<<26)
+#define BONITO_PCIMAP_WINSIZE          (1<<26)
 #define BONITO_PCIMAP_WINOFFSET(ADDR)  ((ADDR) & (BONITO_PCIMAP_WINSIZE - 1))
 #define BONITO_PCIMAP_WINBASE(ADDR)    ((ADDR) << 26)
 
 /* pcimembaseCfg */
 
-#define BONITO_PCIMEMBASECFG_MASK               0xf0000000
+#define BONITO_PCIMEMBASECFG_MASK              0xf0000000
 #define BONITO_PCIMEMBASECFG_MEMBASE0_MASK     0x0000001f
 #define BONITO_PCIMEMBASECFG_MEMBASE0_MASK_SHIFT       0
 #define BONITO_PCIMEMBASECFG_MEMBASE0_TRANS    0x000003e0
@@ -406,21 +406,21 @@ extern unsigned long _pcictrl_bonito_pcicfg;
 #define BONITO_PCIMEMBASECFG_MEMBASE1_IO       0x00800000
 
 #define BONITO_PCIMEMBASECFG_ASHIFT    23
-#define BONITO_PCIMEMBASECFG_AMASK              0x007fffff
+#define BONITO_PCIMEMBASECFG_AMASK             0x007fffff
 #define BONITO_PCIMEMBASECFGSIZE(WIN, SIZE)    (((~((SIZE)-1))>>(BONITO_PCIMEMBASECFG_ASHIFT-BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK)
 #define BONITO_PCIMEMBASECFGBASE(WIN, BASE)    (((BASE)>>(BONITO_PCIMEMBASECFG_ASHIFT-BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS)
 
 #define BONITO_PCIMEMBASECFG_SIZE(WIN, CFG)  (((((~(CFG)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK)) << (BONITO_PCIMEMBASECFG_ASHIFT - BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT)) | BONITO_PCIMEMBASECFG_AMASK)
 
 
-#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)  ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
-#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)  ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
+#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)         ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
+#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)         ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
 #define BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
 
-#define BONITO_PCITOPHYS(WIN, ADDR, CFG)          ( \
-                                                  (((ADDR) & (~(BONITO_PCIMEMBASECFG_MASK))) & (~(BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)))) | \
-                                                  (BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG)) \
-                                                )
+#define BONITO_PCITOPHYS(WIN, ADDR, CFG)         ( \
+                                                 (((ADDR) & (~(BONITO_PCIMEMBASECFG_MASK))) & (~(BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)))) | \
+                                                 (BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG)) \
+                                               )
 
 /* PCICmd */
 
index 6e23ceb..44a09a6 100644 (file)
@@ -1,21 +1,14 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
  * Defines of the MIPS boards specific address-MAP, registers, etc.
+ *
+ * Copyright (C) 2000,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #ifndef __ASM_MIPS_BOARDS_GENERIC_H
 #define __ASM_MIPS_BOARDS_GENERIC_H
 /*
  * Display register base.
  */
-#define ASCII_DISPLAY_WORD_BASE    0x1f000410
-#define ASCII_DISPLAY_POS_BASE     0x1f000418
-
-
-/*
- * Yamon Prom print address.
- */
-#define YAMON_PROM_PRINT_ADDR      0x1fc00504
-
+#define ASCII_DISPLAY_WORD_BASE           0x1f000410
+#define ASCII_DISPLAY_POS_BASE    0x1f000418
 
 /*
  * Reset register.
  */
-#define SOFTRES_REG       0x1f000500
-#define GORESET           0x42
+#define SOFTRES_REG      0x1f000500
+#define GORESET                  0x42
 
 /*
  * Revision register.
  */
-#define MIPS_REVISION_REG                  0x1fc00010
-#define MIPS_REVISION_CORID_QED_RM5261     0
-#define MIPS_REVISION_CORID_CORE_LV        1
-#define MIPS_REVISION_CORID_BONITO64       2
-#define MIPS_REVISION_CORID_CORE_20K       3
-#define MIPS_REVISION_CORID_CORE_FPGA      4
-#define MIPS_REVISION_CORID_CORE_MSC       5
-#define MIPS_REVISION_CORID_CORE_EMUL      6
-#define MIPS_REVISION_CORID_CORE_FPGA2     7
-#define MIPS_REVISION_CORID_CORE_FPGAR2    8
-#define MIPS_REVISION_CORID_CORE_FPGA3     9
-#define MIPS_REVISION_CORID_CORE_24K       10
-#define MIPS_REVISION_CORID_CORE_FPGA4     11
-#define MIPS_REVISION_CORID_CORE_FPGA5     12
+#define MIPS_REVISION_REG                 0x1fc00010
+#define MIPS_REVISION_CORID_QED_RM5261    0
+#define MIPS_REVISION_CORID_CORE_LV       1
+#define MIPS_REVISION_CORID_BONITO64      2
+#define MIPS_REVISION_CORID_CORE_20K      3
+#define MIPS_REVISION_CORID_CORE_FPGA     4
+#define MIPS_REVISION_CORID_CORE_MSC      5
+#define MIPS_REVISION_CORID_CORE_EMUL     6
+#define MIPS_REVISION_CORID_CORE_FPGA2    7
+#define MIPS_REVISION_CORID_CORE_FPGAR2           8
+#define MIPS_REVISION_CORID_CORE_FPGA3    9
+#define MIPS_REVISION_CORID_CORE_24K      10
+#define MIPS_REVISION_CORID_CORE_FPGA4    11
+#define MIPS_REVISION_CORID_CORE_FPGA5    12
 
 /**** Artificial corid defines ****/
 /*
 
 extern int mips_revision_sconid;
 
+#ifdef CONFIG_OF
+extern struct boot_param_header __dtb_start;
+#endif
+
 #ifdef CONFIG_PCI
 extern void mips_pcibios_init(void);
 #else
 #define mips_pcibios_init() do { } while (0)
 #endif
 
-#endif  /* __ASM_MIPS_BOARDS_GENERIC_H */
+#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
index d8ae7f9..653477e 100644 (file)
@@ -16,11 +16,11 @@ struct cpulaunch {
 #else
 
 #define LOG2CPULAUNCH  5
-#define        LAUNCH_PC       0
-#define        LAUNCH_GP       4
-#define        LAUNCH_SP       8
-#define        LAUNCH_A0       12
-#define        LAUNCH_FLAGS    28
+#define LAUNCH_PC      0
+#define LAUNCH_GP      4
+#define LAUNCH_SP      8
+#define LAUNCH_A0      12
+#define LAUNCH_FLAGS   28
 
 #endif
 
index c189157..722bc88 100644 (file)
@@ -33,9 +33,9 @@
  * Malta I/O ports base address for the Galileo GT64120 and Algorithmics
  * Bonito system controllers.
  */
-#define MALTA_GT_PORT_BASE      get_gt_port_base(GT_PCI0IOLD_OFS)
-#define MALTA_BONITO_PORT_BASE  ((unsigned long)ioremap (0x1fd00000, 0x10000))
-#define MALTA_MSC_PORT_BASE     get_msc_port_base(MSC01_PCI_SC2PIOBASL)
+#define MALTA_GT_PORT_BASE     get_gt_port_base(GT_PCI0IOLD_OFS)
+#define MALTA_BONITO_PORT_BASE ((unsigned long)ioremap (0x1fd00000, 0x10000))
+#define MALTA_MSC_PORT_BASE    get_msc_port_base(MSC01_PCI_SC2PIOBASL)
 
 static inline unsigned long get_gt_port_base(unsigned long reg)
 {
@@ -77,8 +77,8 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
 /*
  * Malta RTC-device indirect register access.
  */
-#define MALTA_RTC_ADR_REG       0x70
-#define MALTA_RTC_DAT_REG       0x71
+#define MALTA_RTC_ADR_REG      0x70
+#define MALTA_RTC_DAT_REG      0x71
 
 /*
  * Malta SMSC FDC37M817 Super I/O Controller register.
index 6692448..e330732 100644 (file)
@@ -4,8 +4,8 @@
  * for more details.
  *
  * Copyright (C) 2000,2012 MIPS Technologies, Inc.  All rights reserved.
- *      Carsten Langgaard <carstenl@mips.com>
- *      Steven J. Hill <sjhill@mips.com>
+ *     Carsten Langgaard <carstenl@mips.com>
+ *     Steven J. Hill <sjhill@mips.com>
  */
 #ifndef _MIPS_MALTAINT_H
 #define _MIPS_MALTAINT_H
@@ -24,9 +24,9 @@
 #define MIPSCPU_INT_I8259A     MIPSCPU_INT_MB0
 #define MIPSCPU_INT_MB1                3
 #define MIPSCPU_INT_SMI                MIPSCPU_INT_MB1
-#define MIPSCPU_INT_IPI0       MIPSCPU_INT_MB1 /* GIC IPI */
+#define MIPSCPU_INT_IPI0       MIPSCPU_INT_MB1 /* GIC IPI */
 #define MIPSCPU_INT_MB2                4
-#define MIPSCPU_INT_IPI1       MIPSCPU_INT_MB2 /* GIC IPI */
+#define MIPSCPU_INT_IPI1       MIPSCPU_INT_MB2 /* GIC IPI */
 #define MIPSCPU_INT_MB3                5
 #define MIPSCPU_INT_COREHI     MIPSCPU_INT_MB3
 #define MIPSCPU_INT_MB4                6
index 2971d60..a02596c 100644 (file)
@@ -53,7 +53,7 @@
 #define PIIX4_OCW2_SP          (0x6 << 5)
 #define PIIX4_OCW2_NOP         (0x2 << 5)
 
-#define PIIX4_OCW2_SEL          (0x0 << 3)
+#define PIIX4_OCW2_SEL         (0x0 << 3)
 
 #define PIIX4_OCW2_ILS_0       0
 #define PIIX4_OCW2_ILS_1       1
@@ -72,9 +72,9 @@
 #define PIIX4_OCW2_ILS_14      6
 #define PIIX4_OCW2_ILS_15      7
 
-#define PIIX4_OCW3_SEL          (0x1 << 3)
+#define PIIX4_OCW3_SEL         (0x1 << 3)
 
-#define PIIX4_OCW3_IRR          0x2
-#define PIIX4_OCW3_ISR          0x3
+#define PIIX4_OCW3_IRR         0x2
+#define PIIX4_OCW3_ISR         0x3
 
 #endif /* __ASM_MIPS_BOARDS_PIIX4_H */
index a9db576..e7aed3e 100644 (file)
@@ -39,9 +39,9 @@ extern int get_ethernet_addr(char *ethernet_addr);
 /* Memory descriptor management. */
 #define PROM_MAX_PMEMBLOCKS    32
 struct prom_pmemblock {
-        unsigned long base; /* Within KSEG0. */
-        unsigned int size;  /* In bytes. */
-        unsigned int type;  /* free or prom memory */
+       unsigned long base; /* Within KSEG0. */
+       unsigned int size;  /* In bytes. */
+       unsigned int type;  /* free or prom memory */
 };
 
 #endif /* !(_MIPS_PROM_H) */
index d634d9a..6b17aaf 100644 (file)
@@ -4,8 +4,8 @@
  * for more details.
  *
  * Copyright (C) 2000,2012 MIPS Technologies, Inc.  All rights reserved.
- *      Douglas Leung <douglas@mips.com>
- *      Steven J. Hill <sjhill@mips.com>
+ *     Douglas Leung <douglas@mips.com>
+ *     Steven J. Hill <sjhill@mips.com>
  */
 #ifndef _MIPS_SEAD3INT_H
 #define _MIPS_SEAD3INT_H
index acb7c23..b112fdc 100644 (file)
 #ifndef _ASM_MIPS_BOARDS_SIM_H
 #define _ASM_MIPS_BOARDS_SIM_H
 
-#define STATS_ON        1
-#define STATS_OFF       2
-#define STATS_CLEAR     3
-#define STATS_DUMP      4
+#define STATS_ON       1
+#define STATS_OFF      2
+#define STATS_CLEAR    3
+#define STATS_DUMP     4
 #define TRACE_ON               5
-#define TRACE_OFF       6
+#define TRACE_OFF      6
 
 
 #define simcfg(code)                                           \
 ({                                        \
-       __asm__  __volatile__( \
-        "sltiu $0,$0, %0" \
+       __asm__  __volatile__( \
+       "sltiu $0,$0, %0" \
                ::"i"(code)                                     \
                ); \
 })
index 5b3cb85..38b7704 100644 (file)
@@ -270,14 +270,14 @@ static inline void ehb(void)
 
 #define mftc0(rt,sel)                                                  \
 ({                                                                     \
-        unsigned long  __res;                                          \
+        unsigned long  __res;                                          \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    mips32r2                                \n"     \
        "       .set    noat                                    \n"     \
-       "       # mftc0 $1, $" #rt ", " #sel "                  \n"     \
-       "       .word   0x41000800 | (" #rt " << 16) | " #sel " \n"     \
+       "       # mftc0 $1, $" #rt ", " #sel "                  \n"     \
+       "       .word   0x41000800 | (" #rt " << 16) | " #sel " \n"     \
        "       move    %0, $1                                  \n"     \
        "       .set    pop                                     \n"     \
        : "=r" (__res));                                                \
@@ -334,7 +334,7 @@ do {                                                                        \
        "       .set    noat                                    \n"     \
        "       move    $1, %0                                  \n"     \
        "       # mttc0 %0," #rd ", " #sel "                    \n"     \
-       "       .word   0x41810000 | (" #rd " << 11) | " #sel " \n"     \
+       "       .word   0x41810000 | (" #rd " << 11) | " #sel " \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
        : "r" (v));                                                     \
index 7e4e6f8..12b70c2 100644 (file)
  * Status Register Values
  */
 
-#define FPU_CSR_FLUSH   0x01000000      /* flush denormalised results to 0 */
-#define FPU_CSR_COND    0x00800000      /* $fcc0 */
-#define FPU_CSR_COND0   0x00800000      /* $fcc0 */
-#define FPU_CSR_COND1   0x02000000      /* $fcc1 */
-#define FPU_CSR_COND2   0x04000000      /* $fcc2 */
-#define FPU_CSR_COND3   0x08000000      /* $fcc3 */
-#define FPU_CSR_COND4   0x10000000      /* $fcc4 */
-#define FPU_CSR_COND5   0x20000000      /* $fcc5 */
-#define FPU_CSR_COND6   0x40000000      /* $fcc6 */
-#define FPU_CSR_COND7   0x80000000      /* $fcc7 */
+#define FPU_CSR_FLUSH  0x01000000      /* flush denormalised results to 0 */
+#define FPU_CSR_COND   0x00800000      /* $fcc0 */
+#define FPU_CSR_COND0  0x00800000      /* $fcc0 */
+#define FPU_CSR_COND1  0x02000000      /* $fcc1 */
+#define FPU_CSR_COND2  0x04000000      /* $fcc2 */
+#define FPU_CSR_COND3  0x08000000      /* $fcc3 */
+#define FPU_CSR_COND4  0x10000000      /* $fcc4 */
+#define FPU_CSR_COND5  0x20000000      /* $fcc5 */
+#define FPU_CSR_COND6  0x40000000      /* $fcc6 */
+#define FPU_CSR_COND7  0x80000000      /* $fcc7 */
 
 /*
  * Bits 18 - 20 of the FPU Status Register will be read as 0,
  * E the exception enable
  * S the sticky/flag bit
 */
-#define FPU_CSR_ALL_X   0x0003f000
-#define FPU_CSR_UNI_X   0x00020000
-#define FPU_CSR_INV_X   0x00010000
-#define FPU_CSR_DIV_X   0x00008000
-#define FPU_CSR_OVF_X   0x00004000
-#define FPU_CSR_UDF_X   0x00002000
-#define FPU_CSR_INE_X   0x00001000
-
-#define FPU_CSR_ALL_E   0x00000f80
-#define FPU_CSR_INV_E   0x00000800
-#define FPU_CSR_DIV_E   0x00000400
-#define FPU_CSR_OVF_E   0x00000200
-#define FPU_CSR_UDF_E   0x00000100
-#define FPU_CSR_INE_E   0x00000080
-
-#define FPU_CSR_ALL_S   0x0000007c
-#define FPU_CSR_INV_S   0x00000040
-#define FPU_CSR_DIV_S   0x00000020
-#define FPU_CSR_OVF_S   0x00000010
-#define FPU_CSR_UDF_S   0x00000008
-#define FPU_CSR_INE_S   0x00000004
+#define FPU_CSR_ALL_X  0x0003f000
+#define FPU_CSR_UNI_X  0x00020000
+#define FPU_CSR_INV_X  0x00010000
+#define FPU_CSR_DIV_X  0x00008000
+#define FPU_CSR_OVF_X  0x00004000
+#define FPU_CSR_UDF_X  0x00002000
+#define FPU_CSR_INE_X  0x00001000
+
+#define FPU_CSR_ALL_E  0x00000f80
+#define FPU_CSR_INV_E  0x00000800
+#define FPU_CSR_DIV_E  0x00000400
+#define FPU_CSR_OVF_E  0x00000200
+#define FPU_CSR_UDF_E  0x00000100
+#define FPU_CSR_INE_E  0x00000080
+
+#define FPU_CSR_ALL_S  0x0000007c
+#define FPU_CSR_INV_S  0x00000040
+#define FPU_CSR_DIV_S  0x00000020
+#define FPU_CSR_OVF_S  0x00000010
+#define FPU_CSR_UDF_S  0x00000008
+#define FPU_CSR_INE_S  0x00000004
 
 /* Bits 0 and 1 of FPU Status Register specify the rounding mode */
 #define FPU_CSR_RM     0x00000003
-#define FPU_CSR_RN      0x0     /* nearest */
-#define FPU_CSR_RZ      0x1     /* towards zero */
-#define FPU_CSR_RU      0x2     /* towards +Infinity */
-#define FPU_CSR_RD      0x3     /* towards -Infinity */
+#define FPU_CSR_RN     0x0     /* nearest */
+#define FPU_CSR_RZ     0x1     /* towards zero */
+#define FPU_CSR_RU     0x2     /* towards +Infinity */
+#define FPU_CSR_RD     0x3     /* towards -Infinity */
 
 
 /*
  * Default page size for a given kernel configuration
  */
 #ifdef CONFIG_PAGE_SIZE_4KB
-#define PM_DEFAULT_MASK        PM_4K
+#define PM_DEFAULT_MASK PM_4K
 #elif defined(CONFIG_PAGE_SIZE_8KB)
-#define PM_DEFAULT_MASK        PM_8K
+#define PM_DEFAULT_MASK PM_8K
 #elif defined(CONFIG_PAGE_SIZE_16KB)
-#define PM_DEFAULT_MASK        PM_16K
+#define PM_DEFAULT_MASK PM_16K
 #elif defined(CONFIG_PAGE_SIZE_32KB)
-#define PM_DEFAULT_MASK        PM_32K
+#define PM_DEFAULT_MASK PM_32K
 #elif defined(CONFIG_PAGE_SIZE_64KB)
-#define PM_DEFAULT_MASK        PM_64K
+#define PM_DEFAULT_MASK PM_64K
 #else
 #error Bad page size configuration!
 #endif
 /*
  * PageGrain bits
  */
-#define PG_RIE         (_ULCAST_(1) <<  31)
-#define PG_XIE         (_ULCAST_(1) <<  30)
-#define PG_ELPA                (_ULCAST_(1) <<  29)
-#define PG_ESP         (_ULCAST_(1) <<  28)
+#define PG_RIE         (_ULCAST_(1) <<  31)
+#define PG_XIE         (_ULCAST_(1) <<  30)
+#define PG_ELPA                (_ULCAST_(1) <<  29)
+#define PG_ESP         (_ULCAST_(1) <<  28)
 
 /*
  * R4x00 interrupt enable / cause bits
  */
-#define IE_SW0          (_ULCAST_(1) <<  8)
-#define IE_SW1          (_ULCAST_(1) <<  9)
-#define IE_IRQ0         (_ULCAST_(1) << 10)
-#define IE_IRQ1         (_ULCAST_(1) << 11)
-#define IE_IRQ2         (_ULCAST_(1) << 12)
-#define IE_IRQ3         (_ULCAST_(1) << 13)
-#define IE_IRQ4         (_ULCAST_(1) << 14)
-#define IE_IRQ5         (_ULCAST_(1) << 15)
+#define IE_SW0         (_ULCAST_(1) <<  8)
+#define IE_SW1         (_ULCAST_(1) <<  9)
+#define IE_IRQ0                (_ULCAST_(1) << 10)
+#define IE_IRQ1                (_ULCAST_(1) << 11)
+#define IE_IRQ2                (_ULCAST_(1) << 12)
+#define IE_IRQ3                (_ULCAST_(1) << 13)
+#define IE_IRQ4                (_ULCAST_(1) << 14)
+#define IE_IRQ5                (_ULCAST_(1) << 15)
 
 /*
  * R4x00 interrupt cause bits
  */
-#define C_SW0           (_ULCAST_(1) <<  8)
-#define C_SW1           (_ULCAST_(1) <<  9)
-#define C_IRQ0          (_ULCAST_(1) << 10)
-#define C_IRQ1          (_ULCAST_(1) << 11)
-#define C_IRQ2          (_ULCAST_(1) << 12)
-#define C_IRQ3          (_ULCAST_(1) << 13)
-#define C_IRQ4          (_ULCAST_(1) << 14)
-#define C_IRQ5          (_ULCAST_(1) << 15)
+#define C_SW0          (_ULCAST_(1) <<  8)
+#define C_SW1          (_ULCAST_(1) <<  9)
+#define C_IRQ0         (_ULCAST_(1) << 10)
+#define C_IRQ1         (_ULCAST_(1) << 11)
+#define C_IRQ2         (_ULCAST_(1) << 12)
+#define C_IRQ3         (_ULCAST_(1) << 13)
+#define C_IRQ4         (_ULCAST_(1) << 14)
+#define C_IRQ5         (_ULCAST_(1) << 15)
 
 /*
  * Bitfields in the R4xx0 cp0 status register
 #  define KSU_KERNEL           0x00000000
 #define ST0_UX                 0x00000020
 #define ST0_SX                 0x00000040
-#define ST0_KX                         0x00000080
+#define ST0_KX                 0x00000080
 #define ST0_DE                 0x00010000
 #define ST0_CE                 0x00020000
 
 /*
  * Bitfields in the R[23]000 cp0 status register.
  */
-#define ST0_IEC                 0x00000001
+#define ST0_IEC                        0x00000001
 #define ST0_KUC                        0x00000002
 #define ST0_IEP                        0x00000004
 #define ST0_KUP                        0x00000008
 /*
  * Bits specific to the R4640/R4650
  */
-#define ST0_UM                 (_ULCAST_(1) <<  4)
+#define ST0_UM                 (_ULCAST_(1) <<  4)
 #define ST0_IL                 (_ULCAST_(1) << 23)
 #define ST0_DL                 (_ULCAST_(1) << 24)
 
  */
 #define TX39_CONF_ICS_SHIFT    19
 #define TX39_CONF_ICS_MASK     0x00380000
-#define TX39_CONF_ICS_1KB      0x00000000
-#define TX39_CONF_ICS_2KB      0x00080000
-#define TX39_CONF_ICS_4KB      0x00100000
-#define TX39_CONF_ICS_8KB      0x00180000
-#define TX39_CONF_ICS_16KB     0x00200000
+#define TX39_CONF_ICS_1KB      0x00000000
+#define TX39_CONF_ICS_2KB      0x00080000
+#define TX39_CONF_ICS_4KB      0x00100000
+#define TX39_CONF_ICS_8KB      0x00180000
+#define TX39_CONF_ICS_16KB     0x00200000
 
 #define TX39_CONF_DCS_SHIFT    16
 #define TX39_CONF_DCS_MASK     0x00070000
-#define TX39_CONF_DCS_1KB      0x00000000
-#define TX39_CONF_DCS_2KB      0x00010000
-#define TX39_CONF_DCS_4KB      0x00020000
-#define TX39_CONF_DCS_8KB      0x00030000
-#define TX39_CONF_DCS_16KB     0x00040000
-
-#define TX39_CONF_CWFON        0x00004000
-#define TX39_CONF_WBON         0x00002000
+#define TX39_CONF_DCS_1KB      0x00000000
+#define TX39_CONF_DCS_2KB      0x00010000
+#define TX39_CONF_DCS_4KB      0x00020000
+#define TX39_CONF_DCS_8KB      0x00030000
+#define TX39_CONF_DCS_16KB     0x00040000
+
+#define TX39_CONF_CWFON                0x00004000
+#define TX39_CONF_WBON         0x00002000
 #define TX39_CONF_RF_SHIFT     10
 #define TX39_CONF_RF_MASK      0x00000c00
 #define TX39_CONF_DOZE         0x00000200
  * Status register bits available in all MIPS CPUs.
  */
 #define ST0_IM                 0x0000ff00
-#define  STATUSB_IP0           8
-#define  STATUSF_IP0           (_ULCAST_(1) <<  8)
-#define  STATUSB_IP1           9
-#define  STATUSF_IP1           (_ULCAST_(1) <<  9)
-#define  STATUSB_IP2           10
-#define  STATUSF_IP2           (_ULCAST_(1) << 10)
-#define  STATUSB_IP3           11
-#define  STATUSF_IP3           (_ULCAST_(1) << 11)
-#define  STATUSB_IP4           12
-#define  STATUSF_IP4           (_ULCAST_(1) << 12)
-#define  STATUSB_IP5           13
-#define  STATUSF_IP5           (_ULCAST_(1) << 13)
-#define  STATUSB_IP6           14
-#define  STATUSF_IP6           (_ULCAST_(1) << 14)
-#define  STATUSB_IP7           15
-#define  STATUSF_IP7           (_ULCAST_(1) << 15)
-#define  STATUSB_IP8           0
-#define  STATUSF_IP8           (_ULCAST_(1) <<  0)
-#define  STATUSB_IP9           1
-#define  STATUSF_IP9           (_ULCAST_(1) <<  1)
-#define  STATUSB_IP10          2
-#define  STATUSF_IP10          (_ULCAST_(1) <<  2)
-#define  STATUSB_IP11          3
-#define  STATUSF_IP11          (_ULCAST_(1) <<  3)
-#define  STATUSB_IP12          4
-#define  STATUSF_IP12          (_ULCAST_(1) <<  4)
-#define  STATUSB_IP13          5
-#define  STATUSF_IP13          (_ULCAST_(1) <<  5)
-#define  STATUSB_IP14          6
-#define  STATUSF_IP14          (_ULCAST_(1) <<  6)
-#define  STATUSB_IP15          7
-#define  STATUSF_IP15          (_ULCAST_(1) <<  7)
+#define         STATUSB_IP0            8
+#define         STATUSF_IP0            (_ULCAST_(1) <<  8)
+#define         STATUSB_IP1            9
+#define         STATUSF_IP1            (_ULCAST_(1) <<  9)
+#define         STATUSB_IP2            10
+#define         STATUSF_IP2            (_ULCAST_(1) << 10)
+#define         STATUSB_IP3            11
+#define         STATUSF_IP3            (_ULCAST_(1) << 11)
+#define         STATUSB_IP4            12
+#define         STATUSF_IP4            (_ULCAST_(1) << 12)
+#define         STATUSB_IP5            13
+#define         STATUSF_IP5            (_ULCAST_(1) << 13)
+#define         STATUSB_IP6            14
+#define         STATUSF_IP6            (_ULCAST_(1) << 14)
+#define         STATUSB_IP7            15
+#define         STATUSF_IP7            (_ULCAST_(1) << 15)
+#define         STATUSB_IP8            0
+#define         STATUSF_IP8            (_ULCAST_(1) <<  0)
+#define         STATUSB_IP9            1
+#define         STATUSF_IP9            (_ULCAST_(1) <<  1)
+#define         STATUSB_IP10           2
+#define         STATUSF_IP10           (_ULCAST_(1) <<  2)
+#define         STATUSB_IP11           3
+#define         STATUSF_IP11           (_ULCAST_(1) <<  3)
+#define         STATUSB_IP12           4
+#define         STATUSF_IP12           (_ULCAST_(1) <<  4)
+#define         STATUSB_IP13           5
+#define         STATUSF_IP13           (_ULCAST_(1) <<  5)
+#define         STATUSB_IP14           6
+#define         STATUSF_IP14           (_ULCAST_(1) <<  6)
+#define         STATUSB_IP15           7
+#define         STATUSF_IP15           (_ULCAST_(1) <<  7)
 #define ST0_CH                 0x00040000
 #define ST0_NMI                        0x00080000
 #define ST0_SR                 0x00100000
  *
  * Refer to your MIPS R4xx0 manual, chapter 5 for explanation.
  */
-#define  CAUSEB_EXCCODE                2
-#define  CAUSEF_EXCCODE                (_ULCAST_(31)  <<  2)
-#define  CAUSEB_IP             8
-#define  CAUSEF_IP             (_ULCAST_(255) <<  8)
-#define  CAUSEB_IP0            8
-#define  CAUSEF_IP0            (_ULCAST_(1)   <<  8)
-#define  CAUSEB_IP1            9
-#define  CAUSEF_IP1            (_ULCAST_(1)   <<  9)
-#define  CAUSEB_IP2            10
-#define  CAUSEF_IP2            (_ULCAST_(1)   << 10)
-#define  CAUSEB_IP3            11
-#define  CAUSEF_IP3            (_ULCAST_(1)   << 11)
-#define  CAUSEB_IP4            12
-#define  CAUSEF_IP4            (_ULCAST_(1)   << 12)
-#define  CAUSEB_IP5            13
-#define  CAUSEF_IP5            (_ULCAST_(1)   << 13)
-#define  CAUSEB_IP6            14
-#define  CAUSEF_IP6            (_ULCAST_(1)   << 14)
-#define  CAUSEB_IP7            15
-#define  CAUSEF_IP7            (_ULCAST_(1)   << 15)
-#define  CAUSEB_IV             23
-#define  CAUSEF_IV             (_ULCAST_(1)   << 23)
-#define  CAUSEB_PCI            26
-#define  CAUSEF_PCI            (_ULCAST_(1)   << 26)
-#define  CAUSEB_CE             28
-#define  CAUSEF_CE             (_ULCAST_(3)   << 28)
-#define  CAUSEB_TI             30
-#define  CAUSEF_TI             (_ULCAST_(1)   << 30)
-#define  CAUSEB_BD             31
-#define  CAUSEF_BD             (_ULCAST_(1)   << 31)
+#define         CAUSEB_EXCCODE         2
+#define         CAUSEF_EXCCODE         (_ULCAST_(31)  <<  2)
+#define         CAUSEB_IP              8
+#define         CAUSEF_IP              (_ULCAST_(255) <<  8)
+#define         CAUSEB_IP0             8
+#define         CAUSEF_IP0             (_ULCAST_(1)   <<  8)
+#define         CAUSEB_IP1             9
+#define         CAUSEF_IP1             (_ULCAST_(1)   <<  9)
+#define         CAUSEB_IP2             10
+#define         CAUSEF_IP2             (_ULCAST_(1)   << 10)
+#define         CAUSEB_IP3             11
+#define         CAUSEF_IP3             (_ULCAST_(1)   << 11)
+#define         CAUSEB_IP4             12
+#define         CAUSEF_IP4             (_ULCAST_(1)   << 12)
+#define         CAUSEB_IP5             13
+#define         CAUSEF_IP5             (_ULCAST_(1)   << 13)
+#define         CAUSEB_IP6             14
+#define         CAUSEF_IP6             (_ULCAST_(1)   << 14)
+#define         CAUSEB_IP7             15
+#define         CAUSEF_IP7             (_ULCAST_(1)   << 15)
+#define         CAUSEB_IV              23
+#define         CAUSEF_IV              (_ULCAST_(1)   << 23)
+#define         CAUSEB_PCI             26
+#define         CAUSEF_PCI             (_ULCAST_(1)   << 26)
+#define         CAUSEB_CE              28
+#define         CAUSEF_CE              (_ULCAST_(3)   << 28)
+#define         CAUSEB_TI              30
+#define         CAUSEF_TI              (_ULCAST_(1)   << 30)
+#define         CAUSEB_BD              31
+#define         CAUSEF_BD              (_ULCAST_(1)   << 31)
 
 /*
  * Bits in the coprocessor 0 config register.
 #define CONF_BE                        (_ULCAST_(1) << 15)
 
 /* Bits common to various processors.  */
-#define CONF_CU                        (_ULCAST_(1) <<  3)
-#define CONF_DB                        (_ULCAST_(1) <<  4)
-#define CONF_IB                        (_ULCAST_(1) <<  5)
-#define CONF_DC                        (_ULCAST_(7) <<  6)
-#define CONF_IC                        (_ULCAST_(7) <<  9)
+#define CONF_CU                        (_ULCAST_(1) <<  3)
+#define CONF_DB                        (_ULCAST_(1) <<  4)
+#define CONF_IB                        (_ULCAST_(1) <<  5)
+#define CONF_DC                        (_ULCAST_(7) <<  6)
+#define CONF_IC                        (_ULCAST_(7) <<  9)
 #define CONF_EB                        (_ULCAST_(1) << 13)
 #define CONF_EM                        (_ULCAST_(1) << 14)
 #define CONF_SM                        (_ULCAST_(1) << 16)
 #define CONF_EC                        (_ULCAST_(7) << 28)
 #define CONF_CM                        (_ULCAST_(1) << 31)
 
-/* Bits specific to the R4xx0.  */
+/* Bits specific to the R4xx0. */
 #define R4K_CONF_SW            (_ULCAST_(1) << 20)
 #define R4K_CONF_SS            (_ULCAST_(1) << 21)
 #define R4K_CONF_SB            (_ULCAST_(3) << 22)
 
-/* Bits specific to the R5000.  */
+/* Bits specific to the R5000. */
 #define R5K_CONF_SE            (_ULCAST_(1) << 12)
 #define R5K_CONF_SS            (_ULCAST_(3) << 20)
 
-/* Bits specific to the RM7000.  */
-#define RM7K_CONF_SE           (_ULCAST_(1) <<  3)
+/* Bits specific to the RM7000.         */
+#define RM7K_CONF_SE           (_ULCAST_(1) <<  3)
 #define RM7K_CONF_TE           (_ULCAST_(1) << 12)
 #define RM7K_CONF_CLK          (_ULCAST_(1) << 16)
 #define RM7K_CONF_TC           (_ULCAST_(1) << 17)
 #define RM7K_CONF_SI           (_ULCAST_(3) << 20)
 #define RM7K_CONF_SC           (_ULCAST_(1) << 31)
 
-/* Bits specific to the R10000.  */
-#define R10K_CONF_DN           (_ULCAST_(3) <<  3)
-#define R10K_CONF_CT           (_ULCAST_(1) <<  5)
-#define R10K_CONF_PE           (_ULCAST_(1) <<  6)
-#define R10K_CONF_PM           (_ULCAST_(3) <<  7)
-#define R10K_CONF_EC           (_ULCAST_(15)<<  9)
+/* Bits specific to the R10000.         */
+#define R10K_CONF_DN           (_ULCAST_(3) <<  3)
+#define R10K_CONF_CT           (_ULCAST_(1) <<  5)
+#define R10K_CONF_PE           (_ULCAST_(1) <<  6)
+#define R10K_CONF_PM           (_ULCAST_(3) <<  7)
+#define R10K_CONF_EC           (_ULCAST_(15)<<  9)
 #define R10K_CONF_SB           (_ULCAST_(1) << 13)
 #define R10K_CONF_SK           (_ULCAST_(1) << 14)
 #define R10K_CONF_SS           (_ULCAST_(7) << 16)
 #define R10K_CONF_DC           (_ULCAST_(7) << 26)
 #define R10K_CONF_IC           (_ULCAST_(7) << 29)
 
-/* Bits specific to the VR41xx.  */
+/* Bits specific to the VR41xx.         */
 #define VR41_CONF_CS           (_ULCAST_(1) << 12)
 #define VR41_CONF_P4K          (_ULCAST_(1) << 13)
 #define VR41_CONF_BP           (_ULCAST_(1) << 16)
 #define VR41_CONF_M16          (_ULCAST_(1) << 20)
 #define VR41_CONF_AD           (_ULCAST_(1) << 23)
 
-/* Bits specific to the R30xx.  */
+/* Bits specific to the R30xx. */
 #define R30XX_CONF_FDM         (_ULCAST_(1) << 19)
 #define R30XX_CONF_REV         (_ULCAST_(1) << 22)
 #define R30XX_CONF_AC          (_ULCAST_(1) << 23)
 #define TX49_CONF_HALT         (_ULCAST_(1) << 18)
 #define TX49_CONF_CWFON                (_ULCAST_(1) << 27)
 
-/* Bits specific to the MIPS32/64 PRA.  */
-#define MIPS_CONF_MT           (_ULCAST_(7) <<  7)
+/* Bits specific to the MIPS32/64 PRA. */
+#define MIPS_CONF_MT           (_ULCAST_(7) <<  7)
 #define MIPS_CONF_AR           (_ULCAST_(7) << 10)
 #define MIPS_CONF_AT           (_ULCAST_(3) << 13)
 #define MIPS_CONF_M            (_ULCAST_(1) << 31)
 /*
  * Bits in the MIPS32/64 PRA coprocessor 0 config registers 1 and above.
  */
-#define MIPS_CONF1_FP          (_ULCAST_(1) <<  0)
-#define MIPS_CONF1_EP          (_ULCAST_(1) <<  1)
-#define MIPS_CONF1_CA          (_ULCAST_(1) <<  2)
-#define MIPS_CONF1_WR          (_ULCAST_(1) <<  3)
-#define MIPS_CONF1_PC          (_ULCAST_(1) <<  4)
-#define MIPS_CONF1_MD          (_ULCAST_(1) <<  5)
-#define MIPS_CONF1_C2          (_ULCAST_(1) <<  6)
-#define MIPS_CONF1_DA          (_ULCAST_(7) <<  7)
+#define MIPS_CONF1_FP          (_ULCAST_(1) <<  0)
+#define MIPS_CONF1_EP          (_ULCAST_(1) <<  1)
+#define MIPS_CONF1_CA          (_ULCAST_(1) <<  2)
+#define MIPS_CONF1_WR          (_ULCAST_(1) <<  3)
+#define MIPS_CONF1_PC          (_ULCAST_(1) <<  4)
+#define MIPS_CONF1_MD          (_ULCAST_(1) <<  5)
+#define MIPS_CONF1_C2          (_ULCAST_(1) <<  6)
+#define MIPS_CONF1_DA          (_ULCAST_(7) <<  7)
 #define MIPS_CONF1_DL          (_ULCAST_(7) << 10)
 #define MIPS_CONF1_DS          (_ULCAST_(7) << 13)
 #define MIPS_CONF1_IA          (_ULCAST_(7) << 16)
 #define MIPS_CONF1_IS          (_ULCAST_(7) << 22)
 #define MIPS_CONF1_TLBS                (_ULCAST_(63)<< 25)
 
-#define MIPS_CONF2_SA          (_ULCAST_(15)<<  0)
-#define MIPS_CONF2_SL          (_ULCAST_(15)<<  4)
-#define MIPS_CONF2_SS          (_ULCAST_(15)<<  8)
+#define MIPS_CONF2_SA          (_ULCAST_(15)<<  0)
+#define MIPS_CONF2_SL          (_ULCAST_(15)<<  4)
+#define MIPS_CONF2_SS          (_ULCAST_(15)<<  8)
 #define MIPS_CONF2_SU          (_ULCAST_(15)<< 12)
 #define MIPS_CONF2_TA          (_ULCAST_(15)<< 16)
 #define MIPS_CONF2_TL          (_ULCAST_(15)<< 20)
 #define MIPS_CONF2_TS          (_ULCAST_(15)<< 24)
 #define MIPS_CONF2_TU          (_ULCAST_(7) << 28)
 
-#define MIPS_CONF3_TL          (_ULCAST_(1) <<  0)
-#define MIPS_CONF3_SM          (_ULCAST_(1) <<  1)
-#define MIPS_CONF3_MT          (_ULCAST_(1) <<  2)
-#define MIPS_CONF3_SP          (_ULCAST_(1) <<  4)
-#define MIPS_CONF3_VINT                (_ULCAST_(1) <<  5)
-#define MIPS_CONF3_VEIC                (_ULCAST_(1) <<  6)
-#define MIPS_CONF3_LPA         (_ULCAST_(1) <<  7)
+#define MIPS_CONF3_TL          (_ULCAST_(1) <<  0)
+#define MIPS_CONF3_SM          (_ULCAST_(1) <<  1)
+#define MIPS_CONF3_MT          (_ULCAST_(1) <<  2)
+#define MIPS_CONF3_SP          (_ULCAST_(1) <<  4)
+#define MIPS_CONF3_VINT                (_ULCAST_(1) <<  5)
+#define MIPS_CONF3_VEIC                (_ULCAST_(1) <<  6)
+#define MIPS_CONF3_LPA         (_ULCAST_(1) <<  7)
 #define MIPS_CONF3_DSP         (_ULCAST_(1) << 10)
 #define MIPS_CONF3_DSP2P       (_ULCAST_(1) << 11)
 #define MIPS_CONF3_RXI         (_ULCAST_(1) << 12)
 #define MIPS_CONF3_ULRI                (_ULCAST_(1) << 13)
+#define MIPS_CONF3_ISA         (_ULCAST_(3) << 14)
+#define MIPS_CONF3_VZ          (_ULCAST_(1) << 23)
 
 #define MIPS_CONF4_MMUSIZEEXT  (_ULCAST_(255) << 0)
 #define MIPS_CONF4_MMUEXTDEF   (_ULCAST_(3) << 14)
 #ifndef __ASSEMBLY__
 
 /*
- * Functions to access the R10000 performance counters.  These are basically
+ * Functions to access the R10000 performance counters.         These are basically
  * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
  * performance counter number encoded into bits 1 ... 5 of the instruction.
  * Only performance counters 0 to 1 actually exist, so for a non-R10000 aware
        unsigned int __res;                                     \
        __asm__ __volatile__(                                   \
        "mfpc\t%0, %1"                                          \
-        : "=r" (__res)                                         \
+       : "=r" (__res)                                          \
        : "i" (counter));                                       \
                                                                \
-        __res;                                                 \
+       __res;                                                  \
 })
 
-#define write_r10k_perf_cntr(counter,val)                       \
+#define write_r10k_perf_cntr(counter,val)                      \
 do {                                                           \
        __asm__ __volatile__(                                   \
        "mtpc\t%0, %1"                                          \
@@ -651,13 +653,13 @@ do {                                                              \
        unsigned int __res;                                     \
        __asm__ __volatile__(                                   \
        "mfps\t%0, %1"                                          \
-        : "=r" (__res)                                         \
+       : "=r" (__res)                                          \
        : "i" (counter));                                       \
                                                                \
-        __res;                                                 \
+       __res;                                                  \
 })
 
-#define write_r10k_perf_cntl(counter,val)                       \
+#define write_r10k_perf_cntl(counter,val)                      \
 do {                                                           \
        __asm__ __volatile__(                                   \
        "mtps\t%0, %1"                                          \
@@ -847,20 +849,20 @@ do {                                                                      \
 #define write_c0_context(val)  __write_ulong_c0_register($4, 0, val)
 
 #define read_c0_userlocal()    __read_ulong_c0_register($4, 2)
-#define write_c0_userlocal(val)        __write_ulong_c0_register($4, 2, val)
+#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
 
 #define read_c0_pagemask()     __read_32bit_c0_register($5, 0)
 #define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)
 
 #define read_c0_pagegrain()    __read_32bit_c0_register($5, 1)
-#define write_c0_pagegrain(val)        __write_32bit_c0_register($5, 1, val)
+#define write_c0_pagegrain(val) __write_32bit_c0_register($5, 1, val)
 
 #define read_c0_wired()                __read_32bit_c0_register($6, 0)
 #define write_c0_wired(val)    __write_32bit_c0_register($6, 0, val)
 
 #define read_c0_info()         __read_32bit_c0_register($7, 0)
 
-#define read_c0_cache()                __read_32bit_c0_register($7, 0) /* TX39xx */
+#define read_c0_cache()                __read_32bit_c0_register($7, 0) /* TX39xx */
 #define write_c0_cache(val)    __write_32bit_c0_register($7, 0, val)
 
 #define read_c0_badvaddr()     __read_ulong_c0_register($8, 0)
@@ -975,7 +977,7 @@ do {                                                                        \
 #define write_c0_intcontrol(val) __write_32bit_c0_ctrl_register($20, val)
 
 #define read_c0_framemask()    __read_32bit_c0_register($21, 0)
-#define write_c0_framemask(val)        __write_32bit_c0_register($21, 0, val)
+#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val)
 
 #define read_c0_diag()         __read_32bit_c0_register($22, 0)
 #define write_c0_diag(val)     __write_32bit_c0_register($22, 0, val)
@@ -1005,27 +1007,27 @@ do {                                                                    \
  * MIPS32 / MIPS64 performance counters
  */
 #define read_c0_perfctrl0()    __read_32bit_c0_register($25, 0)
-#define write_c0_perfctrl0(val)        __write_32bit_c0_register($25, 0, val)
+#define write_c0_perfctrl0(val) __write_32bit_c0_register($25, 0, val)
 #define read_c0_perfcntr0()    __read_32bit_c0_register($25, 1)
-#define write_c0_perfcntr0(val)        __write_32bit_c0_register($25, 1, val)
+#define write_c0_perfcntr0(val) __write_32bit_c0_register($25, 1, val)
 #define read_c0_perfcntr0_64() __read_64bit_c0_register($25, 1)
 #define write_c0_perfcntr0_64(val) __write_64bit_c0_register($25, 1, val)
 #define read_c0_perfctrl1()    __read_32bit_c0_register($25, 2)
-#define write_c0_perfctrl1(val)        __write_32bit_c0_register($25, 2, val)
+#define write_c0_perfctrl1(val) __write_32bit_c0_register($25, 2, val)
 #define read_c0_perfcntr1()    __read_32bit_c0_register($25, 3)
-#define write_c0_perfcntr1(val)        __write_32bit_c0_register($25, 3, val)
+#define write_c0_perfcntr1(val) __write_32bit_c0_register($25, 3, val)
 #define read_c0_perfcntr1_64() __read_64bit_c0_register($25, 3)
 #define write_c0_perfcntr1_64(val) __write_64bit_c0_register($25, 3, val)
 #define read_c0_perfctrl2()    __read_32bit_c0_register($25, 4)
-#define write_c0_perfctrl2(val)        __write_32bit_c0_register($25, 4, val)
+#define write_c0_perfctrl2(val) __write_32bit_c0_register($25, 4, val)
 #define read_c0_perfcntr2()    __read_32bit_c0_register($25, 5)
-#define write_c0_perfcntr2(val)        __write_32bit_c0_register($25, 5, val)
+#define write_c0_perfcntr2(val) __write_32bit_c0_register($25, 5, val)
 #define read_c0_perfcntr2_64() __read_64bit_c0_register($25, 5)
 #define write_c0_perfcntr2_64(val) __write_64bit_c0_register($25, 5, val)
 #define read_c0_perfctrl3()    __read_32bit_c0_register($25, 6)
-#define write_c0_perfctrl3(val)        __write_32bit_c0_register($25, 6, val)
+#define write_c0_perfctrl3(val) __write_32bit_c0_register($25, 6, val)
 #define read_c0_perfcntr3()    __read_32bit_c0_register($25, 7)
-#define write_c0_perfcntr3(val)        __write_32bit_c0_register($25, 7, val)
+#define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val)
 #define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7)
 #define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val)
 
@@ -1033,12 +1035,12 @@ do {                                                                    \
 #define write_c0_ecc(val)      __write_32bit_c0_register($26, 0, val)
 
 #define read_c0_derraddr0()    __read_ulong_c0_register($26, 1)
-#define write_c0_derraddr0(val)        __write_ulong_c0_register($26, 1, val)
+#define write_c0_derraddr0(val) __write_ulong_c0_register($26, 1, val)
 
 #define read_c0_cacheerr()     __read_32bit_c0_register($27, 0)
 
 #define read_c0_derraddr1()    __read_ulong_c0_register($27, 1)
-#define write_c0_derraddr1(val)        __write_ulong_c0_register($27, 1, val)
+#define write_c0_derraddr1(val) __write_ulong_c0_register($27, 1, val)
 
 #define read_c0_taglo()                __read_32bit_c0_register($28, 0)
 #define write_c0_taglo(val)    __write_32bit_c0_register($28, 0, val)
@@ -1083,9 +1085,9 @@ do {                                                                      \
 #define write_c0_cvmctl(val)   __write_64bit_c0_register($9, 7, val)
 
 #define read_c0_cvmmemctl()    __read_64bit_c0_register($11, 7)
-#define write_c0_cvmmemctl(val)        __write_64bit_c0_register($11, 7, val)
+#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
 /*
- * The cacheerr registers are not standardized.  On OCTEON, they are
+ * The cacheerr registers are not standardized.         On OCTEON, they are
  * 64 bits wide.
  */
 #define read_octeon_c0_icacheerr()     __read_64bit_c0_register($27, 0)
@@ -1142,48 +1144,42 @@ do {                                                                    \
 /*
  * Macros to access the floating point coprocessor control registers
  */
-#define read_32bit_cp1_register(source)                         \
-({ int __res;                                                   \
-       __asm__ __volatile__(                                   \
-       ".set\tpush\n\t"                                        \
-       ".set\treorder\n\t"                                     \
-       /* gas fails to assemble cfc1 for some archs (octeon).*/ \
-       ".set\tmips1\n\t"                                       \
-        "cfc1\t%0,"STR(source)"\n\t"                            \
-       ".set\tpop"                                             \
-        : "=r" (__res));                                        \
-        __res;})
+#define read_32bit_cp1_register(source)                                        \
+({                                                                     \
+       int __res;                                                      \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "       .set    push                                    \n"     \
+       "       .set    reorder                                 \n"     \
+       "       # gas fails to assemble cfc1 for some archs,    \n"     \
+       "       # like Octeon.                                  \n"     \
+       "       .set    mips1                                   \n"     \
+       "       cfc1    %0,"STR(source)"                        \n"     \
+       "       .set    pop                                     \n"     \
+       : "=r" (__res));                                                \
+       __res;                                                          \
+})
 
+#ifdef HAVE_AS_DSP
 #define rddsp(mask)                                                    \
 ({                                                                     \
-       unsigned int __res;                                             \
+       unsigned int __dspctl;                                          \
                                                                        \
        __asm__ __volatile__(                                           \
-       "       .set    push                            \n"             \
-       "       .set    noat                            \n"             \
-       "       # rddsp $1, %x1                         \n"             \
-       "       .word   0x7c000cb8 | (%x1 << 16)        \n"             \
-       "       move    %0, $1                          \n"             \
-       "       .set    pop                             \n"             \
-       : "=r" (__res)                                                  \
+       "       rddsp   %0, %x1                                 \n"     \
+       : "=r" (__dspctl)                                               \
        : "i" (mask));                                                  \
-       __res;                                                          \
+       __dspctl;                                                       \
 })
 
 #define wrdsp(val, mask)                                               \
 do {                                                                   \
        __asm__ __volatile__(                                           \
-       "       .set    push                                    \n"     \
-       "       .set    noat                                    \n"     \
-       "       move    $1, %0                                  \n"     \
-       "       # wrdsp $1, %x1                                 \n"     \
-       "       .word   0x7c2004f8 | (%x1 << 11)                \n"     \
-       "       .set    pop                                     \n"     \
-        :                                                              \
+       "       wrdsp   %0, %x1                                 \n"     \
+       :                                                               \
        : "r" (val), "i" (mask));                                       \
 } while (0)
 
-#if 0  /* Need DSP ASE capable assembler ... */
 #define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
 #define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
 #define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
@@ -1206,230 +1202,177 @@ do {                                                                  \
 
 #else
 
-#define mfhi0()                                                                \
-({                                                                     \
-       unsigned long __treg;                                           \
-                                                                       \
-       __asm__ __volatile__(                                           \
-       "       .set    push                    \n"                     \
-       "       .set    noat                    \n"                     \
-       "       # mfhi  %0, $ac0                \n"                     \
-       "       .word   0x00000810              \n"                     \
-       "       move    %0, $1                  \n"                     \
-       "       .set    pop                     \n"                     \
-       : "=r" (__treg));                                               \
-       __treg;                                                         \
-})
-
-#define mfhi1()                                                                \
-({                                                                     \
-       unsigned long __treg;                                           \
-                                                                       \
-       __asm__ __volatile__(                                           \
-       "       .set    push                    \n"                     \
-       "       .set    noat                    \n"                     \
-       "       # mfhi  %0, $ac1                \n"                     \
-       "       .word   0x00200810              \n"                     \
-       "       move    %0, $1                  \n"                     \
-       "       .set    pop                     \n"                     \
-       : "=r" (__treg));                                               \
-       __treg;                                                         \
-})
-
-#define mfhi2()                                                                \
-({                                                                     \
-       unsigned long __treg;                                           \
-                                                                       \
-       __asm__ __volatile__(                                           \
-       "       .set    push                    \n"                     \
-       "       .set    noat                    \n"                     \
-       "       # mfhi  %0, $ac2                \n"                     \
-       "       .word   0x00400810              \n"                     \
-       "       move    %0, $1                  \n"                     \
-       "       .set    pop                     \n"                     \
-       : "=r" (__treg));                                               \
-       __treg;                                                         \
-})
-
-#define mfhi3()                                                                \
-({                                                                     \
-       unsigned long __treg;                                           \
-                                                                       \
-       __asm__ __volatile__(                                           \
-       "       .set    push                    \n"                     \
-       "       .set    noat                    \n"                     \
-       "       # mfhi  %0, $ac3                \n"                     \
-       "       .word   0x00600810              \n"                     \
-       "       move    %0, $1                  \n"                     \
-       "       .set    pop                     \n"                     \
-       : "=r" (__treg));                                               \
-       __treg;                                                         \
-})
-
-#define mflo0()                                                                \
-({                                                                     \
-       unsigned long __treg;                                           \
-                                                                       \
-       __asm__ __volatile__(                                           \
-       "       .set    push                    \n"                     \
-       "       .set    noat                    \n"                     \
-       "       # mflo  %0, $ac0                \n"                     \
-       "       .word   0x00000812              \n"                     \
-       "       move    %0, $1                  \n"                     \
-       "       .set    pop                     \n"                     \
-       : "=r" (__treg));                                               \
-       __treg;                                                         \
-})
-
-#define mflo1()                                                                \
-({                                                                     \
-       unsigned long __treg;                                           \
-                                                                       \
-       __asm__ __volatile__(                                           \
-       "       .set    push                    \n"                     \
-       "       .set    noat                    \n"                     \
-       "       # mflo  %0, $ac1                \n"                     \
-       "       .word   0x00200812              \n"                     \
-       "       move    %0, $1                  \n"                     \
-       "       .set    pop                     \n"                     \
-       : "=r" (__treg));                                               \
-       __treg;                                                         \
-})
-
-#define mflo2()                                                                \
-({                                                                     \
-       unsigned long __treg;                                           \
-                                                                       \
-       __asm__ __volatile__(                                           \
-       "       .set    push                    \n"                     \
-       "       .set    noat                    \n"                     \
-       "       # mflo  %0, $ac2                \n"                     \
-       "       .word   0x00400812              \n"                     \
-       "       move    %0, $1                  \n"                     \
-       "       .set    pop                     \n"                     \
-       : "=r" (__treg));                                               \
-       __treg;                                                         \
-})
-
-#define mflo3()                                                                \
+#ifdef CONFIG_CPU_MICROMIPS
+#define rddsp(mask)                                                    \
 ({                                                                     \
-       unsigned long __treg;                                           \
+       unsigned int __res;                                             \
                                                                        \
        __asm__ __volatile__(                                           \
-       "       .set    push                    \n"                     \
-       "       .set    noat                    \n"                     \
-       "       # mflo  %0, $ac3                \n"                     \
-       "       .word   0x00600812              \n"                     \
-       "       move    %0, $1                  \n"                     \
-       "       .set    pop                     \n"                     \
-       : "=r" (__treg));                                               \
-       __treg;                                                         \
-})
-
-#define mthi0(x)                                                       \
-do {                                                                   \
-       __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noat                                    \n"     \
-       "       move    $1, %0                                  \n"     \
-       "       # mthi  $1, $ac0                                \n"     \
-       "       .word   0x00200011                              \n"     \
+       "       # rddsp $1, %x1                                 \n"     \
+       "       .hword  ((0x0020067c | (%x1 << 14)) >> 16)      \n"     \
+       "       .hword  ((0x0020067c | (%x1 << 14)) & 0xffff)   \n"     \
+       "       move    %0, $1                                  \n"     \
        "       .set    pop                                     \n"     \
-       :                                                               \
-       : "r" (x));                                                     \
-} while (0)
+       : "=r" (__res)                                                  \
+       : "i" (mask));                                                  \
+       __res;                                                          \
+})
 
-#define mthi1(x)                                                       \
+#define wrdsp(val, mask)                                               \
 do {                                                                   \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noat                                    \n"     \
        "       move    $1, %0                                  \n"     \
-       "       # mthi  $1, $ac1                                \n"     \
-       "       .word   0x00200811                              \n"     \
+       "       # wrdsp $1, %x1                                 \n"     \
+       "       .hword  ((0x0020167c | (%x1 << 14)) >> 16)      \n"     \
+       "       .hword  ((0x0020167c | (%x1 << 14)) & 0xffff)   \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
-       : "r" (x));                                                     \
+       : "r" (val), "i" (mask));                                       \
 } while (0)
 
-#define mthi2(x)                                                       \
-do {                                                                   \
+#define _umips_dsp_mfxxx(ins)                                          \
+({                                                                     \
+       unsigned long __treg;                                           \
+                                                                       \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noat                                    \n"     \
-       "       move    $1, %0                                  \n"     \
-       "       # mthi  $1, $ac2                                \n"     \
-       "       .word   0x00201011                              \n"     \
+       "       .hword  0x0001                                  \n"     \
+       "       .hword  %x1                                     \n"     \
+       "       move    %0, $1                                  \n"     \
        "       .set    pop                                     \n"     \
-       :                                                               \
-       : "r" (x));                                                     \
-} while (0)
+       : "=r" (__treg)                                                 \
+       : "i" (ins));                                                   \
+       __treg;                                                         \
+})
 
-#define mthi3(x)                                                       \
+#define _umips_dsp_mtxxx(val, ins)                                     \
 do {                                                                   \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noat                                    \n"     \
        "       move    $1, %0                                  \n"     \
-       "       # mthi  $1, $ac3                                \n"     \
-       "       .word   0x00201811                              \n"     \
+       "       .hword  0x0001                                  \n"     \
+       "       .hword  %x1                                     \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
-       : "r" (x));                                                     \
+       : "r" (val), "i" (ins));                                        \
 } while (0)
 
-#define mtlo0(x)                                                       \
-do {                                                                   \
+#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c)
+#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c)
+
+#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c))
+#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c))
+
+#define mflo0() _umips_dsp_mflo(0)
+#define mflo1() _umips_dsp_mflo(1)
+#define mflo2() _umips_dsp_mflo(2)
+#define mflo3() _umips_dsp_mflo(3)
+
+#define mfhi0() _umips_dsp_mfhi(0)
+#define mfhi1() _umips_dsp_mfhi(1)
+#define mfhi2() _umips_dsp_mfhi(2)
+#define mfhi3() _umips_dsp_mfhi(3)
+
+#define mtlo0(x) _umips_dsp_mtlo(x, 0)
+#define mtlo1(x) _umips_dsp_mtlo(x, 1)
+#define mtlo2(x) _umips_dsp_mtlo(x, 2)
+#define mtlo3(x) _umips_dsp_mtlo(x, 3)
+
+#define mthi0(x) _umips_dsp_mthi(x, 0)
+#define mthi1(x) _umips_dsp_mthi(x, 1)
+#define mthi2(x) _umips_dsp_mthi(x, 2)
+#define mthi3(x) _umips_dsp_mthi(x, 3)
+
+#else  /* !CONFIG_CPU_MICROMIPS */
+#define rddsp(mask)                                                    \
+({                                                                     \
+       unsigned int __res;                                             \
+                                                                       \
        __asm__ __volatile__(                                           \
-       "       .set    push                                    \n"     \
-       "       .set    noat                                    \n"     \
-       "       move    $1, %0                                  \n"     \
-       "       # mtlo  $1, $ac0                                \n"     \
-       "       .word   0x00200013                              \n"     \
-       "       .set    pop                                     \n"     \
-       :                                                               \
-       : "r" (x));                                                     \
-} while (0)
+       "       .set    push                            \n"             \
+       "       .set    noat                            \n"             \
+       "       # rddsp $1, %x1                         \n"             \
+       "       .word   0x7c000cb8 | (%x1 << 16)        \n"             \
+       "       move    %0, $1                          \n"             \
+       "       .set    pop                             \n"             \
+       : "=r" (__res)                                                  \
+       : "i" (mask));                                                  \
+       __res;                                                          \
+})
 
-#define mtlo1(x)                                                       \
+#define wrdsp(val, mask)                                               \
 do {                                                                   \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noat                                    \n"     \
        "       move    $1, %0                                  \n"     \
-       "       # mtlo  $1, $ac1                                \n"     \
-       "       .word   0x00200813                              \n"     \
+       "       # wrdsp $1, %x1                                 \n"     \
+       "       .word   0x7c2004f8 | (%x1 << 11)                \n"     \
        "       .set    pop                                     \n"     \
-                                                                     \
-       : "r" (x));                                                     \
+        :                                                              \
+       : "r" (val), "i" (mask));                                       \
 } while (0)
 
-#define mtlo2(x)                                                       \
-do {                                                                   \
+#define _dsp_mfxxx(ins)                                                        \
+({                                                                     \
+       unsigned long __treg;                                           \
+                                                                       \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noat                                    \n"     \
-       "       move    $1, %0                                  \n"     \
-       "       # mtlo  $1, $ac2                                \n"     \
-       "       .word   0x00201013                              \n"     \
+       "       .word   (0x00000810 | %1)                       \n"     \
+       "       move    %0, $1                                  \n"     \
        "       .set    pop                                     \n"     \
-       :                                                               \
-       : "r" (x));                                                     \
-} while (0)
+       : "=r" (__treg)                                                 \
+       : "i" (ins));                                                   \
+       __treg;                                                         \
+})
 
-#define mtlo3(x)                                                       \
+#define _dsp_mtxxx(val, ins)                                           \
 do {                                                                   \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noat                                    \n"     \
        "       move    $1, %0                                  \n"     \
-       "       # mtlo  $1, $ac3                                \n"     \
-       "       .word   0x00201813                              \n"     \
+       "       .word   (0x00200011 | %1)                       \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
-       : "r" (x));                                                     \
+       : "r" (val), "i" (ins));                                        \
 } while (0)
 
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
+
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
+
+#define mflo0() _dsp_mflo(0)
+#define mflo1() _dsp_mflo(1)
+#define mflo2() _dsp_mflo(2)
+#define mflo3() _dsp_mflo(3)
+
+#define mfhi0() _dsp_mfhi(0)
+#define mfhi1() _dsp_mfhi(1)
+#define mfhi2() _dsp_mfhi(2)
+#define mfhi3() _dsp_mfhi(3)
+
+#define mtlo0(x) _dsp_mtlo(x, 0)
+#define mtlo1(x) _dsp_mtlo(x, 1)
+#define mtlo2(x) _dsp_mtlo(x, 2)
+#define mtlo3(x) _dsp_mtlo(x, 3)
+
+#define mthi0(x) _dsp_mthi(x, 0)
+#define mthi1(x) _dsp_mthi(x, 1)
+#define mthi2(x) _dsp_mthi(x, 2)
+#define mthi3(x) _dsp_mthi(x, 3)
+
+#endif /* CONFIG_CPU_MICROMIPS */
 #endif
 
 /*
index 45cfa1a..e81d719 100644 (file)
@@ -77,7 +77,7 @@ extern unsigned long pgd_current[];
 #define ASID_INC       0x1
 extern unsigned long smtc_asid_mask;
 #define ASID_MASK      (smtc_asid_mask)
-#define        HW_ASID_MASK    0xff
+#define HW_ASID_MASK   0xff
 /* End SMTC/34K debug hack */
 #else /* FIXME: not correct for R6000 */
 
@@ -140,7 +140,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                             struct task_struct *tsk)
+                            struct task_struct *tsk)
 {
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
@@ -238,7 +238,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
        }
        /* See comments for similar code above */
        write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-                        cpu_asid(cpu, next));
+                        cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
 #else
index d92406a..ff7f074 100644 (file)
  * Register offset addresses
  *****************************************************************************/
 
-#define MSC01_IC_RST_OFS     0x00008    /* Software reset              */
-#define MSC01_IC_ENAL_OFS    0x00100    /* Int_in enable mask 31:0     */
-#define MSC01_IC_ENAH_OFS    0x00108    /* Int_in enable mask 63:32    */
-#define MSC01_IC_DISL_OFS    0x00120    /* Int_in disable mask 31:0    */
-#define MSC01_IC_DISH_OFS    0x00128    /* Int_in disable mask 63:32   */
-#define MSC01_IC_ISBL_OFS    0x00140    /* Raw int_in 31:0             */
-#define MSC01_IC_ISBH_OFS    0x00148    /* Raw int_in 63:32            */
-#define MSC01_IC_ISAL_OFS    0x00160    /* Masked int_in 31:0          */
-#define MSC01_IC_ISAH_OFS    0x00168    /* Masked int_in 63:32         */
-#define MSC01_IC_LVL_OFS     0x00180    /* Disable priority int_out    */
-#define MSC01_IC_RAMW_OFS    0x00180    /* Shadow set RAM (EI)        */
-#define MSC01_IC_OSB_OFS     0x00188    /* Raw int_out                 */
-#define MSC01_IC_OSA_OFS     0x00190    /* Masked int_out              */
-#define MSC01_IC_GENA_OFS    0x00198    /* Global HW int enable        */
-#define MSC01_IC_BASE_OFS    0x001a0    /* Base address of IC_VEC      */
-#define MSC01_IC_VEC_OFS     0x001b0    /* Active int's vector address */
-#define MSC01_IC_EOI_OFS     0x001c0    /* Enable lower level ints     */
-#define MSC01_IC_CFG_OFS     0x001c8    /* Configuration register      */
-#define MSC01_IC_TRLD_OFS    0x001d0    /* Interval timer reload val   */
-#define MSC01_IC_TVAL_OFS    0x001e0    /* Interval timer current val  */
-#define MSC01_IC_TCFG_OFS    0x001f0    /* Interval timer config       */
-#define MSC01_IC_SUP_OFS     0x00200    /* Set up int_in line 0        */
-#define MSC01_IC_ENA_OFS     0x00800    /* Int_in enable mask 63:0     */
-#define MSC01_IC_DIS_OFS     0x00820    /* Int_in disable mask 63:0    */
-#define MSC01_IC_ISB_OFS     0x00840    /* Raw int_in 63:0             */
-#define MSC01_IC_ISA_OFS     0x00860    /* Masked int_in 63:0          */
+#define MSC01_IC_RST_OFS     0x00008   /* Software reset              */
+#define MSC01_IC_ENAL_OFS    0x00100   /* Int_in enable mask 31:0     */
+#define MSC01_IC_ENAH_OFS    0x00108   /* Int_in enable mask 63:32    */
+#define MSC01_IC_DISL_OFS    0x00120   /* Int_in disable mask 31:0    */
+#define MSC01_IC_DISH_OFS    0x00128   /* Int_in disable mask 63:32   */
+#define MSC01_IC_ISBL_OFS    0x00140   /* Raw int_in 31:0             */
+#define MSC01_IC_ISBH_OFS    0x00148   /* Raw int_in 63:32            */
+#define MSC01_IC_ISAL_OFS    0x00160   /* Masked int_in 31:0          */
+#define MSC01_IC_ISAH_OFS    0x00168   /* Masked int_in 63:32         */
+#define MSC01_IC_LVL_OFS     0x00180   /* Disable priority int_out    */
+#define MSC01_IC_RAMW_OFS    0x00180   /* Shadow set RAM (EI)         */
+#define MSC01_IC_OSB_OFS     0x00188   /* Raw int_out                 */
+#define MSC01_IC_OSA_OFS     0x00190   /* Masked int_out              */
+#define MSC01_IC_GENA_OFS    0x00198   /* Global HW int enable        */
+#define MSC01_IC_BASE_OFS    0x001a0   /* Base address of IC_VEC      */
+#define MSC01_IC_VEC_OFS     0x001b0   /* Active int's vector address */
+#define MSC01_IC_EOI_OFS     0x001c0   /* Enable lower level ints     */
+#define MSC01_IC_CFG_OFS     0x001c8   /* Configuration register      */
+#define MSC01_IC_TRLD_OFS    0x001d0   /* Interval timer reload val   */
+#define MSC01_IC_TVAL_OFS    0x001e0   /* Interval timer current val  */
+#define MSC01_IC_TCFG_OFS    0x001f0   /* Interval timer config       */
+#define MSC01_IC_SUP_OFS     0x00200   /* Set up int_in line 0        */
+#define MSC01_IC_ENA_OFS     0x00800   /* Int_in enable mask 63:0     */
+#define MSC01_IC_DIS_OFS     0x00820   /* Int_in disable mask 63:0    */
+#define MSC01_IC_ISB_OFS     0x00840   /* Raw int_in 63:0             */
+#define MSC01_IC_ISA_OFS     0x00860   /* Masked int_in 63:0          */
 
 /*****************************************************************************
  * Register field encodings
  *****************************************************************************/
 
-#define MSC01_IC_RST_RST_SHF      0
-#define MSC01_IC_RST_RST_MSK      0x00000001
-#define MSC01_IC_RST_RST_BIT      MSC01_IC_RST_RST_MSK
-#define MSC01_IC_LVL_LVL_SHF      0
-#define MSC01_IC_LVL_LVL_MSK      0x000000ff
-#define MSC01_IC_LVL_SPUR_SHF     16
-#define MSC01_IC_LVL_SPUR_MSK     0x00010000
-#define MSC01_IC_LVL_SPUR_BIT     MSC01_IC_LVL_SPUR_MSK
+#define MSC01_IC_RST_RST_SHF     0
+#define MSC01_IC_RST_RST_MSK     0x00000001
+#define MSC01_IC_RST_RST_BIT     MSC01_IC_RST_RST_MSK
+#define MSC01_IC_LVL_LVL_SHF     0
+#define MSC01_IC_LVL_LVL_MSK     0x000000ff
+#define MSC01_IC_LVL_SPUR_SHF    16
+#define MSC01_IC_LVL_SPUR_MSK    0x00010000
+#define MSC01_IC_LVL_SPUR_BIT    MSC01_IC_LVL_SPUR_MSK
 #define MSC01_IC_RAMW_RIPL_SHF   0
 #define MSC01_IC_RAMW_RIPL_MSK   0x0000003f
 #define MSC01_IC_RAMW_DATA_SHF   6
 #define MSC01_IC_RAMW_READ_SHF   31
 #define MSC01_IC_RAMW_READ_MSK   0x80000000
 #define MSC01_IC_RAMW_READ_BIT   MSC01_IC_RAMW_READ_MSK
-#define MSC01_IC_OSB_OSB_SHF      0
-#define MSC01_IC_OSB_OSB_MSK      0x000000ff
-#define MSC01_IC_OSA_OSA_SHF      0
-#define MSC01_IC_OSA_OSA_MSK      0x000000ff
-#define MSC01_IC_GENA_GENA_SHF    0
-#define MSC01_IC_GENA_GENA_MSK    0x00000001
-#define MSC01_IC_GENA_GENA_BIT    MSC01_IC_GENA_GENA_MSK
-#define MSC01_IC_CFG_DIS_SHF      0
-#define MSC01_IC_CFG_DIS_MSK      0x00000001
-#define MSC01_IC_CFG_DIS_BIT      MSC01_IC_CFG_DIS_MSK
-#define MSC01_IC_CFG_SHFT_SHF     8
-#define MSC01_IC_CFG_SHFT_MSK     0x00000f00
-#define MSC01_IC_TCFG_ENA_SHF     0
-#define MSC01_IC_TCFG_ENA_MSK     0x00000001
-#define MSC01_IC_TCFG_ENA_BIT     MSC01_IC_TCFG_ENA_MSK
-#define MSC01_IC_TCFG_INT_SHF     8
-#define MSC01_IC_TCFG_INT_MSK     0x00000100
-#define MSC01_IC_TCFG_INT_BIT     MSC01_IC_TCFG_INT_MSK
-#define MSC01_IC_TCFG_EDGE_SHF    16
-#define MSC01_IC_TCFG_EDGE_MSK    0x00010000
-#define MSC01_IC_TCFG_EDGE_BIT    MSC01_IC_TCFG_EDGE_MSK
-#define MSC01_IC_SUP_PRI_SHF      0
-#define MSC01_IC_SUP_PRI_MSK      0x00000007
-#define MSC01_IC_SUP_EDGE_SHF     8
-#define MSC01_IC_SUP_EDGE_MSK     0x00000100
-#define MSC01_IC_SUP_EDGE_BIT     MSC01_IC_SUP_EDGE_MSK
-#define MSC01_IC_SUP_STEP         8
+#define MSC01_IC_OSB_OSB_SHF     0
+#define MSC01_IC_OSB_OSB_MSK     0x000000ff
+#define MSC01_IC_OSA_OSA_SHF     0
+#define MSC01_IC_OSA_OSA_MSK     0x000000ff
+#define MSC01_IC_GENA_GENA_SHF   0
+#define MSC01_IC_GENA_GENA_MSK   0x00000001
+#define MSC01_IC_GENA_GENA_BIT   MSC01_IC_GENA_GENA_MSK
+#define MSC01_IC_CFG_DIS_SHF     0
+#define MSC01_IC_CFG_DIS_MSK     0x00000001
+#define MSC01_IC_CFG_DIS_BIT     MSC01_IC_CFG_DIS_MSK
+#define MSC01_IC_CFG_SHFT_SHF    8
+#define MSC01_IC_CFG_SHFT_MSK    0x00000f00
+#define MSC01_IC_TCFG_ENA_SHF    0
+#define MSC01_IC_TCFG_ENA_MSK    0x00000001
+#define MSC01_IC_TCFG_ENA_BIT    MSC01_IC_TCFG_ENA_MSK
+#define MSC01_IC_TCFG_INT_SHF    8
+#define MSC01_IC_TCFG_INT_MSK    0x00000100
+#define MSC01_IC_TCFG_INT_BIT    MSC01_IC_TCFG_INT_MSK
+#define MSC01_IC_TCFG_EDGE_SHF   16
+#define MSC01_IC_TCFG_EDGE_MSK   0x00010000
+#define MSC01_IC_TCFG_EDGE_BIT   MSC01_IC_TCFG_EDGE_MSK
+#define MSC01_IC_SUP_PRI_SHF     0
+#define MSC01_IC_SUP_PRI_MSK     0x00000007
+#define MSC01_IC_SUP_EDGE_SHF    8
+#define MSC01_IC_SUP_EDGE_MSK    0x00000100
+#define MSC01_IC_SUP_EDGE_BIT    MSC01_IC_SUP_EDGE_MSK
+#define MSC01_IC_SUP_STEP        8
 
 /*
  * MIPS System controller interrupt register base.
  * Absolute register addresses
  *****************************************************************************/
 
-#define MSC01_IC_RST     (MSC01_IC_REG_BASE + MSC01_IC_RST_OFS)
-#define MSC01_IC_ENAL    (MSC01_IC_REG_BASE + MSC01_IC_ENAL_OFS)
-#define MSC01_IC_ENAH    (MSC01_IC_REG_BASE + MSC01_IC_ENAH_OFS)
-#define MSC01_IC_DISL    (MSC01_IC_REG_BASE + MSC01_IC_DISL_OFS)
-#define MSC01_IC_DISH    (MSC01_IC_REG_BASE + MSC01_IC_DISH_OFS)
-#define MSC01_IC_ISBL    (MSC01_IC_REG_BASE + MSC01_IC_ISBL_OFS)
-#define MSC01_IC_ISBH    (MSC01_IC_REG_BASE + MSC01_IC_ISBH_OFS)
-#define MSC01_IC_ISAL    (MSC01_IC_REG_BASE + MSC01_IC_ISAL_OFS)
-#define MSC01_IC_ISAH    (MSC01_IC_REG_BASE + MSC01_IC_ISAH_OFS)
-#define MSC01_IC_LVL     (MSC01_IC_REG_BASE + MSC01_IC_LVL_OFS)
-#define MSC01_IC_RAMW    (MSC01_IC_REG_BASE + MSC01_IC_RAMW_OFS)
-#define MSC01_IC_OSB     (MSC01_IC_REG_BASE + MSC01_IC_OSB_OFS)
-#define MSC01_IC_OSA     (MSC01_IC_REG_BASE + MSC01_IC_OSA_OFS)
-#define MSC01_IC_GENA    (MSC01_IC_REG_BASE + MSC01_IC_GENA_OFS)
-#define MSC01_IC_BASE    (MSC01_IC_REG_BASE + MSC01_IC_BASE_OFS)
-#define MSC01_IC_VEC     (MSC01_IC_REG_BASE + MSC01_IC_VEC_OFS)
-#define MSC01_IC_EOI     (MSC01_IC_REG_BASE + MSC01_IC_EOI_OFS)
-#define MSC01_IC_CFG     (MSC01_IC_REG_BASE + MSC01_IC_CFG_OFS)
-#define MSC01_IC_TRLD    (MSC01_IC_REG_BASE + MSC01_IC_TRLD_OFS)
-#define MSC01_IC_TVAL    (MSC01_IC_REG_BASE + MSC01_IC_TVAL_OFS)
-#define MSC01_IC_TCFG    (MSC01_IC_REG_BASE + MSC01_IC_TCFG_OFS)
-#define MSC01_IC_SUP     (MSC01_IC_REG_BASE + MSC01_IC_SUP_OFS)
-#define MSC01_IC_ENA     (MSC01_IC_REG_BASE + MSC01_IC_ENA_OFS)
-#define MSC01_IC_DIS     (MSC01_IC_REG_BASE + MSC01_IC_DIS_OFS)
-#define MSC01_IC_ISB     (MSC01_IC_REG_BASE + MSC01_IC_ISB_OFS)
-#define MSC01_IC_ISA     (MSC01_IC_REG_BASE + MSC01_IC_ISA_OFS)
+#define MSC01_IC_RST    (MSC01_IC_REG_BASE + MSC01_IC_RST_OFS)
+#define MSC01_IC_ENAL   (MSC01_IC_REG_BASE + MSC01_IC_ENAL_OFS)
+#define MSC01_IC_ENAH   (MSC01_IC_REG_BASE + MSC01_IC_ENAH_OFS)
+#define MSC01_IC_DISL   (MSC01_IC_REG_BASE + MSC01_IC_DISL_OFS)
+#define MSC01_IC_DISH   (MSC01_IC_REG_BASE + MSC01_IC_DISH_OFS)
+#define MSC01_IC_ISBL   (MSC01_IC_REG_BASE + MSC01_IC_ISBL_OFS)
+#define MSC01_IC_ISBH   (MSC01_IC_REG_BASE + MSC01_IC_ISBH_OFS)
+#define MSC01_IC_ISAL   (MSC01_IC_REG_BASE + MSC01_IC_ISAL_OFS)
+#define MSC01_IC_ISAH   (MSC01_IC_REG_BASE + MSC01_IC_ISAH_OFS)
+#define MSC01_IC_LVL    (MSC01_IC_REG_BASE + MSC01_IC_LVL_OFS)
+#define MSC01_IC_RAMW   (MSC01_IC_REG_BASE + MSC01_IC_RAMW_OFS)
+#define MSC01_IC_OSB    (MSC01_IC_REG_BASE + MSC01_IC_OSB_OFS)
+#define MSC01_IC_OSA    (MSC01_IC_REG_BASE + MSC01_IC_OSA_OFS)
+#define MSC01_IC_GENA   (MSC01_IC_REG_BASE + MSC01_IC_GENA_OFS)
+#define MSC01_IC_BASE   (MSC01_IC_REG_BASE + MSC01_IC_BASE_OFS)
+#define MSC01_IC_VEC    (MSC01_IC_REG_BASE + MSC01_IC_VEC_OFS)
+#define MSC01_IC_EOI    (MSC01_IC_REG_BASE + MSC01_IC_EOI_OFS)
+#define MSC01_IC_CFG    (MSC01_IC_REG_BASE + MSC01_IC_CFG_OFS)
+#define MSC01_IC_TRLD   (MSC01_IC_REG_BASE + MSC01_IC_TRLD_OFS)
+#define MSC01_IC_TVAL   (MSC01_IC_REG_BASE + MSC01_IC_TVAL_OFS)
+#define MSC01_IC_TCFG   (MSC01_IC_REG_BASE + MSC01_IC_TCFG_OFS)
+#define MSC01_IC_SUP    (MSC01_IC_REG_BASE + MSC01_IC_SUP_OFS)
+#define MSC01_IC_ENA    (MSC01_IC_REG_BASE + MSC01_IC_ENA_OFS)
+#define MSC01_IC_DIS    (MSC01_IC_REG_BASE + MSC01_IC_DIS_OFS)
+#define MSC01_IC_ISB    (MSC01_IC_REG_BASE + MSC01_IC_ISB_OFS)
+#define MSC01_IC_ISA    (MSC01_IC_REG_BASE + MSC01_IC_ISA_OFS)
 
 /*
  * Soc-it interrupts are configurable.
index 42bfd5f..aef560a 100644 (file)
 /*
  * Common SMP definitions
  */
-#define        RESET_VEC_PHYS          0x1fc00000
-#define        RESET_DATA_PHYS         (RESET_VEC_PHYS + (1<<10))
-#define        BOOT_THREAD_MODE        0
-#define        BOOT_NMI_LOCK           4
-#define        BOOT_NMI_HANDLER        8
+#define RESET_VEC_PHYS         0x1fc00000
+#define RESET_DATA_PHYS                (RESET_VEC_PHYS + (1<<10))
+#define BOOT_THREAD_MODE       0
+#define BOOT_NMI_LOCK          4
+#define BOOT_NMI_HANDLER       8
 
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
@@ -80,7 +80,7 @@ extern unsigned int nlm_threads_per_core;
 extern cpumask_t nlm_cpumask;
 
 struct nlm_soc_info {
-       unsigned long coremask; /* cores enabled on the soc */
+       unsigned long coremask; /* cores enabled on the soc */
        unsigned long ebase;
        uint64_t irqmask;
        uint64_t sysbase;       /* only for XLP */
@@ -88,9 +88,9 @@ struct nlm_soc_info {
        spinlock_t piclock;
 };
 
-#define        nlm_get_node(i)         (&nlm_nodes[i])
+#define nlm_get_node(i)                (&nlm_nodes[i])
 #ifdef CONFIG_CPU_XLR
-#define        nlm_current_node()      (&nlm_nodes[0])
+#define nlm_current_node()     (&nlm_nodes[0])
 #else
 #define nlm_current_node()     (&nlm_nodes[nlm_nodeid()])
 #endif
index 72a0c78..419d8ae 100644 (file)
@@ -48,7 +48,7 @@
  * access 64 bit addresses or data.
  *
  * We need to disable interrupts because we save just the lower 32 bits of
- * registers in  interrupt handling. So if we get hit by an interrupt while
+ * registers in         interrupt handling. So if we get hit by an interrupt while
  * using the upper 32 bits of a register, we lose.
  */
 static inline uint32_t nlm_save_flags_kx(void)
index 32ba6d9..8ad2e0f 100644 (file)
@@ -49,7 +49,7 @@
  */
 #define write_c0_eimr(val)                                             \
 do {                                                                   \
-       if (sizeof(unsigned long) == 4) {                               \
+       if (sizeof(unsigned long) == 4) {                               \
                unsigned long __flags;                                  \
                                                                        \
                local_irq_save(__flags);                                \
@@ -68,6 +68,85 @@ do {                                                                 \
                __write_64bit_c0_register($9, 7, (val));                \
 } while (0)
 
+/*
+ * Handling the 64 bit EIMR and EIRR registers in 32-bit mode with
+ * standard functions will be very inefficient. This provides
+ * optimized functions for the normal operations on the registers.
+ *
+ * Call with interrupts disabled.
+ */
+static inline void ack_c0_eirr(int irq)
+{
+       __asm__ __volatile__(
+               ".set   push\n\t"
+               ".set   mips64\n\t"
+               ".set   noat\n\t"
+               "li     $1, 1\n\t"
+               "dsllv  $1, $1, %0\n\t"
+               "dmtc0  $1, $9, 6\n\t"
+               ".set   pop"
+               : : "r" (irq));
+}
+
+static inline void set_c0_eimr(int irq)
+{
+       __asm__ __volatile__(
+               ".set   push\n\t"
+               ".set   mips64\n\t"
+               ".set   noat\n\t"
+               "li     $1, 1\n\t"
+               "dsllv  %0, $1, %0\n\t"
+               "dmfc0  $1, $9, 7\n\t"
+               "or     $1, %0\n\t"
+               "dmtc0  $1, $9, 7\n\t"
+               ".set   pop"
+               : "+r" (irq));
+}
+
+static inline void clear_c0_eimr(int irq)
+{
+       __asm__ __volatile__(
+               ".set   push\n\t"
+               ".set   mips64\n\t"
+               ".set   noat\n\t"
+               "li     $1, 1\n\t"
+               "dsllv  %0, $1, %0\n\t"
+               "dmfc0  $1, $9, 7\n\t"
+               "or     $1, %0\n\t"
+               "xor    $1, %0\n\t"
+               "dmtc0  $1, $9, 7\n\t"
+               ".set   pop"
+               : "+r" (irq));
+}
+
+/*
+ * Read c0 eimr and c0 eirr, do AND of the two values, the result is
+ * the interrupts which are raised and are not masked.
+ */
+static inline uint64_t read_c0_eirr_and_eimr(void)
+{
+       uint64_t val;
+
+#ifdef CONFIG_64BIT
+       val = read_c0_eimr() & read_c0_eirr();
+#else
+       __asm__ __volatile__(
+               ".set   push\n\t"
+               ".set   mips64\n\t"
+               ".set   noat\n\t"
+               "dmfc0  %M0, $9, 6\n\t"
+               "dmfc0  %L0, $9, 7\n\t"
+               "and    %M0, %L0\n\t"
+               "dsll   %L0, %M0, 32\n\t"
+               "dsra   %M0, %M0, 32\n\t"
+               "dsra   %L0, %L0, 32\n\t"
+               ".set   pop"
+               : "=r" (val));
+#endif
+
+       return val;
+}
+
 static inline int hard_smp_processor_id(void)
 {
        return __read_32bit_c0_register($15, 1) & 0x3ff;
@@ -208,7 +287,7 @@ do {                                                                        \
                        ".set\tmips0\n\t"                               \
                        : : "Jr" (value));                              \
        else                                                            \
-               __asm__ __volatile__(                                   \
+               __asm__ __volatile__(                                   \
                        ".set\tmips32\n\t"                              \
                        "mtc2\t%z0, " #reg ", " #sel "\n\t"             \
                        ".set\tmips0\n\t"                               \
index ca95133..790f0f1 100644 (file)
 
 #define nlm_read_bridge_reg(b, r)      nlm_read_reg(b, r)
 #define nlm_write_bridge_reg(b, r, v)  nlm_write_reg(b, r, v)
-#define        nlm_get_bridge_pcibase(node)    \
+#define nlm_get_bridge_pcibase(node)   \
                        nlm_pcicfg_base(XLP_IO_BRIDGE_OFFSET(node))
-#define        nlm_get_bridge_regbase(node)    \
+#define nlm_get_bridge_regbase(node)   \
                        (nlm_get_bridge_pcibase(node) + XLP_IO_PCI_HDRSZ)
 
 #endif /* __ASSEMBLY__ */
index 7b63a6b..6d2e58a 100644 (file)
@@ -46,6 +46,8 @@
 #define CPU_BLOCKID_FPU                9
 #define CPU_BLOCKID_MAP                10
 
+#define ICU_DEFEATURE          0x100
+
 #define LSU_DEFEATURE          0x304
 #define LSU_DEBUG_ADDR         0x305
 #define LSU_DEBUG_DATA0                0x306
index 2c63f97..9fac46f 100644 (file)
 #ifndef __NLM_HAL_IOMAP_H__
 #define __NLM_HAL_IOMAP_H__
 
-#define XLP_DEFAULT_IO_BASE             0x18000000
+#define XLP_DEFAULT_IO_BASE            0x18000000
 #define XLP_DEFAULT_PCI_ECFG_BASE      XLP_DEFAULT_IO_BASE
 #define XLP_DEFAULT_PCI_CFG_BASE       0x1c000000
 
 #define NMI_BASE                       0xbfc00000
-#define        XLP_IO_CLK                      133333333
+#define XLP_IO_CLK                     133333333
 
 #define XLP_PCIE_CFG_SIZE              0x1000          /* 4K */
 #define XLP_PCIE_DEV_BLK_SIZE          (8 * XLP_PCIE_CFG_SIZE)
@@ -96,8 +96,8 @@
 #define XLP_IO_NAND_OFFSET(node)       XLP_HDR_OFFSET(node, 0, 7, 1)
 #define XLP_IO_SPI_OFFSET(node)                XLP_HDR_OFFSET(node, 0, 7, 2)
 /* SD flash */
-#define XLP_IO_SD_OFFSET(node)          XLP_HDR_OFFSET(node, 0, 7, 3)
-#define XLP_IO_MMC_OFFSET(node, slot)   \
+#define XLP_IO_SD_OFFSET(node)         XLP_HDR_OFFSET(node, 0, 7, 3)
+#define XLP_IO_MMC_OFFSET(node, slot)  \
                ((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
 
 /* PCI config header register id's */
 #define XLP_PCI_SBB_WT_REG             0x3f
 
 /* PCI IDs for SoC device */
-#define        PCI_VENDOR_NETLOGIC             0x184e
-
-#define        PCI_DEVICE_ID_NLM_ROOT          0x1001
-#define        PCI_DEVICE_ID_NLM_ICI           0x1002
-#define        PCI_DEVICE_ID_NLM_PIC           0x1003
-#define        PCI_DEVICE_ID_NLM_PCIE          0x1004
-#define        PCI_DEVICE_ID_NLM_EHCI          0x1007
-#define        PCI_DEVICE_ID_NLM_OHCI          0x1008
-#define        PCI_DEVICE_ID_NLM_NAE           0x1009
-#define        PCI_DEVICE_ID_NLM_POE           0x100A
-#define        PCI_DEVICE_ID_NLM_FMN           0x100B
-#define        PCI_DEVICE_ID_NLM_RAID          0x100D
-#define        PCI_DEVICE_ID_NLM_SAE           0x100D
-#define        PCI_DEVICE_ID_NLM_RSA           0x100E
-#define        PCI_DEVICE_ID_NLM_CMP           0x100F
-#define        PCI_DEVICE_ID_NLM_UART          0x1010
-#define        PCI_DEVICE_ID_NLM_I2C           0x1011
-#define        PCI_DEVICE_ID_NLM_NOR           0x1015
-#define        PCI_DEVICE_ID_NLM_NAND          0x1016
-#define        PCI_DEVICE_ID_NLM_MMC           0x1018
+#define PCI_VENDOR_NETLOGIC            0x184e
+
+#define PCI_DEVICE_ID_NLM_ROOT         0x1001
+#define PCI_DEVICE_ID_NLM_ICI          0x1002
+#define PCI_DEVICE_ID_NLM_PIC          0x1003
+#define PCI_DEVICE_ID_NLM_PCIE         0x1004
+#define PCI_DEVICE_ID_NLM_EHCI         0x1007
+#define PCI_DEVICE_ID_NLM_OHCI         0x1008
+#define PCI_DEVICE_ID_NLM_NAE          0x1009
+#define PCI_DEVICE_ID_NLM_POE          0x100A
+#define PCI_DEVICE_ID_NLM_FMN          0x100B
+#define PCI_DEVICE_ID_NLM_RAID         0x100D
+#define PCI_DEVICE_ID_NLM_SAE          0x100D
+#define PCI_DEVICE_ID_NLM_RSA          0x100E
+#define PCI_DEVICE_ID_NLM_CMP          0x100F
+#define PCI_DEVICE_ID_NLM_UART         0x1010
+#define PCI_DEVICE_ID_NLM_I2C          0x1011
+#define PCI_DEVICE_ID_NLM_NOR          0x1015
+#define PCI_DEVICE_ID_NLM_NAND         0x1016
+#define PCI_DEVICE_ID_NLM_MMC          0x1018
 
 #ifndef __ASSEMBLY__
 
index 66c323d..b559cb9 100644 (file)
  */
 
 #ifndef __NLM_HAL_PCIBUS_H__
-#define        __NLM_HAL_PCIBUS_H__
+#define __NLM_HAL_PCIBUS_H__
 
 /* PCIE Memory and IO regions */
-#define        PCIE_MEM_BASE                   0xd0000000ULL
-#define        PCIE_MEM_LIMIT                  0xdfffffffULL
-#define        PCIE_IO_BASE                    0x14000000ULL
-#define        PCIE_IO_LIMIT                   0x15ffffffULL
+#define PCIE_MEM_BASE                  0xd0000000ULL
+#define PCIE_MEM_LIMIT                 0xdfffffffULL
+#define PCIE_IO_BASE                   0x14000000ULL
+#define PCIE_IO_LIMIT                  0x15ffffffULL
 
-#define        PCIE_BRIDGE_CMD                 0x1
-#define        PCIE_BRIDGE_MSI_CAP             0x14
-#define        PCIE_BRIDGE_MSI_ADDRL           0x15
-#define        PCIE_BRIDGE_MSI_ADDRH           0x16
-#define        PCIE_BRIDGE_MSI_DATA            0x17
+#define PCIE_BRIDGE_CMD                        0x1
+#define PCIE_BRIDGE_MSI_CAP            0x14
+#define PCIE_BRIDGE_MSI_ADDRL          0x15
+#define PCIE_BRIDGE_MSI_ADDRH          0x16
+#define PCIE_BRIDGE_MSI_DATA           0x17
 
 /* XLP Global PCIE configuration space registers */
-#define        PCIE_BYTE_SWAP_MEM_BASE         0x247
-#define        PCIE_BYTE_SWAP_MEM_LIM          0x248
-#define        PCIE_BYTE_SWAP_IO_BASE          0x249
-#define        PCIE_BYTE_SWAP_IO_LIM           0x24A
-#define        PCIE_MSI_STATUS                 0x25A
-#define        PCIE_MSI_EN                     0x25B
-#define        PCIE_INT_EN0                    0x261
+#define PCIE_BYTE_SWAP_MEM_BASE                0x247
+#define PCIE_BYTE_SWAP_MEM_LIM         0x248
+#define PCIE_BYTE_SWAP_IO_BASE         0x249
+#define PCIE_BYTE_SWAP_IO_LIM          0x24A
+#define PCIE_MSI_STATUS                        0x25A
+#define PCIE_MSI_EN                    0x25B
+#define PCIE_INT_EN0                   0x261
 
 /* PCIE_MSI_EN */
-#define        PCIE_MSI_VECTOR_INT_EN          0xFFFFFFFF
+#define PCIE_MSI_VECTOR_INT_EN         0xFFFFFFFF
 
 /* PCIE_INT_EN0 */
-#define        PCIE_MSI_INT_EN                 (1 << 9)
+#define PCIE_MSI_INT_EN                        (1 << 9)
 
 #ifndef __ASSEMBLY__
 
-#define        nlm_read_pcie_reg(b, r)         nlm_read_reg(b, r)
-#define        nlm_write_pcie_reg(b, r, v)     nlm_write_reg(b, r, v)
-#define        nlm_get_pcie_base(node, inst)   \
+#define nlm_read_pcie_reg(b, r)                nlm_read_reg(b, r)
+#define nlm_write_pcie_reg(b, r, v)    nlm_write_reg(b, r, v)
+#define nlm_get_pcie_base(node, inst)  \
                        nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, inst))
-#define        nlm_get_pcie_regbase(node, inst)        \
+#define nlm_get_pcie_regbase(node, inst)       \
                        (nlm_get_pcie_base(node, inst) + XLP_IO_PCI_HDRSZ)
 
 int xlp_pcie_link_irt(int link);
index b2e53a5..3df5301 100644 (file)
@@ -36,7 +36,7 @@
 #define _NLM_HAL_PIC_H
 
 /* PIC Specific registers */
-#define PIC_CTRL                0x00
+#define PIC_CTRL               0x00
 
 /* PIC control register defines */
 #define PIC_CTRL_ITV           32 /* interrupt timeout value */
 #define PIC_IRT_DB             16 /* Destination base */
 #define PIC_IRT_DTE            0  /* Destination thread enables */
 
-#define PIC_BYTESWAP            0x02
-#define PIC_STATUS              0x04
+#define PIC_BYTESWAP           0x02
+#define PIC_STATUS             0x04
 #define PIC_INTR_TIMEOUT       0x06
 #define PIC_ICI0_INTR_TIMEOUT  0x08
 #define PIC_ICI1_INTR_TIMEOUT  0x0a
 #define PIC_ICI2_INTR_TIMEOUT  0x0c
 #define PIC_IPI_CTL            0x0e
-#define PIC_INT_ACK             0x10
-#define PIC_INT_PENDING0        0x12
-#define PIC_INT_PENDING1        0x14
-#define PIC_INT_PENDING2        0x16
-
-#define PIC_WDOG0_MAXVAL        0x18
-#define PIC_WDOG0_COUNT         0x1a
-#define PIC_WDOG0_ENABLE0       0x1c
-#define PIC_WDOG0_ENABLE1       0x1e
-#define PIC_WDOG0_BEATCMD       0x20
-#define PIC_WDOG0_BEAT0         0x22
-#define PIC_WDOG0_BEAT1         0x24
-
-#define PIC_WDOG1_MAXVAL        0x26
-#define PIC_WDOG1_COUNT         0x28
-#define PIC_WDOG1_ENABLE0       0x2a
-#define PIC_WDOG1_ENABLE1       0x2c
-#define PIC_WDOG1_BEATCMD       0x2e
-#define PIC_WDOG1_BEAT0         0x30
-#define PIC_WDOG1_BEAT1         0x32
-
-#define PIC_WDOG_MAXVAL(i)      (PIC_WDOG0_MAXVAL + ((i) ? 7 : 0))
-#define PIC_WDOG_COUNT(i)       (PIC_WDOG0_COUNT + ((i) ? 7 : 0))
-#define PIC_WDOG_ENABLE0(i)     (PIC_WDOG0_ENABLE0 + ((i) ? 7 : 0))
-#define PIC_WDOG_ENABLE1(i)     (PIC_WDOG0_ENABLE1 + ((i) ? 7 : 0))
-#define PIC_WDOG_BEATCMD(i)     (PIC_WDOG0_BEATCMD + ((i) ? 7 : 0))
-#define PIC_WDOG_BEAT0(i)       (PIC_WDOG0_BEAT0 + ((i) ? 7 : 0))
-#define PIC_WDOG_BEAT1(i)       (PIC_WDOG0_BEAT1 + ((i) ? 7 : 0))
+#define PIC_INT_ACK            0x10
+#define PIC_INT_PENDING0       0x12
+#define PIC_INT_PENDING1       0x14
+#define PIC_INT_PENDING2       0x16
+
+#define PIC_WDOG0_MAXVAL       0x18
+#define PIC_WDOG0_COUNT                0x1a
+#define PIC_WDOG0_ENABLE0      0x1c
+#define PIC_WDOG0_ENABLE1      0x1e
+#define PIC_WDOG0_BEATCMD      0x20
+#define PIC_WDOG0_BEAT0                0x22
+#define PIC_WDOG0_BEAT1                0x24
+
+#define PIC_WDOG1_MAXVAL       0x26
+#define PIC_WDOG1_COUNT                0x28
+#define PIC_WDOG1_ENABLE0      0x2a
+#define PIC_WDOG1_ENABLE1      0x2c
+#define PIC_WDOG1_BEATCMD      0x2e
+#define PIC_WDOG1_BEAT0                0x30
+#define PIC_WDOG1_BEAT1                0x32
+
+#define PIC_WDOG_MAXVAL(i)     (PIC_WDOG0_MAXVAL + ((i) ? 7 : 0))
+#define PIC_WDOG_COUNT(i)      (PIC_WDOG0_COUNT + ((i) ? 7 : 0))
+#define PIC_WDOG_ENABLE0(i)    (PIC_WDOG0_ENABLE0 + ((i) ? 7 : 0))
+#define PIC_WDOG_ENABLE1(i)    (PIC_WDOG0_ENABLE1 + ((i) ? 7 : 0))
+#define PIC_WDOG_BEATCMD(i)    (PIC_WDOG0_BEATCMD + ((i) ? 7 : 0))
+#define PIC_WDOG_BEAT0(i)      (PIC_WDOG0_BEAT0 + ((i) ? 7 : 0))
+#define PIC_WDOG_BEAT1(i)      (PIC_WDOG0_BEAT1 + ((i) ? 7 : 0))
 
 #define PIC_TIMER0_MAXVAL    0x34
 #define PIC_TIMER1_MAXVAL    0x36
 #define PIC_TIMER7_COUNT     0x52
 #define PIC_TIMER_COUNT(i)   (PIC_TIMER0_COUNT + ((i) * 2))
 
-#define PIC_ITE0_N0_N1          0x54
-#define PIC_ITE1_N0_N1          0x58
-#define PIC_ITE2_N0_N1          0x5c
-#define PIC_ITE3_N0_N1          0x60
-#define PIC_ITE4_N0_N1          0x64
-#define PIC_ITE5_N0_N1          0x68
-#define PIC_ITE6_N0_N1          0x6c
-#define PIC_ITE7_N0_N1          0x70
-#define PIC_ITE_N0_N1(i)        (PIC_ITE0_N0_N1 + ((i) * 4))
-
-#define PIC_ITE0_N2_N3          0x56
-#define PIC_ITE1_N2_N3          0x5a
-#define PIC_ITE2_N2_N3          0x5e
-#define PIC_ITE3_N2_N3          0x62
-#define PIC_ITE4_N2_N3          0x66
-#define PIC_ITE5_N2_N3          0x6a
-#define PIC_ITE6_N2_N3          0x6e
-#define PIC_ITE7_N2_N3          0x72
-#define PIC_ITE_N2_N3(i)        (PIC_ITE0_N2_N3 + ((i) * 4))
-
-#define PIC_IRT0                0x74
-#define PIC_IRT(i)              (PIC_IRT0 + ((i) * 2))
+#define PIC_ITE0_N0_N1         0x54
+#define PIC_ITE1_N0_N1         0x58
+#define PIC_ITE2_N0_N1         0x5c
+#define PIC_ITE3_N0_N1         0x60
+#define PIC_ITE4_N0_N1         0x64
+#define PIC_ITE5_N0_N1         0x68
+#define PIC_ITE6_N0_N1         0x6c
+#define PIC_ITE7_N0_N1         0x70
+#define PIC_ITE_N0_N1(i)       (PIC_ITE0_N0_N1 + ((i) * 4))
+
+#define PIC_ITE0_N2_N3         0x56
+#define PIC_ITE1_N2_N3         0x5a
+#define PIC_ITE2_N2_N3         0x5e
+#define PIC_ITE3_N2_N3         0x62
+#define PIC_ITE4_N2_N3         0x66
+#define PIC_ITE5_N2_N3         0x6a
+#define PIC_ITE6_N2_N3         0x6e
+#define PIC_ITE7_N2_N3         0x72
+#define PIC_ITE_N2_N3(i)       (PIC_ITE0_N2_N3 + ((i) * 4))
+
+#define PIC_IRT0               0x74
+#define PIC_IRT(i)             (PIC_IRT0 + ((i) * 2))
 
 #define TIMER_CYCLES_MAXVAL    0xffffffffffffffffULL
 
 #define PIC_LOCAL_SCHEDULING           1
 #define PIC_GLOBAL_SCHEDULING          0
 
+#define PIC_CLK_HZ                     133333333
+
 #define nlm_read_pic_reg(b, r) nlm_read_reg64(b, r)
 #define nlm_write_pic_reg(b, r, v) nlm_write_reg64(b, r, v)
 #define nlm_get_pic_pcibase(node) nlm_pcicfg_base(XLP_IO_PIC_OFFSET(node))
@@ -315,6 +317,12 @@ nlm_pic_read_timer(uint64_t base, int timer)
        return nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
 }
 
+static inline uint32_t
+nlm_pic_read_timer32(uint64_t base, int timer)
+{
+       return (uint32_t)nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
+}
+
 static inline void
 nlm_pic_write_timer(uint64_t base, int timer, uint64_t value)
 {
@@ -376,9 +384,9 @@ nlm_pic_ack(uint64_t base, int irt_num)
 }
 
 static inline void
-nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
+nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en)
 {
-       nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt);
+       nlm_pic_write_irt_direct(base, irt, en, 0, 0, irq, hwt);
 }
 
 int nlm_irq_to_irt(int irq);
index 258e8cc..470e52b 100644 (file)
 * @author Netlogic Microsystems
 * @brief HAL for System configuration registers
 */
-#define        SYS_CHIP_RESET                          0x00
-#define        SYS_POWER_ON_RESET_CFG                  0x01
-#define        SYS_EFUSE_DEVICE_CFG_STATUS0            0x02
-#define        SYS_EFUSE_DEVICE_CFG_STATUS1            0x03
-#define        SYS_EFUSE_DEVICE_CFG_STATUS2            0x04
-#define        SYS_EFUSE_DEVICE_CFG3                   0x05
-#define        SYS_EFUSE_DEVICE_CFG4                   0x06
-#define        SYS_EFUSE_DEVICE_CFG5                   0x07
-#define        SYS_EFUSE_DEVICE_CFG6                   0x08
-#define        SYS_EFUSE_DEVICE_CFG7                   0x09
-#define        SYS_PLL_CTRL                            0x0a
-#define        SYS_CPU_RESET                           0x0b
-#define        SYS_CPU_NONCOHERENT_MODE                0x0d
-#define        SYS_CORE_DFS_DIS_CTRL                   0x0e
-#define        SYS_CORE_DFS_RST_CTRL                   0x0f
-#define        SYS_CORE_DFS_BYP_CTRL                   0x10
-#define        SYS_CORE_DFS_PHA_CTRL                   0x11
-#define        SYS_CORE_DFS_DIV_INC_CTRL               0x12
-#define        SYS_CORE_DFS_DIV_DEC_CTRL               0x13
-#define        SYS_CORE_DFS_DIV_VALUE                  0x14
-#define        SYS_RESET                               0x15
-#define        SYS_DFS_DIS_CTRL                        0x16
-#define        SYS_DFS_RST_CTRL                        0x17
-#define        SYS_DFS_BYP_CTRL                        0x18
-#define        SYS_DFS_DIV_INC_CTRL                    0x19
-#define        SYS_DFS_DIV_DEC_CTRL                    0x1a
-#define        SYS_DFS_DIV_VALUE0                      0x1b
-#define        SYS_DFS_DIV_VALUE1                      0x1c
-#define        SYS_SENSE_AMP_DLY                       0x1d
-#define        SYS_SOC_SENSE_AMP_DLY                   0x1e
-#define        SYS_CTRL0                               0x1f
-#define        SYS_CTRL1                               0x20
-#define        SYS_TIMEOUT_BS1                         0x21
-#define        SYS_BYTE_SWAP                           0x22
-#define        SYS_VRM_VID                             0x23
-#define        SYS_PWR_RAM_CMD                         0x24
-#define        SYS_PWR_RAM_ADDR                        0x25
-#define        SYS_PWR_RAM_DATA0                       0x26
-#define        SYS_PWR_RAM_DATA1                       0x27
-#define        SYS_PWR_RAM_DATA2                       0x28
-#define        SYS_PWR_UCODE                           0x29
-#define        SYS_CPU0_PWR_STATUS                     0x2a
-#define        SYS_CPU1_PWR_STATUS                     0x2b
-#define        SYS_CPU2_PWR_STATUS                     0x2c
-#define        SYS_CPU3_PWR_STATUS                     0x2d
-#define        SYS_CPU4_PWR_STATUS                     0x2e
-#define        SYS_CPU5_PWR_STATUS                     0x2f
-#define        SYS_CPU6_PWR_STATUS                     0x30
-#define        SYS_CPU7_PWR_STATUS                     0x31
-#define        SYS_STATUS                              0x32
-#define        SYS_INT_POL                             0x33
-#define        SYS_INT_TYPE                            0x34
-#define        SYS_INT_STATUS                          0x35
-#define        SYS_INT_MASK0                           0x36
-#define        SYS_INT_MASK1                           0x37
-#define        SYS_UCO_S_ECC                           0x38
-#define        SYS_UCO_M_ECC                           0x39
-#define        SYS_UCO_ADDR                            0x3a
-#define        SYS_UCO_INSTR                           0x3b
-#define        SYS_MEM_BIST0                           0x3c
-#define        SYS_MEM_BIST1                           0x3d
-#define        SYS_MEM_BIST2                           0x3e
-#define        SYS_MEM_BIST3                           0x3f
-#define        SYS_MEM_BIST4                           0x40
-#define        SYS_MEM_BIST5                           0x41
-#define        SYS_MEM_BIST6                           0x42
-#define        SYS_MEM_BIST7                           0x43
-#define        SYS_MEM_BIST8                           0x44
-#define        SYS_MEM_BIST9                           0x45
-#define        SYS_MEM_BIST10                          0x46
-#define        SYS_MEM_BIST11                          0x47
-#define        SYS_MEM_BIST12                          0x48
-#define        SYS_SCRTCH0                             0x49
-#define        SYS_SCRTCH1                             0x4a
-#define        SYS_SCRTCH2                             0x4b
-#define        SYS_SCRTCH3                             0x4c
+#define SYS_CHIP_RESET                         0x00
+#define SYS_POWER_ON_RESET_CFG                 0x01
+#define SYS_EFUSE_DEVICE_CFG_STATUS0           0x02
+#define SYS_EFUSE_DEVICE_CFG_STATUS1           0x03
+#define SYS_EFUSE_DEVICE_CFG_STATUS2           0x04
+#define SYS_EFUSE_DEVICE_CFG3                  0x05
+#define SYS_EFUSE_DEVICE_CFG4                  0x06
+#define SYS_EFUSE_DEVICE_CFG5                  0x07
+#define SYS_EFUSE_DEVICE_CFG6                  0x08
+#define SYS_EFUSE_DEVICE_CFG7                  0x09
+#define SYS_PLL_CTRL                           0x0a
+#define SYS_CPU_RESET                          0x0b
+#define SYS_CPU_NONCOHERENT_MODE               0x0d
+#define SYS_CORE_DFS_DIS_CTRL                  0x0e
+#define SYS_CORE_DFS_RST_CTRL                  0x0f
+#define SYS_CORE_DFS_BYP_CTRL                  0x10
+#define SYS_CORE_DFS_PHA_CTRL                  0x11
+#define SYS_CORE_DFS_DIV_INC_CTRL              0x12
+#define SYS_CORE_DFS_DIV_DEC_CTRL              0x13
+#define SYS_CORE_DFS_DIV_VALUE                 0x14
+#define SYS_RESET                              0x15
+#define SYS_DFS_DIS_CTRL                       0x16
+#define SYS_DFS_RST_CTRL                       0x17
+#define SYS_DFS_BYP_CTRL                       0x18
+#define SYS_DFS_DIV_INC_CTRL                   0x19
+#define SYS_DFS_DIV_DEC_CTRL                   0x1a
+#define SYS_DFS_DIV_VALUE0                     0x1b
+#define SYS_DFS_DIV_VALUE1                     0x1c
+#define SYS_SENSE_AMP_DLY                      0x1d
+#define SYS_SOC_SENSE_AMP_DLY                  0x1e
+#define SYS_CTRL0                              0x1f
+#define SYS_CTRL1                              0x20
+#define SYS_TIMEOUT_BS1                                0x21
+#define SYS_BYTE_SWAP                          0x22
+#define SYS_VRM_VID                            0x23
+#define SYS_PWR_RAM_CMD                                0x24
+#define SYS_PWR_RAM_ADDR                       0x25
+#define SYS_PWR_RAM_DATA0                      0x26
+#define SYS_PWR_RAM_DATA1                      0x27
+#define SYS_PWR_RAM_DATA2                      0x28
+#define SYS_PWR_UCODE                          0x29
+#define SYS_CPU0_PWR_STATUS                    0x2a
+#define SYS_CPU1_PWR_STATUS                    0x2b
+#define SYS_CPU2_PWR_STATUS                    0x2c
+#define SYS_CPU3_PWR_STATUS                    0x2d
+#define SYS_CPU4_PWR_STATUS                    0x2e
+#define SYS_CPU5_PWR_STATUS                    0x2f
+#define SYS_CPU6_PWR_STATUS                    0x30
+#define SYS_CPU7_PWR_STATUS                    0x31
+#define SYS_STATUS                             0x32
+#define SYS_INT_POL                            0x33
+#define SYS_INT_TYPE                           0x34
+#define SYS_INT_STATUS                         0x35
+#define SYS_INT_MASK0                          0x36
+#define SYS_INT_MASK1                          0x37
+#define SYS_UCO_S_ECC                          0x38
+#define SYS_UCO_M_ECC                          0x39
+#define SYS_UCO_ADDR                           0x3a
+#define SYS_UCO_INSTR                          0x3b
+#define SYS_MEM_BIST0                          0x3c
+#define SYS_MEM_BIST1                          0x3d
+#define SYS_MEM_BIST2                          0x3e
+#define SYS_MEM_BIST3                          0x3f
+#define SYS_MEM_BIST4                          0x40
+#define SYS_MEM_BIST5                          0x41
+#define SYS_MEM_BIST6                          0x42
+#define SYS_MEM_BIST7                          0x43
+#define SYS_MEM_BIST8                          0x44
+#define SYS_MEM_BIST9                          0x45
+#define SYS_MEM_BIST10                         0x46
+#define SYS_MEM_BIST11                         0x47
+#define SYS_MEM_BIST12                         0x48
+#define SYS_SCRTCH0                            0x49
+#define SYS_SCRTCH1                            0x4a
+#define SYS_SCRTCH2                            0x4b
+#define SYS_SCRTCH3                            0x4c
 
 #ifndef __ASSEMBLY__
 
-#define        nlm_read_sys_reg(b, r)          nlm_read_reg(b, r)
-#define        nlm_write_sys_reg(b, r, v)      nlm_write_reg(b, r, v)
-#define        nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
-#define        nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
+#define nlm_read_sys_reg(b, r)         nlm_read_reg(b, r)
+#define nlm_write_sys_reg(b, r, v)     nlm_write_reg(b, r, v)
+#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
+#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
 
 #endif
 #endif
index 6a7046c..86d16e1 100644 (file)
@@ -91,8 +91,8 @@
 
 #if !defined(LOCORE) && !defined(__ASSEMBLY__)
 
-#define        nlm_read_uart_reg(b, r)         nlm_read_reg(b, r)
-#define        nlm_write_uart_reg(b, r, v)     nlm_write_reg(b, r, v)
+#define nlm_read_uart_reg(b, r)                nlm_read_reg(b, r)
+#define nlm_write_uart_reg(b, r, v)    nlm_write_reg(b, r, v)
 #define nlm_get_uart_pcibase(node, inst)       \
                nlm_pcicfg_base(XLP_IO_UART_OFFSET(node, inst))
 #define nlm_get_uart_regbase(node, inst)       \
index 68d5167..2a78929 100644 (file)
 #include <asm/netlogic/mips-extns.h> /* for COP2 access */
 
 /* Station IDs */
-#define        FMN_STNID_CPU0                  0x00
-#define        FMN_STNID_CPU1                  0x08
-#define        FMN_STNID_CPU2                  0x10
-#define        FMN_STNID_CPU3                  0x18
-#define        FMN_STNID_CPU4                  0x20
-#define        FMN_STNID_CPU5                  0x28
-#define        FMN_STNID_CPU6                  0x30
-#define        FMN_STNID_CPU7                  0x38
-
-#define        FMN_STNID_XGS0_TX               64
-#define        FMN_STNID_XMAC0_00_TX           64
-#define        FMN_STNID_XMAC0_01_TX           65
-#define        FMN_STNID_XMAC0_02_TX           66
-#define        FMN_STNID_XMAC0_03_TX           67
-#define        FMN_STNID_XMAC0_04_TX           68
-#define        FMN_STNID_XMAC0_05_TX           69
-#define        FMN_STNID_XMAC0_06_TX           70
-#define        FMN_STNID_XMAC0_07_TX           71
-#define        FMN_STNID_XMAC0_08_TX           72
-#define        FMN_STNID_XMAC0_09_TX           73
-#define        FMN_STNID_XMAC0_10_TX           74
-#define        FMN_STNID_XMAC0_11_TX           75
-#define        FMN_STNID_XMAC0_12_TX           76
-#define        FMN_STNID_XMAC0_13_TX           77
-#define        FMN_STNID_XMAC0_14_TX           78
-#define        FMN_STNID_XMAC0_15_TX           79
-
-#define        FMN_STNID_XGS1_TX               80
-#define        FMN_STNID_XMAC1_00_TX           80
-#define        FMN_STNID_XMAC1_01_TX           81
-#define        FMN_STNID_XMAC1_02_TX           82
-#define        FMN_STNID_XMAC1_03_TX           83
-#define        FMN_STNID_XMAC1_04_TX           84
-#define        FMN_STNID_XMAC1_05_TX           85
-#define        FMN_STNID_XMAC1_06_TX           86
-#define        FMN_STNID_XMAC1_07_TX           87
-#define        FMN_STNID_XMAC1_08_TX           88
-#define        FMN_STNID_XMAC1_09_TX           89
-#define        FMN_STNID_XMAC1_10_TX           90
-#define        FMN_STNID_XMAC1_11_TX           91
-#define        FMN_STNID_XMAC1_12_TX           92
-#define        FMN_STNID_XMAC1_13_TX           93
-#define        FMN_STNID_XMAC1_14_TX           94
-#define        FMN_STNID_XMAC1_15_TX           95
-
-#define        FMN_STNID_GMAC                  96
-#define        FMN_STNID_GMACJFR_0             96
-#define        FMN_STNID_GMACRFR_0             97
-#define        FMN_STNID_GMACTX0               98
-#define        FMN_STNID_GMACTX1               99
-#define        FMN_STNID_GMACTX2               100
-#define        FMN_STNID_GMACTX3               101
-#define        FMN_STNID_GMACJFR_1             102
-#define        FMN_STNID_GMACRFR_1             103
-
-#define        FMN_STNID_DMA                   104
-#define        FMN_STNID_DMA_0                 104
-#define        FMN_STNID_DMA_1                 105
-#define        FMN_STNID_DMA_2                 106
-#define        FMN_STNID_DMA_3                 107
-
-#define        FMN_STNID_XGS0FR                112
-#define        FMN_STNID_XMAC0JFR              112
-#define        FMN_STNID_XMAC0RFR              113
-
-#define        FMN_STNID_XGS1FR                114
-#define        FMN_STNID_XMAC1JFR              114
-#define        FMN_STNID_XMAC1RFR              115
-#define        FMN_STNID_SEC                   120
-#define        FMN_STNID_SEC0                  120
-#define        FMN_STNID_SEC1                  121
-#define        FMN_STNID_SEC2                  122
-#define        FMN_STNID_SEC3                  123
-#define        FMN_STNID_PK0                   124
-#define        FMN_STNID_SEC_RSA               124
-#define        FMN_STNID_SEC_RSVD0             125
-#define        FMN_STNID_SEC_RSVD1             126
-#define        FMN_STNID_SEC_RSVD2             127
-
-#define        FMN_STNID_GMAC1                 80
-#define        FMN_STNID_GMAC1_FR_0            81
-#define        FMN_STNID_GMAC1_TX0             82
-#define        FMN_STNID_GMAC1_TX1             83
-#define        FMN_STNID_GMAC1_TX2             84
-#define        FMN_STNID_GMAC1_TX3             85
-#define        FMN_STNID_GMAC1_FR_1            87
-#define        FMN_STNID_GMAC0                 96
-#define        FMN_STNID_GMAC0_FR_0            97
-#define        FMN_STNID_GMAC0_TX0             98
-#define        FMN_STNID_GMAC0_TX1             99
-#define        FMN_STNID_GMAC0_TX2             100
-#define        FMN_STNID_GMAC0_TX3             101
-#define        FMN_STNID_GMAC0_FR_1            103
-#define        FMN_STNID_CMP_0                 108
-#define        FMN_STNID_CMP_1                 109
-#define        FMN_STNID_CMP_2                 110
-#define        FMN_STNID_CMP_3                 111
-#define        FMN_STNID_PCIE_0                116
-#define        FMN_STNID_PCIE_1                117
-#define        FMN_STNID_PCIE_2                118
-#define        FMN_STNID_PCIE_3                119
-#define        FMN_STNID_XLS_PK0               121
+#define FMN_STNID_CPU0                 0x00
+#define FMN_STNID_CPU1                 0x08
+#define FMN_STNID_CPU2                 0x10
+#define FMN_STNID_CPU3                 0x18
+#define FMN_STNID_CPU4                 0x20
+#define FMN_STNID_CPU5                 0x28
+#define FMN_STNID_CPU6                 0x30
+#define FMN_STNID_CPU7                 0x38
+
+#define FMN_STNID_XGS0_TX              64
+#define FMN_STNID_XMAC0_00_TX          64
+#define FMN_STNID_XMAC0_01_TX          65
+#define FMN_STNID_XMAC0_02_TX          66
+#define FMN_STNID_XMAC0_03_TX          67
+#define FMN_STNID_XMAC0_04_TX          68
+#define FMN_STNID_XMAC0_05_TX          69
+#define FMN_STNID_XMAC0_06_TX          70
+#define FMN_STNID_XMAC0_07_TX          71
+#define FMN_STNID_XMAC0_08_TX          72
+#define FMN_STNID_XMAC0_09_TX          73
+#define FMN_STNID_XMAC0_10_TX          74
+#define FMN_STNID_XMAC0_11_TX          75
+#define FMN_STNID_XMAC0_12_TX          76
+#define FMN_STNID_XMAC0_13_TX          77
+#define FMN_STNID_XMAC0_14_TX          78
+#define FMN_STNID_XMAC0_15_TX          79
+
+#define FMN_STNID_XGS1_TX              80
+#define FMN_STNID_XMAC1_00_TX          80
+#define FMN_STNID_XMAC1_01_TX          81
+#define FMN_STNID_XMAC1_02_TX          82
+#define FMN_STNID_XMAC1_03_TX          83
+#define FMN_STNID_XMAC1_04_TX          84
+#define FMN_STNID_XMAC1_05_TX          85
+#define FMN_STNID_XMAC1_06_TX          86
+#define FMN_STNID_XMAC1_07_TX          87
+#define FMN_STNID_XMAC1_08_TX          88
+#define FMN_STNID_XMAC1_09_TX          89
+#define FMN_STNID_XMAC1_10_TX          90
+#define FMN_STNID_XMAC1_11_TX          91
+#define FMN_STNID_XMAC1_12_TX          92
+#define FMN_STNID_XMAC1_13_TX          93
+#define FMN_STNID_XMAC1_14_TX          94
+#define FMN_STNID_XMAC1_15_TX          95
+
+#define FMN_STNID_GMAC                 96
+#define FMN_STNID_GMACJFR_0            96
+#define FMN_STNID_GMACRFR_0            97
+#define FMN_STNID_GMACTX0              98
+#define FMN_STNID_GMACTX1              99
+#define FMN_STNID_GMACTX2              100
+#define FMN_STNID_GMACTX3              101
+#define FMN_STNID_GMACJFR_1            102
+#define FMN_STNID_GMACRFR_1            103
+
+#define FMN_STNID_DMA                  104
+#define FMN_STNID_DMA_0                        104
+#define FMN_STNID_DMA_1                        105
+#define FMN_STNID_DMA_2                        106
+#define FMN_STNID_DMA_3                        107
+
+#define FMN_STNID_XGS0FR               112
+#define FMN_STNID_XMAC0JFR             112
+#define FMN_STNID_XMAC0RFR             113
+
+#define FMN_STNID_XGS1FR               114
+#define FMN_STNID_XMAC1JFR             114
+#define FMN_STNID_XMAC1RFR             115
+#define FMN_STNID_SEC                  120
+#define FMN_STNID_SEC0                 120
+#define FMN_STNID_SEC1                 121
+#define FMN_STNID_SEC2                 122
+#define FMN_STNID_SEC3                 123
+#define FMN_STNID_PK0                  124
+#define FMN_STNID_SEC_RSA              124
+#define FMN_STNID_SEC_RSVD0            125
+#define FMN_STNID_SEC_RSVD1            126
+#define FMN_STNID_SEC_RSVD2            127
+
+#define FMN_STNID_GMAC1                        80
+#define FMN_STNID_GMAC1_FR_0           81
+#define FMN_STNID_GMAC1_TX0            82
+#define FMN_STNID_GMAC1_TX1            83
+#define FMN_STNID_GMAC1_TX2            84
+#define FMN_STNID_GMAC1_TX3            85
+#define FMN_STNID_GMAC1_FR_1           87
+#define FMN_STNID_GMAC0                        96
+#define FMN_STNID_GMAC0_FR_0           97
+#define FMN_STNID_GMAC0_TX0            98
+#define FMN_STNID_GMAC0_TX1            99
+#define FMN_STNID_GMAC0_TX2            100
+#define FMN_STNID_GMAC0_TX3            101
+#define FMN_STNID_GMAC0_FR_1           103
+#define FMN_STNID_CMP_0                        108
+#define FMN_STNID_CMP_1                        109
+#define FMN_STNID_CMP_2                        110
+#define FMN_STNID_CMP_3                        111
+#define FMN_STNID_PCIE_0               116
+#define FMN_STNID_PCIE_1               117
+#define FMN_STNID_PCIE_2               118
+#define FMN_STNID_PCIE_3               119
+#define FMN_STNID_XLS_PK0              121
 
 #define nlm_read_c2_cc0(s)             __read_32bit_c2_register($16, s)
 #define nlm_read_c2_cc1(s)             __read_32bit_c2_register($17, s)
 #define nlm_write_c2_cc14(s, v)                __write_32bit_c2_register($30, s, v)
 #define nlm_write_c2_cc15(s, v)                __write_32bit_c2_register($31, s, v)
 
-#define        nlm_read_c2_status(sel)         __read_32bit_c2_register($2, 0)
-#define        nlm_read_c2_config()            __read_32bit_c2_register($3, 0)
-#define        nlm_write_c2_config(v)          __write_32bit_c2_register($3, 0, v)
-#define        nlm_read_c2_bucksize(b)         __read_32bit_c2_register($4, b)
-#define        nlm_write_c2_bucksize(b, v)     __write_32bit_c2_register($4, b, v)
-
-#define        nlm_read_c2_rx_msg0()           __read_64bit_c2_register($1, 0)
-#define        nlm_read_c2_rx_msg1()           __read_64bit_c2_register($1, 1)
-#define        nlm_read_c2_rx_msg2()           __read_64bit_c2_register($1, 2)
-#define        nlm_read_c2_rx_msg3()           __read_64bit_c2_register($1, 3)
-
-#define        nlm_write_c2_tx_msg0(v)         __write_64bit_c2_register($0, 0, v)
-#define        nlm_write_c2_tx_msg1(v)         __write_64bit_c2_register($0, 1, v)
-#define        nlm_write_c2_tx_msg2(v)         __write_64bit_c2_register($0, 2, v)
-#define        nlm_write_c2_tx_msg3(v)         __write_64bit_c2_register($0, 3, v)
-
-#define        FMN_STN_RX_QSIZE                256
-#define        FMN_NSTATIONS                   128
-#define        FMN_CORE_NBUCKETS               8
+#define nlm_read_c2_status(sel)                __read_32bit_c2_register($2, 0)
+#define nlm_read_c2_config()           __read_32bit_c2_register($3, 0)
+#define nlm_write_c2_config(v)         __write_32bit_c2_register($3, 0, v)
+#define nlm_read_c2_bucksize(b)                __read_32bit_c2_register($4, b)
+#define nlm_write_c2_bucksize(b, v)    __write_32bit_c2_register($4, b, v)
+
+#define nlm_read_c2_rx_msg0()          __read_64bit_c2_register($1, 0)
+#define nlm_read_c2_rx_msg1()          __read_64bit_c2_register($1, 1)
+#define nlm_read_c2_rx_msg2()          __read_64bit_c2_register($1, 2)
+#define nlm_read_c2_rx_msg3()          __read_64bit_c2_register($1, 3)
+
+#define nlm_write_c2_tx_msg0(v)                __write_64bit_c2_register($0, 0, v)
+#define nlm_write_c2_tx_msg1(v)                __write_64bit_c2_register($0, 1, v)
+#define nlm_write_c2_tx_msg2(v)                __write_64bit_c2_register($0, 2, v)
+#define nlm_write_c2_tx_msg3(v)                __write_64bit_c2_register($0, 3, v)
+
+#define FMN_STN_RX_QSIZE               256
+#define FMN_NSTATIONS                  128
+#define FMN_CORE_NBUCKETS              8
 
 static inline void nlm_msgsnd(unsigned int stid)
 {
index 2e768f0..ff4533d 100644 (file)
 #ifndef _ASM_NLM_IOMAP_H
 #define _ASM_NLM_IOMAP_H
 
-#define DEFAULT_NETLOGIC_IO_BASE           CKSEG1ADDR(0x1ef00000)
-#define NETLOGIC_IO_DDR2_CHN0_OFFSET       0x01000
-#define NETLOGIC_IO_DDR2_CHN1_OFFSET       0x02000
-#define NETLOGIC_IO_DDR2_CHN2_OFFSET       0x03000
-#define NETLOGIC_IO_DDR2_CHN3_OFFSET       0x04000
-#define NETLOGIC_IO_PIC_OFFSET             0x08000
-#define NETLOGIC_IO_UART_0_OFFSET          0x14000
-#define NETLOGIC_IO_UART_1_OFFSET          0x15100
+#define DEFAULT_NETLOGIC_IO_BASE          CKSEG1ADDR(0x1ef00000)
+#define NETLOGIC_IO_DDR2_CHN0_OFFSET      0x01000
+#define NETLOGIC_IO_DDR2_CHN1_OFFSET      0x02000
+#define NETLOGIC_IO_DDR2_CHN2_OFFSET      0x03000
+#define NETLOGIC_IO_DDR2_CHN3_OFFSET      0x04000
+#define NETLOGIC_IO_PIC_OFFSET            0x08000
+#define NETLOGIC_IO_UART_0_OFFSET         0x14000
+#define NETLOGIC_IO_UART_1_OFFSET         0x15100
 
-#define NETLOGIC_IO_SIZE                   0x1000
+#define NETLOGIC_IO_SIZE                  0x1000
 
-#define NETLOGIC_IO_BRIDGE_OFFSET          0x00000
+#define NETLOGIC_IO_BRIDGE_OFFSET         0x00000
 
-#define NETLOGIC_IO_RLD2_CHN0_OFFSET       0x05000
-#define NETLOGIC_IO_RLD2_CHN1_OFFSET       0x06000
+#define NETLOGIC_IO_RLD2_CHN0_OFFSET      0x05000
+#define NETLOGIC_IO_RLD2_CHN1_OFFSET      0x06000
 
-#define NETLOGIC_IO_SRAM_OFFSET            0x07000
+#define NETLOGIC_IO_SRAM_OFFSET                   0x07000
 
-#define NETLOGIC_IO_PCIX_OFFSET            0x09000
-#define NETLOGIC_IO_HT_OFFSET              0x0A000
+#define NETLOGIC_IO_PCIX_OFFSET                   0x09000
+#define NETLOGIC_IO_HT_OFFSET             0x0A000
 
-#define NETLOGIC_IO_SECURITY_OFFSET        0x0B000
+#define NETLOGIC_IO_SECURITY_OFFSET       0x0B000
 
-#define NETLOGIC_IO_GMAC_0_OFFSET          0x0C000
-#define NETLOGIC_IO_GMAC_1_OFFSET          0x0D000
-#define NETLOGIC_IO_GMAC_2_OFFSET          0x0E000
-#define NETLOGIC_IO_GMAC_3_OFFSET          0x0F000
+#define NETLOGIC_IO_GMAC_0_OFFSET         0x0C000
+#define NETLOGIC_IO_GMAC_1_OFFSET         0x0D000
+#define NETLOGIC_IO_GMAC_2_OFFSET         0x0E000
+#define NETLOGIC_IO_GMAC_3_OFFSET         0x0F000
 
 /* XLS devices */
-#define NETLOGIC_IO_GMAC_4_OFFSET          0x20000
-#define NETLOGIC_IO_GMAC_5_OFFSET          0x21000
-#define NETLOGIC_IO_GMAC_6_OFFSET          0x22000
-#define NETLOGIC_IO_GMAC_7_OFFSET          0x23000
+#define NETLOGIC_IO_GMAC_4_OFFSET         0x20000
+#define NETLOGIC_IO_GMAC_5_OFFSET         0x21000
+#define NETLOGIC_IO_GMAC_6_OFFSET         0x22000
+#define NETLOGIC_IO_GMAC_7_OFFSET         0x23000
 
-#define NETLOGIC_IO_PCIE_0_OFFSET          0x1E000
-#define NETLOGIC_IO_PCIE_1_OFFSET          0x1F000
-#define NETLOGIC_IO_SRIO_0_OFFSET          0x1E000
-#define NETLOGIC_IO_SRIO_1_OFFSET          0x1F000
+#define NETLOGIC_IO_PCIE_0_OFFSET         0x1E000
+#define NETLOGIC_IO_PCIE_1_OFFSET         0x1F000
+#define NETLOGIC_IO_SRIO_0_OFFSET         0x1E000
+#define NETLOGIC_IO_SRIO_1_OFFSET         0x1F000
 
-#define NETLOGIC_IO_USB_0_OFFSET           0x24000
-#define NETLOGIC_IO_USB_1_OFFSET           0x25000
+#define NETLOGIC_IO_USB_0_OFFSET          0x24000
+#define NETLOGIC_IO_USB_1_OFFSET          0x25000
 
-#define NETLOGIC_IO_COMP_OFFSET            0x1D000
+#define NETLOGIC_IO_COMP_OFFSET                   0x1D000
 /* end XLS devices */
 
 /* XLR devices */
-#define NETLOGIC_IO_SPI4_0_OFFSET          0x10000
-#define NETLOGIC_IO_XGMAC_0_OFFSET         0x11000
-#define NETLOGIC_IO_SPI4_1_OFFSET          0x12000
-#define NETLOGIC_IO_XGMAC_1_OFFSET         0x13000
+#define NETLOGIC_IO_SPI4_0_OFFSET         0x10000
+#define NETLOGIC_IO_XGMAC_0_OFFSET        0x11000
+#define NETLOGIC_IO_SPI4_1_OFFSET         0x12000
+#define NETLOGIC_IO_XGMAC_1_OFFSET        0x13000
 /* end XLR devices */
 
-#define NETLOGIC_IO_I2C_0_OFFSET           0x16000
-#define NETLOGIC_IO_I2C_1_OFFSET           0x17000
+#define NETLOGIC_IO_I2C_0_OFFSET          0x16000
+#define NETLOGIC_IO_I2C_1_OFFSET          0x17000
 
-#define NETLOGIC_IO_GPIO_OFFSET            0x18000
-#define NETLOGIC_IO_FLASH_OFFSET           0x19000
-#define NETLOGIC_IO_TB_OFFSET              0x1C000
+#define NETLOGIC_IO_GPIO_OFFSET                   0x18000
+#define NETLOGIC_IO_FLASH_OFFSET          0x19000
+#define NETLOGIC_IO_TB_OFFSET             0x1C000
 
-#define NETLOGIC_CPLD_OFFSET               KSEG1ADDR(0x1d840000)
+#define NETLOGIC_CPLD_OFFSET              KSEG1ADDR(0x1d840000)
 
 /*
  * Base Address (Virtual) of the PCI Config address space
  * Config space spans 256 (num of buses) * 256 (num functions) * 256 bytes
  * ie 1<<24 = 16M
  */
-#define DEFAULT_PCI_CONFIG_BASE         0x18000000
-#define DEFAULT_HT_TYPE0_CFG_BASE       0x16000000
-#define DEFAULT_HT_TYPE1_CFG_BASE       0x17000000
+#define DEFAULT_PCI_CONFIG_BASE                0x18000000
+#define DEFAULT_HT_TYPE0_CFG_BASE      0x16000000
+#define DEFAULT_HT_TYPE1_CFG_BASE      0x17000000
 
 #endif
index 7e39d40..c95d18e 100644 (file)
  */
 
 #define MSI_DATA_VECTOR_SHIFT          0
-#define  MSI_DATA_VECTOR_MASK          0x000000ff
+#define         MSI_DATA_VECTOR_MASK           0x000000ff
 #define         MSI_DATA_VECTOR(v)             (((v) << MSI_DATA_VECTOR_SHIFT) & \
                                                MSI_DATA_VECTOR_MASK)
 
 #define MSI_DATA_DELIVERY_MODE_SHIFT   8
-#define  MSI_DATA_DELIVERY_FIXED       (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
-#define  MSI_DATA_DELIVERY_LOWPRI      (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define         MSI_DATA_DELIVERY_FIXED        (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define         MSI_DATA_DELIVERY_LOWPRI       (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
 
 #define MSI_DATA_LEVEL_SHIFT           14
 #define         MSI_DATA_LEVEL_DEASSERT        (0 << MSI_DATA_LEVEL_SHIFT)
 #define         MSI_DATA_LEVEL_ASSERT          (1 << MSI_DATA_LEVEL_SHIFT)
 
 #define MSI_DATA_TRIGGER_SHIFT         15
-#define  MSI_DATA_TRIGGER_EDGE         (0 << MSI_DATA_TRIGGER_SHIFT)
-#define  MSI_DATA_TRIGGER_LEVEL                (1 << MSI_DATA_TRIGGER_SHIFT)
+#define         MSI_DATA_TRIGGER_EDGE          (0 << MSI_DATA_TRIGGER_SHIFT)
+#define         MSI_DATA_TRIGGER_LEVEL         (1 << MSI_DATA_TRIGGER_SHIFT)
 
 /*
  * Shift/mask fields for msi address
 #define MSI_ADDR_BASE_LO               0xfee00000
 
 #define MSI_ADDR_DEST_MODE_SHIFT       2
-#define  MSI_ADDR_DEST_MODE_PHYSICAL   (0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define         MSI_ADDR_DEST_MODE_PHYSICAL    (0 << MSI_ADDR_DEST_MODE_SHIFT)
 #define         MSI_ADDR_DEST_MODE_LOGICAL     (1 << MSI_ADDR_DEST_MODE_SHIFT)
 
 #define MSI_ADDR_REDIRECTION_SHIFT     3
-#define  MSI_ADDR_REDIRECTION_CPU      (0 << MSI_ADDR_REDIRECTION_SHIFT)
-#define  MSI_ADDR_REDIRECTION_LOWPRI   (1 << MSI_ADDR_REDIRECTION_SHIFT)
+#define         MSI_ADDR_REDIRECTION_CPU       (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define         MSI_ADDR_REDIRECTION_LOWPRI    (1 << MSI_ADDR_REDIRECTION_SHIFT)
 
 #define MSI_ADDR_DEST_ID_SHIFT         12
 #define         MSI_ADDR_DEST_ID_MASK          0x00ffff0
-#define  MSI_ADDR_DEST_ID(dest)                (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
+#define         MSI_ADDR_DEST_ID(dest)         (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
                                                 MSI_ADDR_DEST_ID_MASK)
 
 #endif /* ASM_RMI_MSIDEF_H */
index 9a691b1..63c9917 100644 (file)
 #ifndef _ASM_NLM_XLR_PIC_H
 #define _ASM_NLM_XLR_PIC_H
 
-#define PIC_CLKS_PER_SEC               66666666ULL
+#define PIC_CLK_HZ                     66666666
 /* PIC hardware interrupt numbers */
 #define PIC_IRT_WD_INDEX               0
 #define PIC_IRT_TIMER_0_INDEX          1
+#define PIC_IRT_TIMER_INDEX(i)         ((i) + PIC_IRT_TIMER_0_INDEX)
 #define PIC_IRT_TIMER_1_INDEX          2
 #define PIC_IRT_TIMER_2_INDEX          3
 #define PIC_IRT_TIMER_3_INDEX          4
 
 /* PIC Registers */
 #define PIC_CTRL                       0x00
+#define PIC_CTRL_STE                   8       /* timer enable start bit */
 #define PIC_IPI                                0x04
 #define PIC_INT_ACK                    0x06
 
 #define PIC_TIMER_COUNT_0_BASE         0x120
 #define PIC_TIMER_COUNT_1_BASE         0x130
 
-#define PIC_IRT_0(picintr)      (PIC_IRT_0_BASE + (picintr))
+#define PIC_IRT_0(picintr)     (PIC_IRT_0_BASE + (picintr))
 #define PIC_IRT_1(picintr)     (PIC_IRT_1_BASE + (picintr))
 
 #define PIC_TIMER_MAXVAL_0(i)  (PIC_TIMER_MAXVAL_0_BASE + (i))
  * 8-39. This leaves the IRQ 0-7 for cpu interrupts like
  * count/compare and FMN
  */
-#define PIC_IRQ_BASE            8
-#define PIC_INTR_TO_IRQ(i)      (PIC_IRQ_BASE + (i))
-#define PIC_IRQ_TO_INTR(i)      ((i) - PIC_IRQ_BASE)
+#define PIC_IRQ_BASE           8
+#define PIC_INTR_TO_IRQ(i)     (PIC_IRQ_BASE + (i))
+#define PIC_IRQ_TO_INTR(i)     ((i) - PIC_IRQ_BASE)
 
 #define PIC_IRT_FIRST_IRQ      PIC_IRQ_BASE
 #define PIC_WD_IRQ             PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX)
 #define PIC_BRIDGE_AERR_IRQ    PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX)
 #define PIC_BRIDGE_BERR_IRQ    PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX)
 #define PIC_BRIDGE_TB_XLR_IRQ  PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX)
-#define PIC_BRIDGE_AERR_NMI_IRQ        PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX)
+#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX)
 /* XLS defines */
 #define PIC_GMAC_4_IRQ         PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX)
 #define PIC_GMAC_5_IRQ         PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX)
@@ -251,12 +253,52 @@ nlm_pic_ack(uint64_t base, int irt)
 }
 
 static inline void
-nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
+nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en)
 {
        nlm_write_reg(base, PIC_IRT_0(irt), (1u << hwt));
        /* local scheduling, invalid, level by default */
        nlm_write_reg(base, PIC_IRT_1(irt),
-               (1 << 30) | (1 << 6) | irq);
+               (en << 30) | (1 << 6) | irq);
+}
+
+static inline uint64_t
+nlm_pic_read_timer(uint64_t base, int timer)
+{
+       uint32_t up1, up2, low;
+
+       up1 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer));
+       low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+       up2 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer));
+
+       if (up1 != up2) /* wrapped, get the new low */
+               low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+       return ((uint64_t)up2 << 32) | low;
+
+}
+
+static inline uint32_t
+nlm_pic_read_timer32(uint64_t base, int timer)
+{
+       return nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+}
+
+static inline void
+nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu)
+{
+       uint32_t up, low;
+       uint64_t pic_ctrl = nlm_read_reg(base, PIC_CTRL);
+       int en;
+
+       en = (irq > 0);
+       up = value >> 32;
+       low = value & 0xFFFFFFFF;
+       nlm_write_reg(base, PIC_TIMER_MAXVAL_0(timer), low);
+       nlm_write_reg(base, PIC_TIMER_MAXVAL_1(timer), up);
+       nlm_pic_init_irt(base, PIC_IRT_TIMER_INDEX(timer), irq, cpu, 0);
+
+       /* enable the timer */
+       pic_ctrl |= (1 << (PIC_CTRL_STE + timer));
+       nlm_write_reg(base, PIC_CTRL, pic_ctrl);
 }
 #endif
 #endif /* _ASM_NLM_XLR_PIC_H */
index af0e51a..2e2436d 100644 (file)
@@ -2,7 +2,7 @@
  *  asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions
  *
  *  Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com>
- *                     Sony Software Development Center Europe (SDCE), Brussels
+ *                    Sony Software Development Center Europe (SDCE), Brussels
  *
  *  This file is based on the following documentation:
  *
@@ -17,7 +17,7 @@
 
 
     /*
-     *  Physical Device Address Registers (PDARs)
+     * Physical Device Address Registers (PDARs)
      */
 
 #define NILE4_SDRAM0   0x0000  /* SDRAM Bank 0 [R/W] */
@@ -37,7 +37,7 @@
 
 
     /*
-     *  CPU Interface Registers
+     * CPU Interface Registers
      */
 
 #define NILE4_CPUSTAT  0x0080  /* CPU Status [R/W] */
@@ -50,7 +50,7 @@
 
 
     /*
-     *  Memory-Interface Registers
+     * Memory-Interface Registers
      */
 
 #define NILE4_MEMCTRL  0x00C0  /* Memory Control */
@@ -59,7 +59,7 @@
 
 
     /*
-     *  PCI-Bus Registers
+     * PCI-Bus Registers
      */
 
 #define NILE4_PCICTRL  0x00E0  /* PCI Control [R/W] */
@@ -70,7 +70,7 @@
 
 
     /*
-     *  Local-Bus Registers
+     * Local-Bus Registers
      */
 
 #define NILE4_LCNFG    0x0100  /* Local Bus Configuration [R/W] */
@@ -88,7 +88,7 @@
 
 
     /*
-     *  DMA Registers
+     * DMA Registers
      */
 
 #define NILE4_DMACTRL0 0x0180  /* DMA Control 0 [R/W] */
 
 
     /*
-     *  Timer Registers
+     * Timer Registers
      */
 
 #define NILE4_T0CTRL   0x01C0  /* SDRAM Refresh Control [R/W] */
 
 
     /*
-     *  PCI Configuration Space Registers
+     * PCI Configuration Space Registers
      */
 
 #define NILE4_PCI_BASE 0x0200
 
 
     /*
-     *  Serial-Port Registers
+     * Serial-Port Registers
      */
 
-#define NILE4_UART_BASE        0x0300
+#define NILE4_UART_BASE 0x0300
 
 #define NILE4_UARTRBR  0x0300  /* UART Receiver Data Buffer [R] */
 #define NILE4_UARTTHR  0x0300  /* UART Transmitter Data Holding [W] */
 
 
     /*
-     *  Interrupt Lines
+     * Interrupt Lines
      */
 
 #define NILE4_INT_CPCE 0       /* CPU-Interface Parity-Error Interrupt */
 #define NILE4_INT_UART 4       /* UART Interrupt */
 #define NILE4_INT_WDOG 5       /* Watchdog Timer Interrupt */
 #define NILE4_INT_GPT  6       /* General-Purpose Timer Interrupt */
-#define NILE4_INT_LBRTD        7       /* Local-Bus Ready Timer Interrupt */
+#define NILE4_INT_LBRTD 7      /* Local-Bus Ready Timer Interrupt */
 #define NILE4_INT_INTA 8       /* PCI Interrupt Signal INTA# */
 #define NILE4_INT_INTB 9       /* PCI Interrupt Signal INTB# */
 #define NILE4_INT_INTC 10      /* PCI Interrupt Signal INTC# */
 
 
     /*
-     *  Nile 4 Register Access
+     * Nile 4 Register Access
      */
 
 static inline void nile4_sync(void)
@@ -247,7 +247,7 @@ static inline u8 nile4_in8(u32 offset)
 
 
     /*
-     *  Physical Device Address Registers
+     * Physical Device Address Registers
      */
 
 extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
@@ -255,7 +255,7 @@ extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
 
 
     /*
-     *  PCI Master Registers
+     * PCI Master Registers
      */
 
 #define NILE4_PCICMD_IACK      0       /* PCI Interrupt Acknowledge */
@@ -265,9 +265,9 @@ extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
 
 
     /*
-     *  PCI Address Spaces
+     * PCI Address Spaces
      *
-     *  Note that these are multiplexed using PCIINIT[01]!
+     * Note that these are multiplexed using PCIINIT[01]!
      */
 
 #define NILE4_PCI_IO_BASE      0xa6000000
@@ -280,7 +280,7 @@ extern void nile4_set_pmr(u32 pmr, u32 type, u32 addr);
 
 
     /*
-     *  Interrupt Programming
+     * Interrupt Programming
      */
 
 #define NUM_I8259_INTERRUPTS   16
index 3c74d82..e2d874e 100644 (file)
@@ -84,20 +84,20 @@ typedef enum {
  * Octeon-I HW never interprets this X (<39:36> reserved
  * for future expansion), software should set to 0.
  *
- *  - 0x0 XXX0 0000 0000 to      DRAM         Cached
+ *  - 0x0 XXX0 0000 0000 to     DRAM         Cached
  *  - 0x0 XXX0 0FFF FFFF
  *
- *  - 0x0 XXX0 1000 0000 to      Boot Bus     Uncached  (Converted to 0x1 00X0 1000 0000
- *  - 0x0 XXX0 1FFF FFFF         + EJTAG                           to 0x1 00X0 1FFF FFFF)
+ *  - 0x0 XXX0 1000 0000 to     Boot Bus     Uncached  (Converted to 0x1 00X0 1000 0000
+ *  - 0x0 XXX0 1FFF FFFF        + EJTAG                           to 0x1 00X0 1FFF FFFF)
  *
- *  - 0x0 XXX0 2000 0000 to      DRAM         Cached
+ *  - 0x0 XXX0 2000 0000 to     DRAM         Cached
  *  - 0x0 XXXF FFFF FFFF
  *
- *  - 0x1 00X0 0000 0000 to      Boot Bus     Uncached
+ *  - 0x1 00X0 0000 0000 to     Boot Bus     Uncached
  *  - 0x1 00XF FFFF FFFF
  *
- *  - 0x1 01X0 0000 0000 to      Other NCB    Uncached
- *  - 0x1 FFXF FFFF FFFF         devices
+ *  - 0x1 01X0 0000 0000 to     Other NCB    Uncached
+ *  - 0x1 FFXF FFFF FFFF        devices
  *
  * Decode of all Octeon addresses
  */
@@ -129,9 +129,9 @@ typedef union {
         */
        struct {
                uint64_t R:2;   /* CVMX_MIPS_SPACE_XKPHYS in this case */
-               uint64_t cca:3; /* ignored by octeon */
+               uint64_t cca:3; /* ignored by octeon */
                uint64_t mbz:10;
-               uint64_t pa:49; /* physical address */
+               uint64_t pa:49; /* physical address */
        } sxkphys;
 
        /* physical address */
@@ -253,22 +253,22 @@ typedef union {
 #define CVMX_OCT_DID_ASX1 23ULL
 #define CVMX_OCT_DID_IOB 30ULL
 
-#define CVMX_OCT_DID_PKT_SEND       CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
-#define CVMX_OCT_DID_TAG_SWTAG      CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
-#define CVMX_OCT_DID_TAG_TAG1       CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
-#define CVMX_OCT_DID_TAG_TAG2       CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
-#define CVMX_OCT_DID_TAG_TAG3       CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
+#define CVMX_OCT_DID_PKT_SEND      CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
+#define CVMX_OCT_DID_TAG_SWTAG     CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
+#define CVMX_OCT_DID_TAG_TAG1      CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
+#define CVMX_OCT_DID_TAG_TAG2      CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
+#define CVMX_OCT_DID_TAG_TAG3      CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
 #define CVMX_OCT_DID_TAG_NULL_RD    CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
-#define CVMX_OCT_DID_TAG_CSR        CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
-#define CVMX_OCT_DID_FAU_FAI        CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
-#define CVMX_OCT_DID_TIM_CSR        CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
-#define CVMX_OCT_DID_KEY_RW         CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
-#define CVMX_OCT_DID_PCI_6          CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
-#define CVMX_OCT_DID_MIS_BOO        CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
-#define CVMX_OCT_DID_PCI_RML        CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
-#define CVMX_OCT_DID_IPD_CSR        CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
-#define CVMX_OCT_DID_DFA_CSR        CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
-#define CVMX_OCT_DID_MIS_CSR        CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
-#define CVMX_OCT_DID_ZIP_CSR        CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
+#define CVMX_OCT_DID_TAG_CSR       CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
+#define CVMX_OCT_DID_FAU_FAI       CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
+#define CVMX_OCT_DID_TIM_CSR       CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
+#define CVMX_OCT_DID_KEY_RW        CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
+#define CVMX_OCT_DID_PCI_6         CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
+#define CVMX_OCT_DID_MIS_BOO       CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
+#define CVMX_OCT_DID_PCI_RML       CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
+#define CVMX_OCT_DID_IPD_CSR       CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
+#define CVMX_OCT_DID_DFA_CSR       CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
+#define CVMX_OCT_DID_MIS_CSR       CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
+#define CVMX_OCT_DID_ZIP_CSR       CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
 
 #endif /* __CVMX_ADDRESS_H__ */
index 1db1dc2..284fa8d 100644 (file)
@@ -91,11 +91,11 @@ struct cvmx_bootinfo {
 #if (CVMX_BOOTINFO_MIN_VER >= 1)
        /*
         * Several boards support compact flash on the Octeon boot
-        * bus.  The CF memory spaces may be mapped to different
+        * bus.  The CF memory spaces may be mapped to different
         * addresses on different boards.  These are the physical
         * addresses, so care must be taken to use the correct
         * XKPHYS/KSEG0 addressing depending on the application's
-        * ABI.  These values will be 0 if CF is not present.
+        * ABI.  These values will be 0 if CF is not present.
         */
        uint64_t compact_flash_common_base_addr;
        uint64_t compact_flash_attribute_base_addr;
@@ -131,7 +131,7 @@ struct cvmx_bootinfo {
 #define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC                        (1ull << 3)
 /* This flag is set if the TLB mappings are not contained in the
  * 0x10000000 - 0x20000000 boot bus region. */
-#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING     (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING    (1ull << 4)
 #define CVMX_BOOTINFO_CFG_FLAG_BREAK                   (1ull << 5)
 
 #endif /*   (CVMX_BOOTINFO_MAJ_VER == 1) */
@@ -164,9 +164,9 @@ enum cvmx_board_types_enum {
        CVMX_BOARD_TYPE_EBT5600 = 22,
        CVMX_BOARD_TYPE_EBH5201 = 23,
        CVMX_BOARD_TYPE_EBT5200 = 24,
-       CVMX_BOARD_TYPE_CB5600  = 25,
-       CVMX_BOARD_TYPE_CB5601  = 26,
-       CVMX_BOARD_TYPE_CB5200  = 27,
+       CVMX_BOARD_TYPE_CB5600  = 25,
+       CVMX_BOARD_TYPE_CB5601  = 26,
+       CVMX_BOARD_TYPE_CB5200  = 27,
        /* Special 'generic' board type, supports many boards */
        CVMX_BOARD_TYPE_GENERIC = 28,
        CVMX_BOARD_TYPE_EBH5610 = 29,
@@ -223,7 +223,7 @@ enum cvmx_board_types_enum {
        CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
 
        /*
-        * Set aside a range for customer private use.  The SDK won't
+        * Set aside a range for customer private use.  The SDK won't
         * use any numbers in this range.
         */
        CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
index 42db2be..352f1dc 100644 (file)
@@ -39,7 +39,7 @@
 #define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
 
 /* minimum alignment of bootmem alloced blocks */
-#define CVMX_BOOTMEM_ALIGNMENT_SIZE     (16ull)
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE    (16ull)
 
 /* Flags for cvmx_bootmem_phy_mem* functions */
 /* Allocate from end of block instead of beginning */
@@ -151,8 +151,8 @@ extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
  * memory cannot be allocated at the specified address.
  *
  * @size:      Size in bytes of block to allocate
- * @address:   Physical address to allocate memory at.  If this memory is not
- *                  available, the allocation fails.
+ * @address:   Physical address to allocate memory at. If this memory is not
+ *                 available, the allocation fails.
  * @alignment: Alignment required - must be power of 2
  * Returns pointer to block of memory, NULL on error
  */
@@ -181,7 +181,7 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
  * @name:   name of block to free
  *
  * Returns 0 on failure,
- *         !0 on success
+ *        !0 on success
  */
 
 
@@ -210,9 +210,9 @@ extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment,
  *
  * @size:     Size in bytes of block to allocate
  * @address:  Physical address to allocate memory at.  If this
- *            memory is not available, the allocation fails.
+ *           memory is not available, the allocation fails.
  * @name:     name of block - must be less than CVMX_BOOTMEM_NAME_LEN
- *            bytes
+ *           bytes
  *
  * Returns a pointer to block of memory, NULL on error
  */
@@ -249,7 +249,7 @@ extern int cvmx_bootmem_free_named(char *name);
  * @name:   name of block to free
  *
  * Returns pointer to named block descriptor on success
- *         0 on failure
+ *        0 on failure
  */
 struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
 
@@ -258,20 +258,20 @@ struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
  * (optional) requested address and alignment.
  *
  * @req_size: size of region to allocate.  All requests are rounded up
- *            to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
+ *           to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
  *
  * @address_min: Minimum address that block can occupy.
  *
  * @address_max: Specifies the maximum address_min (inclusive) that
- *               the allocation can use.
+ *              the allocation can use.
  *
  * @alignment: Requested alignment of the block.  If this alignment
- *             cannot be met, the allocation fails.  This must be a
- *             power of 2.  (Note: Alignment of
- *             CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
- *             internally enforced.  Requested alignments of less than
- *             CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
- *             CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ *            cannot be met, the allocation fails.  This must be a
+ *            power of 2.  (Note: Alignment of
+ *            CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ *            internally enforced.  Requested alignments of less than
+ *            CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ *            CVMX_BOOTMEM_ALIGNMENT_SIZE.)
  *
  * @flags:     Flags to control options for the allocation.
  *
@@ -285,21 +285,21 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
  * Allocates a named block of physical memory from the free list, at
  * (optional) requested address and alignment.
  *
- * @param size      size of region to allocate.  All requests are rounded
- *                  up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE
- *                  bytes size
+ * @param size     size of region to allocate.  All requests are rounded
+ *                 up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE
+ *                 bytes size
  * @param min_addr Minimum address that block can occupy.
  * @param max_addr  Specifies the maximum address_min (inclusive) that
- *                  the allocation can use.
+ *                 the allocation can use.
  * @param alignment Requested alignment of the block.  If this
- *                  alignment cannot be met, the allocation fails.
- *                  This must be a power of 2.  (Note: Alignment of
- *                  CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
- *                  internally enforced.  Requested alignments of less
- *                  than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
- *                  CVMX_BOOTMEM_ALIGNMENT_SIZE.)
- * @param name      name to assign to named block
- * @param flags     Flags to control options for the allocation.
+ *                 alignment cannot be met, the allocation fails.
+ *                 This must be a power of 2.  (Note: Alignment of
+ *                 CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ *                 internally enforced.  Requested alignments of less
+ *                 than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ *                 CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param name     name to assign to named block
+ * @param flags            Flags to control options for the allocation.
  *
  * @return physical address of block allocated, or -1 on failure
  */
@@ -312,14 +312,14 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
  * Finds a named memory block by name.
  * Also used for finding an unused entry in the named block table.
  *
- * @name: Name of memory block to find.  If NULL pointer given, then
- *        finds unused descriptor, if available.
+ * @name: Name of memory block to find.         If NULL pointer given, then
+ *       finds unused descriptor, if available.
  *
  * @flags: Flags to control options for the allocation.
  *
  * Returns Pointer to memory block descriptor, NULL if not found.
- *         If NULL returned when name parameter is NULL, then no memory
- *         block descriptors are available.
+ *        If NULL returned when name parameter is NULL, then no memory
+ *        block descriptors are available.
  */
 struct cvmx_bootmem_named_block_desc *
 cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
@@ -331,31 +331,31 @@ cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
  * @flags:  flags for passing options
  *
  * Returns 0 on failure
- *         1 on success
+ *        1 on success
  */
 int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
 
 /**
- * Frees a block to the bootmem allocator list.  This must
+ * Frees a block to the bootmem allocator list.         This must
  * be used with care, as the size provided must match the size
  * of the block that was allocated, or the list will become
  * corrupted.
  *
  * IMPORTANT:  This is only intended to be used as part of named block
  * frees and initial population of the free memory list.
- *                                                      *
+ *                                                     *
  *
  * @phy_addr: physical address of block
  * @size:     size of block in bytes.
  * @flags:    flags for passing options
  *
  * Returns 1 on success,
- *         0 on failure
+ *        0 on failure
  */
 int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
 
 /**
- * Locks the bootmem allocator.  This is useful in certain situations
+ * Locks the bootmem allocator.         This is useful in certain situations
  * where multiple allocations must be made without being interrupted.
  * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
  *
index fed9112..024a71b 100644 (file)
@@ -244,33 +244,33 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
                ".set noreorder\n"
                "1:\n"
                /* Atomic add one to ticket_ptr */
-               "ll     %[my_ticket], %[ticket_ptr]\n"
+               "ll     %[my_ticket], %[ticket_ptr]\n"
                /* and store the original value */
-               "li     %[ticket], 1\n"
+               "li     %[ticket], 1\n"
                /* in my_ticket */
-               "baddu  %[ticket], %[my_ticket]\n"
-               "sc     %[ticket], %[ticket_ptr]\n"
-               "beqz   %[ticket], 1b\n"
+               "baddu  %[ticket], %[my_ticket]\n"
+               "sc     %[ticket], %[ticket_ptr]\n"
+               "beqz   %[ticket], 1b\n"
                " nop\n"
                /* Load the current now_serving ticket */
-               "lbu    %[ticket], %[now_serving]\n"
+               "lbu    %[ticket], %[now_serving]\n"
                "2:\n"
                /* Jump out if now_serving == my_ticket */
-               "beq    %[ticket], %[my_ticket], 4f\n"
+               "beq    %[ticket], %[my_ticket], 4f\n"
                /* Find out how many tickets are in front of me */
-               " subu   %[ticket], %[my_ticket], %[ticket]\n"
+               " subu   %[ticket], %[my_ticket], %[ticket]\n"
                /* Use tickets in front of me minus one to delay */
                "subu  %[ticket], 1\n"
                /* Delay will be ((tickets in front)-1)*32 loops */
-               "cins   %[ticket], %[ticket], 5, 7\n"
+               "cins   %[ticket], %[ticket], 5, 7\n"
                "3:\n"
                /* Loop here until our ticket might be up */
-               "bnez   %[ticket], 3b\n"
-               " subu  %[ticket], 1\n"
+               "bnez   %[ticket], 3b\n"
+               " subu  %[ticket], 1\n"
                /* Jump back up to check out ticket again */
-               "b      2b\n"
+               "b      2b\n"
                /* Load the current now_serving ticket */
-               " lbu   %[ticket], %[now_serving]\n"
+               " lbu   %[ticket], %[now_serving]\n"
                "4:\n"
                ".set pop\n" :
                [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
@@ -313,9 +313,9 @@ static inline __cvmx_cmd_queue_state_t
  *
  * @queue_id:  Hardware command queue to write to
  * @use_locking:
- *                  Use internal locking to ensure exclusive access for queue
- *                  updates. If you don't use this locking you must ensure
- *                  exclusivity some other way. Locking is strongly recommended.
+ *                 Use internal locking to ensure exclusive access for queue
+ *                 updates. If you don't use this locking you must ensure
+ *                 exclusivity some other way. Locking is strongly recommended.
  * @cmd_count: Number of command words to write
  * @cmds:      Array of commands to write
  *
@@ -411,9 +411,9 @@ static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
  *
  * @queue_id: Hardware command queue to write to
  * @use_locking:
- *                 Use internal locking to ensure exclusive access for queue
- *                 updates. If you don't use this locking you must ensure
- *                 exclusivity some other way. Locking is strongly recommended.
+ *                Use internal locking to ensure exclusive access for queue
+ *                updates. If you don't use this locking you must ensure
+ *                exclusivity some other way. Locking is strongly recommended.
  * @cmd1:     Command
  * @cmd2:     Command
  *
@@ -510,9 +510,9 @@ static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
  *
  * @queue_id: Hardware command queue to write to
  * @use_locking:
- *                 Use internal locking to ensure exclusive access for queue
- *                 updates. If you don't use this locking you must ensure
- *                 exclusivity some other way. Locking is strongly recommended.
+ *                Use internal locking to ensure exclusive access for queue
+ *                updates. If you don't use this locking you must ensure
+ *                exclusivity some other way. Locking is strongly recommended.
  * @cmd1:     Command
  * @cmd2:     Command
  * @cmd3:     Command
index 26835d1..f7dd17d 100644 (file)
 
 /* Pools in use */
 /* Packet buffers */
-#define CVMX_FPA_PACKET_POOL                (0)
-#define CVMX_FPA_PACKET_POOL_SIZE           CVMX_FPA_POOL_0_SIZE
+#define CVMX_FPA_PACKET_POOL               (0)
+#define CVMX_FPA_PACKET_POOL_SIZE          CVMX_FPA_POOL_0_SIZE
 /* Work queue entrys */
-#define CVMX_FPA_WQE_POOL                   (1)
-#define CVMX_FPA_WQE_POOL_SIZE              CVMX_FPA_POOL_1_SIZE
+#define CVMX_FPA_WQE_POOL                  (1)
+#define CVMX_FPA_WQE_POOL_SIZE             CVMX_FPA_POOL_1_SIZE
 /* PKO queue command buffers */
-#define CVMX_FPA_OUTPUT_BUFFER_POOL         (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL        (2)
 #define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE    CVMX_FPA_POOL_2_SIZE
 
 /*************************  FAU allocation ********************************/
@@ -45,7 +45,7 @@
  * in order of descending size so that all alignment constraints are
  * automatically met.  The enums are linked so that the following enum
  * continues allocating where the previous one left off, so the
- * numbering within each enum always starts with zero.  The macros
+ * numbering within each enum always starts with zero. The macros
  * take care of the address increment size, so the values entered
  * always increase by 1.  FAU registers are accessed with byte
  * addresses.
@@ -90,9 +90,9 @@ typedef enum {
  * be taken into account.
  */
 /* Generic scratch iobdma area */
-#define CVMX_SCR_SCRATCH               (0)
+#define CVMX_SCR_SCRATCH              (0)
 /* First location available after cvmx-config.h allocated region. */
-#define CVMX_SCR_REG_AVAIL_BASE        (8)
+#define CVMX_SCR_REG_AVAIL_BASE               (8)
 
 /*
  * CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
@@ -145,14 +145,14 @@ typedef enum {
  * 1: include
  */
 #define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP      0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP      0
-#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT    0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT    0
-#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER         0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP      0
+#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT    0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT    0
+#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
 #define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP      0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP      0
-#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT    0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT    0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP      0
+#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT    0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT    0
 #define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL    0
 #define CVMX_HELPER_INPUT_TAG_INPUT_PORT       1
 
index a6939fc..ef98f7f 100644 (file)
  */
 
 #define CVMX_FAU_LOAD_IO_ADDRESS    cvmx_build_io_address(0x1e, 0)
-#define CVMX_FAU_BITS_SCRADDR       63, 56
-#define CVMX_FAU_BITS_LEN           55, 48
-#define CVMX_FAU_BITS_INEVAL        35, 14
-#define CVMX_FAU_BITS_TAGWAIT       13, 13
-#define CVMX_FAU_BITS_NOADD         13, 13
-#define CVMX_FAU_BITS_SIZE          12, 11
-#define CVMX_FAU_BITS_REGISTER      10, 0
+#define CVMX_FAU_BITS_SCRADDR      63, 56
+#define CVMX_FAU_BITS_LEN          55, 48
+#define CVMX_FAU_BITS_INEVAL       35, 14
+#define CVMX_FAU_BITS_TAGWAIT      13, 13
+#define CVMX_FAU_BITS_NOADD        13, 13
+#define CVMX_FAU_BITS_SIZE         12, 11
+#define CVMX_FAU_BITS_REGISTER     10, 0
 
 typedef enum {
        CVMX_FAU_OP_SIZE_8 = 0,
@@ -109,11 +109,11 @@ typedef union {
  * Builds a store I/O address for writing to the FAU
  *
  * @noadd:  0 = Store value is atomically added to the current value
- *               1 = Store value is atomically written over the current value
+ *              1 = Store value is atomically written over the current value
  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
- *               - Step by 2 for 16 bit access.
- *               - Step by 4 for 32 bit access.
- *               - Step by 8 for 64 bit access.
+ *              - Step by 2 for 16 bit access.
+ *              - Step by 4 for 32 bit access.
+ *              - Step by 8 for 64 bit access.
  * Returns Address to store for atomic update
  */
 static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
@@ -127,16 +127,16 @@ static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
  * Builds a I/O address for accessing the FAU
  *
  * @tagwait: Should the atomic add wait for the current tag switch
- *                operation to complete.
- *                - 0 = Don't wait
- *                - 1 = Wait for tag switch to complete
+ *               operation to complete.
+ *               - 0 = Don't wait
+ *               - 1 = Wait for tag switch to complete
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 2 for 16 bit access.
- *                - Step by 4 for 32 bit access.
- *                - Step by 8 for 64 bit access.
+ *               - Step by 2 for 16 bit access.
+ *               - Step by 4 for 32 bit access.
+ *               - Step by 8 for 64 bit access.
  * @value:   Signed value to add.
- *                Note: When performing 32 and 64 bit access, only the low
- *                22 bits are available.
+ *               Note: When performing 32 and 64 bit access, only the low
+ *               22 bits are available.
  * Returns Address to read from for atomic update
  */
 static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
@@ -152,9 +152,9 @@ static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
  * Perform an atomic 64 bit add
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 8 for 64 bit access.
+ *               - Step by 8 for 64 bit access.
  * @value:   Signed value to add.
- *                Note: Only the low 22 bits are available.
+ *               Note: Only the low 22 bits are available.
  * Returns Value of the register before the update
  */
 static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
@@ -167,9 +167,9 @@ static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
  * Perform an atomic 32 bit add
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 4 for 32 bit access.
+ *               - Step by 4 for 32 bit access.
  * @value:   Signed value to add.
- *                Note: Only the low 22 bits are available.
+ *               Note: Only the low 22 bits are available.
  * Returns Value of the register before the update
  */
 static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
@@ -182,7 +182,7 @@ static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
  * Perform an atomic 16 bit add
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 2 for 16 bit access.
+ *               - Step by 2 for 16 bit access.
  * @value:   Signed value to add.
  * Returns Value of the register before the update
  */
@@ -209,12 +209,12 @@ static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
  * completes
  *
  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
- *               - Step by 8 for 64 bit access.
+ *              - Step by 8 for 64 bit access.
  * @value:  Signed value to add.
- *               Note: Only the low 22 bits are available.
+ *              Note: Only the low 22 bits are available.
  * Returns If a timeout occurs, the error bit will be set. Otherwise
- *         the value of the register before the update will be
- *         returned
+ *        the value of the register before the update will be
+ *        returned
  */
 static inline cvmx_fau_tagwait64_t
 cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -233,12 +233,12 @@ cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
  * completes
  *
  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
- *               - Step by 4 for 32 bit access.
+ *              - Step by 4 for 32 bit access.
  * @value:  Signed value to add.
- *               Note: Only the low 22 bits are available.
+ *              Note: Only the low 22 bits are available.
  * Returns If a timeout occurs, the error bit will be set. Otherwise
- *         the value of the register before the update will be
- *         returned
+ *        the value of the register before the update will be
+ *        returned
  */
 static inline cvmx_fau_tagwait32_t
 cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -257,11 +257,11 @@ cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
  * completes
  *
  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
- *               - Step by 2 for 16 bit access.
+ *              - Step by 2 for 16 bit access.
  * @value:  Signed value to add.
  * Returns If a timeout occurs, the error bit will be set. Otherwise
- *         the value of the register before the update will be
- *         returned
+ *        the value of the register before the update will be
+ *        returned
  */
 static inline cvmx_fau_tagwait16_t
 cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
@@ -282,8 +282,8 @@ cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
  * @reg:    FAU atomic register to access. 0 <= reg < 2048.
  * @value:  Signed value to add.
  * Returns If a timeout occurs, the error bit will be set. Otherwise
- *         the value of the register before the update will be
- *         returned
+ *        the value of the register before the update will be
+ *        returned
  */
 static inline cvmx_fau_tagwait8_t
 cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
@@ -301,21 +301,21 @@ cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
  *
  * @scraddr: Scratch pad byte address to write to.  Must be 8 byte aligned
  * @value:   Signed value to add.
- *                Note: When performing 32 and 64 bit access, only the low
- *                22 bits are available.
+ *               Note: When performing 32 and 64 bit access, only the low
+ *               22 bits are available.
  * @tagwait: Should the atomic add wait for the current tag switch
- *                operation to complete.
- *                - 0 = Don't wait
- *                - 1 = Wait for tag switch to complete
+ *               operation to complete.
+ *               - 0 = Don't wait
+ *               - 1 = Wait for tag switch to complete
  * @size:    The size of the operation:
- *                - CVMX_FAU_OP_SIZE_8  (0) = 8 bits
- *                - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
- *                - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
- *                - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
+ *               - CVMX_FAU_OP_SIZE_8  (0) = 8 bits
+ *               - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
+ *               - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
+ *               - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 2 for 16 bit access.
- *                - Step by 4 for 32 bit access.
- *                - Step by 8 for 64 bit access.
+ *               - Step by 2 for 16 bit access.
+ *               - Step by 4 for 32 bit access.
+ *               - Step by 8 for 64 bit access.
  * Returns Data to write using cvmx_send_single
  */
 static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
@@ -337,11 +337,11 @@ static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
  * placed in the scratch memory at byte address scraddr.
  *
  * @scraddr: Scratch memory byte address to put response in.
- *                Must be 8 byte aligned.
+ *               Must be 8 byte aligned.
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 8 for 64 bit access.
+ *               - Step by 8 for 64 bit access.
  * @value:   Signed value to add.
- *                Note: Only the low 22 bits are available.
+ *               Note: Only the low 22 bits are available.
  * Returns Placed in the scratch pad register
  */
 static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
@@ -357,11 +357,11 @@ static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
  * placed in the scratch memory at byte address scraddr.
  *
  * @scraddr: Scratch memory byte address to put response in.
- *                Must be 8 byte aligned.
+ *               Must be 8 byte aligned.
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 4 for 32 bit access.
+ *               - Step by 4 for 32 bit access.
  * @value:   Signed value to add.
- *                Note: Only the low 22 bits are available.
+ *               Note: Only the low 22 bits are available.
  * Returns Placed in the scratch pad register
  */
 static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
@@ -377,9 +377,9 @@ static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
  * placed in the scratch memory at byte address scraddr.
  *
  * @scraddr: Scratch memory byte address to put response in.
- *                Must be 8 byte aligned.
+ *               Must be 8 byte aligned.
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 2 for 16 bit access.
+ *               - Step by 2 for 16 bit access.
  * @value:   Signed value to add.
  * Returns Placed in the scratch pad register
  */
@@ -396,7 +396,7 @@ static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
  * placed in the scratch memory at byte address scraddr.
  *
  * @scraddr: Scratch memory byte address to put response in.
- *                Must be 8 byte aligned.
+ *               Must be 8 byte aligned.
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
  * @value:   Signed value to add.
  * Returns Placed in the scratch pad register
@@ -414,14 +414,14 @@ static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
  * switch completes.
  *
  * @scraddr: Scratch memory byte address to put response in.  Must be
- *           8 byte aligned.  If a timeout occurs, the error bit (63)
- *           will be set. Otherwise the value of the register before
- *           the update will be returned
+ *          8 byte aligned.  If a timeout occurs, the error bit (63)
+ *          will be set. Otherwise the value of the register before
+ *          the update will be returned
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 8 for 64 bit access.
+ *               - Step by 8 for 64 bit access.
  * @value:   Signed value to add.
- *                Note: Only the low 22 bits are available.
+ *               Note: Only the low 22 bits are available.
  * Returns Placed in the scratch pad register
  */
 static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
@@ -437,14 +437,14 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
  * switch completes.
  *
  * @scraddr: Scratch memory byte address to put response in.  Must be
- *           8 byte aligned.  If a timeout occurs, the error bit (63)
- *           will be set. Otherwise the value of the register before
- *           the update will be returned
+ *          8 byte aligned.  If a timeout occurs, the error bit (63)
+ *          will be set. Otherwise the value of the register before
+ *          the update will be returned
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 4 for 32 bit access.
+ *               - Step by 4 for 32 bit access.
  * @value:   Signed value to add.
- *                Note: Only the low 22 bits are available.
+ *               Note: Only the low 22 bits are available.
  * Returns Placed in the scratch pad register
  */
 static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
@@ -460,12 +460,12 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
  * switch completes.
  *
  * @scraddr: Scratch memory byte address to put response in.  Must be
- *           8 byte aligned.  If a timeout occurs, the error bit (63)
- *           will be set. Otherwise the value of the register before
- *           the update will be returned
+ *          8 byte aligned.  If a timeout occurs, the error bit (63)
+ *          will be set. Otherwise the value of the register before
+ *          the update will be returned
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 2 for 16 bit access.
+ *               - Step by 2 for 16 bit access.
  * @value:   Signed value to add.
  *
  * Returns Placed in the scratch pad register
@@ -483,9 +483,9 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
  * switch completes.
  *
  * @scraddr: Scratch memory byte address to put response in.  Must be
- *           8 byte aligned.  If a timeout occurs, the error bit (63)
- *           will be set. Otherwise the value of the register before
- *           the update will be returned
+ *          8 byte aligned.  If a timeout occurs, the error bit (63)
+ *          will be set. Otherwise the value of the register before
+ *          the update will be returned
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
  * @value:   Signed value to add.
@@ -504,7 +504,7 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
  * Perform an atomic 64 bit add
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 8 for 64 bit access.
+ *               - Step by 8 for 64 bit access.
  * @value:   Signed value to add.
  */
 static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -516,7 +516,7 @@ static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
  * Perform an atomic 32 bit add
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 4 for 32 bit access.
+ *               - Step by 4 for 32 bit access.
  * @value:   Signed value to add.
  */
 static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -528,7 +528,7 @@ static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
  * Perform an atomic 16 bit add
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 2 for 16 bit access.
+ *               - Step by 2 for 16 bit access.
  * @value:   Signed value to add.
  */
 static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
@@ -551,7 +551,7 @@ static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
  * Perform an atomic 64 bit write
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 8 for 64 bit access.
+ *               - Step by 8 for 64 bit access.
  * @value:   Signed value to write.
  */
 static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -563,7 +563,7 @@ static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
  * Perform an atomic 32 bit write
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 4 for 32 bit access.
+ *               - Step by 4 for 32 bit access.
  * @value:   Signed value to write.
  */
 static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -575,7 +575,7 @@ static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
  * Perform an atomic 16 bit write
  *
  * @reg:     FAU atomic register to access. 0 <= reg < 2048.
- *                - Step by 2 for 16 bit access.
+ *               - Step by 2 for 16 bit access.
  * @value:   Signed value to write.
  */
 static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
index 541a1ae..aa26a2c 100644 (file)
@@ -39,9 +39,9 @@
 #include <asm/octeon/cvmx-address.h>
 #include <asm/octeon/cvmx-fpa-defs.h>
 
-#define CVMX_FPA_NUM_POOLS      8
+#define CVMX_FPA_NUM_POOLS     8
 #define CVMX_FPA_MIN_BLOCK_SIZE 128
-#define CVMX_FPA_ALIGNMENT      128
+#define CVMX_FPA_ALIGNMENT     128
 
 /**
  * Structure describing the data format used for stores to the FPA.
@@ -186,8 +186,8 @@ static inline void *cvmx_fpa_alloc(uint64_t pool)
 /**
  * Asynchronously get a new block from the FPA
  *
- * @scr_addr: Local scratch address to put response in.  This is a byte address,
- *                  but must be 8 byte aligned.
+ * @scr_addr: Local scratch address to put response in.         This is a byte address,
+ *                 but must be 8 byte aligned.
  * @pool:      Pool to get the block from
  */
 static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
@@ -212,7 +212,7 @@ static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
  * @ptr:    Block to free
  * @pool:   Pool to put it in
  * @num_cache_lines:
- *               Cache lines to invalidate
+ *              Cache lines to invalidate
  */
 static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
                                        uint64_t num_cache_lines)
@@ -234,7 +234,7 @@ static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
  * @ptr:    Block to free
  * @pool:   Pool to put it in
  * @num_cache_lines:
- *               Cache lines to invalidate
+ *              Cache lines to invalidate
  */
 static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
                                 uint64_t num_cache_lines)
@@ -245,7 +245,7 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
            CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
        /*
         * Make sure that any previous writes to memory go out before
-        * we free this buffer.  This also serves as a barrier to
+        * we free this buffer.  This also serves as a barrier to
         * prevent GCC from reordering operations to after the
         * free.
         */
@@ -259,17 +259,17 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
  * This can only be called once per pool. Make sure proper
  * locking enforces this.
  *
- * @pool:       Pool to initialize
- *                   0 <= pool < 8
- * @name:       Constant character string to name this pool.
- *                   String is not copied.
- * @buffer:     Pointer to the block of memory to use. This must be
- *                   accessible by all processors and external hardware.
+ * @pool:      Pool to initialize
+ *                  0 <= pool < 8
+ * @name:      Constant character string to name this pool.
+ *                  String is not copied.
+ * @buffer:    Pointer to the block of memory to use. This must be
+ *                  accessible by all processors and external hardware.
  * @block_size: Size for each block controlled by the FPA
  * @num_blocks: Number of blocks
  *
  * Returns 0 on Success,
- *         -1 on failure
+ *        -1 on failure
  */
 extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
                               uint64_t block_size, uint64_t num_blocks);
@@ -282,8 +282,8 @@ extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
  *
  * @pool:   Pool to shutdown
  * Returns Zero on success
- *         - Positive is count of missing buffers
- *         - Negative is too many buffers or corrupted pointers
+ *        - Positive is count of missing buffers
+ *        - Negative is too many buffers or corrupted pointers
  */
 extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
 
index 442f508..41785dd 100644 (file)
@@ -48,7 +48,7 @@ typedef enum {
  * Fake IPD port, the RGMII/MII interface may use different PHY, use
  * this macro to return appropriate MIX address to read the PHY.
  */
-#define CVMX_HELPER_BOARD_MGMT_IPD_PORT     -10
+#define CVMX_HELPER_BOARD_MGMT_IPD_PORT            -10
 
 /**
  * cvmx_override_board_link_get(int ipd_port) is a function
@@ -86,10 +86,10 @@ extern int cvmx_helper_board_get_mii_address(int ipd_port);
  *
  * @phy_addr:  The address of the PHY to program
  * @link_flags:
- *                  Flags to control autonegotiation.  Bit 0 is autonegotiation
- *                  enable/disable to maintain backware compatibility.
+ *                 Flags to control autonegotiation.  Bit 0 is autonegotiation
+ *                 enable/disable to maintain backware compatibility.
  * @link_info: Link speed to program. If the speed is zero and autonegotiation
- *                  is enabled, all possible negotiation speeds are advertised.
+ *                 is enabled, all possible negotiation speeds are advertised.
  *
  * Returns Zero on success, negative on failure
  */
@@ -111,10 +111,10 @@ int cvmx_helper_board_link_set_phy(int phy_addr,
  * enumeration from the bootloader.
  *
  * @ipd_port: IPD input port associated with the port we want to get link
- *                 status for.
+ *                status for.
  *
  * Returns The ports link status. If the link isn't fully resolved, this must
- *         return zero.
+ *        return zero.
  */
 extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
 
@@ -134,10 +134,10 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
  *
  * @interface: Interface to probe
  * @supported_ports:
- *                  Number of ports Octeon supports.
+ *                 Number of ports Octeon supports.
  *
  * Returns Number of ports the actual board supports. Many times this will
- *         simple be "support_ports".
+ *        simple be "support_ports".
  */
 extern int __cvmx_helper_board_interface_probe(int interface,
                                               int supported_ports);
index 78295ba..4d7a3db 100644 (file)
@@ -98,9 +98,9 @@ extern int __cvmx_helper_rgmii_link_set(int ipd_port,
  *
  * @ipd_port: IPD/PKO port to loopback.
  * @enable_internal:
- *                 Non zero if you want internal loopback
+ *                Non zero if you want internal loopback
  * @enable_external:
- *                 Non zero if you want external loopback
+ *                Non zero if you want external loopback
  *
  * Returns Zero on success, negative on failure.
  */
index 9a9b6c1..4debb1c 100644 (file)
@@ -92,9 +92,9 @@ extern int __cvmx_helper_sgmii_link_set(int ipd_port,
  *
  * @ipd_port: IPD/PKO port to loopback.
  * @enable_internal:
- *                 Non zero if you want internal loopback
+ *                Non zero if you want internal loopback
  * @enable_external:
- *                 Non zero if you want external loopback
+ *                Non zero if you want external loopback
  *
  * Returns Zero on success, negative on failure.
  */
index 01c8ddd..f446f21 100644 (file)
@@ -57,11 +57,11 @@ extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
  *
  * @queue:  Input queue to setup RED on (0-7)
  * @pass_thresh:
- *               Packets will begin slowly dropping when there are less than
- *               this many packet buffers free in FPA 0.
+ *              Packets will begin slowly dropping when there are less than
+ *              this many packet buffers free in FPA 0.
  * @drop_thresh:
- *               All incoming packets will be dropped when there are less
- *               than this many free packet buffers in FPA 0.
+ *              All incoming packets will be dropped when there are less
+ *              than this many free packet buffers in FPA 0.
  * Returns Zero on success. Negative on failure
  */
 extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
@@ -71,11 +71,11 @@ extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
  * Setup Random Early Drop to automatically begin dropping packets.
  *
  * @pass_thresh:
- *               Packets will begin slowly dropping when there are less than
- *               this many packet buffers free in FPA 0.
+ *              Packets will begin slowly dropping when there are less than
+ *              this many packet buffers free in FPA 0.
  * @drop_thresh:
- *               All incoming packets will be dropped when there are less
- *               than this many free packet buffers in FPA 0.
+ *              All incoming packets will be dropped when there are less
+ *              than this many free packet buffers in FPA 0.
  * Returns Zero on success. Negative on failure
  */
 extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
@@ -84,7 +84,7 @@ extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
  * Get the version of the CVMX libraries.
  *
  * Returns Version string. Note this buffer is allocated statically
- *         and will be shared by all callers.
+ *        and will be shared by all callers.
  */
 extern const char *cvmx_helper_get_version(void);
 
index f6fbc4f..5e89ed7 100644 (file)
@@ -92,9 +92,9 @@ extern int __cvmx_helper_xaui_link_set(int ipd_port,
  *
  * @ipd_port: IPD/PKO port to loopback.
  * @enable_internal:
- *                 Non zero if you want internal loopback
+ *                Non zero if you want internal loopback
  * @enable_external:
- *                 Non zero if you want external loopback
+ *                Non zero if you want external loopback
  *
  * Returns Zero on success, negative on failure.
  */
index 691c814..5a3090d 100644 (file)
@@ -93,12 +93,12 @@ extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
 /**
  * This function enables the IPD and also enables the packet interfaces.
  * The packet interfaces (RGMII and SPI) must be enabled after the
- * IPD.  This should be called by the user program after any additional
+ * IPD.         This should be called by the user program after any additional
  * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
  * is not set in the executive-config.h file.
  *
  * Returns 0 on success
- *         -1 on failure
+ *        -1 on failure
  */
 extern int cvmx_helper_ipd_and_packet_input_enable(void);
 
@@ -128,7 +128,7 @@ extern int cvmx_helper_initialize_packet_io_local(void);
  * @interface: Which interface to return port count for.
  *
  * Returns Port count for interface
- *         -1 for uninitialized interface
+ *        -1 for uninitialized interface
  */
 extern int cvmx_helper_ports_on_interface(int interface);
 
@@ -150,7 +150,7 @@ extern int cvmx_helper_get_number_of_interfaces(void);
  * @interface: Interface to probe
  *
  * Returns Mode of the interface. Unknown or unsupported interfaces return
- *         DISABLED.
+ *        DISABLED.
  */
 extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
                                                                   interface);
@@ -214,9 +214,9 @@ extern int cvmx_helper_interface_enumerate(int interface);
  *
  * @ipd_port: IPD/PKO port to loopback.
  * @enable_internal:
- *                 Non zero if you want internal loopback
+ *                Non zero if you want internal loopback
  * @enable_external:
- *                 Non zero if you want external loopback
+ *                Non zero if you want external loopback
  *
  * Returns Zero on success, negative on failure.
  */
index 115a552..e13490e 100644 (file)
@@ -38,8 +38,8 @@
 #include <asm/octeon/cvmx-ipd-defs.h>
 
 enum cvmx_ipd_mode {
-   CVMX_IPD_OPC_MODE_STT = 0LL,   /* All blocks DRAM, not cached in L2 */
-   CVMX_IPD_OPC_MODE_STF = 1LL,   /* All bloccks into  L2 */
+   CVMX_IPD_OPC_MODE_STT = 0LL,          /* All blocks DRAM, not cached in L2 */
+   CVMX_IPD_OPC_MODE_STF = 1LL,          /* All bloccks into  L2 */
    CVMX_IPD_OPC_MODE_STF1_STT = 2LL,   /* 1st block L2, rest DRAM */
    CVMX_IPD_OPC_MODE_STF2_STT = 3LL    /* 1st, 2nd blocks L2, rest DRAM */
 };
@@ -60,17 +60,17 @@ typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
  *
  * @mbuff_size: Packets buffer size in 8 byte words
  * @first_mbuff_skip:
- *                   Number of 8 byte words to skip in the first buffer
+ *                  Number of 8 byte words to skip in the first buffer
  * @not_first_mbuff_skip:
- *                   Number of 8 byte words to skip in each following buffer
+ *                  Number of 8 byte words to skip in each following buffer
  * @first_back: Must be same as first_mbuff_skip / 128
  * @second_back:
- *                   Must be same as not_first_mbuff_skip / 128
+ *                  Must be same as not_first_mbuff_skip / 128
  * @wqe_fpa_pool:
- *                   FPA pool to get work entries from
+ *                  FPA pool to get work entries from
  * @cache_mode:
  * @back_pres_enable_flag:
- *                   Enable or disable port back pressure
+ *                  Enable or disable port back pressure
  */
 static inline void cvmx_ipd_config(uint64_t mbuff_size,
                                   uint64_t first_mbuff_skip,
index 2c8ff9e..11c0a8f 100644 (file)
 #ifndef __CVMX_L2C_H__
 #define __CVMX_L2C_H__
 
-#define CVMX_L2_ASSOC     cvmx_l2c_get_num_assoc()   /* Deprecated macro, use function */
+#define CVMX_L2_ASSOC    cvmx_l2c_get_num_assoc()   /* Deprecated macro, use function */
 #define CVMX_L2_SET_BITS  cvmx_l2c_get_set_bits()    /* Deprecated macro, use function */
-#define CVMX_L2_SETS      cvmx_l2c_get_num_sets()    /* Deprecated macro, use function */
+#define CVMX_L2_SETS     cvmx_l2c_get_num_sets()    /* Deprecated macro, use function */
 
 
 #define CVMX_L2C_IDX_ADDR_SHIFT 7  /* based on 128 byte cache line size */
-#define CVMX_L2C_IDX_MASK       (cvmx_l2c_get_num_sets() - 1)
+#define CVMX_L2C_IDX_MASK      (cvmx_l2c_get_num_sets() - 1)
 
 /* Defines for index aliasing computations */
 #define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
@@ -67,91 +67,91 @@ union cvmx_l2c_tag {
 
   /* L2C Performance Counter events. */
 enum cvmx_l2c_event {
-       CVMX_L2C_EVENT_CYCLES           =  0,
+       CVMX_L2C_EVENT_CYCLES           =  0,
        CVMX_L2C_EVENT_INSTRUCTION_MISS =  1,
-       CVMX_L2C_EVENT_INSTRUCTION_HIT  =  2,
-       CVMX_L2C_EVENT_DATA_MISS        =  3,
-       CVMX_L2C_EVENT_DATA_HIT         =  4,
-       CVMX_L2C_EVENT_MISS             =  5,
-       CVMX_L2C_EVENT_HIT              =  6,
-       CVMX_L2C_EVENT_VICTIM_HIT       =  7,
-       CVMX_L2C_EVENT_INDEX_CONFLICT   =  8,
-       CVMX_L2C_EVENT_TAG_PROBE        =  9,
-       CVMX_L2C_EVENT_TAG_UPDATE       = 10,
-       CVMX_L2C_EVENT_TAG_COMPLETE     = 11,
-       CVMX_L2C_EVENT_TAG_DIRTY        = 12,
-       CVMX_L2C_EVENT_DATA_STORE_NOP   = 13,
-       CVMX_L2C_EVENT_DATA_STORE_READ  = 14,
+       CVMX_L2C_EVENT_INSTRUCTION_HIT  =  2,
+       CVMX_L2C_EVENT_DATA_MISS        =  3,
+       CVMX_L2C_EVENT_DATA_HIT         =  4,
+       CVMX_L2C_EVENT_MISS             =  5,
+       CVMX_L2C_EVENT_HIT              =  6,
+       CVMX_L2C_EVENT_VICTIM_HIT       =  7,
+       CVMX_L2C_EVENT_INDEX_CONFLICT   =  8,
+       CVMX_L2C_EVENT_TAG_PROBE        =  9,
+       CVMX_L2C_EVENT_TAG_UPDATE       = 10,
+       CVMX_L2C_EVENT_TAG_COMPLETE     = 11,
+       CVMX_L2C_EVENT_TAG_DIRTY        = 12,
+       CVMX_L2C_EVENT_DATA_STORE_NOP   = 13,
+       CVMX_L2C_EVENT_DATA_STORE_READ  = 14,
        CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
-       CVMX_L2C_EVENT_FILL_DATA_VALID  = 16,
-       CVMX_L2C_EVENT_WRITE_REQUEST    = 17,
-       CVMX_L2C_EVENT_READ_REQUEST     = 18,
+       CVMX_L2C_EVENT_FILL_DATA_VALID  = 16,
+       CVMX_L2C_EVENT_WRITE_REQUEST    = 17,
+       CVMX_L2C_EVENT_READ_REQUEST     = 18,
        CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
-       CVMX_L2C_EVENT_XMC_NOP          = 20,
-       CVMX_L2C_EVENT_XMC_LDT          = 21,
-       CVMX_L2C_EVENT_XMC_LDI          = 22,
-       CVMX_L2C_EVENT_XMC_LDD          = 23,
-       CVMX_L2C_EVENT_XMC_STF          = 24,
-       CVMX_L2C_EVENT_XMC_STT          = 25,
-       CVMX_L2C_EVENT_XMC_STP          = 26,
-       CVMX_L2C_EVENT_XMC_STC          = 27,
-       CVMX_L2C_EVENT_XMC_DWB          = 28,
-       CVMX_L2C_EVENT_XMC_PL2          = 29,
-       CVMX_L2C_EVENT_XMC_PSL1         = 30,
-       CVMX_L2C_EVENT_XMC_IOBLD        = 31,
-       CVMX_L2C_EVENT_XMC_IOBST        = 32,
-       CVMX_L2C_EVENT_XMC_IOBDMA       = 33,
-       CVMX_L2C_EVENT_XMC_IOBRSP       = 34,
-       CVMX_L2C_EVENT_XMC_BUS_VALID    = 35,
-       CVMX_L2C_EVENT_XMC_MEM_DATA     = 36,
-       CVMX_L2C_EVENT_XMC_REFL_DATA    = 37,
-       CVMX_L2C_EVENT_XMC_IOBRSP_DATA  = 38,
-       CVMX_L2C_EVENT_RSC_NOP          = 39,
-       CVMX_L2C_EVENT_RSC_STDN         = 40,
-       CVMX_L2C_EVENT_RSC_FILL         = 41,
-       CVMX_L2C_EVENT_RSC_REFL         = 42,
-       CVMX_L2C_EVENT_RSC_STIN         = 43,
-       CVMX_L2C_EVENT_RSC_SCIN         = 44,
-       CVMX_L2C_EVENT_RSC_SCFL         = 45,
-       CVMX_L2C_EVENT_RSC_SCDN         = 46,
-       CVMX_L2C_EVENT_RSC_DATA_VALID   = 47,
-       CVMX_L2C_EVENT_RSC_VALID_FILL   = 48,
-       CVMX_L2C_EVENT_RSC_VALID_STRSP  = 49,
-       CVMX_L2C_EVENT_RSC_VALID_REFL   = 50,
-       CVMX_L2C_EVENT_LRF_REQ          = 51,
-       CVMX_L2C_EVENT_DT_RD_ALLOC      = 52,
-       CVMX_L2C_EVENT_DT_WR_INVAL      = 53,
+       CVMX_L2C_EVENT_XMC_NOP          = 20,
+       CVMX_L2C_EVENT_XMC_LDT          = 21,
+       CVMX_L2C_EVENT_XMC_LDI          = 22,
+       CVMX_L2C_EVENT_XMC_LDD          = 23,
+       CVMX_L2C_EVENT_XMC_STF          = 24,
+       CVMX_L2C_EVENT_XMC_STT          = 25,
+       CVMX_L2C_EVENT_XMC_STP          = 26,
+       CVMX_L2C_EVENT_XMC_STC          = 27,
+       CVMX_L2C_EVENT_XMC_DWB          = 28,
+       CVMX_L2C_EVENT_XMC_PL2          = 29,
+       CVMX_L2C_EVENT_XMC_PSL1         = 30,
+       CVMX_L2C_EVENT_XMC_IOBLD        = 31,
+       CVMX_L2C_EVENT_XMC_IOBST        = 32,
+       CVMX_L2C_EVENT_XMC_IOBDMA       = 33,
+       CVMX_L2C_EVENT_XMC_IOBRSP       = 34,
+       CVMX_L2C_EVENT_XMC_BUS_VALID    = 35,
+       CVMX_L2C_EVENT_XMC_MEM_DATA     = 36,
+       CVMX_L2C_EVENT_XMC_REFL_DATA    = 37,
+       CVMX_L2C_EVENT_XMC_IOBRSP_DATA  = 38,
+       CVMX_L2C_EVENT_RSC_NOP          = 39,
+       CVMX_L2C_EVENT_RSC_STDN         = 40,
+       CVMX_L2C_EVENT_RSC_FILL         = 41,
+       CVMX_L2C_EVENT_RSC_REFL         = 42,
+       CVMX_L2C_EVENT_RSC_STIN         = 43,
+       CVMX_L2C_EVENT_RSC_SCIN         = 44,
+       CVMX_L2C_EVENT_RSC_SCFL         = 45,
+       CVMX_L2C_EVENT_RSC_SCDN         = 46,
+       CVMX_L2C_EVENT_RSC_DATA_VALID   = 47,
+       CVMX_L2C_EVENT_RSC_VALID_FILL   = 48,
+       CVMX_L2C_EVENT_RSC_VALID_STRSP  = 49,
+       CVMX_L2C_EVENT_RSC_VALID_REFL   = 50,
+       CVMX_L2C_EVENT_LRF_REQ          = 51,
+       CVMX_L2C_EVENT_DT_RD_ALLOC      = 52,
+       CVMX_L2C_EVENT_DT_WR_INVAL      = 53,
        CVMX_L2C_EVENT_MAX
 };
 
 /* L2C Performance Counter events for Octeon2. */
 enum cvmx_l2c_tad_event {
-       CVMX_L2C_TAD_EVENT_NONE          = 0,
-       CVMX_L2C_TAD_EVENT_TAG_HIT       = 1,
-       CVMX_L2C_TAD_EVENT_TAG_MISS      = 2,
-       CVMX_L2C_TAD_EVENT_TAG_NOALLOC   = 3,
-       CVMX_L2C_TAD_EVENT_TAG_VICTIM    = 4,
-       CVMX_L2C_TAD_EVENT_SC_FAIL       = 5,
-       CVMX_L2C_TAD_EVENT_SC_PASS       = 6,
-       CVMX_L2C_TAD_EVENT_LFB_VALID     = 7,
-       CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB  = 8,
-       CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB  = 9,
-       CVMX_L2C_TAD_EVENT_QUAD0_INDEX   = 128,
-       CVMX_L2C_TAD_EVENT_QUAD0_READ    = 129,
-       CVMX_L2C_TAD_EVENT_QUAD0_BANK    = 130,
-       CVMX_L2C_TAD_EVENT_QUAD0_WDAT    = 131,
-       CVMX_L2C_TAD_EVENT_QUAD1_INDEX   = 144,
-       CVMX_L2C_TAD_EVENT_QUAD1_READ    = 145,
-       CVMX_L2C_TAD_EVENT_QUAD1_BANK    = 146,
-       CVMX_L2C_TAD_EVENT_QUAD1_WDAT    = 147,
-       CVMX_L2C_TAD_EVENT_QUAD2_INDEX   = 160,
-       CVMX_L2C_TAD_EVENT_QUAD2_READ    = 161,
-       CVMX_L2C_TAD_EVENT_QUAD2_BANK    = 162,
-       CVMX_L2C_TAD_EVENT_QUAD2_WDAT    = 163,
-       CVMX_L2C_TAD_EVENT_QUAD3_INDEX   = 176,
-       CVMX_L2C_TAD_EVENT_QUAD3_READ    = 177,
-       CVMX_L2C_TAD_EVENT_QUAD3_BANK    = 178,
-       CVMX_L2C_TAD_EVENT_QUAD3_WDAT    = 179,
+       CVMX_L2C_TAD_EVENT_NONE          = 0,
+       CVMX_L2C_TAD_EVENT_TAG_HIT       = 1,
+       CVMX_L2C_TAD_EVENT_TAG_MISS      = 2,
+       CVMX_L2C_TAD_EVENT_TAG_NOALLOC   = 3,
+       CVMX_L2C_TAD_EVENT_TAG_VICTIM    = 4,
+       CVMX_L2C_TAD_EVENT_SC_FAIL       = 5,
+       CVMX_L2C_TAD_EVENT_SC_PASS       = 6,
+       CVMX_L2C_TAD_EVENT_LFB_VALID     = 7,
+       CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB  = 8,
+       CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB  = 9,
+       CVMX_L2C_TAD_EVENT_QUAD0_INDEX   = 128,
+       CVMX_L2C_TAD_EVENT_QUAD0_READ    = 129,
+       CVMX_L2C_TAD_EVENT_QUAD0_BANK    = 130,
+       CVMX_L2C_TAD_EVENT_QUAD0_WDAT    = 131,
+       CVMX_L2C_TAD_EVENT_QUAD1_INDEX   = 144,
+       CVMX_L2C_TAD_EVENT_QUAD1_READ    = 145,
+       CVMX_L2C_TAD_EVENT_QUAD1_BANK    = 146,
+       CVMX_L2C_TAD_EVENT_QUAD1_WDAT    = 147,
+       CVMX_L2C_TAD_EVENT_QUAD2_INDEX   = 160,
+       CVMX_L2C_TAD_EVENT_QUAD2_READ    = 161,
+       CVMX_L2C_TAD_EVENT_QUAD2_BANK    = 162,
+       CVMX_L2C_TAD_EVENT_QUAD2_WDAT    = 163,
+       CVMX_L2C_TAD_EVENT_QUAD3_INDEX   = 176,
+       CVMX_L2C_TAD_EVENT_QUAD3_READ    = 177,
+       CVMX_L2C_TAD_EVENT_QUAD3_BANK    = 178,
+       CVMX_L2C_TAD_EVENT_QUAD3_WDAT    = 179,
        CVMX_L2C_TAD_EVENT_MAX
 };
 
@@ -159,10 +159,10 @@ enum cvmx_l2c_tad_event {
  * Configure one of the four L2 Cache performance counters to capture event
  * occurrences.
  *
- * @counter:        The counter to configure. Range 0..3.
- * @event:          The type of L2 Cache event occurrence to count.
+ * @counter:       The counter to configure. Range 0..3.
+ * @event:         The type of L2 Cache event occurrence to count.
  * @clear_on_read:  When asserted, any read of the performance counter
- *                       clears the counter.
+ *                      clears the counter.
  *
  * @note The routine does not clear the counter.
  */
@@ -184,8 +184,8 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter);
  * @core:  The core processor of interest.
  *
  * Returns    The mask specifying the partitioning. 0 bits in mask indicates
- *              the cache 'ways' that a core can evict from.
- *            -1 on error
+ *             the cache 'ways' that a core can evict from.
+ *           -1 on error
  */
 int cvmx_l2c_get_core_way_partition(uint32_t core);
 
@@ -194,16 +194,16 @@ int cvmx_l2c_get_core_way_partition(uint32_t core);
  *
  * @core: The core that the partitioning applies to.
  * @mask: The partitioning of the ways expressed as a binary
- *             mask. A 0 bit allows the core to evict cache lines from
- *             a way, while a 1 bit blocks the core from evicting any
- *             lines from that way. There must be at least one allowed
- *             way (0 bit) in the mask.
+ *            mask. A 0 bit allows the core to evict cache lines from
+ *            a way, while a 1 bit blocks the core from evicting any
+ *            lines from that way. There must be at least one allowed
+ *            way (0 bit) in the mask.
  *
 
  * @note If any ways are blocked for all cores and the HW blocks, then
- *       those ways will never have any cache lines evicted from them.
- *       All cores and the hardware blocks are free to read from all
- *       ways regardless of the partitioning.
+ *      those ways will never have any cache lines evicted from them.
+ *      All cores and the hardware blocks are free to read from all
+ *      ways regardless of the partitioning.
  */
 int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
 
@@ -211,8 +211,8 @@ int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
  * Return the L2 Cache way partitioning for the hw blocks.
  *
  * Returns    The mask specifying the reserved way. 0 bits in mask indicates
- *              the cache 'ways' that a core can evict from.
- *            -1 on error
+ *             the cache 'ways' that a core can evict from.
+ *           -1 on error
  */
 int cvmx_l2c_get_hw_way_partition(void);
 
@@ -220,16 +220,16 @@ int cvmx_l2c_get_hw_way_partition(void);
  * Partitions the L2 cache for the hardware blocks.
  *
  * @mask: The partitioning of the ways expressed as a binary
- *             mask. A 0 bit allows the core to evict cache lines from
- *             a way, while a 1 bit blocks the core from evicting any
- *             lines from that way. There must be at least one allowed
- *             way (0 bit) in the mask.
+ *            mask. A 0 bit allows the core to evict cache lines from
+ *            a way, while a 1 bit blocks the core from evicting any
+ *            lines from that way. There must be at least one allowed
+ *            way (0 bit) in the mask.
  *
 
  * @note If any ways are blocked for all cores and the HW blocks, then
- *       those ways will never have any cache lines evicted from them.
- *       All cores and the hardware blocks are free to read from all
- *       ways regardless of the partitioning.
+ *      those ways will never have any cache lines evicted from them.
+ *      All cores and the hardware blocks are free to read from all
+ *      ways regardless of the partitioning.
  */
 int cvmx_l2c_set_hw_way_partition(uint32_t mask);
 
@@ -240,7 +240,7 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask);
  * @addr:   physical address of line to lock
  *
  * Returns 0 on success,
- *         1 if line not locked.
+ *        1 if line not locked.
  */
 int cvmx_l2c_lock_line(uint64_t addr);
 
@@ -258,7 +258,7 @@ int cvmx_l2c_lock_line(uint64_t addr);
  * @len:    Length (in bytes) of region to lock
  *
  * Returns Number of requested lines that where not locked.
- *         0 on success (all locked)
+ *        0 on success (all locked)
  */
 int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
 
@@ -272,7 +272,7 @@ int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
  * @address: Physical address to unlock
  *
  * Returns 0: line not unlocked
- *         1: line unlocked
+ *        1: line unlocked
  */
 int cvmx_l2c_unlock_line(uint64_t address);
 
@@ -290,7 +290,7 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
  * Read the L2 controller tag for a given location in L2
  *
  * @association:
- *               Which association to read line from
+ *              Which association to read line from
  * @index:  Which way to read from.
  *
  * Returns l2c tag structure for line requested.
index 6f0cd18..9f6a4f3 100644 (file)
@@ -246,21 +246,21 @@ typedef union {
 } cvmx_mdio_phy_reg_mmd_address_data_t;
 
 /* Operating request encodings. */
-#define MDIO_CLAUSE_22_WRITE    0
-#define MDIO_CLAUSE_22_READ     1
+#define MDIO_CLAUSE_22_WRITE   0
+#define MDIO_CLAUSE_22_READ    1
 
-#define MDIO_CLAUSE_45_ADDRESS  0
-#define MDIO_CLAUSE_45_WRITE    1
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE   1
 #define MDIO_CLAUSE_45_READ_INC 2
-#define MDIO_CLAUSE_45_READ     3
+#define MDIO_CLAUSE_45_READ    3
 
 /* MMD identifiers, mostly for accessing devices within XENPAK modules. */
-#define CVMX_MMD_DEVICE_PMA_PMD      1
-#define CVMX_MMD_DEVICE_WIS          2
-#define CVMX_MMD_DEVICE_PCS          3
-#define CVMX_MMD_DEVICE_PHY_XS       4
-#define CVMX_MMD_DEVICE_DTS_XS       5
-#define CVMX_MMD_DEVICE_TC           6
+#define CVMX_MMD_DEVICE_PMA_PMD             1
+#define CVMX_MMD_DEVICE_WIS         2
+#define CVMX_MMD_DEVICE_PCS         3
+#define CVMX_MMD_DEVICE_PHY_XS      4
+#define CVMX_MMD_DEVICE_DTS_XS      5
+#define CVMX_MMD_DEVICE_TC          6
 #define CVMX_MMD_DEVICE_CL22_EXT     29
 #define CVMX_MMD_DEVICE_VENDOR_1     30
 #define CVMX_MMD_DEVICE_VENDOR_2     31
@@ -291,7 +291,7 @@ static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
  * registers controlling auto negotiation.
  *
  * @bus_id:   MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- *                 support multiple busses.
+ *                support multiple busses.
  * @phy_id:   The MII phy id
  * @location: Register location to read
  *
@@ -328,13 +328,13 @@ static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
  * registers controlling auto negotiation.
  *
  * @bus_id:   MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- *                 support multiple busses.
+ *                support multiple busses.
  * @phy_id:   The MII phy id
  * @location: Register location to write
  * @val:      Value to write
  *
  * Returns -1 on error
- *         0 on success
+ *        0 on success
  */
 static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
 {
@@ -370,7 +370,7 @@ static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
  * read PHY registers controlling auto negotiation.
  *
  * @bus_id:   MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- *                 support multiple busses.
+ *                support multiple busses.
  * @phy_id:   The MII phy id
  * @device:   MDIO Managable Device (MMD) id
  * @location: Register location to read
@@ -407,7 +407,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
        } while (smi_wr.s.pending && --timeout);
        if (timeout <= 0) {
                cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
-                            "device %2d register %2d   TIME OUT(address)\n",
+                            "device %2d register %2d   TIME OUT(address)\n",
                     bus_id, phy_id, device, location);
                return -1;
        }
@@ -425,7 +425,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
 
        if (timeout <= 0) {
                cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
-                            "device %2d register %2d   TIME OUT(data)\n",
+                            "device %2d register %2d   TIME OUT(data)\n",
                     bus_id, phy_id, device, location);
                return -1;
        }
@@ -434,7 +434,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
                return smi_rd.s.dat;
        else {
                cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
-                            "device %2d register %2d   INVALID READ\n",
+                            "device %2d register %2d   INVALID READ\n",
                     bus_id, phy_id, device, location);
                return -1;
        }
@@ -445,14 +445,14 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
  * write PHY registers controlling auto negotiation.
  *
  * @bus_id:   MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- *                 support multiple busses.
+ *                support multiple busses.
  * @phy_id:   The MII phy id
  * @device:   MDIO Managable Device (MMD) id
  * @location: Register location to write
  * @val:      Value to write
  *
  * Returns -1 on error
- *         0 on success
+ *        0 on success
  */
 static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
                                     int location, int val)
index 05a917d..e975c7d 100644 (file)
@@ -44,7 +44,7 @@ enum cvmx_pip_port_parse_mode {
         */
        CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
        /*
-        * Input packets are assumed to be IP.  Results from non IP
+        * Input packets are assumed to be IP.  Results from non IP
         * packets is undefined. Pointers reference the beginning of
         * the IP header.
         */
index 9e739a6..a76fe5a 100644 (file)
@@ -37,8 +37,8 @@
 #include <asm/octeon/cvmx-fpa.h>
 #include <asm/octeon/cvmx-pip-defs.h>
 
-#define CVMX_PIP_NUM_INPUT_PORTS                40
-#define CVMX_PIP_NUM_WATCHERS                   4
+#define CVMX_PIP_NUM_INPUT_PORTS               40
+#define CVMX_PIP_NUM_WATCHERS                  4
 
 /*
  * Encodes the different error and exception codes
@@ -92,10 +92,10 @@ typedef enum {
 
 /**
  * NOTES
- *       late collision (data received before collision)
- *            late collisions cannot be detected by the receiver
- *            they would appear as JAM bits which would appear as bad FCS
- *            or carrier extend error which is CVMX_PIP_EXTEND_ERR
+ *      late collision (data received before collision)
+ *           late collisions cannot be detected by the receiver
+ *           they would appear as JAM bits which would appear as bad FCS
+ *           or carrier extend error which is CVMX_PIP_EXTEND_ERR
  */
 typedef enum {
        /* No error */
@@ -122,11 +122,11 @@ typedef enum {
         * error)
         */
        CVMX_PIP_UNDER_FCS_ERR = 6ull,
-       /* RGM     7 = FCS error */
+       /* RGM     7 = FCS error */
        CVMX_PIP_GMX_FCS_ERR = 7ull,
        /* RGM+SPI 8 = min frame error (pkt len < min frame len) */
        CVMX_PIP_UNDER_ERR = 8ull,
-       /* RGM     9 = Frame carrier extend error */
+       /* RGM     9 = Frame carrier extend error */
        CVMX_PIP_EXTEND_ERR = 9ull,
        /*
         * RGM 10 = length mismatch (len did not match len in L2
@@ -161,10 +161,10 @@ typedef enum {
        CVMX_PIP_PIP_L2_MAL_HDR = 18L
        /*
         * NOTES: xx = late collision (data received before collision)
-        *       late collisions cannot be detected by the receiver
-        *       they would appear as JAM bits which would appear as
-        *       bad FCS or carrier extend error which is
-        *       CVMX_PIP_EXTEND_ERR
+        *       late collisions cannot be detected by the receiver
+        *       they would appear as JAM bits which would appear as
+        *       bad FCS or carrier extend error which is
+        *       CVMX_PIP_EXTEND_ERR
         */
 } cvmx_pip_rcv_err_t;
 
@@ -192,13 +192,13 @@ typedef struct {
        /* Number of packets processed by PIP */
        uint32_t packets;
        /*
-        * Number of indentified L2 multicast packets.  Does not
+        * Number of indentified L2 multicast packets.  Does not
         * include broadcast packets.  Only includes packets whose
         * parse mode is SKIP_TO_L2
         */
        uint32_t multicast_packets;
        /*
-        * Number of indentified L2 broadcast packets.  Does not
+        * Number of indentified L2 broadcast packets.  Does not
         * include multicast packets.  Only includes packets whose
         * parse mode is SKIP_TO_L2
         */
@@ -287,7 +287,7 @@ typedef union {
  * @port_num: Port number to configure
  * @port_cfg: Port hardware configuration
  * @port_tag_cfg:
- *                 Port POW tagging configuration
+ *                Port POW tagging configuration
  */
 static inline void cvmx_pip_config_port(uint64_t port_num,
                                        union cvmx_pip_prt_cfgx port_cfg,
@@ -298,20 +298,20 @@ static inline void cvmx_pip_config_port(uint64_t port_num,
 }
 #if 0
 /**
- * @deprecated      This function is a thin wrapper around the Pass1 version
- *                  of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
- *                  setting the group that is incompatible with this function,
- *                  the preferred upgrade path is to use the CSR directly.
+ * @deprecated     This function is a thin wrapper around the Pass1 version
+ *                 of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
+ *                 setting the group that is incompatible with this function,
+ *                 the preferred upgrade path is to use the CSR directly.
  *
  * Configure the global QoS packet watchers. Each watcher is
  * capable of matching a field in a packet to determine the
  * QoS queue for scheduling.
  *
- * @watcher:    Watcher number to configure (0 - 3).
+ * @watcher:   Watcher number to configure (0 - 3).
  * @match_type: Watcher match type
  * @match_value:
- *                   Value the watcher will match against
- * @qos:        QoS queue for packets matching this watcher
+ *                  Value the watcher will match against
+ * @qos:       QoS queue for packets matching this watcher
  */
 static inline void cvmx_pip_config_watcher(uint64_t watcher,
                                           cvmx_pip_qos_watch_types match_type,
@@ -331,7 +331,7 @@ static inline void cvmx_pip_config_watcher(uint64_t watcher,
  * Configure the VLAN priority to QoS queue mapping.
  *
  * @vlan_priority:
- *               VLAN priority (0-7)
+ *              VLAN priority (0-7)
  * @qos:    QoS queue for packets matching this watcher
  */
 static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
@@ -451,10 +451,10 @@ static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
  *
  * @interface: Interface to configure (0 or 1)
  * @invert_result:
- *                 Invert the result of the CRC
+ *                Invert the result of the CRC
  * @reflect:  Reflect
  * @initialization_vector:
- *                 CRC initialization vector
+ *                CRC initialization vector
  */
 static inline void cvmx_pip_config_crc(uint64_t interface,
                                       uint64_t invert_result, uint64_t reflect,
@@ -500,13 +500,13 @@ static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
  *
  * @mask_index: Which tag mask to modify (0..3)
  * @offset: Offset into the bitmask to set bits at. Use the GCC macro
- *          offsetof() to determine the offsets into packet headers.
- *          For example, offsetof(ethhdr, protocol) returns the offset
- *          of the ethernet protocol field.  The bitmask selects which
- *          bytes to include the the tag, with bit offset X selecting
- *          byte at offset X from the beginning of the packet data.
+ *         offsetof() to determine the offsets into packet headers.
+ *         For example, offsetof(ethhdr, protocol) returns the offset
+ *         of the ethernet protocol field.  The bitmask selects which
+ *         bytes to include the the tag, with bit offset X selecting
+ *         byte at offset X from the beginning of the packet data.
  * @len:    Number of bytes to include. Usually this is the sizeof()
- *          the field.
+ *         the field.
  */
 static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
                                         uint64_t len)
index c6daeed..f7d2a67 100644 (file)
 #define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
 
 #define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
-#define CVMX_PKO_MAX_OUTPUT_QUEUES      ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
+#define CVMX_PKO_MAX_OUTPUT_QUEUES     ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
        OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
        OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
                (OCTEON_IS_MODEL(OCTEON_CN58XX) || \
                OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
-#define CVMX_PKO_NUM_OUTPUT_PORTS       40
+#define CVMX_PKO_NUM_OUTPUT_PORTS      40
 /* use this for queues that are not used */
 #define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
-#define CVMX_PKO_QUEUE_STATIC_PRIORITY  9
-#define CVMX_PKO_ILLEGAL_QUEUE  0xFFFF
+#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
+#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
 #define CVMX_PKO_MAX_QUEUE_DEPTH 0
 
 typedef enum {
@@ -269,13 +269,13 @@ extern void cvmx_pko_shutdown(void);
 /**
  * Configure a output port and the associated queues for use.
  *
- * @port:       Port to configure.
+ * @port:      Port to configure.
  * @base_queue: First queue number to associate with this port.
  * @num_queues: Number of queues t oassociate with this port
- * @priority:   Array of priority levels for each queue. Values are
- *                   allowed to be 1-8. A value of 8 get 8 times the traffic
- *                   of a value of 1. There must be num_queues elements in the
- *                   array.
+ * @priority:  Array of priority levels for each queue. Values are
+ *                  allowed to be 1-8. A value of 8 get 8 times the traffic
+ *                  of a value of 1. There must be num_queues elements in the
+ *                  array.
  */
 extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
                                              uint64_t base_queue,
@@ -285,7 +285,7 @@ extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
 /**
  * Ring the packet output doorbell. This tells the packet
  * output hardware that "len" command words have been added
- * to its pending list.  This command includes the required
+ * to its pending list.         This command includes the required
  * CVMX_SYNCWS before the doorbell ring.
  *
  * @port:   Port the packet is for
@@ -322,18 +322,18 @@ static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
  * The use_locking parameter allows the caller to use three
  * possible locking modes.
  * - CVMX_PKO_LOCK_NONE
- *      - PKO doesn't do any locking. It is the responsibility
- *          of the application to make sure that no other core
- *          is accessing the same queue at the same time.
+ *     - PKO doesn't do any locking. It is the responsibility
+ *         of the application to make sure that no other core
+ *         is accessing the same queue at the same time.
  * - CVMX_PKO_LOCK_ATOMIC_TAG
- *      - PKO performs an atomic tagswitch to insure exclusive
- *          access to the output queue. This will maintain
- *          packet ordering on output.
+ *     - PKO performs an atomic tagswitch to insure exclusive
+ *         access to the output queue. This will maintain
+ *         packet ordering on output.
  * - CVMX_PKO_LOCK_CMD_QUEUE
- *      - PKO uses the common command queue locks to insure
- *          exclusive access to the output queue. This is a
- *          memory based ll/sc. This is the most portable
- *          locking mechanism.
+ *     - PKO uses the common command queue locks to insure
+ *         exclusive access to the output queue. This is a
+ *         memory based ll/sc. This is the most portable
+ *         locking mechanism.
  *
  * NOTE: If atomic locking is used, the POW entry CANNOT be
  * descheduled, as it does not contain a valid WQE pointer.
@@ -341,7 +341,7 @@ static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
  * @port:   Port to send it on
  * @queue:  Queue to use
  * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- *               CVMX_PKO_LOCK_CMD_QUEUE
+ *              CVMX_PKO_LOCK_CMD_QUEUE
  */
 
 static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
@@ -351,11 +351,11 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
                /*
                 * Must do a full switch here to handle all cases.  We
                 * use a fake WQE pointer, as the POW does not access
-                * this memory.  The WQE pointer and group are only
+                * this memory.  The WQE pointer and group are only
                 * used if this work is descheduled, which is not
                 * supported by the
                 * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
-                * combination.  Note that this is a special case in
+                * combination.  Note that this is a special case in
                 * which these fake values can be used - this is not a
                 * general technique.
                 */
@@ -377,10 +377,10 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
  * @port:   Port to send it on
  * @queue:  Queue to use
  * @pko_command:
- *               PKO HW command word
+ *              PKO HW command word
  * @packet: Packet to send
  * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- *               CVMX_PKO_LOCK_CMD_QUEUE
+ *              CVMX_PKO_LOCK_CMD_QUEUE
  *
  * Returns returns CVMX_PKO_SUCCESS on success, or error code on
  * failure of output
@@ -418,12 +418,12 @@ static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
  * @port:   Port to send it on
  * @queue:  Queue to use
  * @pko_command:
- *               PKO HW command word
+ *              PKO HW command word
  * @packet: Packet to send
  * @addr: Plysical address of a work queue entry or physical address
- *        to zero on complete.
+ *       to zero on complete.
  * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- *               CVMX_PKO_LOCK_CMD_QUEUE
+ *              CVMX_PKO_LOCK_CMD_QUEUE
  *
  * Returns returns CVMX_PKO_SUCCESS on success, or error code on
  * failure of output
@@ -588,7 +588,7 @@ static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
  * @port:      Port to rate limit
  * @packets_s: Maximum packet/sec
  * @burst:     Maximum number of packets to burst in a row before rate
- *                  limiting cuts in.
+ *                 limiting cuts in.
  *
  * Returns Zero on success, negative on failure
  */
@@ -601,7 +601,7 @@ extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
  * @port:   Port to rate limit
  * @bits_s: PKO rate limit in bits/sec
  * @burst:  Maximum number of bits to burst before rate
- *               limiting cuts in.
+ *              limiting cuts in.
  *
  * Returns Zero on success, negative on failure
  */
index 92742b2..4b4d0ec 100644 (file)
@@ -70,7 +70,7 @@ enum cvmx_pow_tag_type {
         * The work queue entry from the order - NEVER tag switch from
         * NULL to NULL
         */
-       CVMX_POW_TAG_TYPE_NULL      = 2L,
+       CVMX_POW_TAG_TYPE_NULL      = 2L,
        /* A tag switch to NULL, and there is no space reserved in POW
         * - NEVER tag switch to NULL_NULL
         * - NEVER tag switch from NULL_NULL
@@ -90,7 +90,7 @@ typedef enum {
 } cvmx_pow_wait_t;
 
 /**
- *  POW tag operations.  These are used in the data stored to the POW.
+ *  POW tag operations.         These are used in the data stored to the POW.
  */
 typedef enum {
        /*
@@ -341,14 +341,14 @@ typedef union {
                 * lists.  The two memory-input queue lists associated
                 * with each QOS level are:
                 *
-                * - qosgrp = 0, qosgrp = 8:      QOS0
-                * - qosgrp = 1, qosgrp = 9:      QOS1
-                * - qosgrp = 2, qosgrp = 10:     QOS2
-                * - qosgrp = 3, qosgrp = 11:     QOS3
-                * - qosgrp = 4, qosgrp = 12:     QOS4
-                * - qosgrp = 5, qosgrp = 13:     QOS5
-                * - qosgrp = 6, qosgrp = 14:     QOS6
-                * - qosgrp = 7, qosgrp = 15:     QOS7
+                * - qosgrp = 0, qosgrp = 8:      QOS0
+                * - qosgrp = 1, qosgrp = 9:      QOS1
+                * - qosgrp = 2, qosgrp = 10:     QOS2
+                * - qosgrp = 3, qosgrp = 11:     QOS3
+                * - qosgrp = 4, qosgrp = 12:     QOS4
+                * - qosgrp = 5, qosgrp = 13:     QOS5
+                * - qosgrp = 6, qosgrp = 14:     QOS6
+                * - qosgrp = 7, qosgrp = 15:     QOS7
                 */
                uint64_t qosgrp:4;
                /*
@@ -942,11 +942,11 @@ typedef union {
  *  operations.
  *
  *  NOTE: The following is the behavior of the pending switch bit at the PP
- *       for POW stores (i.e. when did<7:3> == 0xc)
- *     - did<2:0> == 0      => pending switch bit is set
- *     - did<2:0> == 1      => no affect on the pending switch bit
- *     - did<2:0> == 3      => pending switch bit is cleared
- *     - did<2:0> == 7      => no affect on the pending switch bit
+ *      for POW stores (i.e. when did<7:3> == 0xc)
+ *     - did<2:0> == 0     => pending switch bit is set
+ *     - did<2:0> == 1     => no affect on the pending switch bit
+ *     - did<2:0> == 3     => pending switch bit is cleared
+ *     - did<2:0> == 7     => no affect on the pending switch bit
  *     - did<2:0> == others => must not be used
  *     - No other loads/stores have an affect on the pending switch bit
  *     - The switch bus from POW can clear the pending switch bit
@@ -1053,7 +1053,7 @@ static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
 }
 
 #ifndef CVMX_MF_CHORD
-#define CVMX_MF_CHORD(dest)         CVMX_RDHWR(dest, 30)
+#define CVMX_MF_CHORD(dest)        CVMX_RDHWR(dest, 30)
 #endif
 
 /**
@@ -1097,7 +1097,7 @@ static inline void cvmx_pow_tag_sw_wait(void)
  * so the caller must ensure that there is not a pending tag switch.
  *
  * @wait:   When set, call stalls until work becomes avaiable, or times out.
- *               If not set, returns immediately.
+ *              If not set, returns immediately.
  *
  * Returns Returns the WQE pointer from POW. Returns NULL if no work
  * was available.
@@ -1131,7 +1131,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
  * requesting the new work.
  *
  * @wait:   When set, call stalls until work becomes avaiable, or times out.
- *               If not set, returns immediately.
+ *              If not set, returns immediately.
  *
  * Returns Returns the WQE pointer from POW. Returns NULL if no work
  * was available.
@@ -1148,7 +1148,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
 }
 
 /**
- * Synchronous null_rd request.  Requests a switch out of NULL_NULL POW state.
+ * Synchronous null_rd request.         Requests a switch out of NULL_NULL POW state.
  * This function waits for any previous tag switch to complete before
  * requesting the null_rd.
  *
@@ -1183,11 +1183,11 @@ static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
  * there is not a pending tag switch.
  *
  * @scr_addr: Scratch memory address that response will be returned
- *            to, which is either a valid WQE, or a response with the
- *            invalid bit set.  Byte address, must be 8 byte aligned.
+ *           to, which is either a valid WQE, or a response with the
+ *           invalid bit set.  Byte address, must be 8 byte aligned.
  *
  * @wait: 1 to cause response to wait for work to become available (or
- *        timeout), 0 to cause response to return immediately
+ *       timeout), 0 to cause response to return immediately
  */
 static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
                                                       cvmx_pow_wait_t wait)
@@ -1212,11 +1212,11 @@ static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
  * tag switch to complete before requesting the new work.
  *
  * @scr_addr: Scratch memory address that response will be returned
- *            to, which is either a valid WQE, or a response with the
- *            invalid bit set.  Byte address, must be 8 byte aligned.
+ *           to, which is either a valid WQE, or a response with the
+ *           invalid bit set.  Byte address, must be 8 byte aligned.
  *
  * @wait: 1 to cause response to wait for work to become available (or
- *                  timeout), 0 to cause response to return immediately
+ *                 timeout), 0 to cause response to return immediately
  */
 static inline void cvmx_pow_work_request_async(int scr_addr,
                                               cvmx_pow_wait_t wait)
@@ -1234,7 +1234,7 @@ static inline void cvmx_pow_work_request_async(int scr_addr,
  * to wait for the response.
  *
  * @scr_addr: Scratch memory address to get result from Byte address,
- *            must be 8 byte aligned.
+ *           must be 8 byte aligned.
  *
  * Returns Returns the WQE from the scratch register, or NULL if no
  * work was available.
@@ -1260,7 +1260,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
  * @wqe_ptr: pointer to a work queue entry returned by the POW
  *
  * Returns 0 if pointer is valid
- *         1 if invalid (no work was returned)
+ *        1 if invalid (no work was returned)
  */
 static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
 {
@@ -1314,7 +1314,7 @@ static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
        /*
         * Note that WQE in DRAM is not updated here, as the POW does
         * not read from DRAM once the WQE is in flight.  See hardware
-        * manual for complete details.  It is the application's
+        * manual for complete details.  It is the application's
         * responsibility to keep track of the current tag value if
         * that is important.
         */
@@ -1361,7 +1361,7 @@ static inline void cvmx_pow_tag_sw(uint32_t tag,
        /*
         * Note that WQE in DRAM is not updated here, as the POW does
         * not read from DRAM once the WQE is in flight.  See hardware
-        * manual for complete details.  It is the application's
+        * manual for complete details.  It is the application's
         * responsibility to keep track of the current tag value if
         * that is important.
         */
@@ -1390,7 +1390,7 @@ static inline void cvmx_pow_tag_sw(uint32_t tag,
  * previous tag switch has completed.
  *
  * @wqp:      pointer to work queue entry to submit.  This entry is
- *            updated to match the other parameters
+ *           updated to match the other parameters
  * @tag:      tag value to be assigned to work queue entry
  * @tag_type: type of tag
  * @group:    group value for the work queue entry.
@@ -1429,7 +1429,7 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
        /*
         * Note that WQE in DRAM is not updated here, as the POW does
         * not read from DRAM once the WQE is in flight.  See hardware
-        * manual for complete details.  It is the application's
+        * manual for complete details.  It is the application's
         * responsibility to keep track of the current tag value if
         * that is important.
         */
@@ -1468,10 +1468,10 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
  * before requesting the tag switch.
  *
  * @wqp:      pointer to work queue entry to submit.  This entry is updated
- *            to match the other parameters
+ *           to match the other parameters
  * @tag:      tag value to be assigned to work queue entry
  * @tag_type: type of tag
- * @group:      group value for the work queue entry.
+ * @group:     group value for the work queue entry.
  */
 static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
                                        enum cvmx_pow_tag_type tag_type,
@@ -1560,7 +1560,7 @@ static inline void cvmx_pow_tag_sw_null(void)
  * unrelated to the tag that the core currently holds.
  *
  * @wqp:      pointer to work queue entry to submit.  This entry is
- *            updated to match the other parameters
+ *           updated to match the other parameters
  * @tag:      tag value to be assigned to work queue entry
  * @tag_type: type of tag
  * @qos:      Input queue to add to.
@@ -1592,7 +1592,7 @@ static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
        ptr.sio.offset = cvmx_ptr_to_phys(wqp);
 
        /*
-        * SYNC write to memory before the work submit.  This is
+        * SYNC write to memory before the work submit.  This is
         * necessary as POW may read values from DRAM at this time.
         */
        CVMX_SYNCWS;
@@ -1604,11 +1604,11 @@ static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
  * indicates which groups each core will accept work from. There are
  * 16 groups.
  *
- * @core_num:   core to apply mask to
+ * @core_num:  core to apply mask to
  * @mask:   Group mask. There are 16 groups, so only bits 0-15 are valid,
- *               representing groups 0-15.
- *               Each 1 bit in the mask enables the core to accept work from
- *               the corresponding group.
+ *              representing groups 0-15.
+ *              Each 1 bit in the mask enables the core to accept work from
+ *              the corresponding group.
  */
 static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
 {
@@ -1623,14 +1623,14 @@ static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
  * This function sets POW static priorities for a core. Each input queue has
  * an associated priority value.
  *
- * @core_num:   core to apply priorities to
- * @priority:   Vector of 8 priorities, one per POW Input Queue (0-7).
- *                   Highest priority is 0 and lowest is 7. A priority value
- *                   of 0xF instructs POW to skip the Input Queue when
- *                   scheduling to this specific core.
- *                   NOTE: priorities should not have gaps in values, meaning
- *                         {0,1,1,1,1,1,1,1} is a valid configuration while
- *                         {0,2,2,2,2,2,2,2} is not.
+ * @core_num:  core to apply priorities to
+ * @priority:  Vector of 8 priorities, one per POW Input Queue (0-7).
+ *                  Highest priority is 0 and lowest is 7. A priority value
+ *                  of 0xF instructs POW to skip the Input Queue when
+ *                  scheduling to this specific core.
+ *                  NOTE: priorities should not have gaps in values, meaning
+ *                        {0,1,1,1,1,1,1,1} is a valid configuration while
+ *                        {0,2,2,2,2,2,2,2} is not.
  */
 static inline void cvmx_pow_set_priority(uint64_t core_num,
                                         const uint8_t priority[])
@@ -1708,8 +1708,8 @@ static inline void cvmx_pow_set_priority(uint64_t core_num,
  * @tag_type: New tag type
  * @group:    New group value
  * @no_sched: Control whether this work queue entry will be rescheduled.
- *                 - 1 : don't schedule this work
- *                 - 0 : allow this work to be scheduled.
+ *                - 1 : don't schedule this work
+ *                - 0 : allow this work to be scheduled.
  */
 static inline void cvmx_pow_tag_sw_desched_nocheck(
        uint32_t tag,
@@ -1794,8 +1794,8 @@ static inline void cvmx_pow_tag_sw_desched_nocheck(
  * @tag_type: New tag type
  * @group:    New group value
  * @no_sched: Control whether this work queue entry will be rescheduled.
- *                 - 1 : don't schedule this work
- *                 - 0 : allow this work to be scheduled.
+ *                - 1 : don't schedule this work
+ *                - 0 : allow this work to be scheduled.
  */
 static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
                                           enum cvmx_pow_tag_type tag_type,
@@ -1819,8 +1819,8 @@ static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
  * Descchedules the current work queue entry.
  *
  * @no_sched: no schedule flag value to be set on the work queue
- *            entry.  If this is set the entry will not be
- *            rescheduled.
+ *           entry.  If this is set the entry will not be
+ *           rescheduled.
  */
 static inline void cvmx_pow_desched(uint64_t no_sched)
 {
@@ -1863,7 +1863,7 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
 *****************************************************/
 
 /*
- * Number of bits of the tag used by software.  The SW bits are always
+ * Number of bits of the tag used by software. The SW bits are always
  * a contiguous block of the high starting at bit 31.  The hardware
  * bits are always the low bits.  By default, the top 8 bits of the
  * tag are reserved for software, and the low 24 are set by the IPD
@@ -1890,7 +1890,7 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
  * are defined here.
  */
 /* Mask for the value portion of the tag */
-#define CVMX_TAG_SUBGROUP_MASK  0xFFFF
+#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
 #define CVMX_TAG_SUBGROUP_SHIFT 16
 #define CVMX_TAG_SUBGROUP_PKO  0x1
 
@@ -1905,12 +1905,12 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
  * This function creates a 32 bit tag value from the two values provided.
  *
  * @sw_bits: The upper bits (number depends on configuration) are set
- *           to this value.  The remainder of bits are set by the
- *           hw_bits parameter.
+ *          to this value.  The remainder of bits are set by the
+ *          hw_bits parameter.
  *
  * @hw_bits: The lower bits (number depends on configuration) are set
- *           to this value.  The remainder of bits are set by the
- *           sw_bits parameter.
+ *          to this value.  The remainder of bits are set by the
+ *          sw_bits parameter.
  *
  * Returns 32 bit value of the combined hw and sw bits.
  */
@@ -1957,7 +1957,7 @@ static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
  *
  * @buffer: Buffer to store capture into
  * @buffer_size:
- *               The size of the supplied buffer
+ *              The size of the supplied buffer
  *
  * Returns Zero on success, negative on failure
  */
@@ -1968,7 +1968,7 @@ extern int cvmx_pow_capture(void *buffer, int buffer_size);
  *
  * @buffer: POW capture from cvmx_pow_capture()
  * @buffer_size:
- *               Size of the buffer
+ *              Size of the buffer
  */
 extern void cvmx_pow_display(void *buffer, int buffer_size);
 
index 96b70cf..8d21cc5 100644 (file)
@@ -39,7 +39,7 @@
  * Note: This define must be a long, not a long long in order to
  * compile without warnings for both 32bit and 64bit.
  */
-#define CVMX_SCRATCH_BASE       (-32768l)      /* 0xffffffffffff8000 */
+#define CVMX_SCRATCH_BASE      (-32768l)       /* 0xffffffffffff8000 */
 
 /**
  * Reads an 8 bit value from the processor local scratchpad memory.
index 3bf53b5..d5038cc 100644 (file)
@@ -84,11 +84,11 @@ static inline int cvmx_spi_is_spi_interface(int interface)
  * Initialize and start the SPI interface.
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for clock synchronization in seconds
  * @num_ports: Number of SPI ports to configure
  *
@@ -102,11 +102,11 @@ extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
  * with its corespondant system.
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for clock synchronization in seconds
  * Returns Zero on success, negative of failure.
  */
@@ -154,7 +154,7 @@ static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
 /**
  * Get current SPI4 initialization callbacks
  *
- * @callbacks:  Pointer to the callbacks structure.to fill
+ * @callbacks: Pointer to the callbacks structure.to fill
  *
  * Returns Pointer to cvmx_spi_callbacks_t structure.
  */
@@ -171,11 +171,11 @@ extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
  * Callback to perform SPI4 reset
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  *
  * Returns Zero on success, non-zero error code on failure (will cause
  * SPI initialization to abort)
@@ -187,11 +187,11 @@ extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
  * detection
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @num_ports: Number of ports to configure on SPI
  *
  * Returns Zero on success, non-zero error code on failure (will cause
@@ -204,11 +204,11 @@ extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
  * Callback to perform clock detection
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for clock synchronization in seconds
  *
  * Returns Zero on success, non-zero error code on failure (will cause
@@ -221,11 +221,11 @@ extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
  * Callback to perform link training
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for link to be trained (in seconds)
  *
  * Returns Zero on success, non-zero error code on failure (will cause
@@ -238,11 +238,11 @@ extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
  * Callback to perform calendar data synchronization
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  * @timeout:   Timeout to wait for calendar data in seconds
  *
  * Returns Zero on success, non-zero error code on failure (will cause
@@ -255,11 +255,11 @@ extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
  * Callback to handle interface up
  *
  * @interface: The identifier of the packet interface to configure and
- *                  use as a SPI interface.
+ *                 use as a SPI interface.
  * @mode:      The operating mode for the SPI interface. The interface
- *                  can operate as a full duplex (both Tx and Rx data paths
- *                  active) or as a halfplex (either the Tx data path is
- *                  active or the Rx data path is active, but not both).
+ *                 can operate as a full duplex (both Tx and Rx data paths
+ *                 active) or as a halfplex (either the Tx data path is
+ *                 active or the Rx data path is active, but not both).
  *
  * Returns Zero on success, non-zero error code on failure (will cause
  * SPI initialization to abort)
index a672abb..4f09cff 100644 (file)
@@ -26,7 +26,7 @@
  ***********************license end**************************************/
 
 /**
- * Implementation of spinlocks for Octeon CVMX.  Although similar in
+ * Implementation of spinlocks for Octeon CVMX.         Although similar in
  * function to Linux kernel spinlocks, they are not compatible.
  * Octeon CVMX spinlocks are only used to synchronize with the boot
  * monitor and other non-Linux programs running in the system.
@@ -50,8 +50,8 @@ typedef struct {
 } cvmx_spinlock_t;
 
 /* note - macros not expanded in inline ASM, so values hardcoded */
-#define  CVMX_SPINLOCK_UNLOCKED_VAL  0
-#define  CVMX_SPINLOCK_LOCKED_VAL    1
+#define         CVMX_SPINLOCK_UNLOCKED_VAL  0
+#define         CVMX_SPINLOCK_LOCKED_VAL    1
 
 #define CVMX_SPINLOCK_UNLOCKED_INITIALIZER  {CVMX_SPINLOCK_UNLOCKED_VAL}
 
@@ -96,7 +96,7 @@ static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
  * @lock:   pointer to lock structure
  *
  * Returns 0: lock successfully taken
- *         1: lock not taken, held by someone else
+ *        1: lock not taken, held by someone else
  * These return values match the Linux semantics.
  */
 
@@ -104,16 +104,16 @@ static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
 {
        unsigned int tmp;
 
-       __asm__ __volatile__(".set noreorder         \n"
+       __asm__ __volatile__(".set noreorder         \n"
                             "1: ll   %[tmp], %[val] \n"
                        /* if lock held, fail immediately */
-                            "   bnez %[tmp], 2f     \n"
-                            "   li   %[tmp], 1      \n"
-                            "   sc   %[tmp], %[val] \n"
-                            "   beqz %[tmp], 1b     \n"
-                            "   li   %[tmp], 0      \n"
-                            "2:                     \n"
-                            ".set reorder           \n" :
+                            "   bnez %[tmp], 2f     \n"
+                            "   li   %[tmp], 1      \n"
+                            "   sc   %[tmp], %[val] \n"
+                            "   beqz %[tmp], 1b     \n"
+                            "   li   %[tmp], 0      \n"
+                            "2:                     \n"
+                            ".set reorder           \n" :
                        [val] "+m"(lock->value), [tmp] "=&r"(tmp)
                             : : "memory");
 
@@ -129,14 +129,14 @@ static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
 {
        unsigned int tmp;
 
-       __asm__ __volatile__(".set noreorder         \n"
+       __asm__ __volatile__(".set noreorder         \n"
                             "1: ll   %[tmp], %[val]  \n"
-                            "   bnez %[tmp], 1b     \n"
-                            "   li   %[tmp], 1      \n"
-                            "   sc   %[tmp], %[val] \n"
-                            "   beqz %[tmp], 1b     \n"
-                            "   nop                \n"
-                            ".set reorder           \n" :
+                            "   bnez %[tmp], 1b     \n"
+                            "   li   %[tmp], 1      \n"
+                            "   sc   %[tmp], %[val] \n"
+                            "   beqz %[tmp], 1b     \n"
+                            "   nop                \n"
+                            ".set reorder           \n" :
                        [val] "+m"(lock->value), [tmp] "=&r"(tmp)
                        : : "memory");
 
@@ -163,17 +163,17 @@ static inline void cvmx_spinlock_bit_lock(uint32_t *word)
        unsigned int tmp;
        unsigned int sav;
 
-       __asm__ __volatile__(".set noreorder         \n"
-                            ".set noat              \n"
+       __asm__ __volatile__(".set noreorder         \n"
+                            ".set noat              \n"
                             "1: ll    %[tmp], %[val]  \n"
-                            "   bbit1 %[tmp], 31, 1b    \n"
-                            "   li    $at, 1      \n"
-                            "   ins   %[tmp], $at, 31, 1  \n"
-                            "   sc    %[tmp], %[val] \n"
-                            "   beqz  %[tmp], 1b     \n"
-                            "   nop                \n"
-                            ".set at              \n"
-                            ".set reorder           \n" :
+                            "   bbit1 %[tmp], 31, 1b    \n"
+                            "   li    $at, 1      \n"
+                            "   ins   %[tmp], $at, 31, 1  \n"
+                            "   sc    %[tmp], %[val] \n"
+                            "   beqz  %[tmp], 1b     \n"
+                            "   nop                \n"
+                            ".set at              \n"
+                            ".set reorder           \n" :
                        [val] "+m"(*word), [tmp] "=&r"(tmp), [sav] "=&r"(sav)
                             : : "memory");
 
@@ -187,7 +187,7 @@ static inline void cvmx_spinlock_bit_lock(uint32_t *word)
  *
  * @word:  word to lock bit 31 of
  * Returns 0: lock successfully taken
- *         1: lock not taken, held by someone else
+ *        1: lock not taken, held by someone else
  * These return values match the Linux semantics.
  */
 static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
@@ -198,15 +198,15 @@ static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
                             ".set noat\n"
                             "1: ll    %[tmp], %[val] \n"
                        /* if lock held, fail immediately */
-                            "   bbit1 %[tmp], 31, 2f     \n"
-                            "   li    $at, 1      \n"
-                            "   ins   %[tmp], $at, 31, 1  \n"
-                            "   sc    %[tmp], %[val] \n"
-                            "   beqz  %[tmp], 1b     \n"
-                            "   li    %[tmp], 0      \n"
-                            "2:                     \n"
-                            ".set at              \n"
-                            ".set reorder           \n" :
+                            "   bbit1 %[tmp], 31, 2f     \n"
+                            "   li    $at, 1      \n"
+                            "   ins   %[tmp], $at, 31, 1  \n"
+                            "   sc    %[tmp], %[val] \n"
+                            "   beqz  %[tmp], 1b     \n"
+                            "   li    %[tmp], 0      \n"
+                            "2:                     \n"
+                            ".set at              \n"
+                            ".set reorder           \n" :
                        [val] "+m"(*word), [tmp] "=&r"(tmp)
                        : : "memory");
 
index 61dd574..2131197 100644 (file)
@@ -85,7 +85,7 @@ struct cvmx_sysinfo {
        char board_serial_number[OCTEON_SERIAL_LEN];
        /*
         * Several boards support compact flash on the Octeon boot
-        * bus.  The CF memory spaces may be mapped to different
+        * bus.  The CF memory spaces may be mapped to different
         * addresses on different boards.  These values will be 0 if
         * CF is not present.  Note that these addresses are physical
         * addresses, and it is up to the application to use the
@@ -123,25 +123,25 @@ extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
 
 /**
  * This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.)  to configure the minimal fields that
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
  * are required to use simple executive files directly.
  *
  * Locking (if required) must be handled outside of this
  * function
  *
  * @phy_mem_desc_ptr: Pointer to global physical memory descriptor
- *                   (bootmem descriptor) @board_type: Octeon board
- *                   type enumeration
+ *                  (bootmem descriptor) @board_type: Octeon board
+ *                  type enumeration
  *
  * @board_rev_major:
- *                   Board major revision
+ *                  Board major revision
  * @board_rev_minor:
- *                   Board minor revision
+ *                  Board minor revision
  * @cpu_clock_hz:
- *                   CPU clock freqency in hertz
+ *                  CPU clock freqency in hertz
  *
  * Returns 0: Failure
- *         1: success
+ *        1: success
  */
 extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
                                           uint16_t board_type,
index df76238..aa0d3d0 100644 (file)
@@ -101,23 +101,23 @@ typedef union {
                 * - 1 = Malformed L4
                 * - 2 = L4 Checksum Error: the L4 checksum value is
                 * - 3 = UDP Length Error: The UDP length field would
-                *       make the UDP data longer than what remains in
-                *       the IP packet (as defined by the IP header
-                *       length field).
+                *       make the UDP data longer than what remains in
+                *       the IP packet (as defined by the IP header
+                *       length field).
                 * - 4 = Bad L4 Port: either the source or destination
-                *       TCP/UDP port is 0.
+                *       TCP/UDP port is 0.
                 * - 8 = TCP FIN Only: the packet is TCP and only the
-                *       FIN flag set.
+                *       FIN flag set.
                 * - 9 = TCP No Flags: the packet is TCP and no flags
-                *       are set.
+                *       are set.
                 * - 10 = TCP FIN RST: the packet is TCP and both FIN
-                *        and RST are set.
+                *        and RST are set.
                 * - 11 = TCP SYN URG: the packet is TCP and both SYN
-                *        and URG are set.
+                *        and URG are set.
                 * - 12 = TCP SYN RST: the packet is TCP and both SYN
-                *        and RST are set.
+                *        and RST are set.
                 * - 13 = TCP SYN FIN: the packet is TCP and both SYN
-                *        and FIN are set.
+                *        and FIN are set.
                 */
                uint64_t L4_error:1;
                /* set if the packet is a fragment */
@@ -127,16 +127,16 @@ typedef union {
                 * failure indicated in err_code below, decode:
                 *
                 * - 1 = Not IP: the IP version field is neither 4 nor
-                *       6.
+                *       6.
                 * - 2 = IPv4 Header Checksum Error: the IPv4 header
-                *       has a checksum violation.
+                *       has a checksum violation.
                 * - 3 = IP Malformed Header: the packet is not long
-                *       enough to contain the IP header.
+                *       enough to contain the IP header.
                 * - 4 = IP Malformed: the packet is not long enough
                 *       to contain the bytes indicated by the IP
                 *       header. Pad is allowed.
                 * - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
-                *       Hop Count field are zero.
+                *       Hop Count field are zero.
                 * - 6 = IP Options
                 */
                uint64_t IP_exc:1;
@@ -243,46 +243,46 @@ typedef union {
                 * decode:
                 *
                 * - 1 = partial error: a packet was partially
-                *       received, but internal buffering / bandwidth
-                *       was not adequate to receive the entire
-                *       packet.
+                *       received, but internal buffering / bandwidth
+                *       was not adequate to receive the entire
+                *       packet.
                 * - 2 = jabber error: the RGMII packet was too large
-                *       and is truncated.
+                *       and is truncated.
                 * - 3 = overrun error: the RGMII packet is longer
-                *       than allowed and had an FCS error.
+                *       than allowed and had an FCS error.
                 * - 4 = oversize error: the RGMII packet is longer
-                *       than allowed.
+                *       than allowed.
                 * - 5 = alignment error: the RGMII packet is not an
-                *       integer number of bytes
-                *       and had an FCS error (100M and 10M only).
+                *       integer number of bytes
+                *       and had an FCS error (100M and 10M only).
                 * - 6 = fragment error: the RGMII packet is shorter
-                *       than allowed and had an FCS error.
+                *       than allowed and had an FCS error.
                 * - 7 = GMX FCS error: the RGMII packet had an FCS
-                *       error.
+                *       error.
                 * - 8 = undersize error: the RGMII packet is shorter
-                *       than allowed.
+                *       than allowed.
                 * - 9 = extend error: the RGMII packet had an extend
-                *       error.
+                *       error.
                 * - 10 = length mismatch error: the RGMII packet had
-                *        a length that did not match the length field
-                *        in the L2 HDR.
+                *        a length that did not match the length field
+                *        in the L2 HDR.
                 * - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
-                *        packet had one or more data reception errors
-                *        (RXERR) or the SPI4 packet had one or more
-                *        DIP4 errors.
+                *        packet had one or more data reception errors
+                *        (RXERR) or the SPI4 packet had one or more
+                *        DIP4 errors.
                 * - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
-                *        packet was not large enough to cover the
-                *        skipped bytes or the SPI4 packet was
-                *        terminated with an About EOPS.
+                *        packet was not large enough to cover the
+                *        skipped bytes or the SPI4 packet was
+                *        terminated with an About EOPS.
                 * - 13 = RGMII nibble error/SPI4 Port NXA Error: the
-                *        RGMII packet had a studder error (data not
-                *        repeated - 10/100M only) or the SPI4 packet
-                *        was sent to an NXA.
+                *        RGMII packet had a studder error (data not
+                *        repeated - 10/100M only) or the SPI4 packet
+                *        was sent to an NXA.
                 * - 16 = FCS error: a SPI4.2 packet had an FCS error.
                 * - 17 = Skip error: a packet was not large enough to
-                *        cover the skipped bytes.
+                *        cover the skipped bytes.
                 * - 18 = L2 header malformed: the packet is not long
-                *        enough to contain the L2.
+                *        enough to contain the L2.
                 */
 
                uint64_t rcv_error:1;
@@ -309,7 +309,7 @@ typedef struct {
 
     /*****************************************************************
      * WORD 0
-     *  HW WRITE: the following 64 bits are filled by HW when a packet arrives
+     * HW WRITE: the following 64 bits are filled by HW when a packet arrives
      */
 
     /**
@@ -323,14 +323,14 @@ typedef struct {
     /**
      * Next pointer used by hardware for list maintenance.
      * May be written/read by HW before the work queue
-     *           entry is scheduled to a PP
+     *          entry is scheduled to a PP
      * (Only 36 bits used in Octeon 1)
      */
        uint64_t next_ptr:40;
 
     /*****************************************************************
      * WORD 1
-     *  HW WRITE: the following 64 bits are filled by HW when a packet arrives
+     * HW WRITE: the following 64 bits are filled by HW when a packet arrives
      */
 
     /**
@@ -362,8 +362,8 @@ typedef struct {
 
     /**
      * WORD 2 HW WRITE: the following 64-bits are filled in by
-     *   hardware when a packet arrives This indicates a variety of
-     *   status and error conditions.
+     *  hardware when a packet arrives This indicates a variety of
+     *  status and error conditions.
      */
        cvmx_pip_wqe_word2 word2;
 
@@ -373,15 +373,15 @@ typedef struct {
        union cvmx_buf_ptr packet_ptr;
 
     /**
-     *   HW WRITE: octeon will fill in a programmable amount from the
-     *             packet, up to (at most, but perhaps less) the amount
-     *             needed to fill the work queue entry to 128 bytes
+     *  HW WRITE: octeon will fill in a programmable amount from the
+     *            packet, up to (at most, but perhaps less) the amount
+     *            needed to fill the work queue entry to 128 bytes
      *
-     *   If the packet is recognized to be IP, the hardware starts
-     *   (except that the IPv4 header is padded for appropriate
-     *   alignment) writing here where the IP header starts.  If the
-     *   packet is not recognized to be IP, the hardware starts
-     *   writing the beginning of the packet here.
+     *  If the packet is recognized to be IP, the hardware starts
+     *  (except that the IPv4 header is padded for appropriate
+     *  alignment) writing here where the IP header starts.  If the
+     *  packet is not recognized to be IP, the hardware starts
+     *  writing the beginning of the packet here.
      */
        uint8_t packet_data[96];
 
index db58bea..f991e77 100644 (file)
@@ -76,14 +76,14 @@ enum cvmx_mips_space {
 #endif
 
 #if CVMX_ENABLE_DEBUG_PRINTS
-#define cvmx_dprintf        printk
+#define cvmx_dprintf       printk
 #else
 #define cvmx_dprintf(...)   {}
 #endif
 
-#define CVMX_MAX_CORES          (16)
-#define CVMX_CACHE_LINE_SIZE    (128)  /* In bytes */
-#define CVMX_CACHE_LINE_MASK    (CVMX_CACHE_LINE_SIZE - 1)     /* In bytes */
+#define CVMX_MAX_CORES         (16)
+#define CVMX_CACHE_LINE_SIZE   (128)   /* In bytes */
+#define CVMX_CACHE_LINE_MASK   (CVMX_CACHE_LINE_SIZE - 1)      /* In bytes */
 #define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
 #define CAST64(v) ((long long)(long)(v))
 #define CASTPTR(type, v) ((type *)(long)(v))
@@ -133,8 +133,8 @@ static inline uint64_t cvmx_build_io_address(uint64_t major_did,
  *
  * Example: cvmx_build_bits(39,24,value)
  * <pre>
- * 6       5       4       3       3       2       1
- * 3       5       7       9       1       3       5       7      0
+ * 6      5       4       3       3       2       1
+ * 3      5       7       9       1       3       5       7      0
  * +-------+-------+-------+-------+-------+-------+-------+------+
  * 000000000000000000000000___________value000000000000000000000000
  * </pre>
@@ -183,7 +183,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
  * memory pointer (void *).
  *
  * @physical_address:
- *               Hardware physical address to memory
+ *              Hardware physical address to memory
  * Returns Pointer to memory
  */
 static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
@@ -207,10 +207,10 @@ static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
 
 /* We have a full 64bit ABI. Writing to a 64bit address can be done with
     a simple volatile pointer */
-#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
-static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
-{                                                                       \
-    *CASTPTR(volatile TYPE##_t, addr) = val;                            \
+#define CVMX_BUILD_WRITE64(TYPE, ST)                                   \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)    \
+{                                                                      \
+    *CASTPTR(volatile TYPE##_t, addr) = val;                           \
 }
 
 
@@ -221,19 +221,19 @@ static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
 
 /* We have a full 64bit ABI. Writing to a 64bit address can be done with
     a simple volatile pointer */
-#define CVMX_BUILD_READ64(TYPE, LT)                                     \
-static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
-{                                                                       \
+#define CVMX_BUILD_READ64(TYPE, LT)                                    \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)               \
+{                                                                      \
        return *CASTPTR(volatile TYPE##_t, addr);                       \
 }
 
 
 /* The following defines 8 functions for writing to a 64bit address. Each
     takes two arguments, the address and the value to write.
-    cvmx_write64_int64      cvmx_write64_uint64
-    cvmx_write64_int32      cvmx_write64_uint32
-    cvmx_write64_int16      cvmx_write64_uint16
-    cvmx_write64_int8       cvmx_write64_uint8 */
+    cvmx_write64_int64     cvmx_write64_uint64
+    cvmx_write64_int32     cvmx_write64_uint32
+    cvmx_write64_int16     cvmx_write64_uint16
+    cvmx_write64_int8      cvmx_write64_uint8 */
 CVMX_BUILD_WRITE64(int64, "sd");
 CVMX_BUILD_WRITE64(int32, "sw");
 CVMX_BUILD_WRITE64(int16, "sh");
@@ -246,10 +246,10 @@ CVMX_BUILD_WRITE64(uint8, "sb");
 
 /* The following defines 8 functions for reading from a 64bit address. Each
     takes the address as the only argument
-    cvmx_read64_int64       cvmx_read64_uint64
-    cvmx_read64_int32       cvmx_read64_uint32
-    cvmx_read64_int16       cvmx_read64_uint16
-    cvmx_read64_int8        cvmx_read64_uint8 */
+    cvmx_read64_int64      cvmx_read64_uint64
+    cvmx_read64_int32      cvmx_read64_uint32
+    cvmx_read64_int16      cvmx_read64_uint16
+    cvmx_read64_int8       cvmx_read64_uint8 */
 CVMX_BUILD_READ64(int64, "ld");
 CVMX_BUILD_READ64(int32, "lw");
 CVMX_BUILD_READ64(int16, "lh");
@@ -389,7 +389,7 @@ static inline void cvmx_wait(uint64_t cycles)
 
 /**
  * Reads a chip global cycle counter.  This counts CPU cycles since
- * chip reset.  The counter is 64 bit.
+ * chip reset. The counter is 64 bit.
  * This register does not exist on CN38XX pass 1 silicion
  *
  * Returns Global chip cycle count since chip reset.
@@ -453,7 +453,7 @@ static inline uint32_t cvmx_octeon_num_cores(void)
 
 /**
  * Read a byte of fuse data
- * @byte_addr:   address to read
+ * @byte_addr:  address to read
  *
  * Returns fuse value: 0 or 1
  */
index 8008da2..90e05a8 100644 (file)
@@ -35,7 +35,7 @@
 #include <asm/octeon/cvmx-rnm-defs.h>
 
 enum octeon_feature {
-        /* CN68XX uses port kinds for packet interface */
+       /* CN68XX uses port kinds for packet interface */
        OCTEON_FEATURE_PKND,
        /* CN68XX has different fields in word0 - word2 */
        OCTEON_FEATURE_CN68XX_WQE,
@@ -51,7 +51,7 @@ enum octeon_feature {
        OCTEON_FEATURE_DORM_CRYPTO,
        /* Does this Octeon support PCI express? */
        OCTEON_FEATURE_PCIE,
-        /* Does this Octeon support SRIOs */
+       /* Does this Octeon support SRIOs */
        OCTEON_FEATURE_SRIO,
        /*  Does this Octeon support Interlaken */
        OCTEON_FEATURE_ILK,
@@ -75,7 +75,7 @@ enum octeon_feature {
        /* Octeon MDIO block supports clause 45 transactions for 10
         * Gig support */
        OCTEON_FEATURE_MDIO_CLAUSE_45,
-        /*
+       /*
         *  CN52XX and CN56XX used a block named NPEI for PCIe
         *  access. Newer chips replaced this with SLI+DPI.
         */
@@ -94,10 +94,10 @@ static inline int cvmx_fuse_read(int fuse);
  * be kept out of fast path code.
  *
  * @feature: Feature to check for. This should always be a constant so the
- *                compiler can remove the switch statement through optimization.
+ *               compiler can remove the switch statement through optimization.
  *
  * Returns Non zero if the feature exists. Zero if the feature does not
- *         exist.
+ *        exist.
  */
 static inline int octeon_has_feature(enum octeon_feature feature)
 {
index 349bb2b..e2c122c 100644 (file)
@@ -29,7 +29,7 @@
 
 /*
  * The defines below should be used with the OCTEON_IS_MODEL() macro
- * to determine what model of chip the software is running on.  Models
+ * to determine what model of chip the software is running on. Models
  * ending in 'XX' match multiple models (families), while specific
  * models match only that model.  If a pass (revision) is specified,
  * then only that revision will be matched.  Care should be taken when
  * subject to change at anytime without notice.
  *
  * NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN*
- * macros should be used outside of this file.  All other macros are
+ * macros should be used outside of this file. All other macros are
  * for internal use only, and may change without notice.
  */
 
-#define OCTEON_FAMILY_MASK      0x00ffff00
+#define OCTEON_FAMILY_MASK     0x00ffff00
 
 /* Flag bits in top byte */
 /* Ignores revision in model checks */
-#define OM_IGNORE_REVISION        0x01000000
+#define OM_IGNORE_REVISION       0x01000000
 /* Check submodels */
-#define OM_CHECK_SUBMODEL         0x02000000
+#define OM_CHECK_SUBMODEL        0x02000000
 /* Match all models previous than the one specified */
 #define OM_MATCH_PREVIOUS_MODELS  0x04000000
 /* Ignores the minor revison on newer parts */
 #define OM_IGNORE_MINOR_REVISION  0x08000000
-#define OM_FLAG_MASK              0xff000000
+#define OM_FLAG_MASK             0xff000000
 
 /* Match all cn5XXX Octeon models. */
-#define OM_MATCH_5XXX_FAMILY_MODELS     0x20000000
+#define OM_MATCH_5XXX_FAMILY_MODELS    0x20000000
 /* Match all cn6XXX Octeon models. */
-#define OM_MATCH_6XXX_FAMILY_MODELS     0x40000000
+#define OM_MATCH_6XXX_FAMILY_MODELS    0x40000000
 /* Match all cnf7XXX Octeon models. */
-#define OM_MATCH_F7XXX_FAMILY_MODELS    0x80000000
+#define OM_MATCH_F7XXX_FAMILY_MODELS   0x80000000
 
 /*
  * CNF7XXX models with new revision encoding
  */
-#define OCTEON_CNF71XX_PASS1_0  0x000d9400
+#define OCTEON_CNF71XX_PASS1_0 0x000d9400
 
-#define OCTEON_CNF71XX          (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CNF71XX_PASS1_X  (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CNF71XX         (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 
 /*
  * CN6XXX models with new revision encoding
  */
-#define OCTEON_CN68XX_PASS1_0   0x000d9100
-#define OCTEON_CN68XX_PASS1_1   0x000d9101
-#define OCTEON_CN68XX_PASS1_2   0x000d9102
-#define OCTEON_CN68XX_PASS2_0   0x000d9108
+#define OCTEON_CN68XX_PASS1_0  0x000d9100
+#define OCTEON_CN68XX_PASS1_1  0x000d9101
+#define OCTEON_CN68XX_PASS1_2  0x000d9102
+#define OCTEON_CN68XX_PASS2_0  0x000d9108
 
-#define OCTEON_CN68XX           (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN68XX_PASS1_X   (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN68XX_PASS2_X   (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX          (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN68XX_PASS1_X  (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX_PASS2_X  (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 
 #define OCTEON_CN68XX_PASS1    OCTEON_CN68XX_PASS1_X
 #define OCTEON_CN68XX_PASS2    OCTEON_CN68XX_PASS2_X
 
-#define OCTEON_CN66XX_PASS1_0   0x000d9200
-#define OCTEON_CN66XX_PASS1_2   0x000d9202
+#define OCTEON_CN66XX_PASS1_0  0x000d9200
+#define OCTEON_CN66XX_PASS1_2  0x000d9202
 
-#define OCTEON_CN66XX           (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN66XX_PASS1_X   (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN66XX          (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN66XX_PASS1_X  (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 
-#define OCTEON_CN63XX_PASS1_0   0x000d9000
-#define OCTEON_CN63XX_PASS1_1   0x000d9001
-#define OCTEON_CN63XX_PASS1_2   0x000d9002
-#define OCTEON_CN63XX_PASS2_0   0x000d9008
-#define OCTEON_CN63XX_PASS2_1   0x000d9009
-#define OCTEON_CN63XX_PASS2_2   0x000d900a
+#define OCTEON_CN63XX_PASS1_0  0x000d9000
+#define OCTEON_CN63XX_PASS1_1  0x000d9001
+#define OCTEON_CN63XX_PASS1_2  0x000d9002
+#define OCTEON_CN63XX_PASS2_0  0x000d9008
+#define OCTEON_CN63XX_PASS2_1  0x000d9009
+#define OCTEON_CN63XX_PASS2_2  0x000d900a
 
-#define OCTEON_CN63XX           (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN63XX_PASS1_X   (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN63XX_PASS2_X   (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX          (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X  (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X  (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 
-#define OCTEON_CN61XX_PASS1_0   0x000d9300
+#define OCTEON_CN61XX_PASS1_0  0x000d9300
 
-#define OCTEON_CN61XX           (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN61XX_PASS1_X   (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN61XX          (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN61XX_PASS1_X  (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 
 /*
  * CN5XXX models with new revision encoding
  */
-#define OCTEON_CN58XX_PASS1_0   0x000d0300
-#define OCTEON_CN58XX_PASS1_1   0x000d0301
-#define OCTEON_CN58XX_PASS1_2   0x000d0303
-#define OCTEON_CN58XX_PASS2_0   0x000d0308
-#define OCTEON_CN58XX_PASS2_1   0x000d0309
-#define OCTEON_CN58XX_PASS2_2   0x000d030a
-#define OCTEON_CN58XX_PASS2_3   0x000d030b
-
-#define OCTEON_CN58XX           (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN58XX_PASS1_X   (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN58XX_PASS2_X   (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN58XX_PASS1     OCTEON_CN58XX_PASS1_X
-#define OCTEON_CN58XX_PASS2     OCTEON_CN58XX_PASS2_X
-
-#define OCTEON_CN56XX_PASS1_0   0x000d0400
-#define OCTEON_CN56XX_PASS1_1   0x000d0401
-#define OCTEON_CN56XX_PASS2_0   0x000d0408
-#define OCTEON_CN56XX_PASS2_1   0x000d0409
-
-#define OCTEON_CN56XX           (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN56XX_PASS1_X   (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN56XX_PASS2_X   (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN56XX_PASS1     OCTEON_CN56XX_PASS1_X
-#define OCTEON_CN56XX_PASS2     OCTEON_CN56XX_PASS2_X
-
-#define OCTEON_CN57XX           OCTEON_CN56XX
-#define OCTEON_CN57XX_PASS1     OCTEON_CN56XX_PASS1
-#define OCTEON_CN57XX_PASS2     OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN55XX           OCTEON_CN56XX
-#define OCTEON_CN55XX_PASS1     OCTEON_CN56XX_PASS1
-#define OCTEON_CN55XX_PASS2     OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN54XX           OCTEON_CN56XX
-#define OCTEON_CN54XX_PASS1     OCTEON_CN56XX_PASS1
-#define OCTEON_CN54XX_PASS2     OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN50XX_PASS1_0   0x000d0600
-
-#define OCTEON_CN50XX           (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN50XX_PASS1_X   (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN50XX_PASS1     OCTEON_CN50XX_PASS1_X
+#define OCTEON_CN58XX_PASS1_0  0x000d0300
+#define OCTEON_CN58XX_PASS1_1  0x000d0301
+#define OCTEON_CN58XX_PASS1_2  0x000d0303
+#define OCTEON_CN58XX_PASS2_0  0x000d0308
+#define OCTEON_CN58XX_PASS2_1  0x000d0309
+#define OCTEON_CN58XX_PASS2_2  0x000d030a
+#define OCTEON_CN58XX_PASS2_3  0x000d030b
+
+#define OCTEON_CN58XX          (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX_PASS1_X  (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS2_X  (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS1    OCTEON_CN58XX_PASS1_X
+#define OCTEON_CN58XX_PASS2    OCTEON_CN58XX_PASS2_X
+
+#define OCTEON_CN56XX_PASS1_0  0x000d0400
+#define OCTEON_CN56XX_PASS1_1  0x000d0401
+#define OCTEON_CN56XX_PASS2_0  0x000d0408
+#define OCTEON_CN56XX_PASS2_1  0x000d0409
+
+#define OCTEON_CN56XX          (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN56XX_PASS1_X  (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS2_X  (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS1    OCTEON_CN56XX_PASS1_X
+#define OCTEON_CN56XX_PASS2    OCTEON_CN56XX_PASS2_X
+
+#define OCTEON_CN57XX          OCTEON_CN56XX
+#define OCTEON_CN57XX_PASS1    OCTEON_CN56XX_PASS1
+#define OCTEON_CN57XX_PASS2    OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN55XX          OCTEON_CN56XX
+#define OCTEON_CN55XX_PASS1    OCTEON_CN56XX_PASS1
+#define OCTEON_CN55XX_PASS2    OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN54XX          OCTEON_CN56XX
+#define OCTEON_CN54XX_PASS1    OCTEON_CN56XX_PASS1
+#define OCTEON_CN54XX_PASS2    OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN50XX_PASS1_0  0x000d0600
+
+#define OCTEON_CN50XX          (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN50XX_PASS1_X  (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN50XX_PASS1    OCTEON_CN50XX_PASS1_X
 
 /*
  * NOTE: Octeon CN5000F model is not identifiable using the
  * OCTEON_IS_MODEL() functions, but are treated as CN50XX.
  */
 
-#define OCTEON_CN52XX_PASS1_0   0x000d0700
-#define OCTEON_CN52XX_PASS2_0   0x000d0708
+#define OCTEON_CN52XX_PASS1_0  0x000d0700
+#define OCTEON_CN52XX_PASS2_0  0x000d0708
 
-#define OCTEON_CN52XX           (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN52XX_PASS1_X   (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN52XX_PASS2_X   (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN52XX_PASS1     OCTEON_CN52XX_PASS1_X
-#define OCTEON_CN52XX_PASS2     OCTEON_CN52XX_PASS2_X
+#define OCTEON_CN52XX          (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN52XX_PASS1_X  (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS2_X  (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS1    OCTEON_CN52XX_PASS1_X
+#define OCTEON_CN52XX_PASS2    OCTEON_CN52XX_PASS2_X
 
 /*
  * CN3XXX models with old revision enconding
  */
-#define OCTEON_CN38XX_PASS1     0x000d0000
-#define OCTEON_CN38XX_PASS2     0x000d0001
-#define OCTEON_CN38XX_PASS3     0x000d0003
-#define OCTEON_CN38XX           (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
+#define OCTEON_CN38XX_PASS1    0x000d0000
+#define OCTEON_CN38XX_PASS2    0x000d0001
+#define OCTEON_CN38XX_PASS3    0x000d0003
+#define OCTEON_CN38XX          (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
 
-#define OCTEON_CN36XX           OCTEON_CN38XX
-#define OCTEON_CN36XX_PASS2     OCTEON_CN38XX_PASS2
-#define OCTEON_CN36XX_PASS3     OCTEON_CN38XX_PASS3
+#define OCTEON_CN36XX          OCTEON_CN38XX
+#define OCTEON_CN36XX_PASS2    OCTEON_CN38XX_PASS2
+#define OCTEON_CN36XX_PASS3    OCTEON_CN38XX_PASS3
 
 /* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
-#define OCTEON_CN31XX_PASS1     0x000d0100
-#define OCTEON_CN31XX_PASS1_1   0x000d0102
-#define OCTEON_CN31XX           (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
+#define OCTEON_CN31XX_PASS1    0x000d0100
+#define OCTEON_CN31XX_PASS1_1  0x000d0102
+#define OCTEON_CN31XX          (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
 
 /*
  * This model is only used for internal checks, it is not a valid
  * model for the OCTEON_MODEL environment variable.  This matches the
  * CN3010 and CN3005 but NOT the CN3020.
  */
-#define OCTEON_CN30XX_PASS1     0x000d0200
-#define OCTEON_CN30XX_PASS1_1   0x000d0202
-#define OCTEON_CN30XX           (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
+#define OCTEON_CN30XX_PASS1    0x000d0200
+#define OCTEON_CN30XX_PASS1_1  0x000d0202
+#define OCTEON_CN30XX          (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
 
-#define OCTEON_CN3005_PASS1     (0x000d0210 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005_PASS1_0   (0x000d0210 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005_PASS1_1   (0x000d0212 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005           (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1    (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_0  (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_1  (0x000d0212 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005          (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
 
-#define OCTEON_CN3010_PASS1     (0x000d0200 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1_0   (0x000d0200 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1_1   (0x000d0202 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010           (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1    (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_0  (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_1  (0x000d0202 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010          (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
 
-#define OCTEON_CN3020_PASS1     (0x000d0110 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1_0   (0x000d0110 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1_1   (0x000d0112 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020           (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1    (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_0  (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_1  (0x000d0112 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020          (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
 
 /*
  * This matches the complete family of CN3xxx CPUs, and not subsequent
  * models
  */
-#define OCTEON_CN3XXX           (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
-#define OCTEON_CN5XXX           (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
-#define OCTEON_CN6XXX           (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+#define OCTEON_CN3XXX          (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
+#define OCTEON_CN5XXX          (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
+#define OCTEON_CN6XXX          (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
 
 /* These are used to cover entire families of OCTEON processors */
 #define OCTEON_FAM_1           (OCTEON_CN3XXX)
  */
 
 /* Masks used for the various types of model/family/revision matching */
-#define OCTEON_38XX_FAMILY_MASK      0x00ffff00
+#define OCTEON_38XX_FAMILY_MASK             0x00ffff00
 #define OCTEON_38XX_FAMILY_REV_MASK  0x00ffff0f
-#define OCTEON_38XX_MODEL_MASK       0x00ffff10
+#define OCTEON_38XX_MODEL_MASK      0x00ffff10
 #define OCTEON_38XX_MODEL_REV_MASK   (OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
 
 /* CN5XXX and later use different layout of bits in the revision ID field */
-#define OCTEON_58XX_FAMILY_MASK      OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_MASK             OCTEON_38XX_FAMILY_MASK
 #define OCTEON_58XX_FAMILY_REV_MASK  0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK       0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK      0x00ffffc0
 #define OCTEON_58XX_MODEL_REV_MASK   (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
 #define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
-#define OCTEON_5XXX_MODEL_MASK       0x00ff0fc0
+#define OCTEON_5XXX_MODEL_MASK      0x00ff0fc0
 
 /* forward declarations */
 static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
@@ -264,7 +264,7 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
 
 /* NOTE: This for internal use only! */
 #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)             \
-((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0)  && (        \
+((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0)  && ( \
                ((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
                        && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_MASK)) || \
                ((((arg_model) & (OM_FLAG_MASK)) == 0)                  \
@@ -276,7 +276,7 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
                ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
                        && (((chip_model) & OCTEON_38XX_MODEL_MASK) < ((arg_model) & OCTEON_38XX_MODEL_MASK))) \
                )) ||                                                   \
-       (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0)  && ( \
+       (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0)  && ( \
                ((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
                        && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
                ((((arg_model) & (OM_FLAG_MASK)) == 0)                  \
@@ -320,7 +320,7 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
  * Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
  * is NOT SUPPORTED, and should be replaced with CVMX_COMPILED_FOR()
  * I.e.:
- *  #if OCTEON_IS_MODEL(OCTEON_CN56XX)  ->  #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
+ *  #if OCTEON_IS_MODEL(OCTEON_CN56XX) ->  #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
  */
 #define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
 #define OCTEON_IS_COMMON_BINARY() 1
index 254e995..a2eed23 100644 (file)
@@ -75,15 +75,15 @@ struct octeon_boot_descriptor {
        uint32_t argc;
        uint32_t argv[OCTEON_ARGV_MAX_ARGS];
 
-#define  BOOT_FLAG_INIT_CORE           (1 << 0)
-#define  OCTEON_BL_FLAG_DEBUG          (1 << 1)
-#define  OCTEON_BL_FLAG_NO_MAGIC       (1 << 2)
+#define         BOOT_FLAG_INIT_CORE            (1 << 0)
+#define         OCTEON_BL_FLAG_DEBUG           (1 << 1)
+#define         OCTEON_BL_FLAG_NO_MAGIC        (1 << 2)
        /* If set, use uart1 for console */
-#define  OCTEON_BL_FLAG_CONSOLE_UART1  (1 << 3)
+#define         OCTEON_BL_FLAG_CONSOLE_UART1   (1 << 3)
        /* If set, use PCI console */
-#define  OCTEON_BL_FLAG_CONSOLE_PCI    (1 << 4)
+#define         OCTEON_BL_FLAG_CONSOLE_PCI     (1 << 4)
        /* Call exit on break on serial port */
-#define  OCTEON_BL_FLAG_BREAK          (1 << 5)
+#define         OCTEON_BL_FLAG_BREAK           (1 << 5)
 
        uint32_t flags;
        uint32_t core_mask;
index c66734b..64ba56a 100644 (file)
@@ -22,7 +22,7 @@
 #define CVMX_PCIE_BAR1_PHYS_SIZE (1ull << 28)
 
 /*
- * The RC base of BAR1.  gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
+ * The RC base of BAR1.         gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
  * place BAR1 so it is the same for both.
  */
 #define CVMX_PCIE_BAR1_RC_BASE (1ull << 41)
index 9ce5a1e..2474fc5 100644 (file)
@@ -43,7 +43,7 @@ struct __large_pstruct { unsigned long buf[100]; };
        case 1: __get_dbe_asm("lb"); break;                             \
        case 2: __get_dbe_asm("lh"); break;                             \
        case 4: __get_dbe_asm("lw"); break;                             \
-       case 8:  __get_dbe_asm("ld"); break;                            \
+       case 8:  __get_dbe_asm("ld"); break;                            \
        default: __get_dbe_unknown(); break;                            \
        }                                                               \
        x = (__typeof__(*(ptr))) __gu_val;                              \
index dbaec94..99fc547 100644 (file)
@@ -31,7 +31,7 @@
 #define PAGE_SHIFT     16
 #endif
 #define PAGE_SIZE      (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK       (~(PAGE_SIZE - 1))
+#define PAGE_MASK      (~(PAGE_SIZE - 1))
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 #define HPAGE_SHIFT    (PAGE_SHIFT + PAGE_SHIFT - 3)
@@ -95,11 +95,11 @@ extern void copy_user_highpage(struct page *to, struct page *from,
 #ifdef CONFIG_64BIT_PHYS_ADDR
   #ifdef CONFIG_CPU_MIPS32
     typedef struct { unsigned long pte_low, pte_high; } pte_t;
-    #define pte_val(x)    ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-    #define __pte(x)      ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+    #define pte_val(x)   ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+    #define __pte(x)     ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
   #else
      typedef struct { unsigned long long pte; } pte_t;
-     #define pte_val(x)        ((x).pte)
+     #define pte_val(x) ((x).pte)
      #define __pte(x)  ((pte_t) { (x) } )
   #endif
 #else
@@ -191,8 +191,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
        unsigned long __pfn = (pfn);                                    \
        int __n = pfn_to_nid(__pfn);                                    \
        ((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn +         \
-                              NODE_DATA(__n)->node_spanned_pages)      \
-                   : 0);                                               \
+                              NODE_DATA(__n)->node_spanned_pages)      \
+                   : 0);                                               \
 })
 
 #endif
@@ -206,7 +206,7 @@ extern int __virt_addr_valid(const volatile void *kaddr);
 #define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-#define UNCAC_ADDR(addr)       ((addr) - PAGE_OFFSET + UNCAC_BASE +    \
+#define UNCAC_ADDR(addr)       ((addr) - PAGE_OFFSET + UNCAC_BASE +    \
                                                                PHYS_OFFSET)
 #define CAC_ADDR(addr)         ((addr) - UNCAC_BASE + PAGE_OFFSET -    \
                                                                PHYS_OFFSET)
index d69ea74..b8e24fd 100644 (file)
@@ -12,7 +12,7 @@
 
 /*
  * This file essentially defines the interface between board
- * specific PCI code and MIPS common PCI code.  Should potentially put
+ * specific PCI code and MIPS common PCI code. Should potentially put
  * into include/asm/pci.h file.
  */
 
@@ -20,7 +20,7 @@
 #include <linux/of.h>
 
 /*
- * Each pci channel is a top-level PCI bus seem by CPU.  A machine  with
+ * Each pci channel is a top-level PCI bus seem by CPU.         A machine  with
  * multiple PCI channels may have multiple PCI host controllers or a
  * single controller supporting multiple channels.
  */
@@ -99,7 +99,7 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 struct pci_dev;
 
 /*
- * The PCI address space does equal the physical memory address space.  The
+ * The PCI address space does equal the physical memory address space. The
  * networking and block device layers use this boolean for bounce buffer
  * decisions.  This is set if any hose does not have an IOMMU.
  */
@@ -144,8 +144,13 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 
 extern char * (*pcibios_plat_setup)(char *str);
 
+#ifdef CONFIG_OF
 /* this function parses memory ranges from a device node */
 extern void pci_load_of_ranges(struct pci_controller *hose,
                               struct device_node *node);
+#else
+static inline void pci_load_of_ranges(struct pci_controller *hose,
+                                     struct device_node *node) {}
+#endif
 
 #endif /* _ASM_PCI_H */
index be44fb0..af2c8a3 100644 (file)
@@ -85,7 +85,7 @@ typedef volatile struct bridge_s {
 #define b_wid_llp                      b_widget.w_llp_cfg
 #define b_wid_tflush                   b_widget.w_tflush
 
-       /* bridge-specific widget configuration 0x000058-0x00007F */
+       /* bridge-specific widget configuration 0x000058-0x00007F */
        bridgereg_t         _pad_000058;
        bridgereg_t         b_wid_aux_err;              /* 0x00005C */
        bridgereg_t         _pad_000060;
@@ -167,8 +167,8 @@ typedef volatile struct bridge_s {
                bridgereg_t     __pad;                  /* 0x0002{80,,,88} */
                bridgereg_t     reg;                    /* 0x0002{84,,,8C} */
        } b_rrb_map[2];                                 /* 0x000280 */
-#define        b_even_resp     b_rrb_map[0].reg                /* 0x000284 */
-#define        b_odd_resp      b_rrb_map[1].reg                /* 0x00028C */
+#define b_even_resp    b_rrb_map[0].reg                /* 0x000284 */
+#define b_odd_resp     b_rrb_map[1].reg                /* 0x00028C */
 
        bridgereg_t     _pad_000290;
        bridgereg_t     b_resp_status;                  /* 0x000294 */
@@ -233,7 +233,7 @@ typedef volatile struct bridge_s {
        u8      _pad_030007[0x04fff8];                  /* 0x030008-0x07FFFF */
 
        /* External Address Translation Entry RAM 0x080000-0x0FFFFF */
-       bridge_ate_t    b_ext_ate_ram[0x10000];
+       bridge_ate_t    b_ext_ate_ram[0x10000];
 
        /* Reserved 0x100000-0x1FFFFF */
        char    _pad_100000[0x200000-0x100000];
@@ -400,7 +400,7 @@ typedef struct bridge_err_cmdword_s {
 #define BRIDGE_REV_A                   0x1
 #define BRIDGE_REV_B                   0x2
 #define BRIDGE_REV_C                   0x3
-#define        BRIDGE_REV_D                    0x4
+#define BRIDGE_REV_D                   0x4
 
 /* Bridge widget status register bits definition */
 
@@ -691,21 +691,21 @@ typedef struct bridge_err_cmdword_s {
 #define BRIDGE_CREDIT  3
 
 /* RRB assignment register */
-#define        BRIDGE_RRB_EN   0x8     /* after shifting down */
-#define        BRIDGE_RRB_DEV  0x7     /* after shifting down */
-#define        BRIDGE_RRB_VDEV 0x4     /* after shifting down */
-#define        BRIDGE_RRB_PDEV 0x3     /* after shifting down */
+#define BRIDGE_RRB_EN  0x8     /* after shifting down */
+#define BRIDGE_RRB_DEV 0x7     /* after shifting down */
+#define BRIDGE_RRB_VDEV 0x4    /* after shifting down */
+#define BRIDGE_RRB_PDEV 0x3    /* after shifting down */
 
 /* RRB status register */
-#define        BRIDGE_RRB_VALID(r)     (0x00010000<<(r))
-#define        BRIDGE_RRB_INUSE(r)     (0x00000001<<(r))
+#define BRIDGE_RRB_VALID(r)    (0x00010000<<(r))
+#define BRIDGE_RRB_INUSE(r)    (0x00000001<<(r))
 
 /* RRB clear register */
-#define        BRIDGE_RRB_CLEAR(r)     (0x00000001<<(r))
+#define BRIDGE_RRB_CLEAR(r)    (0x00000001<<(r))
 
 /* xbox system controller declarations */
-#define XBOX_BRIDGE_WID         8
-#define FLASH_PROM1_BASE        0xE00000 /* To read the xbox sysctlr status */
+#define XBOX_BRIDGE_WID                8
+#define FLASH_PROM1_BASE       0xE00000 /* To read the xbox sysctlr status */
 #define XBOX_RPS_EXISTS                1 << 6   /* RPS bit in status register */
 #define XBOX_RPS_FAIL          1 << 4   /* RPS status bit in register */
 
@@ -838,7 +838,7 @@ struct bridge_controller {
        bridge_t                *base;
        nasid_t                 nasid;
        unsigned int            widget_id;
-       unsigned int            irq_cpu;
+       unsigned int            irq_cpu;
        u64                     baddr;
        unsigned int            pci_int[8];
 };
index 5d56bb2..b4204c1 100644 (file)
@@ -47,7 +47,7 @@
 #define USER_PTRS_PER_PGD      (0x80000000UL/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS     0
 
-#define VMALLOC_START     MAP_BASE
+#define VMALLOC_START    MAP_BASE
 
 #define PKMAP_BASE             (0xfe000000UL)
 
@@ -136,7 +136,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define pte_offset_kernel(dir, address)                                        \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 
-#define pte_offset_map(dir, address)                                    \
+#define pte_offset_map(dir, address)                                   \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
 
@@ -155,7 +155,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 
 #define pte_to_pgoff(_pte)     ((((_pte).pte >> 1 ) & 0x07) | \
                                 (((_pte).pte >> 2 ) & 0x38) | \
-                                (((_pte).pte >> 10) <<  6 ))
+                                (((_pte).pte >> 10) <<  6 ))
 
 #define pgoff_to_pte(off)      ((pte_t) { (((off) & 0x07) << 1 ) | \
                                           (((off) & 0x38) << 2 ) | \
@@ -167,14 +167,14 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 /* Swap entries must have VALID and GLOBAL bits cleared. */
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 #define __swp_type(x)          (((x).val >> 2) & 0x1f)
-#define __swp_offset(x)         ((x).val >> 7)
+#define __swp_offset(x)                 ((x).val >> 7)
 #define __swp_entry(type,offset)       \
-               ((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
+               ((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
 #else
 #define __swp_type(x)          (((x).val >> 8) & 0x1f)
-#define __swp_offset(x)         ((x).val >> 13)
+#define __swp_offset(x)                 ((x).val >> 13)
 #define __swp_entry(type,offset)       \
-               ((swp_entry_t)  { ((type) << 8) | ((offset) << 13) })
+               ((swp_entry_t)  { ((type) << 8) | ((offset) << 13) })
 #endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
 
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
@@ -184,7 +184,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define PTE_FILE_MAX_BITS      30
 
 #define pte_to_pgoff(_pte)     ((_pte).pte_high >> 2)
-#define pgoff_to_pte(off)      ((pte_t) { _PAGE_FILE, (off) << 2 })
+#define pgoff_to_pte(off)      ((pte_t) { _PAGE_FILE, (off) << 2 })
 
 #else
 /*
@@ -194,7 +194,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 
 #define pte_to_pgoff(_pte)     ((((_pte).pte >> 1) & 0x7) | \
                                 (((_pte).pte >> 2) & 0x8) | \
-                                (((_pte).pte >> 8) <<  4))
+                                (((_pte).pte >> 8) <<  4))
 
 #define pgoff_to_pte(off)      ((pte_t) { (((off) & 0x7) << 1) | \
                                           (((off) & 0x8) << 2) | \
@@ -208,7 +208,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
 #define __swp_entry_to_pte(x)  ((pte_t) { 0, (x).val })
 #else
-#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 #endif
 
index 013d5f7..e1c49a9 100644 (file)
 #define PTRS_PER_PTE   ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
 
 #if PGDIR_SIZE >= TASK_SIZE64
-#define USER_PTRS_PER_PGD       (1)
+#define USER_PTRS_PER_PGD      (1)
 #else
 #define USER_PTRS_PER_PGD      (TASK_SIZE64 / PGDIR_SIZE)
 #endif
@@ -288,7 +288,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __swp_type(x)          (((x).val >> 32) & 0xff)
 #define __swp_offset(x)                ((x).val >> 40)
 #define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
-#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
 /*
index f6a0439..32aea48 100644 (file)
@@ -21,7 +21,7 @@
  * Similar to the Alpha port, we need to keep track of the ref
  * and mod bits in software.  We have a software "yeah you can read
  * from this page" bit, and a hardware one which actually lets the
- * process read from the page.  On the same token we have a software
+ * process read from the page. On the same token we have a software
  * writable bit and the real hardware one which actually lets the
  * process write to the page, this keeps a mod bit via the hardware
  * dirty bit.
@@ -41,9 +41,9 @@
 #define _PAGE_GLOBAL           (1 << 0)
 #define _PAGE_VALID_SHIFT      1
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ      (1 << 1)  /* synonym                 */
+#define _PAGE_SILENT_READ      (1 << 1)  /* synonym                 */
 #define _PAGE_DIRTY_SHIFT      2
-#define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)  /* The MIPS dirty bit      */
+#define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)  /* The MIPS dirty bit      */
 #define _PAGE_SILENT_WRITE     (1 << 2)
 #define _CACHE_SHIFT           3
 #define _CACHE_MASK            (7 << 3)
@@ -52,7 +52,7 @@
  * The following bits are implemented in software
  *
  * _PAGE_FILE semantics: set:pagecache unset:swap
- */ 
+ */
 #define _PAGE_PRESENT_SHIFT    6
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
 #define _PAGE_READ_SHIFT       7
 #define _PAGE_HUGE             (1 << _PAGE_HUGE_SHIFT)
 #else
 #define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
+#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
 #endif
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 #define _PAGE_SPLITTING                (1 << _PAGE_SPLITTING_SHIFT)
 #else
 #define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT)
-#define _PAGE_SPLITTING                ({BUG(); 1; })  /* Dummy value */
+#define _PAGE_SPLITTING                ({BUG(); 1; })  /* Dummy value */
 #endif
 
 /* Page cannot be executed */
 
 #define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
-/* synonym                 */
+/* synonym                */
 #define _PAGE_SILENT_READ      (_PAGE_VALID)
 
-/* The MIPS dirty bit      */
+/* The MIPS dirty bit     */
 #define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)
 #define _PAGE_SILENT_WRITE     (_PAGE_DIRTY)
 #endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */
 
 #ifndef _PFN_SHIFT
-#define _PFN_SHIFT                  PAGE_SHIFT
+#define _PFN_SHIFT                 PAGE_SHIFT
 #endif
 #define _PFN_MASK              (~((1 << (_PFN_SHIFT)) - 1))
 
@@ -230,28 +230,28 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 /* No penalty for being coherent on the SB1, so just
    use it for "noncoherent" spaces, too.  Shouldn't hurt. */
 
-#define _CACHE_UNCACHED             (2<<_CACHE_SHIFT)
-#define _CACHE_CACHABLE_COW         (5<<_CACHE_SHIFT)
+#define _CACHE_UNCACHED                    (2<<_CACHE_SHIFT)
+#define _CACHE_CACHABLE_COW        (5<<_CACHE_SHIFT)
 #define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
 #define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
 
 #else
 
-#define _CACHE_CACHABLE_NO_WA      (0<<_CACHE_SHIFT)  /* R4600 only      */
-#define _CACHE_CACHABLE_WA         (1<<_CACHE_SHIFT)  /* R4600 only      */
-#define _CACHE_UNCACHED             (2<<_CACHE_SHIFT)  /* R4[0246]00      */
-#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* R4[0246]00      */
-#define _CACHE_CACHABLE_CE          (4<<_CACHE_SHIFT)  /* R4[04]00MC only */
-#define _CACHE_CACHABLE_COW         (5<<_CACHE_SHIFT)  /* R4[04]00MC only */
-#define _CACHE_CACHABLE_COHERENT    (5<<_CACHE_SHIFT)  /* MIPS32R2 CMP    */
-#define _CACHE_CACHABLE_CUW         (6<<_CACHE_SHIFT)  /* R4[04]00MC only */
-#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)  /* R10000 only     */
+#define _CACHE_CACHABLE_NO_WA      (0<<_CACHE_SHIFT)  /* R4600 only      */
+#define _CACHE_CACHABLE_WA         (1<<_CACHE_SHIFT)  /* R4600 only      */
+#define _CACHE_UNCACHED                    (2<<_CACHE_SHIFT)  /* R4[0246]00      */
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* R4[0246]00     */
+#define _CACHE_CACHABLE_CE         (4<<_CACHE_SHIFT)  /* R4[04]00MC only */
+#define _CACHE_CACHABLE_COW        (5<<_CACHE_SHIFT)  /* R4[04]00MC only */
+#define _CACHE_CACHABLE_COHERENT    (5<<_CACHE_SHIFT)  /* MIPS32R2 CMP   */
+#define _CACHE_CACHABLE_CUW        (6<<_CACHE_SHIFT)  /* R4[04]00MC only */
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)  /* R10000 only    */
 
 #endif
 
 #define __READABLE     (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
 #define __WRITEABLE    (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
 
-#define _PAGE_CHG_MASK  (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
 
 #endif /* _ASM_PGTABLE_BITS_H */
index ec50d52..fdc62fb 100644 (file)
@@ -112,7 +112,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
                 * it better already be global)
                 */
                if (pte_none(*buddy)) {
-                       buddy->pte_low  |= _PAGE_GLOBAL;
+                       buddy->pte_low  |= _PAGE_GLOBAL;
                        buddy->pte_high |= _PAGE_GLOBAL;
                }
        }
@@ -319,7 +319,7 @@ static inline int pte_special(pte_t pte)    { return 0; }
 static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 /*
- * Macro to make mark a page protection value as "uncacheable".  Note
+ * Macro to make mark a page protection value as "uncacheable".         Note
  * that "protection" is really a misnomer here as the protection value
  * contains the memory attribute bits, dirty bits, and various other
  * bits as well.
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h b/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h
deleted file mode 100644 (file)
index 016fa94..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003, 04, 07 Ralf Baechle (ralf@linux-mips.org)
- */
-#ifndef __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H
-#define __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H
-
-#define cpu_has_mips16         1
-#define cpu_has_dsp            1
-/* #define cpu_has_dsp2                ??? - do runtime detection */
-#define cpu_has_mipsmt         1
-#define cpu_has_fpu            0
-
-#define cpu_has_mips32r1       0
-#define cpu_has_mips32r2       1
-#define cpu_has_mips64r1       0
-#define cpu_has_mips64r2       0
-
-#endif /* __ASM_MACH_MSP71XX_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h b/arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h
deleted file mode 100644 (file)
index ebdbab9..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * include/asm-mips/pmc-sierra/msp71xx/gpio.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * @author Patrick Glass <patrickglass@gmail.com>
- */
-
-#ifndef __PMC_MSP71XX_GPIO_H
-#define __PMC_MSP71XX_GPIO_H
-
-/* Max number of gpio's is 28 on chip plus 3 banks of I2C IO Expanders */
-#define ARCH_NR_GPIOS (28 + (3 * 8))
-
-/* new generic GPIO API - see Documentation/gpio.txt */
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep  __gpio_cansleep
-
-/* Setup calls for the gpio and gpio extended */
-extern void msp71xx_init_gpio(void);
-extern void msp71xx_init_gpio_extended(void);
-extern int msp71xx_set_output_drive(unsigned gpio, int value);
-
-/* Custom output drive functionss */
-static inline int gpio_set_output_drive(unsigned gpio, int value)
-{
-       return msp71xx_set_output_drive(gpio, value);
-}
-
-/* IRQ's are not supported for gpio lines */
-static inline int gpio_to_irq(unsigned gpio)
-{
-       return -EINVAL;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
-       return -EINVAL;
-}
-
-#endif /* __PMC_MSP71XX_GPIO_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h
deleted file mode 100644 (file)
index c84bcf9..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Defines for the MSP interrupt controller.
- *
- * Copyright (C) 1999 MIPS Technologies, Inc.  All rights reserved.
- * Author: Carsten Langgaard, carstenl@mips.com
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- */
-
-#ifndef _MSP_CIC_INT_H
-#define _MSP_CIC_INT_H
-
-/*
- * The PMC-Sierra CIC interrupts are all centrally managed by the
- * CIC sub-system.
- * We attempt to keep the interrupt numbers as consistent as possible
- * across all of the MSP devices, but some differences will creep in ...
- * The interrupts which are directly forwarded to the MIPS core interrupts
- * are assigned interrupts in the range 0-7, interrupts cascaded through
- * the CIC are assigned interrupts 8-39.  The cascade occurs on C_IRQ4
- * (MSP_INT_CIC).  Currently we don't really distinguish between VPE1
- * and VPE0 (or thread contexts for that matter).  Will have to fix.
- * The PER interrupts are assigned interrupts in the range 40-71.
-*/
-
-
-/*
- * IRQs directly forwarded to the CPU
- */
-#define MSP_MIPS_INTBASE       0
-#define MSP_INT_SW0            0       /* IRQ for swint0,       C_SW0  */
-#define MSP_INT_SW1            1       /* IRQ for swint1,       C_SW1  */
-#define MSP_INT_MAC0           2       /* IRQ for MAC 0,        C_IRQ0 */
-#define MSP_INT_MAC1           3       /* IRQ for MAC 1,        C_IRQ1 */
-#define MSP_INT_USB            4       /* IRQ for USB,          C_IRQ2 */
-#define MSP_INT_SAR            5       /* IRQ for ADSL2+ SAR,   C_IRQ3 */
-#define MSP_INT_CIC            6       /* IRQ for CIC block,    C_IRQ4 */
-#define MSP_INT_SEC            7       /* IRQ for Sec engine,   C_IRQ5 */
-
-/*
- * IRQs cascaded on CPU interrupt 4 (CAUSE bit 12, C_IRQ4)
- * These defines should be tied to the register definitions for the CIC
- * interrupt routine.  For now, just use hard-coded values.
- */
-#define MSP_CIC_INTBASE                (MSP_MIPS_INTBASE + 8)
-#define MSP_INT_EXT0           (MSP_CIC_INTBASE + 0)
-                                       /* External interrupt 0         */
-#define MSP_INT_EXT1           (MSP_CIC_INTBASE + 1)
-                                       /* External interrupt 1         */
-#define MSP_INT_EXT2           (MSP_CIC_INTBASE + 2)
-                                       /* External interrupt 2         */
-#define MSP_INT_EXT3           (MSP_CIC_INTBASE + 3)
-                                       /* External interrupt 3         */
-#define MSP_INT_CPUIF          (MSP_CIC_INTBASE + 4)
-                                       /* CPU interface interrupt      */
-#define MSP_INT_EXT4           (MSP_CIC_INTBASE + 5)
-                                       /* External interrupt 4         */
-#define MSP_INT_CIC_USB                (MSP_CIC_INTBASE + 6)
-                                       /* Cascaded IRQ for USB         */
-#define MSP_INT_MBOX           (MSP_CIC_INTBASE + 7)
-                                       /* Sec engine mailbox IRQ       */
-#define MSP_INT_EXT5           (MSP_CIC_INTBASE + 8)
-                                       /* External interrupt 5         */
-#define MSP_INT_TDM            (MSP_CIC_INTBASE + 9)
-                                       /* TDM interrupt                */
-#define MSP_INT_CIC_MAC0       (MSP_CIC_INTBASE + 10)
-                                       /* Cascaded IRQ for MAC 0       */
-#define MSP_INT_CIC_MAC1       (MSP_CIC_INTBASE + 11)
-                                       /* Cascaded IRQ for MAC 1       */
-#define MSP_INT_CIC_SEC                (MSP_CIC_INTBASE + 12)
-                                       /* Cascaded IRQ for sec engine  */
-#define        MSP_INT_PER             (MSP_CIC_INTBASE + 13)
-                                       /* Peripheral interrupt         */
-#define        MSP_INT_TIMER0          (MSP_CIC_INTBASE + 14)
-                                       /* SLP timer 0                  */
-#define        MSP_INT_TIMER1          (MSP_CIC_INTBASE + 15)
-                                       /* SLP timer 1                  */
-#define        MSP_INT_TIMER2          (MSP_CIC_INTBASE + 16)
-                                       /* SLP timer 2                  */
-#define        MSP_INT_VPE0_TIMER      (MSP_CIC_INTBASE + 17)
-                                       /* VPE0 MIPS timer              */
-#define MSP_INT_BLKCP          (MSP_CIC_INTBASE + 18)
-                                       /* Block Copy                   */
-#define MSP_INT_UART0          (MSP_CIC_INTBASE + 19)
-                                       /* UART 0                       */
-#define MSP_INT_PCI            (MSP_CIC_INTBASE + 20)
-                                       /* PCI subsystem                */
-#define MSP_INT_EXT6           (MSP_CIC_INTBASE + 21)
-                                       /* External interrupt 5         */
-#define MSP_INT_PCI_MSI                (MSP_CIC_INTBASE + 22)
-                                       /* PCI Message Signal           */
-#define MSP_INT_CIC_SAR                (MSP_CIC_INTBASE + 23)
-                                       /* Cascaded ADSL2+ SAR IRQ      */
-#define MSP_INT_DSL            (MSP_CIC_INTBASE + 24)
-                                       /* ADSL2+ IRQ                   */
-#define MSP_INT_CIC_ERR                (MSP_CIC_INTBASE + 25)
-                                       /* SLP error condition          */
-#define MSP_INT_VPE1_TIMER     (MSP_CIC_INTBASE + 26)
-                                       /* VPE1 MIPS timer              */
-#define MSP_INT_VPE0_PC                (MSP_CIC_INTBASE + 27)
-                                       /* VPE0 Performance counter     */
-#define MSP_INT_VPE1_PC                (MSP_CIC_INTBASE + 28)
-                                       /* VPE1 Performance counter     */
-#define MSP_INT_EXT7           (MSP_CIC_INTBASE + 29)
-                                       /* External interrupt 5         */
-#define MSP_INT_VPE0_SW                (MSP_CIC_INTBASE + 30)
-                                       /* VPE0 Software interrupt      */
-#define MSP_INT_VPE1_SW                (MSP_CIC_INTBASE + 31)
-                                       /* VPE0 Software interrupt      */
-
-/*
- * IRQs cascaded on CIC PER interrupt (MSP_INT_PER)
- */
-#define MSP_PER_INTBASE                (MSP_CIC_INTBASE + 32)
-/* Reserved                                       0-1                  */
-#define MSP_INT_UART1          (MSP_PER_INTBASE + 2)
-                                       /* UART 1                       */
-/* Reserved                                       3-5                  */
-#define MSP_INT_2WIRE          (MSP_PER_INTBASE + 6)
-                                       /* 2-wire                       */
-#define MSP_INT_TM0            (MSP_PER_INTBASE + 7)
-                                       /* Peripheral timer block out 0 */
-#define MSP_INT_TM1            (MSP_PER_INTBASE + 8)
-                                       /* Peripheral timer block out 1 */
-/* Reserved                                       9                    */
-#define MSP_INT_SPRX           (MSP_PER_INTBASE + 10)
-                                       /* SPI RX complete              */
-#define MSP_INT_SPTX           (MSP_PER_INTBASE + 11)
-                                       /* SPI TX complete              */
-#define MSP_INT_GPIO           (MSP_PER_INTBASE + 12)
-                                       /* GPIO                         */
-#define MSP_INT_PER_ERR                (MSP_PER_INTBASE + 13)
-                                       /* Peripheral error             */
-/* Reserved                                       14-31                */
-
-#endif /* !_MSP_CIC_INT_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h
deleted file mode 100644 (file)
index 156f320..0000000
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- *
- * Macros for external SMP-safe access to the PMC MSP71xx reference
- * board GPIO pins
- *
- * Copyright 2010 PMC-Sierra, Inc.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MSP_GPIO_MACROS_H__
-#define __MSP_GPIO_MACROS_H__
-
-#include <msp_regops.h>
-#include <msp_regs.h>
-
-#ifdef CONFIG_PMC_MSP7120_GW
-#define MSP_NUM_GPIOS          20
-#else
-#define MSP_NUM_GPIOS          28
-#endif
-
-/* -- GPIO Enumerations -- */
-enum msp_gpio_data {
-       MSP_GPIO_LO = 0,
-       MSP_GPIO_HI = 1,
-       MSP_GPIO_NONE,          /* Special - Means pin is out of range */
-       MSP_GPIO_TOGGLE,        /* Special - Sets pin to opposite */
-};
-
-enum msp_gpio_mode {
-       MSP_GPIO_INPUT          = 0x0,
-       /* MSP_GPIO_ INTERRUPT  = 0x1,  Not supported yet */
-       MSP_GPIO_UART_INPUT     = 0x2,  /* Only GPIO 4 or 5 */
-       MSP_GPIO_OUTPUT         = 0x8,
-       MSP_GPIO_UART_OUTPUT    = 0x9,  /* Only GPIO 2 or 3 */
-       MSP_GPIO_PERIF_TIMERA   = 0x9,  /* Only GPIO 0 or 1 */
-       MSP_GPIO_PERIF_TIMERB   = 0xa,  /* Only GPIO 0 or 1 */
-       MSP_GPIO_UNKNOWN        = 0xb,  /* No such GPIO or mode */
-};
-
-/* -- Static Tables -- */
-
-/* Maps pins to data register */
-static volatile u32 * const MSP_GPIO_DATA_REGISTER[] = {
-       /* GPIO 0 and 1 on the first register */
-       GPIO_DATA1_REG, GPIO_DATA1_REG,
-       /* GPIO 2, 3, 4, and 5 on the second register */
-       GPIO_DATA2_REG, GPIO_DATA2_REG, GPIO_DATA2_REG, GPIO_DATA2_REG,
-       /* GPIO 6, 7, 8, and 9 on the third register */
-       GPIO_DATA3_REG, GPIO_DATA3_REG, GPIO_DATA3_REG, GPIO_DATA3_REG,
-       /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
-       GPIO_DATA4_REG, GPIO_DATA4_REG, GPIO_DATA4_REG, GPIO_DATA4_REG,
-       GPIO_DATA4_REG, GPIO_DATA4_REG,
-       /* GPIO 16 - 23 on the first strange EXTENDED register */
-       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
-       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
-       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
-       /* GPIO 24 - 27 on the second strange EXTENDED register */
-       EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG,
-       EXTENDED_GPIO2_REG,
-};
-
-/* Maps pins to mode register */
-static volatile u32 * const MSP_GPIO_MODE_REGISTER[] = {
-       /* GPIO 0 and 1 on the first register */
-       GPIO_CFG1_REG, GPIO_CFG1_REG,
-       /* GPIO 2, 3, 4, and 5 on the second register */
-       GPIO_CFG2_REG, GPIO_CFG2_REG, GPIO_CFG2_REG, GPIO_CFG2_REG,
-       /* GPIO 6, 7, 8, and 9 on the third register */
-       GPIO_CFG3_REG, GPIO_CFG3_REG, GPIO_CFG3_REG, GPIO_CFG3_REG,
-       /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
-       GPIO_CFG4_REG, GPIO_CFG4_REG, GPIO_CFG4_REG, GPIO_CFG4_REG,
-       GPIO_CFG4_REG, GPIO_CFG4_REG,
-       /* GPIO 16 - 23 on the first strange EXTENDED register */
-       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
-       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
-       EXTENDED_GPIO1_REG, EXTENDED_GPIO1_REG,
-       /* GPIO 24 - 27 on the second strange EXTENDED register */
-       EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG, EXTENDED_GPIO2_REG,
-       EXTENDED_GPIO2_REG,
-};
-
-/* Maps 'basic' pins to relative offset from 0 per register */
-static int MSP_GPIO_OFFSET[] = {
-       /* GPIO 0 and 1 on the first register */
-       0, 0,
-       /* GPIO 2, 3, 4, and 5 on the second register */
-       2, 2, 2, 2,
-       /* GPIO 6, 7, 8, and 9 on the third register */
-       6, 6, 6, 6,
-       /* GPIO 10, 11, 12, 13, 14, and 15 on the fourth register */
-       10, 10, 10, 10, 10, 10,
-};
-
-/* Maps MODE to allowed pin mask */
-static unsigned int MSP_GPIO_MODE_ALLOWED[] = {
-       0xffffffff,     /* Mode 0 - INPUT */
-       0x00000,        /* Mode 1 - INTERRUPT */
-       0x00030,        /* Mode 2 - UART_INPUT (GPIO 4, 5)*/
-       0, 0, 0, 0, 0,  /* Modes 3, 4, 5, 6, and 7 are reserved */
-       0xffffffff,     /* Mode 8 - OUTPUT */
-       0x0000f,        /* Mode 9 - UART_OUTPUT/
-                               PERF_TIMERA (GPIO 0, 1, 2, 3) */
-       0x00003,        /* Mode a - PERF_TIMERB (GPIO 0, 1) */
-       0x00000,        /* Mode b - Not really a mode! */
-};
-
-/* -- Bit masks -- */
-
-/* This gives you the 'register relative offset gpio' number */
-#define OFFSET_GPIO_NUMBER(gpio)       (gpio - MSP_GPIO_OFFSET[gpio])
-
-/* These take the 'register relative offset gpio' number */
-#define BASIC_DATA_REG_MASK(ogpio)             (1 << ogpio)
-#define BASIC_MODE_REG_VALUE(mode, ogpio)      \
-       (mode << BASIC_MODE_REG_SHIFT(ogpio))
-#define BASIC_MODE_REG_MASK(ogpio)             \
-       BASIC_MODE_REG_VALUE(0xf, ogpio)
-#define BASIC_MODE_REG_SHIFT(ogpio)            (ogpio * 4)
-#define BASIC_MODE_REG_FROM_REG(data, ogpio)   \
-       ((data & BASIC_MODE_REG_MASK(ogpio)) >> BASIC_MODE_REG_SHIFT(ogpio))
-
-/* These take the actual GPIO number (0 through 15) */
-#define BASIC_DATA_MASK(gpio)  \
-       BASIC_DATA_REG_MASK(OFFSET_GPIO_NUMBER(gpio))
-#define BASIC_MODE_MASK(gpio)  \
-       BASIC_MODE_REG_MASK(OFFSET_GPIO_NUMBER(gpio))
-#define BASIC_MODE(mode, gpio) \
-       BASIC_MODE_REG_VALUE(mode, OFFSET_GPIO_NUMBER(gpio))
-#define BASIC_MODE_SHIFT(gpio) \
-       BASIC_MODE_REG_SHIFT(OFFSET_GPIO_NUMBER(gpio))
-#define BASIC_MODE_FROM_REG(data, gpio)        \
-       BASIC_MODE_REG_FROM_REG(data, OFFSET_GPIO_NUMBER(gpio))
-
-/*
- * Each extended GPIO register is 32 bits long and is responsible for up to
- * eight GPIOs. The least significant 16 bits contain the set and clear bit
- * pair for each of the GPIOs. The most significant 16 bits contain the
- * disable and enable bit pair for each of the GPIOs. For example, the
- * extended GPIO reg for GPIOs 16-23 is as follows:
- *
- *     31: GPIO23_DISABLE
- *     ...
- *     19: GPIO17_DISABLE
- *     18: GPIO17_ENABLE
- *     17: GPIO16_DISABLE
- *     16: GPIO16_ENABLE
- *     ...
- *     3:  GPIO17_SET
- *     2:  GPIO17_CLEAR
- *     1:  GPIO16_SET
- *     0:  GPIO16_CLEAR
- */
-
-/* This gives the 'register relative offset gpio' number */
-#define EXTENDED_OFFSET_GPIO(gpio)     (gpio < 24 ? gpio - 16 : gpio - 24)
-
-/* These take the 'register relative offset gpio' number */
-#define EXTENDED_REG_DISABLE(ogpio)    (0x2 << ((ogpio * 2) + 16))
-#define EXTENDED_REG_ENABLE(ogpio)     (0x1 << ((ogpio * 2) + 16))
-#define EXTENDED_REG_SET(ogpio)                (0x2 << (ogpio * 2))
-#define EXTENDED_REG_CLR(ogpio)                (0x1 << (ogpio * 2))
-
-/* These take the actual GPIO number (16 through 27) */
-#define EXTENDED_DISABLE(gpio) \
-       EXTENDED_REG_DISABLE(EXTENDED_OFFSET_GPIO(gpio))
-#define EXTENDED_ENABLE(gpio)  \
-       EXTENDED_REG_ENABLE(EXTENDED_OFFSET_GPIO(gpio))
-#define EXTENDED_SET(gpio)     \
-       EXTENDED_REG_SET(EXTENDED_OFFSET_GPIO(gpio))
-#define EXTENDED_CLR(gpio)     \
-       EXTENDED_REG_CLR(EXTENDED_OFFSET_GPIO(gpio))
-
-#define EXTENDED_FULL_MASK             (0xffffffff)
-
-/* -- API inline-functions -- */
-
-/*
- * Gets the current value of the specified pin
- */
-static inline enum msp_gpio_data msp_gpio_pin_get(unsigned int gpio)
-{
-       u32 pinhi_mask = 0, pinhi_mask2 = 0;
-
-       if (gpio >= MSP_NUM_GPIOS)
-               return MSP_GPIO_NONE;
-
-       if (gpio < 16) {
-               pinhi_mask = BASIC_DATA_MASK(gpio);
-       } else {
-               /*
-                * Two cases are possible with the EXTENDED register:
-                *  - In output mode (ENABLED flag set), check the CLR bit
-                *  - In input mode (ENABLED flag not set), check the SET bit
-                */
-               pinhi_mask = EXTENDED_ENABLE(gpio) | EXTENDED_CLR(gpio);
-               pinhi_mask2 = EXTENDED_SET(gpio);
-       }
-       if (((*MSP_GPIO_DATA_REGISTER[gpio] & pinhi_mask) == pinhi_mask) ||
-           (*MSP_GPIO_DATA_REGISTER[gpio] & pinhi_mask2))
-               return MSP_GPIO_HI;
-       else
-               return MSP_GPIO_LO;
-}
-
-/* Sets the specified pin to the specified value */
-static inline void msp_gpio_pin_set(enum msp_gpio_data data, unsigned int gpio)
-{
-       if (gpio >= MSP_NUM_GPIOS)
-               return;
-
-       if (gpio < 16) {
-               if (data == MSP_GPIO_TOGGLE)
-                       toggle_reg32(MSP_GPIO_DATA_REGISTER[gpio],
-                                       BASIC_DATA_MASK(gpio));
-               else if (data == MSP_GPIO_HI)
-                       set_reg32(MSP_GPIO_DATA_REGISTER[gpio],
-                                       BASIC_DATA_MASK(gpio));
-               else
-                       clear_reg32(MSP_GPIO_DATA_REGISTER[gpio],
-                                       BASIC_DATA_MASK(gpio));
-       } else {
-               if (data == MSP_GPIO_TOGGLE) {
-                       /* Special ugly case:
-                        *   We have to read the CLR bit.
-                        *   If set, we write the CLR bit.
-                        *   If not, we write the SET bit.
-                        */
-                       u32 tmpdata;
-
-                       custom_read_reg32(MSP_GPIO_DATA_REGISTER[gpio],
-                                                               tmpdata);
-                       if (tmpdata & EXTENDED_CLR(gpio))
-                               tmpdata = EXTENDED_CLR(gpio);
-                       else
-                               tmpdata = EXTENDED_SET(gpio);
-                       custom_write_reg32(MSP_GPIO_DATA_REGISTER[gpio],
-                                                               tmpdata);
-               } else {
-                       u32 newdata;
-
-                       if (data == MSP_GPIO_HI)
-                               newdata = EXTENDED_SET(gpio);
-                       else
-                               newdata = EXTENDED_CLR(gpio);
-                       set_value_reg32(MSP_GPIO_DATA_REGISTER[gpio],
-                                               EXTENDED_FULL_MASK, newdata);
-               }
-       }
-}
-
-/* Sets the specified pin to the specified value */
-static inline void msp_gpio_pin_hi(unsigned int gpio)
-{
-       msp_gpio_pin_set(MSP_GPIO_HI, gpio);
-}
-
-/* Sets the specified pin to the specified value */
-static inline void msp_gpio_pin_lo(unsigned int gpio)
-{
-       msp_gpio_pin_set(MSP_GPIO_LO, gpio);
-}
-
-/* Sets the specified pin to the opposite value */
-static inline void msp_gpio_pin_toggle(unsigned int gpio)
-{
-       msp_gpio_pin_set(MSP_GPIO_TOGGLE, gpio);
-}
-
-/* Gets the mode of the specified pin */
-static inline enum msp_gpio_mode msp_gpio_pin_get_mode(unsigned int gpio)
-{
-       enum msp_gpio_mode retval = MSP_GPIO_UNKNOWN;
-       uint32_t data;
-
-       if (gpio >= MSP_NUM_GPIOS)
-               return retval;
-
-       data = *MSP_GPIO_MODE_REGISTER[gpio];
-
-       if (gpio < 16) {
-               retval = BASIC_MODE_FROM_REG(data, gpio);
-       } else {
-               /* Extended pins can only be either INPUT or OUTPUT */
-               if (data & EXTENDED_ENABLE(gpio))
-                       retval = MSP_GPIO_OUTPUT;
-               else
-                       retval = MSP_GPIO_INPUT;
-       }
-
-       return retval;
-}
-
-/*
- * Sets the specified mode on the requested pin
- * Returns 0 on success, or -1 if that mode is not allowed on this pin
- */
-static inline int msp_gpio_pin_mode(enum msp_gpio_mode mode, unsigned int gpio)
-{
-       u32 modemask, newmode;
-
-       if ((1 << gpio) & ~MSP_GPIO_MODE_ALLOWED[mode])
-               return -1;
-
-       if (gpio >= MSP_NUM_GPIOS)
-               return -1;
-
-       if (gpio < 16) {
-               modemask = BASIC_MODE_MASK(gpio);
-               newmode =  BASIC_MODE(mode, gpio);
-       } else {
-               modemask = EXTENDED_FULL_MASK;
-               if (mode == MSP_GPIO_INPUT)
-                       newmode = EXTENDED_DISABLE(gpio);
-               else
-                       newmode = EXTENDED_ENABLE(gpio);
-       }
-       /* Do the set atomically */
-       set_value_reg32(MSP_GPIO_MODE_REGISTER[gpio], modemask, newmode);
-
-       return 0;
-}
-
-#endif /* __MSP_GPIO_MACROS_H__ */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h
deleted file mode 100644 (file)
index 1d9f054..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Defines for the MSP interrupt handlers.
- *
- * Copyright (C) 2005, PMC-Sierra, Inc.  All rights reserved.
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- */
-
-#ifndef _MSP_INT_H
-#define _MSP_INT_H
-
-/*
- * The PMC-Sierra MSP product line has at least two different interrupt
- * controllers, the SLP register based scheme and the CIC interrupt
- * controller block mechanism.  This file distinguishes between them
- * so that devices see a uniform interface.
- */
-
-#if defined(CONFIG_IRQ_MSP_SLP)
-       #include "msp_slp_int.h"
-#elif defined(CONFIG_IRQ_MSP_CIC)
-       #include "msp_cic_int.h"
-#else
-       #error "What sort of interrupt controller does *your* MSP have?"
-#endif
-
-#endif /* !_MSP_INT_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h
deleted file mode 100644 (file)
index 4156069..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright (c) 2000-2006 PMC-Sierra INC.
- *
- *     This program is free software; you can redistribute it
- *     and/or modify it under the terms of the GNU General
- *     Public License as published by the Free Software
- *     Foundation; either version 2 of the License, or (at your
- *     option) any later version.
- *
- *     This program is distributed in the hope that it will be
- *     useful, but WITHOUT ANY WARRANTY; without even the implied
- *     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- *     PURPOSE.  See the GNU General Public License for more
- *     details.
- *
- *     You should have received a copy of the GNU General Public
- *     License along with this program; if not, write to the Free
- *     Software Foundation, Inc., 675 Mass Ave, Cambridge, MA
- *     02139, USA.
- *
- * PMC-SIERRA INC. DISCLAIMS ANY LIABILITY OF ANY KIND
- * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS
- * SOFTWARE.
- */
-
-#ifndef _MSP_PCI_H_
-#define _MSP_PCI_H_
-
-#define MSP_HAS_PCI(ID)        (((u32)(ID) <= 0x4236) && ((u32)(ID) >= 0x4220))
-
-/*
- * It is convenient to program the OATRAN register so that
- * Athena virtual address space and PCI address space are
- * the same. This is not a requirement, just a convenience.
- *
- * The only hard restrictions on the value of OATRAN is that
- * OATRAN must not be programmed to allow translated memory
- * addresses to fall within the lowest 512MB of
- * PCI address space. This region is hardcoded
- * for use as Athena PCI Host Controller target
- * access memory space to the Athena's SDRAM.
- *
- * Note that OATRAN applies only to memory accesses, not
- * to I/O accesses.
- *
- * To program OATRAN to make Athena virtual address space
- * and PCI address space have the same values, OATRAN
- * is to be programmed to 0xB8000000. The top seven
- * bits of the value mimic the seven bits clipped off
- * by the PCI Host controller.
- *
- * With OATRAN at the said value, when the CPU does
- * an access to its virtual address at, say 0xB900_5000,
- * the address appearing on the PCI bus will be
- * 0xB900_5000.
- *    - Michael Penner
- */
-#define MSP_PCI_OATRAN         0xB8000000UL
-
-#define MSP_PCI_SPACE_BASE     (MSP_PCI_OATRAN + 0x1002000UL)
-#define MSP_PCI_SPACE_SIZE     (0x3000000UL - 0x2000)
-#define MSP_PCI_SPACE_END \
-               (MSP_PCI_SPACE_BASE + MSP_PCI_SPACE_SIZE - 1)
-#define MSP_PCI_IOSPACE_BASE   (MSP_PCI_OATRAN + 0x1001000UL)
-#define MSP_PCI_IOSPACE_SIZE   0x1000
-#define MSP_PCI_IOSPACE_END  \
-               (MSP_PCI_IOSPACE_BASE + MSP_PCI_IOSPACE_SIZE - 1)
-
-/* IRQ for PCI status interrupts */
-#define PCI_STAT_IRQ   20
-
-#define QFLUSH_REG_1   0xB7F40000
-
-typedef volatile unsigned int pcireg;
-typedef void * volatile ppcireg;
-
-struct pci_block_copy
-{
-    pcireg   unused1; /* +0x00 */
-    pcireg   unused2; /* +0x04 */
-    ppcireg  unused3; /* +0x08 */
-    ppcireg  unused4; /* +0x0C */
-    pcireg   unused5; /* +0x10 */
-    pcireg   unused6; /* +0x14 */
-    pcireg   unused7; /* +0x18 */
-    ppcireg  unused8; /* +0x1C */
-    ppcireg  unused9; /* +0x20 */
-    pcireg   unusedA; /* +0x24 */
-    ppcireg  unusedB; /* +0x28 */
-    ppcireg  unusedC; /* +0x2C */
-};
-
-enum
-{
-    config_device_vendor,  /* 0 */
-    config_status_command, /* 1 */
-    config_class_revision, /* 2 */
-    config_BIST_header_latency_cache, /* 3 */
-    config_BAR0,           /* 4 */
-    config_BAR1,           /* 5 */
-    config_BAR2,           /* 6 */
-    config_not_used7,      /* 7 */
-    config_not_used8,      /* 8 */
-    config_not_used9,      /* 9 */
-    config_CIS,            /* 10 */
-    config_subsystem,      /* 11 */
-    config_not_used12,     /* 12 */
-    config_capabilities,   /* 13 */
-    config_not_used14,     /* 14 */
-    config_lat_grant_irq,  /* 15 */
-    config_message_control,/* 16 */
-    config_message_addr,   /* 17 */
-    config_message_data,   /* 18 */
-    config_VPD_addr,       /* 19 */
-    config_VPD_data,       /* 20 */
-    config_maxregs         /* 21 - number of registers */
-};
-
-struct msp_pci_regs
-{
-    pcireg hop_unused_00; /* +0x00 */
-    pcireg hop_unused_04; /* +0x04 */
-    pcireg hop_unused_08; /* +0x08 */
-    pcireg hop_unused_0C; /* +0x0C */
-    pcireg hop_unused_10; /* +0x10 */
-    pcireg hop_unused_14; /* +0x14 */
-    pcireg hop_unused_18; /* +0x18 */
-    pcireg hop_unused_1C; /* +0x1C */
-    pcireg hop_unused_20; /* +0x20 */
-    pcireg hop_unused_24; /* +0x24 */
-    pcireg hop_unused_28; /* +0x28 */
-    pcireg hop_unused_2C; /* +0x2C */
-    pcireg hop_unused_30; /* +0x30 */
-    pcireg hop_unused_34; /* +0x34 */
-    pcireg if_control;    /* +0x38 */
-    pcireg oatran;        /* +0x3C */
-    pcireg reset_ctl;     /* +0x40 */
-    pcireg config_addr;   /* +0x44 */
-    pcireg hop_unused_48; /* +0x48 */
-    pcireg msg_signaled_int_status; /* +0x4C */
-    pcireg msg_signaled_int_mask;   /* +0x50 */
-    pcireg if_status;     /* +0x54 */
-    pcireg if_mask;       /* +0x58 */
-    pcireg hop_unused_5C; /* +0x5C */
-    pcireg hop_unused_60; /* +0x60 */
-    pcireg hop_unused_64; /* +0x64 */
-    pcireg hop_unused_68; /* +0x68 */
-    pcireg hop_unused_6C; /* +0x6C */
-    pcireg hop_unused_70; /* +0x70 */
-
-    struct pci_block_copy pci_bc[2] __attribute__((aligned(64)));
-
-    pcireg error_hdr1; /* +0xE0 */
-    pcireg error_hdr2; /* +0xE4 */
-
-    pcireg config[config_maxregs] __attribute__((aligned(256)));
-
-};
-
-#define BPCI_CFGADDR_BUSNUM_SHF 16
-#define BPCI_CFGADDR_FUNCTNUM_SHF 8
-#define BPCI_CFGADDR_REGNUM_SHF 2
-#define BPCI_CFGADDR_ENABLE (1<<31)
-
-#define BPCI_IFCONTROL_RTO (1<<20) /* Retry timeout */
-#define BPCI_IFCONTROL_HCE (1<<16) /* Host configuration enable */
-#define BPCI_IFCONTROL_CTO_SHF 12  /* Shift count for CTO bits */
-#define BPCI_IFCONTROL_SE  (1<<5)  /* Enable exceptions on errors */
-#define BPCI_IFCONTROL_BIST (1<<4) /* Use BIST in per. mode */
-#define BPCI_IFCONTROL_CAP (1<<3)  /* Enable capabilities */
-#define BPCI_IFCONTROL_MMC_SHF 0   /* Shift count for MMC bits */
-
-#define BPCI_IFSTATUS_MGT  (1<<8)  /* Master Grant timeout */
-#define BPCI_IFSTATUS_MTT  (1<<9)  /* Master TRDY timeout */
-#define BPCI_IFSTATUS_MRT  (1<<10) /* Master retry timeout */
-#define BPCI_IFSTATUS_BC0F (1<<13) /* Block copy 0 fault */
-#define BPCI_IFSTATUS_BC1F (1<<14) /* Block copy 1 fault */
-#define BPCI_IFSTATUS_PCIU (1<<15) /* PCI unable to respond */
-#define BPCI_IFSTATUS_BSIZ (1<<16) /* PCI access with illegal size */
-#define BPCI_IFSTATUS_BADD (1<<17) /* PCI access with illegal addr */
-#define BPCI_IFSTATUS_RTO  (1<<18) /* Retry time out */
-#define BPCI_IFSTATUS_SER  (1<<19) /* System error */
-#define BPCI_IFSTATUS_PER  (1<<20) /* Parity error */
-#define BPCI_IFSTATUS_LCA  (1<<21) /* Local CPU abort */
-#define BPCI_IFSTATUS_MEM  (1<<22) /* Memory prot. violation */
-#define BPCI_IFSTATUS_ARB  (1<<23) /* Arbiter timed out */
-#define BPCI_IFSTATUS_STA  (1<<27) /* Signaled target abort */
-#define BPCI_IFSTATUS_TA   (1<<28) /* Target abort */
-#define BPCI_IFSTATUS_MA   (1<<29) /* Master abort */
-#define BPCI_IFSTATUS_PEI  (1<<30) /* Parity error as initiator */
-#define BPCI_IFSTATUS_PET  (1<<31) /* Parity error as target */
-
-#define BPCI_RESETCTL_PR (1<<0)    /* True if reset asserted */
-#define BPCI_RESETCTL_RT (1<<4)    /* Release time */
-#define BPCI_RESETCTL_CT (1<<8)    /* Config time */
-#define BPCI_RESETCTL_PE (1<<12)   /* PCI enabled */
-#define BPCI_RESETCTL_HM (1<<13)   /* PCI host mode */
-#define BPCI_RESETCTL_RI (1<<14)   /* PCI reset in */
-
-extern struct msp_pci_regs msp_pci_regs
-                       __attribute__((section(".register")));
-extern unsigned long msp_pci_config_space
-                       __attribute__((section(".register")));
-
-#endif /* !_MSP_PCI_H_ */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h
deleted file mode 100644 (file)
index 786d82d..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * MIPS boards bootprom interface for the Linux kernel.
- *
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- * Author: Carsten Langgaard, carstenl@mips.com
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- */
-
-#ifndef _ASM_MSP_PROM_H
-#define _ASM_MSP_PROM_H
-
-#include <linux/types.h>
-
-#define DEVICEID                       "deviceid"
-#define FEATURES                       "features"
-#define PROM_ENV                       "prom_env"
-#define PROM_ENV_FILE                  "/proc/"PROM_ENV
-#define PROM_ENV_SIZE                  256
-
-#define CPU_DEVID_FAMILY               0x0000ff00
-#define CPU_DEVID_REVISION             0x000000ff
-
-#define FPGA_IS_POLO(revision) \
-               (((revision >= 0xb0) && (revision < 0xd0)))
-#define FPGA_IS_5000(revision) \
-               ((revision >= 0x80) && (revision <= 0x90))
-#define        FPGA_IS_ZEUS(revision)          ((revision < 0x7f))
-#define FPGA_IS_DUET(revision) \
-               (((revision >= 0xa0) && (revision < 0xb0)))
-#define FPGA_IS_MSP4200(revision)      ((revision >= 0xd0))
-#define FPGA_IS_MSP7100(revision)      ((revision >= 0xd0))
-
-#define MACHINE_TYPE_POLO              "POLO"
-#define MACHINE_TYPE_DUET              "DUET"
-#define        MACHINE_TYPE_ZEUS               "ZEUS"
-#define MACHINE_TYPE_MSP2000REVB       "MSP2000REVB"
-#define MACHINE_TYPE_MSP5000           "MSP5000"
-#define MACHINE_TYPE_MSP4200           "MSP4200"
-#define MACHINE_TYPE_MSP7120           "MSP7120"
-#define MACHINE_TYPE_MSP7130           "MSP7130"
-#define MACHINE_TYPE_OTHER             "OTHER"
-
-#define MACHINE_TYPE_POLO_FPGA         "POLO-FPGA"
-#define MACHINE_TYPE_DUET_FPGA         "DUET-FPGA"
-#define        MACHINE_TYPE_ZEUS_FPGA          "ZEUS_FPGA"
-#define MACHINE_TYPE_MSP2000REVB_FPGA  "MSP2000REVB-FPGA"
-#define MACHINE_TYPE_MSP5000_FPGA      "MSP5000-FPGA"
-#define MACHINE_TYPE_MSP4200_FPGA      "MSP4200-FPGA"
-#define MACHINE_TYPE_MSP7100_FPGA      "MSP7100-FPGA"
-#define MACHINE_TYPE_OTHER_FPGA                "OTHER-FPGA"
-
-/* Device Family definitions */
-#define FAMILY_FPGA                    0x0000
-#define FAMILY_ZEUS                    0x1000
-#define FAMILY_POLO                    0x2000
-#define FAMILY_DUET                    0x4000
-#define FAMILY_TRIAD                   0x5000
-#define FAMILY_MSP4200                 0x4200
-#define FAMILY_MSP4200_FPGA            0x4f00
-#define FAMILY_MSP7100                 0x7100
-#define FAMILY_MSP7100_FPGA            0x7f00
-
-/* Device Type definitions */
-#define TYPE_MSP7120                   0x7120
-#define TYPE_MSP7130                   0x7130
-
-#define ENET_KEY               'E'
-#define ENETTXD_KEY            'e'
-#define PCI_KEY                        'P'
-#define PCIMUX_KEY             'p'
-#define SEC_KEY                        'S'
-#define SPAD_KEY               'D'
-#define TDM_KEY                        'T'
-#define ZSP_KEY                        'Z'
-
-#define FEATURE_NOEXIST                '-'
-#define FEATURE_EXIST          '+'
-
-#define ENET_MII               'M'
-#define ENET_RMII              'R'
-
-#define        ENETTXD_FALLING         'F'
-#define ENETTXD_RISING         'R'
-
-#define PCI_HOST               'H'
-#define PCI_PERIPHERAL         'P'
-
-#define PCIMUX_FULL            'F'
-#define PCIMUX_SINGLE          'S'
-
-#define SEC_DUET               'D'
-#define SEC_POLO               'P'
-#define SEC_SLOW               'S'
-#define SEC_TRIAD              'T'
-
-#define SPAD_POLO              'P'
-
-#define TDM_DUET               'D'     /* DUET TDMs might exist */
-#define TDM_POLO               'P'     /* POLO TDMs might exist */
-#define TDM_TRIAD              'T'     /* TRIAD TDMs might exist */
-
-#define ZSP_DUET               'D'     /* one DUET zsp engine */
-#define ZSP_TRIAD              'T'     /* two TRIAD zsp engines */
-
-extern char *prom_getenv(char *name);
-extern void prom_init_cmdline(void);
-extern void prom_meminit(void);
-extern void prom_fixup_mem_map(unsigned long start_mem,
-                              unsigned long end_mem);
-
-extern int get_ethernet_addr(char *ethaddr_name, char *ethernet_addr);
-extern unsigned long get_deviceid(void);
-extern char identify_enet(unsigned long interface_num);
-extern char identify_enetTxD(unsigned long interface_num);
-extern char identify_pci(void);
-extern char identify_sec(void);
-extern char identify_spad(void);
-extern char identify_sec(void);
-extern char identify_tdm(void);
-extern char identify_zsp(void);
-extern unsigned long identify_family(void);
-extern unsigned long identify_revision(void);
-
-/*
- * The following macro calls prom_printf and puts the format string
- * into an init section so it can be reclaimed.
- */
-#define ppfinit(f, x...) \
-       do { \
-               static char _f[] __initdata = KERN_INFO f; \
-               printk(_f, ## x); \
-       } while (0)
-
-/* Memory descriptor management. */
-#define PROM_MAX_PMEMBLOCKS    7       /* 6 used */
-
-enum yamon_memtypes {
-       yamon_dontuse,
-       yamon_prom,
-       yamon_free,
-};
-
-struct prom_pmemblock {
-       unsigned long base; /* Within KSEG0. */
-       unsigned int size;  /* In bytes. */
-       unsigned int type;  /* free or prom memory */
-};
-
-extern int prom_argc;
-extern char **prom_argv;
-extern char **prom_envp;
-extern int *prom_vec;
-extern struct prom_pmemblock *prom_getmdesc(void);
-
-#endif /* !_ASM_MSP_PROM_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h
deleted file mode 100644 (file)
index 7d41474..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * SMP/VPE-safe functions to access "registers" (see note).
- *
- * NOTES:
-* - These macros use ll/sc instructions, so it is your responsibility to
- * ensure these are available on your platform before including this file.
- * - The MIPS32 spec states that ll/sc results are undefined for uncached
- * accesses. This means they can't be used on HW registers accessed
- * through kseg1. Code which requires these macros for this purpose must
- * front-end the registers with cached memory "registers" and have a single
- * thread update the actual HW registers.
- * - A maximum of 2k of code can be inserted between ll and sc. Every
- * memory accesses between the instructions will increase the chance of
- * sc failing and having to loop.
- * - When using custom_read_reg32/custom_write_reg32 only perform the
- * necessary logical operations on the register value in between these
- * two calls. All other logic should be performed before the first call.
-  * - There is a bug on the R10000 chips which has a workaround. If you
- * are affected by this bug, make sure to define the symbol 'R10000_LLSC_WAR'
- * to be non-zero.  If you are using this header from within linux, you may
- * include <asm/war.h> before including this file to have this defined
- * appropriately for you.
- *
- * Copyright 2005-2007 PMC-Sierra, Inc.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
- *  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *  LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF USE,
- *  DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *  THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc., 675
- *  Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __ASM_REGOPS_H__
-#define __ASM_REGOPS_H__
-
-#include <linux/types.h>
-
-#include <asm/war.h>
-
-#ifndef R10000_LLSC_WAR
-#define R10000_LLSC_WAR 0
-#endif
-
-#if R10000_LLSC_WAR == 1
-#define __beqz "beqzl  "
-#else
-#define __beqz "beqz   "
-#endif
-
-#ifndef _LINUX_TYPES_H
-typedef unsigned int u32;
-#endif
-
-/*
- * Sets all the masked bits to the corresponding value bits
- */
-static inline void set_value_reg32(volatile u32 *const addr,
-                                       u32 const mask,
-                                       u32 const value)
-{
-       u32 temp;
-
-       __asm__ __volatile__(
-       "       .set    push                            \n"
-       "       .set    mips3                           \n"
-       "1:     ll      %0, %1  # set_value_reg32       \n"
-       "       and     %0, %2                          \n"
-       "       or      %0, %3                          \n"
-       "       sc      %0, %1                          \n"
-       "       "__beqz"%0, 1b                          \n"
-       "       nop                                     \n"
-       "       .set    pop                             \n"
-       : "=&r" (temp), "=m" (*addr)
-       : "ir" (~mask), "ir" (value), "m" (*addr));
-}
-
-/*
- * Sets all the masked bits to '1'
- */
-static inline void set_reg32(volatile u32 *const addr,
-                               u32 const mask)
-{
-       u32 temp;
-
-       __asm__ __volatile__(
-       "       .set    push                            \n"
-       "       .set    mips3                           \n"
-       "1:     ll      %0, %1          # set_reg32     \n"
-       "       or      %0, %2                          \n"
-       "       sc      %0, %1                          \n"
-       "       "__beqz"%0, 1b                          \n"
-       "       nop                                     \n"
-       "       .set    pop                             \n"
-       : "=&r" (temp), "=m" (*addr)
-       : "ir" (mask), "m" (*addr));
-}
-
-/*
- * Sets all the masked bits to '0'
- */
-static inline void clear_reg32(volatile u32 *const addr,
-                               u32 const mask)
-{
-       u32 temp;
-
-       __asm__ __volatile__(
-       "       .set    push                            \n"
-       "       .set    mips3                           \n"
-       "1:     ll      %0, %1          # clear_reg32   \n"
-       "       and     %0, %2                          \n"
-       "       sc      %0, %1                          \n"
-       "       "__beqz"%0, 1b                          \n"
-       "       nop                                     \n"
-       "       .set    pop                             \n"
-       : "=&r" (temp), "=m" (*addr)
-       : "ir" (~mask), "m" (*addr));
-}
-
-/*
- * Toggles all masked bits from '0' to '1' and '1' to '0'
- */
-static inline void toggle_reg32(volatile u32 *const addr,
-                               u32 const mask)
-{
-       u32 temp;
-
-       __asm__ __volatile__(
-       "       .set    push                            \n"
-       "       .set    mips3                           \n"
-       "1:     ll      %0, %1          # toggle_reg32  \n"
-       "       xor     %0, %2                          \n"
-       "       sc      %0, %1                          \n"
-       "       "__beqz"%0, 1b                          \n"
-       "       nop                                     \n"
-       "       .set    pop                             \n"
-       : "=&r" (temp), "=m" (*addr)
-       : "ir" (mask), "m" (*addr));
-}
-
-/*
- * Read all masked bits others are returned as '0'
- */
-static inline u32 read_reg32(volatile u32 *const addr,
-                               u32 const mask)
-{
-       u32 temp;
-
-       __asm__ __volatile__(
-       "       .set    push                            \n"
-       "       .set    noreorder                       \n"
-       "       lw      %0, %1          # read          \n"
-       "       and     %0, %2          # mask          \n"
-       "       .set    pop                             \n"
-       : "=&r" (temp)
-       : "m" (*addr), "ir" (mask));
-
-       return temp;
-}
-
-/*
- * blocking_read_reg32 - Read address with blocking load
- *
- * Uncached writes need to be read back to ensure they reach RAM.
- * The returned value must be 'used' to prevent from becoming a
- * non-blocking load.
- */
-static inline u32 blocking_read_reg32(volatile u32 *const addr)
-{
-       u32 temp;
-
-       __asm__ __volatile__(
-       "       .set    push                            \n"
-       "       .set    noreorder                       \n"
-       "       lw      %0, %1          # read          \n"
-       "       move    %0, %0          # block         \n"
-       "       .set    pop                             \n"
-       : "=&r" (temp)
-       : "m" (*addr));
-
-       return temp;
-}
-
-/*
- * For special strange cases only:
- *
- * If you need custom processing within a ll/sc loop, use the following macros
- * VERY CAREFULLY:
- *
- *   u32 tmp;                          <-- Define a variable to hold the data
- *
- *   custom_read_reg32(address, tmp);  <-- Reads the address and put the value
- *                                             in the 'tmp' variable given
- *
- *     From here on out, you are (basically) atomic, so don't do anything too
- *     fancy!
- *     Also, this code may loop if the end of this block fails to write
- *     everything back safely due do the other CPU, so do NOT do anything
- *     with side-effects!
- *
- *   custom_write_reg32(address, tmp); <-- Writes back 'tmp' safely.
- */
-#define custom_read_reg32(address, tmp)                                \
-       __asm__ __volatile__(                                   \
-       "       .set    push                            \n"     \
-       "       .set    mips3                           \n"     \
-       "1:     ll      %0, %1  #custom_read_reg32      \n"     \
-       "       .set    pop                             \n"     \
-       : "=r" (tmp), "=m" (*address)                           \
-       : "m" (*address))
-
-#define custom_write_reg32(address, tmp)                       \
-       __asm__ __volatile__(                                   \
-       "       .set    push                            \n"     \
-       "       .set    mips3                           \n"     \
-       "       sc      %0, %1  #custom_write_reg32     \n"     \
-       "       "__beqz"%0, 1b                          \n"     \
-       "       nop                                     \n"     \
-       "       .set    pop                             \n"     \
-       : "=&r" (tmp), "=m" (*address)                          \
-       : "0" (tmp), "m" (*address))
-
-#endif  /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h
deleted file mode 100644 (file)
index 692c1b6..0000000
+++ /dev/null
@@ -1,664 +0,0 @@
-/*
- * Defines for the address space, registers and register configuration
- * (bit masks, access macros etc) for the PMC-Sierra line of MSP products.
- * This file contains addess maps for all the devices in the line of
- * products but only has register definitions and configuration masks for
- * registers which aren't definitely associated with any device.  Things
- * like clock settings, reset access, the ELB etc.  Individual device
- * drivers will reference the appropriate XXX_BASE value defined here
- * and have individual registers offset from that.
- *
- * Copyright (C) 2005-2007 PMC-Sierra, Inc.  All rights reserved.
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- */
-
-#include <asm/addrspace.h>
-#include <linux/types.h>
-
-#ifndef _ASM_MSP_REGS_H
-#define _ASM_MSP_REGS_H
-
-/*
- ########################################################################
- #  Address space and device base definitions                           #
- ########################################################################
- */
-
-/*
- ***************************************************************************
- * System Logic and Peripherals (ELB, UART0, etc) device address space     *
- ***************************************************************************
- */
-#define MSP_SLP_BASE           0x1c000000
-                                       /* System Logic and Peripherals */
-#define MSP_RST_BASE           (MSP_SLP_BASE + 0x10)
-                                       /* System reset register base   */
-#define MSP_RST_SIZE           0x0C    /* System reset register space  */
-
-#define MSP_WTIMER_BASE                (MSP_SLP_BASE + 0x04C)
-                                       /* watchdog timer base          */
-#define MSP_ITIMER_BASE                (MSP_SLP_BASE + 0x054)
-                                       /* internal timer base          */
-#define MSP_UART0_BASE         (MSP_SLP_BASE + 0x100)
-                                       /* UART0 controller base        */
-#define MSP_BCPY_CTRL_BASE     (MSP_SLP_BASE + 0x120)
-                                       /* Block Copy controller base   */
-#define MSP_BCPY_DESC_BASE     (MSP_SLP_BASE + 0x160)
-                                       /* Block Copy descriptor base   */
-
-/*
- ***************************************************************************
- * PCI address space                                                       *
- ***************************************************************************
- */
-#define MSP_PCI_BASE           0x19000000
-
-/*
- ***************************************************************************
- * MSbus device address space                                              *
- ***************************************************************************
- */
-#define MSP_MSB_BASE           0x18000000
-                                       /* MSbus address start          */
-#define MSP_PER_BASE           (MSP_MSB_BASE + 0x400000)
-                                       /* Peripheral device registers  */
-#define MSP_MAC0_BASE          (MSP_MSB_BASE + 0x600000)
-                                       /* MAC A device registers       */
-#define MSP_MAC1_BASE          (MSP_MSB_BASE + 0x700000)
-                                       /* MAC B device registers       */
-#define MSP_MAC_SIZE           0xE0    /* MAC register space           */
-
-#define MSP_SEC_BASE           (MSP_MSB_BASE + 0x800000)
-                                       /* Security Engine registers    */
-#define MSP_MAC2_BASE          (MSP_MSB_BASE + 0x900000)
-                                       /* MAC C device registers       */
-#define MSP_ADSL2_BASE         (MSP_MSB_BASE + 0xA80000)
-                                       /* ADSL2 device registers       */
-#define MSP_USB0_BASE          (MSP_MSB_BASE + 0xB00000)
-                                       /* USB0 device registers        */
-#define MSP_USB1_BASE          (MSP_MSB_BASE + 0x300000)
-                                       /* USB1 device registers        */
-#define MSP_CPUIF_BASE         (MSP_MSB_BASE + 0xC00000)
-                                       /* CPU interface registers      */
-
-/* Devices within the MSbus peripheral block */
-#define MSP_UART1_BASE         (MSP_PER_BASE + 0x030)
-                                       /* UART1 controller base        */
-#define MSP_SPI_BASE           (MSP_PER_BASE + 0x058)
-                                       /* SPI/MPI control registers    */
-#define MSP_TWI_BASE           (MSP_PER_BASE + 0x090)
-                                       /* Two-wire control registers   */
-#define MSP_PTIMER_BASE                (MSP_PER_BASE + 0x0F0)
-                                       /* Programmable timer control   */
-
-/*
- ***************************************************************************
- * Physical Memory configuration address space                             *
- ***************************************************************************
- */
-#define MSP_MEM_CFG_BASE       0x17f00000
-
-#define MSP_MEM_INDIRECT_CTL_10        0x10
-
-/*
- * Notes:
- *  1) The SPI registers are split into two blocks, one offset from the
- *     MSP_SPI_BASE by 0x00 and the other offset from the MSP_SPI_BASE by
- *     0x68.  The SPI driver definitions for the register must be aware
- *     of this.
- *  2) The block copy engine register are divided into two regions, one
- *     for the control/configuration of the engine proper and one for the
- *     values of the descriptors used in the copy process.  These have
- *     different base defines (CTRL_BASE vs DESC_BASE)
- *  3) These constants are for physical addresses which means that they
- *     work correctly with "ioremap" and friends.  This means that device
- *     drivers will need to remap these addresses using ioremap and perhaps
- *     the readw/writew macros.  Or they could use the regptr() macro
- *     defined below, but the readw/writew calls are the correct thing.
- *  4) The UARTs have an additional status register offset from the base
- *     address.  This register isn't used in the standard 8250 driver but
- *     may be used in other software.  Consult the hardware datasheet for
- *     offset details.
- *  5) For some unknown reason the security engine (MSP_SEC_BASE) registers
- *     start at an offset of 0x84 from the base address but the block of
- *     registers before this is reserved for the security engine.  The
- *     driver will have to be aware of this but it makes the register
- *     definitions line up better with the documentation.
- */
-
-/*
- ########################################################################
- #  System register definitions.  Not associated with a specific device #
- ########################################################################
- */
-
-/*
- * This macro maps the physical register number into uncached space
- * and (for C code) casts it into a u32 pointer so it can be dereferenced
- * Normally these would be accessed with ioremap and readX/writeX, but
- * these are convenient for a lot of internal kernel code.
- */
-#ifdef __ASSEMBLER__
-       #define regptr(addr) (KSEG1ADDR(addr))
-#else
-       #define regptr(addr) ((volatile u32 *const)(KSEG1ADDR(addr)))
-#endif
-
-/*
- ***************************************************************************
- * System Logic and Peripherals (RESET, ELB, etc) registers                *
- ***************************************************************************
- */
-
-/* System Control register definitions */
-#define        DEV_ID_REG      regptr(MSP_SLP_BASE + 0x00)
-                                       /* Device-ID                 RO */
-#define        FWR_ID_REG      regptr(MSP_SLP_BASE + 0x04)
-                                       /* Firmware-ID Register      RW */
-#define        SYS_ID_REG0     regptr(MSP_SLP_BASE + 0x08)
-                                       /* System-ID Register-0      RW */
-#define        SYS_ID_REG1     regptr(MSP_SLP_BASE + 0x0C)
-                                       /* System-ID Register-1      RW */
-
-/* System Reset register definitions */
-#define        RST_STS_REG     regptr(MSP_SLP_BASE + 0x10)
-                                       /* System Reset Status       RO */
-#define        RST_SET_REG     regptr(MSP_SLP_BASE + 0x14)
-                                       /* System Set Reset          WO */
-#define        RST_CLR_REG     regptr(MSP_SLP_BASE + 0x18)
-                                       /* System Clear Reset        WO */
-
-/* System Clock Registers */
-#define PCI_SLP_REG    regptr(MSP_SLP_BASE + 0x1C)
-                                       /* PCI clock generator       RW */
-#define URT_SLP_REG    regptr(MSP_SLP_BASE + 0x20)
-                                       /* UART clock generator      RW */
-/* reserved                  (MSP_SLP_BASE + 0x24)                     */
-/* reserved                  (MSP_SLP_BASE + 0x28)                     */
-#define PLL1_SLP_REG   regptr(MSP_SLP_BASE + 0x2C)
-                                       /* PLL1 clock generator      RW */
-#define PLL0_SLP_REG   regptr(MSP_SLP_BASE + 0x30)
-                                       /* PLL0 clock generator      RW */
-#define MIPS_SLP_REG   regptr(MSP_SLP_BASE + 0x34)
-                                       /* MIPS clock generator      RW */
-#define        VE_SLP_REG      regptr(MSP_SLP_BASE + 0x38)
-                                       /* Voice Eng clock generator RW */
-/* reserved                  (MSP_SLP_BASE + 0x3C)                     */
-#define MSB_SLP_REG    regptr(MSP_SLP_BASE + 0x40)
-                                       /* MS-Bus clock generator    RW */
-#define SMAC_SLP_REG   regptr(MSP_SLP_BASE + 0x44)
-                                       /* Sec & MAC clock generator RW */
-#define PERF_SLP_REG   regptr(MSP_SLP_BASE + 0x48)
-                                       /* Per & TDM clock generator RW */
-
-/* Interrupt Controller Registers */
-#define SLP_INT_STS_REG regptr(MSP_SLP_BASE + 0x70)
-                                       /* Interrupt status register RW */
-#define SLP_INT_MSK_REG regptr(MSP_SLP_BASE + 0x74)
-                                       /* Interrupt enable/mask     RW */
-#define SE_MBOX_REG    regptr(MSP_SLP_BASE + 0x78)
-                                       /* Security Engine mailbox   RW */
-#define VE_MBOX_REG    regptr(MSP_SLP_BASE + 0x7C)
-                                       /* Voice Engine mailbox      RW */
-
-/* ELB Controller Registers */
-#define CS0_CNFG_REG   regptr(MSP_SLP_BASE + 0x80)
-                                       /* ELB CS0 Configuration Reg    */
-#define CS0_ADDR_REG   regptr(MSP_SLP_BASE + 0x84)
-                                       /* ELB CS0 Base Address Reg     */
-#define CS0_MASK_REG   regptr(MSP_SLP_BASE + 0x88)
-                                       /* ELB CS0 Mask Register        */
-#define CS0_ACCESS_REG regptr(MSP_SLP_BASE + 0x8C)
-                                       /* ELB CS0 access register      */
-
-#define CS1_CNFG_REG   regptr(MSP_SLP_BASE + 0x90)
-                                       /* ELB CS1 Configuration Reg    */
-#define CS1_ADDR_REG   regptr(MSP_SLP_BASE + 0x94)
-                                       /* ELB CS1 Base Address Reg     */
-#define CS1_MASK_REG   regptr(MSP_SLP_BASE + 0x98)
-                                       /* ELB CS1 Mask Register        */
-#define CS1_ACCESS_REG regptr(MSP_SLP_BASE + 0x9C)
-                                       /* ELB CS1 access register      */
-
-#define CS2_CNFG_REG   regptr(MSP_SLP_BASE + 0xA0)
-                                       /* ELB CS2 Configuration Reg    */
-#define CS2_ADDR_REG   regptr(MSP_SLP_BASE + 0xA4)
-                                       /* ELB CS2 Base Address Reg     */
-#define CS2_MASK_REG   regptr(MSP_SLP_BASE + 0xA8)
-                                       /* ELB CS2 Mask Register        */
-#define CS2_ACCESS_REG regptr(MSP_SLP_BASE + 0xAC)
-                                       /* ELB CS2 access register      */
-
-#define CS3_CNFG_REG   regptr(MSP_SLP_BASE + 0xB0)
-                                       /* ELB CS3 Configuration Reg    */
-#define CS3_ADDR_REG   regptr(MSP_SLP_BASE + 0xB4)
-                                       /* ELB CS3 Base Address Reg     */
-#define CS3_MASK_REG   regptr(MSP_SLP_BASE + 0xB8)
-                                       /* ELB CS3 Mask Register        */
-#define CS3_ACCESS_REG regptr(MSP_SLP_BASE + 0xBC)
-                                       /* ELB CS3 access register      */
-
-#define CS4_CNFG_REG   regptr(MSP_SLP_BASE + 0xC0)
-                                       /* ELB CS4 Configuration Reg    */
-#define CS4_ADDR_REG   regptr(MSP_SLP_BASE + 0xC4)
-                                       /* ELB CS4 Base Address Reg     */
-#define CS4_MASK_REG   regptr(MSP_SLP_BASE + 0xC8)
-                                       /* ELB CS4 Mask Register        */
-#define CS4_ACCESS_REG regptr(MSP_SLP_BASE + 0xCC)
-                                       /* ELB CS4 access register      */
-
-#define CS5_CNFG_REG   regptr(MSP_SLP_BASE + 0xD0)
-                                       /* ELB CS5 Configuration Reg    */
-#define CS5_ADDR_REG   regptr(MSP_SLP_BASE + 0xD4)
-                                       /* ELB CS5 Base Address Reg     */
-#define CS5_MASK_REG   regptr(MSP_SLP_BASE + 0xD8)
-                                       /* ELB CS5 Mask Register        */
-#define CS5_ACCESS_REG regptr(MSP_SLP_BASE + 0xDC)
-                                       /* ELB CS5 access register      */
-
-/* reserved                           0xE0 - 0xE8                      */
-#define ELB_1PC_EN_REG regptr(MSP_SLP_BASE + 0xEC)
-                                       /* ELB single PC card detect    */
-
-/* reserved                           0xF0 - 0xF8                      */
-#define ELB_CLK_CFG_REG        regptr(MSP_SLP_BASE + 0xFC)
-                                       /* SDRAM read/ELB timing Reg    */
-
-/* Extended UART status registers */
-#define UART0_STATUS_REG       regptr(MSP_UART0_BASE + 0x0c0)
-                                       /* UART Status Register 0       */
-#define UART1_STATUS_REG       regptr(MSP_UART1_BASE + 0x170)
-                                       /* UART Status Register 1       */
-
-/* Performance monitoring registers */
-#define PERF_MON_CTRL_REG      regptr(MSP_SLP_BASE + 0x140)
-                                       /* Performance monitor control  */
-#define PERF_MON_CLR_REG       regptr(MSP_SLP_BASE + 0x144)
-                                       /* Performance monitor clear    */
-#define PERF_MON_CNTH_REG      regptr(MSP_SLP_BASE + 0x148)
-                                       /* Perf monitor counter high    */
-#define PERF_MON_CNTL_REG      regptr(MSP_SLP_BASE + 0x14C)
-                                       /* Perf monitor counter low     */
-
-/* System control registers */
-#define SYS_CTRL_REG           regptr(MSP_SLP_BASE + 0x150)
-                                       /* System control register      */
-#define SYS_ERR1_REG           regptr(MSP_SLP_BASE + 0x154)
-                                       /* System Error status 1        */
-#define SYS_ERR2_REG           regptr(MSP_SLP_BASE + 0x158)
-                                       /* System Error status 2        */
-#define SYS_INT_CFG_REG                regptr(MSP_SLP_BASE + 0x15C)
-                                       /* System Interrupt config      */
-
-/* Voice Engine Memory configuration */
-#define VE_MEM_REG             regptr(MSP_SLP_BASE + 0x17C)
-                                       /* Voice engine memory config   */
-
-/* CPU/SLP Error Status registers */
-#define CPU_ERR1_REG           regptr(MSP_SLP_BASE + 0x180)
-                                       /* CPU/SLP Error status 1       */
-#define CPU_ERR2_REG           regptr(MSP_SLP_BASE + 0x184)
-                                       /* CPU/SLP Error status 1       */
-
-/* Extended GPIO registers       */
-#define EXTENDED_GPIO1_REG     regptr(MSP_SLP_BASE + 0x188)
-#define EXTENDED_GPIO2_REG     regptr(MSP_SLP_BASE + 0x18c)
-#define EXTENDED_GPIO_REG      EXTENDED_GPIO1_REG
-                                       /* Backward-compatibility       */
-
-/* System Error registers */
-#define SLP_ERR_STS_REG                regptr(MSP_SLP_BASE + 0x190)
-                                       /* Int status for SLP errors    */
-#define SLP_ERR_MSK_REG                regptr(MSP_SLP_BASE + 0x194)
-                                       /* Int mask for SLP errors      */
-#define SLP_ELB_ERST_REG       regptr(MSP_SLP_BASE + 0x198)
-                                       /* External ELB reset           */
-#define SLP_BOOT_STS_REG       regptr(MSP_SLP_BASE + 0x19C)
-                                       /* Boot Status                  */
-
-/* Extended ELB addressing */
-#define CS0_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1A0)
-                                       /* CS0 Extended address         */
-#define CS1_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1A4)
-                                       /* CS1 Extended address         */
-#define CS2_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1A8)
-                                       /* CS2 Extended address         */
-#define CS3_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1AC)
-                                       /* CS3 Extended address         */
-/* reserved                                          0x1B0             */
-#define CS5_EXT_ADDR_REG       regptr(MSP_SLP_BASE + 0x1B4)
-                                       /* CS5 Extended address         */
-
-/* PLL Adjustment registers */
-#define PLL_LOCK_REG           regptr(MSP_SLP_BASE + 0x200)
-                                       /* PLL0 lock status             */
-#define PLL_ARST_REG           regptr(MSP_SLP_BASE + 0x204)
-                                       /* PLL Analog reset status      */
-#define PLL0_ADJ_REG           regptr(MSP_SLP_BASE + 0x208)
-                                       /* PLL0 Adjustment value        */
-#define PLL1_ADJ_REG           regptr(MSP_SLP_BASE + 0x20C)
-                                       /* PLL1 Adjustment value        */
-
-/*
- ***************************************************************************
- * Peripheral Register definitions                                         *
- ***************************************************************************
- */
-
-/* Peripheral status */
-#define PER_CTRL_REG           regptr(MSP_PER_BASE + 0x50)
-                                       /* Peripheral control register  */
-#define PER_STS_REG            regptr(MSP_PER_BASE + 0x54)
-                                       /* Peripheral status register   */
-
-/* SPI/MPI Registers */
-#define SMPI_TX_SZ_REG         regptr(MSP_PER_BASE + 0x58)
-                                       /* SPI/MPI Tx Size register     */
-#define SMPI_RX_SZ_REG         regptr(MSP_PER_BASE + 0x5C)
-                                       /* SPI/MPI Rx Size register     */
-#define SMPI_CTL_REG           regptr(MSP_PER_BASE + 0x60)
-                                       /* SPI/MPI Control register     */
-#define SMPI_MS_REG            regptr(MSP_PER_BASE + 0x64)
-                                       /* SPI/MPI Chip Select reg      */
-#define SMPI_CORE_DATA_REG     regptr(MSP_PER_BASE + 0xC0)
-                                       /* SPI/MPI Core Data reg        */
-#define SMPI_CORE_CTRL_REG     regptr(MSP_PER_BASE + 0xC4)
-                                       /* SPI/MPI Core Control reg     */
-#define SMPI_CORE_STAT_REG     regptr(MSP_PER_BASE + 0xC8)
-                                       /* SPI/MPI Core Status reg      */
-#define SMPI_CORE_SSEL_REG     regptr(MSP_PER_BASE + 0xCC)
-                                       /* SPI/MPI Core Ssel reg        */
-#define SMPI_FIFO_REG          regptr(MSP_PER_BASE + 0xD0)
-                                       /* SPI/MPI Data FIFO reg        */
-
-/* Peripheral Block Error Registers           */
-#define PER_ERR_STS_REG                regptr(MSP_PER_BASE + 0x70)
-                                       /* Error Bit Status Register    */
-#define PER_ERR_MSK_REG                regptr(MSP_PER_BASE + 0x74)
-                                       /* Error Bit Mask Register      */
-#define PER_HDR1_REG           regptr(MSP_PER_BASE + 0x78)
-                                       /* Error Header 1 Register      */
-#define PER_HDR2_REG           regptr(MSP_PER_BASE + 0x7C)
-                                       /* Error Header 2 Register      */
-
-/* Peripheral Block Interrupt Registers       */
-#define PER_INT_STS_REG                regptr(MSP_PER_BASE + 0x80)
-                                       /* Interrupt status register    */
-#define PER_INT_MSK_REG                regptr(MSP_PER_BASE + 0x84)
-                                       /* Interrupt Mask Register      */
-#define GPIO_INT_STS_REG       regptr(MSP_PER_BASE + 0x88)
-                                       /* GPIO interrupt status reg    */
-#define GPIO_INT_MSK_REG       regptr(MSP_PER_BASE + 0x8C)
-                                       /* GPIO interrupt MASK Reg      */
-
-/* POLO GPIO registers                        */
-#define POLO_GPIO_DAT1_REG     regptr(MSP_PER_BASE + 0x0E0)
-                                       /* Polo GPIO[8:0]  data reg     */
-#define POLO_GPIO_CFG1_REG     regptr(MSP_PER_BASE + 0x0E4)
-                                       /* Polo GPIO[7:0]  config reg   */
-#define POLO_GPIO_CFG2_REG     regptr(MSP_PER_BASE + 0x0E8)
-                                       /* Polo GPIO[15:8] config reg   */
-#define POLO_GPIO_OD1_REG      regptr(MSP_PER_BASE + 0x0EC)
-                                       /* Polo GPIO[31:0] output drive */
-#define POLO_GPIO_CFG3_REG     regptr(MSP_PER_BASE + 0x170)
-                                       /* Polo GPIO[23:16] config reg  */
-#define POLO_GPIO_DAT2_REG     regptr(MSP_PER_BASE + 0x174)
-                                       /* Polo GPIO[15:9]  data reg    */
-#define POLO_GPIO_DAT3_REG     regptr(MSP_PER_BASE + 0x178)
-                                       /* Polo GPIO[23:16]  data reg   */
-#define POLO_GPIO_DAT4_REG     regptr(MSP_PER_BASE + 0x17C)
-                                       /* Polo GPIO[31:24]  data reg   */
-#define POLO_GPIO_DAT5_REG     regptr(MSP_PER_BASE + 0x180)
-                                       /* Polo GPIO[39:32]  data reg   */
-#define POLO_GPIO_DAT6_REG     regptr(MSP_PER_BASE + 0x184)
-                                       /* Polo GPIO[47:40]  data reg   */
-#define POLO_GPIO_DAT7_REG     regptr(MSP_PER_BASE + 0x188)
-                                       /* Polo GPIO[54:48]  data reg   */
-#define POLO_GPIO_CFG4_REG     regptr(MSP_PER_BASE + 0x18C)
-                                       /* Polo GPIO[31:24] config reg  */
-#define POLO_GPIO_CFG5_REG     regptr(MSP_PER_BASE + 0x190)
-                                       /* Polo GPIO[39:32] config reg  */
-#define POLO_GPIO_CFG6_REG     regptr(MSP_PER_BASE + 0x194)
-                                       /* Polo GPIO[47:40] config reg  */
-#define POLO_GPIO_CFG7_REG     regptr(MSP_PER_BASE + 0x198)
-                                       /* Polo GPIO[54:48] config reg  */
-#define POLO_GPIO_OD2_REG      regptr(MSP_PER_BASE + 0x19C)
-                                       /* Polo GPIO[54:32] output drive */
-
-/* Generic GPIO registers                     */
-#define GPIO_DATA1_REG         regptr(MSP_PER_BASE + 0x170)
-                                       /* GPIO[1:0] data register      */
-#define GPIO_DATA2_REG         regptr(MSP_PER_BASE + 0x174)
-                                       /* GPIO[5:2] data register      */
-#define GPIO_DATA3_REG         regptr(MSP_PER_BASE + 0x178)
-                                       /* GPIO[9:6] data register      */
-#define GPIO_DATA4_REG         regptr(MSP_PER_BASE + 0x17C)
-                                       /* GPIO[15:10] data register    */
-#define GPIO_CFG1_REG          regptr(MSP_PER_BASE + 0x180)
-                                       /* GPIO[1:0] config register    */
-#define GPIO_CFG2_REG          regptr(MSP_PER_BASE + 0x184)
-                                       /* GPIO[5:2] config register    */
-#define GPIO_CFG3_REG          regptr(MSP_PER_BASE + 0x188)
-                                       /* GPIO[9:6] config register    */
-#define GPIO_CFG4_REG          regptr(MSP_PER_BASE + 0x18C)
-                                       /* GPIO[15:10] config register  */
-#define GPIO_OD_REG            regptr(MSP_PER_BASE + 0x190)
-                                       /* GPIO[15:0] output drive      */
-
-/*
- ***************************************************************************
- * CPU Interface register definitions                                      *
- ***************************************************************************
- */
-#define PCI_FLUSH_REG          regptr(MSP_CPUIF_BASE + 0x00)
-                                       /* PCI-SDRAM queue flush trigger */
-#define OCP_ERR1_REG           regptr(MSP_CPUIF_BASE + 0x04)
-                                       /* OCP Error Attribute 1        */
-#define OCP_ERR2_REG           regptr(MSP_CPUIF_BASE + 0x08)
-                                       /* OCP Error Attribute 2        */
-#define OCP_STS_REG            regptr(MSP_CPUIF_BASE + 0x0C)
-                                       /* OCP Error Status             */
-#define CPUIF_PM_REG           regptr(MSP_CPUIF_BASE + 0x10)
-                                       /* CPU policy configuration     */
-#define CPUIF_CFG_REG          regptr(MSP_CPUIF_BASE + 0x10)
-                                       /* Misc configuration options   */
-
-/* Central Interrupt Controller Registers */
-#define MSP_CIC_BASE           (MSP_CPUIF_BASE + 0x8000)
-                                       /* Central Interrupt registers  */
-#define CIC_EXT_CFG_REG                regptr(MSP_CIC_BASE + 0x00)
-                                       /* External interrupt config    */
-#define CIC_STS_REG            regptr(MSP_CIC_BASE + 0x04)
-                                       /* CIC Interrupt Status         */
-#define CIC_VPE0_MSK_REG       regptr(MSP_CIC_BASE + 0x08)
-                                       /* VPE0 Interrupt Mask          */
-#define CIC_VPE1_MSK_REG       regptr(MSP_CIC_BASE + 0x0C)
-                                       /* VPE1 Interrupt Mask          */
-#define CIC_TC0_MSK_REG                regptr(MSP_CIC_BASE + 0x10)
-                                       /* Thread Context 0 Int Mask    */
-#define CIC_TC1_MSK_REG                regptr(MSP_CIC_BASE + 0x14)
-                                       /* Thread Context 1 Int Mask    */
-#define CIC_TC2_MSK_REG                regptr(MSP_CIC_BASE + 0x18)
-                                       /* Thread Context 2 Int Mask    */
-#define CIC_TC3_MSK_REG                regptr(MSP_CIC_BASE + 0x18)
-                                       /* Thread Context 3 Int Mask    */
-#define CIC_TC4_MSK_REG                regptr(MSP_CIC_BASE + 0x18)
-                                       /* Thread Context 4 Int Mask    */
-#define CIC_PCIMSI_STS_REG     regptr(MSP_CIC_BASE + 0x18)
-#define CIC_PCIMSI_MSK_REG     regptr(MSP_CIC_BASE + 0x18)
-#define CIC_PCIFLSH_REG                regptr(MSP_CIC_BASE + 0x18)
-#define CIC_VPE0_SWINT_REG     regptr(MSP_CIC_BASE + 0x08)
-
-
-/*
- ***************************************************************************
- * Memory controller registers                                             *
- ***************************************************************************
- */
-#define MEM_CFG1_REG           regptr(MSP_MEM_CFG_BASE + 0x00)
-#define MEM_SS_ADDR            regptr(MSP_MEM_CFG_BASE + 0x00)
-#define MEM_SS_DATA            regptr(MSP_MEM_CFG_BASE + 0x04)
-#define MEM_SS_WRITE           regptr(MSP_MEM_CFG_BASE + 0x08)
-
-/*
- ***************************************************************************
- * PCI controller registers                                                *
- ***************************************************************************
- */
-#define PCI_BASE_REG           regptr(MSP_PCI_BASE + 0x00)
-#define PCI_CONFIG_SPACE_REG   regptr(MSP_PCI_BASE + 0x800)
-#define PCI_JTAG_DEVID_REG     regptr(MSP_SLP_BASE + 0x13c)
-
-/*
- ########################################################################
- #  Register content & macro definitions                                #
- ########################################################################
- */
-
-/*
- ***************************************************************************
- * DEV_ID defines                                                          *
- ***************************************************************************
- */
-#define DEV_ID_PCI_DIS         (1 << 26)       /* Set if PCI disabled */
-#define DEV_ID_PCI_HOST                (1 << 20)       /* Set if PCI host */
-#define DEV_ID_SINGLE_PC       (1 << 19)       /* Set if single PC Card */
-#define DEV_ID_FAMILY          (0xff << 8)     /* family ID code */
-#define POLO_ZEUS_SUB_FAMILY   (0x7  << 16)    /* sub family for Polo/Zeus */
-
-#define MSPFPGA_ID             (0x00  << 8)    /* you are on your own here */
-#define MSP5000_ID             (0x50  << 8)
-#define MSP4F00_ID             (0x4f  << 8)    /* FPGA version of MSP4200 */
-#define MSP4E00_ID             (0x4f  << 8)    /* FPGA version of MSP7120 */
-#define MSP4200_ID             (0x42  << 8)
-#define MSP4000_ID             (0x40  << 8)
-#define MSP2XXX_ID             (0x20  << 8)
-#define MSPZEUS_ID             (0x10  << 8)
-
-#define MSP2004_SUB_ID         (0x0   << 16)
-#define MSP2005_SUB_ID         (0x1   << 16)
-#define MSP2006_SUB_ID         (0x1   << 16)
-#define MSP2007_SUB_ID         (0x2   << 16)
-#define MSP2010_SUB_ID         (0x3   << 16)
-#define MSP2015_SUB_ID         (0x4   << 16)
-#define MSP2020_SUB_ID         (0x5   << 16)
-#define MSP2100_SUB_ID         (0x6   << 16)
-
-/*
- ***************************************************************************
- * RESET defines                                                           *
- ***************************************************************************
- */
-#define MSP_GR_RST             (0x01 << 0)     /* Global reset bit     */
-#define MSP_MR_RST             (0x01 << 1)     /* MIPS reset bit       */
-#define MSP_PD_RST             (0x01 << 2)     /* PVC DMA reset bit    */
-#define MSP_PP_RST             (0x01 << 3)     /* PVC reset bit        */
-/* reserved                                                             */
-#define MSP_EA_RST             (0x01 << 6)     /* Mac A reset bit      */
-#define MSP_EB_RST             (0x01 << 7)     /* Mac B reset bit      */
-#define MSP_SE_RST             (0x01 << 8)     /* Security Eng reset bit */
-#define MSP_PB_RST             (0x01 << 9)     /* Per block reset bit  */
-#define MSP_EC_RST             (0x01 << 10)    /* Mac C reset bit      */
-#define MSP_TW_RST             (0x01 << 11)    /* TWI reset bit        */
-#define MSP_SPI_RST            (0x01 << 12)    /* SPI/MPI reset bit    */
-#define MSP_U1_RST             (0x01 << 13)    /* UART1 reset bit      */
-#define MSP_U0_RST             (0x01 << 14)    /* UART0 reset bit      */
-
-/*
- ***************************************************************************
- * UART defines                                                            *
- ***************************************************************************
- */
-#define MSP_BASE_BAUD          25000000
-#define MSP_UART_REG_LEN       0x20
-
-/*
- ***************************************************************************
- * ELB defines                                                             *
- ***************************************************************************
- */
-#define PCCARD_32              0x02    /* Set if is PCCARD 32 (Cardbus) */
-#define SINGLE_PCCARD          0x01    /* Set to enable single PC card */
-
-/*
- ***************************************************************************
- * CIC defines                                                             *
- ***************************************************************************
- */
-
-/* CIC_EXT_CFG_REG */
-#define EXT_INT_POL(eirq)                      (1 << (eirq + 8))
-#define EXT_INT_EDGE(eirq)                     (1 << eirq)
-
-#define CIC_EXT_SET_TRIGGER_LEVEL(reg, eirq)   (reg &= ~EXT_INT_EDGE(eirq))
-#define CIC_EXT_SET_TRIGGER_EDGE(reg, eirq)    (reg |= EXT_INT_EDGE(eirq))
-#define CIC_EXT_SET_ACTIVE_HI(reg, eirq)       (reg |= EXT_INT_POL(eirq))
-#define CIC_EXT_SET_ACTIVE_LO(reg, eirq)       (reg &= ~EXT_INT_POL(eirq))
-#define CIC_EXT_SET_ACTIVE_RISING              CIC_EXT_SET_ACTIVE_HI
-#define CIC_EXT_SET_ACTIVE_FALLING             CIC_EXT_SET_ACTIVE_LO
-
-#define CIC_EXT_IS_TRIGGER_LEVEL(reg, eirq) \
-                               ((reg & EXT_INT_EDGE(eirq)) == 0)
-#define CIC_EXT_IS_TRIGGER_EDGE(reg, eirq)     (reg & EXT_INT_EDGE(eirq))
-#define CIC_EXT_IS_ACTIVE_HI(reg, eirq)                (reg & EXT_INT_POL(eirq))
-#define CIC_EXT_IS_ACTIVE_LO(reg, eirq) \
-                               ((reg & EXT_INT_POL(eirq)) == 0)
-#define CIC_EXT_IS_ACTIVE_RISING               CIC_EXT_IS_ACTIVE_HI
-#define CIC_EXT_IS_ACTIVE_FALLING              CIC_EXT_IS_ACTIVE_LO
-
-/*
- ***************************************************************************
- * Memory Controller defines                                               *
- ***************************************************************************
- */
-
-/* Indirect memory controller registers */
-#define DDRC_CFG(n)            (n)
-#define DDRC_DEBUG(n)          (0x04 + n)
-#define DDRC_CTL(n)            (0x40 + n)
-
-/* Macro to perform DDRC indirect write */
-#define DDRC_INDIRECT_WRITE(reg, mask, value) \
-({ \
-       *MEM_SS_ADDR = (((mask) & 0xf) << 8) | ((reg) & 0xff); \
-       *MEM_SS_DATA = (value); \
-       *MEM_SS_WRITE = 1; \
-})
-
-/*
- ***************************************************************************
- * SPI/MPI Mode                                                            *
- ***************************************************************************
- */
-#define SPI_MPI_RX_BUSY                0x00008000      /* SPI/MPI Receive Busy */
-#define SPI_MPI_FIFO_EMPTY     0x00004000      /* SPI/MPI Fifo Empty   */
-#define SPI_MPI_TX_BUSY                0x00002000      /* SPI/MPI Transmit Busy */
-#define SPI_MPI_FIFO_FULL      0x00001000      /* SPI/MPU FIFO full    */
-
-/*
- ***************************************************************************
- * SPI/MPI Control Register                                                *
- ***************************************************************************
- */
-#define SPI_MPI_RX_START       0x00000004      /* Start receive command */
-#define SPI_MPI_FLUSH_Q                0x00000002      /* Flush SPI/MPI Queue */
-#define SPI_MPI_TX_START       0x00000001      /* Start Transmit Command */
-
-#endif /* !_ASM_MSP_REGS_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h
deleted file mode 100644 (file)
index 96d4c8c..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Defines for the MSP interrupt controller.
- *
- * Copyright (C) 1999 MIPS Technologies, Inc.  All rights reserved.
- * Author: Carsten Langgaard, carstenl@mips.com
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- */
-
-#ifndef _MSP_SLP_INT_H
-#define _MSP_SLP_INT_H
-
-/*
- * The PMC-Sierra SLP interrupts are arranged in a 3 level cascaded
- * hierarchical system.  The first level are the direct MIPS interrupts
- * and are assigned the interrupt range 0-7.  The second level is the SLM
- * interrupt controller and is assigned the range 8-39.  The third level
- * comprises the Peripherial block, the PCI block, the PCI MSI block and
- * the SLP.  The PCI interrupts and the SLP errors are handled by the
- * relevant subsystems so the core interrupt code needs only concern
- * itself with the Peripheral block.  These are assigned interrupts in
- * the range 40-71.
- */
-
-/*
- * IRQs directly connected to CPU
- */
-#define MSP_MIPS_INTBASE       0
-#define MSP_INT_SW0            0  /* IRQ for swint0,         C_SW0  */
-#define MSP_INT_SW1            1  /* IRQ for swint1,         C_SW1  */
-#define MSP_INT_MAC0           2  /* IRQ for MAC 0,          C_IRQ0 */
-#define MSP_INT_MAC1           3  /* IRQ for MAC 1,          C_IRQ1 */
-#define MSP_INT_C_IRQ2         4  /* Wired off,              C_IRQ2 */
-#define MSP_INT_VE             5  /* IRQ for Voice Engine,   C_IRQ3 */
-#define MSP_INT_SLP            6  /* IRQ for SLM block,      C_IRQ4 */
-#define MSP_INT_TIMER          7  /* IRQ for the MIPS timer, C_IRQ5 */
-
-/*
- * IRQs cascaded on CPU interrupt 4 (CAUSE bit 12, C_IRQ4)
- * These defines should be tied to the register definition for the SLM
- * interrupt routine.  For now, just use hard-coded values.
- */
-#define MSP_SLP_INTBASE                (MSP_MIPS_INTBASE + 8)
-#define MSP_INT_EXT0           (MSP_SLP_INTBASE + 0)
-                                       /* External interrupt 0         */
-#define MSP_INT_EXT1           (MSP_SLP_INTBASE + 1)
-                                       /* External interrupt 1         */
-#define MSP_INT_EXT2           (MSP_SLP_INTBASE + 2)
-                                       /* External interrupt 2         */
-#define MSP_INT_EXT3           (MSP_SLP_INTBASE + 3)
-                                       /* External interrupt 3         */
-/* Reserved                                       4-7                  */
-
-/*
- *************************************************************************
- * DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER *
- * Some MSP produces have this interrupt labelled as Voice and some are  *
- * SEC mbox ...                                                          *
- *************************************************************************
- */
-#define MSP_INT_SLP_VE         (MSP_SLP_INTBASE + 8)
-                                       /* Cascaded IRQ for Voice Engine*/
-#define MSP_INT_SLP_TDM                (MSP_SLP_INTBASE + 9)
-                                       /* TDM interrupt                */
-#define MSP_INT_SLP_MAC0       (MSP_SLP_INTBASE + 10)
-                                       /* Cascaded IRQ for MAC 0       */
-#define MSP_INT_SLP_MAC1       (MSP_SLP_INTBASE + 11)
-                                       /* Cascaded IRQ for MAC 1       */
-#define MSP_INT_SEC            (MSP_SLP_INTBASE + 12)
-                                       /* IRQ for security engine      */
-#define        MSP_INT_PER             (MSP_SLP_INTBASE + 13)
-                                       /* Peripheral interrupt         */
-#define        MSP_INT_TIMER0          (MSP_SLP_INTBASE + 14)
-                                       /* SLP timer 0                  */
-#define        MSP_INT_TIMER1          (MSP_SLP_INTBASE + 15)
-                                       /* SLP timer 1                  */
-#define        MSP_INT_TIMER2          (MSP_SLP_INTBASE + 16)
-                                       /* SLP timer 2                  */
-#define        MSP_INT_SLP_TIMER       (MSP_SLP_INTBASE + 17)
-                                       /* Cascaded MIPS timer          */
-#define MSP_INT_BLKCP          (MSP_SLP_INTBASE + 18)
-                                       /* Block Copy                   */
-#define MSP_INT_UART0          (MSP_SLP_INTBASE + 19)
-                                       /* UART 0                       */
-#define MSP_INT_PCI            (MSP_SLP_INTBASE + 20)
-                                       /* PCI subsystem                */
-#define MSP_INT_PCI_DBELL      (MSP_SLP_INTBASE + 21)
-                                       /* PCI doorbell                 */
-#define MSP_INT_PCI_MSI                (MSP_SLP_INTBASE + 22)
-                                       /* PCI Message Signal           */
-#define MSP_INT_PCI_BC0                (MSP_SLP_INTBASE + 23)
-                                       /* PCI Block Copy 0             */
-#define MSP_INT_PCI_BC1                (MSP_SLP_INTBASE + 24)
-                                       /* PCI Block Copy 1             */
-#define MSP_INT_SLP_ERR                (MSP_SLP_INTBASE + 25)
-                                       /* SLP error condition          */
-#define MSP_INT_MAC2           (MSP_SLP_INTBASE + 26)
-                                       /* IRQ for MAC2                 */
-/* Reserved                                       26-31                */
-
-/*
- * IRQs cascaded on SLP PER interrupt (MSP_INT_PER)
- */
-#define MSP_PER_INTBASE                (MSP_SLP_INTBASE + 32)
-/* Reserved                                       0-1                  */
-#define MSP_INT_UART1          (MSP_PER_INTBASE + 2)
-                                       /* UART 1                       */
-/* Reserved                                       3-5                  */
-#define MSP_INT_2WIRE          (MSP_PER_INTBASE + 6)
-                                       /* 2-wire                       */
-#define MSP_INT_TM0            (MSP_PER_INTBASE + 7)
-                                       /* Peripheral timer block out 0 */
-#define MSP_INT_TM1            (MSP_PER_INTBASE + 8)
-                                       /* Peripheral timer block out 1 */
-/* Reserved                                       9                    */
-#define MSP_INT_SPRX           (MSP_PER_INTBASE + 10)
-                                       /* SPI RX complete              */
-#define MSP_INT_SPTX           (MSP_PER_INTBASE + 11)
-                                       /* SPI TX complete              */
-#define MSP_INT_GPIO           (MSP_PER_INTBASE + 12)
-                                       /* GPIO                         */
-#define MSP_INT_PER_ERR                (MSP_PER_INTBASE + 13)
-                                       /* Peripheral error             */
-/* Reserved                                       14-31                */
-
-#endif /* !_MSP_SLP_INT_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h b/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h
deleted file mode 100644 (file)
index 4c9348d..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/******************************************************************
- * Copyright (c) 2000-2007 PMC-Sierra INC.
- *
- *     This program is free software; you can redistribute it
- *     and/or modify it under the terms of the GNU General
- *     Public License as published by the Free Software
- *     Foundation; either version 2 of the License, or (at your
- *     option) any later version.
- *
- *     This program is distributed in the hope that it will be
- *     useful, but WITHOUT ANY WARRANTY; without even the implied
- *     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- *     PURPOSE.  See the GNU General Public License for more
- *     details.
- *
- *     You should have received a copy of the GNU General Public
- *     License along with this program; if not, write to the Free
- *     Software Foundation, Inc., 675 Mass Ave, Cambridge, MA
- *     02139, USA.
- *
- * PMC-SIERRA INC. DISCLAIMS ANY LIABILITY OF ANY KIND
- * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS
- * SOFTWARE.
- */
-#ifndef MSP_USB_H_
-#define MSP_USB_H_
-
-#ifdef CONFIG_MSP_HAS_DUAL_USB
-#define NUM_USB_DEVS   2
-#else
-#define NUM_USB_DEVS   1
-#endif
-
-/* Register spaces for USB host 0 */
-#define MSP_USB0_MAB_START     (MSP_USB0_BASE + 0x0)
-#define MSP_USB0_MAB_END       (MSP_USB0_BASE + 0x17)
-#define MSP_USB0_ID_START      (MSP_USB0_BASE + 0x40000)
-#define MSP_USB0_ID_END                (MSP_USB0_BASE + 0x4008f)
-#define MSP_USB0_HS_START      (MSP_USB0_BASE + 0x40100)
-#define MSP_USB0_HS_END                (MSP_USB0_BASE + 0x401FF)
-
-/* Register spaces for USB host 1 */
-#define        MSP_USB1_MAB_START      (MSP_USB1_BASE + 0x0)
-#define MSP_USB1_MAB_END       (MSP_USB1_BASE + 0x17)
-#define MSP_USB1_ID_START      (MSP_USB1_BASE + 0x40000)
-#define MSP_USB1_ID_END                (MSP_USB1_BASE + 0x4008f)
-#define MSP_USB1_HS_START      (MSP_USB1_BASE + 0x40100)
-#define MSP_USB1_HS_END                (MSP_USB1_BASE + 0x401ff)
-
-/* USB Identification registers */
-struct msp_usbid_regs {
-       u32 id;         /* 0x0: Identification register */
-       u32 hwgen;      /* 0x4: General HW params */
-       u32 hwhost;     /* 0x8: Host HW params */
-       u32 hwdev;      /* 0xc: Device HW params */
-       u32 hwtxbuf;    /* 0x10: Tx buffer HW params */
-       u32 hwrxbuf;    /* 0x14: Rx buffer HW params */
-       u32 reserved[26];
-       u32 timer0_load; /* 0x80: General-purpose timer 0 load*/
-       u32 timer0_ctrl; /* 0x84: General-purpose timer 0 control */
-       u32 timer1_load; /* 0x88: General-purpose timer 1 load*/
-       u32 timer1_ctrl; /* 0x8c: General-purpose timer 1 control */
-};
-
-/* MSBus to AMBA registers */
-struct msp_mab_regs {
-       u32 isr;        /* 0x0: Interrupt status */
-       u32 imr;        /* 0x4: Interrupt mask */
-       u32 thcr0;      /* 0x8: Transaction header capture 0 */
-       u32 thcr1;      /* 0xc: Transaction header capture 1 */
-       u32 int_stat;   /* 0x10: Interrupt status summary */
-       u32 phy_cfg;    /* 0x14: USB phy config */
-};
-
-/* EHCI registers */
-struct msp_usbhs_regs {
-       u32 hciver;     /* 0x0: Version and offset to operational regs */
-       u32 hcsparams;  /* 0x4: Host control structural parameters */
-       u32 hccparams;  /* 0x8: Host control capability parameters */
-       u32 reserved0[5];
-       u32 dciver;     /* 0x20: Device interface version */
-       u32 dccparams;  /* 0x24: Device control capability parameters */
-       u32 reserved1[6];
-       u32 cmd;        /* 0x40: USB command */
-       u32 sts;        /* 0x44: USB status */
-       u32 int_ena;    /* 0x48: USB interrupt enable */
-       u32 frindex;    /* 0x4c: Frame index */
-       u32 reserved3;
-       union {
-               struct {
-                       u32 flb_addr; /* 0x54: Frame list base address */
-                       u32 next_async_addr; /* 0x58: next asynchronous addr */
-                       u32 ttctrl; /* 0x5c: embedded transaction translator
-                                                       async buffer status */
-                       u32 burst_size; /* 0x60: Controller burst size */
-                       u32 tx_fifo_ctrl; /* 0x64: Tx latency FIFO tuning */
-                       u32 reserved0[4];
-                       u32 endpt_nak; /* 0x78: Endpoint NAK */
-                       u32 endpt_nak_ena; /* 0x7c: Endpoint NAK enable */
-                       u32 cfg_flag; /* 0x80: Config flag */
-                       u32 port_sc1; /* 0x84: Port status & control 1 */
-                       u32 reserved1[7];
-                       u32 otgsc;      /* 0xa4: OTG status & control */
-                       u32 mode;       /* 0xa8: USB controller mode */
-               } host;
-
-               struct {
-                       u32 dev_addr; /* 0x54: Device address */
-                       u32 endpt_list_addr; /* 0x58: Endpoint list address */
-                       u32 reserved0[7];
-                       u32 endpt_nak;  /* 0x74 */
-                       u32 endpt_nak_ctrl; /* 0x78 */
-                       u32 cfg_flag; /* 0x80 */
-                       u32 port_sc1; /* 0x84: Port status & control 1 */
-                       u32 reserved[7];
-                       u32 otgsc;      /* 0xa4: OTG status & control */
-                       u32 mode;       /* 0xa8: USB controller mode */
-                       u32 endpt_setup_stat; /* 0xac */
-                       u32 endpt_prime; /* 0xb0 */
-                       u32 endpt_flush; /* 0xb4 */
-                       u32 endpt_stat; /* 0xb8 */
-                       u32 endpt_complete; /* 0xbc */
-                       u32 endpt_ctrl0; /* 0xc0 */
-                       u32 endpt_ctrl1; /* 0xc4 */
-                       u32 endpt_ctrl2; /* 0xc8 */
-                       u32 endpt_ctrl3; /* 0xcc */
-               } device;
-       } u;
-};
-/*
- * Container for the more-generic platform_device.
- * This exists mainly as a way to map the non-standard register
- * spaces and make them accessible to the USB ISR.
- */
-struct mspusb_device {
-       struct msp_mab_regs   __iomem *mab_regs;
-       struct msp_usbid_regs __iomem *usbid_regs;
-       struct msp_usbhs_regs __iomem *usbhs_regs;
-       struct platform_device dev;
-};
-
-#define to_mspusb_device(x) container_of((x), struct mspusb_device, dev)
-#define TO_HOST_ID(x) ((x) & 0x3)
-#endif /*MSP_USB_H_*/
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h b/arch/mips/include/asm/pmc-sierra/msp71xx/war.h
deleted file mode 100644 (file)
index c74eb16..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_PMC_SIERRA_WAR_H
-#define __ASM_MIPS_PMC_SIERRA_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR    0
-#define R4600_V1_HIT_CACHEOP_WAR       0
-#define R4600_V2_HIT_CACHEOP_WAR       0
-#define R5432_CP0_INTERRUPT_WAR                0
-#define BCM1250_M3_WAR                 0
-#define SIBYTE_1956_WAR                        0
-#define MIPS4K_ICACHE_REFILL_WAR       0
-#define MIPS_CACHE_SYNC_WAR            0
-#define TX49XX_ICACHE_INDEX_INV_WAR    0
-#define ICACHE_REFILLS_WORKAROUND_WAR  0
-#define R10000_LLSC_WAR                        0
-#if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \
-       defined(CONFIG_PMC_MSP7120_FPGA)
-#define MIPS34K_MISSED_ITLB_WAR         1
-#else
-#define MIPS34K_MISSED_ITLB_WAR         0
-#endif
-
-#endif /* __ASM_MIPS_PMC_SIERRA_WAR_H */
index bd98b50..2a5fa7a 100644 (file)
@@ -112,8 +112,8 @@ struct mips_fpu_struct {
 typedef __u32 dspreg_t;
 
 struct mips_dsp_state {
-       dspreg_t        dspr[NUM_DSP_REGS];
-       unsigned int    dspcontrol;
+       dspreg_t        dspr[NUM_DSP_REGS];
+       unsigned int    dspcontrol;
 };
 
 #define INIT_CPUMASK { \
@@ -137,46 +137,46 @@ union mips_watch_reg_state {
 
 struct octeon_cop2_state {
        /* DMFC2 rt, 0x0201 */
-       unsigned long   cop2_crc_iv;
+       unsigned long   cop2_crc_iv;
        /* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
-       unsigned long   cop2_crc_length;
+       unsigned long   cop2_crc_length;
        /* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
-       unsigned long   cop2_crc_poly;
+       unsigned long   cop2_crc_poly;
        /* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
-       unsigned long   cop2_llm_dat[2];
+       unsigned long   cop2_llm_dat[2];
        /* DMFC2 rt, 0x0084 */
-       unsigned long   cop2_3des_iv;
+       unsigned long   cop2_3des_iv;
        /* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
-       unsigned long   cop2_3des_key[3];
+       unsigned long   cop2_3des_key[3];
        /* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
-       unsigned long   cop2_3des_result;
+       unsigned long   cop2_3des_result;
        /* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
-       unsigned long   cop2_aes_inp0;
+       unsigned long   cop2_aes_inp0;
        /* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
-       unsigned long   cop2_aes_iv[2];
+       unsigned long   cop2_aes_iv[2];
        /* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
         * rt, 0x0107 */
-       unsigned long   cop2_aes_key[4];
+       unsigned long   cop2_aes_key[4];
        /* DMFC2 rt, 0x0110 */
-       unsigned long   cop2_aes_keylen;
+       unsigned long   cop2_aes_keylen;
        /* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
-       unsigned long   cop2_aes_result[2];
+       unsigned long   cop2_aes_result[2];
        /* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
         * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
         * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
         * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
         * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
-       unsigned long   cop2_hsh_datw[15];
+       unsigned long   cop2_hsh_datw[15];
        /* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
         * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
         * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
-       unsigned long   cop2_hsh_ivw[8];
+       unsigned long   cop2_hsh_ivw[8];
        /* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
-       unsigned long   cop2_gfm_mult[2];
+       unsigned long   cop2_gfm_mult[2];
        /* DMFC2 rt, 0x025E - Pass2 */
-       unsigned long   cop2_gfm_poly;
+       unsigned long   cop2_gfm_poly;
        /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
-       unsigned long   cop2_gfm_result[2];
+       unsigned long   cop2_gfm_result[2];
 };
 #define INIT_OCTEON_COP2 {0,}
 
@@ -249,9 +249,9 @@ struct thread_struct {
 #endif /* CONFIG_CPU_CAVIUM_OCTEON */
 
 #define INIT_THREAD  {                                         \
-        /*                                                     \
-         * Saved main processor registers                      \
-         */                                                    \
+       /*                                                      \
+        * Saved main processor registers                       \
+        */                                                     \
        .reg16                  = 0,                            \
        .reg17                  = 0,                            \
        .reg18                  = 0,                            \
@@ -332,7 +332,7 @@ unsigned long get_wchan(struct task_struct *p);
  * aborts compilation on some CPUs.  It's simply not possible to unwind
  * some CPU's stackframes.
  *
- * __builtin_return_address works only for non-leaf functions.  We avoid the
+ * __builtin_return_address works only for non-leaf functions. We avoid the
  * overhead of a function call by forcing the compiler to save the return
  * address register on the stack.
  */
index 54ea47d..a0b2650 100644 (file)
  * for indexed cache operations.  Two issues here:
  *
  *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
- *    the index bits from the virtual address.  This breaks with tradition
- *    set by the R4000.  To keep unpleasant surprises from happening we pick
+ *    the index bits from the virtual address. This breaks with tradition
+ *    set by the R4000.         To keep unpleasant surprises from happening we pick
  *    an address in KSEG0 / CKSEG0.
- *  - We need a properly sign extended address for 64-bit code.  To get away
+ *  - We need a properly sign extended address for 64-bit code.         To get away
  *    without ifdefs we let the compiler do it by a type cast.
  */
 #define INDEX_BASE     CKSEG0
@@ -347,7 +347,7 @@ static inline void blast_##pfx##cache##lsize(void)                  \
        unsigned long end = start + current_cpu_data.desc.waysize;      \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
-                              current_cpu_data.desc.waybit;            \
+                              current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
@@ -359,7 +359,7 @@ static inline void blast_##pfx##cache##lsize(void)                  \
        __##pfx##flush_epilogue                                         \
 }                                                                      \
                                                                        \
-static inline void blast_##pfx##cache##lsize##_page(unsigned long page)        \
+static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
 {                                                                      \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
@@ -381,7 +381,7 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
        unsigned long end = start + PAGE_SIZE;                          \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
-                              current_cpu_data.desc.waybit;            \
+                              current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
index 785a518..3c687df 100644 (file)
 /*
  * Symbolic register names for 32 bit ABI
  */
-#define zero    $0      /* wired zero */
-#define AT      $1      /* assembler temp  - uppercase because of ".set at" */
-#define v0      $2      /* return value */
-#define v1      $3
-#define a0      $4      /* argument registers */
-#define a1      $5
-#define a2      $6
-#define a3      $7
-#define t0      $8      /* caller saved */
-#define t1      $9
-#define t2      $10
-#define t3      $11
-#define t4      $12
+#define zero   $0      /* wired zero */
+#define AT     $1      /* assembler temp  - uppercase because of ".set at" */
+#define v0     $2      /* return value */
+#define v1     $3
+#define a0     $4      /* argument registers */
+#define a1     $5
+#define a2     $6
+#define a3     $7
+#define t0     $8      /* caller saved */
+#define t1     $9
+#define t2     $10
+#define t3     $11
+#define t4     $12
 #define ta0    $12
-#define t5      $13
+#define t5     $13
 #define ta1    $13
-#define t6      $14
+#define t6     $14
 #define ta2    $14
-#define t7      $15
+#define t7     $15
 #define ta3    $15
-#define s0      $16     /* callee saved */
-#define s1      $17
-#define s2      $18
-#define s3      $19
-#define s4      $20
-#define s5      $21
-#define s6      $22
-#define s7      $23
-#define t8      $24     /* caller saved */
-#define t9      $25
-#define jp      $25     /* PIC jump register */
-#define k0      $26     /* kernel scratch */
-#define k1      $27
-#define gp      $28     /* global pointer */
-#define sp      $29     /* stack pointer */
-#define fp      $30     /* frame pointer */
+#define s0     $16     /* callee saved */
+#define s1     $17
+#define s2     $18
+#define s3     $19
+#define s4     $20
+#define s5     $21
+#define s6     $22
+#define s7     $23
+#define t8     $24     /* caller saved */
+#define t9     $25
+#define jp     $25     /* PIC jump register */
+#define k0     $26     /* kernel scratch */
+#define k1     $27
+#define gp     $28     /* global pointer */
+#define sp     $29     /* stack pointer */
+#define fp     $30     /* frame pointer */
 #define s8     $30     /* same like fp! */
-#define ra      $31     /* return address */
+#define ra     $31     /* return address */
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
index 4ca3063..90985b6 100644 (file)
@@ -38,7 +38,7 @@ enum rtlx_state {
 #define RTLX_BUFFER_SIZE 2048
 
 /* each channel supports read and write.
-   linux (vpe0) reads lx_buffer  and writes rt_buffer
+   linux (vpe0) reads lx_buffer         and writes rt_buffer
    SP (vpe1) reads rt_buffer and writes lx_buffer
 */
 struct rtlx_channel {
index ae6306e..f29c75c 100644 (file)
@@ -10,7 +10,7 @@
 /*
  * Kludge alert:
  *
- * The generic seccomp code currently allows only a single compat ABI.  Until
+ * The generic seccomp code currently allows only a single compat ABI. Until
  * this is fixed we priorize O32 as the compat ABI over N32.
  */
 #ifdef CONFIG_MIPS32_O32
index 889cf02..24be2b4 100644 (file)
  * three physical connectors, but only two slots, GFX and EXP0.
  *
  * There is 10MB of GIO address space for GIO64 slot devices
- * slot#   slot type address range            size
+ * slot#   slot type address range           size
  * -----   --------- ----------------------- -----
- *   0     GFX       0x1f000000 - 0x1f3fffff   4MB
- *   1     EXP0      0x1f400000 - 0x1f5fffff   2MB
- *   2     EXP1      0x1f600000 - 0x1f9fffff   4MB
+ *   0    GFX       0x1f000000 - 0x1f3fffff   4MB
+ *   1    EXP0      0x1f400000 - 0x1f5fffff   2MB
+ *   2    EXP1      0x1f600000 - 0x1f9fffff   4MB
  *
  * There are un-slotted devices, HPC, I/O and misc devices, which are grouped
  * into the HPC address space.
- *   -     MISC      0x1fb00000 - 0x1fbfffff   1MB
+ *   -    MISC      0x1fb00000 - 0x1fbfffff   1MB
  *
  * Following space is reserved and unused
- *   -     RESERVED  0x18000000 - 0x1effffff 112MB
+ *   -    RESERVED  0x18000000 - 0x1effffff 112MB
  *
  * GIO bus IDs
  *
  * the slot undefined.
  *
  * 32-bit IDs are divided into
- *     bits 0:6        the product ID; ranges from 0x00 to 0x7F.
+ *     bits 0:6        the product ID; ranges from 0x00 to 0x7F.
  *     bit 7           0=GIO Product ID is 8 bits wide
  *                     1=GIO Product ID is 32 bits wide.
- *     bits 8:15       manufacturer version for the product.
+ *     bits 8:15       manufacturer version for the product.
  *     bit 16          0=GIO32 and GIO32-bis, 1=GIO64.
  *     bit 17          0=no ROM present
  *                     1=ROM present on this board AND next three words
index c4729f5..59920b3 100644 (file)
@@ -65,39 +65,39 @@ struct hpc3_scsiregs {
        u32 _unused0[0x1000/4 - 2];     /* padding */
        volatile u32 bcd;       /* byte count info */
 #define HPC3_SBCD_BCNTMSK 0x00003fff /* bytes to transfer from/to memory */
-#define HPC3_SBCD_XIE     0x00004000 /* Send IRQ when done with cur buf */
-#define HPC3_SBCD_EOX     0x00008000 /* Indicates this is last buf in chain */
+#define HPC3_SBCD_XIE    0x00004000 /* Send IRQ when done with cur buf */
+#define HPC3_SBCD_EOX    0x00008000 /* Indicates this is last buf in chain */
 
        volatile u32 ctrl;    /* control register */
-#define HPC3_SCTRL_IRQ    0x01 /* IRQ asserted, either dma done or parity */
+#define HPC3_SCTRL_IRQ   0x01 /* IRQ asserted, either dma done or parity */
 #define HPC3_SCTRL_ENDIAN 0x02 /* DMA endian mode, 0=big 1=little */
-#define HPC3_SCTRL_DIR    0x04 /* DMA direction, 1=dev2mem 0=mem2dev */
+#define HPC3_SCTRL_DIR   0x04 /* DMA direction, 1=dev2mem 0=mem2dev */
 #define HPC3_SCTRL_FLUSH  0x08 /* Tells HPC3 to flush scsi fifos */
 #define HPC3_SCTRL_ACTIVE 0x10 /* SCSI DMA channel is active */
 #define HPC3_SCTRL_AMASK  0x20 /* DMA active inhibits PIO */
 #define HPC3_SCTRL_CRESET 0x40 /* Resets dma channel and external controller */
-#define HPC3_SCTRL_PERR   0x80 /* Bad parity on HPC3 iface to scsi controller */
+#define HPC3_SCTRL_PERR          0x80 /* Bad parity on HPC3 iface to scsi controller */
 
        volatile u32 gfptr;     /* current GIO fifo ptr */
        volatile u32 dfptr;     /* current device fifo ptr */
        volatile u32 dconfig;   /* DMA configuration register */
 #define HPC3_SDCFG_HCLK 0x00001 /* Enable DMA half clock mode */
-#define HPC3_SDCFG_D1   0x00006 /* Cycles to spend in D1 state */
-#define HPC3_SDCFG_D2   0x00038 /* Cycles to spend in D2 state */
-#define HPC3_SDCFG_D3   0x001c0 /* Cycles to spend in D3 state */
+#define HPC3_SDCFG_D1  0x00006 /* Cycles to spend in D1 state */
+#define HPC3_SDCFG_D2  0x00038 /* Cycles to spend in D2 state */
+#define HPC3_SDCFG_D3  0x001c0 /* Cycles to spend in D3 state */
 #define HPC3_SDCFG_HWAT 0x00e00 /* DMA high water mark */
-#define HPC3_SDCFG_HW   0x01000 /* Enable 16-bit halfword DMA accesses to scsi */
+#define HPC3_SDCFG_HW  0x01000 /* Enable 16-bit halfword DMA accesses to scsi */
 #define HPC3_SDCFG_SWAP 0x02000 /* Byte swap all DMA accesses */
 #define HPC3_SDCFG_EPAR 0x04000 /* Enable parity checking for DMA */
 #define HPC3_SDCFG_POLL 0x08000 /* hd_dreq polarity control */
 #define HPC3_SDCFG_ERLY 0x30000 /* hd_dreq behavior control bits */
 
        volatile u32 pconfig;   /* PIO configuration register */
-#define HPC3_SPCFG_P3   0x0003 /* Cycles to spend in P3 state */
-#define HPC3_SPCFG_P2W  0x001c /* Cycles to spend in P2 state for writes */
-#define HPC3_SPCFG_P2R  0x01e0 /* Cycles to spend in P2 state for reads */
-#define HPC3_SPCFG_P1   0x0e00 /* Cycles to spend in P1 state */
-#define HPC3_SPCFG_HW   0x1000 /* Enable 16-bit halfword PIO accesses to scsi */
+#define HPC3_SPCFG_P3  0x0003 /* Cycles to spend in P3 state */
+#define HPC3_SPCFG_P2W 0x001c /* Cycles to spend in P2 state for writes */
+#define HPC3_SPCFG_P2R 0x01e0 /* Cycles to spend in P2 state for reads */
+#define HPC3_SPCFG_P1  0x0e00 /* Cycles to spend in P1 state */
+#define HPC3_SPCFG_HW  0x1000 /* Enable 16-bit halfword PIO accesses to scsi */
 #define HPC3_SPCFG_SWAP 0x2000 /* Byte swap all PIO accesses */
 #define HPC3_SPCFG_EPAR 0x4000 /* Enable parity checking for PIO */
 #define HPC3_SPCFG_FUJI 0x8000 /* Fujitsu scsi controller mode for faster dma/pio */
@@ -108,13 +108,13 @@ struct hpc3_scsiregs {
 /* SEEQ ethernet HPC3 registers, only one seeq per HPC3. */
 struct hpc3_ethregs {
        /* Receiver registers. */
-       volatile u32 rx_cbptr;   /* current dma buffer ptr, diagnostic use only */
-       volatile u32 rx_ndptr;   /* next dma descriptor ptr */
+       volatile u32 rx_cbptr;   /* current dma buffer ptr, diagnostic use only */
+       volatile u32 rx_ndptr;   /* next dma descriptor ptr */
        u32 _unused0[0x1000/4 - 2];     /* padding */
        volatile u32 rx_bcd;    /* byte count info */
 #define HPC3_ERXBCD_BCNTMSK 0x00003fff /* bytes to be sent to memory */
-#define HPC3_ERXBCD_XIE     0x20000000 /* HPC3 interrupts cpu at end of this buf */
-#define HPC3_ERXBCD_EOX     0x80000000 /* flags this as end of descriptor chain */
+#define HPC3_ERXBCD_XIE            0x20000000 /* HPC3 interrupts cpu at end of this buf */
+#define HPC3_ERXBCD_EOX            0x80000000 /* flags this as end of descriptor chain */
 
        volatile u32 rx_ctrl;   /* control register */
 #define HPC3_ERXCTRL_STAT50 0x0000003f /* Receive status reg bits of Seeq8003 */
@@ -131,23 +131,23 @@ struct hpc3_ethregs {
        volatile u32 reset;     /* reset register */
 #define HPC3_ERST_CRESET 0x1   /* Reset dma channel and external controller */
 #define HPC3_ERST_CLRIRQ 0x2   /* Clear channel interrupt */
-#define HPC3_ERST_LBACK  0x4   /* Enable diagnostic loopback mode of Seeq8003 */
+#define HPC3_ERST_LBACK         0x4    /* Enable diagnostic loopback mode of Seeq8003 */
 
-       volatile u32 dconfig;    /* DMA configuration register */
-#define HPC3_EDCFG_D1    0x0000f /* Cycles to spend in D1 state for PIO */
-#define HPC3_EDCFG_D2    0x000f0 /* Cycles to spend in D2 state for PIO */
-#define HPC3_EDCFG_D3    0x00f00 /* Cycles to spend in D3 state for PIO */
+       volatile u32 dconfig;    /* DMA configuration register */
+#define HPC3_EDCFG_D1   0x0000f /* Cycles to spend in D1 state for PIO */
+#define HPC3_EDCFG_D2   0x000f0 /* Cycles to spend in D2 state for PIO */
+#define HPC3_EDCFG_D3   0x00f00 /* Cycles to spend in D3 state for PIO */
 #define HPC3_EDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */
 #define HPC3_EDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */
-#define HPC3_EDCFG_FEOP  0x04000 /* Bad packet marker timeout enable */
-#define HPC3_EDCFG_FIRQ  0x08000 /* Another bad packet timeout enable */
-#define HPC3_EDCFG_PTO   0x30000 /* Programmed timeout value for above two */
+#define HPC3_EDCFG_FEOP         0x04000 /* Bad packet marker timeout enable */
+#define HPC3_EDCFG_FIRQ         0x08000 /* Another bad packet timeout enable */
+#define HPC3_EDCFG_PTO  0x30000 /* Programmed timeout value for above two */
 
-       volatile u32 pconfig;   /* PIO configuration register */
-#define HPC3_EPCFG_P1    0x000f /* Cycles to spend in P1 state for PIO */
-#define HPC3_EPCFG_P2    0x00f0 /* Cycles to spend in P2 state for PIO */
-#define HPC3_EPCFG_P3    0x0f00 /* Cycles to spend in P3 state for PIO */
-#define HPC3_EPCFG_TST   0x1000 /* Diagnistic ram test feature bit */
+       volatile u32 pconfig;   /* PIO configuration register */
+#define HPC3_EPCFG_P1   0x000f /* Cycles to spend in P1 state for PIO */
+#define HPC3_EPCFG_P2   0x00f0 /* Cycles to spend in P2 state for PIO */
+#define HPC3_EPCFG_P3   0x0f00 /* Cycles to spend in P3 state for PIO */
+#define HPC3_EPCFG_TST  0x1000 /* Diagnistic ram test feature bit */
 
        u32 _unused2[0x1000/4 - 8];     /* padding */
 
@@ -158,9 +158,9 @@ struct hpc3_ethregs {
        volatile u32 tx_bcd;            /* byte count info */
 #define HPC3_ETXBCD_BCNTMSK 0x00003fff /* bytes to be read from memory */
 #define HPC3_ETXBCD_ESAMP   0x10000000 /* if set, too late to add descriptor */
-#define HPC3_ETXBCD_XIE     0x20000000 /* Interrupt cpu at end of cur desc */
-#define HPC3_ETXBCD_EOP     0x40000000 /* Last byte of cur buf is end of packet */
-#define HPC3_ETXBCD_EOX     0x80000000 /* This buf is the end of desc chain */
+#define HPC3_ETXBCD_XIE            0x20000000  /* Interrupt cpu at end of cur desc */
+#define HPC3_ETXBCD_EOP            0x40000000  /* Last byte of cur buf is end of packet */
+#define HPC3_ETXBCD_EOX            0x80000000  /* This buf is the end of desc chain */
 
        volatile u32 tx_ctrl;           /* control register */
 #define HPC3_ETXCTRL_STAT30 0x0000000f /* Rdonly copy of seeq tx stat reg */
@@ -215,10 +215,10 @@ struct hpc3_regs {
 
        volatile u32 istat1;            /* Irq status, only bits <9:5> reliable. */
        volatile u32 bestat;            /* Bus error interrupt status reg. */
-#define HPC3_BESTAT_BLMASK     0x000ff /* Bus lane where bad parity occurred */
-#define HPC3_BESTAT_CTYPE      0x00100 /* Bus cycle type, 0=PIO 1=DMA */
+#define HPC3_BESTAT_BLMASK     0x000ff /* Bus lane where bad parity occurred */
+#define HPC3_BESTAT_CTYPE      0x00100 /* Bus cycle type, 0=PIO 1=DMA */
 #define HPC3_BESTAT_PIDSHIFT   9
-#define HPC3_BESTAT_PIDMASK    0x3f700 /* DMA channel parity identifier */
+#define HPC3_BESTAT_PIDMASK    0x3f700 /* DMA channel parity identifier */
 
        u32 _unused1[0x14000/4 - 5];    /* padding */
 
@@ -259,7 +259,7 @@ struct hpc3_regs {
 #define HPC3_DMACFG_RTIME              0x00200000
        /* 5 bit burst count for DMA device */
 #define HPC3_DMACFG_BURST_MASK         0x07c00000
-#define HPC3_DMACFG_BURST_SHIFT        22
+#define HPC3_DMACFG_BURST_SHIFT 22
        /* Use live pbus_dreq unsynchronized signal */
 #define HPC3_DMACFG_DRQLIVE            0x08000000
        volatile u32 pbus_piocfg[16][64];
@@ -288,20 +288,20 @@ struct hpc3_regs {
 
        /* PBUS PROM control regs. */
        volatile u32 pbus_promwe;       /* PROM write enable register */
-#define HPC3_PROM_WENAB        0x1     /* Enable writes to the PROM */
+#define HPC3_PROM_WENAB 0x1    /* Enable writes to the PROM */
 
        u32 _unused5[0x0800/4 - 1];
        volatile u32 pbus_promswap;     /* Chip select swap reg */
 #define HPC3_PROM_SWAP 0x1     /* invert GIO addr bit to select prom0 or prom1 */
 
        u32 _unused6[0x0800/4 - 1];
-       volatile u32 pbus_gout; /* PROM general purpose output reg */
+       volatile u32 pbus_gout; /* PROM general purpose output reg */
 #define HPC3_PROM_STAT 0x1     /* General purpose status bit in gout */
 
        u32 _unused7[0x1000/4 - 1];
        volatile u32 rtcregs[14];       /* Dallas clock registers */
        u32 _unused8[50];
-       volatile u32 bbram[8192-50-14]; /* Battery backed ram */
+       volatile u32 bbram[8192-50-14]; /* Battery backed ram */
 };
 
 /*
index 380347b..53c6b1c 100644 (file)
@@ -138,7 +138,7 @@ struct sgioc_regs {
        u8 _sysid[3];
        volatile u8 sysid;
 #define SGIOC_SYSID_FULLHOUSE  0x01
-#define SGIOC_SYSID_BOARDREV(x)        (((x) & 0x1e) >> 1)
+#define SGIOC_SYSID_BOARDREV(x) (((x) & 0x1e) >> 1)
 #define SGIOC_SYSID_CHIPREV(x) (((x) & 0xe0) >> 5)
        u32 _unused2;
        u8 _read[3];
@@ -150,7 +150,7 @@ struct sgioc_regs {
 #define SGIOC_DMASEL_ISDNB     0x01    /* enable isdn B */
 #define SGIOC_DMASEL_ISDNA     0x02    /* enable isdn A */
 #define SGIOC_DMASEL_PPORT     0x04    /* use parallel DMA */
-#define SGIOC_DMASEL_SCLK667MHZ        0x10    /* use 6.67MHZ serial clock */
+#define SGIOC_DMASEL_SCLK667MHZ 0x10   /* use 6.67MHZ serial clock */
 #define SGIOC_DMASEL_SCLKEXT   0x20    /* use external serial clock */
        u32 _unused4;
        u8 _reset[3];
index c0501f9..8db1a35 100644 (file)
@@ -38,8 +38,8 @@
 
 #define SGI_SOFT_0_IRQ SGINT_CPU + 0
 #define SGI_SOFT_1_IRQ SGINT_CPU + 1
-#define SGI_LOCAL_0_IRQ        SGINT_CPU + 2
-#define SGI_LOCAL_1_IRQ        SGINT_CPU + 3
+#define SGI_LOCAL_0_IRQ SGINT_CPU + 2
+#define SGI_LOCAL_1_IRQ SGINT_CPU + 3
 #define SGI_8254_0_IRQ SGINT_CPU + 4
 #define SGI_8254_1_IRQ SGINT_CPU + 5
 #define SGI_BUSERR_IRQ SGINT_CPU + 6
@@ -51,7 +51,7 @@
 #define SGI_WD93_1_IRQ SGINT_LOCAL0 + 2        /* 2nd onboard WD93 */
 #define SGI_ENET_IRQ   SGINT_LOCAL0 + 3        /* onboard ethernet */
 #define SGI_MCDMA_IRQ  SGINT_LOCAL0 + 4        /* MC DMA done */
-#define SGI_PARPORT_IRQ        SGINT_LOCAL0 + 5        /* Parallel port */
+#define SGI_PARPORT_IRQ SGINT_LOCAL0 + 5       /* Parallel port */
 #define SGI_GIO_1_IRQ  SGINT_LOCAL0 + 6        /* GE / GIO-1 / 2nd-HPC */
 #define SGI_MAP_0_IRQ  SGINT_LOCAL0 + 7        /* Mappable interrupt 0 */
 
index 1576c23..3a070ce 100644 (file)
@@ -29,10 +29,10 @@ struct sgimc_regs {
 #define SGIMC_CCTRL0_IENAB     0x00002000 /* Allow interrupts from MC */
 #define SGIMC_CCTRL0_ESNOOP    0x00004000 /* Snooping I/O enable */
 #define SGIMC_CCTRL0_EPROMWR   0x00008000 /* Prom writes from cpu enable */
-#define SGIMC_CCTRL0_WRESETPMEM        0x00010000 /* Perform warm reset, preserves mem */
+#define SGIMC_CCTRL0_WRESETPMEM 0x00010000 /* Perform warm reset, preserves mem */
 #define SGIMC_CCTRL0_LENDIAN   0x00020000 /* Put MC in little-endian mode */
-#define SGIMC_CCTRL0_WRESETDMEM        0x00040000 /* Warm reset, destroys mem contents */
-#define SGIMC_CCTRL0_CMEMBADPAR        0x02000000 /* Generate bad perr from cpu to mem */
+#define SGIMC_CCTRL0_WRESETDMEM 0x00040000 /* Warm reset, destroys mem contents */
+#define SGIMC_CCTRL0_CMEMBADPAR 0x02000000 /* Generate bad perr from cpu to mem */
 #define SGIMC_CCTRL0_R4KNOCHKPARR 0x04000000 /* Don't chk parity on mem data reads */
 #define SGIMC_CCTRL0_GIOBTOB   0x08000000 /* Allow GIO back to back writes */
        u32 _unused1;
@@ -40,13 +40,13 @@ struct sgimc_regs {
 #define SGIMC_CCTRL1_EGIOTIMEO 0x00000010 /* GIO bus timeout enable */
 #define SGIMC_CCTRL1_FIXEDEHPC 0x00001000 /* Fixed HPC endianness */
 #define SGIMC_CCTRL1_LITTLEHPC 0x00002000 /* Little endian HPC */
-#define SGIMC_CCTRL1_FIXEDEEXP0        0x00004000 /* Fixed EXP0 endianness */
-#define SGIMC_CCTRL1_LITTLEEXP0        0x00008000 /* Little endian EXP0 */
-#define SGIMC_CCTRL1_FIXEDEEXP1        0x00010000 /* Fixed EXP1 endianness */
-#define SGIMC_CCTRL1_LITTLEEXP1        0x00020000 /* Little endian EXP1 */
+#define SGIMC_CCTRL1_FIXEDEEXP0 0x00004000 /* Fixed EXP0 endianness */
+#define SGIMC_CCTRL1_LITTLEEXP0 0x00008000 /* Little endian EXP0 */
+#define SGIMC_CCTRL1_FIXEDEEXP1 0x00010000 /* Fixed EXP1 endianness */
+#define SGIMC_CCTRL1_LITTLEEXP1 0x00020000 /* Little endian EXP1 */
 
        u32 _unused2;
-       volatile u32 watchdogt; /* Watchdog reg rdonly, write clears */
+       volatile u32 watchdogt; /* Watchdog reg rdonly, write clears */
 
        u32 _unused3;
        volatile u32 systemid;  /* MC system ID register, readonly */
@@ -81,11 +81,11 @@ struct sgimc_regs {
 #define SGIMC_GIOPAR_RTIMEGFX  0x00000040 /* GFX device has realtime attr */
 #define SGIMC_GIOPAR_RTIMEEXP0 0x00000080 /* EXP(slot0) has realtime attr */
 #define SGIMC_GIOPAR_RTIMEEXP1 0x00000100 /* EXP(slot1) has realtime attr */
-#define SGIMC_GIOPAR_MASTEREISA        0x00000200 /* EISA bus can act as bus master */
+#define SGIMC_GIOPAR_MASTEREISA 0x00000200 /* EISA bus can act as bus master */
 #define SGIMC_GIOPAR_ONEBUS    0x00000400 /* Exists one GIO64 pipelined bus */
 #define SGIMC_GIOPAR_MASTERGFX 0x00000800 /* GFX can act as a bus master */
-#define SGIMC_GIOPAR_MASTEREXP0        0x00001000 /* EXP(slot0) can bus master */
-#define SGIMC_GIOPAR_MASTEREXP1        0x00002000 /* EXP(slot1) can bus master */
+#define SGIMC_GIOPAR_MASTEREXP0 0x00001000 /* EXP(slot0) can bus master */
+#define SGIMC_GIOPAR_MASTEREXP1 0x00002000 /* EXP(slot1) can bus master */
 #define SGIMC_GIOPAR_PLINEEXP0 0x00004000 /* EXP(slot0) has pipeline attr */
 #define SGIMC_GIOPAR_PLINEEXP1 0x00008000 /* EXP(slot1) has pipeline attr */
 
@@ -107,9 +107,9 @@ struct sgimc_regs {
 #define SGIMC_MCONFIG_SBANKS   0x00004000 /* Number of subbanks */
 
        u32 _unused13;
-       volatile u32 cmacc;        /* Mem access config for CPU */
+       volatile u32 cmacc;        /* Mem access config for CPU */
        u32 _unused14;
-       volatile u32 gmacc;        /* Mem access config for GIO */
+       volatile u32 gmacc;        /* Mem access config for GIO */
 
        /* This define applies to both cmacc and gmacc registers above. */
 #define SGIMC_MACC_ALIASBIG    0x20000000 /* 512MB home for alias */
index c950691..96b1a07 100644 (file)
@@ -28,16 +28,16 @@ struct pi1_regs {
 #define PI1_STAT_BUSY          0x80
        u8 _dmactrl[3];
        volatile u8 dmactrl;
-#define PI1_DMACTRL_FIFO_EMPTY 0x01    /* fifo empty R/O */
-#define PI1_DMACTRL_ABORT      0x02    /* reset DMA and internal fifo W/O */
-#define PI1_DMACTRL_STDMODE    0x00    /* bits 2-3 */
-#define PI1_DMACTRL_SGIMODE    0x04    /* bits 2-3 */
-#define PI1_DMACTRL_RICOHMODE  0x08    /* bits 2-3 */
-#define PI1_DMACTRL_HPMODE     0x0c    /* bits 2-3 */
-#define PI1_DMACTRL_BLKMODE    0x10    /* block mode */
-#define PI1_DMACTRL_FIFO_CLEAR 0x20    /* clear fifo W/O */
-#define PI1_DMACTRL_READ       0x40    /* read */
-#define PI1_DMACTRL_RUN                0x80    /* pedal to the metal */
+#define PI1_DMACTRL_FIFO_EMPTY 0x01    /* fifo empty R/O */
+#define PI1_DMACTRL_ABORT      0x02    /* reset DMA and internal fifo W/O */
+#define PI1_DMACTRL_STDMODE    0x00    /* bits 2-3 */
+#define PI1_DMACTRL_SGIMODE    0x04    /* bits 2-3 */
+#define PI1_DMACTRL_RICOHMODE  0x08    /* bits 2-3 */
+#define PI1_DMACTRL_HPMODE     0x0c    /* bits 2-3 */
+#define PI1_DMACTRL_BLKMODE    0x10    /* block mode */
+#define PI1_DMACTRL_FIFO_CLEAR 0x20    /* clear fifo W/O */
+#define PI1_DMACTRL_READ       0x40    /* read */
+#define PI1_DMACTRL_RUN                0x80    /* pedal to the metal */
        u8 _intstat[3];
        volatile u8 intstat;
 #define PI1_INTSTAT_ACK                0x04
index f581157..753275a 100644 (file)
@@ -37,7 +37,7 @@ extern char prom_getchar(void);
  * in chain is CURR is NULL.
  */
 extern struct linux_mdesc *prom_getmdesc(struct linux_mdesc *curr);
-#define PROM_NULL_MDESC   ((struct linux_mdesc *) 0)
+#define PROM_NULL_MDESC          ((struct linux_mdesc *) 0)
 
 /* Called by prom_init to setup the physical memory pmemblock
  * array.
index 3dce7c7..26ddfff 100644 (file)
 #include <asm/fw/arc/types.h>
 
 /* Various ARCS error codes. */
-#define PROM_ESUCCESS                   0x00
-#define PROM_E2BIG                      0x01
-#define PROM_EACCESS                    0x02
-#define PROM_EAGAIN                     0x03
-#define PROM_EBADF                      0x04
-#define PROM_EBUSY                      0x05
-#define PROM_EFAULT                     0x06
-#define PROM_EINVAL                     0x07
-#define PROM_EIO                        0x08
-#define PROM_EISDIR                     0x09
-#define PROM_EMFILE                     0x0a
-#define PROM_EMLINK                     0x0b
-#define PROM_ENAMETOOLONG               0x0c
-#define PROM_ENODEV                     0x0d
-#define PROM_ENOENT                     0x0e
-#define PROM_ENOEXEC                    0x0f
-#define PROM_ENOMEM                     0x10
-#define PROM_ENOSPC                     0x11
-#define PROM_ENOTDIR                    0x12
-#define PROM_ENOTTY                     0x13
-#define PROM_ENXIO                      0x14
-#define PROM_EROFS                      0x15
+#define PROM_ESUCCESS                  0x00
+#define PROM_E2BIG                     0x01
+#define PROM_EACCESS                   0x02
+#define PROM_EAGAIN                    0x03
+#define PROM_EBADF                     0x04
+#define PROM_EBUSY                     0x05
+#define PROM_EFAULT                    0x06
+#define PROM_EINVAL                    0x07
+#define PROM_EIO                       0x08
+#define PROM_EISDIR                    0x09
+#define PROM_EMFILE                    0x0a
+#define PROM_EMLINK                    0x0b
+#define PROM_ENAMETOOLONG              0x0c
+#define PROM_ENODEV                    0x0d
+#define PROM_ENOENT                    0x0e
+#define PROM_ENOEXEC                   0x0f
+#define PROM_ENOMEM                    0x10
+#define PROM_ENOSPC                    0x11
+#define PROM_ENOTDIR                   0x12
+#define PROM_ENOTTY                    0x13
+#define PROM_ENXIO                     0x14
+#define PROM_EROFS                     0x15
 /* SGI ARCS specific errno's. */
-#define PROM_EADDRNOTAVAIL              0x1f
-#define PROM_ETIMEDOUT                  0x20
-#define PROM_ECONNABORTED               0x21
-#define PROM_ENOCONNECT                 0x22
+#define PROM_EADDRNOTAVAIL             0x1f
+#define PROM_ETIMEDOUT                 0x20
+#define PROM_ECONNABORTED              0x21
+#define PROM_ENOCONNECT                        0x22
 
 /* Device classes, types, and identifiers for prom
  * device inventory queries.
@@ -77,14 +77,14 @@ enum linux_identifier {
 
 /* A prom device tree component. */
 struct linux_component {
-       enum linux_devclass     class;  /* node class */
-       enum linux_devtypes     type;   /* node type */
-       enum linux_identifier   iflags; /* node flags */
-       USHORT                  vers;   /* node version */
-       USHORT                  rev;    /* node revision */
-       ULONG                   key;    /* completely magic */
-       ULONG                   amask;  /* XXX affinity mask??? */
-       ULONG                   cdsize; /* size of configuration data */
+       enum linux_devclass     class;  /* node class */
+       enum linux_devtypes     type;   /* node type */
+       enum linux_identifier   iflags; /* node flags */
+       USHORT                  vers;   /* node version */
+       USHORT                  rev;    /* node revision */
+       ULONG                   key;    /* completely magic */
+       ULONG                   amask;  /* XXX affinity mask??? */
+       ULONG                   cdsize; /* size of configuration data */
        ULONG                   ilen;   /* length of string identifier */
        _PULONG                 iname;  /* string identifier */
 };
@@ -177,13 +177,13 @@ struct linux_finfo {
        struct linux_bigint   end;
        struct linux_bigint   cur;
        enum linux_devtypes   dtype;
-       unsigned long         namelen;
-       unsigned char         attr;
-       char                  name[32]; /* XXX imperical, should be define */
+       unsigned long         namelen;
+       unsigned char         attr;
+       char                  name[32]; /* XXX imperical, should be define */
 };
 
 /* This describes the vector containing function pointers to the ARC
-   firmware functions.  */
+   firmware functions. */
 struct linux_romvec {
        LONG    load;                   /* Load an executable image. */
        LONG    invoke;                 /* Invoke a standalong image. */
@@ -244,7 +244,7 @@ struct linux_romvec {
  */
 typedef struct _SYSTEM_PARAMETER_BLOCK {
        ULONG                   magic;          /* magic cookie */
-#define PROMBLOCK_MAGIC      0x53435241
+#define PROMBLOCK_MAGIC             0x53435241
 
        ULONG                   len;            /* length of parm block */
        USHORT                  ver;            /* ARCS firmware version */
@@ -294,16 +294,16 @@ struct linux_cdata {
 };
 
 /* Common SGI ARCS firmware file descriptors. */
-#define SGIPROM_STDIN     0
-#define SGIPROM_STDOUT    1
+#define SGIPROM_STDIN    0
+#define SGIPROM_STDOUT   1
 
 /* Common SGI ARCS firmware file types. */
-#define SGIPROM_ROFILE    0x01  /* read-only file */
-#define SGIPROM_HFILE     0x02  /* hidden file */
-#define SGIPROM_SFILE     0x04  /* System file */
-#define SGIPROM_AFILE     0x08  /* Archive file */
-#define SGIPROM_DFILE     0x10  /* Directory file */
-#define SGIPROM_DELFILE   0x20  /* Deleted file */
+#define SGIPROM_ROFILE   0x01  /* read-only file */
+#define SGIPROM_HFILE    0x02  /* hidden file */
+#define SGIPROM_SFILE    0x04  /* System file */
+#define SGIPROM_AFILE    0x08  /* Archive file */
+#define SGIPROM_DFILE    0x10  /* Directory file */
+#define SGIPROM_DELFILE          0x20  /* Deleted file */
 
 /* SGI ARCS boot record information. */
 struct sgi_partition {
@@ -318,7 +318,7 @@ struct sgi_partition {
        unsigned char tsect0, tsect1, tsect2, tsect3;
 };
 
-#define SGIBBLOCK_MAGIC   0xaa55
+#define SGIBBLOCK_MAGIC          0xaa55
 #define SGIBBLOCK_MAXPART 0x0004
 
 struct sgi_bootblock {
@@ -332,34 +332,34 @@ struct sgi_bparm_block {
        unsigned short bytes_sect;    /* bytes per sector */
        unsigned char  sect_clust;    /* sectors per cluster */
        unsigned short sect_resv;     /* reserved sectors */
-       unsigned char  nfats;         /* # of allocation tables */
+       unsigned char  nfats;         /* # of allocation tables */
        unsigned short nroot_dirents; /* # of root directory entries */
        unsigned short sect_volume;   /* sectors in volume */
        unsigned char  media_type;    /* media descriptor */
        unsigned short sect_fat;      /* sectors per allocation table */
        unsigned short sect_track;    /* sectors per track */
-       unsigned short nheads;        /* # of heads */
-       unsigned short nhsects;       /* # of hidden sectors */
+       unsigned short nheads;        /* # of heads */
+       unsigned short nhsects;       /* # of hidden sectors */
 };
 
 struct sgi_bsector {
-       unsigned char   jmpinfo[3];
-       unsigned char   manuf_name[8];
+       unsigned char   jmpinfo[3];
+       unsigned char   manuf_name[8];
        struct sgi_bparm_block info;
 };
 
 /* Debugging block used with SGI symmon symbolic debugger. */
-#define SMB_DEBUG_MAGIC   0xfeeddead
+#define SMB_DEBUG_MAGIC          0xfeeddead
 struct linux_smonblock {
-       unsigned long   magic;
-       void            (*handler)(void);  /* Breakpoint routine. */
-       unsigned long   dtable_base;       /* Base addr of dbg table. */
-       int             (*printf)(const char *fmt, ...);
-       unsigned long   btable_base;       /* Breakpoint table. */
-       unsigned long   mpflushreqs;       /* SMP cache flush request list. */
-       unsigned long   ntab;              /* Name table. */
-       unsigned long   stab;              /* Symbol table. */
-       int             smax;              /* Max # of symbols. */
+       unsigned long   magic;
+       void            (*handler)(void);  /* Breakpoint routine. */
+       unsigned long   dtable_base;       /* Base addr of dbg table. */
+       int             (*printf)(const char *fmt, ...);
+       unsigned long   btable_base;       /* Breakpoint table. */
+       unsigned long   mpflushreqs;       /* SMP cache flush request list. */
+       unsigned long   ntab;              /* Name table. */
+       unsigned long   stab;              /* Symbol table. */
+       int             smax;              /* Max # of symbols. */
 };
 
 /*
@@ -369,7 +369,7 @@ struct linux_smonblock {
 #if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
 
 #define __arc_clobbers                                                 \
-       "$2", "$3" /* ... */, "$8", "$9", "$10", "$11",                         \
+       "$2", "$3" /* ... */, "$8", "$9", "$10", "$11",                         \
        "$12", "$13", "$14", "$15", "$16", "$24", "$25", "$31"
 
 #define ARC_CALL0(dest)                                                        \
@@ -447,7 +447,7 @@ struct linux_smonblock {
        "daddu\t$29, 32\n\t"                                            \
        "move\t%0, $2"                                                  \
        : "=r" (__res), "=r" (__vec)                                    \
-       : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3),              \
+       : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3),              \
          "r" (__a4)                                                    \
        : __arc_clobbers);                                              \
        __res;                                                          \
@@ -468,8 +468,8 @@ struct linux_smonblock {
        "daddu\t$29, 32\n\t"                                            \
        "move\t%0, $2"                                                  \
        : "=r" (__res), "=r" (__vec)                                    \
-       : "1" (__vec),                                                  \
-         "r" (__a1), "r" (__a2), "r" (__a3), "r" (__a4),               \
+       : "1" (__vec),                                                  \
+         "r" (__a1), "r" (__a2), "r" (__a3), "r" (__a4),               \
          "r" (__a5)                                                    \
        : __arc_clobbers);                                              \
        __res;                                                          \
@@ -512,7 +512,7 @@ struct linux_smonblock {
        long __a1 = (long) (a1);                                        \
        long __a2 = (long) (a2);                                        \
        long __a3 = (long) (a3);                                        \
-       long (*__vec)(long, long, long) = (void *) romvec->dest;        \
+       long (*__vec)(long, long, long) = (void *) romvec->dest;        \
                                                                        \
        __res = __vec(__a1, __a2, __a3);                                \
        __res;                                                          \
index 0929072..324d040 100644 (file)
@@ -8,6 +8,6 @@
 
 #define __ARCH_FORCE_SHMLBA    1
 
-#define        SHMLBA 0x40000                  /* attach addr a multiple of this */
+#define SHMLBA 0x40000                 /* attach addr a multiple of this */
 
 #endif /* _ASM_SHMPARAM_H */
index fffb224..6b82ed3 100644 (file)
  * Interrupt sources (Table 22)
  */
 
-#define K_BCM1480_INT_SOURCES               128
+#define K_BCM1480_INT_SOURCES              128
 
 #define _BCM1480_INT_HIGH(k)   (k)
 #define _BCM1480_INT_LOW(k)    ((k)+64)
 
-#define K_BCM1480_INT_ADDR_TRAP             _BCM1480_INT_HIGH(1)
-#define K_BCM1480_INT_GPIO_0                _BCM1480_INT_HIGH(4)
-#define K_BCM1480_INT_GPIO_1                _BCM1480_INT_HIGH(5)
-#define K_BCM1480_INT_GPIO_2                _BCM1480_INT_HIGH(6)
-#define K_BCM1480_INT_GPIO_3                _BCM1480_INT_HIGH(7)
-#define K_BCM1480_INT_PCI_INTA              _BCM1480_INT_HIGH(8)
-#define K_BCM1480_INT_PCI_INTB              _BCM1480_INT_HIGH(9)
-#define K_BCM1480_INT_PCI_INTC              _BCM1480_INT_HIGH(10)
-#define K_BCM1480_INT_PCI_INTD              _BCM1480_INT_HIGH(11)
-#define K_BCM1480_INT_CYCLE_CP0             _BCM1480_INT_HIGH(12)
-#define K_BCM1480_INT_CYCLE_CP1             _BCM1480_INT_HIGH(13)
-#define K_BCM1480_INT_CYCLE_CP2             _BCM1480_INT_HIGH(14)
-#define K_BCM1480_INT_CYCLE_CP3             _BCM1480_INT_HIGH(15)
-#define K_BCM1480_INT_TIMER_0               _BCM1480_INT_HIGH(20)
-#define K_BCM1480_INT_TIMER_1               _BCM1480_INT_HIGH(21)
-#define K_BCM1480_INT_TIMER_2               _BCM1480_INT_HIGH(22)
-#define K_BCM1480_INT_TIMER_3               _BCM1480_INT_HIGH(23)
-#define K_BCM1480_INT_DM_CH_0               _BCM1480_INT_HIGH(28)
-#define K_BCM1480_INT_DM_CH_1               _BCM1480_INT_HIGH(29)
-#define K_BCM1480_INT_DM_CH_2               _BCM1480_INT_HIGH(30)
-#define K_BCM1480_INT_DM_CH_3               _BCM1480_INT_HIGH(31)
-#define K_BCM1480_INT_MAC_0                 _BCM1480_INT_HIGH(36)
-#define K_BCM1480_INT_MAC_0_CH1             _BCM1480_INT_HIGH(37)
-#define K_BCM1480_INT_MAC_1                 _BCM1480_INT_HIGH(38)
-#define K_BCM1480_INT_MAC_1_CH1             _BCM1480_INT_HIGH(39)
-#define K_BCM1480_INT_MAC_2                 _BCM1480_INT_HIGH(40)
-#define K_BCM1480_INT_MAC_2_CH1             _BCM1480_INT_HIGH(41)
-#define K_BCM1480_INT_MAC_3                 _BCM1480_INT_HIGH(42)
-#define K_BCM1480_INT_MAC_3_CH1             _BCM1480_INT_HIGH(43)
-#define K_BCM1480_INT_PMI_LOW               _BCM1480_INT_HIGH(52)
-#define K_BCM1480_INT_PMI_HIGH              _BCM1480_INT_HIGH(53)
-#define K_BCM1480_INT_PMO_LOW               _BCM1480_INT_HIGH(54)
-#define K_BCM1480_INT_PMO_HIGH              _BCM1480_INT_HIGH(55)
-#define K_BCM1480_INT_MBOX_0_0              _BCM1480_INT_HIGH(56)
-#define K_BCM1480_INT_MBOX_0_1              _BCM1480_INT_HIGH(57)
-#define K_BCM1480_INT_MBOX_0_2              _BCM1480_INT_HIGH(58)
-#define K_BCM1480_INT_MBOX_0_3              _BCM1480_INT_HIGH(59)
-#define K_BCM1480_INT_MBOX_1_0              _BCM1480_INT_HIGH(60)
-#define K_BCM1480_INT_MBOX_1_1              _BCM1480_INT_HIGH(61)
-#define K_BCM1480_INT_MBOX_1_2              _BCM1480_INT_HIGH(62)
-#define K_BCM1480_INT_MBOX_1_3              _BCM1480_INT_HIGH(63)
+#define K_BCM1480_INT_ADDR_TRAP                    _BCM1480_INT_HIGH(1)
+#define K_BCM1480_INT_GPIO_0               _BCM1480_INT_HIGH(4)
+#define K_BCM1480_INT_GPIO_1               _BCM1480_INT_HIGH(5)
+#define K_BCM1480_INT_GPIO_2               _BCM1480_INT_HIGH(6)
+#define K_BCM1480_INT_GPIO_3               _BCM1480_INT_HIGH(7)
+#define K_BCM1480_INT_PCI_INTA             _BCM1480_INT_HIGH(8)
+#define K_BCM1480_INT_PCI_INTB             _BCM1480_INT_HIGH(9)
+#define K_BCM1480_INT_PCI_INTC             _BCM1480_INT_HIGH(10)
+#define K_BCM1480_INT_PCI_INTD             _BCM1480_INT_HIGH(11)
+#define K_BCM1480_INT_CYCLE_CP0                    _BCM1480_INT_HIGH(12)
+#define K_BCM1480_INT_CYCLE_CP1                    _BCM1480_INT_HIGH(13)
+#define K_BCM1480_INT_CYCLE_CP2                    _BCM1480_INT_HIGH(14)
+#define K_BCM1480_INT_CYCLE_CP3                    _BCM1480_INT_HIGH(15)
+#define K_BCM1480_INT_TIMER_0              _BCM1480_INT_HIGH(20)
+#define K_BCM1480_INT_TIMER_1              _BCM1480_INT_HIGH(21)
+#define K_BCM1480_INT_TIMER_2              _BCM1480_INT_HIGH(22)
+#define K_BCM1480_INT_TIMER_3              _BCM1480_INT_HIGH(23)
+#define K_BCM1480_INT_DM_CH_0              _BCM1480_INT_HIGH(28)
+#define K_BCM1480_INT_DM_CH_1              _BCM1480_INT_HIGH(29)
+#define K_BCM1480_INT_DM_CH_2              _BCM1480_INT_HIGH(30)
+#define K_BCM1480_INT_DM_CH_3              _BCM1480_INT_HIGH(31)
+#define K_BCM1480_INT_MAC_0                _BCM1480_INT_HIGH(36)
+#define K_BCM1480_INT_MAC_0_CH1                    _BCM1480_INT_HIGH(37)
+#define K_BCM1480_INT_MAC_1                _BCM1480_INT_HIGH(38)
+#define K_BCM1480_INT_MAC_1_CH1                    _BCM1480_INT_HIGH(39)
+#define K_BCM1480_INT_MAC_2                _BCM1480_INT_HIGH(40)
+#define K_BCM1480_INT_MAC_2_CH1                    _BCM1480_INT_HIGH(41)
+#define K_BCM1480_INT_MAC_3                _BCM1480_INT_HIGH(42)
+#define K_BCM1480_INT_MAC_3_CH1                    _BCM1480_INT_HIGH(43)
+#define K_BCM1480_INT_PMI_LOW              _BCM1480_INT_HIGH(52)
+#define K_BCM1480_INT_PMI_HIGH             _BCM1480_INT_HIGH(53)
+#define K_BCM1480_INT_PMO_LOW              _BCM1480_INT_HIGH(54)
+#define K_BCM1480_INT_PMO_HIGH             _BCM1480_INT_HIGH(55)
+#define K_BCM1480_INT_MBOX_0_0             _BCM1480_INT_HIGH(56)
+#define K_BCM1480_INT_MBOX_0_1             _BCM1480_INT_HIGH(57)
+#define K_BCM1480_INT_MBOX_0_2             _BCM1480_INT_HIGH(58)
+#define K_BCM1480_INT_MBOX_0_3             _BCM1480_INT_HIGH(59)
+#define K_BCM1480_INT_MBOX_1_0             _BCM1480_INT_HIGH(60)
+#define K_BCM1480_INT_MBOX_1_1             _BCM1480_INT_HIGH(61)
+#define K_BCM1480_INT_MBOX_1_2             _BCM1480_INT_HIGH(62)
+#define K_BCM1480_INT_MBOX_1_3             _BCM1480_INT_HIGH(63)
 
-#define K_BCM1480_INT_BAD_ECC               _BCM1480_INT_LOW(1)
-#define K_BCM1480_INT_COR_ECC               _BCM1480_INT_LOW(2)
-#define K_BCM1480_INT_IO_BUS                _BCM1480_INT_LOW(3)
-#define K_BCM1480_INT_PERF_CNT              _BCM1480_INT_LOW(4)
-#define K_BCM1480_INT_SW_PERF_CNT           _BCM1480_INT_LOW(5)
-#define K_BCM1480_INT_TRACE_FREEZE          _BCM1480_INT_LOW(6)
-#define K_BCM1480_INT_SW_TRACE_FREEZE       _BCM1480_INT_LOW(7)
-#define K_BCM1480_INT_WATCHDOG_TIMER_0      _BCM1480_INT_LOW(8)
-#define K_BCM1480_INT_WATCHDOG_TIMER_1      _BCM1480_INT_LOW(9)
-#define K_BCM1480_INT_WATCHDOG_TIMER_2      _BCM1480_INT_LOW(10)
-#define K_BCM1480_INT_WATCHDOG_TIMER_3      _BCM1480_INT_LOW(11)
-#define K_BCM1480_INT_PCI_ERROR             _BCM1480_INT_LOW(16)
-#define K_BCM1480_INT_PCI_RESET             _BCM1480_INT_LOW(17)
-#define K_BCM1480_INT_NODE_CONTROLLER       _BCM1480_INT_LOW(18)
-#define K_BCM1480_INT_HOST_BRIDGE           _BCM1480_INT_LOW(19)
-#define K_BCM1480_INT_PORT_0_FATAL          _BCM1480_INT_LOW(20)
-#define K_BCM1480_INT_PORT_0_NONFATAL       _BCM1480_INT_LOW(21)
-#define K_BCM1480_INT_PORT_1_FATAL          _BCM1480_INT_LOW(22)
-#define K_BCM1480_INT_PORT_1_NONFATAL       _BCM1480_INT_LOW(23)
-#define K_BCM1480_INT_PORT_2_FATAL          _BCM1480_INT_LOW(24)
-#define K_BCM1480_INT_PORT_2_NONFATAL       _BCM1480_INT_LOW(25)
-#define K_BCM1480_INT_LDT_SMI               _BCM1480_INT_LOW(32)
-#define K_BCM1480_INT_LDT_NMI               _BCM1480_INT_LOW(33)
-#define K_BCM1480_INT_LDT_INIT              _BCM1480_INT_LOW(34)
-#define K_BCM1480_INT_LDT_STARTUP           _BCM1480_INT_LOW(35)
-#define K_BCM1480_INT_LDT_EXT               _BCM1480_INT_LOW(36)
-#define K_BCM1480_INT_SMB_0                 _BCM1480_INT_LOW(40)
-#define K_BCM1480_INT_SMB_1                 _BCM1480_INT_LOW(41)
-#define K_BCM1480_INT_PCMCIA                _BCM1480_INT_LOW(42)
-#define K_BCM1480_INT_UART_0                _BCM1480_INT_LOW(44)
-#define K_BCM1480_INT_UART_1                _BCM1480_INT_LOW(45)
-#define K_BCM1480_INT_UART_2                _BCM1480_INT_LOW(46)
-#define K_BCM1480_INT_UART_3                _BCM1480_INT_LOW(47)
-#define K_BCM1480_INT_GPIO_4                _BCM1480_INT_LOW(52)
-#define K_BCM1480_INT_GPIO_5                _BCM1480_INT_LOW(53)
-#define K_BCM1480_INT_GPIO_6                _BCM1480_INT_LOW(54)
-#define K_BCM1480_INT_GPIO_7                _BCM1480_INT_LOW(55)
-#define K_BCM1480_INT_GPIO_8                _BCM1480_INT_LOW(56)
-#define K_BCM1480_INT_GPIO_9                _BCM1480_INT_LOW(57)
-#define K_BCM1480_INT_GPIO_10               _BCM1480_INT_LOW(58)
-#define K_BCM1480_INT_GPIO_11               _BCM1480_INT_LOW(59)
-#define K_BCM1480_INT_GPIO_12               _BCM1480_INT_LOW(60)
-#define K_BCM1480_INT_GPIO_13               _BCM1480_INT_LOW(61)
-#define K_BCM1480_INT_GPIO_14               _BCM1480_INT_LOW(62)
-#define K_BCM1480_INT_GPIO_15               _BCM1480_INT_LOW(63)
+#define K_BCM1480_INT_BAD_ECC              _BCM1480_INT_LOW(1)
+#define K_BCM1480_INT_COR_ECC              _BCM1480_INT_LOW(2)
+#define K_BCM1480_INT_IO_BUS               _BCM1480_INT_LOW(3)
+#define K_BCM1480_INT_PERF_CNT             _BCM1480_INT_LOW(4)
+#define K_BCM1480_INT_SW_PERF_CNT          _BCM1480_INT_LOW(5)
+#define K_BCM1480_INT_TRACE_FREEZE         _BCM1480_INT_LOW(6)
+#define K_BCM1480_INT_SW_TRACE_FREEZE      _BCM1480_INT_LOW(7)
+#define K_BCM1480_INT_WATCHDOG_TIMER_0     _BCM1480_INT_LOW(8)
+#define K_BCM1480_INT_WATCHDOG_TIMER_1     _BCM1480_INT_LOW(9)
+#define K_BCM1480_INT_WATCHDOG_TIMER_2     _BCM1480_INT_LOW(10)
+#define K_BCM1480_INT_WATCHDOG_TIMER_3     _BCM1480_INT_LOW(11)
+#define K_BCM1480_INT_PCI_ERROR                    _BCM1480_INT_LOW(16)
+#define K_BCM1480_INT_PCI_RESET                    _BCM1480_INT_LOW(17)
+#define K_BCM1480_INT_NODE_CONTROLLER      _BCM1480_INT_LOW(18)
+#define K_BCM1480_INT_HOST_BRIDGE          _BCM1480_INT_LOW(19)
+#define K_BCM1480_INT_PORT_0_FATAL         _BCM1480_INT_LOW(20)
+#define K_BCM1480_INT_PORT_0_NONFATAL      _BCM1480_INT_LOW(21)
+#define K_BCM1480_INT_PORT_1_FATAL         _BCM1480_INT_LOW(22)
+#define K_BCM1480_INT_PORT_1_NONFATAL      _BCM1480_INT_LOW(23)
+#define K_BCM1480_INT_PORT_2_FATAL         _BCM1480_INT_LOW(24)
+#define K_BCM1480_INT_PORT_2_NONFATAL      _BCM1480_INT_LOW(25)
+#define K_BCM1480_INT_LDT_SMI              _BCM1480_INT_LOW(32)
+#define K_BCM1480_INT_LDT_NMI              _BCM1480_INT_LOW(33)
+#define K_BCM1480_INT_LDT_INIT             _BCM1480_INT_LOW(34)
+#define K_BCM1480_INT_LDT_STARTUP          _BCM1480_INT_LOW(35)
+#define K_BCM1480_INT_LDT_EXT              _BCM1480_INT_LOW(36)
+#define K_BCM1480_INT_SMB_0                _BCM1480_INT_LOW(40)
+#define K_BCM1480_INT_SMB_1                _BCM1480_INT_LOW(41)
+#define K_BCM1480_INT_PCMCIA               _BCM1480_INT_LOW(42)
+#define K_BCM1480_INT_UART_0               _BCM1480_INT_LOW(44)
+#define K_BCM1480_INT_UART_1               _BCM1480_INT_LOW(45)
+#define K_BCM1480_INT_UART_2               _BCM1480_INT_LOW(46)
+#define K_BCM1480_INT_UART_3               _BCM1480_INT_LOW(47)
+#define K_BCM1480_INT_GPIO_4               _BCM1480_INT_LOW(52)
+#define K_BCM1480_INT_GPIO_5               _BCM1480_INT_LOW(53)
+#define K_BCM1480_INT_GPIO_6               _BCM1480_INT_LOW(54)
+#define K_BCM1480_INT_GPIO_7               _BCM1480_INT_LOW(55)
+#define K_BCM1480_INT_GPIO_8               _BCM1480_INT_LOW(56)
+#define K_BCM1480_INT_GPIO_9               _BCM1480_INT_LOW(57)
+#define K_BCM1480_INT_GPIO_10              _BCM1480_INT_LOW(58)
+#define K_BCM1480_INT_GPIO_11              _BCM1480_INT_LOW(59)
+#define K_BCM1480_INT_GPIO_12              _BCM1480_INT_LOW(60)
+#define K_BCM1480_INT_GPIO_13              _BCM1480_INT_LOW(61)
+#define K_BCM1480_INT_GPIO_14              _BCM1480_INT_LOW(62)
+#define K_BCM1480_INT_GPIO_15              _BCM1480_INT_LOW(63)
 
 /*
  * Mask values for each interrupt
  */
 
-#define _BCM1480_INT_MASK(w, n)              _SB_MAKEMASK(w, ((n) & 0x3F))
-#define _BCM1480_INT_MASK1(n)               _SB_MAKEMASK1(((n) & 0x3F))
-#define _BCM1480_INT_OFFSET(n)              (((n) & 0x40) << 6)
+#define _BCM1480_INT_MASK(w, n)                     _SB_MAKEMASK(w, ((n) & 0x3F))
+#define _BCM1480_INT_MASK1(n)              _SB_MAKEMASK1(((n) & 0x3F))
+#define _BCM1480_INT_OFFSET(n)             (((n) & 0x40) << 6)
 
-#define M_BCM1480_INT_CASCADE               _BCM1480_INT_MASK1(_BCM1480_INT_HIGH(0))
+#define M_BCM1480_INT_CASCADE              _BCM1480_INT_MASK1(_BCM1480_INT_HIGH(0))
 
-#define M_BCM1480_INT_ADDR_TRAP             _BCM1480_INT_MASK1(K_BCM1480_INT_ADDR_TRAP)
-#define M_BCM1480_INT_GPIO_0                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_0)
-#define M_BCM1480_INT_GPIO_1                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_1)
-#define M_BCM1480_INT_GPIO_2                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_2)
-#define M_BCM1480_INT_GPIO_3                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_3)
-#define M_BCM1480_INT_PCI_INTA              _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTA)
-#define M_BCM1480_INT_PCI_INTB              _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTB)
-#define M_BCM1480_INT_PCI_INTC              _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTC)
-#define M_BCM1480_INT_PCI_INTD              _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTD)
-#define M_BCM1480_INT_CYCLE_CP0             _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP0)
-#define M_BCM1480_INT_CYCLE_CP1             _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP1)
-#define M_BCM1480_INT_CYCLE_CP2             _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP2)
-#define M_BCM1480_INT_CYCLE_CP3             _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP3)
-#define M_BCM1480_INT_TIMER_0               _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_0)
-#define M_BCM1480_INT_TIMER_1               _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_1)
-#define M_BCM1480_INT_TIMER_2               _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_2)
-#define M_BCM1480_INT_TIMER_3               _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_3)
-#define M_BCM1480_INT_DM_CH_0               _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_0)
-#define M_BCM1480_INT_DM_CH_1               _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_1)
-#define M_BCM1480_INT_DM_CH_2               _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_2)
-#define M_BCM1480_INT_DM_CH_3               _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_3)
-#define M_BCM1480_INT_MAC_0                 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0)
-#define M_BCM1480_INT_MAC_0_CH1             _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0_CH1)
-#define M_BCM1480_INT_MAC_1                 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1)
-#define M_BCM1480_INT_MAC_1_CH1             _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1_CH1)
-#define M_BCM1480_INT_MAC_2                 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2)
-#define M_BCM1480_INT_MAC_2_CH1             _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2_CH1)
-#define M_BCM1480_INT_MAC_3                 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3)
-#define M_BCM1480_INT_MAC_3_CH1             _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3_CH1)
-#define M_BCM1480_INT_PMI_LOW               _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_LOW)
-#define M_BCM1480_INT_PMI_HIGH              _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH)
-#define M_BCM1480_INT_PMO_LOW               _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW)
-#define M_BCM1480_INT_PMO_HIGH              _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH)
-#define M_BCM1480_INT_MBOX_ALL              _BCM1480_INT_MASK(8, K_BCM1480_INT_MBOX_0_0)
-#define M_BCM1480_INT_MBOX_0_0              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0)
-#define M_BCM1480_INT_MBOX_0_1              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1)
-#define M_BCM1480_INT_MBOX_0_2              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2)
-#define M_BCM1480_INT_MBOX_0_3              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_3)
-#define M_BCM1480_INT_MBOX_1_0              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_0)
-#define M_BCM1480_INT_MBOX_1_1              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_1)
-#define M_BCM1480_INT_MBOX_1_2              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_2)
-#define M_BCM1480_INT_MBOX_1_3              _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_3)
-#define M_BCM1480_INT_BAD_ECC               _BCM1480_INT_MASK1(K_BCM1480_INT_BAD_ECC)
-#define M_BCM1480_INT_COR_ECC               _BCM1480_INT_MASK1(K_BCM1480_INT_COR_ECC)
-#define M_BCM1480_INT_IO_BUS                _BCM1480_INT_MASK1(K_BCM1480_INT_IO_BUS)
-#define M_BCM1480_INT_PERF_CNT              _BCM1480_INT_MASK1(K_BCM1480_INT_PERF_CNT)
-#define M_BCM1480_INT_SW_PERF_CNT           _BCM1480_INT_MASK1(K_BCM1480_INT_SW_PERF_CNT)
-#define M_BCM1480_INT_TRACE_FREEZE          _BCM1480_INT_MASK1(K_BCM1480_INT_TRACE_FREEZE)
-#define M_BCM1480_INT_SW_TRACE_FREEZE       _BCM1480_INT_MASK1(K_BCM1480_INT_SW_TRACE_FREEZE)
-#define M_BCM1480_INT_WATCHDOG_TIMER_0      _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_0)
-#define M_BCM1480_INT_WATCHDOG_TIMER_1      _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_1)
-#define M_BCM1480_INT_WATCHDOG_TIMER_2      _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_2)
-#define M_BCM1480_INT_WATCHDOG_TIMER_3      _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_3)
-#define M_BCM1480_INT_PCI_ERROR             _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_ERROR)
-#define M_BCM1480_INT_PCI_RESET             _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_RESET)
-#define M_BCM1480_INT_NODE_CONTROLLER       _BCM1480_INT_MASK1(K_BCM1480_INT_NODE_CONTROLLER)
-#define M_BCM1480_INT_HOST_BRIDGE           _BCM1480_INT_MASK1(K_BCM1480_INT_HOST_BRIDGE)
-#define M_BCM1480_INT_PORT_0_FATAL          _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_FATAL)
-#define M_BCM1480_INT_PORT_0_NONFATAL       _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_NONFATAL)
-#define M_BCM1480_INT_PORT_1_FATAL          _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_FATAL)
-#define M_BCM1480_INT_PORT_1_NONFATAL       _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_NONFATAL)
-#define M_BCM1480_INT_PORT_2_FATAL          _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_FATAL)
-#define M_BCM1480_INT_PORT_2_NONFATAL       _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_NONFATAL)
-#define M_BCM1480_INT_LDT_SMI               _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_SMI)
-#define M_BCM1480_INT_LDT_NMI               _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_NMI)
-#define M_BCM1480_INT_LDT_INIT              _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_INIT)
-#define M_BCM1480_INT_LDT_STARTUP           _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_STARTUP)
-#define M_BCM1480_INT_LDT_EXT               _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_EXT)
-#define M_BCM1480_INT_SMB_0                 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_0)
-#define M_BCM1480_INT_SMB_1                 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_1)
-#define M_BCM1480_INT_PCMCIA                _BCM1480_INT_MASK1(K_BCM1480_INT_PCMCIA)
-#define M_BCM1480_INT_UART_0                _BCM1480_INT_MASK1(K_BCM1480_INT_UART_0)
-#define M_BCM1480_INT_UART_1                _BCM1480_INT_MASK1(K_BCM1480_INT_UART_1)
-#define M_BCM1480_INT_UART_2                _BCM1480_INT_MASK1(K_BCM1480_INT_UART_2)
-#define M_BCM1480_INT_UART_3                _BCM1480_INT_MASK1(K_BCM1480_INT_UART_3)
-#define M_BCM1480_INT_GPIO_4                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_4)
-#define M_BCM1480_INT_GPIO_5                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_5)
-#define M_BCM1480_INT_GPIO_6                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_6)
-#define M_BCM1480_INT_GPIO_7                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_7)
-#define M_BCM1480_INT_GPIO_8                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_8)
-#define M_BCM1480_INT_GPIO_9                _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_9)
-#define M_BCM1480_INT_GPIO_10               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_10)
-#define M_BCM1480_INT_GPIO_11               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_11)
-#define M_BCM1480_INT_GPIO_12               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_12)
-#define M_BCM1480_INT_GPIO_13               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_13)
-#define M_BCM1480_INT_GPIO_14               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_14)
-#define M_BCM1480_INT_GPIO_15               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_15)
+#define M_BCM1480_INT_ADDR_TRAP                    _BCM1480_INT_MASK1(K_BCM1480_INT_ADDR_TRAP)
+#define M_BCM1480_INT_GPIO_0               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_0)
+#define M_BCM1480_INT_GPIO_1               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_1)
+#define M_BCM1480_INT_GPIO_2               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_2)
+#define M_BCM1480_INT_GPIO_3               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_3)
+#define M_BCM1480_INT_PCI_INTA             _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTA)
+#define M_BCM1480_INT_PCI_INTB             _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTB)
+#define M_BCM1480_INT_PCI_INTC             _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTC)
+#define M_BCM1480_INT_PCI_INTD             _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTD)
+#define M_BCM1480_INT_CYCLE_CP0                    _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP0)
+#define M_BCM1480_INT_CYCLE_CP1                    _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP1)
+#define M_BCM1480_INT_CYCLE_CP2                    _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP2)
+#define M_BCM1480_INT_CYCLE_CP3                    _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP3)
+#define M_BCM1480_INT_TIMER_0              _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_0)
+#define M_BCM1480_INT_TIMER_1              _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_1)
+#define M_BCM1480_INT_TIMER_2              _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_2)
+#define M_BCM1480_INT_TIMER_3              _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_3)
+#define M_BCM1480_INT_DM_CH_0              _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_0)
+#define M_BCM1480_INT_DM_CH_1              _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_1)
+#define M_BCM1480_INT_DM_CH_2              _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_2)
+#define M_BCM1480_INT_DM_CH_3              _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_3)
+#define M_BCM1480_INT_MAC_0                _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0)
+#define M_BCM1480_INT_MAC_0_CH1                    _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0_CH1)
+#define M_BCM1480_INT_MAC_1                _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1)
+#define M_BCM1480_INT_MAC_1_CH1                    _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1_CH1)
+#define M_BCM1480_INT_MAC_2                _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2)
+#define M_BCM1480_INT_MAC_2_CH1                    _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2_CH1)
+#define M_BCM1480_INT_MAC_3                _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3)
+#define M_BCM1480_INT_MAC_3_CH1                    _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3_CH1)
+#define M_BCM1480_INT_PMI_LOW              _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_LOW)
+#define M_BCM1480_INT_PMI_HIGH             _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH)
+#define M_BCM1480_INT_PMO_LOW              _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW)
+#define M_BCM1480_INT_PMO_HIGH             _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH)
+#define M_BCM1480_INT_MBOX_ALL             _BCM1480_INT_MASK(8, K_BCM1480_INT_MBOX_0_0)
+#define M_BCM1480_INT_MBOX_0_0             _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0)
+#define M_BCM1480_INT_MBOX_0_1             _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1)
+#define M_BCM1480_INT_MBOX_0_2             _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2)
+#define M_BCM1480_INT_MBOX_0_3             _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_3)
+#define M_BCM1480_INT_MBOX_1_0             _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_0)
+#define M_BCM1480_INT_MBOX_1_1             _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_1)
+#define M_BCM1480_INT_MBOX_1_2             _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_2)
+#define M_BCM1480_INT_MBOX_1_3             _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_3)
+#define M_BCM1480_INT_BAD_ECC              _BCM1480_INT_MASK1(K_BCM1480_INT_BAD_ECC)
+#define M_BCM1480_INT_COR_ECC              _BCM1480_INT_MASK1(K_BCM1480_INT_COR_ECC)
+#define M_BCM1480_INT_IO_BUS               _BCM1480_INT_MASK1(K_BCM1480_INT_IO_BUS)
+#define M_BCM1480_INT_PERF_CNT             _BCM1480_INT_MASK1(K_BCM1480_INT_PERF_CNT)
+#define M_BCM1480_INT_SW_PERF_CNT          _BCM1480_INT_MASK1(K_BCM1480_INT_SW_PERF_CNT)
+#define M_BCM1480_INT_TRACE_FREEZE         _BCM1480_INT_MASK1(K_BCM1480_INT_TRACE_FREEZE)
+#define M_BCM1480_INT_SW_TRACE_FREEZE      _BCM1480_INT_MASK1(K_BCM1480_INT_SW_TRACE_FREEZE)
+#define M_BCM1480_INT_WATCHDOG_TIMER_0     _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_0)
+#define M_BCM1480_INT_WATCHDOG_TIMER_1     _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_1)
+#define M_BCM1480_INT_WATCHDOG_TIMER_2     _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_2)
+#define M_BCM1480_INT_WATCHDOG_TIMER_3     _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_3)
+#define M_BCM1480_INT_PCI_ERROR                    _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_ERROR)
+#define M_BCM1480_INT_PCI_RESET                    _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_RESET)
+#define M_BCM1480_INT_NODE_CONTROLLER      _BCM1480_INT_MASK1(K_BCM1480_INT_NODE_CONTROLLER)
+#define M_BCM1480_INT_HOST_BRIDGE          _BCM1480_INT_MASK1(K_BCM1480_INT_HOST_BRIDGE)
+#define M_BCM1480_INT_PORT_0_FATAL         _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_FATAL)
+#define M_BCM1480_INT_PORT_0_NONFATAL      _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_NONFATAL)
+#define M_BCM1480_INT_PORT_1_FATAL         _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_FATAL)
+#define M_BCM1480_INT_PORT_1_NONFATAL      _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_NONFATAL)
+#define M_BCM1480_INT_PORT_2_FATAL         _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_FATAL)
+#define M_BCM1480_INT_PORT_2_NONFATAL      _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_NONFATAL)
+#define M_BCM1480_INT_LDT_SMI              _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_SMI)
+#define M_BCM1480_INT_LDT_NMI              _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_NMI)
+#define M_BCM1480_INT_LDT_INIT             _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_INIT)
+#define M_BCM1480_INT_LDT_STARTUP          _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_STARTUP)
+#define M_BCM1480_INT_LDT_EXT              _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_EXT)
+#define M_BCM1480_INT_SMB_0                _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_0)
+#define M_BCM1480_INT_SMB_1                _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_1)
+#define M_BCM1480_INT_PCMCIA               _BCM1480_INT_MASK1(K_BCM1480_INT_PCMCIA)
+#define M_BCM1480_INT_UART_0               _BCM1480_INT_MASK1(K_BCM1480_INT_UART_0)
+#define M_BCM1480_INT_UART_1               _BCM1480_INT_MASK1(K_BCM1480_INT_UART_1)
+#define M_BCM1480_INT_UART_2               _BCM1480_INT_MASK1(K_BCM1480_INT_UART_2)
+#define M_BCM1480_INT_UART_3               _BCM1480_INT_MASK1(K_BCM1480_INT_UART_3)
+#define M_BCM1480_INT_GPIO_4               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_4)
+#define M_BCM1480_INT_GPIO_5               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_5)
+#define M_BCM1480_INT_GPIO_6               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_6)
+#define M_BCM1480_INT_GPIO_7               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_7)
+#define M_BCM1480_INT_GPIO_8               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_8)
+#define M_BCM1480_INT_GPIO_9               _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_9)
+#define M_BCM1480_INT_GPIO_10              _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_10)
+#define M_BCM1480_INT_GPIO_11              _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_11)
+#define M_BCM1480_INT_GPIO_12              _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_12)
+#define M_BCM1480_INT_GPIO_13              _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_13)
+#define M_BCM1480_INT_GPIO_14              _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_14)
+#define M_BCM1480_INT_GPIO_15              _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_15)
 
 /*
  * Interrupt mappings (Table 18)
  */
 
-#define K_BCM1480_INT_MAP_I0    0              /* interrupt pins on processor */
-#define K_BCM1480_INT_MAP_I1    1
-#define K_BCM1480_INT_MAP_I2    2
-#define K_BCM1480_INT_MAP_I3    3
-#define K_BCM1480_INT_MAP_I4    4
-#define K_BCM1480_INT_MAP_I5    5
-#define K_BCM1480_INT_MAP_NMI   6              /* nonmaskable */
-#define K_BCM1480_INT_MAP_DINT  7              /* debug interrupt */
+#define K_BCM1480_INT_MAP_I0   0             /* interrupt pins on processor */
+#define K_BCM1480_INT_MAP_I1   1
+#define K_BCM1480_INT_MAP_I2   2
+#define K_BCM1480_INT_MAP_I3   3
+#define K_BCM1480_INT_MAP_I4   4
+#define K_BCM1480_INT_MAP_I5   5
+#define K_BCM1480_INT_MAP_NMI  6             /* nonmaskable */
+#define K_BCM1480_INT_MAP_DINT 7             /* debug interrupt */
 
 /*
  * Interrupt LDT Set Register (Table 19)
  */
 
-#define S_BCM1480_INT_HT_INTMSG             0
-#define M_BCM1480_INT_HT_INTMSG             _SB_MAKEMASK(3, S_BCM1480_INT_HT_INTMSG)
-#define V_BCM1480_INT_HT_INTMSG(x)          _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTMSG)
-#define G_BCM1480_INT_HT_INTMSG(x)          _SB_GETVALUE(x, S_BCM1480_INT_HT_INTMSG, M_BCM1480_INT_HT_INTMSG)
+#define S_BCM1480_INT_HT_INTMSG                    0
+#define M_BCM1480_INT_HT_INTMSG                    _SB_MAKEMASK(3, S_BCM1480_INT_HT_INTMSG)
+#define V_BCM1480_INT_HT_INTMSG(x)         _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTMSG)
+#define G_BCM1480_INT_HT_INTMSG(x)         _SB_GETVALUE(x, S_BCM1480_INT_HT_INTMSG, M_BCM1480_INT_HT_INTMSG)
 
-#define K_BCM1480_INT_HT_INTMSG_FIXED       0
+#define K_BCM1480_INT_HT_INTMSG_FIXED      0
 #define K_BCM1480_INT_HT_INTMSG_ARBITRATED  1
-#define K_BCM1480_INT_HT_INTMSG_SMI         2
-#define K_BCM1480_INT_HT_INTMSG_NMI         3
-#define K_BCM1480_INT_HT_INTMSG_INIT        4
-#define K_BCM1480_INT_HT_INTMSG_STARTUP     5
-#define K_BCM1480_INT_HT_INTMSG_EXTINT      6
+#define K_BCM1480_INT_HT_INTMSG_SMI        2
+#define K_BCM1480_INT_HT_INTMSG_NMI        3
+#define K_BCM1480_INT_HT_INTMSG_INIT       4
+#define K_BCM1480_INT_HT_INTMSG_STARTUP            5
+#define K_BCM1480_INT_HT_INTMSG_EXTINT     6
 #define K_BCM1480_INT_HT_INTMSG_RESERVED    7
 
-#define M_BCM1480_INT_HT_TRIGGERMODE        _SB_MAKEMASK1(3)
-#define V_BCM1480_INT_HT_EDGETRIGGER        0
-#define V_BCM1480_INT_HT_LEVELTRIGGER       M_BCM1480_INT_HT_TRIGGERMODE
+#define M_BCM1480_INT_HT_TRIGGERMODE       _SB_MAKEMASK1(3)
+#define V_BCM1480_INT_HT_EDGETRIGGER       0
+#define V_BCM1480_INT_HT_LEVELTRIGGER      M_BCM1480_INT_HT_TRIGGERMODE
 
-#define M_BCM1480_INT_HT_DESTMODE           _SB_MAKEMASK1(4)
-#define V_BCM1480_INT_HT_PHYSICALDEST       0
-#define V_BCM1480_INT_HT_LOGICALDEST        M_BCM1480_INT_HT_DESTMODE
+#define M_BCM1480_INT_HT_DESTMODE          _SB_MAKEMASK1(4)
+#define V_BCM1480_INT_HT_PHYSICALDEST      0
+#define V_BCM1480_INT_HT_LOGICALDEST       M_BCM1480_INT_HT_DESTMODE
 
-#define S_BCM1480_INT_HT_INTDEST            5
-#define M_BCM1480_INT_HT_INTDEST            _SB_MAKEMASK(8, S_BCM1480_INT_HT_INTDEST)
-#define V_BCM1480_INT_HT_INTDEST(x)         _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTDEST)
-#define G_BCM1480_INT_HT_INTDEST(x)         _SB_GETVALUE(x, S_BCM1480_INT_HT_INTDEST, M_BCM1480_INT_HT_INTDEST)
+#define S_BCM1480_INT_HT_INTDEST           5
+#define M_BCM1480_INT_HT_INTDEST           _SB_MAKEMASK(8, S_BCM1480_INT_HT_INTDEST)
+#define V_BCM1480_INT_HT_INTDEST(x)        _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTDEST)
+#define G_BCM1480_INT_HT_INTDEST(x)        _SB_GETVALUE(x, S_BCM1480_INT_HT_INTDEST, M_BCM1480_INT_HT_INTDEST)
 
-#define S_BCM1480_INT_HT_VECTOR             13
-#define M_BCM1480_INT_HT_VECTOR             _SB_MAKEMASK(8, S_BCM1480_INT_HT_VECTOR)
-#define V_BCM1480_INT_HT_VECTOR(x)          _SB_MAKEVALUE(x, S_BCM1480_INT_HT_VECTOR)
-#define G_BCM1480_INT_HT_VECTOR(x)          _SB_GETVALUE(x, S_BCM1480_INT_HT_VECTOR, M_BCM1480_INT_HT_VECTOR)
+#define S_BCM1480_INT_HT_VECTOR                    13
+#define M_BCM1480_INT_HT_VECTOR                    _SB_MAKEMASK(8, S_BCM1480_INT_HT_VECTOR)
+#define V_BCM1480_INT_HT_VECTOR(x)         _SB_MAKEVALUE(x, S_BCM1480_INT_HT_VECTOR)
+#define G_BCM1480_INT_HT_VECTOR(x)         _SB_GETVALUE(x, S_BCM1480_INT_HT_VECTOR, M_BCM1480_INT_HT_VECTOR)
 
 /*
  * Vector prefix (Table 4-7)
  */
 
 #define M_BCM1480_HTVECT_RAISE_INTLDT_HIGH  0x00
-#define M_BCM1480_HTVECT_RAISE_MBOX_0       0x40
+#define M_BCM1480_HTVECT_RAISE_MBOX_0      0x40
 #define M_BCM1480_HTVECT_RAISE_INTLDT_LO    0x80
-#define M_BCM1480_HTVECT_RAISE_MBOX_1       0xC0
+#define M_BCM1480_HTVECT_RAISE_MBOX_1      0xC0
 
 #endif /* _BCM1480_INT_H */
index 725d38c..910e5c7 100644 (file)
  * Format of level 2 cache management address (Table 55)
  */
 
-#define S_BCM1480_L2C_MGMT_INDEX            5
-#define M_BCM1480_L2C_MGMT_INDEX            _SB_MAKEMASK(12, S_BCM1480_L2C_MGMT_INDEX)
-#define V_BCM1480_L2C_MGMT_INDEX(x)         _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_INDEX)
-#define G_BCM1480_L2C_MGMT_INDEX(x)         _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_INDEX, M_BCM1480_L2C_MGMT_INDEX)
+#define S_BCM1480_L2C_MGMT_INDEX           5
+#define M_BCM1480_L2C_MGMT_INDEX           _SB_MAKEMASK(12, S_BCM1480_L2C_MGMT_INDEX)
+#define V_BCM1480_L2C_MGMT_INDEX(x)        _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_INDEX)
+#define G_BCM1480_L2C_MGMT_INDEX(x)        _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_INDEX, M_BCM1480_L2C_MGMT_INDEX)
 
-#define S_BCM1480_L2C_MGMT_WAY              17
-#define M_BCM1480_L2C_MGMT_WAY              _SB_MAKEMASK(3, S_BCM1480_L2C_MGMT_WAY)
-#define V_BCM1480_L2C_MGMT_WAY(x)           _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_WAY)
-#define G_BCM1480_L2C_MGMT_WAY(x)           _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_WAY, M_BCM1480_L2C_MGMT_WAY)
+#define S_BCM1480_L2C_MGMT_WAY             17
+#define M_BCM1480_L2C_MGMT_WAY             _SB_MAKEMASK(3, S_BCM1480_L2C_MGMT_WAY)
+#define V_BCM1480_L2C_MGMT_WAY(x)          _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_WAY)
+#define G_BCM1480_L2C_MGMT_WAY(x)          _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_WAY, M_BCM1480_L2C_MGMT_WAY)
 
-#define M_BCM1480_L2C_MGMT_DIRTY            _SB_MAKEMASK1(20)
-#define M_BCM1480_L2C_MGMT_VALID            _SB_MAKEMASK1(21)
+#define M_BCM1480_L2C_MGMT_DIRTY           _SB_MAKEMASK1(20)
+#define M_BCM1480_L2C_MGMT_VALID           _SB_MAKEMASK1(21)
 
-#define S_BCM1480_L2C_MGMT_ECC_DIAG         22
-#define M_BCM1480_L2C_MGMT_ECC_DIAG         _SB_MAKEMASK(2, S_BCM1480_L2C_MGMT_ECC_DIAG)
-#define V_BCM1480_L2C_MGMT_ECC_DIAG(x)      _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG)
-#define G_BCM1480_L2C_MGMT_ECC_DIAG(x)      _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG, M_BCM1480_L2C_MGMT_ECC_DIAG)
+#define S_BCM1480_L2C_MGMT_ECC_DIAG        22
+#define M_BCM1480_L2C_MGMT_ECC_DIAG        _SB_MAKEMASK(2, S_BCM1480_L2C_MGMT_ECC_DIAG)
+#define V_BCM1480_L2C_MGMT_ECC_DIAG(x)     _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG)
+#define G_BCM1480_L2C_MGMT_ECC_DIAG(x)     _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG, M_BCM1480_L2C_MGMT_ECC_DIAG)
 
-#define A_BCM1480_L2C_MGMT_TAG_BASE         0x00D0000000
+#define A_BCM1480_L2C_MGMT_TAG_BASE        0x00D0000000
 
-#define BCM1480_L2C_ENTRIES_PER_WAY         4096
-#define BCM1480_L2C_NUM_WAYS                8
+#define BCM1480_L2C_ENTRIES_PER_WAY        4096
+#define BCM1480_L2C_NUM_WAYS               8
 
 
 /*
  * Level 2 Cache Tag register (Table 59)
  */
 
-#define S_BCM1480_L2C_TAG_MBZ               0
-#define M_BCM1480_L2C_TAG_MBZ               _SB_MAKEMASK(5, S_BCM1480_L2C_TAG_MBZ)
+#define S_BCM1480_L2C_TAG_MBZ              0
+#define M_BCM1480_L2C_TAG_MBZ              _SB_MAKEMASK(5, S_BCM1480_L2C_TAG_MBZ)
 
-#define S_BCM1480_L2C_TAG_INDEX             5
-#define M_BCM1480_L2C_TAG_INDEX             _SB_MAKEMASK(12, S_BCM1480_L2C_TAG_INDEX)
-#define V_BCM1480_L2C_TAG_INDEX(x)          _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_INDEX)
-#define G_BCM1480_L2C_TAG_INDEX(x)          _SB_GETVALUE(x, S_BCM1480_L2C_TAG_INDEX, M_BCM1480_L2C_TAG_INDEX)
+#define S_BCM1480_L2C_TAG_INDEX                    5
+#define M_BCM1480_L2C_TAG_INDEX                    _SB_MAKEMASK(12, S_BCM1480_L2C_TAG_INDEX)
+#define V_BCM1480_L2C_TAG_INDEX(x)         _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_INDEX)
+#define G_BCM1480_L2C_TAG_INDEX(x)         _SB_GETVALUE(x, S_BCM1480_L2C_TAG_INDEX, M_BCM1480_L2C_TAG_INDEX)
 
 /* Note that index bit 16 is also tag bit 40 */
-#define S_BCM1480_L2C_TAG_TAG               17
-#define M_BCM1480_L2C_TAG_TAG               _SB_MAKEMASK(23, S_BCM1480_L2C_TAG_TAG)
-#define V_BCM1480_L2C_TAG_TAG(x)            _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_TAG)
-#define G_BCM1480_L2C_TAG_TAG(x)            _SB_GETVALUE(x, S_BCM1480_L2C_TAG_TAG, M_BCM1480_L2C_TAG_TAG)
+#define S_BCM1480_L2C_TAG_TAG              17
+#define M_BCM1480_L2C_TAG_TAG              _SB_MAKEMASK(23, S_BCM1480_L2C_TAG_TAG)
+#define V_BCM1480_L2C_TAG_TAG(x)           _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_TAG)
+#define G_BCM1480_L2C_TAG_TAG(x)           _SB_GETVALUE(x, S_BCM1480_L2C_TAG_TAG, M_BCM1480_L2C_TAG_TAG)
 
-#define S_BCM1480_L2C_TAG_ECC               40
-#define M_BCM1480_L2C_TAG_ECC               _SB_MAKEMASK(6, S_BCM1480_L2C_TAG_ECC)
-#define V_BCM1480_L2C_TAG_ECC(x)            _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_ECC)
-#define G_BCM1480_L2C_TAG_ECC(x)            _SB_GETVALUE(x, S_BCM1480_L2C_TAG_ECC, M_BCM1480_L2C_TAG_ECC)
+#define S_BCM1480_L2C_TAG_ECC              40
+#define M_BCM1480_L2C_TAG_ECC              _SB_MAKEMASK(6, S_BCM1480_L2C_TAG_ECC)
+#define V_BCM1480_L2C_TAG_ECC(x)           _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_ECC)
+#define G_BCM1480_L2C_TAG_ECC(x)           _SB_GETVALUE(x, S_BCM1480_L2C_TAG_ECC, M_BCM1480_L2C_TAG_ECC)
 
-#define S_BCM1480_L2C_TAG_WAY               46
-#define M_BCM1480_L2C_TAG_WAY               _SB_MAKEMASK(3, S_BCM1480_L2C_TAG_WAY)
-#define V_BCM1480_L2C_TAG_WAY(x)            _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_WAY)
-#define G_BCM1480_L2C_TAG_WAY(x)            _SB_GETVALUE(x, S_BCM1480_L2C_TAG_WAY, M_BCM1480_L2C_TAG_WAY)
+#define S_BCM1480_L2C_TAG_WAY              46
+#define M_BCM1480_L2C_TAG_WAY              _SB_MAKEMASK(3, S_BCM1480_L2C_TAG_WAY)
+#define V_BCM1480_L2C_TAG_WAY(x)           _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_WAY)
+#define G_BCM1480_L2C_TAG_WAY(x)           _SB_GETVALUE(x, S_BCM1480_L2C_TAG_WAY, M_BCM1480_L2C_TAG_WAY)
 
-#define M_BCM1480_L2C_TAG_DIRTY             _SB_MAKEMASK1(49)
-#define M_BCM1480_L2C_TAG_VALID             _SB_MAKEMASK1(50)
+#define M_BCM1480_L2C_TAG_DIRTY                    _SB_MAKEMASK1(49)
+#define M_BCM1480_L2C_TAG_VALID                    _SB_MAKEMASK1(50)
 
-#define S_BCM1480_L2C_DATA_ECC              51
-#define M_BCM1480_L2C_DATA_ECC              _SB_MAKEMASK(10, S_BCM1480_L2C_DATA_ECC)
-#define V_BCM1480_L2C_DATA_ECC(x)           _SB_MAKEVALUE(x, S_BCM1480_L2C_DATA_ECC)
-#define G_BCM1480_L2C_DATA_ECC(x)           _SB_GETVALUE(x, S_BCM1480_L2C_DATA_ECC, M_BCM1480_L2C_DATA_ECC)
+#define S_BCM1480_L2C_DATA_ECC             51
+#define M_BCM1480_L2C_DATA_ECC             _SB_MAKEMASK(10, S_BCM1480_L2C_DATA_ECC)
+#define V_BCM1480_L2C_DATA_ECC(x)          _SB_MAKEVALUE(x, S_BCM1480_L2C_DATA_ECC)
+#define G_BCM1480_L2C_DATA_ECC(x)          _SB_GETVALUE(x, S_BCM1480_L2C_DATA_ECC, M_BCM1480_L2C_DATA_ECC)
 
 
 /*
  * L2 Misc0 Value Register (Table 60)
  */
 
-#define S_BCM1480_L2C_MISC0_WAY_REMOTE      0
-#define M_BCM1480_L2C_MISC0_WAY_REMOTE      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_REMOTE)
+#define S_BCM1480_L2C_MISC0_WAY_REMOTE     0
+#define M_BCM1480_L2C_MISC0_WAY_REMOTE     _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_REMOTE)
 #define G_BCM1480_L2C_MISC0_WAY_REMOTE(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_REMOTE, M_BCM1480_L2C_MISC0_WAY_REMOTE)
 
-#define S_BCM1480_L2C_MISC0_WAY_LOCAL       8
-#define M_BCM1480_L2C_MISC0_WAY_LOCAL       _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_LOCAL)
+#define S_BCM1480_L2C_MISC0_WAY_LOCAL      8
+#define M_BCM1480_L2C_MISC0_WAY_LOCAL      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_LOCAL)
 #define G_BCM1480_L2C_MISC0_WAY_LOCAL(x)    _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_LOCAL, M_BCM1480_L2C_MISC0_WAY_LOCAL)
 
-#define S_BCM1480_L2C_MISC0_WAY_ENABLE      16
-#define M_BCM1480_L2C_MISC0_WAY_ENABLE      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_ENABLE)
+#define S_BCM1480_L2C_MISC0_WAY_ENABLE     16
+#define M_BCM1480_L2C_MISC0_WAY_ENABLE     _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_ENABLE)
 #define G_BCM1480_L2C_MISC0_WAY_ENABLE(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_ENABLE, M_BCM1480_L2C_MISC0_WAY_ENABLE)
 
 #define S_BCM1480_L2C_MISC0_CACHE_DISABLE   24
 #define M_BCM1480_L2C_MISC0_CACHE_DISABLE   _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_DISABLE)
 #define G_BCM1480_L2C_MISC0_CACHE_DISABLE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_CACHE_DISABLE, M_BCM1480_L2C_MISC0_CACHE_DISABLE)
 
-#define S_BCM1480_L2C_MISC0_CACHE_QUAD      26
-#define M_BCM1480_L2C_MISC0_CACHE_QUAD      _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_QUAD)
+#define S_BCM1480_L2C_MISC0_CACHE_QUAD     26
+#define M_BCM1480_L2C_MISC0_CACHE_QUAD     _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_QUAD)
 #define G_BCM1480_L2C_MISC0_CACHE_QUAD(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_CACHE_QUAD, M_BCM1480_L2C_MISC0_CACHE_QUAD)
 
-#define S_BCM1480_L2C_MISC0_MC_PRIORITY      30
-#define M_BCM1480_L2C_MISC0_MC_PRIORITY      _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_MC_PRIORITY)
+#define S_BCM1480_L2C_MISC0_MC_PRIORITY             30
+#define M_BCM1480_L2C_MISC0_MC_PRIORITY             _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_MC_PRIORITY)
 
-#define S_BCM1480_L2C_MISC0_ECC_CLEANUP      31
-#define M_BCM1480_L2C_MISC0_ECC_CLEANUP      _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_ECC_CLEANUP)
+#define S_BCM1480_L2C_MISC0_ECC_CLEANUP             31
+#define M_BCM1480_L2C_MISC0_ECC_CLEANUP             _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_ECC_CLEANUP)
 
 
 /*
  * L2 Misc1 Value Register (Table 60)
  */
 
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_0      0
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_0      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_0)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_0             0
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_0             _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_0)
 #define G_BCM1480_L2C_MISC1_WAY_AGENT_0(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_0, M_BCM1480_L2C_MISC1_WAY_AGENT_0)
 
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_1      8
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_1      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_1)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_1             8
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_1             _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_1)
 #define G_BCM1480_L2C_MISC1_WAY_AGENT_1(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_1, M_BCM1480_L2C_MISC1_WAY_AGENT_1)
 
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_2      16
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_2      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_2)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_2             16
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_2             _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_2)
 #define G_BCM1480_L2C_MISC1_WAY_AGENT_2(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_2, M_BCM1480_L2C_MISC1_WAY_AGENT_2)
 
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_3      24
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_3      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_3)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_3             24
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_3             _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_3)
 #define G_BCM1480_L2C_MISC1_WAY_AGENT_3(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_3, M_BCM1480_L2C_MISC1_WAY_AGENT_3)
 
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_4      32
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_4      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_4)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_4             32
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_4             _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_4)
 #define G_BCM1480_L2C_MISC1_WAY_AGENT_4(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_4, M_BCM1480_L2C_MISC1_WAY_AGENT_4)
 
 
  * L2 Misc2 Value Register (Table 60)
  */
 
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_8      0
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_8      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_8)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_8             0
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_8             _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_8)
 #define G_BCM1480_L2C_MISC2_WAY_AGENT_8(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_8, M_BCM1480_L2C_MISC2_WAY_AGENT_8)
 
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_9      8
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_9      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_9)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_9             8
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_9             _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_9)
 #define G_BCM1480_L2C_MISC2_WAY_AGENT_9(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_9, M_BCM1480_L2C_MISC2_WAY_AGENT_9)
 
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_A      16
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_A      _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_A)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_A             16
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_A             _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_A)
 #define G_BCM1480_L2C_MISC2_WAY_AGENT_A(x)   _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_A, M_BCM1480_L2C_MISC2_WAY_AGENT_A)
 
 
index 4307a75..86908fd 100644 (file)
@@ -1,7 +1,7 @@
 /*  *********************************************************************
     *  BCM1280/BCM1480 Board Support Package
     *
-    *  Memory Controller constants              File: bcm1480_mc.h
+    *  Memory Controller constants             File: bcm1480_mc.h
     *
     *  This module contains constants and macros useful for
     *  programming the memory controller.
  * Memory Channel Configuration Register (Table 81)
  */
 
-#define S_BCM1480_MC_INTLV0                 0
-#define M_BCM1480_MC_INTLV0                 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
-#define G_BCM1480_MC_INTLV0(x)              _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0_DEFAULT         V_BCM1480_MC_INTLV0(0)
-
-#define S_BCM1480_MC_INTLV1                 8
-#define M_BCM1480_MC_INTLV1                 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
-#define G_BCM1480_MC_INTLV1(x)              _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1_DEFAULT         V_BCM1480_MC_INTLV1(0)
-
-#define S_BCM1480_MC_INTLV2                 16
-#define M_BCM1480_MC_INTLV2                 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV2)
-#define V_BCM1480_MC_INTLV2(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV2)
-#define G_BCM1480_MC_INTLV2(x)              _SB_GETVALUE(x, S_BCM1480_MC_INTLV2, M_BCM1480_MC_INTLV2)
-#define V_BCM1480_MC_INTLV2_DEFAULT         V_BCM1480_MC_INTLV2(0)
-
-#define S_BCM1480_MC_CS_MODE                32
-#define M_BCM1480_MC_CS_MODE                _SB_MAKEMASK(8, S_BCM1480_MC_CS_MODE)
-#define V_BCM1480_MC_CS_MODE(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_CS_MODE)
-#define G_BCM1480_MC_CS_MODE(x)             _SB_GETVALUE(x, S_BCM1480_MC_CS_MODE, M_BCM1480_MC_CS_MODE)
-#define V_BCM1480_MC_CS_MODE_DEFAULT        V_BCM1480_MC_CS_MODE(0)
-
-#define V_BCM1480_MC_CONFIG_DEFAULT         (V_BCM1480_MC_INTLV0_DEFAULT  | \
-                                     V_BCM1480_MC_INTLV1_DEFAULT  | \
-                                     V_BCM1480_MC_INTLV2_DEFAULT  | \
+#define S_BCM1480_MC_INTLV0                0
+#define M_BCM1480_MC_INTLV0                _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
+#define G_BCM1480_MC_INTLV0(x)             _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0_DEFAULT        V_BCM1480_MC_INTLV0(0)
+
+#define S_BCM1480_MC_INTLV1                8
+#define M_BCM1480_MC_INTLV1                _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
+#define G_BCM1480_MC_INTLV1(x)             _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1_DEFAULT        V_BCM1480_MC_INTLV1(0)
+
+#define S_BCM1480_MC_INTLV2                16
+#define M_BCM1480_MC_INTLV2                _SB_MAKEMASK(6, S_BCM1480_MC_INTLV2)
+#define V_BCM1480_MC_INTLV2(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV2)
+#define G_BCM1480_MC_INTLV2(x)             _SB_GETVALUE(x, S_BCM1480_MC_INTLV2, M_BCM1480_MC_INTLV2)
+#define V_BCM1480_MC_INTLV2_DEFAULT        V_BCM1480_MC_INTLV2(0)
+
+#define S_BCM1480_MC_CS_MODE               32
+#define M_BCM1480_MC_CS_MODE               _SB_MAKEMASK(8, S_BCM1480_MC_CS_MODE)
+#define V_BCM1480_MC_CS_MODE(x)                    _SB_MAKEVALUE(x, S_BCM1480_MC_CS_MODE)
+#define G_BCM1480_MC_CS_MODE(x)                    _SB_GETVALUE(x, S_BCM1480_MC_CS_MODE, M_BCM1480_MC_CS_MODE)
+#define V_BCM1480_MC_CS_MODE_DEFAULT       V_BCM1480_MC_CS_MODE(0)
+
+#define V_BCM1480_MC_CONFIG_DEFAULT        (V_BCM1480_MC_INTLV0_DEFAULT  | \
+                                    V_BCM1480_MC_INTLV1_DEFAULT  | \
+                                    V_BCM1480_MC_INTLV2_DEFAULT  | \
                                     V_BCM1480_MC_CS_MODE_DEFAULT)
 
 #define K_BCM1480_MC_CS01_MODE             0x03
  * Chip Select Start Address Register (Table 82)
  */
 
-#define S_BCM1480_MC_CS0_START              0
-#define M_BCM1480_MC_CS0_START              _SB_MAKEMASK(12, S_BCM1480_MC_CS0_START)
-#define V_BCM1480_MC_CS0_START(x)           _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_START)
-#define G_BCM1480_MC_CS0_START(x)           _SB_GETVALUE(x, S_BCM1480_MC_CS0_START, M_BCM1480_MC_CS0_START)
+#define S_BCM1480_MC_CS0_START             0
+#define M_BCM1480_MC_CS0_START             _SB_MAKEMASK(12, S_BCM1480_MC_CS0_START)
+#define V_BCM1480_MC_CS0_START(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_START)
+#define G_BCM1480_MC_CS0_START(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS0_START, M_BCM1480_MC_CS0_START)
 
-#define S_BCM1480_MC_CS1_START              16
-#define M_BCM1480_MC_CS1_START              _SB_MAKEMASK(12, S_BCM1480_MC_CS1_START)
-#define V_BCM1480_MC_CS1_START(x)           _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_START)
-#define G_BCM1480_MC_CS1_START(x)           _SB_GETVALUE(x, S_BCM1480_MC_CS1_START, M_BCM1480_MC_CS1_START)
+#define S_BCM1480_MC_CS1_START             16
+#define M_BCM1480_MC_CS1_START             _SB_MAKEMASK(12, S_BCM1480_MC_CS1_START)
+#define V_BCM1480_MC_CS1_START(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_START)
+#define G_BCM1480_MC_CS1_START(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS1_START, M_BCM1480_MC_CS1_START)
 
-#define S_BCM1480_MC_CS2_START              32
-#define M_BCM1480_MC_CS2_START              _SB_MAKEMASK(12, S_BCM1480_MC_CS2_START)
-#define V_BCM1480_MC_CS2_START(x)           _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_START)
-#define G_BCM1480_MC_CS2_START(x)           _SB_GETVALUE(x, S_BCM1480_MC_CS2_START, M_BCM1480_MC_CS2_START)
+#define S_BCM1480_MC_CS2_START             32
+#define M_BCM1480_MC_CS2_START             _SB_MAKEMASK(12, S_BCM1480_MC_CS2_START)
+#define V_BCM1480_MC_CS2_START(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_START)
+#define G_BCM1480_MC_CS2_START(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS2_START, M_BCM1480_MC_CS2_START)
 
-#define S_BCM1480_MC_CS3_START              48
-#define M_BCM1480_MC_CS3_START              _SB_MAKEMASK(12, S_BCM1480_MC_CS3_START)
-#define V_BCM1480_MC_CS3_START(x)           _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_START)
-#define G_BCM1480_MC_CS3_START(x)           _SB_GETVALUE(x, S_BCM1480_MC_CS3_START, M_BCM1480_MC_CS3_START)
+#define S_BCM1480_MC_CS3_START             48
+#define M_BCM1480_MC_CS3_START             _SB_MAKEMASK(12, S_BCM1480_MC_CS3_START)
+#define V_BCM1480_MC_CS3_START(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_START)
+#define G_BCM1480_MC_CS3_START(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS3_START, M_BCM1480_MC_CS3_START)
 
 /*
  * Chip Select End Address Register (Table 83)
  */
 
-#define S_BCM1480_MC_CS0_END                0
-#define M_BCM1480_MC_CS0_END                _SB_MAKEMASK(12, S_BCM1480_MC_CS0_END)
-#define V_BCM1480_MC_CS0_END(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_END)
-#define G_BCM1480_MC_CS0_END(x)             _SB_GETVALUE(x, S_BCM1480_MC_CS0_END, M_BCM1480_MC_CS0_END)
+#define S_BCM1480_MC_CS0_END               0
+#define M_BCM1480_MC_CS0_END               _SB_MAKEMASK(12, S_BCM1480_MC_CS0_END)
+#define V_BCM1480_MC_CS0_END(x)                    _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_END)
+#define G_BCM1480_MC_CS0_END(x)                    _SB_GETVALUE(x, S_BCM1480_MC_CS0_END, M_BCM1480_MC_CS0_END)
 
-#define S_BCM1480_MC_CS1_END                16
-#define M_BCM1480_MC_CS1_END                _SB_MAKEMASK(12, S_BCM1480_MC_CS1_END)
-#define V_BCM1480_MC_CS1_END(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_END)
-#define G_BCM1480_MC_CS1_END(x)             _SB_GETVALUE(x, S_BCM1480_MC_CS1_END, M_BCM1480_MC_CS1_END)
+#define S_BCM1480_MC_CS1_END               16
+#define M_BCM1480_MC_CS1_END               _SB_MAKEMASK(12, S_BCM1480_MC_CS1_END)
+#define V_BCM1480_MC_CS1_END(x)                    _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_END)
+#define G_BCM1480_MC_CS1_END(x)                    _SB_GETVALUE(x, S_BCM1480_MC_CS1_END, M_BCM1480_MC_CS1_END)
 
-#define S_BCM1480_MC_CS2_END                32
-#define M_BCM1480_MC_CS2_END                _SB_MAKEMASK(12, S_BCM1480_MC_CS2_END)
-#define V_BCM1480_MC_CS2_END(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_END)
-#define G_BCM1480_MC_CS2_END(x)             _SB_GETVALUE(x, S_BCM1480_MC_CS2_END, M_BCM1480_MC_CS2_END)
+#define S_BCM1480_MC_CS2_END               32
+#define M_BCM1480_MC_CS2_END               _SB_MAKEMASK(12, S_BCM1480_MC_CS2_END)
+#define V_BCM1480_MC_CS2_END(x)                    _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_END)
+#define G_BCM1480_MC_CS2_END(x)                    _SB_GETVALUE(x, S_BCM1480_MC_CS2_END, M_BCM1480_MC_CS2_END)
 
-#define S_BCM1480_MC_CS3_END                48
-#define M_BCM1480_MC_CS3_END                _SB_MAKEMASK(12, S_BCM1480_MC_CS3_END)
-#define V_BCM1480_MC_CS3_END(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_END)
-#define G_BCM1480_MC_CS3_END(x)             _SB_GETVALUE(x, S_BCM1480_MC_CS3_END, M_BCM1480_MC_CS3_END)
+#define S_BCM1480_MC_CS3_END               48
+#define M_BCM1480_MC_CS3_END               _SB_MAKEMASK(12, S_BCM1480_MC_CS3_END)
+#define V_BCM1480_MC_CS3_END(x)                    _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_END)
+#define G_BCM1480_MC_CS3_END(x)                    _SB_GETVALUE(x, S_BCM1480_MC_CS3_END, M_BCM1480_MC_CS3_END)
 
 /*
  * Row Address Bit Select Register 0 (Table 84)
  */
 
-#define S_BCM1480_MC_ROW00                  0
-#define M_BCM1480_MC_ROW00                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW00)
-#define V_BCM1480_MC_ROW00(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW00)
-#define G_BCM1480_MC_ROW00(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW00, M_BCM1480_MC_ROW00)
-
-#define S_BCM1480_MC_ROW01                  8
-#define M_BCM1480_MC_ROW01                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW01)
-#define V_BCM1480_MC_ROW01(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW01)
-#define G_BCM1480_MC_ROW01(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW01, M_BCM1480_MC_ROW01)
-
-#define S_BCM1480_MC_ROW02                  16
-#define M_BCM1480_MC_ROW02                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW02)
-#define V_BCM1480_MC_ROW02(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW02)
-#define G_BCM1480_MC_ROW02(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW02, M_BCM1480_MC_ROW02)
-
-#define S_BCM1480_MC_ROW03                  24
-#define M_BCM1480_MC_ROW03                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW03)
-#define V_BCM1480_MC_ROW03(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW03)
-#define G_BCM1480_MC_ROW03(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW03, M_BCM1480_MC_ROW03)
-
-#define S_BCM1480_MC_ROW04                  32
-#define M_BCM1480_MC_ROW04                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW04)
-#define V_BCM1480_MC_ROW04(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW04)
-#define G_BCM1480_MC_ROW04(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW04, M_BCM1480_MC_ROW04)
-
-#define S_BCM1480_MC_ROW05                  40
-#define M_BCM1480_MC_ROW05                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW05)
-#define V_BCM1480_MC_ROW05(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW05)
-#define G_BCM1480_MC_ROW05(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW05, M_BCM1480_MC_ROW05)
-
-#define S_BCM1480_MC_ROW06                  48
-#define M_BCM1480_MC_ROW06                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW06)
-#define V_BCM1480_MC_ROW06(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW06)
-#define G_BCM1480_MC_ROW06(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW06, M_BCM1480_MC_ROW06)
-
-#define S_BCM1480_MC_ROW07                  56
-#define M_BCM1480_MC_ROW07                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW07)
-#define V_BCM1480_MC_ROW07(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW07)
-#define G_BCM1480_MC_ROW07(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW07, M_BCM1480_MC_ROW07)
+#define S_BCM1480_MC_ROW00                 0
+#define M_BCM1480_MC_ROW00                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW00)
+#define V_BCM1480_MC_ROW00(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW00)
+#define G_BCM1480_MC_ROW00(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW00, M_BCM1480_MC_ROW00)
+
+#define S_BCM1480_MC_ROW01                 8
+#define M_BCM1480_MC_ROW01                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW01)
+#define V_BCM1480_MC_ROW01(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW01)
+#define G_BCM1480_MC_ROW01(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW01, M_BCM1480_MC_ROW01)
+
+#define S_BCM1480_MC_ROW02                 16
+#define M_BCM1480_MC_ROW02                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW02)
+#define V_BCM1480_MC_ROW02(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW02)
+#define G_BCM1480_MC_ROW02(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW02, M_BCM1480_MC_ROW02)
+
+#define S_BCM1480_MC_ROW03                 24
+#define M_BCM1480_MC_ROW03                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW03)
+#define V_BCM1480_MC_ROW03(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW03)
+#define G_BCM1480_MC_ROW03(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW03, M_BCM1480_MC_ROW03)
+
+#define S_BCM1480_MC_ROW04                 32
+#define M_BCM1480_MC_ROW04                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW04)
+#define V_BCM1480_MC_ROW04(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW04)
+#define G_BCM1480_MC_ROW04(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW04, M_BCM1480_MC_ROW04)
+
+#define S_BCM1480_MC_ROW05                 40
+#define M_BCM1480_MC_ROW05                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW05)
+#define V_BCM1480_MC_ROW05(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW05)
+#define G_BCM1480_MC_ROW05(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW05, M_BCM1480_MC_ROW05)
+
+#define S_BCM1480_MC_ROW06                 48
+#define M_BCM1480_MC_ROW06                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW06)
+#define V_BCM1480_MC_ROW06(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW06)
+#define G_BCM1480_MC_ROW06(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW06, M_BCM1480_MC_ROW06)
+
+#define S_BCM1480_MC_ROW07                 56
+#define M_BCM1480_MC_ROW07                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW07)
+#define V_BCM1480_MC_ROW07(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW07)
+#define G_BCM1480_MC_ROW07(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW07, M_BCM1480_MC_ROW07)
 
 /*
  * Row Address Bit Select Register 1 (Table 85)
  */
 
-#define S_BCM1480_MC_ROW08                  0
-#define M_BCM1480_MC_ROW08                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW08)
-#define V_BCM1480_MC_ROW08(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW08)
-#define G_BCM1480_MC_ROW08(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW08, M_BCM1480_MC_ROW08)
+#define S_BCM1480_MC_ROW08                 0
+#define M_BCM1480_MC_ROW08                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW08)
+#define V_BCM1480_MC_ROW08(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW08)
+#define G_BCM1480_MC_ROW08(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW08, M_BCM1480_MC_ROW08)
 
-#define S_BCM1480_MC_ROW09                  8
-#define M_BCM1480_MC_ROW09                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW09)
-#define V_BCM1480_MC_ROW09(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW09)
-#define G_BCM1480_MC_ROW09(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW09, M_BCM1480_MC_ROW09)
+#define S_BCM1480_MC_ROW09                 8
+#define M_BCM1480_MC_ROW09                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW09)
+#define V_BCM1480_MC_ROW09(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW09)
+#define G_BCM1480_MC_ROW09(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW09, M_BCM1480_MC_ROW09)
 
-#define S_BCM1480_MC_ROW10                  16
-#define M_BCM1480_MC_ROW10                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW10)
-#define V_BCM1480_MC_ROW10(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW10)
-#define G_BCM1480_MC_ROW10(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW10, M_BCM1480_MC_ROW10)
+#define S_BCM1480_MC_ROW10                 16
+#define M_BCM1480_MC_ROW10                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW10)
+#define V_BCM1480_MC_ROW10(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW10)
+#define G_BCM1480_MC_ROW10(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW10, M_BCM1480_MC_ROW10)
 
-#define S_BCM1480_MC_ROW11                  24
-#define M_BCM1480_MC_ROW11                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW11)
-#define V_BCM1480_MC_ROW11(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW11)
-#define G_BCM1480_MC_ROW11(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW11, M_BCM1480_MC_ROW11)
+#define S_BCM1480_MC_ROW11                 24
+#define M_BCM1480_MC_ROW11                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW11)
+#define V_BCM1480_MC_ROW11(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW11)
+#define G_BCM1480_MC_ROW11(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW11, M_BCM1480_MC_ROW11)
 
-#define S_BCM1480_MC_ROW12                  32
-#define M_BCM1480_MC_ROW12                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW12)
-#define V_BCM1480_MC_ROW12(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW12)
-#define G_BCM1480_MC_ROW12(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW12, M_BCM1480_MC_ROW12)
+#define S_BCM1480_MC_ROW12                 32
+#define M_BCM1480_MC_ROW12                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW12)
+#define V_BCM1480_MC_ROW12(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW12)
+#define G_BCM1480_MC_ROW12(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW12, M_BCM1480_MC_ROW12)
 
-#define S_BCM1480_MC_ROW13                  40
-#define M_BCM1480_MC_ROW13                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW13)
-#define V_BCM1480_MC_ROW13(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW13)
-#define G_BCM1480_MC_ROW13(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW13, M_BCM1480_MC_ROW13)
+#define S_BCM1480_MC_ROW13                 40
+#define M_BCM1480_MC_ROW13                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW13)
+#define V_BCM1480_MC_ROW13(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW13)
+#define G_BCM1480_MC_ROW13(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW13, M_BCM1480_MC_ROW13)
 
-#define S_BCM1480_MC_ROW14                  48
-#define M_BCM1480_MC_ROW14                  _SB_MAKEMASK(6, S_BCM1480_MC_ROW14)
-#define V_BCM1480_MC_ROW14(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ROW14)
-#define G_BCM1480_MC_ROW14(x)               _SB_GETVALUE(x, S_BCM1480_MC_ROW14, M_BCM1480_MC_ROW14)
+#define S_BCM1480_MC_ROW14                 48
+#define M_BCM1480_MC_ROW14                 _SB_MAKEMASK(6, S_BCM1480_MC_ROW14)
+#define V_BCM1480_MC_ROW14(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_ROW14)
+#define G_BCM1480_MC_ROW14(x)              _SB_GETVALUE(x, S_BCM1480_MC_ROW14, M_BCM1480_MC_ROW14)
 
-#define K_BCM1480_MC_ROWX_BIT_SPACING              8
+#define K_BCM1480_MC_ROWX_BIT_SPACING      8
 
 /*
  * Column Address Bit Select Register 0 (Table 86)
  */
 
-#define S_BCM1480_MC_COL00                  0
-#define M_BCM1480_MC_COL00                  _SB_MAKEMASK(6, S_BCM1480_MC_COL00)
-#define V_BCM1480_MC_COL00(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL00)
-#define G_BCM1480_MC_COL00(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL00, M_BCM1480_MC_COL00)
-
-#define S_BCM1480_MC_COL01                  8
-#define M_BCM1480_MC_COL01                  _SB_MAKEMASK(6, S_BCM1480_MC_COL01)
-#define V_BCM1480_MC_COL01(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL01)
-#define G_BCM1480_MC_COL01(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL01, M_BCM1480_MC_COL01)
-
-#define S_BCM1480_MC_COL02                  16
-#define M_BCM1480_MC_COL02                  _SB_MAKEMASK(6, S_BCM1480_MC_COL02)
-#define V_BCM1480_MC_COL02(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL02)
-#define G_BCM1480_MC_COL02(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL02, M_BCM1480_MC_COL02)
-
-#define S_BCM1480_MC_COL03                  24
-#define M_BCM1480_MC_COL03                  _SB_MAKEMASK(6, S_BCM1480_MC_COL03)
-#define V_BCM1480_MC_COL03(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL03)
-#define G_BCM1480_MC_COL03(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL03, M_BCM1480_MC_COL03)
-
-#define S_BCM1480_MC_COL04                  32
-#define M_BCM1480_MC_COL04                  _SB_MAKEMASK(6, S_BCM1480_MC_COL04)
-#define V_BCM1480_MC_COL04(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL04)
-#define G_BCM1480_MC_COL04(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL04, M_BCM1480_MC_COL04)
-
-#define S_BCM1480_MC_COL05                  40
-#define M_BCM1480_MC_COL05                  _SB_MAKEMASK(6, S_BCM1480_MC_COL05)
-#define V_BCM1480_MC_COL05(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL05)
-#define G_BCM1480_MC_COL05(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL05, M_BCM1480_MC_COL05)
-
-#define S_BCM1480_MC_COL06                  48
-#define M_BCM1480_MC_COL06                  _SB_MAKEMASK(6, S_BCM1480_MC_COL06)
-#define V_BCM1480_MC_COL06(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL06)
-#define G_BCM1480_MC_COL06(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL06, M_BCM1480_MC_COL06)
-
-#define S_BCM1480_MC_COL07                  56
-#define M_BCM1480_MC_COL07                  _SB_MAKEMASK(6, S_BCM1480_MC_COL07)
-#define V_BCM1480_MC_COL07(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL07)
-#define G_BCM1480_MC_COL07(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL07, M_BCM1480_MC_COL07)
+#define S_BCM1480_MC_COL00                 0
+#define M_BCM1480_MC_COL00                 _SB_MAKEMASK(6, S_BCM1480_MC_COL00)
+#define V_BCM1480_MC_COL00(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL00)
+#define G_BCM1480_MC_COL00(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL00, M_BCM1480_MC_COL00)
+
+#define S_BCM1480_MC_COL01                 8
+#define M_BCM1480_MC_COL01                 _SB_MAKEMASK(6, S_BCM1480_MC_COL01)
+#define V_BCM1480_MC_COL01(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL01)
+#define G_BCM1480_MC_COL01(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL01, M_BCM1480_MC_COL01)
+
+#define S_BCM1480_MC_COL02                 16
+#define M_BCM1480_MC_COL02                 _SB_MAKEMASK(6, S_BCM1480_MC_COL02)
+#define V_BCM1480_MC_COL02(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL02)
+#define G_BCM1480_MC_COL02(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL02, M_BCM1480_MC_COL02)
+
+#define S_BCM1480_MC_COL03                 24
+#define M_BCM1480_MC_COL03                 _SB_MAKEMASK(6, S_BCM1480_MC_COL03)
+#define V_BCM1480_MC_COL03(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL03)
+#define G_BCM1480_MC_COL03(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL03, M_BCM1480_MC_COL03)
+
+#define S_BCM1480_MC_COL04                 32
+#define M_BCM1480_MC_COL04                 _SB_MAKEMASK(6, S_BCM1480_MC_COL04)
+#define V_BCM1480_MC_COL04(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL04)
+#define G_BCM1480_MC_COL04(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL04, M_BCM1480_MC_COL04)
+
+#define S_BCM1480_MC_COL05                 40
+#define M_BCM1480_MC_COL05                 _SB_MAKEMASK(6, S_BCM1480_MC_COL05)
+#define V_BCM1480_MC_COL05(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL05)
+#define G_BCM1480_MC_COL05(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL05, M_BCM1480_MC_COL05)
+
+#define S_BCM1480_MC_COL06                 48
+#define M_BCM1480_MC_COL06                 _SB_MAKEMASK(6, S_BCM1480_MC_COL06)
+#define V_BCM1480_MC_COL06(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL06)
+#define G_BCM1480_MC_COL06(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL06, M_BCM1480_MC_COL06)
+
+#define S_BCM1480_MC_COL07                 56
+#define M_BCM1480_MC_COL07                 _SB_MAKEMASK(6, S_BCM1480_MC_COL07)
+#define V_BCM1480_MC_COL07(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL07)
+#define G_BCM1480_MC_COL07(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL07, M_BCM1480_MC_COL07)
 
 /*
  * Column Address Bit Select Register 1 (Table 87)
  */
 
-#define S_BCM1480_MC_COL08                  0
-#define M_BCM1480_MC_COL08                  _SB_MAKEMASK(6, S_BCM1480_MC_COL08)
-#define V_BCM1480_MC_COL08(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL08)
-#define G_BCM1480_MC_COL08(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL08, M_BCM1480_MC_COL08)
+#define S_BCM1480_MC_COL08                 0
+#define M_BCM1480_MC_COL08                 _SB_MAKEMASK(6, S_BCM1480_MC_COL08)
+#define V_BCM1480_MC_COL08(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL08)
+#define G_BCM1480_MC_COL08(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL08, M_BCM1480_MC_COL08)
 
-#define S_BCM1480_MC_COL09                  8
-#define M_BCM1480_MC_COL09                  _SB_MAKEMASK(6, S_BCM1480_MC_COL09)
-#define V_BCM1480_MC_COL09(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL09)
-#define G_BCM1480_MC_COL09(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL09, M_BCM1480_MC_COL09)
+#define S_BCM1480_MC_COL09                 8
+#define M_BCM1480_MC_COL09                 _SB_MAKEMASK(6, S_BCM1480_MC_COL09)
+#define V_BCM1480_MC_COL09(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL09)
+#define G_BCM1480_MC_COL09(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL09, M_BCM1480_MC_COL09)
 
-#define S_BCM1480_MC_COL10                  16   /* not a valid position, must be prog as 0 */
+#define S_BCM1480_MC_COL10                 16   /* not a valid position, must be prog as 0 */
 
-#define S_BCM1480_MC_COL11                  24
-#define M_BCM1480_MC_COL11                  _SB_MAKEMASK(6, S_BCM1480_MC_COL11)
-#define V_BCM1480_MC_COL11(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL11)
-#define G_BCM1480_MC_COL11(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL11, M_BCM1480_MC_COL11)
+#define S_BCM1480_MC_COL11                 24
+#define M_BCM1480_MC_COL11                 _SB_MAKEMASK(6, S_BCM1480_MC_COL11)
+#define V_BCM1480_MC_COL11(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL11)
+#define G_BCM1480_MC_COL11(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL11, M_BCM1480_MC_COL11)
 
-#define S_BCM1480_MC_COL12                  32
-#define M_BCM1480_MC_COL12                  _SB_MAKEMASK(6, S_BCM1480_MC_COL12)
-#define V_BCM1480_MC_COL12(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL12)
-#define G_BCM1480_MC_COL12(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL12, M_BCM1480_MC_COL12)
+#define S_BCM1480_MC_COL12                 32
+#define M_BCM1480_MC_COL12                 _SB_MAKEMASK(6, S_BCM1480_MC_COL12)
+#define V_BCM1480_MC_COL12(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL12)
+#define G_BCM1480_MC_COL12(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL12, M_BCM1480_MC_COL12)
 
-#define S_BCM1480_MC_COL13                  40
-#define M_BCM1480_MC_COL13                  _SB_MAKEMASK(6, S_BCM1480_MC_COL13)
-#define V_BCM1480_MC_COL13(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL13)
-#define G_BCM1480_MC_COL13(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL13, M_BCM1480_MC_COL13)
+#define S_BCM1480_MC_COL13                 40
+#define M_BCM1480_MC_COL13                 _SB_MAKEMASK(6, S_BCM1480_MC_COL13)
+#define V_BCM1480_MC_COL13(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL13)
+#define G_BCM1480_MC_COL13(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL13, M_BCM1480_MC_COL13)
 
-#define S_BCM1480_MC_COL14                  48
-#define M_BCM1480_MC_COL14                  _SB_MAKEMASK(6, S_BCM1480_MC_COL14)
-#define V_BCM1480_MC_COL14(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_COL14)
-#define G_BCM1480_MC_COL14(x)               _SB_GETVALUE(x, S_BCM1480_MC_COL14, M_BCM1480_MC_COL14)
+#define S_BCM1480_MC_COL14                 48
+#define M_BCM1480_MC_COL14                 _SB_MAKEMASK(6, S_BCM1480_MC_COL14)
+#define V_BCM1480_MC_COL14(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_COL14)
+#define G_BCM1480_MC_COL14(x)              _SB_GETVALUE(x, S_BCM1480_MC_COL14, M_BCM1480_MC_COL14)
 
-#define K_BCM1480_MC_COLX_BIT_SPACING              8
+#define K_BCM1480_MC_COLX_BIT_SPACING      8
 
 /*
  * CS0 and CS1 Bank Address Bit Select Register (Table 88)
  */
 
-#define S_BCM1480_MC_CS01_BANK0             0
-#define M_BCM1480_MC_CS01_BANK0             _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK0)
-#define V_BCM1480_MC_CS01_BANK0(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK0)
-#define G_BCM1480_MC_CS01_BANK0(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK0, M_BCM1480_MC_CS01_BANK0)
+#define S_BCM1480_MC_CS01_BANK0                    0
+#define M_BCM1480_MC_CS01_BANK0                    _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK0)
+#define V_BCM1480_MC_CS01_BANK0(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK0)
+#define G_BCM1480_MC_CS01_BANK0(x)         _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK0, M_BCM1480_MC_CS01_BANK0)
 
-#define S_BCM1480_MC_CS01_BANK1             8
-#define M_BCM1480_MC_CS01_BANK1             _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK1)
-#define V_BCM1480_MC_CS01_BANK1(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK1)
-#define G_BCM1480_MC_CS01_BANK1(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK1, M_BCM1480_MC_CS01_BANK1)
+#define S_BCM1480_MC_CS01_BANK1                    8
+#define M_BCM1480_MC_CS01_BANK1                    _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK1)
+#define V_BCM1480_MC_CS01_BANK1(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK1)
+#define G_BCM1480_MC_CS01_BANK1(x)         _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK1, M_BCM1480_MC_CS01_BANK1)
 
-#define S_BCM1480_MC_CS01_BANK2             16
-#define M_BCM1480_MC_CS01_BANK2             _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK2)
-#define V_BCM1480_MC_CS01_BANK2(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK2)
-#define G_BCM1480_MC_CS01_BANK2(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK2, M_BCM1480_MC_CS01_BANK2)
+#define S_BCM1480_MC_CS01_BANK2                    16
+#define M_BCM1480_MC_CS01_BANK2                    _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK2)
+#define V_BCM1480_MC_CS01_BANK2(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK2)
+#define G_BCM1480_MC_CS01_BANK2(x)         _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK2, M_BCM1480_MC_CS01_BANK2)
 
 /*
  * CS2 and CS3 Bank Address Bit Select Register (Table 89)
  */
 
-#define S_BCM1480_MC_CS23_BANK0             0
-#define M_BCM1480_MC_CS23_BANK0             _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK0)
-#define V_BCM1480_MC_CS23_BANK0(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK0)
-#define G_BCM1480_MC_CS23_BANK0(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK0, M_BCM1480_MC_CS23_BANK0)
+#define S_BCM1480_MC_CS23_BANK0                    0
+#define M_BCM1480_MC_CS23_BANK0                    _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK0)
+#define V_BCM1480_MC_CS23_BANK0(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK0)
+#define G_BCM1480_MC_CS23_BANK0(x)         _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK0, M_BCM1480_MC_CS23_BANK0)
 
-#define S_BCM1480_MC_CS23_BANK1             8
-#define M_BCM1480_MC_CS23_BANK1             _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK1)
-#define V_BCM1480_MC_CS23_BANK1(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK1)
-#define G_BCM1480_MC_CS23_BANK1(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK1, M_BCM1480_MC_CS23_BANK1)
+#define S_BCM1480_MC_CS23_BANK1                    8
+#define M_BCM1480_MC_CS23_BANK1                    _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK1)
+#define V_BCM1480_MC_CS23_BANK1(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK1)
+#define G_BCM1480_MC_CS23_BANK1(x)         _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK1, M_BCM1480_MC_CS23_BANK1)
 
-#define S_BCM1480_MC_CS23_BANK2             16
-#define M_BCM1480_MC_CS23_BANK2             _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK2)
-#define V_BCM1480_MC_CS23_BANK2(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK2)
-#define G_BCM1480_MC_CS23_BANK2(x)          _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK2, M_BCM1480_MC_CS23_BANK2)
+#define S_BCM1480_MC_CS23_BANK2                    16
+#define M_BCM1480_MC_CS23_BANK2                    _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK2)
+#define V_BCM1480_MC_CS23_BANK2(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK2)
+#define G_BCM1480_MC_CS23_BANK2(x)         _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK2, M_BCM1480_MC_CS23_BANK2)
 
 #define K_BCM1480_MC_CSXX_BANKX_BIT_SPACING  8
 
  * DRAM Command Register (Table 90)
  */
 
-#define S_BCM1480_MC_COMMAND                0
-#define M_BCM1480_MC_COMMAND                _SB_MAKEMASK(4, S_BCM1480_MC_COMMAND)
-#define V_BCM1480_MC_COMMAND(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_COMMAND)
-#define G_BCM1480_MC_COMMAND(x)             _SB_GETVALUE(x, S_BCM1480_MC_COMMAND, M_BCM1480_MC_COMMAND)
+#define S_BCM1480_MC_COMMAND               0
+#define M_BCM1480_MC_COMMAND               _SB_MAKEMASK(4, S_BCM1480_MC_COMMAND)
+#define V_BCM1480_MC_COMMAND(x)                    _SB_MAKEVALUE(x, S_BCM1480_MC_COMMAND)
+#define G_BCM1480_MC_COMMAND(x)                    _SB_GETVALUE(x, S_BCM1480_MC_COMMAND, M_BCM1480_MC_COMMAND)
 
-#define K_BCM1480_MC_COMMAND_EMRS           0
-#define K_BCM1480_MC_COMMAND_MRS            1
-#define K_BCM1480_MC_COMMAND_PRE            2
-#define K_BCM1480_MC_COMMAND_AR             3
-#define K_BCM1480_MC_COMMAND_SETRFSH        4
-#define K_BCM1480_MC_COMMAND_CLRRFSH        5
-#define K_BCM1480_MC_COMMAND_SETPWRDN       6
-#define K_BCM1480_MC_COMMAND_CLRPWRDN       7
+#define K_BCM1480_MC_COMMAND_EMRS          0
+#define K_BCM1480_MC_COMMAND_MRS           1
+#define K_BCM1480_MC_COMMAND_PRE           2
+#define K_BCM1480_MC_COMMAND_AR                    3
+#define K_BCM1480_MC_COMMAND_SETRFSH       4
+#define K_BCM1480_MC_COMMAND_CLRRFSH       5
+#define K_BCM1480_MC_COMMAND_SETPWRDN      6
+#define K_BCM1480_MC_COMMAND_CLRPWRDN      7
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #define K_BCM1480_MC_COMMAND_EMRS2         8
 #define K_BCM1480_MC_COMMAND_DISABLE_MCLK   11
 #endif
 
-#define V_BCM1480_MC_COMMAND_EMRS           V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS)
-#define V_BCM1480_MC_COMMAND_MRS            V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_MRS)
-#define V_BCM1480_MC_COMMAND_PRE            V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_PRE)
-#define V_BCM1480_MC_COMMAND_AR             V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_AR)
-#define V_BCM1480_MC_COMMAND_SETRFSH        V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETRFSH)
-#define V_BCM1480_MC_COMMAND_CLRRFSH        V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRRFSH)
-#define V_BCM1480_MC_COMMAND_SETPWRDN       V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETPWRDN)
-#define V_BCM1480_MC_COMMAND_CLRPWRDN       V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRPWRDN)
+#define V_BCM1480_MC_COMMAND_EMRS          V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS)
+#define V_BCM1480_MC_COMMAND_MRS           V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_MRS)
+#define V_BCM1480_MC_COMMAND_PRE           V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_PRE)
+#define V_BCM1480_MC_COMMAND_AR                    V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_AR)
+#define V_BCM1480_MC_COMMAND_SETRFSH       V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETRFSH)
+#define V_BCM1480_MC_COMMAND_CLRRFSH       V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRRFSH)
+#define V_BCM1480_MC_COMMAND_SETPWRDN      V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETPWRDN)
+#define V_BCM1480_MC_COMMAND_CLRPWRDN      V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRPWRDN)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define V_BCM1480_MC_COMMAND_EMRS2          V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS2)
-#define V_BCM1480_MC_COMMAND_EMRS3          V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS3)
+#define V_BCM1480_MC_COMMAND_EMRS2         V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS2)
+#define V_BCM1480_MC_COMMAND_EMRS3         V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS3)
 #define V_BCM1480_MC_COMMAND_ENABLE_MCLK    V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_ENABLE_MCLK)
 #define V_BCM1480_MC_COMMAND_DISABLE_MCLK   V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_DISABLE_MCLK)
 #endif
 
 #define S_BCM1480_MC_CS0                   4
-#define M_BCM1480_MC_CS0                    _SB_MAKEMASK1(4)
-#define M_BCM1480_MC_CS1                    _SB_MAKEMASK1(5)
-#define M_BCM1480_MC_CS2                    _SB_MAKEMASK1(6)
-#define M_BCM1480_MC_CS3                    _SB_MAKEMASK1(7)
-#define M_BCM1480_MC_CS4                    _SB_MAKEMASK1(8)
-#define M_BCM1480_MC_CS5                    _SB_MAKEMASK1(9)
-#define M_BCM1480_MC_CS6                    _SB_MAKEMASK1(10)
-#define M_BCM1480_MC_CS7                    _SB_MAKEMASK1(11)
+#define M_BCM1480_MC_CS0                   _SB_MAKEMASK1(4)
+#define M_BCM1480_MC_CS1                   _SB_MAKEMASK1(5)
+#define M_BCM1480_MC_CS2                   _SB_MAKEMASK1(6)
+#define M_BCM1480_MC_CS3                   _SB_MAKEMASK1(7)
+#define M_BCM1480_MC_CS4                   _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_CS5                   _SB_MAKEMASK1(9)
+#define M_BCM1480_MC_CS6                   _SB_MAKEMASK1(10)
+#define M_BCM1480_MC_CS7                   _SB_MAKEMASK1(11)
 
-#define M_BCM1480_MC_CS                  _SB_MAKEMASK(8, S_BCM1480_MC_CS0)
-#define V_BCM1480_MC_CS(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_CS0)
-#define G_BCM1480_MC_CS(x)               _SB_GETVALUE(x, S_BCM1480_MC_CS0, M_BCM1480_MC_CS0)
+#define M_BCM1480_MC_CS                         _SB_MAKEMASK(8, S_BCM1480_MC_CS0)
+#define V_BCM1480_MC_CS(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_CS0)
+#define G_BCM1480_MC_CS(x)              _SB_GETVALUE(x, S_BCM1480_MC_CS0, M_BCM1480_MC_CS0)
 
-#define M_BCM1480_MC_CMD_ACTIVE             _SB_MAKEMASK1(16)
+#define M_BCM1480_MC_CMD_ACTIVE                    _SB_MAKEMASK1(16)
 
 /*
  * DRAM Mode Register (Table 91)
  */
 
-#define S_BCM1480_MC_EMODE                  0
-#define M_BCM1480_MC_EMODE                  _SB_MAKEMASK(15, S_BCM1480_MC_EMODE)
-#define V_BCM1480_MC_EMODE(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_EMODE)
-#define G_BCM1480_MC_EMODE(x)               _SB_GETVALUE(x, S_BCM1480_MC_EMODE, M_BCM1480_MC_EMODE)
-#define V_BCM1480_MC_EMODE_DEFAULT          V_BCM1480_MC_EMODE(0)
+#define S_BCM1480_MC_EMODE                 0
+#define M_BCM1480_MC_EMODE                 _SB_MAKEMASK(15, S_BCM1480_MC_EMODE)
+#define V_BCM1480_MC_EMODE(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_EMODE)
+#define G_BCM1480_MC_EMODE(x)              _SB_GETVALUE(x, S_BCM1480_MC_EMODE, M_BCM1480_MC_EMODE)
+#define V_BCM1480_MC_EMODE_DEFAULT         V_BCM1480_MC_EMODE(0)
 
-#define S_BCM1480_MC_MODE                   16
-#define M_BCM1480_MC_MODE                   _SB_MAKEMASK(15, S_BCM1480_MC_MODE)
-#define V_BCM1480_MC_MODE(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_MODE)
-#define G_BCM1480_MC_MODE(x)                _SB_GETVALUE(x, S_BCM1480_MC_MODE, M_BCM1480_MC_MODE)
-#define V_BCM1480_MC_MODE_DEFAULT           V_BCM1480_MC_MODE(0)
+#define S_BCM1480_MC_MODE                  16
+#define M_BCM1480_MC_MODE                  _SB_MAKEMASK(15, S_BCM1480_MC_MODE)
+#define V_BCM1480_MC_MODE(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_MODE)
+#define G_BCM1480_MC_MODE(x)               _SB_GETVALUE(x, S_BCM1480_MC_MODE, M_BCM1480_MC_MODE)
+#define V_BCM1480_MC_MODE_DEFAULT          V_BCM1480_MC_MODE(0)
 
-#define S_BCM1480_MC_DRAM_TYPE              32
-#define M_BCM1480_MC_DRAM_TYPE              _SB_MAKEMASK(4, S_BCM1480_MC_DRAM_TYPE)
-#define V_BCM1480_MC_DRAM_TYPE(x)           _SB_MAKEVALUE(x, S_BCM1480_MC_DRAM_TYPE)
-#define G_BCM1480_MC_DRAM_TYPE(x)           _SB_GETVALUE(x, S_BCM1480_MC_DRAM_TYPE, M_BCM1480_MC_DRAM_TYPE)
+#define S_BCM1480_MC_DRAM_TYPE             32
+#define M_BCM1480_MC_DRAM_TYPE             _SB_MAKEMASK(4, S_BCM1480_MC_DRAM_TYPE)
+#define V_BCM1480_MC_DRAM_TYPE(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_DRAM_TYPE)
+#define G_BCM1480_MC_DRAM_TYPE(x)          _SB_GETVALUE(x, S_BCM1480_MC_DRAM_TYPE, M_BCM1480_MC_DRAM_TYPE)
 
-#define K_BCM1480_MC_DRAM_TYPE_JEDEC        0
-#define K_BCM1480_MC_DRAM_TYPE_FCRAM        1
+#define K_BCM1480_MC_DRAM_TYPE_JEDEC       0
+#define K_BCM1480_MC_DRAM_TYPE_FCRAM       1
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #define K_BCM1480_MC_DRAM_TYPE_DDR2        2
 
 #define K_BCM1480_MC_DRAM_TYPE_DDR2_PASS1   0
 
-#define V_BCM1480_MC_DRAM_TYPE_JEDEC        V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC)
-#define V_BCM1480_MC_DRAM_TYPE_FCRAM        V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM)
+#define V_BCM1480_MC_DRAM_TYPE_JEDEC       V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC)
+#define V_BCM1480_MC_DRAM_TYPE_FCRAM       V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #define V_BCM1480_MC_DRAM_TYPE_DDR2        V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_DDR2)
 #endif
 
-#define M_BCM1480_MC_GANGED                 _SB_MAKEMASK1(36)
-#define M_BCM1480_MC_BY9_INTF               _SB_MAKEMASK1(37)
-#define M_BCM1480_MC_FORCE_ECC64            _SB_MAKEMASK1(38)
-#define M_BCM1480_MC_ECC_DISABLE            _SB_MAKEMASK1(39)
+#define M_BCM1480_MC_GANGED                _SB_MAKEMASK1(36)
+#define M_BCM1480_MC_BY9_INTF              _SB_MAKEMASK1(37)
+#define M_BCM1480_MC_FORCE_ECC64           _SB_MAKEMASK1(38)
+#define M_BCM1480_MC_ECC_DISABLE           _SB_MAKEMASK1(39)
 
-#define S_BCM1480_MC_PG_POLICY              40
-#define M_BCM1480_MC_PG_POLICY              _SB_MAKEMASK(2, S_BCM1480_MC_PG_POLICY)
-#define V_BCM1480_MC_PG_POLICY(x)           _SB_MAKEVALUE(x, S_BCM1480_MC_PG_POLICY)
-#define G_BCM1480_MC_PG_POLICY(x)           _SB_GETVALUE(x, S_BCM1480_MC_PG_POLICY, M_BCM1480_MC_PG_POLICY)
+#define S_BCM1480_MC_PG_POLICY             40
+#define M_BCM1480_MC_PG_POLICY             _SB_MAKEMASK(2, S_BCM1480_MC_PG_POLICY)
+#define V_BCM1480_MC_PG_POLICY(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_PG_POLICY)
+#define G_BCM1480_MC_PG_POLICY(x)          _SB_GETVALUE(x, S_BCM1480_MC_PG_POLICY, M_BCM1480_MC_PG_POLICY)
 
-#define K_BCM1480_MC_PG_POLICY_CLOSED       0
+#define K_BCM1480_MC_PG_POLICY_CLOSED      0
 #define K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK 1
 
-#define V_BCM1480_MC_PG_POLICY_CLOSED       V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CLOSED)
+#define V_BCM1480_MC_PG_POLICY_CLOSED      V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CLOSED)
 #define V_BCM1480_MC_PG_POLICY_CAS_TIME_CHK V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #endif
 
 #define V_BCM1480_MC_DRAMMODE_DEFAULT  V_BCM1480_MC_EMODE_DEFAULT | V_BCM1480_MC_MODE_DEFAULT | V_BCM1480_MC_DRAM_TYPE_JEDEC | \
-                                V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
+                               V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
 
 /*
  * Memory Clock Configuration Register (Table 92)
  */
 
-#define S_BCM1480_MC_CLK_RATIO              0
-#define M_BCM1480_MC_CLK_RATIO              _SB_MAKEMASK(6, S_BCM1480_MC_CLK_RATIO)
-#define V_BCM1480_MC_CLK_RATIO(x)           _SB_MAKEVALUE(x, S_BCM1480_MC_CLK_RATIO)
-#define G_BCM1480_MC_CLK_RATIO(x)           _SB_GETVALUE(x, S_BCM1480_MC_CLK_RATIO, M_BCM1480_MC_CLK_RATIO)
+#define S_BCM1480_MC_CLK_RATIO             0
+#define M_BCM1480_MC_CLK_RATIO             _SB_MAKEMASK(6, S_BCM1480_MC_CLK_RATIO)
+#define V_BCM1480_MC_CLK_RATIO(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_CLK_RATIO)
+#define G_BCM1480_MC_CLK_RATIO(x)          _SB_GETVALUE(x, S_BCM1480_MC_CLK_RATIO, M_BCM1480_MC_CLK_RATIO)
 
-#define V_BCM1480_MC_CLK_RATIO_DEFAULT      V_BCM1480_MC_CLK_RATIO(10)
+#define V_BCM1480_MC_CLK_RATIO_DEFAULT     V_BCM1480_MC_CLK_RATIO(10)
 
-#define S_BCM1480_MC_REF_RATE               8
-#define M_BCM1480_MC_REF_RATE               _SB_MAKEMASK(8, S_BCM1480_MC_REF_RATE)
-#define V_BCM1480_MC_REF_RATE(x)            _SB_MAKEVALUE(x, S_BCM1480_MC_REF_RATE)
-#define G_BCM1480_MC_REF_RATE(x)            _SB_GETVALUE(x, S_BCM1480_MC_REF_RATE, M_BCM1480_MC_REF_RATE)
+#define S_BCM1480_MC_REF_RATE              8
+#define M_BCM1480_MC_REF_RATE              _SB_MAKEMASK(8, S_BCM1480_MC_REF_RATE)
+#define V_BCM1480_MC_REF_RATE(x)           _SB_MAKEVALUE(x, S_BCM1480_MC_REF_RATE)
+#define G_BCM1480_MC_REF_RATE(x)           _SB_GETVALUE(x, S_BCM1480_MC_REF_RATE, M_BCM1480_MC_REF_RATE)
 
-#define K_BCM1480_MC_REF_RATE_100MHz        0x31
-#define K_BCM1480_MC_REF_RATE_200MHz        0x62
-#define K_BCM1480_MC_REF_RATE_400MHz        0xC4
+#define K_BCM1480_MC_REF_RATE_100MHz       0x31
+#define K_BCM1480_MC_REF_RATE_200MHz       0x62
+#define K_BCM1480_MC_REF_RATE_400MHz       0xC4
 
-#define V_BCM1480_MC_REF_RATE_100MHz        V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_100MHz)
-#define V_BCM1480_MC_REF_RATE_200MHz        V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_200MHz)
-#define V_BCM1480_MC_REF_RATE_400MHz        V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_400MHz)
-#define V_BCM1480_MC_REF_RATE_DEFAULT       V_BCM1480_MC_REF_RATE_400MHz
+#define V_BCM1480_MC_REF_RATE_100MHz       V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_100MHz)
+#define V_BCM1480_MC_REF_RATE_200MHz       V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_200MHz)
+#define V_BCM1480_MC_REF_RATE_400MHz       V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_400MHz)
+#define V_BCM1480_MC_REF_RATE_DEFAULT      V_BCM1480_MC_REF_RATE_400MHz
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #define M_BCM1480_MC_AUTO_REF_DIS          _SB_MAKEMASK1(16)
 
 #define M_BCM1480_MC_CS_ODD_ODT_EN         _SB_MAKEMASK1(32)
 
-#define S_BCM1480_MC_ODT0                  0
+#define S_BCM1480_MC_ODT0                  0
 #define M_BCM1480_MC_ODT0                  _SB_MAKEMASK(8, S_BCM1480_MC_ODT0)
 #define V_BCM1480_MC_ODT0(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ODT0)
 
-#define S_BCM1480_MC_ODT2                  8
+#define S_BCM1480_MC_ODT2                  8
 #define M_BCM1480_MC_ODT2                  _SB_MAKEMASK(8, S_BCM1480_MC_ODT2)
 #define V_BCM1480_MC_ODT2(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ODT2)
 
-#define S_BCM1480_MC_ODT4                  16
+#define S_BCM1480_MC_ODT4                  16
 #define M_BCM1480_MC_ODT4                  _SB_MAKEMASK(8, S_BCM1480_MC_ODT4)
 #define V_BCM1480_MC_ODT4(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ODT4)
 
-#define S_BCM1480_MC_ODT6                  24
+#define S_BCM1480_MC_ODT6                  24
 #define M_BCM1480_MC_ODT6                  _SB_MAKEMASK(8, S_BCM1480_MC_ODT6)
 #define V_BCM1480_MC_ODT6(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_ODT6)
 #endif
  * Memory DLL Configuration Register (Table 93)
  */
 
-#define S_BCM1480_MC_ADDR_COARSE_ADJ         0
-#define M_BCM1480_MC_ADDR_COARSE_ADJ         _SB_MAKEMASK(6, S_BCM1480_MC_ADDR_COARSE_ADJ)
-#define V_BCM1480_MC_ADDR_COARSE_ADJ(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ)
-#define G_BCM1480_MC_ADDR_COARSE_ADJ(x)      _SB_GETVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ, M_BCM1480_MC_ADDR_COARSE_ADJ)
+#define S_BCM1480_MC_ADDR_COARSE_ADJ        0
+#define M_BCM1480_MC_ADDR_COARSE_ADJ        _SB_MAKEMASK(6, S_BCM1480_MC_ADDR_COARSE_ADJ)
+#define V_BCM1480_MC_ADDR_COARSE_ADJ(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ)
+#define G_BCM1480_MC_ADDR_COARSE_ADJ(x)             _SB_GETVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ, M_BCM1480_MC_ADDR_COARSE_ADJ)
 #define V_BCM1480_MC_ADDR_COARSE_ADJ_DEFAULT V_BCM1480_MC_ADDR_COARSE_ADJ(0x0)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_ADDR_FREQ_RANGE           8
-#define M_BCM1480_MC_ADDR_FREQ_RANGE           _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FREQ_RANGE)
-#define V_BCM1480_MC_ADDR_FREQ_RANGE(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE)
-#define G_BCM1480_MC_ADDR_FREQ_RANGE(x)        _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE, M_BCM1480_MC_ADDR_FREQ_RANGE)
-#define V_BCM1480_MC_ADDR_FREQ_RANGE_DEFAULT   V_BCM1480_MC_ADDR_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_ADDR_FREQ_RANGE           8
+#define M_BCM1480_MC_ADDR_FREQ_RANGE           _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FREQ_RANGE)
+#define V_BCM1480_MC_ADDR_FREQ_RANGE(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE)
+#define G_BCM1480_MC_ADDR_FREQ_RANGE(x)                _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE, M_BCM1480_MC_ADDR_FREQ_RANGE)
+#define V_BCM1480_MC_ADDR_FREQ_RANGE_DEFAULT   V_BCM1480_MC_ADDR_FREQ_RANGE(0x4)
 #endif
 
-#define S_BCM1480_MC_ADDR_FINE_ADJ          8
-#define M_BCM1480_MC_ADDR_FINE_ADJ          _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FINE_ADJ)
-#define V_BCM1480_MC_ADDR_FINE_ADJ(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ)
-#define G_BCM1480_MC_ADDR_FINE_ADJ(x)       _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ, M_BCM1480_MC_ADDR_FINE_ADJ)
+#define S_BCM1480_MC_ADDR_FINE_ADJ         8
+#define M_BCM1480_MC_ADDR_FINE_ADJ         _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FINE_ADJ)
+#define V_BCM1480_MC_ADDR_FINE_ADJ(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ)
+#define G_BCM1480_MC_ADDR_FINE_ADJ(x)      _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ, M_BCM1480_MC_ADDR_FINE_ADJ)
 #define V_BCM1480_MC_ADDR_FINE_ADJ_DEFAULT  V_BCM1480_MC_ADDR_FINE_ADJ(0x8)
 
-#define S_BCM1480_MC_DQI_COARSE_ADJ         16
-#define M_BCM1480_MC_DQI_COARSE_ADJ         _SB_MAKEMASK(6, S_BCM1480_MC_DQI_COARSE_ADJ)
-#define V_BCM1480_MC_DQI_COARSE_ADJ(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ)
-#define G_BCM1480_MC_DQI_COARSE_ADJ(x)      _SB_GETVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ, M_BCM1480_MC_DQI_COARSE_ADJ)
+#define S_BCM1480_MC_DQI_COARSE_ADJ        16
+#define M_BCM1480_MC_DQI_COARSE_ADJ        _SB_MAKEMASK(6, S_BCM1480_MC_DQI_COARSE_ADJ)
+#define V_BCM1480_MC_DQI_COARSE_ADJ(x)     _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ)
+#define G_BCM1480_MC_DQI_COARSE_ADJ(x)     _SB_GETVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ, M_BCM1480_MC_DQI_COARSE_ADJ)
 #define V_BCM1480_MC_DQI_COARSE_ADJ_DEFAULT V_BCM1480_MC_DQI_COARSE_ADJ(0x0)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DQI_FREQ_RANGE            24
-#define M_BCM1480_MC_DQI_FREQ_RANGE            _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FREQ_RANGE)
-#define V_BCM1480_MC_DQI_FREQ_RANGE(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE)
-#define G_BCM1480_MC_DQI_FREQ_RANGE(x)         _SB_GETVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE, M_BCM1480_MC_DQI_FREQ_RANGE)
-#define V_BCM1480_MC_DQI_FREQ_RANGE_DEFAULT    V_BCM1480_MC_DQI_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DQI_FREQ_RANGE            24
+#define M_BCM1480_MC_DQI_FREQ_RANGE            _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FREQ_RANGE)
+#define V_BCM1480_MC_DQI_FREQ_RANGE(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE)
+#define G_BCM1480_MC_DQI_FREQ_RANGE(x)         _SB_GETVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE, M_BCM1480_MC_DQI_FREQ_RANGE)
+#define V_BCM1480_MC_DQI_FREQ_RANGE_DEFAULT    V_BCM1480_MC_DQI_FREQ_RANGE(0x4)
 #endif
 
-#define S_BCM1480_MC_DQI_FINE_ADJ           24
-#define M_BCM1480_MC_DQI_FINE_ADJ           _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FINE_ADJ)
-#define V_BCM1480_MC_DQI_FINE_ADJ(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ)
-#define G_BCM1480_MC_DQI_FINE_ADJ(x)        _SB_GETVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ, M_BCM1480_MC_DQI_FINE_ADJ)
+#define S_BCM1480_MC_DQI_FINE_ADJ          24
+#define M_BCM1480_MC_DQI_FINE_ADJ          _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FINE_ADJ)
+#define V_BCM1480_MC_DQI_FINE_ADJ(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ)
+#define G_BCM1480_MC_DQI_FINE_ADJ(x)       _SB_GETVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ, M_BCM1480_MC_DQI_FINE_ADJ)
 #define V_BCM1480_MC_DQI_FINE_ADJ_DEFAULT   V_BCM1480_MC_DQI_FINE_ADJ(0x8)
 
-#define S_BCM1480_MC_DQO_COARSE_ADJ         32
-#define M_BCM1480_MC_DQO_COARSE_ADJ         _SB_MAKEMASK(6, S_BCM1480_MC_DQO_COARSE_ADJ)
-#define V_BCM1480_MC_DQO_COARSE_ADJ(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ)
-#define G_BCM1480_MC_DQO_COARSE_ADJ(x)      _SB_GETVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ, M_BCM1480_MC_DQO_COARSE_ADJ)
+#define S_BCM1480_MC_DQO_COARSE_ADJ        32
+#define M_BCM1480_MC_DQO_COARSE_ADJ        _SB_MAKEMASK(6, S_BCM1480_MC_DQO_COARSE_ADJ)
+#define V_BCM1480_MC_DQO_COARSE_ADJ(x)     _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ)
+#define G_BCM1480_MC_DQO_COARSE_ADJ(x)     _SB_GETVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ, M_BCM1480_MC_DQO_COARSE_ADJ)
 #define V_BCM1480_MC_DQO_COARSE_ADJ_DEFAULT V_BCM1480_MC_DQO_COARSE_ADJ(0x0)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DQO_FREQ_RANGE            40
-#define M_BCM1480_MC_DQO_FREQ_RANGE            _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FREQ_RANGE)
-#define V_BCM1480_MC_DQO_FREQ_RANGE(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE)
-#define G_BCM1480_MC_DQO_FREQ_RANGE(x)         _SB_GETVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE, M_BCM1480_MC_DQO_FREQ_RANGE)
-#define V_BCM1480_MC_DQO_FREQ_RANGE_DEFAULT    V_BCM1480_MC_DQO_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DQO_FREQ_RANGE            40
+#define M_BCM1480_MC_DQO_FREQ_RANGE            _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FREQ_RANGE)
+#define V_BCM1480_MC_DQO_FREQ_RANGE(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE)
+#define G_BCM1480_MC_DQO_FREQ_RANGE(x)         _SB_GETVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE, M_BCM1480_MC_DQO_FREQ_RANGE)
+#define V_BCM1480_MC_DQO_FREQ_RANGE_DEFAULT    V_BCM1480_MC_DQO_FREQ_RANGE(0x4)
 #endif
 
-#define S_BCM1480_MC_DQO_FINE_ADJ           40
-#define M_BCM1480_MC_DQO_FINE_ADJ           _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FINE_ADJ)
-#define V_BCM1480_MC_DQO_FINE_ADJ(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ)
-#define G_BCM1480_MC_DQO_FINE_ADJ(x)        _SB_GETVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ, M_BCM1480_MC_DQO_FINE_ADJ)
+#define S_BCM1480_MC_DQO_FINE_ADJ          40
+#define M_BCM1480_MC_DQO_FINE_ADJ          _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FINE_ADJ)
+#define V_BCM1480_MC_DQO_FINE_ADJ(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ)
+#define G_BCM1480_MC_DQO_FINE_ADJ(x)       _SB_GETVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ, M_BCM1480_MC_DQO_FINE_ADJ)
 #define V_BCM1480_MC_DQO_FINE_ADJ_DEFAULT   V_BCM1480_MC_DQO_FINE_ADJ(0x8)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DLL_PDSEL            44
-#define M_BCM1480_MC_DLL_PDSEL            _SB_MAKEMASK(2, S_BCM1480_MC_DLL_PDSEL)
-#define V_BCM1480_MC_DLL_PDSEL(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_PDSEL)
-#define G_BCM1480_MC_DLL_PDSEL(x)         _SB_GETVALUE(x, S_BCM1480_MC_DLL_PDSEL, M_BCM1480_MC_DLL_PDSEL)
-#define V_BCM1480_MC_DLL_DEFAULT_PDSEL    V_BCM1480_MC_DLL_PDSEL(0x0)
-
-#define        M_BCM1480_MC_DLL_REGBYPASS        _SB_MAKEMASK1(46)
-#define        M_BCM1480_MC_DQO_SHIFT            _SB_MAKEMASK1(47)
+#define S_BCM1480_MC_DLL_PDSEL           44
+#define M_BCM1480_MC_DLL_PDSEL           _SB_MAKEMASK(2, S_BCM1480_MC_DLL_PDSEL)
+#define V_BCM1480_MC_DLL_PDSEL(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_PDSEL)
+#define G_BCM1480_MC_DLL_PDSEL(x)        _SB_GETVALUE(x, S_BCM1480_MC_DLL_PDSEL, M_BCM1480_MC_DLL_PDSEL)
+#define V_BCM1480_MC_DLL_DEFAULT_PDSEL   V_BCM1480_MC_DLL_PDSEL(0x0)
+
+#define M_BCM1480_MC_DLL_REGBYPASS       _SB_MAKEMASK1(46)
+#define M_BCM1480_MC_DQO_SHIFT           _SB_MAKEMASK1(47)
 #endif
 
-#define S_BCM1480_MC_DLL_DEFAULT           48
-#define M_BCM1480_MC_DLL_DEFAULT           _SB_MAKEMASK(6, S_BCM1480_MC_DLL_DEFAULT)
-#define V_BCM1480_MC_DLL_DEFAULT(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_DEFAULT)
-#define G_BCM1480_MC_DLL_DEFAULT(x)        _SB_GETVALUE(x, S_BCM1480_MC_DLL_DEFAULT, M_BCM1480_MC_DLL_DEFAULT)
+#define S_BCM1480_MC_DLL_DEFAULT          48
+#define M_BCM1480_MC_DLL_DEFAULT          _SB_MAKEMASK(6, S_BCM1480_MC_DLL_DEFAULT)
+#define V_BCM1480_MC_DLL_DEFAULT(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_DEFAULT)
+#define G_BCM1480_MC_DLL_DEFAULT(x)       _SB_GETVALUE(x, S_BCM1480_MC_DLL_DEFAULT, M_BCM1480_MC_DLL_DEFAULT)
 #define V_BCM1480_MC_DLL_DEFAULT_DEFAULT   V_BCM1480_MC_DLL_DEFAULT(0x10)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #define S_BCM1480_MC_DLL_REGCTRL         54
-#define M_BCM1480_MC_DLL_REGCTRL                 _SB_MAKEMASK(2, S_BCM1480_MC_DLL_REGCTRL)
-#define V_BCM1480_MC_DLL_REGCTRL(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_REGCTRL)
-#define G_BCM1480_MC_DLL_REGCTRL(x)       _SB_GETVALUE(x, S_BCM1480_MC_DLL_REGCTRL, M_BCM1480_MC_DLL_REGCTRL)
+#define M_BCM1480_MC_DLL_REGCTRL         _SB_MAKEMASK(2, S_BCM1480_MC_DLL_REGCTRL)
+#define V_BCM1480_MC_DLL_REGCTRL(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_REGCTRL)
+#define G_BCM1480_MC_DLL_REGCTRL(x)      _SB_GETVALUE(x, S_BCM1480_MC_DLL_REGCTRL, M_BCM1480_MC_DLL_REGCTRL)
 #define V_BCM1480_MC_DLL_DEFAULT_REGCTRL  V_BCM1480_MC_DLL_REGCTRL(0x0)
 #endif
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DLL_FREQ_RANGE            56
-#define M_BCM1480_MC_DLL_FREQ_RANGE            _SB_MAKEMASK(4, S_BCM1480_MC_DLL_FREQ_RANGE)
-#define V_BCM1480_MC_DLL_FREQ_RANGE(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE)
-#define G_BCM1480_MC_DLL_FREQ_RANGE(x)         _SB_GETVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE, M_BCM1480_MC_DLL_FREQ_RANGE)
-#define V_BCM1480_MC_DLL_FREQ_RANGE_DEFAULT    V_BCM1480_MC_DLL_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DLL_FREQ_RANGE            56
+#define M_BCM1480_MC_DLL_FREQ_RANGE            _SB_MAKEMASK(4, S_BCM1480_MC_DLL_FREQ_RANGE)
+#define V_BCM1480_MC_DLL_FREQ_RANGE(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE)
+#define G_BCM1480_MC_DLL_FREQ_RANGE(x)         _SB_GETVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE, M_BCM1480_MC_DLL_FREQ_RANGE)
+#define V_BCM1480_MC_DLL_FREQ_RANGE_DEFAULT    V_BCM1480_MC_DLL_FREQ_RANGE(0x4)
 #endif
 
-#define S_BCM1480_MC_DLL_STEP_SIZE          56
-#define M_BCM1480_MC_DLL_STEP_SIZE          _SB_MAKEMASK(4, S_BCM1480_MC_DLL_STEP_SIZE)
-#define V_BCM1480_MC_DLL_STEP_SIZE(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE)
-#define G_BCM1480_MC_DLL_STEP_SIZE(x)       _SB_GETVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE, M_BCM1480_MC_DLL_STEP_SIZE)
+#define S_BCM1480_MC_DLL_STEP_SIZE         56
+#define M_BCM1480_MC_DLL_STEP_SIZE         _SB_MAKEMASK(4, S_BCM1480_MC_DLL_STEP_SIZE)
+#define V_BCM1480_MC_DLL_STEP_SIZE(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE)
+#define G_BCM1480_MC_DLL_STEP_SIZE(x)      _SB_GETVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE, M_BCM1480_MC_DLL_STEP_SIZE)
 #define V_BCM1480_MC_DLL_STEP_SIZE_DEFAULT  V_BCM1480_MC_DLL_STEP_SIZE(0x8)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #define S_BCM1480_MC_DLL_BGCTRL          60
-#define M_BCM1480_MC_DLL_BGCTRL          _SB_MAKEMASK(2, S_BCM1480_MC_DLL_BGCTRL)
-#define V_BCM1480_MC_DLL_BGCTRL(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_BGCTRL)
-#define G_BCM1480_MC_DLL_BGCTRL(x)       _SB_GETVALUE(x, S_BCM1480_MC_DLL_BGCTRL, M_BCM1480_MC_DLL_BGCTRL)
-#define V_BCM1480_MC_DLL_DEFAULT_BGCTRL  V_BCM1480_MC_DLL_BGCTRL(0x0)
+#define M_BCM1480_MC_DLL_BGCTRL                  _SB_MAKEMASK(2, S_BCM1480_MC_DLL_BGCTRL)
+#define V_BCM1480_MC_DLL_BGCTRL(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_BGCTRL)
+#define G_BCM1480_MC_DLL_BGCTRL(x)      _SB_GETVALUE(x, S_BCM1480_MC_DLL_BGCTRL, M_BCM1480_MC_DLL_BGCTRL)
+#define V_BCM1480_MC_DLL_DEFAULT_BGCTRL         V_BCM1480_MC_DLL_BGCTRL(0x0)
 #endif
 
-#define        M_BCM1480_MC_DLL_BYPASS             _SB_MAKEMASK1(63)
+#define M_BCM1480_MC_DLL_BYPASS                    _SB_MAKEMASK1(63)
 
 /*
  * Memory Drive Configuration Register (Table 94)
  */
 
-#define S_BCM1480_MC_RTT_BYP_PULLDOWN       0
-#define M_BCM1480_MC_RTT_BYP_PULLDOWN       _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLDOWN)
+#define S_BCM1480_MC_RTT_BYP_PULLDOWN      0
+#define M_BCM1480_MC_RTT_BYP_PULLDOWN      _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLDOWN)
 #define V_BCM1480_MC_RTT_BYP_PULLDOWN(x)    _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLDOWN)
 #define G_BCM1480_MC_RTT_BYP_PULLDOWN(x)    _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLDOWN, M_BCM1480_MC_RTT_BYP_PULLDOWN)
 
-#define S_BCM1480_MC_RTT_BYP_PULLUP         6
-#define M_BCM1480_MC_RTT_BYP_PULLUP         _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLUP)
-#define V_BCM1480_MC_RTT_BYP_PULLUP(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP)
-#define G_BCM1480_MC_RTT_BYP_PULLUP(x)      _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP, M_BCM1480_MC_RTT_BYP_PULLUP)
+#define S_BCM1480_MC_RTT_BYP_PULLUP        6
+#define M_BCM1480_MC_RTT_BYP_PULLUP        _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLUP)
+#define V_BCM1480_MC_RTT_BYP_PULLUP(x)     _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP)
+#define G_BCM1480_MC_RTT_BYP_PULLUP(x)     _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP, M_BCM1480_MC_RTT_BYP_PULLUP)
 
-#define M_BCM1480_MC_RTT_BYPASS             _SB_MAKEMASK1(8)
-#define M_BCM1480_MC_RTT_COMP_MOV_AVG       _SB_MAKEMASK1(9)
+#define M_BCM1480_MC_RTT_BYPASS                    _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_RTT_COMP_MOV_AVG      _SB_MAKEMASK1(9)
 
 #define S_BCM1480_MC_PVT_BYP_C1_PULLDOWN    10
 #define M_BCM1480_MC_PVT_BYP_C1_PULLDOWN    _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
 #define V_BCM1480_MC_PVT_BYP_C1_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
 #define G_BCM1480_MC_PVT_BYP_C1_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN, M_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
 
-#define S_BCM1480_MC_PVT_BYP_C1_PULLUP      15
-#define M_BCM1480_MC_PVT_BYP_C1_PULLUP      _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
+#define S_BCM1480_MC_PVT_BYP_C1_PULLUP     15
+#define M_BCM1480_MC_PVT_BYP_C1_PULLUP     _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
 #define V_BCM1480_MC_PVT_BYP_C1_PULLUP(x)   _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
 #define G_BCM1480_MC_PVT_BYP_C1_PULLUP(x)   _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLUP, M_BCM1480_MC_PVT_BYP_C1_PULLUP)
 
 #define V_BCM1480_MC_PVT_BYP_C2_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
 #define G_BCM1480_MC_PVT_BYP_C2_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN, M_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
 
-#define S_BCM1480_MC_PVT_BYP_C2_PULLUP      25
-#define M_BCM1480_MC_PVT_BYP_C2_PULLUP      _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
+#define S_BCM1480_MC_PVT_BYP_C2_PULLUP     25
+#define M_BCM1480_MC_PVT_BYP_C2_PULLUP     _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
 #define V_BCM1480_MC_PVT_BYP_C2_PULLUP(x)   _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
 #define G_BCM1480_MC_PVT_BYP_C2_PULLUP(x)   _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLUP, M_BCM1480_MC_PVT_BYP_C2_PULLUP)
 
-#define M_BCM1480_MC_PVT_BYPASS             _SB_MAKEMASK1(30)
-#define M_BCM1480_MC_PVT_COMP_MOV_AVG       _SB_MAKEMASK1(31)
+#define M_BCM1480_MC_PVT_BYPASS                    _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_PVT_COMP_MOV_AVG      _SB_MAKEMASK1(31)
 
-#define M_BCM1480_MC_CLK_CLASS              _SB_MAKEMASK1(34)
-#define M_BCM1480_MC_DATA_CLASS             _SB_MAKEMASK1(35)
-#define M_BCM1480_MC_ADDR_CLASS             _SB_MAKEMASK1(36)
+#define M_BCM1480_MC_CLK_CLASS             _SB_MAKEMASK1(34)
+#define M_BCM1480_MC_DATA_CLASS                    _SB_MAKEMASK1(35)
+#define M_BCM1480_MC_ADDR_CLASS                    _SB_MAKEMASK1(36)
 
-#define M_BCM1480_MC_DQ_ODT_75              _SB_MAKEMASK1(37)
-#define M_BCM1480_MC_DQ_ODT_150             _SB_MAKEMASK1(38)
-#define M_BCM1480_MC_DQS_ODT_75             _SB_MAKEMASK1(39)
-#define M_BCM1480_MC_DQS_ODT_150            _SB_MAKEMASK1(40)
-#define M_BCM1480_MC_DQS_DIFF               _SB_MAKEMASK1(41)
+#define M_BCM1480_MC_DQ_ODT_75             _SB_MAKEMASK1(37)
+#define M_BCM1480_MC_DQ_ODT_150                    _SB_MAKEMASK1(38)
+#define M_BCM1480_MC_DQS_ODT_75                    _SB_MAKEMASK1(39)
+#define M_BCM1480_MC_DQS_ODT_150           _SB_MAKEMASK1(40)
+#define M_BCM1480_MC_DQS_DIFF              _SB_MAKEMASK1(41)
 
 /*
  * ECC Test Data Register (Table 95)
  */
 
-#define S_BCM1480_MC_DATA_INVERT            0
-#define M_DATA_ECC_INVERT           _SB_MAKEMASK(64, S_BCM1480_MC_ECC_INVERT)
+#define S_BCM1480_MC_DATA_INVERT           0
+#define M_DATA_ECC_INVERT          _SB_MAKEMASK(64, S_BCM1480_MC_ECC_INVERT)
 
 /*
  * ECC Test ECC Register (Table 96)
  */
 
-#define S_BCM1480_MC_ECC_INVERT             0
-#define M_BCM1480_MC_ECC_INVERT             _SB_MAKEMASK(8, S_BCM1480_MC_ECC_INVERT)
+#define S_BCM1480_MC_ECC_INVERT                    0
+#define M_BCM1480_MC_ECC_INVERT                    _SB_MAKEMASK(8, S_BCM1480_MC_ECC_INVERT)
 
 /*
  * SDRAM Timing Register  (Table 97)
  */
 
-#define S_BCM1480_MC_tRCD                   0
-#define M_BCM1480_MC_tRCD                   _SB_MAKEMASK(4, S_BCM1480_MC_tRCD)
-#define V_BCM1480_MC_tRCD(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tRCD)
-#define G_BCM1480_MC_tRCD(x)                _SB_GETVALUE(x, S_BCM1480_MC_tRCD, M_BCM1480_MC_tRCD)
-#define K_BCM1480_MC_tRCD_DEFAULT           3
-#define V_BCM1480_MC_tRCD_DEFAULT           V_BCM1480_MC_tRCD(K_BCM1480_MC_tRCD_DEFAULT)
-
-#define S_BCM1480_MC_tCL                    4
-#define M_BCM1480_MC_tCL                    _SB_MAKEMASK(4, S_BCM1480_MC_tCL)
-#define V_BCM1480_MC_tCL(x)                 _SB_MAKEVALUE(x, S_BCM1480_MC_tCL)
-#define G_BCM1480_MC_tCL(x)                 _SB_GETVALUE(x, S_BCM1480_MC_tCL, M_BCM1480_MC_tCL)
-#define K_BCM1480_MC_tCL_DEFAULT            2
-#define V_BCM1480_MC_tCL_DEFAULT            V_BCM1480_MC_tCL(K_BCM1480_MC_tCL_DEFAULT)
-
-#define M_BCM1480_MC_tCrDh                  _SB_MAKEMASK1(8)
-
-#define S_BCM1480_MC_tWR                    9
-#define M_BCM1480_MC_tWR                    _SB_MAKEMASK(3, S_BCM1480_MC_tWR)
-#define V_BCM1480_MC_tWR(x)                 _SB_MAKEVALUE(x, S_BCM1480_MC_tWR)
-#define G_BCM1480_MC_tWR(x)                 _SB_GETVALUE(x, S_BCM1480_MC_tWR, M_BCM1480_MC_tWR)
-#define K_BCM1480_MC_tWR_DEFAULT            2
-#define V_BCM1480_MC_tWR_DEFAULT            V_BCM1480_MC_tWR(K_BCM1480_MC_tWR_DEFAULT)
-
-#define S_BCM1480_MC_tCwD                   12
-#define M_BCM1480_MC_tCwD                   _SB_MAKEMASK(4, S_BCM1480_MC_tCwD)
-#define V_BCM1480_MC_tCwD(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tCwD)
-#define G_BCM1480_MC_tCwD(x)                _SB_GETVALUE(x, S_BCM1480_MC_tCwD, M_BCM1480_MC_tCwD)
-#define K_BCM1480_MC_tCwD_DEFAULT           1
-#define V_BCM1480_MC_tCwD_DEFAULT           V_BCM1480_MC_tCwD(K_BCM1480_MC_tCwD_DEFAULT)
-
-#define S_BCM1480_MC_tRP                    16
-#define M_BCM1480_MC_tRP                    _SB_MAKEMASK(4, S_BCM1480_MC_tRP)
-#define V_BCM1480_MC_tRP(x)                 _SB_MAKEVALUE(x, S_BCM1480_MC_tRP)
-#define G_BCM1480_MC_tRP(x)                 _SB_GETVALUE(x, S_BCM1480_MC_tRP, M_BCM1480_MC_tRP)
-#define K_BCM1480_MC_tRP_DEFAULT            4
-#define V_BCM1480_MC_tRP_DEFAULT            V_BCM1480_MC_tRP(K_BCM1480_MC_tRP_DEFAULT)
-
-#define S_BCM1480_MC_tRRD                   20
-#define M_BCM1480_MC_tRRD                   _SB_MAKEMASK(4, S_BCM1480_MC_tRRD)
-#define V_BCM1480_MC_tRRD(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tRRD)
-#define G_BCM1480_MC_tRRD(x)                _SB_GETVALUE(x, S_BCM1480_MC_tRRD, M_BCM1480_MC_tRRD)
-#define K_BCM1480_MC_tRRD_DEFAULT           2
-#define V_BCM1480_MC_tRRD_DEFAULT           V_BCM1480_MC_tRRD(K_BCM1480_MC_tRRD_DEFAULT)
-
-#define S_BCM1480_MC_tRCw                   24
-#define M_BCM1480_MC_tRCw                   _SB_MAKEMASK(5, S_BCM1480_MC_tRCw)
-#define V_BCM1480_MC_tRCw(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tRCw)
-#define G_BCM1480_MC_tRCw(x)                _SB_GETVALUE(x, S_BCM1480_MC_tRCw, M_BCM1480_MC_tRCw)
-#define K_BCM1480_MC_tRCw_DEFAULT           10
-#define V_BCM1480_MC_tRCw_DEFAULT           V_BCM1480_MC_tRCw(K_BCM1480_MC_tRCw_DEFAULT)
-
-#define S_BCM1480_MC_tRCr                   32
-#define M_BCM1480_MC_tRCr                   _SB_MAKEMASK(5, S_BCM1480_MC_tRCr)
-#define V_BCM1480_MC_tRCr(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tRCr)
-#define G_BCM1480_MC_tRCr(x)                _SB_GETVALUE(x, S_BCM1480_MC_tRCr, M_BCM1480_MC_tRCr)
-#define K_BCM1480_MC_tRCr_DEFAULT           9
-#define V_BCM1480_MC_tRCr_DEFAULT           V_BCM1480_MC_tRCr(K_BCM1480_MC_tRCr_DEFAULT)
+#define S_BCM1480_MC_tRCD                  0
+#define M_BCM1480_MC_tRCD                  _SB_MAKEMASK(4, S_BCM1480_MC_tRCD)
+#define V_BCM1480_MC_tRCD(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tRCD)
+#define G_BCM1480_MC_tRCD(x)               _SB_GETVALUE(x, S_BCM1480_MC_tRCD, M_BCM1480_MC_tRCD)
+#define K_BCM1480_MC_tRCD_DEFAULT          3
+#define V_BCM1480_MC_tRCD_DEFAULT          V_BCM1480_MC_tRCD(K_BCM1480_MC_tRCD_DEFAULT)
+
+#define S_BCM1480_MC_tCL                   4
+#define M_BCM1480_MC_tCL                   _SB_MAKEMASK(4, S_BCM1480_MC_tCL)
+#define V_BCM1480_MC_tCL(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tCL)
+#define G_BCM1480_MC_tCL(x)                _SB_GETVALUE(x, S_BCM1480_MC_tCL, M_BCM1480_MC_tCL)
+#define K_BCM1480_MC_tCL_DEFAULT           2
+#define V_BCM1480_MC_tCL_DEFAULT           V_BCM1480_MC_tCL(K_BCM1480_MC_tCL_DEFAULT)
+
+#define M_BCM1480_MC_tCrDh                 _SB_MAKEMASK1(8)
+
+#define S_BCM1480_MC_tWR                   9
+#define M_BCM1480_MC_tWR                   _SB_MAKEMASK(3, S_BCM1480_MC_tWR)
+#define V_BCM1480_MC_tWR(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tWR)
+#define G_BCM1480_MC_tWR(x)                _SB_GETVALUE(x, S_BCM1480_MC_tWR, M_BCM1480_MC_tWR)
+#define K_BCM1480_MC_tWR_DEFAULT           2
+#define V_BCM1480_MC_tWR_DEFAULT           V_BCM1480_MC_tWR(K_BCM1480_MC_tWR_DEFAULT)
+
+#define S_BCM1480_MC_tCwD                  12
+#define M_BCM1480_MC_tCwD                  _SB_MAKEMASK(4, S_BCM1480_MC_tCwD)
+#define V_BCM1480_MC_tCwD(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tCwD)
+#define G_BCM1480_MC_tCwD(x)               _SB_GETVALUE(x, S_BCM1480_MC_tCwD, M_BCM1480_MC_tCwD)
+#define K_BCM1480_MC_tCwD_DEFAULT          1
+#define V_BCM1480_MC_tCwD_DEFAULT          V_BCM1480_MC_tCwD(K_BCM1480_MC_tCwD_DEFAULT)
+
+#define S_BCM1480_MC_tRP                   16
+#define M_BCM1480_MC_tRP                   _SB_MAKEMASK(4, S_BCM1480_MC_tRP)
+#define V_BCM1480_MC_tRP(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tRP)
+#define G_BCM1480_MC_tRP(x)                _SB_GETVALUE(x, S_BCM1480_MC_tRP, M_BCM1480_MC_tRP)
+#define K_BCM1480_MC_tRP_DEFAULT           4
+#define V_BCM1480_MC_tRP_DEFAULT           V_BCM1480_MC_tRP(K_BCM1480_MC_tRP_DEFAULT)
+
+#define S_BCM1480_MC_tRRD                  20
+#define M_BCM1480_MC_tRRD                  _SB_MAKEMASK(4, S_BCM1480_MC_tRRD)
+#define V_BCM1480_MC_tRRD(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tRRD)
+#define G_BCM1480_MC_tRRD(x)               _SB_GETVALUE(x, S_BCM1480_MC_tRRD, M_BCM1480_MC_tRRD)
+#define K_BCM1480_MC_tRRD_DEFAULT          2
+#define V_BCM1480_MC_tRRD_DEFAULT          V_BCM1480_MC_tRRD(K_BCM1480_MC_tRRD_DEFAULT)
+
+#define S_BCM1480_MC_tRCw                  24
+#define M_BCM1480_MC_tRCw                  _SB_MAKEMASK(5, S_BCM1480_MC_tRCw)
+#define V_BCM1480_MC_tRCw(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tRCw)
+#define G_BCM1480_MC_tRCw(x)               _SB_GETVALUE(x, S_BCM1480_MC_tRCw, M_BCM1480_MC_tRCw)
+#define K_BCM1480_MC_tRCw_DEFAULT          10
+#define V_BCM1480_MC_tRCw_DEFAULT          V_BCM1480_MC_tRCw(K_BCM1480_MC_tRCw_DEFAULT)
+
+#define S_BCM1480_MC_tRCr                  32
+#define M_BCM1480_MC_tRCr                  _SB_MAKEMASK(5, S_BCM1480_MC_tRCr)
+#define V_BCM1480_MC_tRCr(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tRCr)
+#define G_BCM1480_MC_tRCr(x)               _SB_GETVALUE(x, S_BCM1480_MC_tRCr, M_BCM1480_MC_tRCr)
+#define K_BCM1480_MC_tRCr_DEFAULT          9
+#define V_BCM1480_MC_tRCr_DEFAULT          V_BCM1480_MC_tRCr(K_BCM1480_MC_tRCr_DEFAULT)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_tFAW                   40
-#define M_BCM1480_MC_tFAW                   _SB_MAKEMASK(6, S_BCM1480_MC_tFAW)
-#define V_BCM1480_MC_tFAW(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tFAW)
-#define G_BCM1480_MC_tFAW(x)                _SB_GETVALUE(x, S_BCM1480_MC_tFAW, M_BCM1480_MC_tFAW)
-#define K_BCM1480_MC_tFAW_DEFAULT           0
-#define V_BCM1480_MC_tFAW_DEFAULT           V_BCM1480_MC_tFAW(K_BCM1480_MC_tFAW_DEFAULT)
+#define S_BCM1480_MC_tFAW                  40
+#define M_BCM1480_MC_tFAW                  _SB_MAKEMASK(6, S_BCM1480_MC_tFAW)
+#define V_BCM1480_MC_tFAW(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tFAW)
+#define G_BCM1480_MC_tFAW(x)               _SB_GETVALUE(x, S_BCM1480_MC_tFAW, M_BCM1480_MC_tFAW)
+#define K_BCM1480_MC_tFAW_DEFAULT          0
+#define V_BCM1480_MC_tFAW_DEFAULT          V_BCM1480_MC_tFAW(K_BCM1480_MC_tFAW_DEFAULT)
 #endif
 
-#define S_BCM1480_MC_tRFC                   48
-#define M_BCM1480_MC_tRFC                   _SB_MAKEMASK(7, S_BCM1480_MC_tRFC)
-#define V_BCM1480_MC_tRFC(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tRFC)
-#define G_BCM1480_MC_tRFC(x)                _SB_GETVALUE(x, S_BCM1480_MC_tRFC, M_BCM1480_MC_tRFC)
-#define K_BCM1480_MC_tRFC_DEFAULT           12
-#define V_BCM1480_MC_tRFC_DEFAULT           V_BCM1480_MC_tRFC(K_BCM1480_MC_tRFC_DEFAULT)
-
-#define S_BCM1480_MC_tFIFO                  56
-#define M_BCM1480_MC_tFIFO                  _SB_MAKEMASK(2, S_BCM1480_MC_tFIFO)
-#define V_BCM1480_MC_tFIFO(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tFIFO)
-#define G_BCM1480_MC_tFIFO(x)               _SB_GETVALUE(x, S_BCM1480_MC_tFIFO, M_BCM1480_MC_tFIFO)
-#define K_BCM1480_MC_tFIFO_DEFAULT          0
-#define V_BCM1480_MC_tFIFO_DEFAULT          V_BCM1480_MC_tFIFO(K_BCM1480_MC_tFIFO_DEFAULT)
-
-#define S_BCM1480_MC_tW2R                  58
-#define M_BCM1480_MC_tW2R                  _SB_MAKEMASK(2, S_BCM1480_MC_tW2R)
-#define V_BCM1480_MC_tW2R(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tW2R)
-#define G_BCM1480_MC_tW2R(x)               _SB_GETVALUE(x, S_BCM1480_MC_tW2R, M_BCM1480_MC_tW2R)
-#define K_BCM1480_MC_tW2R_DEFAULT          1
-#define V_BCM1480_MC_tW2R_DEFAULT          V_BCM1480_MC_tW2R(K_BCM1480_MC_tW2R_DEFAULT)
-
-#define S_BCM1480_MC_tR2W                  60
-#define M_BCM1480_MC_tR2W                  _SB_MAKEMASK(2, S_BCM1480_MC_tR2W)
-#define V_BCM1480_MC_tR2W(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tR2W)
-#define G_BCM1480_MC_tR2W(x)               _SB_GETVALUE(x, S_BCM1480_MC_tR2W, M_BCM1480_MC_tR2W)
-#define K_BCM1480_MC_tR2W_DEFAULT          0
-#define V_BCM1480_MC_tR2W_DEFAULT          V_BCM1480_MC_tR2W(K_BCM1480_MC_tR2W_DEFAULT)
+#define S_BCM1480_MC_tRFC                  48
+#define M_BCM1480_MC_tRFC                  _SB_MAKEMASK(7, S_BCM1480_MC_tRFC)
+#define V_BCM1480_MC_tRFC(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tRFC)
+#define G_BCM1480_MC_tRFC(x)               _SB_GETVALUE(x, S_BCM1480_MC_tRFC, M_BCM1480_MC_tRFC)
+#define K_BCM1480_MC_tRFC_DEFAULT          12
+#define V_BCM1480_MC_tRFC_DEFAULT          V_BCM1480_MC_tRFC(K_BCM1480_MC_tRFC_DEFAULT)
+
+#define S_BCM1480_MC_tFIFO                 56
+#define M_BCM1480_MC_tFIFO                 _SB_MAKEMASK(2, S_BCM1480_MC_tFIFO)
+#define V_BCM1480_MC_tFIFO(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_tFIFO)
+#define G_BCM1480_MC_tFIFO(x)              _SB_GETVALUE(x, S_BCM1480_MC_tFIFO, M_BCM1480_MC_tFIFO)
+#define K_BCM1480_MC_tFIFO_DEFAULT         0
+#define V_BCM1480_MC_tFIFO_DEFAULT         V_BCM1480_MC_tFIFO(K_BCM1480_MC_tFIFO_DEFAULT)
+
+#define S_BCM1480_MC_tW2R                 58
+#define M_BCM1480_MC_tW2R                 _SB_MAKEMASK(2, S_BCM1480_MC_tW2R)
+#define V_BCM1480_MC_tW2R(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_tW2R)
+#define G_BCM1480_MC_tW2R(x)              _SB_GETVALUE(x, S_BCM1480_MC_tW2R, M_BCM1480_MC_tW2R)
+#define K_BCM1480_MC_tW2R_DEFAULT         1
+#define V_BCM1480_MC_tW2R_DEFAULT         V_BCM1480_MC_tW2R(K_BCM1480_MC_tW2R_DEFAULT)
+
+#define S_BCM1480_MC_tR2W                 60
+#define M_BCM1480_MC_tR2W                 _SB_MAKEMASK(2, S_BCM1480_MC_tR2W)
+#define V_BCM1480_MC_tR2W(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_tR2W)
+#define G_BCM1480_MC_tR2W(x)              _SB_GETVALUE(x, S_BCM1480_MC_tR2W, M_BCM1480_MC_tR2W)
+#define K_BCM1480_MC_tR2W_DEFAULT         0
+#define V_BCM1480_MC_tR2W_DEFAULT         V_BCM1480_MC_tR2W(K_BCM1480_MC_tR2W_DEFAULT)
 
 #define M_BCM1480_MC_tR2R                  _SB_MAKEMASK1(62)
 
-#define V_BCM1480_MC_TIMING_DEFAULT         (M_BCM1480_MC_tR2R | \
-                                     V_BCM1480_MC_tFIFO_DEFAULT | \
-                                     V_BCM1480_MC_tR2W_DEFAULT | \
-                                     V_BCM1480_MC_tW2R_DEFAULT | \
-                                     V_BCM1480_MC_tRFC_DEFAULT | \
-                                     V_BCM1480_MC_tRCr_DEFAULT | \
-                                     V_BCM1480_MC_tRCw_DEFAULT | \
-                                     V_BCM1480_MC_tRRD_DEFAULT | \
-                                     V_BCM1480_MC_tRP_DEFAULT | \
-                                     V_BCM1480_MC_tCwD_DEFAULT | \
-                                     V_BCM1480_MC_tWR_DEFAULT | \
-                                     M_BCM1480_MC_tCrDh | \
-                                     V_BCM1480_MC_tCL_DEFAULT | \
-                                     V_BCM1480_MC_tRCD_DEFAULT)
+#define V_BCM1480_MC_TIMING_DEFAULT        (M_BCM1480_MC_tR2R | \
+                                    V_BCM1480_MC_tFIFO_DEFAULT | \
+                                    V_BCM1480_MC_tR2W_DEFAULT | \
+                                    V_BCM1480_MC_tW2R_DEFAULT | \
+                                    V_BCM1480_MC_tRFC_DEFAULT | \
+                                    V_BCM1480_MC_tRCr_DEFAULT | \
+                                    V_BCM1480_MC_tRCw_DEFAULT | \
+                                    V_BCM1480_MC_tRRD_DEFAULT | \
+                                    V_BCM1480_MC_tRP_DEFAULT | \
+                                    V_BCM1480_MC_tCwD_DEFAULT | \
+                                    V_BCM1480_MC_tWR_DEFAULT | \
+                                    M_BCM1480_MC_tCrDh | \
+                                    V_BCM1480_MC_tCL_DEFAULT | \
+                                    V_BCM1480_MC_tRCD_DEFAULT)
 
 /*
  * SDRAM Timing Register 2
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 
-#define S_BCM1480_MC_tAL                   0
-#define M_BCM1480_MC_tAL                   _SB_MAKEMASK(4, S_BCM1480_MC_tAL)
-#define V_BCM1480_MC_tAL(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tAL)
-#define G_BCM1480_MC_tAL(x)                _SB_GETVALUE(x, S_BCM1480_MC_tAL, M_BCM1480_MC_tAL)
-#define K_BCM1480_MC_tAL_DEFAULT           0
-#define V_BCM1480_MC_tAL_DEFAULT           V_BCM1480_MC_tAL(K_BCM1480_MC_tAL_DEFAULT)
-
-#define S_BCM1480_MC_tRTP                   4
-#define M_BCM1480_MC_tRTP                   _SB_MAKEMASK(3, S_BCM1480_MC_tRTP)
-#define V_BCM1480_MC_tRTP(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tRTP)
-#define G_BCM1480_MC_tRTP(x)                _SB_GETVALUE(x, S_BCM1480_MC_tRTP, M_BCM1480_MC_tRTP)
-#define K_BCM1480_MC_tRTP_DEFAULT           2
-#define V_BCM1480_MC_tRTP_DEFAULT           V_BCM1480_MC_tRTP(K_BCM1480_MC_tRTP_DEFAULT)
-
-#define S_BCM1480_MC_tW2W                   8
-#define M_BCM1480_MC_tW2W                   _SB_MAKEMASK(2, S_BCM1480_MC_tW2W)
-#define V_BCM1480_MC_tW2W(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tW2W)
-#define G_BCM1480_MC_tW2W(x)                _SB_GETVALUE(x, S_BCM1480_MC_tW2W, M_BCM1480_MC_tW2W)
-#define K_BCM1480_MC_tW2W_DEFAULT           0
-#define V_BCM1480_MC_tW2W_DEFAULT           V_BCM1480_MC_tW2W(K_BCM1480_MC_tW2W_DEFAULT)
-
-#define S_BCM1480_MC_tRAP                   12
-#define M_BCM1480_MC_tRAP                  _SB_MAKEMASK(4, S_BCM1480_MC_tRAP)
-#define V_BCM1480_MC_tRAP(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_tRAP)
-#define G_BCM1480_MC_tRAP(x)                _SB_GETVALUE(x, S_BCM1480_MC_tRAP, M_BCM1480_MC_tRAP)
-#define K_BCM1480_MC_tRAP_DEFAULT           0
-#define V_BCM1480_MC_tRAP_DEFAULT           V_BCM1480_MC_tRAP(K_BCM1480_MC_tRAP_DEFAULT)
+#define S_BCM1480_MC_tAL                  0
+#define M_BCM1480_MC_tAL                  _SB_MAKEMASK(4, S_BCM1480_MC_tAL)
+#define V_BCM1480_MC_tAL(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tAL)
+#define G_BCM1480_MC_tAL(x)               _SB_GETVALUE(x, S_BCM1480_MC_tAL, M_BCM1480_MC_tAL)
+#define K_BCM1480_MC_tAL_DEFAULT          0
+#define V_BCM1480_MC_tAL_DEFAULT          V_BCM1480_MC_tAL(K_BCM1480_MC_tAL_DEFAULT)
+
+#define S_BCM1480_MC_tRTP                  4
+#define M_BCM1480_MC_tRTP                  _SB_MAKEMASK(3, S_BCM1480_MC_tRTP)
+#define V_BCM1480_MC_tRTP(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tRTP)
+#define G_BCM1480_MC_tRTP(x)               _SB_GETVALUE(x, S_BCM1480_MC_tRTP, M_BCM1480_MC_tRTP)
+#define K_BCM1480_MC_tRTP_DEFAULT          2
+#define V_BCM1480_MC_tRTP_DEFAULT          V_BCM1480_MC_tRTP(K_BCM1480_MC_tRTP_DEFAULT)
+
+#define S_BCM1480_MC_tW2W                  8
+#define M_BCM1480_MC_tW2W                  _SB_MAKEMASK(2, S_BCM1480_MC_tW2W)
+#define V_BCM1480_MC_tW2W(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tW2W)
+#define G_BCM1480_MC_tW2W(x)               _SB_GETVALUE(x, S_BCM1480_MC_tW2W, M_BCM1480_MC_tW2W)
+#define K_BCM1480_MC_tW2W_DEFAULT          0
+#define V_BCM1480_MC_tW2W_DEFAULT          V_BCM1480_MC_tW2W(K_BCM1480_MC_tW2W_DEFAULT)
+
+#define S_BCM1480_MC_tRAP                  12
+#define M_BCM1480_MC_tRAP                 _SB_MAKEMASK(4, S_BCM1480_MC_tRAP)
+#define V_BCM1480_MC_tRAP(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_tRAP)
+#define G_BCM1480_MC_tRAP(x)               _SB_GETVALUE(x, S_BCM1480_MC_tRAP, M_BCM1480_MC_tRAP)
+#define K_BCM1480_MC_tRAP_DEFAULT          0
+#define V_BCM1480_MC_tRAP_DEFAULT          V_BCM1480_MC_tRAP(K_BCM1480_MC_tRAP_DEFAULT)
 
 #endif
 
  * Global Configuration Register (Table 99)
  */
 
-#define S_BCM1480_MC_BLK_SET_MARK           8
-#define M_BCM1480_MC_BLK_SET_MARK           _SB_MAKEMASK(4, S_BCM1480_MC_BLK_SET_MARK)
-#define V_BCM1480_MC_BLK_SET_MARK(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_SET_MARK)
-#define G_BCM1480_MC_BLK_SET_MARK(x)        _SB_GETVALUE(x, S_BCM1480_MC_BLK_SET_MARK, M_BCM1480_MC_BLK_SET_MARK)
+#define S_BCM1480_MC_BLK_SET_MARK          8
+#define M_BCM1480_MC_BLK_SET_MARK          _SB_MAKEMASK(4, S_BCM1480_MC_BLK_SET_MARK)
+#define V_BCM1480_MC_BLK_SET_MARK(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_SET_MARK)
+#define G_BCM1480_MC_BLK_SET_MARK(x)       _SB_GETVALUE(x, S_BCM1480_MC_BLK_SET_MARK, M_BCM1480_MC_BLK_SET_MARK)
 
-#define S_BCM1480_MC_BLK_CLR_MARK           12
-#define M_BCM1480_MC_BLK_CLR_MARK           _SB_MAKEMASK(4, S_BCM1480_MC_BLK_CLR_MARK)
-#define V_BCM1480_MC_BLK_CLR_MARK(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_CLR_MARK)
-#define G_BCM1480_MC_BLK_CLR_MARK(x)        _SB_GETVALUE(x, S_BCM1480_MC_BLK_CLR_MARK, M_BCM1480_MC_BLK_CLR_MARK)
+#define S_BCM1480_MC_BLK_CLR_MARK          12
+#define M_BCM1480_MC_BLK_CLR_MARK          _SB_MAKEMASK(4, S_BCM1480_MC_BLK_CLR_MARK)
+#define V_BCM1480_MC_BLK_CLR_MARK(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_CLR_MARK)
+#define G_BCM1480_MC_BLK_CLR_MARK(x)       _SB_GETVALUE(x, S_BCM1480_MC_BLK_CLR_MARK, M_BCM1480_MC_BLK_CLR_MARK)
 
-#define M_BCM1480_MC_PKT_PRIORITY           _SB_MAKEMASK1(16)
+#define M_BCM1480_MC_PKT_PRIORITY          _SB_MAKEMASK1(16)
 
-#define S_BCM1480_MC_MAX_AGE                20
-#define M_BCM1480_MC_MAX_AGE                _SB_MAKEMASK(4, S_BCM1480_MC_MAX_AGE)
-#define V_BCM1480_MC_MAX_AGE(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_MAX_AGE)
-#define G_BCM1480_MC_MAX_AGE(x)             _SB_GETVALUE(x, S_BCM1480_MC_MAX_AGE, M_BCM1480_MC_MAX_AGE)
+#define S_BCM1480_MC_MAX_AGE               20
+#define M_BCM1480_MC_MAX_AGE               _SB_MAKEMASK(4, S_BCM1480_MC_MAX_AGE)
+#define V_BCM1480_MC_MAX_AGE(x)                    _SB_MAKEVALUE(x, S_BCM1480_MC_MAX_AGE)
+#define G_BCM1480_MC_MAX_AGE(x)                    _SB_GETVALUE(x, S_BCM1480_MC_MAX_AGE, M_BCM1480_MC_MAX_AGE)
 
-#define M_BCM1480_MC_BERR_DISABLE           _SB_MAKEMASK1(29)
-#define M_BCM1480_MC_FORCE_SEQ              _SB_MAKEMASK1(30)
-#define M_BCM1480_MC_VGEN                   _SB_MAKEMASK1(32)
+#define M_BCM1480_MC_BERR_DISABLE          _SB_MAKEMASK1(29)
+#define M_BCM1480_MC_FORCE_SEQ             _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_VGEN                  _SB_MAKEMASK1(32)
 
-#define S_BCM1480_MC_SLEW                   33
-#define M_BCM1480_MC_SLEW                   _SB_MAKEMASK(2, S_BCM1480_MC_SLEW)
-#define V_BCM1480_MC_SLEW(x)                _SB_MAKEVALUE(x, S_BCM1480_MC_SLEW)
-#define G_BCM1480_MC_SLEW(x)                _SB_GETVALUE(x, S_BCM1480_MC_SLEW, M_BCM1480_MC_SLEW)
+#define S_BCM1480_MC_SLEW                  33
+#define M_BCM1480_MC_SLEW                  _SB_MAKEMASK(2, S_BCM1480_MC_SLEW)
+#define V_BCM1480_MC_SLEW(x)               _SB_MAKEVALUE(x, S_BCM1480_MC_SLEW)
+#define G_BCM1480_MC_SLEW(x)               _SB_GETVALUE(x, S_BCM1480_MC_SLEW, M_BCM1480_MC_SLEW)
 
-#define M_BCM1480_MC_SSTL_VOLTAGE           _SB_MAKEMASK1(35)
+#define M_BCM1480_MC_SSTL_VOLTAGE          _SB_MAKEMASK1(35)
 
 /*
  * Global Channel Interleave Register (Table 100)
  */
 
-#define S_BCM1480_MC_INTLV0                 0
-#define M_BCM1480_MC_INTLV0                 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
-#define G_BCM1480_MC_INTLV0(x)              _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
-
-#define S_BCM1480_MC_INTLV1                 8
-#define M_BCM1480_MC_INTLV1                 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1(x)              _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
-#define G_BCM1480_MC_INTLV1(x)              _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
-
-#define S_BCM1480_MC_INTLV_MODE             16
-#define M_BCM1480_MC_INTLV_MODE             _SB_MAKEMASK(3, S_BCM1480_MC_INTLV_MODE)
-#define V_BCM1480_MC_INTLV_MODE(x)          _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV_MODE)
-#define G_BCM1480_MC_INTLV_MODE(x)          _SB_GETVALUE(x, S_BCM1480_MC_INTLV_MODE, M_BCM1480_MC_INTLV_MODE)
-
-#define K_BCM1480_MC_INTLV_MODE_NONE        0x0
-#define K_BCM1480_MC_INTLV_MODE_01          0x1
-#define K_BCM1480_MC_INTLV_MODE_23          0x2
-#define K_BCM1480_MC_INTLV_MODE_01_23       0x3
-#define K_BCM1480_MC_INTLV_MODE_0123        0x4
-
-#define V_BCM1480_MC_INTLV_MODE_NONE        V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_NONE)
-#define V_BCM1480_MC_INTLV_MODE_01          V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01)
-#define V_BCM1480_MC_INTLV_MODE_23          V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_23)
-#define V_BCM1480_MC_INTLV_MODE_01_23       V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01_23)
-#define V_BCM1480_MC_INTLV_MODE_0123        V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_0123)
+#define S_BCM1480_MC_INTLV0                0
+#define M_BCM1480_MC_INTLV0                _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
+#define G_BCM1480_MC_INTLV0(x)             _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
+
+#define S_BCM1480_MC_INTLV1                8
+#define M_BCM1480_MC_INTLV1                _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1(x)             _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
+#define G_BCM1480_MC_INTLV1(x)             _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
+
+#define S_BCM1480_MC_INTLV_MODE                    16
+#define M_BCM1480_MC_INTLV_MODE                    _SB_MAKEMASK(3, S_BCM1480_MC_INTLV_MODE)
+#define V_BCM1480_MC_INTLV_MODE(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV_MODE)
+#define G_BCM1480_MC_INTLV_MODE(x)         _SB_GETVALUE(x, S_BCM1480_MC_INTLV_MODE, M_BCM1480_MC_INTLV_MODE)
+
+#define K_BCM1480_MC_INTLV_MODE_NONE       0x0
+#define K_BCM1480_MC_INTLV_MODE_01         0x1
+#define K_BCM1480_MC_INTLV_MODE_23         0x2
+#define K_BCM1480_MC_INTLV_MODE_01_23      0x3
+#define K_BCM1480_MC_INTLV_MODE_0123       0x4
+
+#define V_BCM1480_MC_INTLV_MODE_NONE       V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_NONE)
+#define V_BCM1480_MC_INTLV_MODE_01         V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01)
+#define V_BCM1480_MC_INTLV_MODE_23         V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_23)
+#define V_BCM1480_MC_INTLV_MODE_01_23      V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01_23)
+#define V_BCM1480_MC_INTLV_MODE_0123       V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_0123)
 
 /*
  * ECC Status Register
  */
 
-#define S_BCM1480_MC_ECC_ERR_ADDR           0
-#define M_BCM1480_MC_ECC_ERR_ADDR           _SB_MAKEMASK(37, S_BCM1480_MC_ECC_ERR_ADDR)
-#define V_BCM1480_MC_ECC_ERR_ADDR(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR)
-#define G_BCM1480_MC_ECC_ERR_ADDR(x)        _SB_GETVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR, M_BCM1480_MC_ECC_ERR_ADDR)
+#define S_BCM1480_MC_ECC_ERR_ADDR          0
+#define M_BCM1480_MC_ECC_ERR_ADDR          _SB_MAKEMASK(37, S_BCM1480_MC_ECC_ERR_ADDR)
+#define V_BCM1480_MC_ECC_ERR_ADDR(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR)
+#define G_BCM1480_MC_ECC_ERR_ADDR(x)       _SB_GETVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR, M_BCM1480_MC_ECC_ERR_ADDR)
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define M_BCM1480_MC_ECC_ERR_RMW            _SB_MAKEMASK1(60)
+#define M_BCM1480_MC_ECC_ERR_RMW           _SB_MAKEMASK1(60)
 #endif
 
-#define M_BCM1480_MC_ECC_MULT_ERR_DET       _SB_MAKEMASK1(61)
-#define M_BCM1480_MC_ECC_UERR_DET           _SB_MAKEMASK1(62)
-#define M_BCM1480_MC_ECC_CERR_DET           _SB_MAKEMASK1(63)
+#define M_BCM1480_MC_ECC_MULT_ERR_DET      _SB_MAKEMASK1(61)
+#define M_BCM1480_MC_ECC_UERR_DET          _SB_MAKEMASK1(62)
+#define M_BCM1480_MC_ECC_CERR_DET          _SB_MAKEMASK1(63)
 
 /*
  * Global ECC Address Register (Table 102)
  */
 
-#define S_BCM1480_MC_ECC_CORR_ADDR          0
-#define M_BCM1480_MC_ECC_CORR_ADDR          _SB_MAKEMASK(37, S_BCM1480_MC_ECC_CORR_ADDR)
-#define V_BCM1480_MC_ECC_CORR_ADDR(x)       _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR)
-#define G_BCM1480_MC_ECC_CORR_ADDR(x)       _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR, M_BCM1480_MC_ECC_CORR_ADDR)
+#define S_BCM1480_MC_ECC_CORR_ADDR         0
+#define M_BCM1480_MC_ECC_CORR_ADDR         _SB_MAKEMASK(37, S_BCM1480_MC_ECC_CORR_ADDR)
+#define V_BCM1480_MC_ECC_CORR_ADDR(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR)
+#define G_BCM1480_MC_ECC_CORR_ADDR(x)      _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR, M_BCM1480_MC_ECC_CORR_ADDR)
 
 /*
  * Global ECC Correction Register (Table 103)
  */
 
-#define S_BCM1480_MC_ECC_CORRECT            0
-#define M_BCM1480_MC_ECC_CORRECT            _SB_MAKEMASK(64, S_BCM1480_MC_ECC_CORRECT)
-#define V_BCM1480_MC_ECC_CORRECT(x)         _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORRECT)
-#define G_BCM1480_MC_ECC_CORRECT(x)         _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORRECT, M_BCM1480_MC_ECC_CORRECT)
+#define S_BCM1480_MC_ECC_CORRECT           0
+#define M_BCM1480_MC_ECC_CORRECT           _SB_MAKEMASK(64, S_BCM1480_MC_ECC_CORRECT)
+#define V_BCM1480_MC_ECC_CORRECT(x)        _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORRECT)
+#define G_BCM1480_MC_ECC_CORRECT(x)        _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORRECT, M_BCM1480_MC_ECC_CORRECT)
 
 /*
  * Global ECC Performance Counters Control Register (Table 104)
  */
 
-#define S_BCM1480_MC_CHANNEL_SELECT         0
-#define M_BCM1480_MC_CHANNEL_SELECT         _SB_MAKEMASK(4, S_BCM1480_MC_CHANNEL_SELECT)
-#define V_BCM1480_MC_CHANNEL_SELECT(x)      _SB_MAKEVALUE(x, S_BCM1480_MC_CHANNEL_SELECT)
-#define G_BCM1480_MC_CHANNEL_SELECT(x)      _SB_GETVALUE(x, S_BCM1480_MC_CHANNEL_SELECT, M_BCM1480_MC_CHANNEL_SELECT)
-#define K_BCM1480_MC_CHANNEL_SELECT_0       0x1
-#define K_BCM1480_MC_CHANNEL_SELECT_1       0x2
-#define K_BCM1480_MC_CHANNEL_SELECT_2       0x4
-#define K_BCM1480_MC_CHANNEL_SELECT_3       0x8
+#define S_BCM1480_MC_CHANNEL_SELECT        0
+#define M_BCM1480_MC_CHANNEL_SELECT        _SB_MAKEMASK(4, S_BCM1480_MC_CHANNEL_SELECT)
+#define V_BCM1480_MC_CHANNEL_SELECT(x)     _SB_MAKEVALUE(x, S_BCM1480_MC_CHANNEL_SELECT)
+#define G_BCM1480_MC_CHANNEL_SELECT(x)     _SB_GETVALUE(x, S_BCM1480_MC_CHANNEL_SELECT, M_BCM1480_MC_CHANNEL_SELECT)
+#define K_BCM1480_MC_CHANNEL_SELECT_0      0x1
+#define K_BCM1480_MC_CHANNEL_SELECT_1      0x2
+#define K_BCM1480_MC_CHANNEL_SELECT_2      0x4
+#define K_BCM1480_MC_CHANNEL_SELECT_3      0x8
 
 #endif /* _BCM1480_MC_H */
index 84d168d..ec0dacf 100644 (file)
@@ -1,7 +1,7 @@
 /*  *********************************************************************
     *  BCM1255/BCM1280/BCM1455/BCM1480 Board Support Package
     *
-    *  Register Definitions                     File: bcm1480_regs.h
+    *  Register Definitions                    File: bcm1480_regs.h
     *
     *  This module contains the addresses of the on-chip peripherals
     *  on the BCM1280 and BCM1480.
     * Memory Controller Registers (Section 6)
     ********************************************************************* */
 
-#define A_BCM1480_MC_BASE_0                 0x0010050000
-#define A_BCM1480_MC_BASE_1                 0x0010051000
-#define A_BCM1480_MC_BASE_2                 0x0010052000
-#define A_BCM1480_MC_BASE_3                 0x0010053000
-#define BCM1480_MC_REGISTER_SPACING         0x1000
+#define A_BCM1480_MC_BASE_0                0x0010050000
+#define A_BCM1480_MC_BASE_1                0x0010051000
+#define A_BCM1480_MC_BASE_2                0x0010052000
+#define A_BCM1480_MC_BASE_3                0x0010053000
+#define BCM1480_MC_REGISTER_SPACING        0x1000
 
-#define A_BCM1480_MC_BASE(ctlid)            (A_BCM1480_MC_BASE_0+(ctlid)*BCM1480_MC_REGISTER_SPACING)
+#define A_BCM1480_MC_BASE(ctlid)           (A_BCM1480_MC_BASE_0+(ctlid)*BCM1480_MC_REGISTER_SPACING)
 #define A_BCM1480_MC_REGISTER(ctlid, reg)    (A_BCM1480_MC_BASE(ctlid)+(reg))
 
-#define R_BCM1480_MC_CONFIG                 0x0000000100
-#define R_BCM1480_MC_CS_START               0x0000000120
-#define R_BCM1480_MC_CS_END                 0x0000000140
-#define S_BCM1480_MC_CS_STARTEND            24
-
-#define R_BCM1480_MC_CS01_ROW0              0x0000000180
-#define R_BCM1480_MC_CS01_ROW1              0x00000001A0
-#define R_BCM1480_MC_CS23_ROW0              0x0000000200
-#define R_BCM1480_MC_CS23_ROW1              0x0000000220
-#define R_BCM1480_MC_CS01_COL0              0x0000000280
-#define R_BCM1480_MC_CS01_COL1              0x00000002A0
-#define R_BCM1480_MC_CS23_COL0              0x0000000300
-#define R_BCM1480_MC_CS23_COL1              0x0000000320
-
-#define R_BCM1480_MC_CSX_BASE               0x0000000180
-#define R_BCM1480_MC_CSX_ROW0               0x0000000000   /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_ROW1               0x0000000020   /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_COL0               0x0000000100   /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_COL1               0x0000000120   /* relative to CSX_BASE */
-#define BCM1480_MC_CSX_SPACING              0x0000000080   /* CS23 relative to CS01 */
-
-#define R_BCM1480_MC_CS01_BA                0x0000000380
-#define R_BCM1480_MC_CS23_BA                0x00000003A0
-#define R_BCM1480_MC_DRAMCMD                0x0000000400
-#define R_BCM1480_MC_DRAMMODE               0x0000000420
-#define R_BCM1480_MC_CLOCK_CFG              0x0000000440
-#define R_BCM1480_MC_MCLK_CFG               R_BCM1480_MC_CLOCK_CFG
-#define R_BCM1480_MC_TEST_DATA              0x0000000480
-#define R_BCM1480_MC_TEST_ECC               0x00000004A0
-#define R_BCM1480_MC_TIMING1                0x00000004C0
-#define R_BCM1480_MC_TIMING2                0x00000004E0
-#define R_BCM1480_MC_DLL_CFG                0x0000000500
-#define R_BCM1480_MC_DRIVE_CFG              0x0000000520
+#define R_BCM1480_MC_CONFIG                0x0000000100
+#define R_BCM1480_MC_CS_START              0x0000000120
+#define R_BCM1480_MC_CS_END                0x0000000140
+#define S_BCM1480_MC_CS_STARTEND           24
+
+#define R_BCM1480_MC_CS01_ROW0             0x0000000180
+#define R_BCM1480_MC_CS01_ROW1             0x00000001A0
+#define R_BCM1480_MC_CS23_ROW0             0x0000000200
+#define R_BCM1480_MC_CS23_ROW1             0x0000000220
+#define R_BCM1480_MC_CS01_COL0             0x0000000280
+#define R_BCM1480_MC_CS01_COL1             0x00000002A0
+#define R_BCM1480_MC_CS23_COL0             0x0000000300
+#define R_BCM1480_MC_CS23_COL1             0x0000000320
+
+#define R_BCM1480_MC_CSX_BASE              0x0000000180
+#define R_BCM1480_MC_CSX_ROW0              0x0000000000   /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_ROW1              0x0000000020   /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_COL0              0x0000000100   /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_COL1              0x0000000120   /* relative to CSX_BASE */
+#define BCM1480_MC_CSX_SPACING             0x0000000080   /* CS23 relative to CS01 */
+
+#define R_BCM1480_MC_CS01_BA               0x0000000380
+#define R_BCM1480_MC_CS23_BA               0x00000003A0
+#define R_BCM1480_MC_DRAMCMD               0x0000000400
+#define R_BCM1480_MC_DRAMMODE              0x0000000420
+#define R_BCM1480_MC_CLOCK_CFG             0x0000000440
+#define R_BCM1480_MC_MCLK_CFG              R_BCM1480_MC_CLOCK_CFG
+#define R_BCM1480_MC_TEST_DATA             0x0000000480
+#define R_BCM1480_MC_TEST_ECC              0x00000004A0
+#define R_BCM1480_MC_TIMING1               0x00000004C0
+#define R_BCM1480_MC_TIMING2               0x00000004E0
+#define R_BCM1480_MC_DLL_CFG               0x0000000500
+#define R_BCM1480_MC_DRIVE_CFG             0x0000000520
 
 #if SIBYTE_HDR_FEATURE(1480, PASS2)
 #define R_BCM1480_MC_ODT                   0x0000000460
 #endif
 
 /* Global registers (single instance) */
-#define A_BCM1480_MC_GLB_CONFIG             0x0010054100
-#define A_BCM1480_MC_GLB_INTLV              0x0010054120
-#define A_BCM1480_MC_GLB_ECC_STATUS         0x0010054140
-#define A_BCM1480_MC_GLB_ECC_ADDR           0x0010054160
-#define A_BCM1480_MC_GLB_ECC_CORRECT        0x0010054180
+#define A_BCM1480_MC_GLB_CONFIG                    0x0010054100
+#define A_BCM1480_MC_GLB_INTLV             0x0010054120
+#define A_BCM1480_MC_GLB_ECC_STATUS        0x0010054140
+#define A_BCM1480_MC_GLB_ECC_ADDR          0x0010054160
+#define A_BCM1480_MC_GLB_ECC_CORRECT       0x0010054180
 #define A_BCM1480_MC_GLB_PERF_CNT_CONTROL   0x00100541A0
 
 /*  *********************************************************************
     * L2 Cache Control Registers (Section 5)
     ********************************************************************* */
 
-#define A_BCM1480_L2_BASE                   0x0010040000
+#define A_BCM1480_L2_BASE                  0x0010040000
 
-#define A_BCM1480_L2_READ_TAG               0x0010040018
-#define A_BCM1480_L2_ECC_TAG                0x0010040038
-#define A_BCM1480_L2_MISC0_VALUE            0x0010040058
-#define A_BCM1480_L2_MISC1_VALUE            0x0010040078
-#define A_BCM1480_L2_MISC2_VALUE            0x0010040098
-#define A_BCM1480_L2_MISC_CONFIG            0x0010040040       /* x040 */
-#define A_BCM1480_L2_CACHE_DISABLE          0x0010040060       /* x060 */
+#define A_BCM1480_L2_READ_TAG              0x0010040018
+#define A_BCM1480_L2_ECC_TAG               0x0010040038
+#define A_BCM1480_L2_MISC0_VALUE           0x0010040058
+#define A_BCM1480_L2_MISC1_VALUE           0x0010040078
+#define A_BCM1480_L2_MISC2_VALUE           0x0010040098
+#define A_BCM1480_L2_MISC_CONFIG           0x0010040040        /* x040 */
+#define A_BCM1480_L2_CACHE_DISABLE         0x0010040060        /* x060 */
 #define A_BCM1480_L2_MAKECACHEDISABLE(x)    (A_BCM1480_L2_CACHE_DISABLE | (((x)&0xF) << 12))
-#define A_BCM1480_L2_WAY_ENABLE_3_0         0x0010040080       /* x080 */
-#define A_BCM1480_L2_WAY_ENABLE_7_4         0x00100400A0       /* x0A0 */
+#define A_BCM1480_L2_WAY_ENABLE_3_0        0x0010040080        /* x080 */
+#define A_BCM1480_L2_WAY_ENABLE_7_4        0x00100400A0        /* x0A0 */
 #define A_BCM1480_L2_MAKE_WAY_ENABLE_LO(x)  (A_BCM1480_L2_WAY_ENABLE_3_0 | (((x)&0xF) << 12))
 #define A_BCM1480_L2_MAKE_WAY_ENABLE_HI(x)  (A_BCM1480_L2_WAY_ENABLE_7_4 | (((x)&0xF) << 12))
 #define A_BCM1480_L2_MAKE_WAY_DISABLE_LO(x)  (A_BCM1480_L2_WAY_ENABLE_3_0 | (((~x)&0xF) << 12))
 #define A_BCM1480_L2_MAKE_WAY_DISABLE_HI(x)  (A_BCM1480_L2_WAY_ENABLE_7_4 | (((~x)&0xF) << 12))
-#define A_BCM1480_L2_WAY_LOCAL_3_0          0x0010040100       /* x100 */
-#define A_BCM1480_L2_WAY_LOCAL_7_4          0x0010040120       /* x120 */
-#define A_BCM1480_L2_WAY_REMOTE_3_0         0x0010040140       /* x140 */
-#define A_BCM1480_L2_WAY_REMOTE_7_4         0x0010040160       /* x160 */
-#define A_BCM1480_L2_WAY_AGENT_3_0          0x00100400C0       /* xxC0 */
-#define A_BCM1480_L2_WAY_AGENT_7_4          0x00100400E0       /* xxE0 */
+#define A_BCM1480_L2_WAY_LOCAL_3_0         0x0010040100        /* x100 */
+#define A_BCM1480_L2_WAY_LOCAL_7_4         0x0010040120        /* x120 */
+#define A_BCM1480_L2_WAY_REMOTE_3_0        0x0010040140        /* x140 */
+#define A_BCM1480_L2_WAY_REMOTE_7_4        0x0010040160        /* x160 */
+#define A_BCM1480_L2_WAY_AGENT_3_0         0x00100400C0        /* xxC0 */
+#define A_BCM1480_L2_WAY_AGENT_7_4         0x00100400E0        /* xxE0 */
 #define A_BCM1480_L2_WAY_ENABLE(A, banks)   (A | (((~(banks))&0x0F) << 8))
-#define A_BCM1480_L2_BANK_BASE              0x00D0300000
-#define A_BCM1480_L2_BANK_ADDRESS(b)        (A_BCM1480_L2_BANK_BASE | (((b)&0x7)<<17))
-#define A_BCM1480_L2_MGMT_TAG_BASE          0x00D0000000
+#define A_BCM1480_L2_BANK_BASE             0x00D0300000
+#define A_BCM1480_L2_BANK_ADDRESS(b)       (A_BCM1480_L2_BANK_BASE | (((b)&0x7)<<17))
+#define A_BCM1480_L2_MGMT_TAG_BASE         0x00D0000000
 
 
 /*  *********************************************************************
     * PCI-X Interface Registers (Section 7)
     ********************************************************************* */
 
-#define A_BCM1480_PCI_BASE                  0x0010061400
+#define A_BCM1480_PCI_BASE                 0x0010061400
 
-#define A_BCM1480_PCI_RESET                 0x0010061400
-#define A_BCM1480_PCI_DLL                   0x0010061500
+#define A_BCM1480_PCI_RESET                0x0010061400
+#define A_BCM1480_PCI_DLL                  0x0010061500
 
-#define A_BCM1480_PCI_TYPE00_HEADER         0x002E000000
+#define A_BCM1480_PCI_TYPE00_HEADER        0x002E000000
 
 /*  *********************************************************************
     * Ethernet MAC Registers (Section 11) and DMA Registers (Section 10.6)
 
 /* No register changes with Rev.C BCM1250, but one additional MAC */
 
-#define A_BCM1480_MAC_BASE_2        0x0010066000
+#define A_BCM1480_MAC_BASE_2       0x0010066000
 
 #ifndef A_MAC_BASE_2
-#define A_MAC_BASE_2                A_BCM1480_MAC_BASE_2
+#define A_MAC_BASE_2               A_BCM1480_MAC_BASE_2
 #endif
 
-#define A_BCM1480_MAC_BASE_3        0x0010067000
-#define A_MAC_BASE_3                A_BCM1480_MAC_BASE_3
+#define A_BCM1480_MAC_BASE_3       0x0010067000
+#define A_MAC_BASE_3               A_BCM1480_MAC_BASE_3
 
-#define R_BCM1480_MAC_DMA_OODPKTLOST        0x00000038
+#define R_BCM1480_MAC_DMA_OODPKTLOST       0x00000038
 
 #ifndef R_MAC_DMA_OODPKTLOST
-#define R_MAC_DMA_OODPKTLOST        R_BCM1480_MAC_DMA_OODPKTLOST
+#define R_MAC_DMA_OODPKTLOST       R_BCM1480_MAC_DMA_OODPKTLOST
 #endif
 
 
 /* No significant differences from BCM1250, two DUARTs */
 
 /*  Conventions, per user manual:
- *     DUART    generic, channels A,B,C,D
- *     DUART0   implementing channels A,B
- *     DUART1   inplementing channels C,D
+ *     DUART   generic, channels A,B,C,D
+ *     DUART0  implementing channels A,B
+ *     DUART1  implementing channels C,D
  */
 
-#define BCM1480_DUART_NUM_PORTS           4
+#define BCM1480_DUART_NUM_PORTS                  4
 
-#define A_BCM1480_DUART0                    0x0010060000
-#define A_BCM1480_DUART1                    0x0010060400
-#define A_BCM1480_DUART(chan)               ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
+#define A_BCM1480_DUART0                   0x0010060000
+#define A_BCM1480_DUART1                   0x0010060400
+#define A_BCM1480_DUART(chan)              ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
 
-#define BCM1480_DUART_CHANREG_SPACING       0x100
+#define BCM1480_DUART_CHANREG_SPACING      0x100
 #define A_BCM1480_DUART_CHANREG(chan, reg)                             \
        (A_BCM1480_DUART(chan) +                                        \
         BCM1480_DUART_CHANREG_SPACING * (((chan) & 1) + 1) + (reg))
  * These constants are the absolute addresses.
  */
 
-#define A_BCM1480_DUART_MODE_REG_1_C        0x0010060400
-#define A_BCM1480_DUART_MODE_REG_2_C        0x0010060410
-#define A_BCM1480_DUART_STATUS_C            0x0010060420
-#define A_BCM1480_DUART_CLK_SEL_C           0x0010060430
-#define A_BCM1480_DUART_FULL_CTL_C          0x0010060440
-#define A_BCM1480_DUART_CMD_C               0x0010060450
-#define A_BCM1480_DUART_RX_HOLD_C           0x0010060460
-#define A_BCM1480_DUART_TX_HOLD_C           0x0010060470
-#define A_BCM1480_DUART_OPCR_C              0x0010060480
-#define A_BCM1480_DUART_AUX_CTRL_C          0x0010060490
-
-#define A_BCM1480_DUART_MODE_REG_1_D        0x0010060500
-#define A_BCM1480_DUART_MODE_REG_2_D        0x0010060510
-#define A_BCM1480_DUART_STATUS_D            0x0010060520
-#define A_BCM1480_DUART_CLK_SEL_D           0x0010060530
-#define A_BCM1480_DUART_FULL_CTL_D          0x0010060540
-#define A_BCM1480_DUART_CMD_D               0x0010060550
-#define A_BCM1480_DUART_RX_HOLD_D           0x0010060560
-#define A_BCM1480_DUART_TX_HOLD_D           0x0010060570
-#define A_BCM1480_DUART_OPCR_D              0x0010060580
-#define A_BCM1480_DUART_AUX_CTRL_D          0x0010060590
-
-#define A_BCM1480_DUART_INPORT_CHNG_CD      0x0010060600
-#define A_BCM1480_DUART_AUX_CTRL_CD         0x0010060610
-#define A_BCM1480_DUART_ISR_C               0x0010060620
-#define A_BCM1480_DUART_IMR_C               0x0010060630
-#define A_BCM1480_DUART_ISR_D               0x0010060640
-#define A_BCM1480_DUART_IMR_D               0x0010060650
-#define A_BCM1480_DUART_OUT_PORT_CD         0x0010060660
-#define A_BCM1480_DUART_OPCR_CD             0x0010060670
-#define A_BCM1480_DUART_IN_PORT_CD          0x0010060680
-#define A_BCM1480_DUART_ISR_CD              0x0010060690
-#define A_BCM1480_DUART_IMR_CD              0x00100606A0
-#define A_BCM1480_DUART_SET_OPR_CD          0x00100606B0
-#define A_BCM1480_DUART_CLEAR_OPR_CD        0x00100606C0
-#define A_BCM1480_DUART_INPORT_CHNG_C       0x00100606D0
-#define A_BCM1480_DUART_INPORT_CHNG_D       0x00100606E0
+#define A_BCM1480_DUART_MODE_REG_1_C       0x0010060400
+#define A_BCM1480_DUART_MODE_REG_2_C       0x0010060410
+#define A_BCM1480_DUART_STATUS_C           0x0010060420
+#define A_BCM1480_DUART_CLK_SEL_C          0x0010060430
+#define A_BCM1480_DUART_FULL_CTL_C         0x0010060440
+#define A_BCM1480_DUART_CMD_C              0x0010060450
+#define A_BCM1480_DUART_RX_HOLD_C          0x0010060460
+#define A_BCM1480_DUART_TX_HOLD_C          0x0010060470
+#define A_BCM1480_DUART_OPCR_C             0x0010060480
+#define A_BCM1480_DUART_AUX_CTRL_C         0x0010060490
+
+#define A_BCM1480_DUART_MODE_REG_1_D       0x0010060500
+#define A_BCM1480_DUART_MODE_REG_2_D       0x0010060510
+#define A_BCM1480_DUART_STATUS_D           0x0010060520
+#define A_BCM1480_DUART_CLK_SEL_D          0x0010060530
+#define A_BCM1480_DUART_FULL_CTL_D         0x0010060540
+#define A_BCM1480_DUART_CMD_D              0x0010060550
+#define A_BCM1480_DUART_RX_HOLD_D          0x0010060560
+#define A_BCM1480_DUART_TX_HOLD_D          0x0010060570
+#define A_BCM1480_DUART_OPCR_D             0x0010060580
+#define A_BCM1480_DUART_AUX_CTRL_D         0x0010060590
+
+#define A_BCM1480_DUART_INPORT_CHNG_CD     0x0010060600
+#define A_BCM1480_DUART_AUX_CTRL_CD        0x0010060610
+#define A_BCM1480_DUART_ISR_C              0x0010060620
+#define A_BCM1480_DUART_IMR_C              0x0010060630
+#define A_BCM1480_DUART_ISR_D              0x0010060640
+#define A_BCM1480_DUART_IMR_D              0x0010060650
+#define A_BCM1480_DUART_OUT_PORT_CD        0x0010060660
+#define A_BCM1480_DUART_OPCR_CD                    0x0010060670
+#define A_BCM1480_DUART_IN_PORT_CD         0x0010060680
+#define A_BCM1480_DUART_ISR_CD             0x0010060690
+#define A_BCM1480_DUART_IMR_CD             0x00100606A0
+#define A_BCM1480_DUART_SET_OPR_CD         0x00100606B0
+#define A_BCM1480_DUART_CLEAR_OPR_CD       0x00100606C0
+#define A_BCM1480_DUART_INPORT_CHNG_C      0x00100606D0
+#define A_BCM1480_DUART_INPORT_CHNG_D      0x00100606E0
 
 
 /*  *********************************************************************
 
 /* One additional GPIO register, placed _before_ the BCM1250's GPIO block base */
 
-#define A_BCM1480_GPIO_INT_ADD_TYPE         0x0010061A78
-#define R_BCM1480_GPIO_INT_ADD_TYPE         (-8)
+#define A_BCM1480_GPIO_INT_ADD_TYPE        0x0010061A78
+#define R_BCM1480_GPIO_INT_ADD_TYPE        (-8)
 
 #define A_GPIO_INT_ADD_TYPE    A_BCM1480_GPIO_INT_ADD_TYPE
 #define R_GPIO_INT_ADD_TYPE    R_BCM1480_GPIO_INT_ADD_TYPE
 
 /* Watchdog timers */
 
-#define A_BCM1480_SCD_WDOG_2                0x0010022050
-#define A_BCM1480_SCD_WDOG_3                0x0010022150
+#define A_BCM1480_SCD_WDOG_2               0x0010022050
+#define A_BCM1480_SCD_WDOG_3               0x0010022150
 
-#define BCM1480_SCD_NUM_WDOGS               4
+#define BCM1480_SCD_NUM_WDOGS              4
 
-#define A_BCM1480_SCD_WDOG_BASE(w)       (A_BCM1480_SCD_WDOG_0+((w)&2)*0x1000 + ((w)&1)*0x100)
+#define A_BCM1480_SCD_WDOG_BASE(w)      (A_BCM1480_SCD_WDOG_0+((w)&2)*0x1000 + ((w)&1)*0x100)
 #define A_BCM1480_SCD_WDOG_REGISTER(w, r) (A_BCM1480_SCD_WDOG_BASE(w) + (r))
 
-#define A_BCM1480_SCD_WDOG_INIT_2       0x0010022050
-#define A_BCM1480_SCD_WDOG_CNT_2        0x0010022058
-#define A_BCM1480_SCD_WDOG_CFG_2        0x0010022060
+#define A_BCM1480_SCD_WDOG_INIT_2      0x0010022050
+#define A_BCM1480_SCD_WDOG_CNT_2       0x0010022058
+#define A_BCM1480_SCD_WDOG_CFG_2       0x0010022060
 
-#define A_BCM1480_SCD_WDOG_INIT_3       0x0010022150
-#define A_BCM1480_SCD_WDOG_CNT_3        0x0010022158
-#define A_BCM1480_SCD_WDOG_CFG_3        0x0010022160
+#define A_BCM1480_SCD_WDOG_INIT_3      0x0010022150
+#define A_BCM1480_SCD_WDOG_CNT_3       0x0010022158
+#define A_BCM1480_SCD_WDOG_CFG_3       0x0010022160
 
 /* BCM1480 has two additional compare registers */
 
 #define A_BCM1480_SCD_ZBBUS_CYCLE_COUNT                A_SCD_ZBBUS_CYCLE_COUNT
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP_BASE       0x0010020C00
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP0           A_SCD_ZBBUS_CYCLE_CP0
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP1           A_SCD_ZBBUS_CYCLE_CP1
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP2           0x0010020C10
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP3           0x0010020C18
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP_BASE      0x0010020C00
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP0          A_SCD_ZBBUS_CYCLE_CP0
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP1          A_SCD_ZBBUS_CYCLE_CP1
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP2          0x0010020C10
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP3          0x0010020C18
 
 /*  *********************************************************************
     * System Control Registers (Section 4.2)
 
 /* Scratch register in different place */
 
-#define A_BCM1480_SCD_SCRATCH          0x100200A0
+#define A_BCM1480_SCD_SCRATCH          0x100200A0
 
 /*  *********************************************************************
     * System Address Trap Registers (Section 4.9)
     * System Interrupt Mapper Registers (Sections 4.3-4.5)
     ********************************************************************* */
 
-#define A_BCM1480_IMR_CPU0_BASE             0x0010020000
-#define A_BCM1480_IMR_CPU1_BASE             0x0010022000
-#define A_BCM1480_IMR_CPU2_BASE             0x0010024000
-#define A_BCM1480_IMR_CPU3_BASE             0x0010026000
-#define BCM1480_IMR_REGISTER_SPACING        0x2000
+#define A_BCM1480_IMR_CPU0_BASE                    0x0010020000
+#define A_BCM1480_IMR_CPU1_BASE                    0x0010022000
+#define A_BCM1480_IMR_CPU2_BASE                    0x0010024000
+#define A_BCM1480_IMR_CPU3_BASE                    0x0010026000
+#define BCM1480_IMR_REGISTER_SPACING       0x2000
 #define BCM1480_IMR_REGISTER_SPACING_SHIFT  13
 
-#define A_BCM1480_IMR_MAPPER(cpu)       (A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
+#define A_BCM1480_IMR_MAPPER(cpu)      (A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
 #define A_BCM1480_IMR_REGISTER(cpu, reg) (A_BCM1480_IMR_MAPPER(cpu)+(reg))
 
 /* Most IMR registers are 128 bits, implemented as non-contiguous
    64-bit registers high (_H) and low (_L) */
-#define BCM1480_IMR_HL_SPACING                  0x1000
+#define BCM1480_IMR_HL_SPACING                 0x1000
 
-#define R_BCM1480_IMR_INTERRUPT_DIAG_H          0x0010
-#define R_BCM1480_IMR_LDT_INTERRUPT_H           0x0018
-#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_H       0x0020
-#define R_BCM1480_IMR_INTERRUPT_MASK_H          0x0028
-#define R_BCM1480_IMR_INTERRUPT_TRACE_H         0x0038
+#define R_BCM1480_IMR_INTERRUPT_DIAG_H         0x0010
+#define R_BCM1480_IMR_LDT_INTERRUPT_H          0x0018
+#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_H      0x0020
+#define R_BCM1480_IMR_INTERRUPT_MASK_H         0x0028
+#define R_BCM1480_IMR_INTERRUPT_TRACE_H                0x0038
 #define R_BCM1480_IMR_INTERRUPT_SOURCE_STATUS_H 0x0040
-#define R_BCM1480_IMR_LDT_INTERRUPT_SET         0x0048
-#define R_BCM1480_IMR_MAILBOX_0_CPU             0x00C0
-#define R_BCM1480_IMR_MAILBOX_0_SET_CPU         0x00C8
-#define R_BCM1480_IMR_MAILBOX_0_CLR_CPU         0x00D0
-#define R_BCM1480_IMR_MAILBOX_1_CPU             0x00E0
-#define R_BCM1480_IMR_MAILBOX_1_SET_CPU         0x00E8
-#define R_BCM1480_IMR_MAILBOX_1_CLR_CPU         0x00F0
-#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H   0x0100
-#define BCM1480_IMR_INTERRUPT_STATUS_COUNT      8
-#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_H      0x0200
-#define BCM1480_IMR_INTERRUPT_MAP_COUNT         64
-
-#define R_BCM1480_IMR_INTERRUPT_DIAG_L          0x1010
-#define R_BCM1480_IMR_LDT_INTERRUPT_L           0x1018
-#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_L       0x1020
-#define R_BCM1480_IMR_INTERRUPT_MASK_L          0x1028
-#define R_BCM1480_IMR_INTERRUPT_TRACE_L         0x1038
+#define R_BCM1480_IMR_LDT_INTERRUPT_SET                0x0048
+#define R_BCM1480_IMR_MAILBOX_0_CPU            0x00C0
+#define R_BCM1480_IMR_MAILBOX_0_SET_CPU                0x00C8
+#define R_BCM1480_IMR_MAILBOX_0_CLR_CPU                0x00D0
+#define R_BCM1480_IMR_MAILBOX_1_CPU            0x00E0
+#define R_BCM1480_IMR_MAILBOX_1_SET_CPU                0x00E8
+#define R_BCM1480_IMR_MAILBOX_1_CLR_CPU                0x00F0
+#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H  0x0100
+#define BCM1480_IMR_INTERRUPT_STATUS_COUNT     8
+#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_H     0x0200
+#define BCM1480_IMR_INTERRUPT_MAP_COUNT                64
+
+#define R_BCM1480_IMR_INTERRUPT_DIAG_L         0x1010
+#define R_BCM1480_IMR_LDT_INTERRUPT_L          0x1018
+#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_L      0x1020
+#define R_BCM1480_IMR_INTERRUPT_MASK_L         0x1028
+#define R_BCM1480_IMR_INTERRUPT_TRACE_L                0x1038
 #define R_BCM1480_IMR_INTERRUPT_SOURCE_STATUS_L 0x1040
-#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L   0x1100
-#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_L      0x1200
+#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L  0x1100
+#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_L     0x1200
 
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE   0x0010028000
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU1_BASE   0x0010028100
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU2_BASE   0x0010028200
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU3_BASE   0x0010028300
-#define BCM1480_IMR_ALIAS_MAILBOX_SPACING       0100
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE  0x0010028000
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU1_BASE  0x0010028100
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU2_BASE  0x0010028200
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU3_BASE  0x0010028300
+#define BCM1480_IMR_ALIAS_MAILBOX_SPACING      0100
 
 #define A_BCM1480_IMR_ALIAS_MAILBOX(cpu)     (A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE + \
-                                        (cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
+                                       (cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
 #define A_BCM1480_IMR_ALIAS_MAILBOX_REGISTER(cpu, reg) (A_BCM1480_IMR_ALIAS_MAILBOX(cpu)+(reg))
 
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0           0x0000         /* 0x0x0 */
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET       0x0008         /* 0x0x8 */
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0          0x0000          /* 0x0x0 */
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET      0x0008          /* 0x0x8 */
 
 /*
  * these macros work together to build the address of a mailbox
  * register, e.g., A_BCM1480_MAILBOX_REGISTER(0,R_BCM1480_IMR_MAILBOX_SET,2)
  * for mbox_0_set_cpu2 returns 0x00100240C8
  */
-#define R_BCM1480_IMR_MAILBOX_CPU         0x00
-#define R_BCM1480_IMR_MAILBOX_SET         0x08
-#define R_BCM1480_IMR_MAILBOX_CLR         0x10
+#define R_BCM1480_IMR_MAILBOX_CPU        0x00
+#define R_BCM1480_IMR_MAILBOX_SET        0x08
+#define R_BCM1480_IMR_MAILBOX_CLR        0x10
 #define R_BCM1480_IMR_MAILBOX_NUM_SPACING 0x20
 #define A_BCM1480_MAILBOX_REGISTER(num, reg, cpu) \
     (A_BCM1480_IMR_CPU0_BASE + \
 /* BCM1480 has four more performance counter registers, and two control
    registers. */
 
-#define A_BCM1480_SCD_PERF_CNT_BASE         0x00100204C0
+#define A_BCM1480_SCD_PERF_CNT_BASE        0x00100204C0
 
-#define A_BCM1480_SCD_PERF_CNT_CFG0         0x00100204C0
-#define A_BCM1480_SCD_PERF_CNT_CFG_0        A_BCM1480_SCD_PERF_CNT_CFG0
-#define A_BCM1480_SCD_PERF_CNT_CFG1         0x00100204C8
-#define A_BCM1480_SCD_PERF_CNT_CFG_1        A_BCM1480_SCD_PERF_CNT_CFG1
+#define A_BCM1480_SCD_PERF_CNT_CFG0        0x00100204C0
+#define A_BCM1480_SCD_PERF_CNT_CFG_0       A_BCM1480_SCD_PERF_CNT_CFG0
+#define A_BCM1480_SCD_PERF_CNT_CFG1        0x00100204C8
+#define A_BCM1480_SCD_PERF_CNT_CFG_1       A_BCM1480_SCD_PERF_CNT_CFG1
 
-#define A_BCM1480_SCD_PERF_CNT_0            A_SCD_PERF_CNT_0
-#define A_BCM1480_SCD_PERF_CNT_1            A_SCD_PERF_CNT_1
-#define A_BCM1480_SCD_PERF_CNT_2            A_SCD_PERF_CNT_2
-#define A_BCM1480_SCD_PERF_CNT_3            A_SCD_PERF_CNT_3
+#define A_BCM1480_SCD_PERF_CNT_0           A_SCD_PERF_CNT_0
+#define A_BCM1480_SCD_PERF_CNT_1           A_SCD_PERF_CNT_1
+#define A_BCM1480_SCD_PERF_CNT_2           A_SCD_PERF_CNT_2
+#define A_BCM1480_SCD_PERF_CNT_3           A_SCD_PERF_CNT_3
 
-#define A_BCM1480_SCD_PERF_CNT_4            0x00100204F0
-#define A_BCM1480_SCD_PERF_CNT_5            0x00100204F8
-#define A_BCM1480_SCD_PERF_CNT_6            0x0010020500
-#define A_BCM1480_SCD_PERF_CNT_7            0x0010020508
+#define A_BCM1480_SCD_PERF_CNT_4           0x00100204F0
+#define A_BCM1480_SCD_PERF_CNT_5           0x00100204F8
+#define A_BCM1480_SCD_PERF_CNT_6           0x0010020500
+#define A_BCM1480_SCD_PERF_CNT_7           0x0010020508
 
 #define BCM1480_SCD_NUM_PERF_CNT 8
 #define BCM1480_SCD_PERF_CNT_SPACING 8
 
 /* Same as 1250 except BUS_ERR_STATUS_DEBUG is in a different place. */
 
-#define A_BCM1480_BUS_ERR_STATUS_DEBUG      0x00100208D8
+#define A_BCM1480_BUS_ERR_STATUS_DEBUG     0x00100208D8
 
 /*  *********************************************************************
     * System Debug Controller Registers (Section 19)
 #define BCM1480_HT_PORT_SPACING                   0x800
 #define A_BCM1480_HT_PORT_HEADER(x)       (A_BCM1480_HT_PORT0_HEADER + ((x)*BCM1480_HT_PORT_SPACING))
 
-#define A_BCM1480_HT_PORT0_HEADER          0x00FE000000
-#define A_BCM1480_HT_PORT1_HEADER          0x00FE000800
-#define A_BCM1480_HT_PORT2_HEADER          0x00FE001000
-#define A_BCM1480_HT_TYPE00_HEADER         0x00FE002000
+#define A_BCM1480_HT_PORT0_HEADER         0x00FE000000
+#define A_BCM1480_HT_PORT1_HEADER         0x00FE000800
+#define A_BCM1480_HT_PORT2_HEADER         0x00FE001000
+#define A_BCM1480_HT_TYPE00_HEADER        0x00FE002000
 
 
 /*  *********************************************************************
     * Node Controller Registers (Section 9)
     ********************************************************************* */
 
-#define A_BCM1480_NC_BASE                   0x00DFBD0000
+#define A_BCM1480_NC_BASE                  0x00DFBD0000
 
-#define A_BCM1480_NC_RLD_FIELD              0x00DFBD0000
-#define A_BCM1480_NC_RLD_TRIGGER            0x00DFBD0020
-#define A_BCM1480_NC_RLD_BAD_ERROR          0x00DFBD0040
-#define A_BCM1480_NC_RLD_COR_ERROR          0x00DFBD0060
-#define A_BCM1480_NC_RLD_ECC_STATUS         0x00DFBD0080
-#define A_BCM1480_NC_RLD_WAY_ENABLE         0x00DFBD00A0
-#define A_BCM1480_NC_RLD_RANDOM_LFSR        0x00DFBD00C0
+#define A_BCM1480_NC_RLD_FIELD             0x00DFBD0000
+#define A_BCM1480_NC_RLD_TRIGGER           0x00DFBD0020
+#define A_BCM1480_NC_RLD_BAD_ERROR         0x00DFBD0040
+#define A_BCM1480_NC_RLD_COR_ERROR         0x00DFBD0060
+#define A_BCM1480_NC_RLD_ECC_STATUS        0x00DFBD0080
+#define A_BCM1480_NC_RLD_WAY_ENABLE        0x00DFBD00A0
+#define A_BCM1480_NC_RLD_RANDOM_LFSR       0x00DFBD00C0
 
-#define A_BCM1480_NC_INTERRUPT_STATUS       0x00DFBD00E0
-#define A_BCM1480_NC_INTERRUPT_ENABLE       0x00DFBD0100
-#define A_BCM1480_NC_TIMEOUT_COUNTER        0x00DFBD0120
+#define A_BCM1480_NC_INTERRUPT_STATUS      0x00DFBD00E0
+#define A_BCM1480_NC_INTERRUPT_ENABLE      0x00DFBD0100
+#define A_BCM1480_NC_TIMEOUT_COUNTER       0x00DFBD0120
 #define A_BCM1480_NC_TIMEOUT_COUNTER_SEL    0x00DFBD0140
 
-#define A_BCM1480_NC_CREDIT_STATUS_REG0     0x00DFBD0200
-#define A_BCM1480_NC_CREDIT_STATUS_REG1     0x00DFBD0220
-#define A_BCM1480_NC_CREDIT_STATUS_REG2     0x00DFBD0240
-#define A_BCM1480_NC_CREDIT_STATUS_REG3     0x00DFBD0260
-#define A_BCM1480_NC_CREDIT_STATUS_REG4     0x00DFBD0280
-#define A_BCM1480_NC_CREDIT_STATUS_REG5     0x00DFBD02A0
-#define A_BCM1480_NC_CREDIT_STATUS_REG6     0x00DFBD02C0
-#define A_BCM1480_NC_CREDIT_STATUS_REG7     0x00DFBD02E0
-#define A_BCM1480_NC_CREDIT_STATUS_REG8     0x00DFBD0300
-#define A_BCM1480_NC_CREDIT_STATUS_REG9     0x00DFBD0320
+#define A_BCM1480_NC_CREDIT_STATUS_REG0            0x00DFBD0200
+#define A_BCM1480_NC_CREDIT_STATUS_REG1            0x00DFBD0220
+#define A_BCM1480_NC_CREDIT_STATUS_REG2            0x00DFBD0240
+#define A_BCM1480_NC_CREDIT_STATUS_REG3            0x00DFBD0260
+#define A_BCM1480_NC_CREDIT_STATUS_REG4            0x00DFBD0280
+#define A_BCM1480_NC_CREDIT_STATUS_REG5            0x00DFBD02A0
+#define A_BCM1480_NC_CREDIT_STATUS_REG6            0x00DFBD02C0
+#define A_BCM1480_NC_CREDIT_STATUS_REG7            0x00DFBD02E0
+#define A_BCM1480_NC_CREDIT_STATUS_REG8            0x00DFBD0300
+#define A_BCM1480_NC_CREDIT_STATUS_REG9            0x00DFBD0320
 #define A_BCM1480_NC_CREDIT_STATUS_REG10    0x00DFBE0000
 #define A_BCM1480_NC_CREDIT_STATUS_REG11    0x00DFBE0020
 #define A_BCM1480_NC_CREDIT_STATUS_REG12    0x00DFBE0040
 
-#define A_BCM1480_NC_SR_TIMEOUT_COUNTER     0x00DFBE0060
+#define A_BCM1480_NC_SR_TIMEOUT_COUNTER            0x00DFBE0060
 #define A_BCM1480_NC_SR_TIMEOUT_COUNTER_SEL 0x00DFBE0080
 
 
     * H&R Block Configuration Registers (Section 12.4)
     ********************************************************************* */
 
-#define A_BCM1480_HR_BASE_0                 0x00DF820000
-#define A_BCM1480_HR_BASE_1                 0x00DF8A0000
-#define A_BCM1480_HR_BASE_2                 0x00DF920000
-#define BCM1480_HR_REGISTER_SPACING         0x80000
+#define A_BCM1480_HR_BASE_0                0x00DF820000
+#define A_BCM1480_HR_BASE_1                0x00DF8A0000
+#define A_BCM1480_HR_BASE_2                0x00DF920000
+#define BCM1480_HR_REGISTER_SPACING        0x80000
 
-#define A_BCM1480_HR_BASE(idx)              (A_BCM1480_HR_BASE_0 + ((idx)*BCM1480_HR_REGISTER_SPACING))
-#define A_BCM1480_HR_REGISTER(idx, reg)      (A_BCM1480_HR_BASE(idx) + (reg))
+#define A_BCM1480_HR_BASE(idx)             (A_BCM1480_HR_BASE_0 + ((idx)*BCM1480_HR_REGISTER_SPACING))
+#define A_BCM1480_HR_REGISTER(idx, reg)             (A_BCM1480_HR_BASE(idx) + (reg))
 
-#define R_BCM1480_HR_CFG                    0x0000000000
+#define R_BCM1480_HR_CFG                   0x0000000000
 
 #define R_BCM1480_HR_MAPPING               0x0000010010
 
-#define BCM1480_HR_RULE_SPACING             0x0000000010
-#define BCM1480_HR_NUM_RULES                16
-#define BCM1480_HR_OP_OFFSET                0x0000000100
-#define BCM1480_HR_TYPE_OFFSET              0x0000000108
-#define R_BCM1480_HR_RULE_OP(idx)           (BCM1480_HR_OP_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
-#define R_BCM1480_HR_RULE_TYPE(idx)         (BCM1480_HR_TYPE_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
+#define BCM1480_HR_RULE_SPACING                    0x0000000010
+#define BCM1480_HR_NUM_RULES               16
+#define BCM1480_HR_OP_OFFSET               0x0000000100
+#define BCM1480_HR_TYPE_OFFSET             0x0000000108
+#define R_BCM1480_HR_RULE_OP(idx)          (BCM1480_HR_OP_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
+#define R_BCM1480_HR_RULE_TYPE(idx)        (BCM1480_HR_TYPE_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
 
-#define BCM1480_HR_LEAF_SPACING             0x0000000010
-#define BCM1480_HR_NUM_LEAVES               10
-#define BCM1480_HR_LEAF_OFFSET              0x0000000300
-#define R_BCM1480_HR_HA_LEAF0(idx)          (BCM1480_HR_LEAF_OFFSET + ((idx)*BCM1480_HR_LEAF_SPACING))
+#define BCM1480_HR_LEAF_SPACING                    0x0000000010
+#define BCM1480_HR_NUM_LEAVES              10
+#define BCM1480_HR_LEAF_OFFSET             0x0000000300
+#define R_BCM1480_HR_HA_LEAF0(idx)         (BCM1480_HR_LEAF_OFFSET + ((idx)*BCM1480_HR_LEAF_SPACING))
 
-#define R_BCM1480_HR_EX_LEAF0               0x00000003A0
+#define R_BCM1480_HR_EX_LEAF0              0x00000003A0
 
-#define BCM1480_HR_PATH_SPACING             0x0000000010
-#define BCM1480_HR_NUM_PATHS                16
-#define BCM1480_HR_PATH_OFFSET              0x0000000600
-#define R_BCM1480_HR_PATH(idx)              (BCM1480_HR_PATH_OFFSET + ((idx)*BCM1480_HR_PATH_SPACING))
+#define BCM1480_HR_PATH_SPACING                    0x0000000010
+#define BCM1480_HR_NUM_PATHS               16
+#define BCM1480_HR_PATH_OFFSET             0x0000000600
+#define R_BCM1480_HR_PATH(idx)             (BCM1480_HR_PATH_OFFSET + ((idx)*BCM1480_HR_PATH_SPACING))
 
-#define R_BCM1480_HR_PATH_DEFAULT           0x0000000700
+#define R_BCM1480_HR_PATH_DEFAULT          0x0000000700
 
-#define BCM1480_HR_ROUTE_SPACING            8
-#define BCM1480_HR_NUM_ROUTES               512
-#define BCM1480_HR_ROUTE_OFFSET             0x0000001000
-#define R_BCM1480_HR_RT_WORD(idx)           (BCM1480_HR_ROUTE_OFFSET + ((idx)*BCM1480_HR_ROUTE_SPACING))
+#define BCM1480_HR_ROUTE_SPACING           8
+#define BCM1480_HR_NUM_ROUTES              512
+#define BCM1480_HR_ROUTE_OFFSET                    0x0000001000
+#define R_BCM1480_HR_RT_WORD(idx)          (BCM1480_HR_ROUTE_OFFSET + ((idx)*BCM1480_HR_ROUTE_SPACING))
 
 
 /* checked to here - ehs */
     * Packet Manager DMA Registers (Section 12.5)
     ********************************************************************* */
 
-#define A_BCM1480_PM_BASE                   0x0010056000
+#define A_BCM1480_PM_BASE                  0x0010056000
 
-#define A_BCM1480_PMI_LCL_0                 0x0010058000
-#define A_BCM1480_PMO_LCL_0                 0x001005C000
-#define A_BCM1480_PMI_OFFSET_0              (A_BCM1480_PMI_LCL_0 - A_BCM1480_PM_BASE)
-#define A_BCM1480_PMO_OFFSET_0              (A_BCM1480_PMO_LCL_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMI_LCL_0                0x0010058000
+#define A_BCM1480_PMO_LCL_0                0x001005C000
+#define A_BCM1480_PMI_OFFSET_0             (A_BCM1480_PMI_LCL_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMO_OFFSET_0             (A_BCM1480_PMO_LCL_0 - A_BCM1480_PM_BASE)
 
-#define BCM1480_PM_LCL_REGISTER_SPACING     0x100
-#define BCM1480_PM_NUM_CHANNELS             32
+#define BCM1480_PM_LCL_REGISTER_SPACING            0x100
+#define BCM1480_PM_NUM_CHANNELS                    32
 
-#define A_BCM1480_PMI_LCL_BASE(idx)             (A_BCM1480_PMI_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
-#define A_BCM1480_PMI_LCL_REGISTER(idx, reg)     (A_BCM1480_PMI_LCL_BASE(idx) + (reg))
-#define A_BCM1480_PMO_LCL_BASE(idx)             (A_BCM1480_PMO_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
-#define A_BCM1480_PMO_LCL_REGISTER(idx, reg)     (A_BCM1480_PMO_LCL_BASE(idx) + (reg))
+#define A_BCM1480_PMI_LCL_BASE(idx)            (A_BCM1480_PMI_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
+#define A_BCM1480_PMI_LCL_REGISTER(idx, reg)    (A_BCM1480_PMI_LCL_BASE(idx) + (reg))
+#define A_BCM1480_PMO_LCL_BASE(idx)            (A_BCM1480_PMO_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
+#define A_BCM1480_PMO_LCL_REGISTER(idx, reg)    (A_BCM1480_PMO_LCL_BASE(idx) + (reg))
 
-#define BCM1480_PM_INT_PACKING              8
-#define BCM1480_PM_INT_FUNCTION_SPACING     0x40
-#define BCM1480_PM_INT_NUM_FUNCTIONS        3
+#define BCM1480_PM_INT_PACKING             8
+#define BCM1480_PM_INT_FUNCTION_SPACING            0x40
+#define BCM1480_PM_INT_NUM_FUNCTIONS       3
 
 /*
  * DMA channel registers relative to A_BCM1480_PMI_LCL_BASE(n) and A_BCM1480_PMO_LCL_BASE(n)
  */
 
-#define R_BCM1480_PM_BASE_SIZE              0x0000000000
-#define R_BCM1480_PM_CNT                    0x0000000008
-#define R_BCM1480_PM_PFCNT                  0x0000000010
-#define R_BCM1480_PM_LAST                   0x0000000018
-#define R_BCM1480_PM_PFINDX                 0x0000000020
-#define R_BCM1480_PM_INT_WMK                0x0000000028
-#define R_BCM1480_PM_CONFIG0                0x0000000030
-#define R_BCM1480_PM_LOCALDEBUG             0x0000000078
-#define R_BCM1480_PM_CACHEABILITY           0x0000000080   /* PMI only */
-#define R_BCM1480_PM_INT_CNFG               0x0000000088
-#define R_BCM1480_PM_DESC_MERGE_TIMER       0x0000000090
-#define R_BCM1480_PM_LOCALDEBUG_PIB         0x00000000F8   /* PMI only */
-#define R_BCM1480_PM_LOCALDEBUG_POB         0x00000000F8   /* PMO only */
+#define R_BCM1480_PM_BASE_SIZE             0x0000000000
+#define R_BCM1480_PM_CNT                   0x0000000008
+#define R_BCM1480_PM_PFCNT                 0x0000000010
+#define R_BCM1480_PM_LAST                  0x0000000018
+#define R_BCM1480_PM_PFINDX                0x0000000020
+#define R_BCM1480_PM_INT_WMK               0x0000000028
+#define R_BCM1480_PM_CONFIG0               0x0000000030
+#define R_BCM1480_PM_LOCALDEBUG                    0x0000000078
+#define R_BCM1480_PM_CACHEABILITY          0x0000000080   /* PMI only */
+#define R_BCM1480_PM_INT_CNFG              0x0000000088
+#define R_BCM1480_PM_DESC_MERGE_TIMER      0x0000000090
+#define R_BCM1480_PM_LOCALDEBUG_PIB        0x00000000F8   /* PMI only */
+#define R_BCM1480_PM_LOCALDEBUG_POB        0x00000000F8   /* PMO only */
 
 /*
  * Global Registers (Not Channelized)
  */
 
-#define A_BCM1480_PMI_GLB_0                 0x0010056000
-#define A_BCM1480_PMO_GLB_0                 0x0010057000
+#define A_BCM1480_PMI_GLB_0                0x0010056000
+#define A_BCM1480_PMO_GLB_0                0x0010057000
 
 /*
  * PM to TX Mapping Register relative to A_BCM1480_PMI_GLB_0 and A_BCM1480_PMO_GLB_0
  */
 
-#define R_BCM1480_PM_PMO_MAPPING            0x00000008C8   /* PMO only */
+#define R_BCM1480_PM_PMO_MAPPING           0x00000008C8   /* PMO only */
 
 #define A_BCM1480_PM_PMO_MAPPING       (A_BCM1480_PMO_GLB_0 + R_BCM1480_PM_PMO_MAPPING)
 
  */
 
 
-#define A_BCM1480_PMI_INT_0                 0x0010056800
-#define A_BCM1480_PMI_INT(q)                (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
-#define A_BCM1480_PMI_INT_OFFSET_0          (A_BCM1480_PMI_INT_0 - A_BCM1480_PM_BASE)
-#define A_BCM1480_PMO_INT_0                 0x0010057800
-#define A_BCM1480_PMO_INT(q)                (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
-#define A_BCM1480_PMO_INT_OFFSET_0          (A_BCM1480_PMO_INT_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMI_INT_0                0x0010056800
+#define A_BCM1480_PMI_INT(q)               (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
+#define A_BCM1480_PMI_INT_OFFSET_0         (A_BCM1480_PMI_INT_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMO_INT_0                0x0010057800
+#define A_BCM1480_PMO_INT(q)               (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
+#define A_BCM1480_PMO_INT_OFFSET_0         (A_BCM1480_PMO_INT_0 - A_BCM1480_PM_BASE)
 
 /*
  * Interrupt registers relative to A_BCM1480_PMI_INT_0 and A_BCM1480_PMO_INT_0
  */
 
-#define R_BCM1480_PM_INT_ST                 0x0000000000
-#define R_BCM1480_PM_INT_MSK                0x0000000040
-#define R_BCM1480_PM_INT_CLR                0x0000000080
-#define R_BCM1480_PM_MRGD_INT               0x00000000C0
+#define R_BCM1480_PM_INT_ST                0x0000000000
+#define R_BCM1480_PM_INT_MSK               0x0000000040
+#define R_BCM1480_PM_INT_CLR               0x0000000080
+#define R_BCM1480_PM_MRGD_INT              0x00000000C0
 
 /*
  * Debug registers (global)
  */
 
 #define A_BCM1480_PM_GLOBALDEBUGMODE_PMI    0x0010056000
-#define A_BCM1480_PM_GLOBALDEBUG_PID        0x00100567F8
-#define A_BCM1480_PM_GLOBALDEBUG_PIB        0x0010056FF8
+#define A_BCM1480_PM_GLOBALDEBUG_PID       0x00100567F8
+#define A_BCM1480_PM_GLOBALDEBUG_PIB       0x0010056FF8
 #define A_BCM1480_PM_GLOBALDEBUGMODE_PMO    0x0010057000
-#define A_BCM1480_PM_GLOBALDEBUG_POD        0x00100577F8
-#define A_BCM1480_PM_GLOBALDEBUG_POB        0x0010057FF8
+#define A_BCM1480_PM_GLOBALDEBUG_POD       0x00100577F8
+#define A_BCM1480_PM_GLOBALDEBUG_POB       0x0010057FF8
 
 /*  *********************************************************************
     *  Switch performance counters
     *  High-Speed Port Registers (Section 13)
     ********************************************************************* */
 
-#define A_BCM1480_HSP_BASE_0                0x00DF810000
-#define A_BCM1480_HSP_BASE_1                0x00DF890000
-#define A_BCM1480_HSP_BASE_2                0x00DF910000
-#define BCM1480_HSP_REGISTER_SPACING        0x80000
+#define A_BCM1480_HSP_BASE_0               0x00DF810000
+#define A_BCM1480_HSP_BASE_1               0x00DF890000
+#define A_BCM1480_HSP_BASE_2               0x00DF910000
+#define BCM1480_HSP_REGISTER_SPACING       0x80000
 
-#define A_BCM1480_HSP_BASE(idx)             (A_BCM1480_HSP_BASE_0 + ((idx)*BCM1480_HSP_REGISTER_SPACING))
+#define A_BCM1480_HSP_BASE(idx)                    (A_BCM1480_HSP_BASE_0 + ((idx)*BCM1480_HSP_REGISTER_SPACING))
 #define A_BCM1480_HSP_REGISTER(idx, reg)     (A_BCM1480_HSP_BASE(idx) + (reg))
 
-#define R_BCM1480_HSP_RX_SPI4_CFG_0           0x0000000000
-#define R_BCM1480_HSP_RX_SPI4_CFG_1           0x0000000008
+#define R_BCM1480_HSP_RX_SPI4_CFG_0          0x0000000000
+#define R_BCM1480_HSP_RX_SPI4_CFG_1          0x0000000008
 #define R_BCM1480_HSP_RX_SPI4_DESKEW_OVERRIDE 0x0000000010
 #define R_BCM1480_HSP_RX_SPI4_DESKEW_DATAPATH 0x0000000018
 #define R_BCM1480_HSP_RX_SPI4_PORT_INT_EN     0x0000000020
 #define R_BCM1480_HSP_RX_SPI4_CALENDAR_0      0x0000000200
 #define R_BCM1480_HSP_RX_SPI4_CALENDAR_1      0x0000000208
 
-#define R_BCM1480_HSP_RX_PLL_CNFG             0x0000000800
-#define R_BCM1480_HSP_RX_CALIBRATION          0x0000000808
-#define R_BCM1480_HSP_RX_TEST                 0x0000000810
-#define R_BCM1480_HSP_RX_DIAG_DETAILS         0x0000000818
-#define R_BCM1480_HSP_RX_DIAG_CRC_0           0x0000000820
-#define R_BCM1480_HSP_RX_DIAG_CRC_1           0x0000000828
-#define R_BCM1480_HSP_RX_DIAG_HTCMD           0x0000000830
-#define R_BCM1480_HSP_RX_DIAG_PKTCTL          0x0000000838
+#define R_BCM1480_HSP_RX_PLL_CNFG            0x0000000800
+#define R_BCM1480_HSP_RX_CALIBRATION         0x0000000808
+#define R_BCM1480_HSP_RX_TEST                0x0000000810
+#define R_BCM1480_HSP_RX_DIAG_DETAILS        0x0000000818
+#define R_BCM1480_HSP_RX_DIAG_CRC_0          0x0000000820
+#define R_BCM1480_HSP_RX_DIAG_CRC_1          0x0000000828
+#define R_BCM1480_HSP_RX_DIAG_HTCMD          0x0000000830
+#define R_BCM1480_HSP_RX_DIAG_PKTCTL         0x0000000838
 
 #define R_BCM1480_HSP_RX_VIS_FLCTRL_COUNTER   0x0000000870
 
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_0       0x0000020020
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_1       0x0000020028
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_2       0x0000020030
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_3       0x0000020038
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_4       0x0000020040
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_5       0x0000020048
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_6       0x0000020050
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_7       0x0000020058
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_0              0x0000020020
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_1              0x0000020028
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_2              0x0000020030
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_3              0x0000020038
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_4              0x0000020040
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_5              0x0000020048
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_6              0x0000020050
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_7              0x0000020058
 #define R_BCM1480_HSP_RX_PKT_RAMALLOC(idx)    (R_BCM1480_HSP_RX_PKT_RAMALLOC_0 + 8*(idx))
 
 /* XXX Following registers were shuffled.  Renamed/renumbered per errata. */
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_0      0x0000020078
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_1      0x0000020080
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_2      0x0000020088
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_3      0x0000020090
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_4      0x0000020098
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_5      0x00000200A0
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_0     0x0000020078
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_1     0x0000020080
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_2     0x0000020088
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_3     0x0000020090
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_4     0x0000020098
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_5     0x00000200A0
 
 #define R_BCM1480_HSP_RX_SPI_WATERMARK_0      0x00000200B0
 #define R_BCM1480_HSP_RX_SPI_WATERMARK_1      0x00000200B8
 #define R_BCM1480_HSP_RX_SPI_WATERMARK_7      0x00000200E8
 #define R_BCM1480_HSP_RX_SPI_WATERMARK(idx)   (R_BCM1480_HSP_RX_SPI_WATERMARK_0 + 8*(idx))
 
-#define R_BCM1480_HSP_RX_VIS_CMDQ_0           0x00000200F0
-#define R_BCM1480_HSP_RX_VIS_CMDQ_1           0x00000200F8
-#define R_BCM1480_HSP_RX_VIS_CMDQ_2           0x0000020100
-#define R_BCM1480_HSP_RX_RAM_READCTL          0x0000020108
-#define R_BCM1480_HSP_RX_RAM_READWINDOW       0x0000020110
-#define R_BCM1480_HSP_RX_RF_READCTL           0x0000020118
-#define R_BCM1480_HSP_RX_RF_READWINDOW        0x0000020120
+#define R_BCM1480_HSP_RX_VIS_CMDQ_0          0x00000200F0
+#define R_BCM1480_HSP_RX_VIS_CMDQ_1          0x00000200F8
+#define R_BCM1480_HSP_RX_VIS_CMDQ_2          0x0000020100
+#define R_BCM1480_HSP_RX_RAM_READCTL         0x0000020108
+#define R_BCM1480_HSP_RX_RAM_READWINDOW              0x0000020110
+#define R_BCM1480_HSP_RX_RF_READCTL          0x0000020118
+#define R_BCM1480_HSP_RX_RF_READWINDOW       0x0000020120
 
-#define R_BCM1480_HSP_TX_SPI4_CFG_0           0x0000040000
-#define R_BCM1480_HSP_TX_SPI4_CFG_1           0x0000040008
+#define R_BCM1480_HSP_TX_SPI4_CFG_0          0x0000040000
+#define R_BCM1480_HSP_TX_SPI4_CFG_1          0x0000040008
 #define R_BCM1480_HSP_TX_SPI4_TRAINING_FMT    0x0000040010
 
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_0       0x0000040020
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_1       0x0000040028
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_2       0x0000040030
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_3       0x0000040038
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_4       0x0000040040
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_5       0x0000040048
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_6       0x0000040050
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_7       0x0000040058
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_0              0x0000040020
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_1              0x0000040028
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_2              0x0000040030
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_3              0x0000040038
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_4              0x0000040040
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_5              0x0000040048
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_6              0x0000040050
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_7              0x0000040058
 #define R_BCM1480_HSP_TX_PKT_RAMALLOC(idx)    (R_BCM1480_HSP_TX_PKT_RAMALLOC_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_NPC_RAMALLOC         0x0000040078
-#define R_BCM1480_HSP_TX_RSP_RAMALLOC         0x0000040080
-#define R_BCM1480_HSP_TX_PC_RAMALLOC          0x0000040088
+#define R_BCM1480_HSP_TX_NPC_RAMALLOC        0x0000040078
+#define R_BCM1480_HSP_TX_RSP_RAMALLOC        0x0000040080
+#define R_BCM1480_HSP_TX_PC_RAMALLOC         0x0000040088
 #define R_BCM1480_HSP_TX_HTCC_RAMALLOC_0      0x0000040090
 #define R_BCM1480_HSP_TX_HTCC_RAMALLOC_1      0x0000040098
 #define R_BCM1480_HSP_TX_HTCC_RAMALLOC_2      0x00000400A0
 #define R_BCM1480_HSP_TX_PKT_RXPHITCNT_2      0x00000400C0
 #define R_BCM1480_HSP_TX_PKT_RXPHITCNT_3      0x00000400C8
 #define R_BCM1480_HSP_TX_PKT_RXPHITCNT(idx)   (R_BCM1480_HSP_TX_PKT_RXPHITCNT_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_HTIO_RXPHITCNT       0x00000400D0
-#define R_BCM1480_HSP_TX_HTCC_RXPHITCNT       0x00000400D8
+#define R_BCM1480_HSP_TX_HTIO_RXPHITCNT              0x00000400D0
+#define R_BCM1480_HSP_TX_HTCC_RXPHITCNT              0x00000400D8
 
 #define R_BCM1480_HSP_TX_PKT_TXPHITCNT_0      0x00000400E0
 #define R_BCM1480_HSP_TX_PKT_TXPHITCNT_1      0x00000400E8
 #define R_BCM1480_HSP_TX_PKT_TXPHITCNT_2      0x00000400F0
 #define R_BCM1480_HSP_TX_PKT_TXPHITCNT_3      0x00000400F8
 #define R_BCM1480_HSP_TX_PKT_TXPHITCNT(idx)   (R_BCM1480_HSP_TX_PKT_TXPHITCNT_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_HTIO_TXPHITCNT       0x0000040100
-#define R_BCM1480_HSP_TX_HTCC_TXPHITCNT       0x0000040108
+#define R_BCM1480_HSP_TX_HTIO_TXPHITCNT              0x0000040100
+#define R_BCM1480_HSP_TX_HTCC_TXPHITCNT              0x0000040108
 
 #define R_BCM1480_HSP_TX_SPI4_CALENDAR_0      0x0000040200
 #define R_BCM1480_HSP_TX_SPI4_CALENDAR_1      0x0000040208
 
-#define R_BCM1480_HSP_TX_PLL_CNFG             0x0000040800
-#define R_BCM1480_HSP_TX_CALIBRATION          0x0000040808
-#define R_BCM1480_HSP_TX_TEST                 0x0000040810
+#define R_BCM1480_HSP_TX_PLL_CNFG            0x0000040800
+#define R_BCM1480_HSP_TX_CALIBRATION         0x0000040808
+#define R_BCM1480_HSP_TX_TEST                0x0000040810
 
-#define R_BCM1480_HSP_TX_VIS_CMDQ_0           0x0000040840
-#define R_BCM1480_HSP_TX_VIS_CMDQ_1           0x0000040848
-#define R_BCM1480_HSP_TX_VIS_CMDQ_2           0x0000040850
-#define R_BCM1480_HSP_TX_RAM_READCTL          0x0000040860
-#define R_BCM1480_HSP_TX_RAM_READWINDOW       0x0000040868
-#define R_BCM1480_HSP_TX_RF_READCTL           0x0000040870
-#define R_BCM1480_HSP_TX_RF_READWINDOW        0x0000040878
+#define R_BCM1480_HSP_TX_VIS_CMDQ_0          0x0000040840
+#define R_BCM1480_HSP_TX_VIS_CMDQ_1          0x0000040848
+#define R_BCM1480_HSP_TX_VIS_CMDQ_2          0x0000040850
+#define R_BCM1480_HSP_TX_RAM_READCTL         0x0000040860
+#define R_BCM1480_HSP_TX_RAM_READWINDOW              0x0000040868
+#define R_BCM1480_HSP_TX_RF_READCTL          0x0000040870
+#define R_BCM1480_HSP_TX_RF_READWINDOW       0x0000040878
 
 #define R_BCM1480_HSP_TX_SPI4_PORT_INT_STATUS 0x0000040880
 #define R_BCM1480_HSP_TX_SPI4_PORT_INT_EN     0x0000040888
 
 #define R_BCM1480_HSP_TX_NEXT_ADDR_BASE 0x000040400
-#define R_BCM1480_HSP_TX_NEXT_ADDR_REGISTER(x)  (R_BCM1480_HSP_TX_NEXT_ADDR_BASE+ 8*(x))
+#define R_BCM1480_HSP_TX_NEXT_ADDR_REGISTER(x) (R_BCM1480_HSP_TX_NEXT_ADDR_BASE+ 8*(x))
 
 
 
     *  Physical Address Map (Table 10 and Figure 7)
     ********************************************************************* */
 
-#define A_BCM1480_PHYS_MEMORY_0                 _SB_MAKE64(0x0000000000)
-#define A_BCM1480_PHYS_MEMORY_SIZE              _SB_MAKE64((256*1024*1024))
-#define A_BCM1480_PHYS_SYSTEM_CTL               _SB_MAKE64(0x0010000000)
-#define A_BCM1480_PHYS_IO_SYSTEM                _SB_MAKE64(0x0010060000)
-#define A_BCM1480_PHYS_GENBUS                   _SB_MAKE64(0x0010090000)
-#define A_BCM1480_PHYS_GENBUS_END               _SB_MAKE64(0x0028000000)
-#define A_BCM1480_PHYS_PCI_MISC_MATCH_BYTES     _SB_MAKE64(0x0028000000)
-#define A_BCM1480_PHYS_PCI_IACK_MATCH_BYTES     _SB_MAKE64(0x0029000000)
-#define A_BCM1480_PHYS_PCI_IO_MATCH_BYTES       _SB_MAKE64(0x002C000000)
-#define A_BCM1480_PHYS_PCI_CFG_MATCH_BYTES      _SB_MAKE64(0x002E000000)
-#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BYTES     _SB_MAKE64(0x002F000000)
-#define A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES      _SB_MAKE64(0x0030000000)
-#define A_BCM1480_PHYS_HT_MEM_MATCH_BYTES       _SB_MAKE64(0x0040000000)
-#define A_BCM1480_PHYS_HT_MEM_MATCH_BITS        _SB_MAKE64(0x0060000000)
-#define A_BCM1480_PHYS_MEMORY_1                 _SB_MAKE64(0x0080000000)
-#define A_BCM1480_PHYS_MEMORY_2                 _SB_MAKE64(0x0090000000)
-#define A_BCM1480_PHYS_PCI_MISC_MATCH_BITS      _SB_MAKE64(0x00A8000000)
-#define A_BCM1480_PHYS_PCI_IACK_MATCH_BITS      _SB_MAKE64(0x00A9000000)
-#define A_BCM1480_PHYS_PCI_IO_MATCH_BITS        _SB_MAKE64(0x00AC000000)
-#define A_BCM1480_PHYS_PCI_CFG_MATCH_BITS       _SB_MAKE64(0x00AE000000)
-#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BITS      _SB_MAKE64(0x00AF000000)
-#define A_BCM1480_PHYS_PCI_MEM_MATCH_BITS       _SB_MAKE64(0x00B0000000)
-#define A_BCM1480_PHYS_MEMORY_3                 _SB_MAKE64(0x00C0000000)
-#define A_BCM1480_PHYS_L2_CACHE_TEST            _SB_MAKE64(0x00D0000000)
-#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES   _SB_MAKE64(0x00D8000000)
-#define A_BCM1480_PHYS_HT_IO_MATCH_BYTES        _SB_MAKE64(0x00DC000000)
-#define A_BCM1480_PHYS_HT_CFG_MATCH_BYTES       _SB_MAKE64(0x00DE000000)
-#define A_BCM1480_PHYS_HS_SUBSYS                _SB_MAKE64(0x00DF000000)
-#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BITS    _SB_MAKE64(0x00F8000000)
-#define A_BCM1480_PHYS_HT_IO_MATCH_BITS         _SB_MAKE64(0x00FC000000)
-#define A_BCM1480_PHYS_HT_CFG_MATCH_BITS        _SB_MAKE64(0x00FE000000)
-#define A_BCM1480_PHYS_MEMORY_EXP               _SB_MAKE64(0x0100000000)
-#define A_BCM1480_PHYS_MEMORY_EXP_SIZE          _SB_MAKE64((508*1024*1024*1024))
-#define A_BCM1480_PHYS_PCI_UPPER                _SB_MAKE64(0x1000000000)
-#define A_BCM1480_PHYS_HT_UPPER_MATCH_BYTES     _SB_MAKE64(0x2000000000)
-#define A_BCM1480_PHYS_HT_UPPER_MATCH_BITS      _SB_MAKE64(0x3000000000)
-#define A_BCM1480_PHYS_HT_NODE_ALIAS            _SB_MAKE64(0x4000000000)
-#define A_BCM1480_PHYS_HT_FULLACCESS            _SB_MAKE64(0xF000000000)
+#define A_BCM1480_PHYS_MEMORY_0                        _SB_MAKE64(0x0000000000)
+#define A_BCM1480_PHYS_MEMORY_SIZE             _SB_MAKE64((256*1024*1024))
+#define A_BCM1480_PHYS_SYSTEM_CTL              _SB_MAKE64(0x0010000000)
+#define A_BCM1480_PHYS_IO_SYSTEM               _SB_MAKE64(0x0010060000)
+#define A_BCM1480_PHYS_GENBUS                  _SB_MAKE64(0x0010090000)
+#define A_BCM1480_PHYS_GENBUS_END              _SB_MAKE64(0x0028000000)
+#define A_BCM1480_PHYS_PCI_MISC_MATCH_BYTES    _SB_MAKE64(0x0028000000)
+#define A_BCM1480_PHYS_PCI_IACK_MATCH_BYTES    _SB_MAKE64(0x0029000000)
+#define A_BCM1480_PHYS_PCI_IO_MATCH_BYTES      _SB_MAKE64(0x002C000000)
+#define A_BCM1480_PHYS_PCI_CFG_MATCH_BYTES     _SB_MAKE64(0x002E000000)
+#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BYTES    _SB_MAKE64(0x002F000000)
+#define A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES     _SB_MAKE64(0x0030000000)
+#define A_BCM1480_PHYS_HT_MEM_MATCH_BYTES      _SB_MAKE64(0x0040000000)
+#define A_BCM1480_PHYS_HT_MEM_MATCH_BITS       _SB_MAKE64(0x0060000000)
+#define A_BCM1480_PHYS_MEMORY_1                        _SB_MAKE64(0x0080000000)
+#define A_BCM1480_PHYS_MEMORY_2                        _SB_MAKE64(0x0090000000)
+#define A_BCM1480_PHYS_PCI_MISC_MATCH_BITS     _SB_MAKE64(0x00A8000000)
+#define A_BCM1480_PHYS_PCI_IACK_MATCH_BITS     _SB_MAKE64(0x00A9000000)
+#define A_BCM1480_PHYS_PCI_IO_MATCH_BITS       _SB_MAKE64(0x00AC000000)
+#define A_BCM1480_PHYS_PCI_CFG_MATCH_BITS      _SB_MAKE64(0x00AE000000)
+#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BITS     _SB_MAKE64(0x00AF000000)
+#define A_BCM1480_PHYS_PCI_MEM_MATCH_BITS      _SB_MAKE64(0x00B0000000)
+#define A_BCM1480_PHYS_MEMORY_3                        _SB_MAKE64(0x00C0000000)
+#define A_BCM1480_PHYS_L2_CACHE_TEST           _SB_MAKE64(0x00D0000000)
+#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES  _SB_MAKE64(0x00D8000000)
+#define A_BCM1480_PHYS_HT_IO_MATCH_BYTES       _SB_MAKE64(0x00DC000000)
+#define A_BCM1480_PHYS_HT_CFG_MATCH_BYTES      _SB_MAKE64(0x00DE000000)
+#define A_BCM1480_PHYS_HS_SUBSYS               _SB_MAKE64(0x00DF000000)
+#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BITS   _SB_MAKE64(0x00F8000000)
+#define A_BCM1480_PHYS_HT_IO_MATCH_BITS                _SB_MAKE64(0x00FC000000)
+#define A_BCM1480_PHYS_HT_CFG_MATCH_BITS       _SB_MAKE64(0x00FE000000)
+#define A_BCM1480_PHYS_MEMORY_EXP              _SB_MAKE64(0x0100000000)
+#define A_BCM1480_PHYS_MEMORY_EXP_SIZE         _SB_MAKE64((508*1024*1024*1024))
+#define A_BCM1480_PHYS_PCI_UPPER               _SB_MAKE64(0x1000000000)
+#define A_BCM1480_PHYS_HT_UPPER_MATCH_BYTES    _SB_MAKE64(0x2000000000)
+#define A_BCM1480_PHYS_HT_UPPER_MATCH_BITS     _SB_MAKE64(0x3000000000)
+#define A_BCM1480_PHYS_HT_NODE_ALIAS           _SB_MAKE64(0x4000000000)
+#define A_BCM1480_PHYS_HT_FULLACCESS           _SB_MAKE64(0xF000000000)
 
 
 /*  *********************************************************************
     *  L2 Cache as RAM (Table 54)
     ********************************************************************* */
 
-#define A_BCM1480_PHYS_L2CACHE_WAY_SIZE         _SB_MAKE64(0x0000020000)
-#define BCM1480_PHYS_L2CACHE_NUM_WAYS           8
-#define A_BCM1480_PHYS_L2CACHE_TOTAL_SIZE       _SB_MAKE64(0x0000100000)
-#define A_BCM1480_PHYS_L2CACHE_WAY0             _SB_MAKE64(0x00D0300000)
-#define A_BCM1480_PHYS_L2CACHE_WAY1             _SB_MAKE64(0x00D0320000)
-#define A_BCM1480_PHYS_L2CACHE_WAY2             _SB_MAKE64(0x00D0340000)
-#define A_BCM1480_PHYS_L2CACHE_WAY3             _SB_MAKE64(0x00D0360000)
-#define A_BCM1480_PHYS_L2CACHE_WAY4             _SB_MAKE64(0x00D0380000)
-#define A_BCM1480_PHYS_L2CACHE_WAY5             _SB_MAKE64(0x00D03A0000)
-#define A_BCM1480_PHYS_L2CACHE_WAY6             _SB_MAKE64(0x00D03C0000)
-#define A_BCM1480_PHYS_L2CACHE_WAY7             _SB_MAKE64(0x00D03E0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY_SIZE                _SB_MAKE64(0x0000020000)
+#define BCM1480_PHYS_L2CACHE_NUM_WAYS          8
+#define A_BCM1480_PHYS_L2CACHE_TOTAL_SIZE      _SB_MAKE64(0x0000100000)
+#define A_BCM1480_PHYS_L2CACHE_WAY0            _SB_MAKE64(0x00D0300000)
+#define A_BCM1480_PHYS_L2CACHE_WAY1            _SB_MAKE64(0x00D0320000)
+#define A_BCM1480_PHYS_L2CACHE_WAY2            _SB_MAKE64(0x00D0340000)
+#define A_BCM1480_PHYS_L2CACHE_WAY3            _SB_MAKE64(0x00D0360000)
+#define A_BCM1480_PHYS_L2CACHE_WAY4            _SB_MAKE64(0x00D0380000)
+#define A_BCM1480_PHYS_L2CACHE_WAY5            _SB_MAKE64(0x00D03A0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY6            _SB_MAKE64(0x00D03C0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY7            _SB_MAKE64(0x00D03E0000)
 
 #endif /* _BCM1480_REGS_H */
index 2af3706..8a1e2b0 100644 (file)
@@ -1,7 +1,7 @@
 /*  *********************************************************************
     *  BCM1280/BCM1400 Board Support Package
     *
-    *  SCD Constants and Macros                     File: bcm1480_scd.h
+    *  SCD Constants and Macros                            File: bcm1480_scd.h
     *
     *  This module contains constants and macros useful for
     *  manipulating the System Control and Debug module.
  * New part definitions
  */
 
-#define K_SYS_PART_BCM1480          0x1406
-#define K_SYS_PART_BCM1280          0x1206
-#define K_SYS_PART_BCM1455          0x1407
-#define K_SYS_PART_BCM1255          0x1257
-#define K_SYS_PART_BCM1158          0x1156
+#define K_SYS_PART_BCM1480         0x1406
+#define K_SYS_PART_BCM1280         0x1206
+#define K_SYS_PART_BCM1455         0x1407
+#define K_SYS_PART_BCM1255         0x1257
+#define K_SYS_PART_BCM1158         0x1156
 
 /*
  * Manufacturing Information Register (Table 14)
  * Entire register is different from 1250, all new constants below
  */
 
-#define M_BCM1480_SYS_RESERVED0             _SB_MAKEMASK1(0)
-#define M_BCM1480_SYS_HT_MINRSTCNT          _SB_MAKEMASK1(1)
-#define M_BCM1480_SYS_RESERVED2             _SB_MAKEMASK1(2)
-#define M_BCM1480_SYS_RESERVED3             _SB_MAKEMASK1(3)
-#define M_BCM1480_SYS_RESERVED4             _SB_MAKEMASK1(4)
-#define M_BCM1480_SYS_IOB_DIV               _SB_MAKEMASK1(5)
-
-#define S_BCM1480_SYS_PLL_DIV               _SB_MAKE64(6)
-#define M_BCM1480_SYS_PLL_DIV               _SB_MAKEMASK(5, S_BCM1480_SYS_PLL_DIV)
-#define V_BCM1480_SYS_PLL_DIV(x)            _SB_MAKEVALUE(x, S_BCM1480_SYS_PLL_DIV)
-#define G_BCM1480_SYS_PLL_DIV(x)            _SB_GETVALUE(x, S_BCM1480_SYS_PLL_DIV, M_BCM1480_SYS_PLL_DIV)
-
-#define S_BCM1480_SYS_SW_DIV                _SB_MAKE64(11)
-#define M_BCM1480_SYS_SW_DIV                _SB_MAKEMASK(5, S_BCM1480_SYS_SW_DIV)
-#define V_BCM1480_SYS_SW_DIV(x)             _SB_MAKEVALUE(x, S_BCM1480_SYS_SW_DIV)
-#define G_BCM1480_SYS_SW_DIV(x)             _SB_GETVALUE(x, S_BCM1480_SYS_SW_DIV, M_BCM1480_SYS_SW_DIV)
-
-#define M_BCM1480_SYS_PCMCIA_ENABLE         _SB_MAKEMASK1(16)
-#define M_BCM1480_SYS_DUART1_ENABLE         _SB_MAKEMASK1(17)
-
-#define S_BCM1480_SYS_BOOT_MODE             _SB_MAKE64(18)
-#define M_BCM1480_SYS_BOOT_MODE             _SB_MAKEMASK(2, S_BCM1480_SYS_BOOT_MODE)
-#define V_BCM1480_SYS_BOOT_MODE(x)          _SB_MAKEVALUE(x, S_BCM1480_SYS_BOOT_MODE)
-#define G_BCM1480_SYS_BOOT_MODE(x)          _SB_GETVALUE(x, S_BCM1480_SYS_BOOT_MODE, M_BCM1480_SYS_BOOT_MODE)
-#define K_BCM1480_SYS_BOOT_MODE_ROM32       0
-#define K_BCM1480_SYS_BOOT_MODE_ROM8        1
+#define M_BCM1480_SYS_RESERVED0                    _SB_MAKEMASK1(0)
+#define M_BCM1480_SYS_HT_MINRSTCNT         _SB_MAKEMASK1(1)
+#define M_BCM1480_SYS_RESERVED2                    _SB_MAKEMASK1(2)
+#define M_BCM1480_SYS_RESERVED3                    _SB_MAKEMASK1(3)
+#define M_BCM1480_SYS_RESERVED4                    _SB_MAKEMASK1(4)
+#define M_BCM1480_SYS_IOB_DIV              _SB_MAKEMASK1(5)
+
+#define S_BCM1480_SYS_PLL_DIV              _SB_MAKE64(6)
+#define M_BCM1480_SYS_PLL_DIV              _SB_MAKEMASK(5, S_BCM1480_SYS_PLL_DIV)
+#define V_BCM1480_SYS_PLL_DIV(x)           _SB_MAKEVALUE(x, S_BCM1480_SYS_PLL_DIV)
+#define G_BCM1480_SYS_PLL_DIV(x)           _SB_GETVALUE(x, S_BCM1480_SYS_PLL_DIV, M_BCM1480_SYS_PLL_DIV)
+
+#define S_BCM1480_SYS_SW_DIV               _SB_MAKE64(11)
+#define M_BCM1480_SYS_SW_DIV               _SB_MAKEMASK(5, S_BCM1480_SYS_SW_DIV)
+#define V_BCM1480_SYS_SW_DIV(x)                    _SB_MAKEVALUE(x, S_BCM1480_SYS_SW_DIV)
+#define G_BCM1480_SYS_SW_DIV(x)                    _SB_GETVALUE(x, S_BCM1480_SYS_SW_DIV, M_BCM1480_SYS_SW_DIV)
+
+#define M_BCM1480_SYS_PCMCIA_ENABLE        _SB_MAKEMASK1(16)
+#define M_BCM1480_SYS_DUART1_ENABLE        _SB_MAKEMASK1(17)
+
+#define S_BCM1480_SYS_BOOT_MODE                    _SB_MAKE64(18)
+#define M_BCM1480_SYS_BOOT_MODE                    _SB_MAKEMASK(2, S_BCM1480_SYS_BOOT_MODE)
+#define V_BCM1480_SYS_BOOT_MODE(x)         _SB_MAKEVALUE(x, S_BCM1480_SYS_BOOT_MODE)
+#define G_BCM1480_SYS_BOOT_MODE(x)         _SB_GETVALUE(x, S_BCM1480_SYS_BOOT_MODE, M_BCM1480_SYS_BOOT_MODE)
+#define K_BCM1480_SYS_BOOT_MODE_ROM32      0
+#define K_BCM1480_SYS_BOOT_MODE_ROM8       1
 #define K_BCM1480_SYS_BOOT_MODE_SMBUS_SMALL 2
 #define K_BCM1480_SYS_BOOT_MODE_SMBUS_BIG   3
-#define M_BCM1480_SYS_BOOT_MODE_SMBUS       _SB_MAKEMASK1(19)
-
-#define M_BCM1480_SYS_PCI_HOST              _SB_MAKEMASK1(20)
-#define M_BCM1480_SYS_PCI_ARBITER           _SB_MAKEMASK1(21)
-#define M_BCM1480_SYS_BIG_ENDIAN            _SB_MAKEMASK1(22)
-#define M_BCM1480_SYS_GENCLK_EN             _SB_MAKEMASK1(23)
-#define M_BCM1480_SYS_GEN_PARITY_EN         _SB_MAKEMASK1(24)
-#define M_BCM1480_SYS_RESERVED25            _SB_MAKEMASK1(25)
-
-#define S_BCM1480_SYS_CONFIG                26
-#define M_BCM1480_SYS_CONFIG                _SB_MAKEMASK(6, S_BCM1480_SYS_CONFIG)
-#define V_BCM1480_SYS_CONFIG(x)             _SB_MAKEVALUE(x, S_BCM1480_SYS_CONFIG)
-#define G_BCM1480_SYS_CONFIG(x)             _SB_GETVALUE(x, S_BCM1480_SYS_CONFIG, M_BCM1480_SYS_CONFIG)
-
-#define M_BCM1480_SYS_RESERVED32            _SB_MAKEMASK(32, 15)
-
-#define S_BCM1480_SYS_NODEID                47
-#define M_BCM1480_SYS_NODEID                _SB_MAKEMASK(4, S_BCM1480_SYS_NODEID)
-#define V_BCM1480_SYS_NODEID(x)             _SB_MAKEVALUE(x, S_BCM1480_SYS_NODEID)
-#define G_BCM1480_SYS_NODEID(x)             _SB_GETVALUE(x, S_BCM1480_SYS_NODEID, M_BCM1480_SYS_NODEID)
-
-#define M_BCM1480_SYS_CCNUMA_EN             _SB_MAKEMASK1(51)
-#define M_BCM1480_SYS_CPU_RESET_0           _SB_MAKEMASK1(52)
-#define M_BCM1480_SYS_CPU_RESET_1           _SB_MAKEMASK1(53)
-#define M_BCM1480_SYS_CPU_RESET_2           _SB_MAKEMASK1(54)
-#define M_BCM1480_SYS_CPU_RESET_3           _SB_MAKEMASK1(55)
-#define S_BCM1480_SYS_DISABLECPU0           56
-#define M_BCM1480_SYS_DISABLECPU0           _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU0)
-#define S_BCM1480_SYS_DISABLECPU1           57
-#define M_BCM1480_SYS_DISABLECPU1           _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU1)
-#define S_BCM1480_SYS_DISABLECPU2           58
-#define M_BCM1480_SYS_DISABLECPU2           _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU2)
-#define S_BCM1480_SYS_DISABLECPU3           59
-#define M_BCM1480_SYS_DISABLECPU3           _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU3)
-
-#define M_BCM1480_SYS_SB_SOFTRES            _SB_MAKEMASK1(60)
-#define M_BCM1480_SYS_EXT_RESET             _SB_MAKEMASK1(61)
-#define M_BCM1480_SYS_SYSTEM_RESET          _SB_MAKEMASK1(62)
-#define M_BCM1480_SYS_SW_FLAG               _SB_MAKEMASK1(63)
+#define M_BCM1480_SYS_BOOT_MODE_SMBUS      _SB_MAKEMASK1(19)
+
+#define M_BCM1480_SYS_PCI_HOST             _SB_MAKEMASK1(20)
+#define M_BCM1480_SYS_PCI_ARBITER          _SB_MAKEMASK1(21)
+#define M_BCM1480_SYS_BIG_ENDIAN           _SB_MAKEMASK1(22)
+#define M_BCM1480_SYS_GENCLK_EN                    _SB_MAKEMASK1(23)
+#define M_BCM1480_SYS_GEN_PARITY_EN        _SB_MAKEMASK1(24)
+#define M_BCM1480_SYS_RESERVED25           _SB_MAKEMASK1(25)
+
+#define S_BCM1480_SYS_CONFIG               26
+#define M_BCM1480_SYS_CONFIG               _SB_MAKEMASK(6, S_BCM1480_SYS_CONFIG)
+#define V_BCM1480_SYS_CONFIG(x)                    _SB_MAKEVALUE(x, S_BCM1480_SYS_CONFIG)
+#define G_BCM1480_SYS_CONFIG(x)                    _SB_GETVALUE(x, S_BCM1480_SYS_CONFIG, M_BCM1480_SYS_CONFIG)
+
+#define M_BCM1480_SYS_RESERVED32           _SB_MAKEMASK(32, 15)
+
+#define S_BCM1480_SYS_NODEID               47
+#define M_BCM1480_SYS_NODEID               _SB_MAKEMASK(4, S_BCM1480_SYS_NODEID)
+#define V_BCM1480_SYS_NODEID(x)                    _SB_MAKEVALUE(x, S_BCM1480_SYS_NODEID)
+#define G_BCM1480_SYS_NODEID(x)                    _SB_GETVALUE(x, S_BCM1480_SYS_NODEID, M_BCM1480_SYS_NODEID)
+
+#define M_BCM1480_SYS_CCNUMA_EN                    _SB_MAKEMASK1(51)
+#define M_BCM1480_SYS_CPU_RESET_0          _SB_MAKEMASK1(52)
+#define M_BCM1480_SYS_CPU_RESET_1          _SB_MAKEMASK1(53)
+#define M_BCM1480_SYS_CPU_RESET_2          _SB_MAKEMASK1(54)
+#define M_BCM1480_SYS_CPU_RESET_3          _SB_MAKEMASK1(55)
+#define S_BCM1480_SYS_DISABLECPU0          56
+#define M_BCM1480_SYS_DISABLECPU0          _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU0)
+#define S_BCM1480_SYS_DISABLECPU1          57
+#define M_BCM1480_SYS_DISABLECPU1          _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU1)
+#define S_BCM1480_SYS_DISABLECPU2          58
+#define M_BCM1480_SYS_DISABLECPU2          _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU2)
+#define S_BCM1480_SYS_DISABLECPU3          59
+#define M_BCM1480_SYS_DISABLECPU3          _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU3)
+
+#define M_BCM1480_SYS_SB_SOFTRES           _SB_MAKEMASK1(60)
+#define M_BCM1480_SYS_EXT_RESET                    _SB_MAKEMASK1(61)
+#define M_BCM1480_SYS_SYSTEM_RESET         _SB_MAKEMASK1(62)
+#define M_BCM1480_SYS_SW_FLAG              _SB_MAKEMASK1(63)
 
 /*
  * Scratch Register (Table 16)
  * Registers: SCD_WDOG_CFG_x
  */
 
-#define M_BCM1480_SCD_WDOG_ENABLE           _SB_MAKEMASK1(0)
+#define M_BCM1480_SCD_WDOG_ENABLE          _SB_MAKEMASK1(0)
 
-#define S_BCM1480_SCD_WDOG_RESET_TYPE       2
-#define M_BCM1480_SCD_WDOG_RESET_TYPE       _SB_MAKEMASK(5, S_BCM1480_SCD_WDOG_RESET_TYPE)
+#define S_BCM1480_SCD_WDOG_RESET_TYPE      2
+#define M_BCM1480_SCD_WDOG_RESET_TYPE      _SB_MAKEMASK(5, S_BCM1480_SCD_WDOG_RESET_TYPE)
 #define V_BCM1480_SCD_WDOG_RESET_TYPE(x)    _SB_MAKEVALUE(x, S_BCM1480_SCD_WDOG_RESET_TYPE)
 #define G_BCM1480_SCD_WDOG_RESET_TYPE(x)    _SB_GETVALUE(x, S_BCM1480_SCD_WDOG_RESET_TYPE, M_BCM1480_SCD_WDOG_RESET_TYPE)
 
-#define K_BCM1480_SCD_WDOG_RESET_FULL       0  /* actually, (x & 1) == 0  */
-#define K_BCM1480_SCD_WDOG_RESET_SOFT       1
-#define K_BCM1480_SCD_WDOG_RESET_CPU0       3
-#define K_BCM1480_SCD_WDOG_RESET_CPU1       5
-#define K_BCM1480_SCD_WDOG_RESET_CPU2       9
-#define K_BCM1480_SCD_WDOG_RESET_CPU3       17
+#define K_BCM1480_SCD_WDOG_RESET_FULL      0  /* actually, (x & 1) == 0  */
+#define K_BCM1480_SCD_WDOG_RESET_SOFT      1
+#define K_BCM1480_SCD_WDOG_RESET_CPU0      3
+#define K_BCM1480_SCD_WDOG_RESET_CPU1      5
+#define K_BCM1480_SCD_WDOG_RESET_CPU2      9
+#define K_BCM1480_SCD_WDOG_RESET_CPU3      17
 #define K_BCM1480_SCD_WDOG_RESET_ALL_CPUS   31
 
 
-#define M_BCM1480_SCD_WDOG_HAS_RESET        _SB_MAKEMASK1(8)
+#define M_BCM1480_SCD_WDOG_HAS_RESET       _SB_MAKEMASK1(8)
 
 /*
  * General Timer Initial Count Registers (Table 26)
  * The clear/enable bits are in different locations on the 1250 and 1480.
  */
 
-#define S_SPC_CFG_SRC4              32
-#define M_SPC_CFG_SRC4              _SB_MAKEMASK(8, S_SPC_CFG_SRC4)
-#define V_SPC_CFG_SRC4(x)           _SB_MAKEVALUE(x, S_SPC_CFG_SRC4)
-#define G_SPC_CFG_SRC4(x)           _SB_GETVALUE(x, S_SPC_CFG_SRC4, M_SPC_CFG_SRC4)
+#define S_SPC_CFG_SRC4             32
+#define M_SPC_CFG_SRC4             _SB_MAKEMASK(8, S_SPC_CFG_SRC4)
+#define V_SPC_CFG_SRC4(x)          _SB_MAKEVALUE(x, S_SPC_CFG_SRC4)
+#define G_SPC_CFG_SRC4(x)          _SB_GETVALUE(x, S_SPC_CFG_SRC4, M_SPC_CFG_SRC4)
 
-#define S_SPC_CFG_SRC5              40
-#define M_SPC_CFG_SRC5              _SB_MAKEMASK(8, S_SPC_CFG_SRC5)
-#define V_SPC_CFG_SRC5(x)           _SB_MAKEVALUE(x, S_SPC_CFG_SRC5)
-#define G_SPC_CFG_SRC5(x)           _SB_GETVALUE(x, S_SPC_CFG_SRC5, M_SPC_CFG_SRC5)
+#define S_SPC_CFG_SRC5             40
+#define M_SPC_CFG_SRC5             _SB_MAKEMASK(8, S_SPC_CFG_SRC5)
+#define V_SPC_CFG_SRC5(x)          _SB_MAKEVALUE(x, S_SPC_CFG_SRC5)
+#define G_SPC_CFG_SRC5(x)          _SB_GETVALUE(x, S_SPC_CFG_SRC5, M_SPC_CFG_SRC5)
 
-#define S_SPC_CFG_SRC6              48
-#define M_SPC_CFG_SRC6              _SB_MAKEMASK(8, S_SPC_CFG_SRC6)
-#define V_SPC_CFG_SRC6(x)           _SB_MAKEVALUE(x, S_SPC_CFG_SRC6)
-#define G_SPC_CFG_SRC6(x)           _SB_GETVALUE(x, S_SPC_CFG_SRC6, M_SPC_CFG_SRC6)
+#define S_SPC_CFG_SRC6             48
+#define M_SPC_CFG_SRC6             _SB_MAKEMASK(8, S_SPC_CFG_SRC6)
+#define V_SPC_CFG_SRC6(x)          _SB_MAKEVALUE(x, S_SPC_CFG_SRC6)
+#define G_SPC_CFG_SRC6(x)          _SB_GETVALUE(x, S_SPC_CFG_SRC6, M_SPC_CFG_SRC6)
 
-#define S_SPC_CFG_SRC7              56
-#define M_SPC_CFG_SRC7              _SB_MAKEMASK(8, S_SPC_CFG_SRC7)
-#define V_SPC_CFG_SRC7(x)           _SB_MAKEVALUE(x, S_SPC_CFG_SRC7)
-#define G_SPC_CFG_SRC7(x)           _SB_GETVALUE(x, S_SPC_CFG_SRC7, M_SPC_CFG_SRC7)
+#define S_SPC_CFG_SRC7             56
+#define M_SPC_CFG_SRC7             _SB_MAKEMASK(8, S_SPC_CFG_SRC7)
+#define V_SPC_CFG_SRC7(x)          _SB_MAKEVALUE(x, S_SPC_CFG_SRC7)
+#define G_SPC_CFG_SRC7(x)          _SB_GETVALUE(x, S_SPC_CFG_SRC7, M_SPC_CFG_SRC7)
 
 /*
  * System Performance Counter Control Register (Table 32)
  * Register: PERF_CNT_CFG_1
  * BCM1480 specific
  */
-#define M_BCM1480_SPC_CFG_CLEAR     _SB_MAKEMASK1(0)
+#define M_BCM1480_SPC_CFG_CLEAR            _SB_MAKEMASK1(0)
 #define M_BCM1480_SPC_CFG_ENABLE    _SB_MAKEMASK1(1)
 #if SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_SPC_CFG_CLEAR                        M_BCM1480_SPC_CFG_CLEAR
  * Registers: PERF_CNT_x
  */
 
-#define S_BCM1480_SPC_CNT_COUNT             0
-#define M_BCM1480_SPC_CNT_COUNT             _SB_MAKEMASK(40, S_BCM1480_SPC_CNT_COUNT)
-#define V_BCM1480_SPC_CNT_COUNT(x)          _SB_MAKEVALUE(x, S_BCM1480_SPC_CNT_COUNT)
-#define G_BCM1480_SPC_CNT_COUNT(x)          _SB_GETVALUE(x, S_BCM1480_SPC_CNT_COUNT, M_BCM1480_SPC_CNT_COUNT)
+#define S_BCM1480_SPC_CNT_COUNT                    0
+#define M_BCM1480_SPC_CNT_COUNT                    _SB_MAKEMASK(40, S_BCM1480_SPC_CNT_COUNT)
+#define V_BCM1480_SPC_CNT_COUNT(x)         _SB_MAKEVALUE(x, S_BCM1480_SPC_CNT_COUNT)
+#define G_BCM1480_SPC_CNT_COUNT(x)         _SB_GETVALUE(x, S_BCM1480_SPC_CNT_COUNT, M_BCM1480_SPC_CNT_COUNT)
 
-#define M_BCM1480_SPC_CNT_OFLOW             _SB_MAKEMASK1(40)
+#define M_BCM1480_SPC_CNT_OFLOW                    _SB_MAKEMASK1(40)
 
 
 /*
 #define M_BCM1480_ATRAP_INDEX            _SB_MAKEMASK(4, 0)
 #define M_BCM1480_ATRAP_ADDRESS                  _SB_MAKEMASK(40, 0)
 
-#define S_BCM1480_ATRAP_CFG_CNT            0
-#define M_BCM1480_ATRAP_CFG_CNT            _SB_MAKEMASK(3, S_BCM1480_ATRAP_CFG_CNT)
-#define V_BCM1480_ATRAP_CFG_CNT(x)         _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CNT)
-#define G_BCM1480_ATRAP_CFG_CNT(x)         _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CNT, M_BCM1480_ATRAP_CFG_CNT)
+#define S_BCM1480_ATRAP_CFG_CNT                   0
+#define M_BCM1480_ATRAP_CFG_CNT                   _SB_MAKEMASK(3, S_BCM1480_ATRAP_CFG_CNT)
+#define V_BCM1480_ATRAP_CFG_CNT(x)        _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CNT)
+#define G_BCM1480_ATRAP_CFG_CNT(x)        _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CNT, M_BCM1480_ATRAP_CFG_CNT)
 
 #define M_BCM1480_ATRAP_CFG_WRITE         _SB_MAKEMASK1(3)
-#define M_BCM1480_ATRAP_CFG_ALL                   _SB_MAKEMASK1(4)
-#define M_BCM1480_ATRAP_CFG_INV                   _SB_MAKEMASK1(5)
+#define M_BCM1480_ATRAP_CFG_ALL                   _SB_MAKEMASK1(4)
+#define M_BCM1480_ATRAP_CFG_INV                   _SB_MAKEMASK1(5)
 #define M_BCM1480_ATRAP_CFG_USESRC        _SB_MAKEMASK1(6)
 #define M_BCM1480_ATRAP_CFG_SRCINV        _SB_MAKEMASK1(7)
 
-#define S_BCM1480_ATRAP_CFG_AGENTID     8
-#define M_BCM1480_ATRAP_CFG_AGENTID     _SB_MAKEMASK(4, S_BCM1480_ATRAP_CFG_AGENTID)
-#define V_BCM1480_ATRAP_CFG_AGENTID(x)  _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID)
-#define G_BCM1480_ATRAP_CFG_AGENTID(x)  _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID, M_BCM1480_ATRAP_CFG_AGENTID)
+#define S_BCM1480_ATRAP_CFG_AGENTID    8
+#define M_BCM1480_ATRAP_CFG_AGENTID    _SB_MAKEMASK(4, S_BCM1480_ATRAP_CFG_AGENTID)
+#define V_BCM1480_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID)
+#define G_BCM1480_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID, M_BCM1480_ATRAP_CFG_AGENTID)
 
 
-#define K_BCM1480_BUS_AGENT_CPU0            0
-#define K_BCM1480_BUS_AGENT_CPU1            1
-#define K_BCM1480_BUS_AGENT_NC              2
-#define K_BCM1480_BUS_AGENT_IOB             3
-#define K_BCM1480_BUS_AGENT_SCD             4
-#define K_BCM1480_BUS_AGENT_L2C             6
-#define K_BCM1480_BUS_AGENT_MC              7
-#define K_BCM1480_BUS_AGENT_CPU2            8
-#define K_BCM1480_BUS_AGENT_CPU3            9
-#define K_BCM1480_BUS_AGENT_PM              10
+#define K_BCM1480_BUS_AGENT_CPU0           0
+#define K_BCM1480_BUS_AGENT_CPU1           1
+#define K_BCM1480_BUS_AGENT_NC             2
+#define K_BCM1480_BUS_AGENT_IOB                    3
+#define K_BCM1480_BUS_AGENT_SCD                    4
+#define K_BCM1480_BUS_AGENT_L2C                    6
+#define K_BCM1480_BUS_AGENT_MC             7
+#define K_BCM1480_BUS_AGENT_CPU2           8
+#define K_BCM1480_BUS_AGENT_CPU3           9
+#define K_BCM1480_BUS_AGENT_PM             10
 
-#define S_BCM1480_ATRAP_CFG_CATTR           12
-#define M_BCM1480_ATRAP_CFG_CATTR           _SB_MAKEMASK(2, S_BCM1480_ATRAP_CFG_CATTR)
-#define V_BCM1480_ATRAP_CFG_CATTR(x)        _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CATTR)
-#define G_BCM1480_ATRAP_CFG_CATTR(x)        _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CATTR, M_BCM1480_ATRAP_CFG_CATTR)
+#define S_BCM1480_ATRAP_CFG_CATTR          12
+#define M_BCM1480_ATRAP_CFG_CATTR          _SB_MAKEMASK(2, S_BCM1480_ATRAP_CFG_CATTR)
+#define V_BCM1480_ATRAP_CFG_CATTR(x)       _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CATTR)
+#define G_BCM1480_ATRAP_CFG_CATTR(x)       _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CATTR, M_BCM1480_ATRAP_CFG_CATTR)
 
 #define K_BCM1480_ATRAP_CFG_CATTR_IGNORE    0
-#define K_BCM1480_ATRAP_CFG_CATTR_UNC       1
+#define K_BCM1480_ATRAP_CFG_CATTR_UNC      1
 #define K_BCM1480_ATRAP_CFG_CATTR_NONCOH    2
 #define K_BCM1480_ATRAP_CFG_CATTR_COHERENT  3
 
-#define M_BCM1480_ATRAP_CFG_CATTRINV        _SB_MAKEMASK1(14)
+#define M_BCM1480_ATRAP_CFG_CATTRINV       _SB_MAKEMASK1(14)
 
 
 /*
 
 #define M_BCM1480_SCD_TRSEQ_TID_MATCH_EN    _SB_MAKEMASK1(25)
 
-#define S_BCM1480_SCD_TRSEQ_SWFUNC          26
-#define M_BCM1480_SCD_TRSEQ_SWFUNC          _SB_MAKEMASK(2, S_BCM1480_SCD_TRSEQ_SWFUNC)
-#define V_BCM1480_SCD_TRSEQ_SWFUNC(x)       _SB_MAKEVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC)
-#define G_BCM1480_SCD_TRSEQ_SWFUNC(x)       _SB_GETVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC, M_BCM1480_SCD_TRSEQ_SWFUNC)
+#define S_BCM1480_SCD_TRSEQ_SWFUNC         26
+#define M_BCM1480_SCD_TRSEQ_SWFUNC         _SB_MAKEMASK(2, S_BCM1480_SCD_TRSEQ_SWFUNC)
+#define V_BCM1480_SCD_TRSEQ_SWFUNC(x)      _SB_MAKEVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC)
+#define G_BCM1480_SCD_TRSEQ_SWFUNC(x)      _SB_GETVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC, M_BCM1480_SCD_TRSEQ_SWFUNC)
 
 /*
  * Trace Control Register (Table 49)
  * are defined below.
  */
 
-#define S_BCM1480_SCD_TRACE_CFG_MODE        16
-#define M_BCM1480_SCD_TRACE_CFG_MODE        _SB_MAKEMASK(2, S_BCM1480_SCD_TRACE_CFG_MODE)
-#define V_BCM1480_SCD_TRACE_CFG_MODE(x)     _SB_MAKEVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE)
-#define G_BCM1480_SCD_TRACE_CFG_MODE(x)     _SB_GETVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE, M_BCM1480_SCD_TRACE_CFG_MODE)
+#define S_BCM1480_SCD_TRACE_CFG_MODE       16
+#define M_BCM1480_SCD_TRACE_CFG_MODE       _SB_MAKEMASK(2, S_BCM1480_SCD_TRACE_CFG_MODE)
+#define V_BCM1480_SCD_TRACE_CFG_MODE(x)            _SB_MAKEVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE)
+#define G_BCM1480_SCD_TRACE_CFG_MODE(x)            _SB_GETVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE, M_BCM1480_SCD_TRACE_CFG_MODE)
 
 #define K_BCM1480_SCD_TRACE_CFG_MODE_BLOCKERS  0
-#define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT        1
+#define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT 1
 #define K_BCM1480_SCD_TRACE_CFG_MODE_FLOW_ID   2
 
 #endif /* _BCM1480_SCD_H */
index 2d1a26d..ae29dae 100644 (file)
 #ifdef CONFIG_SIBYTE_BIGSUR
 #define SIBYTE_BOARD_NAME "BCM91x80A/B (BigSur)"
 #define SIBYTE_HAVE_PCMCIA 1
-#define SIBYTE_HAVE_IDE    1
+#define SIBYTE_HAVE_IDE           1
 #endif
 
 /* Generic bus chip selects */
-#define LEDS_CS         3
-#define LEDS_PHYS       0x100a0000
+#define LEDS_CS                3
+#define LEDS_PHYS      0x100a0000
 
 #ifdef SIBYTE_HAVE_IDE
-#define IDE_CS          4
-#define IDE_PHYS        0x100b0000
-#define K_GPIO_GB_IDE   4
-#define K_INT_GB_IDE    (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define IDE_CS         4
+#define IDE_PHYS       0x100b0000
+#define K_GPIO_GB_IDE  4
+#define K_INT_GB_IDE   (K_INT_GPIO_0 + K_GPIO_GB_IDE)
 #endif
 
 #ifdef SIBYTE_HAVE_PCMCIA
-#define PCMCIA_CS       6
-#define PCMCIA_PHYS     0x11000000
+#define PCMCIA_CS      6
+#define PCMCIA_PHYS    0x11000000
 #define K_GPIO_PC_READY 9
-#define K_INT_PC_READY  (K_INT_GPIO_0 + K_GPIO_PC_READY)
+#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
 #endif
 
 #endif /* __ASM_SIBYTE_BIGSUR_H */
index 11cad71..793edba 100644 (file)
 
 #define SIBYTE_BOARD_NAME "Carmel"
 
-#define GPIO_PHY_INTERRUPT      2
-#define GPIO_NONMASKABLE_INT    3
-#define GPIO_CF_INSERTED        6
-#define GPIO_MONTEREY_RESET     7
-#define GPIO_QUADUART_INT       8
-#define GPIO_CF_INT             9
-#define GPIO_FPGA_CCLK          10
-#define GPIO_FPGA_DOUT          11
-#define GPIO_FPGA_DIN           12
-#define GPIO_FPGA_PGM           13
-#define GPIO_FPGA_DONE          14
-#define GPIO_FPGA_INIT          15
+#define GPIO_PHY_INTERRUPT     2
+#define GPIO_NONMASKABLE_INT   3
+#define GPIO_CF_INSERTED       6
+#define GPIO_MONTEREY_RESET    7
+#define GPIO_QUADUART_INT      8
+#define GPIO_CF_INT            9
+#define GPIO_FPGA_CCLK         10
+#define GPIO_FPGA_DOUT         11
+#define GPIO_FPGA_DIN          12
+#define GPIO_FPGA_PGM          13
+#define GPIO_FPGA_DONE         14
+#define GPIO_FPGA_INIT         15
 
-#define LEDS_CS                 2
-#define LEDS_PHYS               0x100C0000
-#define MLEDS_CS                3
-#define MLEDS_PHYS              0x100A0000
-#define UART_CS                 4
-#define UART_PHYS               0x100D0000
-#define ARAVALI_CS              5
-#define ARAVALI_PHYS            0x11000000
-#define IDE_CS                  6
-#define IDE_PHYS                0x100B0000
-#define ARAVALI2_CS             7
-#define ARAVALI2_PHYS           0x100E0000
+#define LEDS_CS                        2
+#define LEDS_PHYS              0x100C0000
+#define MLEDS_CS               3
+#define MLEDS_PHYS             0x100A0000
+#define UART_CS                        4
+#define UART_PHYS              0x100D0000
+#define ARAVALI_CS             5
+#define ARAVALI_PHYS           0x11000000
+#define IDE_CS                 6
+#define IDE_PHYS               0x100B0000
+#define ARAVALI2_CS            7
+#define ARAVALI2_PHYS          0x100E0000
 
 #if defined(CONFIG_SIBYTE_CARMEL)
-#define K_GPIO_GB_IDE   9
-#define K_INT_GB_IDE    (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define K_GPIO_GB_IDE  9
+#define K_INT_GB_IDE   (K_INT_GPIO_0 + K_GPIO_GB_IDE)
 #endif
 
 
index 80c1a05..d45dff9 100644 (file)
@@ -27,8 +27,8 @@
 
 #define SB1250_NR_IRQS 64
 
-#define BCM1480_NR_IRQS                 128
-#define BCM1480_NR_IRQS_HALF            64
+#define BCM1480_NR_IRQS                        128
+#define BCM1480_NR_IRQS_HALF           64
 
 #define SB1250_DUART_MINOR_BASE                64
 
index 09365f9..4364eb8 100644 (file)
     *
     *  Use like:
     *
-    *    #define SIBYTE_HDR_FEATURES   SIBYTE_HDR_FMASK_112x_PASS1
+    *   #define SIBYTE_HDR_FEATURES    SIBYTE_HDR_FMASK_112x_PASS1
     *
     *          Generate defines only for that revision of chip.
     *
-    *    #if SIBYTE_HDR_FEATURE(chip,pass)
+    *   #if SIBYTE_HDR_FEATURE(chip,pass)
     *
     *          True if header features for that revision or later of
-    *          that particular chip type are enabled in SIBYTE_HDR_FEATURES.
-    *          (Use this to bracket #defines for features present in a given
+    *          that particular chip type are enabled in SIBYTE_HDR_FEATURES.
+    *          (Use this to bracket #defines for features present in a given
     *          revision and later.)
     *
     *          Note that there is no implied ordering between chip types.
     *          SIBYTE_HDR_FEATURE(112x, PASS1) is OK, but
     *          SIBYTE_HDR_FEATURE(1120, pass1) is not (for two reasons).
     *
-    *    #if SIBYTE_HDR_FEATURE_UP_TO(chip,pass)
+    *   #if SIBYTE_HDR_FEATURE_UP_TO(chip,pass)
     *
     *          Same as SIBYTE_HDR_FEATURE, but true for the named revision
     *          and earlier revisions of the named chip type.
     *
-    *    #if SIBYTE_HDR_FEATURE_EXACT(chip,pass)
+    *   #if SIBYTE_HDR_FEATURE_EXACT(chip,pass)
     *
     *          Same as SIBYTE_HDR_FEATURE, but only true for the named
     *          revision of the named chip type.  (Note that this CANNOT
@@ -82,7 +82,7 @@
     *          particular chip/revision.  It will be true any time this
     *          chip/revision is included in SIBYTE_HDR_FEATURES.)
     *
-    *    #if SIBYTE_HDR_FEATURE_CHIP(chip)
+    *   #if SIBYTE_HDR_FEATURE_CHIP(chip)
     *
     *          True if header features for (any revision of) that chip type
     *          are enabled in SIBYTE_HDR_FEATURES.  (Use this to bracket
     *  ordering, so be careful when adding support for new minor revs.
     ********************************************************************* */
 
-#define        SIBYTE_HDR_FMASK_1250_ALL               0x000000ff
-#define        SIBYTE_HDR_FMASK_1250_PASS1             0x00000001
-#define        SIBYTE_HDR_FMASK_1250_PASS2             0x00000002
-#define        SIBYTE_HDR_FMASK_1250_PASS3             0x00000004
+#define SIBYTE_HDR_FMASK_1250_ALL              0x000000ff
+#define SIBYTE_HDR_FMASK_1250_PASS1            0x00000001
+#define SIBYTE_HDR_FMASK_1250_PASS2            0x00000002
+#define SIBYTE_HDR_FMASK_1250_PASS3            0x00000004
 
-#define        SIBYTE_HDR_FMASK_112x_ALL               0x00000f00
-#define        SIBYTE_HDR_FMASK_112x_PASS1             0x00000100
+#define SIBYTE_HDR_FMASK_112x_ALL              0x00000f00
+#define SIBYTE_HDR_FMASK_112x_PASS1            0x00000100
 
 #define SIBYTE_HDR_FMASK_1480_ALL              0x0000f000
 #define SIBYTE_HDR_FMASK_1480_PASS1            0x00001000
 #define SIBYTE_HDR_FMASK_1480_PASS2            0x00002000
 
-/* Bit mask for chip/revision.  (use _ALL for all revisions of a chip).  */
-#define        SIBYTE_HDR_FMASK(chip, pass)                                    \
+/* Bit mask for chip/revision. (use _ALL for all revisions of a chip).  */
+#define SIBYTE_HDR_FMASK(chip, pass)                                   \
     (SIBYTE_HDR_FMASK_ ## chip ## _ ## pass)
-#define        SIBYTE_HDR_FMASK_ALLREVS(chip)                                  \
+#define SIBYTE_HDR_FMASK_ALLREVS(chip)                                 \
     (SIBYTE_HDR_FMASK_ ## chip ## _ALL)
 
 /* Default constant value for all chips, all revisions */
-#define        SIBYTE_HDR_FMASK_ALL                                            \
+#define SIBYTE_HDR_FMASK_ALL                                           \
     (SIBYTE_HDR_FMASK_1250_ALL | SIBYTE_HDR_FMASK_112x_ALL             \
      | SIBYTE_HDR_FMASK_1480_ALL)
 
 /* This one is used for the "original" BCM1250/BCM112x chips.  We use this
    to weed out constants and macros that do not exist on later chips like
-   the BCM1480  */
+   the BCM1480 */
 #define SIBYTE_HDR_FMASK_1250_112x_ALL                                 \
     (SIBYTE_HDR_FMASK_1250_ALL | SIBYTE_HDR_FMASK_112x_ALL)
 #define SIBYTE_HDR_FMASK_1250_112x SIBYTE_HDR_FMASK_1250_112x_ALL
 
 #ifndef SIBYTE_HDR_FEATURES
-#define        SIBYTE_HDR_FEATURES                     SIBYTE_HDR_FMASK_ALL
+#define SIBYTE_HDR_FEATURES                    SIBYTE_HDR_FMASK_ALL
 #endif
 
 
 /* Bit mask for revisions of chip exclusively before the named revision.  */
-#define        SIBYTE_HDR_FMASK_BEFORE(chip, pass)                             \
+#define SIBYTE_HDR_FMASK_BEFORE(chip, pass)                            \
     ((SIBYTE_HDR_FMASK(chip, pass) - 1) & SIBYTE_HDR_FMASK_ALLREVS(chip))
 
-/* Bit mask for revisions of chip exclusively after the named revision.  */
-#define        SIBYTE_HDR_FMASK_AFTER(chip, pass)                              \
+/* Bit mask for revisions of chip exclusively after the named revision.         */
+#define SIBYTE_HDR_FMASK_AFTER(chip, pass)                             \
     (~(SIBYTE_HDR_FMASK(chip, pass)                                    \
      | (SIBYTE_HDR_FMASK(chip, pass) - 1)) & SIBYTE_HDR_FMASK_ALLREVS(chip))
 
 /*  *********************************************************************
     *  Naming schemes for constants in these files:
     *
-    *  M_xxx           MASK constant (identifies bits in a register).
-    *                  For multi-bit fields, all bits in the field will
-    *                  be set.
+    *  M_xxx          MASK constant (identifies bits in a register).
+    *                 For multi-bit fields, all bits in the field will
+    *                 be set.
     *
-    *  K_xxx           "Code" constant (value for data in a multi-bit
-    *                  field).  The value is right justified.
+    *  K_xxx          "Code" constant (value for data in a multi-bit
+    *                 field).  The value is right justified.
     *
-    *  V_xxx           "Value" constant.  This is the same as the
-    *                  corresponding "K_xxx" constant, except it is
-    *                  shifted to the correct position in the register.
+    *  V_xxx          "Value" constant.  This is the same as the
+    *                 corresponding "K_xxx" constant, except it is
+    *                 shifted to the correct position in the register.
     *
-    *  S_xxx           SHIFT constant.  This is the number of bits that
-    *                  a field value (code) needs to be shifted
-    *                  (towards the left) to put the value in the right
-    *                  position for the register.
+    *  S_xxx          SHIFT constant.  This is the number of bits that
+    *                 a field value (code) needs to be shifted
+    *                 (towards the left) to put the value in the right
+    *                 position for the register.
     *
-    *  A_xxx           ADDRESS constant.  This will be a physical
-    *                  address.  Use the PHYS_TO_K1 macro to generate
-    *                  a K1SEG address.
+    *  A_xxx          ADDRESS constant.  This will be a physical
+    *                 address.  Use the PHYS_TO_K1 macro to generate
+    *                 a K1SEG address.
     *
-    *  R_xxx           RELATIVE offset constant.  This is an offset from
-    *                  an A_xxx constant (usually the first register in
-    *                  a group).
+    *  R_xxx          RELATIVE offset constant.  This is an offset from
+    *                 an A_xxx constant (usually the first register in
+    *                 a group).
     *
-    *  G_xxx(X)        GET value.  This macro obtains a multi-bit field
-    *                  from a register, masks it, and shifts it to
-    *                  the bottom of the register (retrieving a K_xxx
-    *                  value, for example).
+    *  G_xxx(X)               GET value.  This macro obtains a multi-bit field
+    *                 from a register, masks it, and shifts it to
+    *                 the bottom of the register (retrieving a K_xxx
+    *                 value, for example).
     *
-    *  V_xxx(X)        VALUE.  This macro computes the value of a
-    *                  K_xxx constant shifted to the correct position
-    *                  in the register.
+    *  V_xxx(X)               VALUE.  This macro computes the value of a
+    *                 K_xxx constant shifted to the correct position
+    *                 in the register.
     ********************************************************************* */
 
 
index 6c44dfb..ea81713 100644 (file)
  */
 
 
-#define M_DMA_DROP                  _SB_MAKEMASK1(0)
+#define M_DMA_DROP                 _SB_MAKEMASK1(0)
 
-#define M_DMA_CHAIN_SEL             _SB_MAKEMASK1(1)
-#define M_DMA_RESERVED1             _SB_MAKEMASK1(2)
+#define M_DMA_CHAIN_SEL                    _SB_MAKEMASK1(1)
+#define M_DMA_RESERVED1                    _SB_MAKEMASK1(2)
 
 #define S_DMA_DESC_TYPE                    _SB_MAKE64(1)
 #define M_DMA_DESC_TYPE                    _SB_MAKEMASK(2, S_DMA_DESC_TYPE)
-#define V_DMA_DESC_TYPE(x)          _SB_MAKEVALUE(x, S_DMA_DESC_TYPE)
-#define G_DMA_DESC_TYPE(x)          _SB_GETVALUE(x, S_DMA_DESC_TYPE, M_DMA_DESC_TYPE)
+#define V_DMA_DESC_TYPE(x)         _SB_MAKEVALUE(x, S_DMA_DESC_TYPE)
+#define G_DMA_DESC_TYPE(x)         _SB_GETVALUE(x, S_DMA_DESC_TYPE, M_DMA_DESC_TYPE)
 
 #define K_DMA_DESC_TYPE_RING_AL                0
 #define K_DMA_DESC_TYPE_CHAIN_AL       1
 #define K_DMA_DESC_TYPE_RING_UAL_RMW   3
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define M_DMA_EOP_INT_EN            _SB_MAKEMASK1(3)
-#define M_DMA_HWM_INT_EN            _SB_MAKEMASK1(4)
-#define M_DMA_LWM_INT_EN            _SB_MAKEMASK1(5)
-#define M_DMA_TBX_EN                _SB_MAKEMASK1(6)
-#define M_DMA_TDX_EN                _SB_MAKEMASK1(7)
+#define M_DMA_EOP_INT_EN           _SB_MAKEMASK1(3)
+#define M_DMA_HWM_INT_EN           _SB_MAKEMASK1(4)
+#define M_DMA_LWM_INT_EN           _SB_MAKEMASK1(5)
+#define M_DMA_TBX_EN               _SB_MAKEMASK1(6)
+#define M_DMA_TDX_EN               _SB_MAKEMASK1(7)
 
-#define S_DMA_INT_PKTCNT            _SB_MAKE64(8)
-#define M_DMA_INT_PKTCNT            _SB_MAKEMASK(8, S_DMA_INT_PKTCNT)
-#define V_DMA_INT_PKTCNT(x)         _SB_MAKEVALUE(x, S_DMA_INT_PKTCNT)
-#define G_DMA_INT_PKTCNT(x)         _SB_GETVALUE(x, S_DMA_INT_PKTCNT, M_DMA_INT_PKTCNT)
+#define S_DMA_INT_PKTCNT           _SB_MAKE64(8)
+#define M_DMA_INT_PKTCNT           _SB_MAKEMASK(8, S_DMA_INT_PKTCNT)
+#define V_DMA_INT_PKTCNT(x)        _SB_MAKEVALUE(x, S_DMA_INT_PKTCNT)
+#define G_DMA_INT_PKTCNT(x)        _SB_GETVALUE(x, S_DMA_INT_PKTCNT, M_DMA_INT_PKTCNT)
 
-#define S_DMA_RINGSZ                _SB_MAKE64(16)
-#define M_DMA_RINGSZ                _SB_MAKEMASK(16, S_DMA_RINGSZ)
-#define V_DMA_RINGSZ(x)             _SB_MAKEVALUE(x, S_DMA_RINGSZ)
-#define G_DMA_RINGSZ(x)             _SB_GETVALUE(x, S_DMA_RINGSZ, M_DMA_RINGSZ)
+#define S_DMA_RINGSZ               _SB_MAKE64(16)
+#define M_DMA_RINGSZ               _SB_MAKEMASK(16, S_DMA_RINGSZ)
+#define V_DMA_RINGSZ(x)                    _SB_MAKEVALUE(x, S_DMA_RINGSZ)
+#define G_DMA_RINGSZ(x)                    _SB_GETVALUE(x, S_DMA_RINGSZ, M_DMA_RINGSZ)
 
-#define S_DMA_HIGH_WATERMARK        _SB_MAKE64(32)
-#define M_DMA_HIGH_WATERMARK        _SB_MAKEMASK(16, S_DMA_HIGH_WATERMARK)
-#define V_DMA_HIGH_WATERMARK(x)     _SB_MAKEVALUE(x, S_DMA_HIGH_WATERMARK)
-#define G_DMA_HIGH_WATERMARK(x)     _SB_GETVALUE(x, S_DMA_HIGH_WATERMARK, M_DMA_HIGH_WATERMARK)
+#define S_DMA_HIGH_WATERMARK       _SB_MAKE64(32)
+#define M_DMA_HIGH_WATERMARK       _SB_MAKEMASK(16, S_DMA_HIGH_WATERMARK)
+#define V_DMA_HIGH_WATERMARK(x)            _SB_MAKEVALUE(x, S_DMA_HIGH_WATERMARK)
+#define G_DMA_HIGH_WATERMARK(x)            _SB_GETVALUE(x, S_DMA_HIGH_WATERMARK, M_DMA_HIGH_WATERMARK)
 
-#define S_DMA_LOW_WATERMARK         _SB_MAKE64(48)
-#define M_DMA_LOW_WATERMARK         _SB_MAKEMASK(16, S_DMA_LOW_WATERMARK)
-#define V_DMA_LOW_WATERMARK(x)      _SB_MAKEVALUE(x, S_DMA_LOW_WATERMARK)
-#define G_DMA_LOW_WATERMARK(x)      _SB_GETVALUE(x, S_DMA_LOW_WATERMARK, M_DMA_LOW_WATERMARK)
+#define S_DMA_LOW_WATERMARK        _SB_MAKE64(48)
+#define M_DMA_LOW_WATERMARK        _SB_MAKEMASK(16, S_DMA_LOW_WATERMARK)
+#define V_DMA_LOW_WATERMARK(x)     _SB_MAKEVALUE(x, S_DMA_LOW_WATERMARK)
+#define G_DMA_LOW_WATERMARK(x)     _SB_GETVALUE(x, S_DMA_LOW_WATERMARK, M_DMA_LOW_WATERMARK)
 
 /*
  * Ethernet and Serial DMA Configuration Register 1 (Table 7-5)
  * Registers: DMA_CONFIG1_SER_x_TX
  */
 
-#define M_DMA_HDR_CF_EN             _SB_MAKEMASK1(0)
-#define M_DMA_ASIC_XFR_EN           _SB_MAKEMASK1(1)
-#define M_DMA_PRE_ADDR_EN           _SB_MAKEMASK1(2)
-#define M_DMA_FLOW_CTL_EN           _SB_MAKEMASK1(3)
-#define M_DMA_NO_DSCR_UPDT          _SB_MAKEMASK1(4)
+#define M_DMA_HDR_CF_EN                    _SB_MAKEMASK1(0)
+#define M_DMA_ASIC_XFR_EN          _SB_MAKEMASK1(1)
+#define M_DMA_PRE_ADDR_EN          _SB_MAKEMASK1(2)
+#define M_DMA_FLOW_CTL_EN          _SB_MAKEMASK1(3)
+#define M_DMA_NO_DSCR_UPDT         _SB_MAKEMASK1(4)
 #define M_DMA_L2CA                 _SB_MAKEMASK1(5)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_DMA_TX_FC_PAUSE_EN       _SB_MAKEMASK1(7)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define M_DMA_MBZ1                  _SB_MAKEMASK(6, 15)
+#define M_DMA_MBZ1                 _SB_MAKEMASK(6, 15)
 
-#define S_DMA_HDR_SIZE              _SB_MAKE64(21)
-#define M_DMA_HDR_SIZE              _SB_MAKEMASK(9, S_DMA_HDR_SIZE)
-#define V_DMA_HDR_SIZE(x)           _SB_MAKEVALUE(x, S_DMA_HDR_SIZE)
-#define G_DMA_HDR_SIZE(x)           _SB_GETVALUE(x, S_DMA_HDR_SIZE, M_DMA_HDR_SIZE)
+#define S_DMA_HDR_SIZE             _SB_MAKE64(21)
+#define M_DMA_HDR_SIZE             _SB_MAKEMASK(9, S_DMA_HDR_SIZE)
+#define V_DMA_HDR_SIZE(x)          _SB_MAKEVALUE(x, S_DMA_HDR_SIZE)
+#define G_DMA_HDR_SIZE(x)          _SB_GETVALUE(x, S_DMA_HDR_SIZE, M_DMA_HDR_SIZE)
 
-#define M_DMA_MBZ2                  _SB_MAKEMASK(5, 32)
+#define M_DMA_MBZ2                 _SB_MAKEMASK(5, 32)
 
-#define S_DMA_ASICXFR_SIZE          _SB_MAKE64(37)
-#define M_DMA_ASICXFR_SIZE          _SB_MAKEMASK(9, S_DMA_ASICXFR_SIZE)
-#define V_DMA_ASICXFR_SIZE(x)       _SB_MAKEVALUE(x, S_DMA_ASICXFR_SIZE)
-#define G_DMA_ASICXFR_SIZE(x)       _SB_GETVALUE(x, S_DMA_ASICXFR_SIZE, M_DMA_ASICXFR_SIZE)
+#define S_DMA_ASICXFR_SIZE         _SB_MAKE64(37)
+#define M_DMA_ASICXFR_SIZE         _SB_MAKEMASK(9, S_DMA_ASICXFR_SIZE)
+#define V_DMA_ASICXFR_SIZE(x)      _SB_MAKEVALUE(x, S_DMA_ASICXFR_SIZE)
+#define G_DMA_ASICXFR_SIZE(x)      _SB_GETVALUE(x, S_DMA_ASICXFR_SIZE, M_DMA_ASICXFR_SIZE)
 
-#define S_DMA_INT_TIMEOUT           _SB_MAKE64(48)
-#define M_DMA_INT_TIMEOUT           _SB_MAKEMASK(16, S_DMA_INT_TIMEOUT)
-#define V_DMA_INT_TIMEOUT(x)        _SB_MAKEVALUE(x, S_DMA_INT_TIMEOUT)
-#define G_DMA_INT_TIMEOUT(x)        _SB_GETVALUE(x, S_DMA_INT_TIMEOUT, M_DMA_INT_TIMEOUT)
+#define S_DMA_INT_TIMEOUT          _SB_MAKE64(48)
+#define M_DMA_INT_TIMEOUT          _SB_MAKEMASK(16, S_DMA_INT_TIMEOUT)
+#define V_DMA_INT_TIMEOUT(x)       _SB_MAKEVALUE(x, S_DMA_INT_TIMEOUT)
+#define G_DMA_INT_TIMEOUT(x)       _SB_GETVALUE(x, S_DMA_INT_TIMEOUT, M_DMA_INT_TIMEOUT)
 
 /*
  * Ethernet and Serial DMA Descriptor base address (Table 7-6)
  */
 
-#define M_DMA_DSCRBASE_MBZ          _SB_MAKEMASK(4, 0)
+#define M_DMA_DSCRBASE_MBZ         _SB_MAKEMASK(4, 0)
 
 
 /*
  * ASIC Mode Base Address (Table 7-7)
  */
 
-#define M_DMA_ASIC_BASE_MBZ         _SB_MAKEMASK(20, 0)
+#define M_DMA_ASIC_BASE_MBZ        _SB_MAKEMASK(20, 0)
 
 /*
  * DMA Descriptor Count Registers (Table 7-8)
  * Current Descriptor Address Register (Table 7-11)
  */
 
-#define S_DMA_CURDSCR_ADDR          _SB_MAKE64(0)
-#define M_DMA_CURDSCR_ADDR          _SB_MAKEMASK(40, S_DMA_CURDSCR_ADDR)
-#define S_DMA_CURDSCR_COUNT         _SB_MAKE64(40)
-#define M_DMA_CURDSCR_COUNT         _SB_MAKEMASK(16, S_DMA_CURDSCR_COUNT)
+#define S_DMA_CURDSCR_ADDR         _SB_MAKE64(0)
+#define M_DMA_CURDSCR_ADDR         _SB_MAKEMASK(40, S_DMA_CURDSCR_ADDR)
+#define S_DMA_CURDSCR_COUNT        _SB_MAKE64(40)
+#define M_DMA_CURDSCR_COUNT        _SB_MAKEMASK(16, S_DMA_CURDSCR_COUNT)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_DMA_TX_CH_PAUSE_ON       _SB_MAKEMASK1(56)
  * Receive Packet Drop Registers
  */
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_OODLOST_RX           _SB_MAKE64(0)
-#define M_DMA_OODLOST_RX           _SB_MAKEMASK(16, S_DMA_OODLOST_RX)
-#define G_DMA_OODLOST_RX(x)        _SB_GETVALUE(x, S_DMA_OODLOST_RX, M_DMA_OODLOST_RX)
+#define S_DMA_OODLOST_RX          _SB_MAKE64(0)
+#define M_DMA_OODLOST_RX          _SB_MAKEMASK(16, S_DMA_OODLOST_RX)
+#define G_DMA_OODLOST_RX(x)       _SB_GETVALUE(x, S_DMA_OODLOST_RX, M_DMA_OODLOST_RX)
 
-#define S_DMA_EOP_COUNT_RX         _SB_MAKE64(16)
-#define M_DMA_EOP_COUNT_RX         _SB_MAKEMASK(8, S_DMA_EOP_COUNT_RX)
-#define G_DMA_EOP_COUNT_RX(x)      _SB_GETVALUE(x, S_DMA_EOP_COUNT_RX, M_DMA_EOP_COUNT_RX)
+#define S_DMA_EOP_COUNT_RX        _SB_MAKE64(16)
+#define M_DMA_EOP_COUNT_RX        _SB_MAKEMASK(8, S_DMA_EOP_COUNT_RX)
+#define G_DMA_EOP_COUNT_RX(x)     _SB_GETVALUE(x, S_DMA_EOP_COUNT_RX, M_DMA_EOP_COUNT_RX)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
 /*  *********************************************************************
  * Descriptor doubleword "A"  (Table 7-12)
  */
 
-#define S_DMA_DSCRA_OFFSET          _SB_MAKE64(0)
-#define M_DMA_DSCRA_OFFSET          _SB_MAKEMASK(5, S_DMA_DSCRA_OFFSET)
-#define V_DMA_DSCRA_OFFSET(x)       _SB_MAKEVALUE(x, S_DMA_DSCRA_OFFSET)
-#define G_DMA_DSCRA_OFFSET(x)       _SB_GETVALUE(x, S_DMA_DSCRA_OFFSET, M_DMA_DSCRA_OFFSET)
+#define S_DMA_DSCRA_OFFSET         _SB_MAKE64(0)
+#define M_DMA_DSCRA_OFFSET         _SB_MAKEMASK(5, S_DMA_DSCRA_OFFSET)
+#define V_DMA_DSCRA_OFFSET(x)      _SB_MAKEVALUE(x, S_DMA_DSCRA_OFFSET)
+#define G_DMA_DSCRA_OFFSET(x)      _SB_GETVALUE(x, S_DMA_DSCRA_OFFSET, M_DMA_DSCRA_OFFSET)
 
 /* Note: Don't shift the address over, just mask it with the mask below */
-#define S_DMA_DSCRA_A_ADDR          _SB_MAKE64(5)
-#define M_DMA_DSCRA_A_ADDR          _SB_MAKEMASK(35, S_DMA_DSCRA_A_ADDR)
+#define S_DMA_DSCRA_A_ADDR         _SB_MAKE64(5)
+#define M_DMA_DSCRA_A_ADDR         _SB_MAKEMASK(35, S_DMA_DSCRA_A_ADDR)
 
 #define M_DMA_DSCRA_A_ADDR_OFFSET   (M_DMA_DSCRA_OFFSET | M_DMA_DSCRA_A_ADDR)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_DSCRA_A_ADDR_UA        _SB_MAKE64(0)
-#define M_DMA_DSCRA_A_ADDR_UA        _SB_MAKEMASK(40, S_DMA_DSCRA_A_ADDR_UA)
+#define S_DMA_DSCRA_A_ADDR_UA       _SB_MAKE64(0)
+#define M_DMA_DSCRA_A_ADDR_UA       _SB_MAKEMASK(40, S_DMA_DSCRA_A_ADDR_UA)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define S_DMA_DSCRA_A_SIZE          _SB_MAKE64(40)
-#define M_DMA_DSCRA_A_SIZE          _SB_MAKEMASK(9, S_DMA_DSCRA_A_SIZE)
-#define V_DMA_DSCRA_A_SIZE(x)       _SB_MAKEVALUE(x, S_DMA_DSCRA_A_SIZE)
-#define G_DMA_DSCRA_A_SIZE(x)       _SB_GETVALUE(x, S_DMA_DSCRA_A_SIZE, M_DMA_DSCRA_A_SIZE)
+#define S_DMA_DSCRA_A_SIZE         _SB_MAKE64(40)
+#define M_DMA_DSCRA_A_SIZE         _SB_MAKEMASK(9, S_DMA_DSCRA_A_SIZE)
+#define V_DMA_DSCRA_A_SIZE(x)      _SB_MAKEVALUE(x, S_DMA_DSCRA_A_SIZE)
+#define G_DMA_DSCRA_A_SIZE(x)      _SB_GETVALUE(x, S_DMA_DSCRA_A_SIZE, M_DMA_DSCRA_A_SIZE)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define S_DMA_DSCRA_DSCR_CNT       _SB_MAKE64(40)
 #define G_DMA_DSCRA_DSCR_CNT(x)            _SB_GETVALUE(x, S_DMA_DSCRA_DSCR_CNT, M_DMA_DSCRA_DSCR_CNT)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define M_DMA_DSCRA_INTERRUPT       _SB_MAKEMASK1(49)
+#define M_DMA_DSCRA_INTERRUPT      _SB_MAKEMASK1(49)
 #define M_DMA_DSCRA_OFFSETB        _SB_MAKEMASK1(50)
 
-#define S_DMA_DSCRA_STATUS          _SB_MAKE64(51)
-#define M_DMA_DSCRA_STATUS          _SB_MAKEMASK(13, S_DMA_DSCRA_STATUS)
-#define V_DMA_DSCRA_STATUS(x)       _SB_MAKEVALUE(x, S_DMA_DSCRA_STATUS)
-#define G_DMA_DSCRA_STATUS(x)       _SB_GETVALUE(x, S_DMA_DSCRA_STATUS, M_DMA_DSCRA_STATUS)
+#define S_DMA_DSCRA_STATUS         _SB_MAKE64(51)
+#define M_DMA_DSCRA_STATUS         _SB_MAKEMASK(13, S_DMA_DSCRA_STATUS)
+#define V_DMA_DSCRA_STATUS(x)      _SB_MAKEVALUE(x, S_DMA_DSCRA_STATUS)
+#define G_DMA_DSCRA_STATUS(x)      _SB_GETVALUE(x, S_DMA_DSCRA_STATUS, M_DMA_DSCRA_STATUS)
 
 /*
  * Descriptor doubleword "B"  (Table 7-13)
  */
 
 
-#define S_DMA_DSCRB_OPTIONS         _SB_MAKE64(0)
-#define M_DMA_DSCRB_OPTIONS         _SB_MAKEMASK(4, S_DMA_DSCRB_OPTIONS)
-#define V_DMA_DSCRB_OPTIONS(x)      _SB_MAKEVALUE(x, S_DMA_DSCRB_OPTIONS)
-#define G_DMA_DSCRB_OPTIONS(x)      _SB_GETVALUE(x, S_DMA_DSCRB_OPTIONS, M_DMA_DSCRB_OPTIONS)
+#define S_DMA_DSCRB_OPTIONS        _SB_MAKE64(0)
+#define M_DMA_DSCRB_OPTIONS        _SB_MAKEMASK(4, S_DMA_DSCRB_OPTIONS)
+#define V_DMA_DSCRB_OPTIONS(x)     _SB_MAKEVALUE(x, S_DMA_DSCRB_OPTIONS)
+#define G_DMA_DSCRB_OPTIONS(x)     _SB_GETVALUE(x, S_DMA_DSCRB_OPTIONS, M_DMA_DSCRB_OPTIONS)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_DSCRB_A_SIZE        _SB_MAKE64(8)
-#define M_DMA_DSCRB_A_SIZE        _SB_MAKEMASK(14, S_DMA_DSCRB_A_SIZE)
-#define V_DMA_DSCRB_A_SIZE(x)     _SB_MAKEVALUE(x, S_DMA_DSCRB_A_SIZE)
-#define G_DMA_DSCRB_A_SIZE(x)     _SB_GETVALUE(x, S_DMA_DSCRB_A_SIZE, M_DMA_DSCRB_A_SIZE)
+#define S_DMA_DSCRB_A_SIZE       _SB_MAKE64(8)
+#define M_DMA_DSCRB_A_SIZE       _SB_MAKEMASK(14, S_DMA_DSCRB_A_SIZE)
+#define V_DMA_DSCRB_A_SIZE(x)    _SB_MAKEVALUE(x, S_DMA_DSCRB_A_SIZE)
+#define G_DMA_DSCRB_A_SIZE(x)    _SB_GETVALUE(x, S_DMA_DSCRB_A_SIZE, M_DMA_DSCRB_A_SIZE)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define R_DMA_DSCRB_ADDR            _SB_MAKE64(0x10)
+#define R_DMA_DSCRB_ADDR           _SB_MAKE64(0x10)
 
 /* Note: Don't shift the address over, just mask it with the mask below */
-#define S_DMA_DSCRB_B_ADDR          _SB_MAKE64(5)
-#define M_DMA_DSCRB_B_ADDR          _SB_MAKEMASK(35, S_DMA_DSCRB_B_ADDR)
+#define S_DMA_DSCRB_B_ADDR         _SB_MAKE64(5)
+#define M_DMA_DSCRB_B_ADDR         _SB_MAKEMASK(35, S_DMA_DSCRB_B_ADDR)
 
-#define S_DMA_DSCRB_B_SIZE          _SB_MAKE64(40)
-#define M_DMA_DSCRB_B_SIZE          _SB_MAKEMASK(9, S_DMA_DSCRB_B_SIZE)
-#define V_DMA_DSCRB_B_SIZE(x)       _SB_MAKEVALUE(x, S_DMA_DSCRB_B_SIZE)
-#define G_DMA_DSCRB_B_SIZE(x)       _SB_GETVALUE(x, S_DMA_DSCRB_B_SIZE, M_DMA_DSCRB_B_SIZE)
+#define S_DMA_DSCRB_B_SIZE         _SB_MAKE64(40)
+#define M_DMA_DSCRB_B_SIZE         _SB_MAKEMASK(9, S_DMA_DSCRB_B_SIZE)
+#define V_DMA_DSCRB_B_SIZE(x)      _SB_MAKEVALUE(x, S_DMA_DSCRB_B_SIZE)
+#define G_DMA_DSCRB_B_SIZE(x)      _SB_GETVALUE(x, S_DMA_DSCRB_B_SIZE, M_DMA_DSCRB_B_SIZE)
 
-#define M_DMA_DSCRB_B_VALID         _SB_MAKEMASK1(49)
+#define M_DMA_DSCRB_B_VALID        _SB_MAKEMASK1(49)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define S_DMA_DSCRB_PKT_SIZE_MSB    _SB_MAKE64(48)
 #define G_DMA_DSCRB_PKT_SIZE_MSB(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE_MSB, M_DMA_DSCRB_PKT_SIZE_MSB)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define S_DMA_DSCRB_PKT_SIZE        _SB_MAKE64(50)
-#define M_DMA_DSCRB_PKT_SIZE        _SB_MAKEMASK(14, S_DMA_DSCRB_PKT_SIZE)
-#define V_DMA_DSCRB_PKT_SIZE(x)     _SB_MAKEVALUE(x, S_DMA_DSCRB_PKT_SIZE)
-#define G_DMA_DSCRB_PKT_SIZE(x)     _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE, M_DMA_DSCRB_PKT_SIZE)
+#define S_DMA_DSCRB_PKT_SIZE       _SB_MAKE64(50)
+#define M_DMA_DSCRB_PKT_SIZE       _SB_MAKEMASK(14, S_DMA_DSCRB_PKT_SIZE)
+#define V_DMA_DSCRB_PKT_SIZE(x)            _SB_MAKEVALUE(x, S_DMA_DSCRB_PKT_SIZE)
+#define G_DMA_DSCRB_PKT_SIZE(x)            _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE, M_DMA_DSCRB_PKT_SIZE)
 
 /*
  * from pass2 some bits in dscr_b are also used for rx status
  */
-#define S_DMA_DSCRB_STATUS          _SB_MAKE64(0)
-#define M_DMA_DSCRB_STATUS          _SB_MAKEMASK(1, S_DMA_DSCRB_STATUS)
-#define V_DMA_DSCRB_STATUS(x)       _SB_MAKEVALUE(x, S_DMA_DSCRB_STATUS)
-#define G_DMA_DSCRB_STATUS(x)       _SB_GETVALUE(x, S_DMA_DSCRB_STATUS, M_DMA_DSCRB_STATUS)
+#define S_DMA_DSCRB_STATUS         _SB_MAKE64(0)
+#define M_DMA_DSCRB_STATUS         _SB_MAKEMASK(1, S_DMA_DSCRB_STATUS)
+#define V_DMA_DSCRB_STATUS(x)      _SB_MAKEVALUE(x, S_DMA_DSCRB_STATUS)
+#define G_DMA_DSCRB_STATUS(x)      _SB_GETVALUE(x, S_DMA_DSCRB_STATUS, M_DMA_DSCRB_STATUS)
 
 /*
  * Ethernet Descriptor Status Bits (Table 7-15)
  */
 
-#define M_DMA_ETHRX_BADIP4CS        _SB_MAKEMASK1(51)
+#define M_DMA_ETHRX_BADIP4CS       _SB_MAKEMASK1(51)
 #define M_DMA_ETHRX_DSCRERR        _SB_MAKEMASK1(52)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_DMA_ETH_CRC_FLAG     _SB_MAKEMASK1(2)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define S_DMA_ETHRX_RXCH            53
-#define M_DMA_ETHRX_RXCH            _SB_MAKEMASK(2, S_DMA_ETHRX_RXCH)
-#define V_DMA_ETHRX_RXCH(x)         _SB_MAKEVALUE(x, S_DMA_ETHRX_RXCH)
-#define G_DMA_ETHRX_RXCH(x)         _SB_GETVALUE(x, S_DMA_ETHRX_RXCH, M_DMA_ETHRX_RXCH)
+#define S_DMA_ETHRX_RXCH           53
+#define M_DMA_ETHRX_RXCH           _SB_MAKEMASK(2, S_DMA_ETHRX_RXCH)
+#define V_DMA_ETHRX_RXCH(x)        _SB_MAKEVALUE(x, S_DMA_ETHRX_RXCH)
+#define G_DMA_ETHRX_RXCH(x)        _SB_GETVALUE(x, S_DMA_ETHRX_RXCH, M_DMA_ETHRX_RXCH)
 
-#define S_DMA_ETHRX_PKTTYPE         55
-#define M_DMA_ETHRX_PKTTYPE         _SB_MAKEMASK(3, S_DMA_ETHRX_PKTTYPE)
-#define V_DMA_ETHRX_PKTTYPE(x)      _SB_MAKEVALUE(x, S_DMA_ETHRX_PKTTYPE)
-#define G_DMA_ETHRX_PKTTYPE(x)      _SB_GETVALUE(x, S_DMA_ETHRX_PKTTYPE, M_DMA_ETHRX_PKTTYPE)
+#define S_DMA_ETHRX_PKTTYPE        55
+#define M_DMA_ETHRX_PKTTYPE        _SB_MAKEMASK(3, S_DMA_ETHRX_PKTTYPE)
+#define V_DMA_ETHRX_PKTTYPE(x)     _SB_MAKEVALUE(x, S_DMA_ETHRX_PKTTYPE)
+#define G_DMA_ETHRX_PKTTYPE(x)     _SB_GETVALUE(x, S_DMA_ETHRX_PKTTYPE, M_DMA_ETHRX_PKTTYPE)
 
 #define K_DMA_ETHRX_PKTTYPE_IPV4    0
 #define K_DMA_ETHRX_PKTTYPE_ARPV4   1
-#define K_DMA_ETHRX_PKTTYPE_802     2
+#define K_DMA_ETHRX_PKTTYPE_802            2
 #define K_DMA_ETHRX_PKTTYPE_OTHER   3
 #define K_DMA_ETHRX_PKTTYPE_USER0   4
 #define K_DMA_ETHRX_PKTTYPE_USER1   5
 #define K_DMA_ETHRX_PKTTYPE_USER2   6
 #define K_DMA_ETHRX_PKTTYPE_USER3   7
 
-#define M_DMA_ETHRX_MATCH_HASH      _SB_MAKEMASK1(58)
-#define M_DMA_ETHRX_MATCH_EXACT     _SB_MAKEMASK1(59)
-#define M_DMA_ETHRX_BCAST           _SB_MAKEMASK1(60)
-#define M_DMA_ETHRX_MCAST           _SB_MAKEMASK1(61)
-#define M_DMA_ETHRX_BAD                    _SB_MAKEMASK1(62)
-#define M_DMA_ETHRX_SOP             _SB_MAKEMASK1(63)
+#define M_DMA_ETHRX_MATCH_HASH     _SB_MAKEMASK1(58)
+#define M_DMA_ETHRX_MATCH_EXACT            _SB_MAKEMASK1(59)
+#define M_DMA_ETHRX_BCAST          _SB_MAKEMASK1(60)
+#define M_DMA_ETHRX_MCAST          _SB_MAKEMASK1(61)
+#define M_DMA_ETHRX_BAD                    _SB_MAKEMASK1(62)
+#define M_DMA_ETHRX_SOP                    _SB_MAKEMASK1(63)
 
 /*
  * Ethernet Transmit Status Bits (Table 7-16)
  */
 
-#define M_DMA_ETHTX_SOP                    _SB_MAKEMASK1(63)
+#define M_DMA_ETHTX_SOP                    _SB_MAKEMASK1(63)
 
 /*
  * Ethernet Transmit Options (Table 7-17)
  */
 
-#define K_DMA_ETHTX_NOTSOP          _SB_MAKE64(0x00)
-#define K_DMA_ETHTX_APPENDCRC       _SB_MAKE64(0x01)
-#define K_DMA_ETHTX_REPLACECRC      _SB_MAKE64(0x02)
+#define K_DMA_ETHTX_NOTSOP         _SB_MAKE64(0x00)
+#define K_DMA_ETHTX_APPENDCRC      _SB_MAKE64(0x01)
+#define K_DMA_ETHTX_REPLACECRC     _SB_MAKE64(0x02)
 #define K_DMA_ETHTX_APPENDCRC_APPENDPAD _SB_MAKE64(0x03)
 #define K_DMA_ETHTX_APPENDVLAN_REPLACECRC _SB_MAKE64(0x04)
 #define K_DMA_ETHTX_REMOVEVLAN_REPLACECRC _SB_MAKE64(0x05)
 #define K_DMA_ETHTX_REPLACEVLAN_REPLACECRC _SB_MAKE64(0x6)
-#define K_DMA_ETHTX_NOMODS          _SB_MAKE64(0x07)
-#define K_DMA_ETHTX_RESERVED1       _SB_MAKE64(0x08)
+#define K_DMA_ETHTX_NOMODS         _SB_MAKE64(0x07)
+#define K_DMA_ETHTX_RESERVED1      _SB_MAKE64(0x08)
 #define K_DMA_ETHTX_REPLACESADDR_APPENDCRC _SB_MAKE64(0x09)
 #define K_DMA_ETHTX_REPLACESADDR_REPLACECRC _SB_MAKE64(0x0A)
 #define K_DMA_ETHTX_REPLACESADDR_APPENDCRC_APPENDPAD _SB_MAKE64(0x0B)
 #define K_DMA_ETHTX_REPLACESADDR_APPENDVLAN_REPLACECRC _SB_MAKE64(0x0C)
 #define K_DMA_ETHTX_REPLACESADDR_REMOVEVLAN_REPLACECRC _SB_MAKE64(0x0D)
 #define K_DMA_ETHTX_REPLACESADDR_REPLACEVLAN_REPLACECRC _SB_MAKE64(0x0E)
-#define K_DMA_ETHTX_RESERVED2       _SB_MAKE64(0x0F)
+#define K_DMA_ETHTX_RESERVED2      _SB_MAKE64(0x0F)
 
 /*
  * Serial Receive Options (Table 7-18)
  */
-#define M_DMA_SERRX_CRC_ERROR       _SB_MAKEMASK1(56)
-#define M_DMA_SERRX_ABORT           _SB_MAKEMASK1(57)
-#define M_DMA_SERRX_OCTET_ERROR     _SB_MAKEMASK1(58)
+#define M_DMA_SERRX_CRC_ERROR      _SB_MAKEMASK1(56)
+#define M_DMA_SERRX_ABORT          _SB_MAKEMASK1(57)
+#define M_DMA_SERRX_OCTET_ERROR            _SB_MAKEMASK1(58)
 #define M_DMA_SERRX_LONGFRAME_ERROR _SB_MAKEMASK1(59)
 #define M_DMA_SERRX_SHORTFRAME_ERROR _SB_MAKEMASK1(60)
 #define M_DMA_SERRX_OVERRUN_ERROR   _SB_MAKEMASK1(61)
-#define M_DMA_SERRX_GOOD            _SB_MAKEMASK1(62)
-#define M_DMA_SERRX_SOP             _SB_MAKEMASK1(63)
+#define M_DMA_SERRX_GOOD           _SB_MAKEMASK1(62)
+#define M_DMA_SERRX_SOP                    _SB_MAKEMASK1(63)
 
 /*
  * Serial Transmit Status Bits (Table 7-20)
  * Serial Transmit Options (Table 7-21)
  */
 
-#define K_DMA_SERTX_RESERVED        _SB_MAKEMASK1(0)
-#define K_DMA_SERTX_APPENDCRC       _SB_MAKEMASK1(1)
-#define K_DMA_SERTX_APPENDPAD       _SB_MAKEMASK1(2)
-#define K_DMA_SERTX_ABORT           _SB_MAKEMASK1(3)
+#define K_DMA_SERTX_RESERVED       _SB_MAKEMASK1(0)
+#define K_DMA_SERTX_APPENDCRC      _SB_MAKEMASK1(1)
+#define K_DMA_SERTX_APPENDPAD      _SB_MAKEMASK1(2)
+#define K_DMA_SERTX_ABORT          _SB_MAKEMASK1(3)
 
 
 /*  *********************************************************************
  * Register: DM_DSCR_BASE_3
  */
 
-#define M_DM_DSCR_BASE_MBZ          _SB_MAKEMASK(4, 0)
+#define M_DM_DSCR_BASE_MBZ         _SB_MAKEMASK(4, 0)
 
 /*  Note: Just mask the base address and then OR it in. */
-#define S_DM_DSCR_BASE_ADDR         _SB_MAKE64(4)
-#define M_DM_DSCR_BASE_ADDR         _SB_MAKEMASK(36, S_DM_DSCR_BASE_ADDR)
+#define S_DM_DSCR_BASE_ADDR        _SB_MAKE64(4)
+#define M_DM_DSCR_BASE_ADDR        _SB_MAKEMASK(36, S_DM_DSCR_BASE_ADDR)
 
-#define S_DM_DSCR_BASE_RINGSZ       _SB_MAKE64(40)
-#define M_DM_DSCR_BASE_RINGSZ       _SB_MAKEMASK(16, S_DM_DSCR_BASE_RINGSZ)
+#define S_DM_DSCR_BASE_RINGSZ      _SB_MAKE64(40)
+#define M_DM_DSCR_BASE_RINGSZ      _SB_MAKEMASK(16, S_DM_DSCR_BASE_RINGSZ)
 #define V_DM_DSCR_BASE_RINGSZ(x)    _SB_MAKEVALUE(x, S_DM_DSCR_BASE_RINGSZ)
 #define G_DM_DSCR_BASE_RINGSZ(x)    _SB_GETVALUE(x, S_DM_DSCR_BASE_RINGSZ, M_DM_DSCR_BASE_RINGSZ)
 
-#define S_DM_DSCR_BASE_PRIORITY     _SB_MAKE64(56)
-#define M_DM_DSCR_BASE_PRIORITY     _SB_MAKEMASK(3, S_DM_DSCR_BASE_PRIORITY)
+#define S_DM_DSCR_BASE_PRIORITY            _SB_MAKE64(56)
+#define M_DM_DSCR_BASE_PRIORITY            _SB_MAKEMASK(3, S_DM_DSCR_BASE_PRIORITY)
 #define V_DM_DSCR_BASE_PRIORITY(x)  _SB_MAKEVALUE(x, S_DM_DSCR_BASE_PRIORITY)
 #define G_DM_DSCR_BASE_PRIORITY(x)  _SB_GETVALUE(x, S_DM_DSCR_BASE_PRIORITY, M_DM_DSCR_BASE_PRIORITY)
 
 #define K_DM_DSCR_BASE_PRIORITY_8   3
 #define K_DM_DSCR_BASE_PRIORITY_16  4
 
-#define M_DM_DSCR_BASE_ACTIVE       _SB_MAKEMASK1(59)
+#define M_DM_DSCR_BASE_ACTIVE      _SB_MAKEMASK1(59)
 #define M_DM_DSCR_BASE_INTERRUPT    _SB_MAKEMASK1(60)
-#define M_DM_DSCR_BASE_RESET        _SB_MAKEMASK1(61)  /* write register */
-#define M_DM_DSCR_BASE_ERROR        _SB_MAKEMASK1(61)  /* read register */
-#define M_DM_DSCR_BASE_ABORT        _SB_MAKEMASK1(62)
-#define M_DM_DSCR_BASE_ENABL        _SB_MAKEMASK1(63)
+#define M_DM_DSCR_BASE_RESET       _SB_MAKEMASK1(61)   /* write register */
+#define M_DM_DSCR_BASE_ERROR       _SB_MAKEMASK1(61)   /* read register */
+#define M_DM_DSCR_BASE_ABORT       _SB_MAKEMASK1(62)
+#define M_DM_DSCR_BASE_ENABL       _SB_MAKEMASK1(63)
 
 /*
  * Data Mover Descriptor Count Register (Table 7-25)
  * Register: DM_CUR_DSCR_ADDR_3
  */
 
-#define S_DM_CUR_DSCR_DSCR_ADDR     _SB_MAKE64(0)
-#define M_DM_CUR_DSCR_DSCR_ADDR     _SB_MAKEMASK(40, S_DM_CUR_DSCR_DSCR_ADDR)
+#define S_DM_CUR_DSCR_DSCR_ADDR            _SB_MAKE64(0)
+#define M_DM_CUR_DSCR_DSCR_ADDR            _SB_MAKEMASK(40, S_DM_CUR_DSCR_DSCR_ADDR)
 
 #define S_DM_CUR_DSCR_DSCR_COUNT    _SB_MAKE64(48)
 #define M_DM_CUR_DSCR_DSCR_COUNT    _SB_MAKEMASK(16, S_DM_CUR_DSCR_DSCR_COUNT)
 #define V_DM_CUR_DSCR_DSCR_COUNT(r) _SB_MAKEVALUE(r, S_DM_CUR_DSCR_DSCR_COUNT)
 #define G_DM_CUR_DSCR_DSCR_COUNT(r) _SB_GETVALUE(r, S_DM_CUR_DSCR_DSCR_COUNT,\
-                                     M_DM_CUR_DSCR_DSCR_COUNT)
+                                    M_DM_CUR_DSCR_DSCR_COUNT)
 
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_DM_PARTIAL_CRC_PARTIAL      _SB_MAKEMASK(32, S_DM_PARTIAL_CRC_PARTIAL)
 #define V_DM_PARTIAL_CRC_PARTIAL(r)   _SB_MAKEVALUE(r, S_DM_PARTIAL_CRC_PARTIAL)
 #define G_DM_PARTIAL_CRC_PARTIAL(r)   _SB_GETVALUE(r, S_DM_PARTIAL_CRC_PARTIAL,\
-                                       M_DM_PARTIAL_CRC_PARTIAL)
+                                      M_DM_PARTIAL_CRC_PARTIAL)
 
 #define S_DM_PARTIAL_TCPCS_PARTIAL    _SB_MAKE64(32)
 #define M_DM_PARTIAL_TCPCS_PARTIAL    _SB_MAKEMASK(16, S_DM_PARTIAL_TCPCS_PARTIAL)
 #define V_DM_PARTIAL_TCPCS_PARTIAL(r) _SB_MAKEVALUE(r, S_DM_PARTIAL_TCPCS_PARTIAL)
 #define G_DM_PARTIAL_TCPCS_PARTIAL(r) _SB_GETVALUE(r, S_DM_PARTIAL_TCPCS_PARTIAL,\
-                                       M_DM_PARTIAL_TCPCS_PARTIAL)
+                                      M_DM_PARTIAL_TCPCS_PARTIAL)
 
-#define M_DM_PARTIAL_ODD_BYTE         _SB_MAKEMASK1(48)
+#define M_DM_PARTIAL_ODD_BYTE        _SB_MAKEMASK1(48)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
 
  * Register: CRC_DEF_0
  * Register: CRC_DEF_1
  */
-#define S_CRC_DEF_CRC_INIT            _SB_MAKE64(0)
-#define M_CRC_DEF_CRC_INIT            _SB_MAKEMASK(32, S_CRC_DEF_CRC_INIT)
-#define V_CRC_DEF_CRC_INIT(r)         _SB_MAKEVALUE(r, S_CRC_DEF_CRC_INIT)
-#define G_CRC_DEF_CRC_INIT(r)         _SB_GETVALUE(r, S_CRC_DEF_CRC_INIT,\
-                                       M_CRC_DEF_CRC_INIT)
-
-#define S_CRC_DEF_CRC_POLY            _SB_MAKE64(32)
-#define M_CRC_DEF_CRC_POLY            _SB_MAKEMASK(32, S_CRC_DEF_CRC_POLY)
-#define V_CRC_DEF_CRC_POLY(r)         _SB_MAKEVALUE(r, S_CRC_DEF_CRC_POLY)
-#define G_CRC_DEF_CRC_POLY(r)         _SB_GETVALUE(r, S_CRC_DEF_CRC_POLY,\
-                                       M_CRC_DEF_CRC_POLY)
+#define S_CRC_DEF_CRC_INIT           _SB_MAKE64(0)
+#define M_CRC_DEF_CRC_INIT           _SB_MAKEMASK(32, S_CRC_DEF_CRC_INIT)
+#define V_CRC_DEF_CRC_INIT(r)        _SB_MAKEVALUE(r, S_CRC_DEF_CRC_INIT)
+#define G_CRC_DEF_CRC_INIT(r)        _SB_GETVALUE(r, S_CRC_DEF_CRC_INIT,\
+                                      M_CRC_DEF_CRC_INIT)
+
+#define S_CRC_DEF_CRC_POLY           _SB_MAKE64(32)
+#define M_CRC_DEF_CRC_POLY           _SB_MAKEMASK(32, S_CRC_DEF_CRC_POLY)
+#define V_CRC_DEF_CRC_POLY(r)        _SB_MAKEVALUE(r, S_CRC_DEF_CRC_POLY)
+#define G_CRC_DEF_CRC_POLY(r)        _SB_GETVALUE(r, S_CRC_DEF_CRC_POLY,\
+                                      M_CRC_DEF_CRC_POLY)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
 
  * Register: CTCP_DEF_0
  * Register: CTCP_DEF_1
  */
-#define S_CTCP_DEF_CRC_TXOR           _SB_MAKE64(0)
-#define M_CTCP_DEF_CRC_TXOR           _SB_MAKEMASK(32, S_CTCP_DEF_CRC_TXOR)
-#define V_CTCP_DEF_CRC_TXOR(r)        _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_TXOR)
-#define G_CTCP_DEF_CRC_TXOR(r)        _SB_GETVALUE(r, S_CTCP_DEF_CRC_TXOR,\
-                                       M_CTCP_DEF_CRC_TXOR)
-
-#define S_CTCP_DEF_TCPCS_INIT         _SB_MAKE64(32)
-#define M_CTCP_DEF_TCPCS_INIT         _SB_MAKEMASK(16, S_CTCP_DEF_TCPCS_INIT)
+#define S_CTCP_DEF_CRC_TXOR          _SB_MAKE64(0)
+#define M_CTCP_DEF_CRC_TXOR          _SB_MAKEMASK(32, S_CTCP_DEF_CRC_TXOR)
+#define V_CTCP_DEF_CRC_TXOR(r)       _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_TXOR)
+#define G_CTCP_DEF_CRC_TXOR(r)       _SB_GETVALUE(r, S_CTCP_DEF_CRC_TXOR,\
+                                      M_CTCP_DEF_CRC_TXOR)
+
+#define S_CTCP_DEF_TCPCS_INIT        _SB_MAKE64(32)
+#define M_CTCP_DEF_TCPCS_INIT        _SB_MAKEMASK(16, S_CTCP_DEF_TCPCS_INIT)
 #define V_CTCP_DEF_TCPCS_INIT(r)      _SB_MAKEVALUE(r, S_CTCP_DEF_TCPCS_INIT)
 #define G_CTCP_DEF_TCPCS_INIT(r)      _SB_GETVALUE(r, S_CTCP_DEF_TCPCS_INIT,\
-                                       M_CTCP_DEF_TCPCS_INIT)
+                                      M_CTCP_DEF_TCPCS_INIT)
 
-#define S_CTCP_DEF_CRC_WIDTH          _SB_MAKE64(48)
-#define M_CTCP_DEF_CRC_WIDTH          _SB_MAKEMASK(2, S_CTCP_DEF_CRC_WIDTH)
-#define V_CTCP_DEF_CRC_WIDTH(r)       _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_WIDTH)
-#define G_CTCP_DEF_CRC_WIDTH(r)       _SB_GETVALUE(r, S_CTCP_DEF_CRC_WIDTH,\
-                                       M_CTCP_DEF_CRC_WIDTH)
+#define S_CTCP_DEF_CRC_WIDTH         _SB_MAKE64(48)
+#define M_CTCP_DEF_CRC_WIDTH         _SB_MAKEMASK(2, S_CTCP_DEF_CRC_WIDTH)
+#define V_CTCP_DEF_CRC_WIDTH(r)              _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_WIDTH)
+#define G_CTCP_DEF_CRC_WIDTH(r)              _SB_GETVALUE(r, S_CTCP_DEF_CRC_WIDTH,\
+                                      M_CTCP_DEF_CRC_WIDTH)
 
-#define K_CTCP_DEF_CRC_WIDTH_4        0
-#define K_CTCP_DEF_CRC_WIDTH_2        1
-#define K_CTCP_DEF_CRC_WIDTH_1        2
+#define K_CTCP_DEF_CRC_WIDTH_4       0
+#define K_CTCP_DEF_CRC_WIDTH_2       1
+#define K_CTCP_DEF_CRC_WIDTH_1       2
 
 #define M_CTCP_DEF_CRC_BIT_ORDER      _SB_MAKEMASK1(50)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
 
 /*
- * Data Mover Descriptor Doubleword "A"  (Table 7-26)
+ * Data Mover Descriptor Doubleword "A"         (Table 7-26)
  */
 
-#define S_DM_DSCRA_DST_ADDR         _SB_MAKE64(0)
-#define M_DM_DSCRA_DST_ADDR         _SB_MAKEMASK(40, S_DM_DSCRA_DST_ADDR)
+#define S_DM_DSCRA_DST_ADDR        _SB_MAKE64(0)
+#define M_DM_DSCRA_DST_ADDR        _SB_MAKEMASK(40, S_DM_DSCRA_DST_ADDR)
 
-#define M_DM_DSCRA_UN_DEST          _SB_MAKEMASK1(40)
-#define M_DM_DSCRA_UN_SRC           _SB_MAKEMASK1(41)
-#define M_DM_DSCRA_INTERRUPT        _SB_MAKEMASK1(42)
+#define M_DM_DSCRA_UN_DEST         _SB_MAKEMASK1(40)
+#define M_DM_DSCRA_UN_SRC          _SB_MAKEMASK1(41)
+#define M_DM_DSCRA_INTERRUPT       _SB_MAKEMASK1(42)
 #if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-#define M_DM_DSCRA_THROTTLE         _SB_MAKEMASK1(43)
+#define M_DM_DSCRA_THROTTLE        _SB_MAKEMASK1(43)
 #endif /* up to 1250 PASS1 */
 
-#define S_DM_DSCRA_DIR_DEST         _SB_MAKE64(44)
-#define M_DM_DSCRA_DIR_DEST         _SB_MAKEMASK(2, S_DM_DSCRA_DIR_DEST)
-#define V_DM_DSCRA_DIR_DEST(x)      _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_DEST)
-#define G_DM_DSCRA_DIR_DEST(x)      _SB_GETVALUE(x, S_DM_DSCRA_DIR_DEST, M_DM_DSCRA_DIR_DEST)
+#define S_DM_DSCRA_DIR_DEST        _SB_MAKE64(44)
+#define M_DM_DSCRA_DIR_DEST        _SB_MAKEMASK(2, S_DM_DSCRA_DIR_DEST)
+#define V_DM_DSCRA_DIR_DEST(x)     _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_DEST)
+#define G_DM_DSCRA_DIR_DEST(x)     _SB_GETVALUE(x, S_DM_DSCRA_DIR_DEST, M_DM_DSCRA_DIR_DEST)
 
 #define K_DM_DSCRA_DIR_DEST_INCR    0
 #define K_DM_DSCRA_DIR_DEST_DECR    1
 #define V_DM_DSCRA_DIR_DEST_DECR    _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_DECR, S_DM_DSCRA_DIR_DEST)
 #define V_DM_DSCRA_DIR_DEST_CONST   _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_CONST, S_DM_DSCRA_DIR_DEST)
 
-#define S_DM_DSCRA_DIR_SRC          _SB_MAKE64(46)
-#define M_DM_DSCRA_DIR_SRC          _SB_MAKEMASK(2, S_DM_DSCRA_DIR_SRC)
-#define V_DM_DSCRA_DIR_SRC(x)       _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_SRC)
-#define G_DM_DSCRA_DIR_SRC(x)       _SB_GETVALUE(x, S_DM_DSCRA_DIR_SRC, M_DM_DSCRA_DIR_SRC)
+#define S_DM_DSCRA_DIR_SRC         _SB_MAKE64(46)
+#define M_DM_DSCRA_DIR_SRC         _SB_MAKEMASK(2, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC(x)      _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_SRC)
+#define G_DM_DSCRA_DIR_SRC(x)      _SB_GETVALUE(x, S_DM_DSCRA_DIR_SRC, M_DM_DSCRA_DIR_SRC)
 
-#define K_DM_DSCRA_DIR_SRC_INCR     0
-#define K_DM_DSCRA_DIR_SRC_DECR     1
+#define K_DM_DSCRA_DIR_SRC_INCR            0
+#define K_DM_DSCRA_DIR_SRC_DECR            1
 #define K_DM_DSCRA_DIR_SRC_CONST    2
 
-#define V_DM_DSCRA_DIR_SRC_INCR     _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_INCR, S_DM_DSCRA_DIR_SRC)
-#define V_DM_DSCRA_DIR_SRC_DECR     _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_DECR, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC_INCR            _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_INCR, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC_DECR            _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_DECR, S_DM_DSCRA_DIR_SRC)
 #define V_DM_DSCRA_DIR_SRC_CONST    _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_CONST, S_DM_DSCRA_DIR_SRC)
 
 
-#define M_DM_DSCRA_ZERO_MEM         _SB_MAKEMASK1(48)
-#define M_DM_DSCRA_PREFETCH         _SB_MAKEMASK1(49)
-#define M_DM_DSCRA_L2C_DEST         _SB_MAKEMASK1(50)
-#define M_DM_DSCRA_L2C_SRC          _SB_MAKEMASK1(51)
+#define M_DM_DSCRA_ZERO_MEM        _SB_MAKEMASK1(48)
+#define M_DM_DSCRA_PREFETCH        _SB_MAKEMASK1(49)
+#define M_DM_DSCRA_L2C_DEST        _SB_MAKEMASK1(50)
+#define M_DM_DSCRA_L2C_SRC         _SB_MAKEMASK1(51)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_DM_DSCRA_RD_BKOFF        _SB_MAKEMASK1(52)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_DM_DSCRA_TCPCS_EN         _SB_MAKEMASK1(54)
-#define M_DM_DSCRA_TCPCS_RES        _SB_MAKEMASK1(55)
-#define M_DM_DSCRA_TCPCS_AP         _SB_MAKEMASK1(56)
-#define M_DM_DSCRA_CRC_EN           _SB_MAKEMASK1(57)
-#define M_DM_DSCRA_CRC_RES          _SB_MAKEMASK1(58)
-#define M_DM_DSCRA_CRC_AP           _SB_MAKEMASK1(59)
-#define M_DM_DSCRA_CRC_DFN          _SB_MAKEMASK1(60)
-#define M_DM_DSCRA_CRC_XBIT         _SB_MAKEMASK1(61)
+#define M_DM_DSCRA_TCPCS_EN        _SB_MAKEMASK1(54)
+#define M_DM_DSCRA_TCPCS_RES       _SB_MAKEMASK1(55)
+#define M_DM_DSCRA_TCPCS_AP        _SB_MAKEMASK1(56)
+#define M_DM_DSCRA_CRC_EN          _SB_MAKEMASK1(57)
+#define M_DM_DSCRA_CRC_RES         _SB_MAKEMASK1(58)
+#define M_DM_DSCRA_CRC_AP          _SB_MAKEMASK1(59)
+#define M_DM_DSCRA_CRC_DFN         _SB_MAKEMASK1(60)
+#define M_DM_DSCRA_CRC_XBIT        _SB_MAKEMASK1(61)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define M_DM_DSCRA_RESERVED2        _SB_MAKEMASK(3, 61)
+#define M_DM_DSCRA_RESERVED2       _SB_MAKEMASK(3, 61)
 
 /*
- * Data Mover Descriptor Doubleword "B"  (Table 7-25)
+ * Data Mover Descriptor Doubleword "B"         (Table 7-25)
  */
 
-#define S_DM_DSCRB_SRC_ADDR         _SB_MAKE64(0)
-#define M_DM_DSCRB_SRC_ADDR         _SB_MAKEMASK(40, S_DM_DSCRB_SRC_ADDR)
+#define S_DM_DSCRB_SRC_ADDR        _SB_MAKE64(0)
+#define M_DM_DSCRB_SRC_ADDR        _SB_MAKEMASK(40, S_DM_DSCRB_SRC_ADDR)
 
-#define S_DM_DSCRB_SRC_LENGTH       _SB_MAKE64(40)
-#define M_DM_DSCRB_SRC_LENGTH       _SB_MAKEMASK(20, S_DM_DSCRB_SRC_LENGTH)
+#define S_DM_DSCRB_SRC_LENGTH      _SB_MAKE64(40)
+#define M_DM_DSCRB_SRC_LENGTH      _SB_MAKEMASK(20, S_DM_DSCRB_SRC_LENGTH)
 #define V_DM_DSCRB_SRC_LENGTH(x)    _SB_MAKEVALUE(x, S_DM_DSCRB_SRC_LENGTH)
 #define G_DM_DSCRB_SRC_LENGTH(x)    _SB_GETVALUE(x, S_DM_DSCRB_SRC_LENGTH, M_DM_DSCRB_SRC_LENGTH)
 
index a96ded1..04c009c 100644 (file)
@@ -1,7 +1,7 @@
 /*  *********************************************************************
     *  SB1250 Board Support Package
     *
-    *  Generic Bus Constants                     File: sb1250_genbus.h
+    *  Generic Bus Constants                    File: sb1250_genbus.h
     *
     *  This module contains constants and macros useful for
     *  manipulating the SB1250's Generic Bus interface
  * Generic Bus Region Configuration Registers (Table 11-4)
  */
 
-#define S_IO_RDY_ACTIVE         0
+#define S_IO_RDY_ACTIVE                0
 #define M_IO_RDY_ACTIVE                _SB_MAKEMASK1(S_IO_RDY_ACTIVE)
 
-#define S_IO_ENA_RDY            1
+#define S_IO_ENA_RDY           1
 #define M_IO_ENA_RDY           _SB_MAKEMASK1(S_IO_ENA_RDY)
 
 #define S_IO_WIDTH_SEL         2
@@ -52,7 +52,7 @@
 #define K_IO_WIDTH_SEL_2       1
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
     || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define K_IO_WIDTH_SEL_1L       2
+#define K_IO_WIDTH_SEL_1L      2
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 #define K_IO_WIDTH_SEL_4       3
 #define V_IO_WIDTH_SEL(x)      _SB_MAKEVALUE(x, S_IO_WIDTH_SEL)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
     || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_EARLY_CS          _SB_MAKEMASK1(3)
+#define M_IO_EARLY_CS          _SB_MAKEMASK1(3)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 #define S_IO_ALE_TO_CS         4
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
     || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_IO_BURST_WIDTH           _SB_MAKE64(6)
-#define M_IO_BURST_WIDTH           _SB_MAKEMASK(2, S_IO_BURST_WIDTH)
-#define V_IO_BURST_WIDTH(x)        _SB_MAKEVALUE(x, S_IO_BURST_WIDTH)
-#define G_IO_BURST_WIDTH(x)        _SB_GETVALUE(x, S_IO_BURST_WIDTH, M_IO_BURST_WIDTH)
+#define S_IO_BURST_WIDTH          _SB_MAKE64(6)
+#define M_IO_BURST_WIDTH          _SB_MAKEMASK(2, S_IO_BURST_WIDTH)
+#define V_IO_BURST_WIDTH(x)       _SB_MAKEVALUE(x, S_IO_BURST_WIDTH)
+#define G_IO_BURST_WIDTH(x)       _SB_GETVALUE(x, S_IO_BURST_WIDTH, M_IO_BURST_WIDTH)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 #define S_IO_CS_WIDTH          8
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
     || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_RDY_SYNC          _SB_MAKEMASK1(3)
+#define M_IO_RDY_SYNC          _SB_MAKEMASK1(3)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 #define S_IO_WRITE_WIDTH       4
 #define M_IO_ILL_ADDR_INT      _SB_MAKEMASK1(11)
 #define M_IO_MULT_CS_INT       _SB_MAKEMASK1(12)
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_COH_ERR           _SB_MAKEMASK1(14)
+#define M_IO_COH_ERR           _SB_MAKEMASK1(14)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 
 
 #define S_GPIO_INTR_TYPEX(n)   (((n)/2)*2)
 #define M_GPIO_INTR_TYPEX(n)   _SB_MAKEMASK(2, S_GPIO_INTR_TYPEX(n))
-#define V_GPIO_INTR_TYPEX(n, x)        _SB_MAKEVALUE(x, S_GPIO_INTR_TYPEX(n))
-#define G_GPIO_INTR_TYPEX(n, x)        _SB_GETVALUE(x, S_GPIO_INTR_TYPEX(n), M_GPIO_INTR_TYPEX(n))
+#define V_GPIO_INTR_TYPEX(n, x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPEX(n))
+#define G_GPIO_INTR_TYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_TYPEX(n), M_GPIO_INTR_TYPEX(n))
 
 #define S_GPIO_INTR_TYPE0      0
 #define M_GPIO_INTR_TYPE0      _SB_MAKEMASK(2, S_GPIO_INTR_TYPE0)
index dbea73d..36afcb2 100644 (file)
  * First, the interrupt numbers.
  */
 
-#define K_INT_SOURCES               64
-
-#define K_INT_WATCHDOG_TIMER_0      0
-#define K_INT_WATCHDOG_TIMER_1      1
-#define K_INT_TIMER_0               2
-#define K_INT_TIMER_1               3
-#define K_INT_TIMER_2               4
-#define K_INT_TIMER_3               5
-#define K_INT_SMB_0                 6
-#define K_INT_SMB_1                 7
-#define K_INT_UART_0                8
-#define K_INT_UART_1                9
-#define K_INT_SER_0                 10
-#define K_INT_SER_1                 11
-#define K_INT_PCMCIA                12
-#define K_INT_ADDR_TRAP             13
-#define K_INT_PERF_CNT              14
-#define K_INT_TRACE_FREEZE          15
-#define K_INT_BAD_ECC               16
-#define K_INT_COR_ECC               17
-#define K_INT_IO_BUS                18
-#define K_INT_MAC_0                 19
-#define K_INT_MAC_1                 20
-#define K_INT_MAC_2                 21
-#define K_INT_DM_CH_0               22
-#define K_INT_DM_CH_1               23
-#define K_INT_DM_CH_2               24
-#define K_INT_DM_CH_3               25
-#define K_INT_MBOX_0                26
-#define K_INT_MBOX_1                27
-#define K_INT_MBOX_2                28
-#define K_INT_MBOX_3                29
+#define K_INT_SOURCES              64
+
+#define K_INT_WATCHDOG_TIMER_0     0
+#define K_INT_WATCHDOG_TIMER_1     1
+#define K_INT_TIMER_0              2
+#define K_INT_TIMER_1              3
+#define K_INT_TIMER_2              4
+#define K_INT_TIMER_3              5
+#define K_INT_SMB_0                6
+#define K_INT_SMB_1                7
+#define K_INT_UART_0               8
+#define K_INT_UART_1               9
+#define K_INT_SER_0                10
+#define K_INT_SER_1                11
+#define K_INT_PCMCIA               12
+#define K_INT_ADDR_TRAP                    13
+#define K_INT_PERF_CNT             14
+#define K_INT_TRACE_FREEZE         15
+#define K_INT_BAD_ECC              16
+#define K_INT_COR_ECC              17
+#define K_INT_IO_BUS               18
+#define K_INT_MAC_0                19
+#define K_INT_MAC_1                20
+#define K_INT_MAC_2                21
+#define K_INT_DM_CH_0              22
+#define K_INT_DM_CH_1              23
+#define K_INT_DM_CH_2              24
+#define K_INT_DM_CH_3              25
+#define K_INT_MBOX_0               26
+#define K_INT_MBOX_1               27
+#define K_INT_MBOX_2               28
+#define K_INT_MBOX_3               29
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define K_INT_CYCLE_CP0_INT        30
 #define K_INT_CYCLE_CP1_INT        31
 #endif /* 1250 PASS2 || 112x PASS1 */
-#define K_INT_GPIO_0                32
-#define K_INT_GPIO_1                33
-#define K_INT_GPIO_2                34
-#define K_INT_GPIO_3                35
-#define K_INT_GPIO_4                36
-#define K_INT_GPIO_5                37
-#define K_INT_GPIO_6                38
-#define K_INT_GPIO_7                39
-#define K_INT_GPIO_8                40
-#define K_INT_GPIO_9                41
-#define K_INT_GPIO_10               42
-#define K_INT_GPIO_11               43
-#define K_INT_GPIO_12               44
-#define K_INT_GPIO_13               45
-#define K_INT_GPIO_14               46
-#define K_INT_GPIO_15               47
-#define K_INT_LDT_FATAL             48
-#define K_INT_LDT_NONFATAL          49
-#define K_INT_LDT_SMI               50
-#define K_INT_LDT_NMI               51
-#define K_INT_LDT_INIT              52
-#define K_INT_LDT_STARTUP           53
-#define K_INT_LDT_EXT               54
-#define K_INT_PCI_ERROR             55
-#define K_INT_PCI_INTA              56
-#define K_INT_PCI_INTB              57
-#define K_INT_PCI_INTC              58
-#define K_INT_PCI_INTD              59
-#define K_INT_SPARE_2               60
+#define K_INT_GPIO_0               32
+#define K_INT_GPIO_1               33
+#define K_INT_GPIO_2               34
+#define K_INT_GPIO_3               35
+#define K_INT_GPIO_4               36
+#define K_INT_GPIO_5               37
+#define K_INT_GPIO_6               38
+#define K_INT_GPIO_7               39
+#define K_INT_GPIO_8               40
+#define K_INT_GPIO_9               41
+#define K_INT_GPIO_10              42
+#define K_INT_GPIO_11              43
+#define K_INT_GPIO_12              44
+#define K_INT_GPIO_13              45
+#define K_INT_GPIO_14              46
+#define K_INT_GPIO_15              47
+#define K_INT_LDT_FATAL                    48
+#define K_INT_LDT_NONFATAL         49
+#define K_INT_LDT_SMI              50
+#define K_INT_LDT_NMI              51
+#define K_INT_LDT_INIT             52
+#define K_INT_LDT_STARTUP          53
+#define K_INT_LDT_EXT              54
+#define K_INT_PCI_ERROR                    55
+#define K_INT_PCI_INTA             56
+#define K_INT_PCI_INTB             57
+#define K_INT_PCI_INTC             58
+#define K_INT_PCI_INTD             59
+#define K_INT_SPARE_2              60
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define K_INT_MAC_0_CH1                    61
 #define K_INT_MAC_1_CH1                    62
  * Mask values for each interrupt
  */
 
-#define M_INT_WATCHDOG_TIMER_0      _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_0)
-#define M_INT_WATCHDOG_TIMER_1      _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_1)
-#define M_INT_TIMER_0               _SB_MAKEMASK1(K_INT_TIMER_0)
-#define M_INT_TIMER_1               _SB_MAKEMASK1(K_INT_TIMER_1)
-#define M_INT_TIMER_2               _SB_MAKEMASK1(K_INT_TIMER_2)
-#define M_INT_TIMER_3               _SB_MAKEMASK1(K_INT_TIMER_3)
-#define M_INT_SMB_0                 _SB_MAKEMASK1(K_INT_SMB_0)
-#define M_INT_SMB_1                 _SB_MAKEMASK1(K_INT_SMB_1)
-#define M_INT_UART_0                _SB_MAKEMASK1(K_INT_UART_0)
-#define M_INT_UART_1                _SB_MAKEMASK1(K_INT_UART_1)
-#define M_INT_SER_0                 _SB_MAKEMASK1(K_INT_SER_0)
-#define M_INT_SER_1                 _SB_MAKEMASK1(K_INT_SER_1)
-#define M_INT_PCMCIA                _SB_MAKEMASK1(K_INT_PCMCIA)
-#define M_INT_ADDR_TRAP             _SB_MAKEMASK1(K_INT_ADDR_TRAP)
-#define M_INT_PERF_CNT              _SB_MAKEMASK1(K_INT_PERF_CNT)
-#define M_INT_TRACE_FREEZE          _SB_MAKEMASK1(K_INT_TRACE_FREEZE)
-#define M_INT_BAD_ECC               _SB_MAKEMASK1(K_INT_BAD_ECC)
-#define M_INT_COR_ECC               _SB_MAKEMASK1(K_INT_COR_ECC)
-#define M_INT_IO_BUS                _SB_MAKEMASK1(K_INT_IO_BUS)
-#define M_INT_MAC_0                 _SB_MAKEMASK1(K_INT_MAC_0)
-#define M_INT_MAC_1                 _SB_MAKEMASK1(K_INT_MAC_1)
-#define M_INT_MAC_2                 _SB_MAKEMASK1(K_INT_MAC_2)
-#define M_INT_DM_CH_0               _SB_MAKEMASK1(K_INT_DM_CH_0)
-#define M_INT_DM_CH_1               _SB_MAKEMASK1(K_INT_DM_CH_1)
-#define M_INT_DM_CH_2               _SB_MAKEMASK1(K_INT_DM_CH_2)
-#define M_INT_DM_CH_3               _SB_MAKEMASK1(K_INT_DM_CH_3)
-#define M_INT_MBOX_0                _SB_MAKEMASK1(K_INT_MBOX_0)
-#define M_INT_MBOX_1                _SB_MAKEMASK1(K_INT_MBOX_1)
-#define M_INT_MBOX_2                _SB_MAKEMASK1(K_INT_MBOX_2)
-#define M_INT_MBOX_3                _SB_MAKEMASK1(K_INT_MBOX_3)
-#define M_INT_MBOX_ALL              _SB_MAKEMASK(4, K_INT_MBOX_0)
+#define M_INT_WATCHDOG_TIMER_0     _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_0)
+#define M_INT_WATCHDOG_TIMER_1     _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_1)
+#define M_INT_TIMER_0              _SB_MAKEMASK1(K_INT_TIMER_0)
+#define M_INT_TIMER_1              _SB_MAKEMASK1(K_INT_TIMER_1)
+#define M_INT_TIMER_2              _SB_MAKEMASK1(K_INT_TIMER_2)
+#define M_INT_TIMER_3              _SB_MAKEMASK1(K_INT_TIMER_3)
+#define M_INT_SMB_0                _SB_MAKEMASK1(K_INT_SMB_0)
+#define M_INT_SMB_1                _SB_MAKEMASK1(K_INT_SMB_1)
+#define M_INT_UART_0               _SB_MAKEMASK1(K_INT_UART_0)
+#define M_INT_UART_1               _SB_MAKEMASK1(K_INT_UART_1)
+#define M_INT_SER_0                _SB_MAKEMASK1(K_INT_SER_0)
+#define M_INT_SER_1                _SB_MAKEMASK1(K_INT_SER_1)
+#define M_INT_PCMCIA               _SB_MAKEMASK1(K_INT_PCMCIA)
+#define M_INT_ADDR_TRAP                    _SB_MAKEMASK1(K_INT_ADDR_TRAP)
+#define M_INT_PERF_CNT             _SB_MAKEMASK1(K_INT_PERF_CNT)
+#define M_INT_TRACE_FREEZE         _SB_MAKEMASK1(K_INT_TRACE_FREEZE)
+#define M_INT_BAD_ECC              _SB_MAKEMASK1(K_INT_BAD_ECC)
+#define M_INT_COR_ECC              _SB_MAKEMASK1(K_INT_COR_ECC)
+#define M_INT_IO_BUS               _SB_MAKEMASK1(K_INT_IO_BUS)
+#define M_INT_MAC_0                _SB_MAKEMASK1(K_INT_MAC_0)
+#define M_INT_MAC_1                _SB_MAKEMASK1(K_INT_MAC_1)
+#define M_INT_MAC_2                _SB_MAKEMASK1(K_INT_MAC_2)
+#define M_INT_DM_CH_0              _SB_MAKEMASK1(K_INT_DM_CH_0)
+#define M_INT_DM_CH_1              _SB_MAKEMASK1(K_INT_DM_CH_1)
+#define M_INT_DM_CH_2              _SB_MAKEMASK1(K_INT_DM_CH_2)
+#define M_INT_DM_CH_3              _SB_MAKEMASK1(K_INT_DM_CH_3)
+#define M_INT_MBOX_0               _SB_MAKEMASK1(K_INT_MBOX_0)
+#define M_INT_MBOX_1               _SB_MAKEMASK1(K_INT_MBOX_1)
+#define M_INT_MBOX_2               _SB_MAKEMASK1(K_INT_MBOX_2)
+#define M_INT_MBOX_3               _SB_MAKEMASK1(K_INT_MBOX_3)
+#define M_INT_MBOX_ALL             _SB_MAKEMASK(4, K_INT_MBOX_0)
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define M_INT_CYCLE_CP0_INT        _SB_MAKEMASK1(K_INT_CYCLE_CP0_INT)
 #define M_INT_CYCLE_CP1_INT        _SB_MAKEMASK1(K_INT_CYCLE_CP1_INT)
 #endif /* 1250 PASS2 || 112x PASS1 */
-#define M_INT_GPIO_0                _SB_MAKEMASK1(K_INT_GPIO_0)
-#define M_INT_GPIO_1                _SB_MAKEMASK1(K_INT_GPIO_1)
-#define M_INT_GPIO_2                _SB_MAKEMASK1(K_INT_GPIO_2)
-#define M_INT_GPIO_3                _SB_MAKEMASK1(K_INT_GPIO_3)
-#define M_INT_GPIO_4                _SB_MAKEMASK1(K_INT_GPIO_4)
-#define M_INT_GPIO_5                _SB_MAKEMASK1(K_INT_GPIO_5)
-#define M_INT_GPIO_6                _SB_MAKEMASK1(K_INT_GPIO_6)
-#define M_INT_GPIO_7                _SB_MAKEMASK1(K_INT_GPIO_7)
-#define M_INT_GPIO_8                _SB_MAKEMASK1(K_INT_GPIO_8)
-#define M_INT_GPIO_9                _SB_MAKEMASK1(K_INT_GPIO_9)
-#define M_INT_GPIO_10               _SB_MAKEMASK1(K_INT_GPIO_10)
-#define M_INT_GPIO_11               _SB_MAKEMASK1(K_INT_GPIO_11)
-#define M_INT_GPIO_12               _SB_MAKEMASK1(K_INT_GPIO_12)
-#define M_INT_GPIO_13               _SB_MAKEMASK1(K_INT_GPIO_13)
-#define M_INT_GPIO_14               _SB_MAKEMASK1(K_INT_GPIO_14)
-#define M_INT_GPIO_15               _SB_MAKEMASK1(K_INT_GPIO_15)
-#define M_INT_LDT_FATAL             _SB_MAKEMASK1(K_INT_LDT_FATAL)
-#define M_INT_LDT_NONFATAL          _SB_MAKEMASK1(K_INT_LDT_NONFATAL)
-#define M_INT_LDT_SMI               _SB_MAKEMASK1(K_INT_LDT_SMI)
-#define M_INT_LDT_NMI               _SB_MAKEMASK1(K_INT_LDT_NMI)
-#define M_INT_LDT_INIT              _SB_MAKEMASK1(K_INT_LDT_INIT)
-#define M_INT_LDT_STARTUP           _SB_MAKEMASK1(K_INT_LDT_STARTUP)
-#define M_INT_LDT_EXT               _SB_MAKEMASK1(K_INT_LDT_EXT)
-#define M_INT_PCI_ERROR             _SB_MAKEMASK1(K_INT_PCI_ERROR)
-#define M_INT_PCI_INTA              _SB_MAKEMASK1(K_INT_PCI_INTA)
-#define M_INT_PCI_INTB              _SB_MAKEMASK1(K_INT_PCI_INTB)
-#define M_INT_PCI_INTC              _SB_MAKEMASK1(K_INT_PCI_INTC)
-#define M_INT_PCI_INTD              _SB_MAKEMASK1(K_INT_PCI_INTD)
-#define M_INT_SPARE_2               _SB_MAKEMASK1(K_INT_SPARE_2)
+#define M_INT_GPIO_0               _SB_MAKEMASK1(K_INT_GPIO_0)
+#define M_INT_GPIO_1               _SB_MAKEMASK1(K_INT_GPIO_1)
+#define M_INT_GPIO_2               _SB_MAKEMASK1(K_INT_GPIO_2)
+#define M_INT_GPIO_3               _SB_MAKEMASK1(K_INT_GPIO_3)
+#define M_INT_GPIO_4               _SB_MAKEMASK1(K_INT_GPIO_4)
+#define M_INT_GPIO_5               _SB_MAKEMASK1(K_INT_GPIO_5)
+#define M_INT_GPIO_6               _SB_MAKEMASK1(K_INT_GPIO_6)
+#define M_INT_GPIO_7               _SB_MAKEMASK1(K_INT_GPIO_7)
+#define M_INT_GPIO_8               _SB_MAKEMASK1(K_INT_GPIO_8)
+#define M_INT_GPIO_9               _SB_MAKEMASK1(K_INT_GPIO_9)
+#define M_INT_GPIO_10              _SB_MAKEMASK1(K_INT_GPIO_10)
+#define M_INT_GPIO_11              _SB_MAKEMASK1(K_INT_GPIO_11)
+#define M_INT_GPIO_12              _SB_MAKEMASK1(K_INT_GPIO_12)
+#define M_INT_GPIO_13              _SB_MAKEMASK1(K_INT_GPIO_13)
+#define M_INT_GPIO_14              _SB_MAKEMASK1(K_INT_GPIO_14)
+#define M_INT_GPIO_15              _SB_MAKEMASK1(K_INT_GPIO_15)
+#define M_INT_LDT_FATAL                    _SB_MAKEMASK1(K_INT_LDT_FATAL)
+#define M_INT_LDT_NONFATAL         _SB_MAKEMASK1(K_INT_LDT_NONFATAL)
+#define M_INT_LDT_SMI              _SB_MAKEMASK1(K_INT_LDT_SMI)
+#define M_INT_LDT_NMI              _SB_MAKEMASK1(K_INT_LDT_NMI)
+#define M_INT_LDT_INIT             _SB_MAKEMASK1(K_INT_LDT_INIT)
+#define M_INT_LDT_STARTUP          _SB_MAKEMASK1(K_INT_LDT_STARTUP)
+#define M_INT_LDT_EXT              _SB_MAKEMASK1(K_INT_LDT_EXT)
+#define M_INT_PCI_ERROR                    _SB_MAKEMASK1(K_INT_PCI_ERROR)
+#define M_INT_PCI_INTA             _SB_MAKEMASK1(K_INT_PCI_INTA)
+#define M_INT_PCI_INTB             _SB_MAKEMASK1(K_INT_PCI_INTB)
+#define M_INT_PCI_INTC             _SB_MAKEMASK1(K_INT_PCI_INTC)
+#define M_INT_PCI_INTD             _SB_MAKEMASK1(K_INT_PCI_INTD)
+#define M_INT_SPARE_2              _SB_MAKEMASK1(K_INT_SPARE_2)
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define M_INT_MAC_0_CH1                    _SB_MAKEMASK1(K_INT_MAC_0_CH1)
 #define M_INT_MAC_1_CH1                    _SB_MAKEMASK1(K_INT_MAC_1_CH1)
  */
 
 #define S_INT_LDT_INTMSG             0
-#define M_INT_LDT_INTMSG              _SB_MAKEMASK(3, S_INT_LDT_INTMSG)
-#define V_INT_LDT_INTMSG(x)           _SB_MAKEVALUE(x, S_INT_LDT_INTMSG)
-#define G_INT_LDT_INTMSG(x)           _SB_GETVALUE(x, S_INT_LDT_INTMSG, M_INT_LDT_INTMSG)
+#define M_INT_LDT_INTMSG             _SB_MAKEMASK(3, S_INT_LDT_INTMSG)
+#define V_INT_LDT_INTMSG(x)          _SB_MAKEVALUE(x, S_INT_LDT_INTMSG)
+#define G_INT_LDT_INTMSG(x)          _SB_GETVALUE(x, S_INT_LDT_INTMSG, M_INT_LDT_INTMSG)
 
 #define K_INT_LDT_INTMSG_FIXED       0
 #define K_INT_LDT_INTMSG_ARBITRATED   1
 #define K_INT_LDT_INTMSG_EXTINT              6
 #define K_INT_LDT_INTMSG_RESERVED     7
 
-#define M_INT_LDT_EDGETRIGGER         0
-#define M_INT_LDT_LEVELTRIGGER        _SB_MAKEMASK1(3)
+#define M_INT_LDT_EDGETRIGGER        0
+#define M_INT_LDT_LEVELTRIGGER       _SB_MAKEMASK1(3)
 
-#define M_INT_LDT_PHYSICALDEST        0
-#define M_INT_LDT_LOGICALDEST         _SB_MAKEMASK1(4)
+#define M_INT_LDT_PHYSICALDEST       0
+#define M_INT_LDT_LOGICALDEST        _SB_MAKEMASK1(4)
 
-#define S_INT_LDT_INTDEST             5
-#define M_INT_LDT_INTDEST             _SB_MAKEMASK(10, S_INT_LDT_INTDEST)
-#define V_INT_LDT_INTDEST(x)          _SB_MAKEVALUE(x, S_INT_LDT_INTDEST)
-#define G_INT_LDT_INTDEST(x)          _SB_GETVALUE(x, S_INT_LDT_INTDEST, M_INT_LDT_INTDEST)
+#define S_INT_LDT_INTDEST            5
+#define M_INT_LDT_INTDEST            _SB_MAKEMASK(10, S_INT_LDT_INTDEST)
+#define V_INT_LDT_INTDEST(x)         _SB_MAKEVALUE(x, S_INT_LDT_INTDEST)
+#define G_INT_LDT_INTDEST(x)         _SB_GETVALUE(x, S_INT_LDT_INTDEST, M_INT_LDT_INTDEST)
 
-#define S_INT_LDT_VECTOR              13
-#define M_INT_LDT_VECTOR              _SB_MAKEMASK(8, S_INT_LDT_VECTOR)
-#define V_INT_LDT_VECTOR(x)           _SB_MAKEVALUE(x, S_INT_LDT_VECTOR)
-#define G_INT_LDT_VECTOR(x)           _SB_GETVALUE(x, S_INT_LDT_VECTOR, M_INT_LDT_VECTOR)
+#define S_INT_LDT_VECTOR             13
+#define M_INT_LDT_VECTOR             _SB_MAKEMASK(8, S_INT_LDT_VECTOR)
+#define V_INT_LDT_VECTOR(x)          _SB_MAKEVALUE(x, S_INT_LDT_VECTOR)
+#define G_INT_LDT_VECTOR(x)          _SB_GETVALUE(x, S_INT_LDT_VECTOR, M_INT_LDT_VECTOR)
 
 /*
  * Vector format (Table 4-6)
  */
 
 #define M_LDTVECT_RAISEINT             0x00
-#define M_LDTVECT_RAISEMBOX             0x40
+#define M_LDTVECT_RAISEMBOX            0x40
 
 
 #endif /* 1250/112x */
index b61a749..30092d7 100644 (file)
  * Level 2 Cache Tag register (Table 5-3)
  */
 
-#define S_L2C_TAG_MBZ               0
-#define M_L2C_TAG_MBZ               _SB_MAKEMASK(5, S_L2C_TAG_MBZ)
+#define S_L2C_TAG_MBZ              0
+#define M_L2C_TAG_MBZ              _SB_MAKEMASK(5, S_L2C_TAG_MBZ)
 
-#define S_L2C_TAG_INDEX             5
-#define M_L2C_TAG_INDEX             _SB_MAKEMASK(12, S_L2C_TAG_INDEX)
-#define V_L2C_TAG_INDEX(x)          _SB_MAKEVALUE(x, S_L2C_TAG_INDEX)
-#define G_L2C_TAG_INDEX(x)          _SB_GETVALUE(x, S_L2C_TAG_INDEX, M_L2C_TAG_INDEX)
+#define S_L2C_TAG_INDEX                    5
+#define M_L2C_TAG_INDEX                    _SB_MAKEMASK(12, S_L2C_TAG_INDEX)
+#define V_L2C_TAG_INDEX(x)         _SB_MAKEVALUE(x, S_L2C_TAG_INDEX)
+#define G_L2C_TAG_INDEX(x)         _SB_GETVALUE(x, S_L2C_TAG_INDEX, M_L2C_TAG_INDEX)
 
-#define S_L2C_TAG_TAG               17
-#define M_L2C_TAG_TAG               _SB_MAKEMASK(23, S_L2C_TAG_TAG)
-#define V_L2C_TAG_TAG(x)            _SB_MAKEVALUE(x, S_L2C_TAG_TAG)
-#define G_L2C_TAG_TAG(x)            _SB_GETVALUE(x, S_L2C_TAG_TAG, M_L2C_TAG_TAG)
+#define S_L2C_TAG_TAG              17
+#define M_L2C_TAG_TAG              _SB_MAKEMASK(23, S_L2C_TAG_TAG)
+#define V_L2C_TAG_TAG(x)           _SB_MAKEVALUE(x, S_L2C_TAG_TAG)
+#define G_L2C_TAG_TAG(x)           _SB_GETVALUE(x, S_L2C_TAG_TAG, M_L2C_TAG_TAG)
 
-#define S_L2C_TAG_ECC               40
-#define M_L2C_TAG_ECC               _SB_MAKEMASK(6, S_L2C_TAG_ECC)
-#define V_L2C_TAG_ECC(x)            _SB_MAKEVALUE(x, S_L2C_TAG_ECC)
-#define G_L2C_TAG_ECC(x)            _SB_GETVALUE(x, S_L2C_TAG_ECC, M_L2C_TAG_ECC)
+#define S_L2C_TAG_ECC              40
+#define M_L2C_TAG_ECC              _SB_MAKEMASK(6, S_L2C_TAG_ECC)
+#define V_L2C_TAG_ECC(x)           _SB_MAKEVALUE(x, S_L2C_TAG_ECC)
+#define G_L2C_TAG_ECC(x)           _SB_GETVALUE(x, S_L2C_TAG_ECC, M_L2C_TAG_ECC)
 
-#define S_L2C_TAG_WAY               46
-#define M_L2C_TAG_WAY               _SB_MAKEMASK(2, S_L2C_TAG_WAY)
-#define V_L2C_TAG_WAY(x)            _SB_MAKEVALUE(x, S_L2C_TAG_WAY)
-#define G_L2C_TAG_WAY(x)            _SB_GETVALUE(x, S_L2C_TAG_WAY, M_L2C_TAG_WAY)
+#define S_L2C_TAG_WAY              46
+#define M_L2C_TAG_WAY              _SB_MAKEMASK(2, S_L2C_TAG_WAY)
+#define V_L2C_TAG_WAY(x)           _SB_MAKEVALUE(x, S_L2C_TAG_WAY)
+#define G_L2C_TAG_WAY(x)           _SB_GETVALUE(x, S_L2C_TAG_WAY, M_L2C_TAG_WAY)
 
-#define M_L2C_TAG_DIRTY             _SB_MAKEMASK1(48)
-#define M_L2C_TAG_VALID             _SB_MAKEMASK1(49)
+#define M_L2C_TAG_DIRTY                    _SB_MAKEMASK1(48)
+#define M_L2C_TAG_VALID                    _SB_MAKEMASK1(49)
 
 /*
  * Format of level 2 cache management address (table 5-2)
  */
 
-#define S_L2C_MGMT_INDEX            5
-#define M_L2C_MGMT_INDEX            _SB_MAKEMASK(12, S_L2C_MGMT_INDEX)
-#define V_L2C_MGMT_INDEX(x)         _SB_MAKEVALUE(x, S_L2C_MGMT_INDEX)
-#define G_L2C_MGMT_INDEX(x)         _SB_GETVALUE(x, S_L2C_MGMT_INDEX, M_L2C_MGMT_INDEX)
+#define S_L2C_MGMT_INDEX           5
+#define M_L2C_MGMT_INDEX           _SB_MAKEMASK(12, S_L2C_MGMT_INDEX)
+#define V_L2C_MGMT_INDEX(x)        _SB_MAKEVALUE(x, S_L2C_MGMT_INDEX)
+#define G_L2C_MGMT_INDEX(x)        _SB_GETVALUE(x, S_L2C_MGMT_INDEX, M_L2C_MGMT_INDEX)
 
-#define S_L2C_MGMT_QUADRANT         15
-#define M_L2C_MGMT_QUADRANT         _SB_MAKEMASK(2, S_L2C_MGMT_QUADRANT)
-#define V_L2C_MGMT_QUADRANT(x)      _SB_MAKEVALUE(x, S_L2C_MGMT_QUADRANT)
-#define G_L2C_MGMT_QUADRANT(x)      _SB_GETVALUE(x, S_L2C_MGMT_QUADRANT, M_L2C_MGMT_QUADRANT)
+#define S_L2C_MGMT_QUADRANT        15
+#define M_L2C_MGMT_QUADRANT        _SB_MAKEMASK(2, S_L2C_MGMT_QUADRANT)
+#define V_L2C_MGMT_QUADRANT(x)     _SB_MAKEVALUE(x, S_L2C_MGMT_QUADRANT)
+#define G_L2C_MGMT_QUADRANT(x)     _SB_GETVALUE(x, S_L2C_MGMT_QUADRANT, M_L2C_MGMT_QUADRANT)
 
 #define S_L2C_MGMT_HALF                    16
-#define M_L2C_MGMT_HALF                    _SB_MAKEMASK(1, S_L2C_MGMT_HALF)
+#define M_L2C_MGMT_HALF                    _SB_MAKEMASK(1, S_L2C_MGMT_HALF)
 
-#define S_L2C_MGMT_WAY              17
-#define M_L2C_MGMT_WAY              _SB_MAKEMASK(2, S_L2C_MGMT_WAY)
-#define V_L2C_MGMT_WAY(x)           _SB_MAKEVALUE(x, S_L2C_MGMT_WAY)
-#define G_L2C_MGMT_WAY(x)           _SB_GETVALUE(x, S_L2C_MGMT_WAY, M_L2C_MGMT_WAY)
+#define S_L2C_MGMT_WAY             17
+#define M_L2C_MGMT_WAY             _SB_MAKEMASK(2, S_L2C_MGMT_WAY)
+#define V_L2C_MGMT_WAY(x)          _SB_MAKEVALUE(x, S_L2C_MGMT_WAY)
+#define G_L2C_MGMT_WAY(x)          _SB_GETVALUE(x, S_L2C_MGMT_WAY, M_L2C_MGMT_WAY)
 
-#define S_L2C_MGMT_ECC_DIAG         21
-#define M_L2C_MGMT_ECC_DIAG         _SB_MAKEMASK(2, S_L2C_MGMT_ECC_DIAG)
-#define V_L2C_MGMT_ECC_DIAG(x)      _SB_MAKEVALUE(x, S_L2C_MGMT_ECC_DIAG)
-#define G_L2C_MGMT_ECC_DIAG(x)      _SB_GETVALUE(x, S_L2C_MGMT_ECC_DIAG, M_L2C_MGMT_ECC_DIAG)
+#define S_L2C_MGMT_ECC_DIAG        21
+#define M_L2C_MGMT_ECC_DIAG        _SB_MAKEMASK(2, S_L2C_MGMT_ECC_DIAG)
+#define V_L2C_MGMT_ECC_DIAG(x)     _SB_MAKEVALUE(x, S_L2C_MGMT_ECC_DIAG)
+#define G_L2C_MGMT_ECC_DIAG(x)     _SB_GETVALUE(x, S_L2C_MGMT_ECC_DIAG, M_L2C_MGMT_ECC_DIAG)
 
-#define S_L2C_MGMT_TAG              23
-#define M_L2C_MGMT_TAG              _SB_MAKEMASK(4, S_L2C_MGMT_TAG)
-#define V_L2C_MGMT_TAG(x)           _SB_MAKEVALUE(x, S_L2C_MGMT_TAG)
-#define G_L2C_MGMT_TAG(x)           _SB_GETVALUE(x, S_L2C_MGMT_TAG, M_L2C_MGMT_TAG)
+#define S_L2C_MGMT_TAG             23
+#define M_L2C_MGMT_TAG             _SB_MAKEMASK(4, S_L2C_MGMT_TAG)
+#define V_L2C_MGMT_TAG(x)          _SB_MAKEVALUE(x, S_L2C_MGMT_TAG)
+#define G_L2C_MGMT_TAG(x)          _SB_GETVALUE(x, S_L2C_MGMT_TAG, M_L2C_MGMT_TAG)
 
-#define M_L2C_MGMT_DIRTY            _SB_MAKEMASK1(19)
-#define M_L2C_MGMT_VALID            _SB_MAKEMASK1(20)
+#define M_L2C_MGMT_DIRTY           _SB_MAKEMASK1(19)
+#define M_L2C_MGMT_VALID           _SB_MAKEMASK1(20)
 
-#define A_L2C_MGMT_TAG_BASE         0x00D0000000
+#define A_L2C_MGMT_TAG_BASE        0x00D0000000
 
-#define L2C_ENTRIES_PER_WAY       4096
-#define L2C_NUM_WAYS              4
+#define L2C_ENTRIES_PER_WAY      4096
+#define L2C_NUM_WAYS             4
 
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
index bf7f320..2340c29 100644 (file)
@@ -66,7 +66,7 @@
 #define R_LDT_TYPE1_SRICMD     0x0050
 #define R_LDT_TYPE1_SRITXNUM   0x0054
 #define R_LDT_TYPE1_SRIRXNUM   0x0058
-#define R_LDT_TYPE1_ERRSTATUS   0x0068
+#define R_LDT_TYPE1_ERRSTATUS  0x0068
 #define R_LDT_TYPE1_SRICTRL    0x006C
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define R_LDT_TYPE1_ADDSTATUS  0x0070
 #define M_LDT_LINKCTRL_DWFCOUT_EN      _SB_MAKEMASK1_32(31)
 
 /*
- * LDT Link frequency register  (Table 8-20) offset 0x48
+ * LDT Link frequency register (Table 8-20) offset 0x48
  */
 
 #define S_LDT_LINKFREQ_FREQ            8
 
 #define S_LDT_SRICMD_TXINITIALOFFSET   28
 #define M_LDT_SRICMD_TXINITIALOFFSET   _SB_MAKEMASK_32(3, S_LDT_SRICMD_TXINITIALOFFSET)
-#define V_LDT_SRICMD_TXINITIALOFFSET(x)        _SB_MAKEVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET)
-#define G_LDT_SRICMD_TXINITIALOFFSET(x)        _SB_GETVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET, M_LDT_SRICMD_TXINITIALOFFSET)
+#define V_LDT_SRICMD_TXINITIALOFFSET(x) _SB_MAKEVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET)
+#define G_LDT_SRICMD_TXINITIALOFFSET(x) _SB_GETVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET, M_LDT_SRICMD_TXINITIALOFFSET)
 
 #define M_LDT_SRICMD_LINKFREQDIRECT    _SB_MAKEMASK1_32(31)
 
 #define M_LDT_ERRCTL_OVFSYNCFLOOD_EN   _SB_MAKEMASK1_32(5)
 #define M_LDT_ERRCTL_EOCNXAFATAL_EN    _SB_MAKEMASK1_32(6)
 #define M_LDT_ERRCTL_EOCNXANONFATAL_EN _SB_MAKEMASK1_32(7)
-#define M_LDT_ERRCTL_EOCNXASYNCFLOOD_EN        _SB_MAKEMASK1_32(8)
+#define M_LDT_ERRCTL_EOCNXASYNCFLOOD_EN _SB_MAKEMASK1_32(8)
 #define M_LDT_ERRCTL_CRCFATAL_EN       _SB_MAKEMASK1_32(9)
 #define M_LDT_ERRCTL_CRCNONFATAL_EN    _SB_MAKEMASK1_32(10)
 #define M_LDT_ERRCTL_SERRFATAL_EN      _SB_MAKEMASK1_32(11)
 #define M_LDT_ERRCTL_SRCTAGFATAL_EN    _SB_MAKEMASK1_32(12)
 #define M_LDT_ERRCTL_SRCTAGNONFATAL_EN _SB_MAKEMASK1_32(13)
-#define M_LDT_ERRCTL_SRCTAGSYNCFLOOD_EN        _SB_MAKEMASK1_32(14)
+#define M_LDT_ERRCTL_SRCTAGSYNCFLOOD_EN _SB_MAKEMASK1_32(14)
 #define M_LDT_ERRCTL_MAPNXAFATAL_EN    _SB_MAKEMASK1_32(15)
 #define M_LDT_ERRCTL_MAPNXANONFATAL_EN _SB_MAKEMASK1_32(16)
-#define M_LDT_ERRCTL_MAPNXASYNCFLOOD_EN        _SB_MAKEMASK1_32(17)
+#define M_LDT_ERRCTL_MAPNXASYNCFLOOD_EN _SB_MAKEMASK1_32(17)
 
 #define M_LDT_ERRCTL_PROTOERR          _SB_MAKEMASK1_32(24)
 #define M_LDT_ERRCTL_OVFERR            _SB_MAKEMASK1_32(25)
index cfc4d78..3fa94fc 100644 (file)
  */
 
 
-#define M_MAC_RESERVED0             _SB_MAKEMASK1(0)
-#define M_MAC_TX_HOLD_SOP_EN        _SB_MAKEMASK1(1)
-#define M_MAC_RETRY_EN              _SB_MAKEMASK1(2)
-#define M_MAC_RET_DRPREQ_EN         _SB_MAKEMASK1(3)
-#define M_MAC_RET_UFL_EN            _SB_MAKEMASK1(4)
-#define M_MAC_BURST_EN              _SB_MAKEMASK1(5)
-
-#define S_MAC_TX_PAUSE              _SB_MAKE64(6)
-#define M_MAC_TX_PAUSE_CNT          _SB_MAKEMASK(3, S_MAC_TX_PAUSE)
-#define V_MAC_TX_PAUSE_CNT(x)       _SB_MAKEVALUE(x, S_MAC_TX_PAUSE)
-
-#define K_MAC_TX_PAUSE_CNT_512      0
-#define K_MAC_TX_PAUSE_CNT_1K       1
-#define K_MAC_TX_PAUSE_CNT_2K       2
-#define K_MAC_TX_PAUSE_CNT_4K       3
-#define K_MAC_TX_PAUSE_CNT_8K       4
-#define K_MAC_TX_PAUSE_CNT_16K      5
-#define K_MAC_TX_PAUSE_CNT_32K      6
-#define K_MAC_TX_PAUSE_CNT_64K      7
-
-#define V_MAC_TX_PAUSE_CNT_512      V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_512)
-#define V_MAC_TX_PAUSE_CNT_1K       V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_1K)
-#define V_MAC_TX_PAUSE_CNT_2K       V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_2K)
-#define V_MAC_TX_PAUSE_CNT_4K       V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_4K)
-#define V_MAC_TX_PAUSE_CNT_8K       V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_8K)
-#define V_MAC_TX_PAUSE_CNT_16K      V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_16K)
-#define V_MAC_TX_PAUSE_CNT_32K      V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_32K)
-#define V_MAC_TX_PAUSE_CNT_64K      V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_64K)
-
-#define M_MAC_RESERVED1             _SB_MAKEMASK(8, 9)
-
-#define M_MAC_AP_STAT_EN            _SB_MAKEMASK1(17)
+#define M_MAC_RESERVED0                    _SB_MAKEMASK1(0)
+#define M_MAC_TX_HOLD_SOP_EN       _SB_MAKEMASK1(1)
+#define M_MAC_RETRY_EN             _SB_MAKEMASK1(2)
+#define M_MAC_RET_DRPREQ_EN        _SB_MAKEMASK1(3)
+#define M_MAC_RET_UFL_EN           _SB_MAKEMASK1(4)
+#define M_MAC_BURST_EN             _SB_MAKEMASK1(5)
+
+#define S_MAC_TX_PAUSE             _SB_MAKE64(6)
+#define M_MAC_TX_PAUSE_CNT         _SB_MAKEMASK(3, S_MAC_TX_PAUSE)
+#define V_MAC_TX_PAUSE_CNT(x)      _SB_MAKEVALUE(x, S_MAC_TX_PAUSE)
+
+#define K_MAC_TX_PAUSE_CNT_512     0
+#define K_MAC_TX_PAUSE_CNT_1K      1
+#define K_MAC_TX_PAUSE_CNT_2K      2
+#define K_MAC_TX_PAUSE_CNT_4K      3
+#define K_MAC_TX_PAUSE_CNT_8K      4
+#define K_MAC_TX_PAUSE_CNT_16K     5
+#define K_MAC_TX_PAUSE_CNT_32K     6
+#define K_MAC_TX_PAUSE_CNT_64K     7
+
+#define V_MAC_TX_PAUSE_CNT_512     V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_512)
+#define V_MAC_TX_PAUSE_CNT_1K      V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_1K)
+#define V_MAC_TX_PAUSE_CNT_2K      V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_2K)
+#define V_MAC_TX_PAUSE_CNT_4K      V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_4K)
+#define V_MAC_TX_PAUSE_CNT_8K      V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_8K)
+#define V_MAC_TX_PAUSE_CNT_16K     V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_16K)
+#define V_MAC_TX_PAUSE_CNT_32K     V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_32K)
+#define V_MAC_TX_PAUSE_CNT_64K     V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_64K)
+
+#define M_MAC_RESERVED1                    _SB_MAKEMASK(8, 9)
+
+#define M_MAC_AP_STAT_EN           _SB_MAKEMASK1(17)
 
 #if SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_TIMESTAMP                    _SB_MAKEMASK1(18)
 #endif
-#define M_MAC_DRP_ERRPKT_EN         _SB_MAKEMASK1(19)
-#define M_MAC_DRP_FCSERRPKT_EN      _SB_MAKEMASK1(20)
-#define M_MAC_DRP_CODEERRPKT_EN     _SB_MAKEMASK1(21)
-#define M_MAC_DRP_DRBLERRPKT_EN     _SB_MAKEMASK1(22)
-#define M_MAC_DRP_RNTPKT_EN         _SB_MAKEMASK1(23)
-#define M_MAC_DRP_OSZPKT_EN         _SB_MAKEMASK1(24)
-#define M_MAC_DRP_LENERRPKT_EN      _SB_MAKEMASK1(25)
+#define M_MAC_DRP_ERRPKT_EN        _SB_MAKEMASK1(19)
+#define M_MAC_DRP_FCSERRPKT_EN     _SB_MAKEMASK1(20)
+#define M_MAC_DRP_CODEERRPKT_EN            _SB_MAKEMASK1(21)
+#define M_MAC_DRP_DRBLERRPKT_EN            _SB_MAKEMASK1(22)
+#define M_MAC_DRP_RNTPKT_EN        _SB_MAKEMASK1(23)
+#define M_MAC_DRP_OSZPKT_EN        _SB_MAKEMASK1(24)
+#define M_MAC_DRP_LENERRPKT_EN     _SB_MAKEMASK1(25)
 
-#define M_MAC_RESERVED3             _SB_MAKEMASK(6, 26)
+#define M_MAC_RESERVED3                    _SB_MAKEMASK(6, 26)
 
-#define M_MAC_BYPASS_SEL            _SB_MAKEMASK1(32)
-#define M_MAC_HDX_EN                _SB_MAKEMASK1(33)
+#define M_MAC_BYPASS_SEL           _SB_MAKEMASK1(32)
+#define M_MAC_HDX_EN               _SB_MAKEMASK1(33)
 
-#define S_MAC_SPEED_SEL             _SB_MAKE64(34)
-#define M_MAC_SPEED_SEL             _SB_MAKEMASK(2, S_MAC_SPEED_SEL)
+#define S_MAC_SPEED_SEL                    _SB_MAKE64(34)
+#define M_MAC_SPEED_SEL                    _SB_MAKEMASK(2, S_MAC_SPEED_SEL)
 #define V_MAC_SPEED_SEL(x)         _SB_MAKEVALUE(x, S_MAC_SPEED_SEL)
 #define G_MAC_SPEED_SEL(x)         _SB_GETVALUE(x, S_MAC_SPEED_SEL, M_MAC_SPEED_SEL)
 
-#define K_MAC_SPEED_SEL_10MBPS      0
-#define K_MAC_SPEED_SEL_100MBPS     1
+#define K_MAC_SPEED_SEL_10MBPS     0
+#define K_MAC_SPEED_SEL_100MBPS            1
 #define K_MAC_SPEED_SEL_1000MBPS    2
 #define K_MAC_SPEED_SEL_RESERVED    3
 
-#define V_MAC_SPEED_SEL_10MBPS      V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_10MBPS)
-#define V_MAC_SPEED_SEL_100MBPS     V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_100MBPS)
+#define V_MAC_SPEED_SEL_10MBPS     V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_10MBPS)
+#define V_MAC_SPEED_SEL_100MBPS            V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_100MBPS)
 #define V_MAC_SPEED_SEL_1000MBPS    V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_1000MBPS)
 #define V_MAC_SPEED_SEL_RESERVED    V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_RESERVED)
 
-#define M_MAC_TX_CLK_EDGE_SEL       _SB_MAKEMASK1(36)
-#define M_MAC_LOOPBACK_SEL          _SB_MAKEMASK1(37)
-#define M_MAC_FAST_SYNC             _SB_MAKEMASK1(38)
-#define M_MAC_SS_EN                 _SB_MAKEMASK1(39)
+#define M_MAC_TX_CLK_EDGE_SEL      _SB_MAKEMASK1(36)
+#define M_MAC_LOOPBACK_SEL         _SB_MAKEMASK1(37)
+#define M_MAC_FAST_SYNC                    _SB_MAKEMASK1(38)
+#define M_MAC_SS_EN                _SB_MAKEMASK1(39)
 
 #define S_MAC_BYPASS_CFG           _SB_MAKE64(40)
-#define M_MAC_BYPASS_CFG            _SB_MAKEMASK(2, S_MAC_BYPASS_CFG)
-#define V_MAC_BYPASS_CFG(x)         _SB_MAKEVALUE(x, S_MAC_BYPASS_CFG)
-#define G_MAC_BYPASS_CFG(x)         _SB_GETVALUE(x, S_MAC_BYPASS_CFG, M_MAC_BYPASS_CFG)
+#define M_MAC_BYPASS_CFG           _SB_MAKEMASK(2, S_MAC_BYPASS_CFG)
+#define V_MAC_BYPASS_CFG(x)        _SB_MAKEVALUE(x, S_MAC_BYPASS_CFG)
+#define G_MAC_BYPASS_CFG(x)        _SB_GETVALUE(x, S_MAC_BYPASS_CFG, M_MAC_BYPASS_CFG)
 
 #define K_MAC_BYPASS_GMII          0
-#define K_MAC_BYPASS_ENCODED        1
-#define K_MAC_BYPASS_SOP            2
-#define K_MAC_BYPASS_EOP            3
+#define K_MAC_BYPASS_ENCODED       1
+#define K_MAC_BYPASS_SOP           2
+#define K_MAC_BYPASS_EOP           3
 
-#define M_MAC_BYPASS_16             _SB_MAKEMASK1(42)
+#define M_MAC_BYPASS_16                    _SB_MAKEMASK1(42)
 #define M_MAC_BYPASS_FCS_CHK       _SB_MAKEMASK1(43)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_SPLIT_CH_SEL         _SB_MAKEMASK1(45)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define S_MAC_BYPASS_IFG            _SB_MAKE64(46)
-#define M_MAC_BYPASS_IFG            _SB_MAKEMASK(8, S_MAC_BYPASS_IFG)
+#define S_MAC_BYPASS_IFG           _SB_MAKE64(46)
+#define M_MAC_BYPASS_IFG           _SB_MAKEMASK(8, S_MAC_BYPASS_IFG)
 #define V_MAC_BYPASS_IFG(x)        _SB_MAKEVALUE(x, S_MAC_BYPASS_IFG)
 #define G_MAC_BYPASS_IFG(x)        _SB_GETVALUE(x, S_MAC_BYPASS_IFG, M_MAC_BYPASS_IFG)
 
-#define K_MAC_FC_CMD_DISABLED       0
-#define K_MAC_FC_CMD_ENABLED        1
+#define K_MAC_FC_CMD_DISABLED      0
+#define K_MAC_FC_CMD_ENABLED       1
 #define K_MAC_FC_CMD_ENAB_FALSECARR 2
 
-#define V_MAC_FC_CMD_DISABLED       V_MAC_FC_CMD(K_MAC_FC_CMD_DISABLED)
-#define V_MAC_FC_CMD_ENABLED        V_MAC_FC_CMD(K_MAC_FC_CMD_ENABLED)
+#define V_MAC_FC_CMD_DISABLED      V_MAC_FC_CMD(K_MAC_FC_CMD_DISABLED)
+#define V_MAC_FC_CMD_ENABLED       V_MAC_FC_CMD(K_MAC_FC_CMD_ENABLED)
 #define V_MAC_FC_CMD_ENAB_FALSECARR V_MAC_FC_CMD(K_MAC_FC_CMD_ENAB_FALSECARR)
 
-#define M_MAC_FC_SEL                _SB_MAKEMASK1(54)
+#define M_MAC_FC_SEL               _SB_MAKEMASK1(54)
 
-#define S_MAC_FC_CMD                _SB_MAKE64(55)
-#define M_MAC_FC_CMD                _SB_MAKEMASK(2, S_MAC_FC_CMD)
-#define V_MAC_FC_CMD(x)                    _SB_MAKEVALUE(x, S_MAC_FC_CMD)
-#define G_MAC_FC_CMD(x)                    _SB_GETVALUE(x, S_MAC_FC_CMD, M_MAC_FC_CMD)
+#define S_MAC_FC_CMD               _SB_MAKE64(55)
+#define M_MAC_FC_CMD               _SB_MAKEMASK(2, S_MAC_FC_CMD)
+#define V_MAC_FC_CMD(x)                    _SB_MAKEVALUE(x, S_MAC_FC_CMD)
+#define G_MAC_FC_CMD(x)                    _SB_GETVALUE(x, S_MAC_FC_CMD, M_MAC_FC_CMD)
 
-#define S_MAC_RX_CH_SEL             _SB_MAKE64(57)
-#define M_MAC_RX_CH_SEL             _SB_MAKEMASK(7, S_MAC_RX_CH_SEL)
-#define V_MAC_RX_CH_SEL(x)          _SB_MAKEVALUE(x, S_MAC_RX_CH_SEL)
-#define G_MAC_RX_CH_SEL(x)          _SB_GETVALUE(x, S_MAC_RX_CH_SEL, M_MAC_RX_CH_SEL)
+#define S_MAC_RX_CH_SEL                    _SB_MAKE64(57)
+#define M_MAC_RX_CH_SEL                    _SB_MAKEMASK(7, S_MAC_RX_CH_SEL)
+#define V_MAC_RX_CH_SEL(x)         _SB_MAKEVALUE(x, S_MAC_RX_CH_SEL)
+#define G_MAC_RX_CH_SEL(x)         _SB_GETVALUE(x, S_MAC_RX_CH_SEL, M_MAC_RX_CH_SEL)
 
 
 /*
  * Register: MAC_ENABLE_2
  */
 
-#define M_MAC_RXDMA_EN0                    _SB_MAKEMASK1(0)
-#define M_MAC_RXDMA_EN1                    _SB_MAKEMASK1(1)
-#define M_MAC_TXDMA_EN0                    _SB_MAKEMASK1(4)
-#define M_MAC_TXDMA_EN1                    _SB_MAKEMASK1(5)
+#define M_MAC_RXDMA_EN0                    _SB_MAKEMASK1(0)
+#define M_MAC_RXDMA_EN1                    _SB_MAKEMASK1(1)
+#define M_MAC_TXDMA_EN0                    _SB_MAKEMASK1(4)
+#define M_MAC_TXDMA_EN1                    _SB_MAKEMASK1(5)
 
-#define M_MAC_PORT_RESET            _SB_MAKEMASK1(8)
+#define M_MAC_PORT_RESET           _SB_MAKEMASK1(8)
 
 #if (SIBYTE_HDR_FEATURE_CHIP(1250) || SIBYTE_HDR_FEATURE_CHIP(112x))
-#define M_MAC_RX_ENABLE             _SB_MAKEMASK1(10)
-#define M_MAC_TX_ENABLE             _SB_MAKEMASK1(11)
-#define M_MAC_BYP_RX_ENABLE         _SB_MAKEMASK1(12)
-#define M_MAC_BYP_TX_ENABLE         _SB_MAKEMASK1(13)
+#define M_MAC_RX_ENABLE                    _SB_MAKEMASK1(10)
+#define M_MAC_TX_ENABLE                    _SB_MAKEMASK1(11)
+#define M_MAC_BYP_RX_ENABLE        _SB_MAKEMASK1(12)
+#define M_MAC_BYP_TX_ENABLE        _SB_MAKEMASK1(13)
 #endif
 
 /*
 
 #define S_MAC_TXD_WEIGHT0          _SB_MAKE64(0)
 #define M_MAC_TXD_WEIGHT0          _SB_MAKEMASK(4, S_MAC_TXD_WEIGHT0)
-#define V_MAC_TXD_WEIGHT0(x)        _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT0)
-#define G_MAC_TXD_WEIGHT0(x)        _SB_GETVALUE(x, S_MAC_TXD_WEIGHT0, M_MAC_TXD_WEIGHT0)
+#define V_MAC_TXD_WEIGHT0(x)       _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT0)
+#define G_MAC_TXD_WEIGHT0(x)       _SB_GETVALUE(x, S_MAC_TXD_WEIGHT0, M_MAC_TXD_WEIGHT0)
 
 #define S_MAC_TXD_WEIGHT1          _SB_MAKE64(4)
 #define M_MAC_TXD_WEIGHT1          _SB_MAKEMASK(4, S_MAC_TXD_WEIGHT1)
-#define V_MAC_TXD_WEIGHT1(x)        _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT1)
-#define G_MAC_TXD_WEIGHT1(x)        _SB_GETVALUE(x, S_MAC_TXD_WEIGHT1, M_MAC_TXD_WEIGHT1)
+#define V_MAC_TXD_WEIGHT1(x)       _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT1)
+#define G_MAC_TXD_WEIGHT1(x)       _SB_GETVALUE(x, S_MAC_TXD_WEIGHT1, M_MAC_TXD_WEIGHT1)
 
 /*
  * MAC Fifo Threshold registers (Table 9-14)
  * Register: MAC_THRSH_CFG_2
  */
 
-#define S_MAC_TX_WR_THRSH           _SB_MAKE64(0)
+#define S_MAC_TX_WR_THRSH          _SB_MAKE64(0)
 #if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-/* XXX: Can't enable, as it has the same name as a pass2+ define below.  */
-/* #define M_MAC_TX_WR_THRSH           _SB_MAKEMASK(6, S_MAC_TX_WR_THRSH) */
+/* XXX: Can't enable, as it has the same name as a pass2+ define below.         */
+/* #define M_MAC_TX_WR_THRSH          _SB_MAKEMASK(6, S_MAC_TX_WR_THRSH) */
 #endif /* up to 1250 PASS1 */
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_TX_WR_THRSH           _SB_MAKEMASK(7, S_MAC_TX_WR_THRSH)
+#define M_MAC_TX_WR_THRSH          _SB_MAKEMASK(7, S_MAC_TX_WR_THRSH)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define V_MAC_TX_WR_THRSH(x)        _SB_MAKEVALUE(x, S_MAC_TX_WR_THRSH)
-#define G_MAC_TX_WR_THRSH(x)        _SB_GETVALUE(x, S_MAC_TX_WR_THRSH, M_MAC_TX_WR_THRSH)
+#define V_MAC_TX_WR_THRSH(x)       _SB_MAKEVALUE(x, S_MAC_TX_WR_THRSH)
+#define G_MAC_TX_WR_THRSH(x)       _SB_GETVALUE(x, S_MAC_TX_WR_THRSH, M_MAC_TX_WR_THRSH)
 
-#define S_MAC_TX_RD_THRSH           _SB_MAKE64(8)
+#define S_MAC_TX_RD_THRSH          _SB_MAKE64(8)
 #if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-/* XXX: Can't enable, as it has the same name as a pass2+ define below.  */
-/* #define M_MAC_TX_RD_THRSH           _SB_MAKEMASK(6, S_MAC_TX_RD_THRSH) */
+/* XXX: Can't enable, as it has the same name as a pass2+ define below.         */
+/* #define M_MAC_TX_RD_THRSH          _SB_MAKEMASK(6, S_MAC_TX_RD_THRSH) */
 #endif /* up to 1250 PASS1 */
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_TX_RD_THRSH           _SB_MAKEMASK(7, S_MAC_TX_RD_THRSH)
+#define M_MAC_TX_RD_THRSH          _SB_MAKEMASK(7, S_MAC_TX_RD_THRSH)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define V_MAC_TX_RD_THRSH(x)        _SB_MAKEVALUE(x, S_MAC_TX_RD_THRSH)
-#define G_MAC_TX_RD_THRSH(x)        _SB_GETVALUE(x, S_MAC_TX_RD_THRSH, M_MAC_TX_RD_THRSH)
+#define V_MAC_TX_RD_THRSH(x)       _SB_MAKEVALUE(x, S_MAC_TX_RD_THRSH)
+#define G_MAC_TX_RD_THRSH(x)       _SB_GETVALUE(x, S_MAC_TX_RD_THRSH, M_MAC_TX_RD_THRSH)
 
-#define S_MAC_TX_RL_THRSH           _SB_MAKE64(16)
-#define M_MAC_TX_RL_THRSH           _SB_MAKEMASK(4, S_MAC_TX_RL_THRSH)
-#define V_MAC_TX_RL_THRSH(x)        _SB_MAKEVALUE(x, S_MAC_TX_RL_THRSH)
-#define G_MAC_TX_RL_THRSH(x)        _SB_GETVALUE(x, S_MAC_TX_RL_THRSH, M_MAC_TX_RL_THRSH)
+#define S_MAC_TX_RL_THRSH          _SB_MAKE64(16)
+#define M_MAC_TX_RL_THRSH          _SB_MAKEMASK(4, S_MAC_TX_RL_THRSH)
+#define V_MAC_TX_RL_THRSH(x)       _SB_MAKEVALUE(x, S_MAC_TX_RL_THRSH)
+#define G_MAC_TX_RL_THRSH(x)       _SB_GETVALUE(x, S_MAC_TX_RL_THRSH, M_MAC_TX_RL_THRSH)
 
-#define S_MAC_RX_PL_THRSH           _SB_MAKE64(24)
-#define M_MAC_RX_PL_THRSH           _SB_MAKEMASK(6, S_MAC_RX_PL_THRSH)
-#define V_MAC_RX_PL_THRSH(x)        _SB_MAKEVALUE(x, S_MAC_RX_PL_THRSH)
-#define G_MAC_RX_PL_THRSH(x)        _SB_GETVALUE(x, S_MAC_RX_PL_THRSH, M_MAC_RX_PL_THRSH)
+#define S_MAC_RX_PL_THRSH          _SB_MAKE64(24)
+#define M_MAC_RX_PL_THRSH          _SB_MAKEMASK(6, S_MAC_RX_PL_THRSH)
+#define V_MAC_RX_PL_THRSH(x)       _SB_MAKEVALUE(x, S_MAC_RX_PL_THRSH)
+#define G_MAC_RX_PL_THRSH(x)       _SB_GETVALUE(x, S_MAC_RX_PL_THRSH, M_MAC_RX_PL_THRSH)
 
-#define S_MAC_RX_RD_THRSH           _SB_MAKE64(32)
-#define M_MAC_RX_RD_THRSH           _SB_MAKEMASK(6, S_MAC_RX_RD_THRSH)
-#define V_MAC_RX_RD_THRSH(x)        _SB_MAKEVALUE(x, S_MAC_RX_RD_THRSH)
-#define G_MAC_RX_RD_THRSH(x)        _SB_GETVALUE(x, S_MAC_RX_RD_THRSH, M_MAC_RX_RD_THRSH)
+#define S_MAC_RX_RD_THRSH          _SB_MAKE64(32)
+#define M_MAC_RX_RD_THRSH          _SB_MAKEMASK(6, S_MAC_RX_RD_THRSH)
+#define V_MAC_RX_RD_THRSH(x)       _SB_MAKEVALUE(x, S_MAC_RX_RD_THRSH)
+#define G_MAC_RX_RD_THRSH(x)       _SB_GETVALUE(x, S_MAC_RX_RD_THRSH, M_MAC_RX_RD_THRSH)
 
-#define S_MAC_RX_RL_THRSH           _SB_MAKE64(40)
-#define M_MAC_RX_RL_THRSH           _SB_MAKEMASK(6, S_MAC_RX_RL_THRSH)
-#define V_MAC_RX_RL_THRSH(x)        _SB_MAKEVALUE(x, S_MAC_RX_RL_THRSH)
-#define G_MAC_RX_RL_THRSH(x)        _SB_GETVALUE(x, S_MAC_RX_RL_THRSH, M_MAC_RX_RL_THRSH)
+#define S_MAC_RX_RL_THRSH          _SB_MAKE64(40)
+#define M_MAC_RX_RL_THRSH          _SB_MAKEMASK(6, S_MAC_RX_RL_THRSH)
+#define V_MAC_RX_RL_THRSH(x)       _SB_MAKEVALUE(x, S_MAC_RX_RL_THRSH)
+#define G_MAC_RX_RL_THRSH(x)       _SB_GETVALUE(x, S_MAC_RX_RL_THRSH, M_MAC_RX_RL_THRSH)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_ENC_FC_THRSH           _SB_MAKE64(56)
-#define M_MAC_ENC_FC_THRSH           _SB_MAKEMASK(6, S_MAC_ENC_FC_THRSH)
-#define V_MAC_ENC_FC_THRSH(x)        _SB_MAKEVALUE(x, S_MAC_ENC_FC_THRSH)
-#define G_MAC_ENC_FC_THRSH(x)        _SB_GETVALUE(x, S_MAC_ENC_FC_THRSH, M_MAC_ENC_FC_THRSH)
+#define S_MAC_ENC_FC_THRSH          _SB_MAKE64(56)
+#define M_MAC_ENC_FC_THRSH          _SB_MAKEMASK(6, S_MAC_ENC_FC_THRSH)
+#define V_MAC_ENC_FC_THRSH(x)       _SB_MAKEVALUE(x, S_MAC_ENC_FC_THRSH)
+#define G_MAC_ENC_FC_THRSH(x)       _SB_GETVALUE(x, S_MAC_ENC_FC_THRSH, M_MAC_ENC_FC_THRSH)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 /*
  */
 
 /* XXXCGD: ??? Unused in pass2? */
-#define S_MAC_IFG_RX                _SB_MAKE64(0)
-#define M_MAC_IFG_RX                _SB_MAKEMASK(6, S_MAC_IFG_RX)
-#define V_MAC_IFG_RX(x)             _SB_MAKEVALUE(x, S_MAC_IFG_RX)
-#define G_MAC_IFG_RX(x)             _SB_GETVALUE(x, S_MAC_IFG_RX, M_MAC_IFG_RX)
+#define S_MAC_IFG_RX               _SB_MAKE64(0)
+#define M_MAC_IFG_RX               _SB_MAKEMASK(6, S_MAC_IFG_RX)
+#define V_MAC_IFG_RX(x)                    _SB_MAKEVALUE(x, S_MAC_IFG_RX)
+#define G_MAC_IFG_RX(x)                    _SB_GETVALUE(x, S_MAC_IFG_RX, M_MAC_IFG_RX)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_PRE_LEN               _SB_MAKE64(0)
-#define M_MAC_PRE_LEN               _SB_MAKEMASK(6, S_MAC_PRE_LEN)
-#define V_MAC_PRE_LEN(x)            _SB_MAKEVALUE(x, S_MAC_PRE_LEN)
-#define G_MAC_PRE_LEN(x)            _SB_GETVALUE(x, S_MAC_PRE_LEN, M_MAC_PRE_LEN)
+#define S_MAC_PRE_LEN              _SB_MAKE64(0)
+#define M_MAC_PRE_LEN              _SB_MAKEMASK(6, S_MAC_PRE_LEN)
+#define V_MAC_PRE_LEN(x)           _SB_MAKEVALUE(x, S_MAC_PRE_LEN)
+#define G_MAC_PRE_LEN(x)           _SB_GETVALUE(x, S_MAC_PRE_LEN, M_MAC_PRE_LEN)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define S_MAC_IFG_TX                _SB_MAKE64(6)
-#define M_MAC_IFG_TX                _SB_MAKEMASK(6, S_MAC_IFG_TX)
-#define V_MAC_IFG_TX(x)             _SB_MAKEVALUE(x, S_MAC_IFG_TX)
-#define G_MAC_IFG_TX(x)             _SB_GETVALUE(x, S_MAC_IFG_TX, M_MAC_IFG_TX)
-
-#define S_MAC_IFG_THRSH             _SB_MAKE64(12)
-#define M_MAC_IFG_THRSH             _SB_MAKEMASK(6, S_MAC_IFG_THRSH)
-#define V_MAC_IFG_THRSH(x)          _SB_MAKEVALUE(x, S_MAC_IFG_THRSH)
-#define G_MAC_IFG_THRSH(x)          _SB_GETVALUE(x, S_MAC_IFG_THRSH, M_MAC_IFG_THRSH)
-
-#define S_MAC_BACKOFF_SEL           _SB_MAKE64(18)
-#define M_MAC_BACKOFF_SEL           _SB_MAKEMASK(4, S_MAC_BACKOFF_SEL)
-#define V_MAC_BACKOFF_SEL(x)        _SB_MAKEVALUE(x, S_MAC_BACKOFF_SEL)
-#define G_MAC_BACKOFF_SEL(x)        _SB_GETVALUE(x, S_MAC_BACKOFF_SEL, M_MAC_BACKOFF_SEL)
-
-#define S_MAC_LFSR_SEED             _SB_MAKE64(22)
-#define M_MAC_LFSR_SEED             _SB_MAKEMASK(8, S_MAC_LFSR_SEED)
-#define V_MAC_LFSR_SEED(x)          _SB_MAKEVALUE(x, S_MAC_LFSR_SEED)
-#define G_MAC_LFSR_SEED(x)          _SB_GETVALUE(x, S_MAC_LFSR_SEED, M_MAC_LFSR_SEED)
-
-#define S_MAC_SLOT_SIZE             _SB_MAKE64(30)
-#define M_MAC_SLOT_SIZE             _SB_MAKEMASK(10, S_MAC_SLOT_SIZE)
-#define V_MAC_SLOT_SIZE(x)          _SB_MAKEVALUE(x, S_MAC_SLOT_SIZE)
-#define G_MAC_SLOT_SIZE(x)          _SB_GETVALUE(x, S_MAC_SLOT_SIZE, M_MAC_SLOT_SIZE)
-
-#define S_MAC_MIN_FRAMESZ           _SB_MAKE64(40)
-#define M_MAC_MIN_FRAMESZ           _SB_MAKEMASK(8, S_MAC_MIN_FRAMESZ)
-#define V_MAC_MIN_FRAMESZ(x)        _SB_MAKEVALUE(x, S_MAC_MIN_FRAMESZ)
-#define G_MAC_MIN_FRAMESZ(x)        _SB_GETVALUE(x, S_MAC_MIN_FRAMESZ, M_MAC_MIN_FRAMESZ)
-
-#define S_MAC_MAX_FRAMESZ           _SB_MAKE64(48)
-#define M_MAC_MAX_FRAMESZ           _SB_MAKEMASK(16, S_MAC_MAX_FRAMESZ)
-#define V_MAC_MAX_FRAMESZ(x)        _SB_MAKEVALUE(x, S_MAC_MAX_FRAMESZ)
-#define G_MAC_MAX_FRAMESZ(x)        _SB_GETVALUE(x, S_MAC_MAX_FRAMESZ, M_MAC_MAX_FRAMESZ)
+#define S_MAC_IFG_TX               _SB_MAKE64(6)
+#define M_MAC_IFG_TX               _SB_MAKEMASK(6, S_MAC_IFG_TX)
+#define V_MAC_IFG_TX(x)                    _SB_MAKEVALUE(x, S_MAC_IFG_TX)
+#define G_MAC_IFG_TX(x)                    _SB_GETVALUE(x, S_MAC_IFG_TX, M_MAC_IFG_TX)
+
+#define S_MAC_IFG_THRSH                    _SB_MAKE64(12)
+#define M_MAC_IFG_THRSH                    _SB_MAKEMASK(6, S_MAC_IFG_THRSH)
+#define V_MAC_IFG_THRSH(x)         _SB_MAKEVALUE(x, S_MAC_IFG_THRSH)
+#define G_MAC_IFG_THRSH(x)         _SB_GETVALUE(x, S_MAC_IFG_THRSH, M_MAC_IFG_THRSH)
+
+#define S_MAC_BACKOFF_SEL          _SB_MAKE64(18)
+#define M_MAC_BACKOFF_SEL          _SB_MAKEMASK(4, S_MAC_BACKOFF_SEL)
+#define V_MAC_BACKOFF_SEL(x)       _SB_MAKEVALUE(x, S_MAC_BACKOFF_SEL)
+#define G_MAC_BACKOFF_SEL(x)       _SB_GETVALUE(x, S_MAC_BACKOFF_SEL, M_MAC_BACKOFF_SEL)
+
+#define S_MAC_LFSR_SEED                    _SB_MAKE64(22)
+#define M_MAC_LFSR_SEED                    _SB_MAKEMASK(8, S_MAC_LFSR_SEED)
+#define V_MAC_LFSR_SEED(x)         _SB_MAKEVALUE(x, S_MAC_LFSR_SEED)
+#define G_MAC_LFSR_SEED(x)         _SB_GETVALUE(x, S_MAC_LFSR_SEED, M_MAC_LFSR_SEED)
+
+#define S_MAC_SLOT_SIZE                    _SB_MAKE64(30)
+#define M_MAC_SLOT_SIZE                    _SB_MAKEMASK(10, S_MAC_SLOT_SIZE)
+#define V_MAC_SLOT_SIZE(x)         _SB_MAKEVALUE(x, S_MAC_SLOT_SIZE)
+#define G_MAC_SLOT_SIZE(x)         _SB_GETVALUE(x, S_MAC_SLOT_SIZE, M_MAC_SLOT_SIZE)
+
+#define S_MAC_MIN_FRAMESZ          _SB_MAKE64(40)
+#define M_MAC_MIN_FRAMESZ          _SB_MAKEMASK(8, S_MAC_MIN_FRAMESZ)
+#define V_MAC_MIN_FRAMESZ(x)       _SB_MAKEVALUE(x, S_MAC_MIN_FRAMESZ)
+#define G_MAC_MIN_FRAMESZ(x)       _SB_GETVALUE(x, S_MAC_MIN_FRAMESZ, M_MAC_MIN_FRAMESZ)
+
+#define S_MAC_MAX_FRAMESZ          _SB_MAKE64(48)
+#define M_MAC_MAX_FRAMESZ          _SB_MAKEMASK(16, S_MAC_MAX_FRAMESZ)
+#define V_MAC_MAX_FRAMESZ(x)       _SB_MAKEVALUE(x, S_MAC_MAX_FRAMESZ)
+#define G_MAC_MAX_FRAMESZ(x)       _SB_GETVALUE(x, S_MAC_MAX_FRAMESZ, M_MAC_MAX_FRAMESZ)
 
 /*
  * These constants are used to configure the fields within the Frame
  * Configuration Register.
  */
 
-#define K_MAC_IFG_RX_10             _SB_MAKE64(0)      /* See table 176, not used */
-#define K_MAC_IFG_RX_100            _SB_MAKE64(0)
-#define K_MAC_IFG_RX_1000           _SB_MAKE64(0)
+#define K_MAC_IFG_RX_10                    _SB_MAKE64(0)       /* See table 176, not used */
+#define K_MAC_IFG_RX_100           _SB_MAKE64(0)
+#define K_MAC_IFG_RX_1000          _SB_MAKE64(0)
 
-#define K_MAC_IFG_TX_10             _SB_MAKE64(20)
-#define K_MAC_IFG_TX_100            _SB_MAKE64(20)
-#define K_MAC_IFG_TX_1000           _SB_MAKE64(8)
+#define K_MAC_IFG_TX_10                    _SB_MAKE64(20)
+#define K_MAC_IFG_TX_100           _SB_MAKE64(20)
+#define K_MAC_IFG_TX_1000          _SB_MAKE64(8)
 
-#define K_MAC_IFG_THRSH_10          _SB_MAKE64(4)
-#define K_MAC_IFG_THRSH_100         _SB_MAKE64(4)
-#define K_MAC_IFG_THRSH_1000        _SB_MAKE64(0)
+#define K_MAC_IFG_THRSH_10         _SB_MAKE64(4)
+#define K_MAC_IFG_THRSH_100        _SB_MAKE64(4)
+#define K_MAC_IFG_THRSH_1000       _SB_MAKE64(0)
 
-#define K_MAC_SLOT_SIZE_10          _SB_MAKE64(0)
-#define K_MAC_SLOT_SIZE_100         _SB_MAKE64(0)
-#define K_MAC_SLOT_SIZE_1000        _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_10         _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_100        _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_1000       _SB_MAKE64(0)
 
-#define V_MAC_IFG_RX_10        V_MAC_IFG_RX(K_MAC_IFG_RX_10)
+#define V_MAC_IFG_RX_10               V_MAC_IFG_RX(K_MAC_IFG_RX_10)
 #define V_MAC_IFG_RX_100       V_MAC_IFG_RX(K_MAC_IFG_RX_100)
 #define V_MAC_IFG_RX_1000      V_MAC_IFG_RX(K_MAC_IFG_RX_1000)
 
-#define V_MAC_IFG_TX_10        V_MAC_IFG_TX(K_MAC_IFG_TX_10)
+#define V_MAC_IFG_TX_10               V_MAC_IFG_TX(K_MAC_IFG_TX_10)
 #define V_MAC_IFG_TX_100       V_MAC_IFG_TX(K_MAC_IFG_TX_100)
 #define V_MAC_IFG_TX_1000      V_MAC_IFG_TX(K_MAC_IFG_TX_1000)
 
 #define V_MAC_SLOT_SIZE_100    V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_100)
 #define V_MAC_SLOT_SIZE_1000   V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_1000)
 
-#define K_MAC_MIN_FRAMESZ_FIFO      _SB_MAKE64(9)
+#define K_MAC_MIN_FRAMESZ_FIFO     _SB_MAKE64(9)
 #define K_MAC_MIN_FRAMESZ_DEFAULT   _SB_MAKE64(64)
 #define K_MAC_MAX_FRAMESZ_DEFAULT   _SB_MAKE64(1518)
-#define K_MAC_MAX_FRAMESZ_JUMBO     _SB_MAKE64(9216)
+#define K_MAC_MAX_FRAMESZ_JUMBO            _SB_MAKE64(9216)
 
-#define V_MAC_MIN_FRAMESZ_FIFO      V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_FIFO)
+#define V_MAC_MIN_FRAMESZ_FIFO     V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_FIFO)
 #define V_MAC_MIN_FRAMESZ_DEFAULT   V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_DEFAULT)
 #define V_MAC_MAX_FRAMESZ_DEFAULT   V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_DEFAULT)
-#define V_MAC_MAX_FRAMESZ_JUMBO     V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_JUMBO)
+#define V_MAC_MAX_FRAMESZ_JUMBO            V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_JUMBO)
 
 /*
  * MAC VLAN Tag Registers (Table 9-16)
  * Register: MAC_VLANTAG_2
  */
 
-#define S_MAC_VLAN_TAG           _SB_MAKE64(0)
-#define M_MAC_VLAN_TAG           _SB_MAKEMASK(32, S_MAC_VLAN_TAG)
-#define V_MAC_VLAN_TAG(x)        _SB_MAKEVALUE(x, S_MAC_VLAN_TAG)
-#define G_MAC_VLAN_TAG(x)        _SB_GETVALUE(x, S_MAC_VLAN_TAG, M_MAC_VLAN_TAG)
+#define S_MAC_VLAN_TAG          _SB_MAKE64(0)
+#define M_MAC_VLAN_TAG          _SB_MAKEMASK(32, S_MAC_VLAN_TAG)
+#define V_MAC_VLAN_TAG(x)       _SB_MAKEVALUE(x, S_MAC_VLAN_TAG)
+#define G_MAC_VLAN_TAG(x)       _SB_GETVALUE(x, S_MAC_VLAN_TAG, M_MAC_VLAN_TAG)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define S_MAC_TX_PKT_OFFSET      _SB_MAKE64(32)
-#define M_MAC_TX_PKT_OFFSET      _SB_MAKEMASK(8, S_MAC_TX_PKT_OFFSET)
-#define V_MAC_TX_PKT_OFFSET(x)   _SB_MAKEVALUE(x, S_MAC_TX_PKT_OFFSET)
-#define G_MAC_TX_PKT_OFFSET(x)   _SB_GETVALUE(x, S_MAC_TX_PKT_OFFSET, M_MAC_TX_PKT_OFFSET)
+#define S_MAC_TX_PKT_OFFSET     _SB_MAKE64(32)
+#define M_MAC_TX_PKT_OFFSET     _SB_MAKEMASK(8, S_MAC_TX_PKT_OFFSET)
+#define V_MAC_TX_PKT_OFFSET(x)  _SB_MAKEVALUE(x, S_MAC_TX_PKT_OFFSET)
+#define G_MAC_TX_PKT_OFFSET(x)  _SB_GETVALUE(x, S_MAC_TX_PKT_OFFSET, M_MAC_TX_PKT_OFFSET)
 
-#define S_MAC_TX_CRC_OFFSET      _SB_MAKE64(40)
-#define M_MAC_TX_CRC_OFFSET      _SB_MAKEMASK(8, S_MAC_TX_CRC_OFFSET)
-#define V_MAC_TX_CRC_OFFSET(x)   _SB_MAKEVALUE(x, S_MAC_TX_CRC_OFFSET)
-#define G_MAC_TX_CRC_OFFSET(x)   _SB_GETVALUE(x, S_MAC_TX_CRC_OFFSET, M_MAC_TX_CRC_OFFSET)
+#define S_MAC_TX_CRC_OFFSET     _SB_MAKE64(40)
+#define M_MAC_TX_CRC_OFFSET     _SB_MAKEMASK(8, S_MAC_TX_CRC_OFFSET)
+#define V_MAC_TX_CRC_OFFSET(x)  _SB_MAKEVALUE(x, S_MAC_TX_CRC_OFFSET)
+#define G_MAC_TX_CRC_OFFSET(x)  _SB_GETVALUE(x, S_MAC_TX_CRC_OFFSET, M_MAC_TX_CRC_OFFSET)
 
-#define M_MAC_CH_BASE_FC_EN      _SB_MAKEMASK1(48)
+#define M_MAC_CH_BASE_FC_EN     _SB_MAKEMASK1(48)
 #endif /* 1250 PASS3 || 112x PASS1 */
 
 /*
  * on each channel.
  */
 
-#define S_MAC_RX_CH0                _SB_MAKE64(0)
-#define S_MAC_RX_CH1                _SB_MAKE64(8)
-#define S_MAC_TX_CH0                _SB_MAKE64(16)
-#define S_MAC_TX_CH1                _SB_MAKE64(24)
+#define S_MAC_RX_CH0               _SB_MAKE64(0)
+#define S_MAC_RX_CH1               _SB_MAKE64(8)
+#define S_MAC_TX_CH0               _SB_MAKE64(16)
+#define S_MAC_TX_CH1               _SB_MAKE64(24)
 
 #define S_MAC_TXCHANNELS           _SB_MAKE64(16)      /* this is 1st TX chan */
-#define S_MAC_CHANWIDTH             _SB_MAKE64(8)      /* bits between channels */
+#define S_MAC_CHANWIDTH                    _SB_MAKE64(8)       /* bits between channels */
 
 /*
- *  These are the same as RX channel 0.  The idea here
+ *  These are the same as RX channel 0.         The idea here
  *  is that you'll use one of the "S_" things above
  *  and pass just the six bits to a DMA-channel-specific ISR
  */
-#define M_MAC_INT_CHANNEL           _SB_MAKEMASK(8, 0)
-#define M_MAC_INT_EOP_COUNT         _SB_MAKEMASK1(0)
-#define M_MAC_INT_EOP_TIMER         _SB_MAKEMASK1(1)
-#define M_MAC_INT_EOP_SEEN          _SB_MAKEMASK1(2)
-#define M_MAC_INT_HWM               _SB_MAKEMASK1(3)
-#define M_MAC_INT_LWM               _SB_MAKEMASK1(4)
-#define M_MAC_INT_DSCR              _SB_MAKEMASK1(5)
-#define M_MAC_INT_ERR               _SB_MAKEMASK1(6)
-#define M_MAC_INT_DZERO             _SB_MAKEMASK1(7)   /* only for TX channels */
-#define M_MAC_INT_DROP              _SB_MAKEMASK1(7)   /* only for RX channels */
+#define M_MAC_INT_CHANNEL          _SB_MAKEMASK(8, 0)
+#define M_MAC_INT_EOP_COUNT        _SB_MAKEMASK1(0)
+#define M_MAC_INT_EOP_TIMER        _SB_MAKEMASK1(1)
+#define M_MAC_INT_EOP_SEEN         _SB_MAKEMASK1(2)
+#define M_MAC_INT_HWM              _SB_MAKEMASK1(3)
+#define M_MAC_INT_LWM              _SB_MAKEMASK1(4)
+#define M_MAC_INT_DSCR             _SB_MAKEMASK1(5)
+#define M_MAC_INT_ERR              _SB_MAKEMASK1(6)
+#define M_MAC_INT_DZERO                    _SB_MAKEMASK1(7)    /* only for TX channels */
+#define M_MAC_INT_DROP             _SB_MAKEMASK1(7)    /* only for RX channels */
 
 /*
  * In the following definitions we use ch (0/1) and txrx (TX=1, RX=0, see
  */
 #define S_MAC_STATUS_CH_OFFSET(ch, txrx) _SB_MAKE64(((ch) + 2 * (txrx)) * S_MAC_CHANWIDTH)
 
-#define M_MAC_STATUS_CHANNEL(ch, txrx)   _SB_MAKEVALUE(_SB_MAKEMASK(8, 0), S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_CHANNEL(ch, txrx)  _SB_MAKEVALUE(_SB_MAKEMASK(8, 0), S_MAC_STATUS_CH_OFFSET(ch, txrx))
 #define M_MAC_STATUS_EOP_COUNT(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_COUNT, S_MAC_STATUS_CH_OFFSET(ch, txrx))
 #define M_MAC_STATUS_EOP_TIMER(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_TIMER, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_EOP_SEEN(ch, txrx)  _SB_MAKEVALUE(M_MAC_INT_EOP_SEEN, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_HWM(ch, txrx)       _SB_MAKEVALUE(M_MAC_INT_HWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_LWM(ch, txrx)       _SB_MAKEVALUE(M_MAC_INT_LWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DSCR(ch, txrx)      _SB_MAKEVALUE(M_MAC_INT_DSCR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_ERR(ch, txrx)       _SB_MAKEVALUE(M_MAC_INT_ERR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DZERO(ch, txrx)     _SB_MAKEVALUE(M_MAC_INT_DZERO, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DROP(ch, txrx)      _SB_MAKEVALUE(M_MAC_INT_DROP, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_OTHER_ERR           _SB_MAKEVALUE(_SB_MAKEMASK(7, 0), 40)
-
-
-#define M_MAC_RX_UNDRFL             _SB_MAKEMASK1(40)
-#define M_MAC_RX_OVRFL              _SB_MAKEMASK1(41)
-#define M_MAC_TX_UNDRFL             _SB_MAKEMASK1(42)
-#define M_MAC_TX_OVRFL              _SB_MAKEMASK1(43)
-#define M_MAC_LTCOL_ERR             _SB_MAKEMASK1(44)
-#define M_MAC_EXCOL_ERR             _SB_MAKEMASK1(45)
-#define M_MAC_CNTR_OVRFL_ERR        _SB_MAKEMASK1(46)
+#define M_MAC_STATUS_EOP_SEEN(ch, txrx)         _SB_MAKEVALUE(M_MAC_INT_EOP_SEEN, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_HWM(ch, txrx)      _SB_MAKEVALUE(M_MAC_INT_HWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_LWM(ch, txrx)      _SB_MAKEVALUE(M_MAC_INT_LWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DSCR(ch, txrx)     _SB_MAKEVALUE(M_MAC_INT_DSCR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_ERR(ch, txrx)      _SB_MAKEVALUE(M_MAC_INT_ERR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DZERO(ch, txrx)    _SB_MAKEVALUE(M_MAC_INT_DZERO, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DROP(ch, txrx)     _SB_MAKEVALUE(M_MAC_INT_DROP, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_OTHER_ERR          _SB_MAKEVALUE(_SB_MAKEMASK(7, 0), 40)
+
+
+#define M_MAC_RX_UNDRFL                    _SB_MAKEMASK1(40)
+#define M_MAC_RX_OVRFL             _SB_MAKEMASK1(41)
+#define M_MAC_TX_UNDRFL                    _SB_MAKEMASK1(42)
+#define M_MAC_TX_OVRFL             _SB_MAKEMASK1(43)
+#define M_MAC_LTCOL_ERR                    _SB_MAKEMASK1(44)
+#define M_MAC_EXCOL_ERR                    _SB_MAKEMASK1(45)
+#define M_MAC_CNTR_OVRFL_ERR       _SB_MAKEMASK1(46)
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_SPLIT_EN             _SB_MAKEMASK1(47)   /* interrupt mask only */
+#define M_MAC_SPLIT_EN             _SB_MAKEMASK1(47)   /* interrupt mask only */
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
-#define S_MAC_COUNTER_ADDR          _SB_MAKE64(47)
-#define M_MAC_COUNTER_ADDR          _SB_MAKEMASK(5, S_MAC_COUNTER_ADDR)
-#define V_MAC_COUNTER_ADDR(x)       _SB_MAKEVALUE(x, S_MAC_COUNTER_ADDR)
-#define G_MAC_COUNTER_ADDR(x)       _SB_GETVALUE(x, S_MAC_COUNTER_ADDR, M_MAC_COUNTER_ADDR)
+#define S_MAC_COUNTER_ADDR         _SB_MAKE64(47)
+#define M_MAC_COUNTER_ADDR         _SB_MAKEMASK(5, S_MAC_COUNTER_ADDR)
+#define V_MAC_COUNTER_ADDR(x)      _SB_MAKEVALUE(x, S_MAC_COUNTER_ADDR)
+#define G_MAC_COUNTER_ADDR(x)      _SB_GETVALUE(x, S_MAC_COUNTER_ADDR, M_MAC_COUNTER_ADDR)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_TX_PAUSE_ON          _SB_MAKEMASK1(52)
  * Register: MAC_FIFO_PTRS_2
  */
 
-#define S_MAC_TX_WRPTR              _SB_MAKE64(0)
-#define M_MAC_TX_WRPTR              _SB_MAKEMASK(6, S_MAC_TX_WRPTR)
-#define V_MAC_TX_WRPTR(x)           _SB_MAKEVALUE(x, S_MAC_TX_WRPTR)
-#define G_MAC_TX_WRPTR(x)           _SB_GETVALUE(x, S_MAC_TX_WRPTR, M_MAC_TX_WRPTR)
+#define S_MAC_TX_WRPTR             _SB_MAKE64(0)
+#define M_MAC_TX_WRPTR             _SB_MAKEMASK(6, S_MAC_TX_WRPTR)
+#define V_MAC_TX_WRPTR(x)          _SB_MAKEVALUE(x, S_MAC_TX_WRPTR)
+#define G_MAC_TX_WRPTR(x)          _SB_GETVALUE(x, S_MAC_TX_WRPTR, M_MAC_TX_WRPTR)
 
-#define S_MAC_TX_RDPTR              _SB_MAKE64(8)
-#define M_MAC_TX_RDPTR              _SB_MAKEMASK(6, S_MAC_TX_RDPTR)
-#define V_MAC_TX_RDPTR(x)           _SB_MAKEVALUE(x, S_MAC_TX_RDPTR)
-#define G_MAC_TX_RDPTR(x)           _SB_GETVALUE(x, S_MAC_TX_RDPTR, M_MAC_TX_RDPTR)
+#define S_MAC_TX_RDPTR             _SB_MAKE64(8)
+#define M_MAC_TX_RDPTR             _SB_MAKEMASK(6, S_MAC_TX_RDPTR)
+#define V_MAC_TX_RDPTR(x)          _SB_MAKEVALUE(x, S_MAC_TX_RDPTR)
+#define G_MAC_TX_RDPTR(x)          _SB_GETVALUE(x, S_MAC_TX_RDPTR, M_MAC_TX_RDPTR)
 
-#define S_MAC_RX_WRPTR              _SB_MAKE64(16)
-#define M_MAC_RX_WRPTR              _SB_MAKEMASK(6, S_MAC_RX_WRPTR)
-#define V_MAC_RX_WRPTR(x)           _SB_MAKEVALUE(x, S_MAC_RX_WRPTR)
-#define G_MAC_RX_WRPTR(x)           _SB_GETVALUE(x, S_MAC_RX_WRPTR, M_MAC_TX_WRPTR)
+#define S_MAC_RX_WRPTR             _SB_MAKE64(16)
+#define M_MAC_RX_WRPTR             _SB_MAKEMASK(6, S_MAC_RX_WRPTR)
+#define V_MAC_RX_WRPTR(x)          _SB_MAKEVALUE(x, S_MAC_RX_WRPTR)
+#define G_MAC_RX_WRPTR(x)          _SB_GETVALUE(x, S_MAC_RX_WRPTR, M_MAC_TX_WRPTR)
 
-#define S_MAC_RX_RDPTR              _SB_MAKE64(24)
-#define M_MAC_RX_RDPTR              _SB_MAKEMASK(6, S_MAC_RX_RDPTR)
-#define V_MAC_RX_RDPTR(x)           _SB_MAKEVALUE(x, S_MAC_RX_RDPTR)
-#define G_MAC_RX_RDPTR(x)           _SB_GETVALUE(x, S_MAC_RX_RDPTR, M_MAC_TX_RDPTR)
+#define S_MAC_RX_RDPTR             _SB_MAKE64(24)
+#define M_MAC_RX_RDPTR             _SB_MAKEMASK(6, S_MAC_RX_RDPTR)
+#define V_MAC_RX_RDPTR(x)          _SB_MAKEVALUE(x, S_MAC_RX_RDPTR)
+#define G_MAC_RX_RDPTR(x)          _SB_GETVALUE(x, S_MAC_RX_RDPTR, M_MAC_TX_RDPTR)
 
 /*
- * MAC Fifo End Of Packet Count Registers (Table 9-20)  [Debug register]
+ * MAC Fifo End Of Packet Count Registers (Table 9-20) [Debug register]
  * Register: MAC_EOPCNT_0
  * Register: MAC_EOPCNT_1
  * Register: MAC_EOPCNT_2
  */
 
-#define S_MAC_TX_EOP_COUNTER        _SB_MAKE64(0)
-#define M_MAC_TX_EOP_COUNTER        _SB_MAKEMASK(6, S_MAC_TX_EOP_COUNTER)
-#define V_MAC_TX_EOP_COUNTER(x)     _SB_MAKEVALUE(x, S_MAC_TX_EOP_COUNTER)
-#define G_MAC_TX_EOP_COUNTER(x)     _SB_GETVALUE(x, S_MAC_TX_EOP_COUNTER, M_MAC_TX_EOP_COUNTER)
+#define S_MAC_TX_EOP_COUNTER       _SB_MAKE64(0)
+#define M_MAC_TX_EOP_COUNTER       _SB_MAKEMASK(6, S_MAC_TX_EOP_COUNTER)
+#define V_MAC_TX_EOP_COUNTER(x)            _SB_MAKEVALUE(x, S_MAC_TX_EOP_COUNTER)
+#define G_MAC_TX_EOP_COUNTER(x)            _SB_GETVALUE(x, S_MAC_TX_EOP_COUNTER, M_MAC_TX_EOP_COUNTER)
 
-#define S_MAC_RX_EOP_COUNTER        _SB_MAKE64(8)
-#define M_MAC_RX_EOP_COUNTER        _SB_MAKEMASK(6, S_MAC_RX_EOP_COUNTER)
-#define V_MAC_RX_EOP_COUNTER(x)     _SB_MAKEVALUE(x, S_MAC_RX_EOP_COUNTER)
-#define G_MAC_RX_EOP_COUNTER(x)     _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER)
+#define S_MAC_RX_EOP_COUNTER       _SB_MAKE64(8)
+#define M_MAC_RX_EOP_COUNTER       _SB_MAKEMASK(6, S_MAC_RX_EOP_COUNTER)
+#define V_MAC_RX_EOP_COUNTER(x)            _SB_MAKEVALUE(x, S_MAC_RX_EOP_COUNTER)
+#define G_MAC_RX_EOP_COUNTER(x)            _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER)
 
 /*
  * MAC Receive Address Filter Exact Match Registers (Table 9-21)
  * Register: MAC_TYPE_CFG_2
  */
 
-#define S_TYPECFG_TYPESIZE      _SB_MAKE64(16)
+#define S_TYPECFG_TYPESIZE     _SB_MAKE64(16)
 
 #define S_TYPECFG_TYPE0                _SB_MAKE64(0)
-#define M_TYPECFG_TYPE0         _SB_MAKEMASK(16, S_TYPECFG_TYPE0)
-#define V_TYPECFG_TYPE0(x)      _SB_MAKEVALUE(x, S_TYPECFG_TYPE0)
-#define G_TYPECFG_TYPE0(x)      _SB_GETVALUE(x, S_TYPECFG_TYPE0, M_TYPECFG_TYPE0)
+#define M_TYPECFG_TYPE0                _SB_MAKEMASK(16, S_TYPECFG_TYPE0)
+#define V_TYPECFG_TYPE0(x)     _SB_MAKEVALUE(x, S_TYPECFG_TYPE0)
+#define G_TYPECFG_TYPE0(x)     _SB_GETVALUE(x, S_TYPECFG_TYPE0, M_TYPECFG_TYPE0)
 
 #define S_TYPECFG_TYPE1                _SB_MAKE64(0)
-#define M_TYPECFG_TYPE1         _SB_MAKEMASK(16, S_TYPECFG_TYPE1)
-#define V_TYPECFG_TYPE1(x)      _SB_MAKEVALUE(x, S_TYPECFG_TYPE1)
-#define G_TYPECFG_TYPE1(x)      _SB_GETVALUE(x, S_TYPECFG_TYPE1, M_TYPECFG_TYPE1)
+#define M_TYPECFG_TYPE1                _SB_MAKEMASK(16, S_TYPECFG_TYPE1)
+#define V_TYPECFG_TYPE1(x)     _SB_MAKEVALUE(x, S_TYPECFG_TYPE1)
+#define G_TYPECFG_TYPE1(x)     _SB_GETVALUE(x, S_TYPECFG_TYPE1, M_TYPECFG_TYPE1)
 
 #define S_TYPECFG_TYPE2                _SB_MAKE64(0)
-#define M_TYPECFG_TYPE2         _SB_MAKEMASK(16, S_TYPECFG_TYPE2)
-#define V_TYPECFG_TYPE2(x)      _SB_MAKEVALUE(x, S_TYPECFG_TYPE2)
-#define G_TYPECFG_TYPE2(x)      _SB_GETVALUE(x, S_TYPECFG_TYPE2, M_TYPECFG_TYPE2)
+#define M_TYPECFG_TYPE2                _SB_MAKEMASK(16, S_TYPECFG_TYPE2)
+#define V_TYPECFG_TYPE2(x)     _SB_MAKEVALUE(x, S_TYPECFG_TYPE2)
+#define G_TYPECFG_TYPE2(x)     _SB_GETVALUE(x, S_TYPECFG_TYPE2, M_TYPECFG_TYPE2)
 
 #define S_TYPECFG_TYPE3                _SB_MAKE64(0)
-#define M_TYPECFG_TYPE3         _SB_MAKEMASK(16, S_TYPECFG_TYPE3)
-#define V_TYPECFG_TYPE3(x)      _SB_MAKEVALUE(x, S_TYPECFG_TYPE3)
-#define G_TYPECFG_TYPE3(x)      _SB_GETVALUE(x, S_TYPECFG_TYPE3, M_TYPECFG_TYPE3)
+#define M_TYPECFG_TYPE3                _SB_MAKEMASK(16, S_TYPECFG_TYPE3)
+#define V_TYPECFG_TYPE3(x)     _SB_MAKEVALUE(x, S_TYPECFG_TYPE3)
+#define G_TYPECFG_TYPE3(x)     _SB_GETVALUE(x, S_TYPECFG_TYPE3, M_TYPECFG_TYPE3)
 
 /*
  * MAC Receive Address Filter Control Registers (Table 9-24)
  * Register: MAC_ADFILTER_CFG_2
  */
 
-#define M_MAC_ALLPKT_EN                _SB_MAKEMASK1(0)
-#define M_MAC_UCAST_EN          _SB_MAKEMASK1(1)
-#define M_MAC_UCAST_INV         _SB_MAKEMASK1(2)
-#define M_MAC_MCAST_EN          _SB_MAKEMASK1(3)
-#define M_MAC_MCAST_INV         _SB_MAKEMASK1(4)
-#define M_MAC_BCAST_EN          _SB_MAKEMASK1(5)
-#define M_MAC_DIRECT_INV        _SB_MAKEMASK1(6)
+#define M_MAC_ALLPKT_EN                _SB_MAKEMASK1(0)
+#define M_MAC_UCAST_EN         _SB_MAKEMASK1(1)
+#define M_MAC_UCAST_INV                _SB_MAKEMASK1(2)
+#define M_MAC_MCAST_EN         _SB_MAKEMASK1(3)
+#define M_MAC_MCAST_INV                _SB_MAKEMASK1(4)
+#define M_MAC_BCAST_EN         _SB_MAKEMASK1(5)
+#define M_MAC_DIRECT_INV       _SB_MAKEMASK1(6)
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define M_MAC_ALLMCAST_EN      _SB_MAKEMASK1(7)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
-#define S_MAC_IPHDR_OFFSET      _SB_MAKE64(8)
-#define M_MAC_IPHDR_OFFSET      _SB_MAKEMASK(8, S_MAC_IPHDR_OFFSET)
+#define S_MAC_IPHDR_OFFSET     _SB_MAKE64(8)
+#define M_MAC_IPHDR_OFFSET     _SB_MAKEMASK(8, S_MAC_IPHDR_OFFSET)
 #define V_MAC_IPHDR_OFFSET(x)  _SB_MAKEVALUE(x, S_MAC_IPHDR_OFFSET)
 #define G_MAC_IPHDR_OFFSET(x)  _SB_GETVALUE(x, S_MAC_IPHDR_OFFSET, M_MAC_IPHDR_OFFSET)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_RX_CRC_OFFSET     _SB_MAKE64(16)
-#define M_MAC_RX_CRC_OFFSET     _SB_MAKEMASK(8, S_MAC_RX_CRC_OFFSET)
+#define S_MAC_RX_CRC_OFFSET    _SB_MAKE64(16)
+#define M_MAC_RX_CRC_OFFSET    _SB_MAKEMASK(8, S_MAC_RX_CRC_OFFSET)
 #define V_MAC_RX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_RX_CRC_OFFSET)
 #define G_MAC_RX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_RX_CRC_OFFSET, M_MAC_RX_CRC_OFFSET)
 
-#define S_MAC_RX_PKT_OFFSET     _SB_MAKE64(24)
-#define M_MAC_RX_PKT_OFFSET     _SB_MAKEMASK(8, S_MAC_RX_PKT_OFFSET)
+#define S_MAC_RX_PKT_OFFSET    _SB_MAKE64(24)
+#define M_MAC_RX_PKT_OFFSET    _SB_MAKEMASK(8, S_MAC_RX_PKT_OFFSET)
 #define V_MAC_RX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_RX_PKT_OFFSET)
 #define G_MAC_RX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_RX_PKT_OFFSET, M_MAC_RX_PKT_OFFSET)
 
 #define M_MAC_FWDPAUSE_EN      _SB_MAKEMASK1(32)
 #define M_MAC_VLAN_DET_EN      _SB_MAKEMASK1(33)
 
-#define S_MAC_RX_CH_MSN_SEL     _SB_MAKE64(34)
-#define M_MAC_RX_CH_MSN_SEL     _SB_MAKEMASK(8, S_MAC_RX_CH_MSN_SEL)
+#define S_MAC_RX_CH_MSN_SEL    _SB_MAKE64(34)
+#define M_MAC_RX_CH_MSN_SEL    _SB_MAKEMASK(8, S_MAC_RX_CH_MSN_SEL)
 #define V_MAC_RX_CH_MSN_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_MSN_SEL)
 #define G_MAC_RX_CH_MSN_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_MSN_SEL, M_MAC_RX_CH_MSN_SEL)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
index 15048dc..8368e41 100644 (file)
@@ -1,7 +1,7 @@
 /*  *********************************************************************
     *  SB1250 Board Support Package
     *
-    *  Memory Controller constants              File: sb1250_mc.h
+    *  Memory Controller constants             File: sb1250_mc.h
     *
     *  This module contains constants and macros useful for
     *  programming the memory controller.
  * Memory Channel Config Register (table 6-14)
  */
 
-#define S_MC_RESERVED0              0
-#define M_MC_RESERVED0              _SB_MAKEMASK(8, S_MC_RESERVED0)
+#define S_MC_RESERVED0             0
+#define M_MC_RESERVED0             _SB_MAKEMASK(8, S_MC_RESERVED0)
 
-#define S_MC_CHANNEL_SEL            8
-#define M_MC_CHANNEL_SEL            _SB_MAKEMASK(8, S_MC_CHANNEL_SEL)
-#define V_MC_CHANNEL_SEL(x)         _SB_MAKEVALUE(x, S_MC_CHANNEL_SEL)
-#define G_MC_CHANNEL_SEL(x)         _SB_GETVALUE(x, S_MC_CHANNEL_SEL, M_MC_CHANNEL_SEL)
+#define S_MC_CHANNEL_SEL           8
+#define M_MC_CHANNEL_SEL           _SB_MAKEMASK(8, S_MC_CHANNEL_SEL)
+#define V_MC_CHANNEL_SEL(x)        _SB_MAKEVALUE(x, S_MC_CHANNEL_SEL)
+#define G_MC_CHANNEL_SEL(x)        _SB_GETVALUE(x, S_MC_CHANNEL_SEL, M_MC_CHANNEL_SEL)
 
-#define S_MC_BANK0_MAP              16
-#define M_MC_BANK0_MAP              _SB_MAKEMASK(4, S_MC_BANK0_MAP)
-#define V_MC_BANK0_MAP(x)           _SB_MAKEVALUE(x, S_MC_BANK0_MAP)
-#define G_MC_BANK0_MAP(x)           _SB_GETVALUE(x, S_MC_BANK0_MAP, M_MC_BANK0_MAP)
+#define S_MC_BANK0_MAP             16
+#define M_MC_BANK0_MAP             _SB_MAKEMASK(4, S_MC_BANK0_MAP)
+#define V_MC_BANK0_MAP(x)          _SB_MAKEVALUE(x, S_MC_BANK0_MAP)
+#define G_MC_BANK0_MAP(x)          _SB_GETVALUE(x, S_MC_BANK0_MAP, M_MC_BANK0_MAP)
 
-#define K_MC_BANK0_MAP_DEFAULT      0x00
-#define V_MC_BANK0_MAP_DEFAULT      V_MC_BANK0_MAP(K_MC_BANK0_MAP_DEFAULT)
+#define K_MC_BANK0_MAP_DEFAULT     0x00
+#define V_MC_BANK0_MAP_DEFAULT     V_MC_BANK0_MAP(K_MC_BANK0_MAP_DEFAULT)
 
-#define S_MC_BANK1_MAP              20
-#define M_MC_BANK1_MAP              _SB_MAKEMASK(4, S_MC_BANK1_MAP)
-#define V_MC_BANK1_MAP(x)           _SB_MAKEVALUE(x, S_MC_BANK1_MAP)
-#define G_MC_BANK1_MAP(x)           _SB_GETVALUE(x, S_MC_BANK1_MAP, M_MC_BANK1_MAP)
+#define S_MC_BANK1_MAP             20
+#define M_MC_BANK1_MAP             _SB_MAKEMASK(4, S_MC_BANK1_MAP)
+#define V_MC_BANK1_MAP(x)          _SB_MAKEVALUE(x, S_MC_BANK1_MAP)
+#define G_MC_BANK1_MAP(x)          _SB_GETVALUE(x, S_MC_BANK1_MAP, M_MC_BANK1_MAP)
 
-#define K_MC_BANK1_MAP_DEFAULT      0x08
-#define V_MC_BANK1_MAP_DEFAULT      V_MC_BANK1_MAP(K_MC_BANK1_MAP_DEFAULT)
+#define K_MC_BANK1_MAP_DEFAULT     0x08
+#define V_MC_BANK1_MAP_DEFAULT     V_MC_BANK1_MAP(K_MC_BANK1_MAP_DEFAULT)
 
-#define S_MC_BANK2_MAP              24
-#define M_MC_BANK2_MAP              _SB_MAKEMASK(4, S_MC_BANK2_MAP)
-#define V_MC_BANK2_MAP(x)           _SB_MAKEVALUE(x, S_MC_BANK2_MAP)
-#define G_MC_BANK2_MAP(x)           _SB_GETVALUE(x, S_MC_BANK2_MAP, M_MC_BANK2_MAP)
+#define S_MC_BANK2_MAP             24
+#define M_MC_BANK2_MAP             _SB_MAKEMASK(4, S_MC_BANK2_MAP)
+#define V_MC_BANK2_MAP(x)          _SB_MAKEVALUE(x, S_MC_BANK2_MAP)
+#define G_MC_BANK2_MAP(x)          _SB_GETVALUE(x, S_MC_BANK2_MAP, M_MC_BANK2_MAP)
 
-#define K_MC_BANK2_MAP_DEFAULT      0x09
-#define V_MC_BANK2_MAP_DEFAULT      V_MC_BANK2_MAP(K_MC_BANK2_MAP_DEFAULT)
+#define K_MC_BANK2_MAP_DEFAULT     0x09
+#define V_MC_BANK2_MAP_DEFAULT     V_MC_BANK2_MAP(K_MC_BANK2_MAP_DEFAULT)
 
-#define S_MC_BANK3_MAP              28
-#define M_MC_BANK3_MAP              _SB_MAKEMASK(4, S_MC_BANK3_MAP)
-#define V_MC_BANK3_MAP(x)           _SB_MAKEVALUE(x, S_MC_BANK3_MAP)
-#define G_MC_BANK3_MAP(x)           _SB_GETVALUE(x, S_MC_BANK3_MAP, M_MC_BANK3_MAP)
+#define S_MC_BANK3_MAP             28
+#define M_MC_BANK3_MAP             _SB_MAKEMASK(4, S_MC_BANK3_MAP)
+#define V_MC_BANK3_MAP(x)          _SB_MAKEVALUE(x, S_MC_BANK3_MAP)
+#define G_MC_BANK3_MAP(x)          _SB_GETVALUE(x, S_MC_BANK3_MAP, M_MC_BANK3_MAP)
 
-#define K_MC_BANK3_MAP_DEFAULT      0x0C
-#define V_MC_BANK3_MAP_DEFAULT      V_MC_BANK3_MAP(K_MC_BANK3_MAP_DEFAULT)
+#define K_MC_BANK3_MAP_DEFAULT     0x0C
+#define V_MC_BANK3_MAP_DEFAULT     V_MC_BANK3_MAP(K_MC_BANK3_MAP_DEFAULT)
 
-#define M_MC_RESERVED1              _SB_MAKEMASK(8, 32)
+#define M_MC_RESERVED1             _SB_MAKEMASK(8, 32)
 
 #define S_MC_QUEUE_SIZE                    40
-#define M_MC_QUEUE_SIZE             _SB_MAKEMASK(4, S_MC_QUEUE_SIZE)
-#define V_MC_QUEUE_SIZE(x)          _SB_MAKEVALUE(x, S_MC_QUEUE_SIZE)
-#define G_MC_QUEUE_SIZE(x)          _SB_GETVALUE(x, S_MC_QUEUE_SIZE, M_MC_QUEUE_SIZE)
-#define V_MC_QUEUE_SIZE_DEFAULT     V_MC_QUEUE_SIZE(0x0A)
-
-#define S_MC_AGE_LIMIT              44
-#define M_MC_AGE_LIMIT              _SB_MAKEMASK(4, S_MC_AGE_LIMIT)
-#define V_MC_AGE_LIMIT(x)           _SB_MAKEVALUE(x, S_MC_AGE_LIMIT)
-#define G_MC_AGE_LIMIT(x)           _SB_GETVALUE(x, S_MC_AGE_LIMIT, M_MC_AGE_LIMIT)
-#define V_MC_AGE_LIMIT_DEFAULT      V_MC_AGE_LIMIT(8)
-
-#define S_MC_WR_LIMIT               48
-#define M_MC_WR_LIMIT               _SB_MAKEMASK(4, S_MC_WR_LIMIT)
-#define V_MC_WR_LIMIT(x)            _SB_MAKEVALUE(x, S_MC_WR_LIMIT)
-#define G_MC_WR_LIMIT(x)            _SB_GETVALUE(x, S_MC_WR_LIMIT, M_MC_WR_LIMIT)
-#define V_MC_WR_LIMIT_DEFAULT       V_MC_WR_LIMIT(5)
+#define M_MC_QUEUE_SIZE                    _SB_MAKEMASK(4, S_MC_QUEUE_SIZE)
+#define V_MC_QUEUE_SIZE(x)         _SB_MAKEVALUE(x, S_MC_QUEUE_SIZE)
+#define G_MC_QUEUE_SIZE(x)         _SB_GETVALUE(x, S_MC_QUEUE_SIZE, M_MC_QUEUE_SIZE)
+#define V_MC_QUEUE_SIZE_DEFAULT            V_MC_QUEUE_SIZE(0x0A)
+
+#define S_MC_AGE_LIMIT             44
+#define M_MC_AGE_LIMIT             _SB_MAKEMASK(4, S_MC_AGE_LIMIT)
+#define V_MC_AGE_LIMIT(x)          _SB_MAKEVALUE(x, S_MC_AGE_LIMIT)
+#define G_MC_AGE_LIMIT(x)          _SB_GETVALUE(x, S_MC_AGE_LIMIT, M_MC_AGE_LIMIT)
+#define V_MC_AGE_LIMIT_DEFAULT     V_MC_AGE_LIMIT(8)
+
+#define S_MC_WR_LIMIT              48
+#define M_MC_WR_LIMIT              _SB_MAKEMASK(4, S_MC_WR_LIMIT)
+#define V_MC_WR_LIMIT(x)           _SB_MAKEVALUE(x, S_MC_WR_LIMIT)
+#define G_MC_WR_LIMIT(x)           _SB_GETVALUE(x, S_MC_WR_LIMIT, M_MC_WR_LIMIT)
+#define V_MC_WR_LIMIT_DEFAULT      V_MC_WR_LIMIT(5)
 
 #define M_MC_IOB1HIGHPRIORITY      _SB_MAKEMASK1(52)
 
-#define M_MC_RESERVED2              _SB_MAKEMASK(3, 53)
+#define M_MC_RESERVED2             _SB_MAKEMASK(3, 53)
 
-#define S_MC_CS_MODE                56
-#define M_MC_CS_MODE                _SB_MAKEMASK(4, S_MC_CS_MODE)
-#define V_MC_CS_MODE(x)             _SB_MAKEVALUE(x, S_MC_CS_MODE)
-#define G_MC_CS_MODE(x)             _SB_GETVALUE(x, S_MC_CS_MODE, M_MC_CS_MODE)
+#define S_MC_CS_MODE               56
+#define M_MC_CS_MODE               _SB_MAKEMASK(4, S_MC_CS_MODE)
+#define V_MC_CS_MODE(x)                    _SB_MAKEVALUE(x, S_MC_CS_MODE)
+#define G_MC_CS_MODE(x)                    _SB_GETVALUE(x, S_MC_CS_MODE, M_MC_CS_MODE)
 
-#define K_MC_CS_MODE_MSB_CS         0
-#define K_MC_CS_MODE_INTLV_CS       15
+#define K_MC_CS_MODE_MSB_CS        0
+#define K_MC_CS_MODE_INTLV_CS      15
 #define K_MC_CS_MODE_MIXED_CS_10    12
 #define K_MC_CS_MODE_MIXED_CS_30    6
 #define K_MC_CS_MODE_MIXED_CS_32    3
 
-#define V_MC_CS_MODE_MSB_CS         V_MC_CS_MODE(K_MC_CS_MODE_MSB_CS)
-#define V_MC_CS_MODE_INTLV_CS       V_MC_CS_MODE(K_MC_CS_MODE_INTLV_CS)
+#define V_MC_CS_MODE_MSB_CS        V_MC_CS_MODE(K_MC_CS_MODE_MSB_CS)
+#define V_MC_CS_MODE_INTLV_CS      V_MC_CS_MODE(K_MC_CS_MODE_INTLV_CS)
 #define V_MC_CS_MODE_MIXED_CS_10    V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_10)
 #define V_MC_CS_MODE_MIXED_CS_30    V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_30)
 #define V_MC_CS_MODE_MIXED_CS_32    V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_32)
 
-#define M_MC_ECC_DISABLE            _SB_MAKEMASK1(60)
-#define M_MC_BERR_DISABLE           _SB_MAKEMASK1(61)
-#define M_MC_FORCE_SEQ              _SB_MAKEMASK1(62)
-#define M_MC_DEBUG                  _SB_MAKEMASK1(63)
+#define M_MC_ECC_DISABLE           _SB_MAKEMASK1(60)
+#define M_MC_BERR_DISABLE          _SB_MAKEMASK1(61)
+#define M_MC_FORCE_SEQ             _SB_MAKEMASK1(62)
+#define M_MC_DEBUG                 _SB_MAKEMASK1(63)
 
-#define V_MC_CONFIG_DEFAULT     V_MC_WR_LIMIT_DEFAULT | V_MC_AGE_LIMIT_DEFAULT | \
+#define V_MC_CONFIG_DEFAULT    V_MC_WR_LIMIT_DEFAULT | V_MC_AGE_LIMIT_DEFAULT | \
                                V_MC_BANK0_MAP_DEFAULT | V_MC_BANK1_MAP_DEFAULT | \
                                V_MC_BANK2_MAP_DEFAULT | V_MC_BANK3_MAP_DEFAULT | V_MC_CHANNEL_SEL(0) | \
-                                M_MC_IOB1HIGHPRIORITY | V_MC_QUEUE_SIZE_DEFAULT
+                               M_MC_IOB1HIGHPRIORITY | V_MC_QUEUE_SIZE_DEFAULT
 
 
 /*
  * Note: this field has been updated to be consistent with the errata to 0.2
  */
 
-#define S_MC_CLK_RATIO              0
-#define M_MC_CLK_RATIO              _SB_MAKEMASK(4, S_MC_CLK_RATIO)
-#define V_MC_CLK_RATIO(x)           _SB_MAKEVALUE(x, S_MC_CLK_RATIO)
-#define G_MC_CLK_RATIO(x)           _SB_GETVALUE(x, S_MC_CLK_RATIO, M_MC_CLK_RATIO)
+#define S_MC_CLK_RATIO             0
+#define M_MC_CLK_RATIO             _SB_MAKEMASK(4, S_MC_CLK_RATIO)
+#define V_MC_CLK_RATIO(x)          _SB_MAKEVALUE(x, S_MC_CLK_RATIO)
+#define G_MC_CLK_RATIO(x)          _SB_GETVALUE(x, S_MC_CLK_RATIO, M_MC_CLK_RATIO)
 
-#define K_MC_CLK_RATIO_2X           4
-#define K_MC_CLK_RATIO_25X          5
-#define K_MC_CLK_RATIO_3X           6
-#define K_MC_CLK_RATIO_35X          7
-#define K_MC_CLK_RATIO_4X           8
+#define K_MC_CLK_RATIO_2X          4
+#define K_MC_CLK_RATIO_25X         5
+#define K_MC_CLK_RATIO_3X          6
+#define K_MC_CLK_RATIO_35X         7
+#define K_MC_CLK_RATIO_4X          8
 #define K_MC_CLK_RATIO_45X         9
 
 #define V_MC_CLK_RATIO_2X          V_MC_CLK_RATIO(K_MC_CLK_RATIO_2X)
-#define V_MC_CLK_RATIO_25X          V_MC_CLK_RATIO(K_MC_CLK_RATIO_25X)
-#define V_MC_CLK_RATIO_3X           V_MC_CLK_RATIO(K_MC_CLK_RATIO_3X)
-#define V_MC_CLK_RATIO_35X          V_MC_CLK_RATIO(K_MC_CLK_RATIO_35X)
-#define V_MC_CLK_RATIO_4X           V_MC_CLK_RATIO(K_MC_CLK_RATIO_4X)
-#define V_MC_CLK_RATIO_45X          V_MC_CLK_RATIO(K_MC_CLK_RATIO_45X)
-#define V_MC_CLK_RATIO_DEFAULT      V_MC_CLK_RATIO_25X
-
-#define S_MC_REF_RATE                8
-#define M_MC_REF_RATE                _SB_MAKEMASK(8, S_MC_REF_RATE)
-#define V_MC_REF_RATE(x)             _SB_MAKEVALUE(x, S_MC_REF_RATE)
-#define G_MC_REF_RATE(x)             _SB_GETVALUE(x, S_MC_REF_RATE, M_MC_REF_RATE)
-
-#define K_MC_REF_RATE_100MHz         0x62
-#define K_MC_REF_RATE_133MHz         0x81
-#define K_MC_REF_RATE_200MHz         0xC4
-
-#define V_MC_REF_RATE_100MHz         V_MC_REF_RATE(K_MC_REF_RATE_100MHz)
-#define V_MC_REF_RATE_133MHz         V_MC_REF_RATE(K_MC_REF_RATE_133MHz)
-#define V_MC_REF_RATE_200MHz         V_MC_REF_RATE(K_MC_REF_RATE_200MHz)
-#define V_MC_REF_RATE_DEFAULT        V_MC_REF_RATE_100MHz
-
-#define S_MC_CLOCK_DRIVE             16
-#define M_MC_CLOCK_DRIVE             _SB_MAKEMASK(4, S_MC_CLOCK_DRIVE)
-#define V_MC_CLOCK_DRIVE(x)          _SB_MAKEVALUE(x, S_MC_CLOCK_DRIVE)
-#define G_MC_CLOCK_DRIVE(x)          _SB_GETVALUE(x, S_MC_CLOCK_DRIVE, M_MC_CLOCK_DRIVE)
+#define V_MC_CLK_RATIO_25X         V_MC_CLK_RATIO(K_MC_CLK_RATIO_25X)
+#define V_MC_CLK_RATIO_3X          V_MC_CLK_RATIO(K_MC_CLK_RATIO_3X)
+#define V_MC_CLK_RATIO_35X         V_MC_CLK_RATIO(K_MC_CLK_RATIO_35X)
+#define V_MC_CLK_RATIO_4X          V_MC_CLK_RATIO(K_MC_CLK_RATIO_4X)
+#define V_MC_CLK_RATIO_45X         V_MC_CLK_RATIO(K_MC_CLK_RATIO_45X)
+#define V_MC_CLK_RATIO_DEFAULT     V_MC_CLK_RATIO_25X
+
+#define S_MC_REF_RATE               8
+#define M_MC_REF_RATE               _SB_MAKEMASK(8, S_MC_REF_RATE)
+#define V_MC_REF_RATE(x)            _SB_MAKEVALUE(x, S_MC_REF_RATE)
+#define G_MC_REF_RATE(x)            _SB_GETVALUE(x, S_MC_REF_RATE, M_MC_REF_RATE)
+
+#define K_MC_REF_RATE_100MHz        0x62
+#define K_MC_REF_RATE_133MHz        0x81
+#define K_MC_REF_RATE_200MHz        0xC4
+
+#define V_MC_REF_RATE_100MHz        V_MC_REF_RATE(K_MC_REF_RATE_100MHz)
+#define V_MC_REF_RATE_133MHz        V_MC_REF_RATE(K_MC_REF_RATE_133MHz)
+#define V_MC_REF_RATE_200MHz        V_MC_REF_RATE(K_MC_REF_RATE_200MHz)
+#define V_MC_REF_RATE_DEFAULT       V_MC_REF_RATE_100MHz
+
+#define S_MC_CLOCK_DRIVE            16
+#define M_MC_CLOCK_DRIVE            _SB_MAKEMASK(4, S_MC_CLOCK_DRIVE)
+#define V_MC_CLOCK_DRIVE(x)         _SB_MAKEVALUE(x, S_MC_CLOCK_DRIVE)
+#define G_MC_CLOCK_DRIVE(x)         _SB_GETVALUE(x, S_MC_CLOCK_DRIVE, M_MC_CLOCK_DRIVE)
 #define V_MC_CLOCK_DRIVE_DEFAULT     V_MC_CLOCK_DRIVE(0xF)
 
-#define S_MC_DATA_DRIVE              20
-#define M_MC_DATA_DRIVE              _SB_MAKEMASK(4, S_MC_DATA_DRIVE)
-#define V_MC_DATA_DRIVE(x)           _SB_MAKEVALUE(x, S_MC_DATA_DRIVE)
-#define G_MC_DATA_DRIVE(x)           _SB_GETVALUE(x, S_MC_DATA_DRIVE, M_MC_DATA_DRIVE)
-#define V_MC_DATA_DRIVE_DEFAULT      V_MC_DATA_DRIVE(0x0)
+#define S_MC_DATA_DRIVE                     20
+#define M_MC_DATA_DRIVE                     _SB_MAKEMASK(4, S_MC_DATA_DRIVE)
+#define V_MC_DATA_DRIVE(x)          _SB_MAKEVALUE(x, S_MC_DATA_DRIVE)
+#define G_MC_DATA_DRIVE(x)          _SB_GETVALUE(x, S_MC_DATA_DRIVE, M_MC_DATA_DRIVE)
+#define V_MC_DATA_DRIVE_DEFAULT             V_MC_DATA_DRIVE(0x0)
 
-#define S_MC_ADDR_DRIVE              24
-#define M_MC_ADDR_DRIVE              _SB_MAKEMASK(4, S_MC_ADDR_DRIVE)
-#define V_MC_ADDR_DRIVE(x)           _SB_MAKEVALUE(x, S_MC_ADDR_DRIVE)
-#define G_MC_ADDR_DRIVE(x)           _SB_GETVALUE(x, S_MC_ADDR_DRIVE, M_MC_ADDR_DRIVE)
-#define V_MC_ADDR_DRIVE_DEFAULT      V_MC_ADDR_DRIVE(0x0)
+#define S_MC_ADDR_DRIVE                     24
+#define M_MC_ADDR_DRIVE                     _SB_MAKEMASK(4, S_MC_ADDR_DRIVE)
+#define V_MC_ADDR_DRIVE(x)          _SB_MAKEVALUE(x, S_MC_ADDR_DRIVE)
+#define G_MC_ADDR_DRIVE(x)          _SB_GETVALUE(x, S_MC_ADDR_DRIVE, M_MC_ADDR_DRIVE)
+#define V_MC_ADDR_DRIVE_DEFAULT             V_MC_ADDR_DRIVE(0x0)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define M_MC_REF_DISABLE             _SB_MAKEMASK1(30)
+#define M_MC_REF_DISABLE            _SB_MAKEMASK1(30)
 #endif /* 1250 PASS3 || 112x PASS1 */
 
-#define M_MC_DLL_BYPASS              _SB_MAKEMASK1(31)
-
-#define S_MC_DQI_SKEW               32
-#define M_MC_DQI_SKEW               _SB_MAKEMASK(8, S_MC_DQI_SKEW)
-#define V_MC_DQI_SKEW(x)            _SB_MAKEVALUE(x, S_MC_DQI_SKEW)
-#define G_MC_DQI_SKEW(x)            _SB_GETVALUE(x, S_MC_DQI_SKEW, M_MC_DQI_SKEW)
-#define V_MC_DQI_SKEW_DEFAULT       V_MC_DQI_SKEW(0)
-
-#define S_MC_DQO_SKEW               40
-#define M_MC_DQO_SKEW               _SB_MAKEMASK(8, S_MC_DQO_SKEW)
-#define V_MC_DQO_SKEW(x)            _SB_MAKEVALUE(x, S_MC_DQO_SKEW)
-#define G_MC_DQO_SKEW(x)            _SB_GETVALUE(x, S_MC_DQO_SKEW, M_MC_DQO_SKEW)
-#define V_MC_DQO_SKEW_DEFAULT       V_MC_DQO_SKEW(0)
-
-#define S_MC_ADDR_SKEW               48
-#define M_MC_ADDR_SKEW               _SB_MAKEMASK(8, S_MC_ADDR_SKEW)
-#define V_MC_ADDR_SKEW(x)            _SB_MAKEVALUE(x, S_MC_ADDR_SKEW)
-#define G_MC_ADDR_SKEW(x)            _SB_GETVALUE(x, S_MC_ADDR_SKEW, M_MC_ADDR_SKEW)
-#define V_MC_ADDR_SKEW_DEFAULT       V_MC_ADDR_SKEW(0x0F)
-
-#define S_MC_DLL_DEFAULT             56
-#define M_MC_DLL_DEFAULT             _SB_MAKEMASK(8, S_MC_DLL_DEFAULT)
-#define V_MC_DLL_DEFAULT(x)          _SB_MAKEVALUE(x, S_MC_DLL_DEFAULT)
-#define G_MC_DLL_DEFAULT(x)          _SB_GETVALUE(x, S_MC_DLL_DEFAULT, M_MC_DLL_DEFAULT)
+#define M_MC_DLL_BYPASS                     _SB_MAKEMASK1(31)
+
+#define S_MC_DQI_SKEW              32
+#define M_MC_DQI_SKEW              _SB_MAKEMASK(8, S_MC_DQI_SKEW)
+#define V_MC_DQI_SKEW(x)           _SB_MAKEVALUE(x, S_MC_DQI_SKEW)
+#define G_MC_DQI_SKEW(x)           _SB_GETVALUE(x, S_MC_DQI_SKEW, M_MC_DQI_SKEW)
+#define V_MC_DQI_SKEW_DEFAULT      V_MC_DQI_SKEW(0)
+
+#define S_MC_DQO_SKEW              40
+#define M_MC_DQO_SKEW              _SB_MAKEMASK(8, S_MC_DQO_SKEW)
+#define V_MC_DQO_SKEW(x)           _SB_MAKEVALUE(x, S_MC_DQO_SKEW)
+#define G_MC_DQO_SKEW(x)           _SB_GETVALUE(x, S_MC_DQO_SKEW, M_MC_DQO_SKEW)
+#define V_MC_DQO_SKEW_DEFAULT      V_MC_DQO_SKEW(0)
+
+#define S_MC_ADDR_SKEW              48
+#define M_MC_ADDR_SKEW              _SB_MAKEMASK(8, S_MC_ADDR_SKEW)
+#define V_MC_ADDR_SKEW(x)           _SB_MAKEVALUE(x, S_MC_ADDR_SKEW)
+#define G_MC_ADDR_SKEW(x)           _SB_GETVALUE(x, S_MC_ADDR_SKEW, M_MC_ADDR_SKEW)
+#define V_MC_ADDR_SKEW_DEFAULT      V_MC_ADDR_SKEW(0x0F)
+
+#define S_MC_DLL_DEFAULT            56
+#define M_MC_DLL_DEFAULT            _SB_MAKEMASK(8, S_MC_DLL_DEFAULT)
+#define V_MC_DLL_DEFAULT(x)         _SB_MAKEVALUE(x, S_MC_DLL_DEFAULT)
+#define G_MC_DLL_DEFAULT(x)         _SB_GETVALUE(x, S_MC_DLL_DEFAULT, M_MC_DLL_DEFAULT)
 #define V_MC_DLL_DEFAULT_DEFAULT     V_MC_DLL_DEFAULT(0x10)
 
-#define V_MC_CLKCONFIG_DEFAULT       V_MC_DLL_DEFAULT_DEFAULT |  \
-                                     V_MC_ADDR_SKEW_DEFAULT | \
-                                     V_MC_DQO_SKEW_DEFAULT | \
-                                     V_MC_DQI_SKEW_DEFAULT | \
-                                     V_MC_ADDR_DRIVE_DEFAULT | \
-                                     V_MC_DATA_DRIVE_DEFAULT | \
-                                     V_MC_CLOCK_DRIVE_DEFAULT | \
-                                     V_MC_REF_RATE_DEFAULT
+#define V_MC_CLKCONFIG_DEFAULT      V_MC_DLL_DEFAULT_DEFAULT |  \
+                                    V_MC_ADDR_SKEW_DEFAULT | \
+                                    V_MC_DQO_SKEW_DEFAULT | \
+                                    V_MC_DQI_SKEW_DEFAULT | \
+                                    V_MC_ADDR_DRIVE_DEFAULT | \
+                                    V_MC_DATA_DRIVE_DEFAULT | \
+                                    V_MC_CLOCK_DRIVE_DEFAULT | \
+                                    V_MC_REF_RATE_DEFAULT
 
 
 
  * DRAM Command Register (Table 6-13)
  */
 
-#define S_MC_COMMAND                0
-#define M_MC_COMMAND                _SB_MAKEMASK(4, S_MC_COMMAND)
-#define V_MC_COMMAND(x)             _SB_MAKEVALUE(x, S_MC_COMMAND)
-#define G_MC_COMMAND(x)             _SB_GETVALUE(x, S_MC_COMMAND, M_MC_COMMAND)
-
-#define K_MC_COMMAND_EMRS           0
-#define K_MC_COMMAND_MRS            1
-#define K_MC_COMMAND_PRE            2
-#define K_MC_COMMAND_AR             3
-#define K_MC_COMMAND_SETRFSH        4
-#define K_MC_COMMAND_CLRRFSH        5
-#define K_MC_COMMAND_SETPWRDN       6
-#define K_MC_COMMAND_CLRPWRDN       7
-
-#define V_MC_COMMAND_EMRS           V_MC_COMMAND(K_MC_COMMAND_EMRS)
-#define V_MC_COMMAND_MRS            V_MC_COMMAND(K_MC_COMMAND_MRS)
-#define V_MC_COMMAND_PRE            V_MC_COMMAND(K_MC_COMMAND_PRE)
-#define V_MC_COMMAND_AR             V_MC_COMMAND(K_MC_COMMAND_AR)
-#define V_MC_COMMAND_SETRFSH        V_MC_COMMAND(K_MC_COMMAND_SETRFSH)
-#define V_MC_COMMAND_CLRRFSH        V_MC_COMMAND(K_MC_COMMAND_CLRRFSH)
-#define V_MC_COMMAND_SETPWRDN       V_MC_COMMAND(K_MC_COMMAND_SETPWRDN)
-#define V_MC_COMMAND_CLRPWRDN       V_MC_COMMAND(K_MC_COMMAND_CLRPWRDN)
-
-#define M_MC_CS0                    _SB_MAKEMASK1(4)
-#define M_MC_CS1                    _SB_MAKEMASK1(5)
-#define M_MC_CS2                    _SB_MAKEMASK1(6)
-#define M_MC_CS3                    _SB_MAKEMASK1(7)
+#define S_MC_COMMAND               0
+#define M_MC_COMMAND               _SB_MAKEMASK(4, S_MC_COMMAND)
+#define V_MC_COMMAND(x)                    _SB_MAKEVALUE(x, S_MC_COMMAND)
+#define G_MC_COMMAND(x)                    _SB_GETVALUE(x, S_MC_COMMAND, M_MC_COMMAND)
+
+#define K_MC_COMMAND_EMRS          0
+#define K_MC_COMMAND_MRS           1
+#define K_MC_COMMAND_PRE           2
+#define K_MC_COMMAND_AR                    3
+#define K_MC_COMMAND_SETRFSH       4
+#define K_MC_COMMAND_CLRRFSH       5
+#define K_MC_COMMAND_SETPWRDN      6
+#define K_MC_COMMAND_CLRPWRDN      7
+
+#define V_MC_COMMAND_EMRS          V_MC_COMMAND(K_MC_COMMAND_EMRS)
+#define V_MC_COMMAND_MRS           V_MC_COMMAND(K_MC_COMMAND_MRS)
+#define V_MC_COMMAND_PRE           V_MC_COMMAND(K_MC_COMMAND_PRE)
+#define V_MC_COMMAND_AR                    V_MC_COMMAND(K_MC_COMMAND_AR)
+#define V_MC_COMMAND_SETRFSH       V_MC_COMMAND(K_MC_COMMAND_SETRFSH)
+#define V_MC_COMMAND_CLRRFSH       V_MC_COMMAND(K_MC_COMMAND_CLRRFSH)
+#define V_MC_COMMAND_SETPWRDN      V_MC_COMMAND(K_MC_COMMAND_SETPWRDN)
+#define V_MC_COMMAND_CLRPWRDN      V_MC_COMMAND(K_MC_COMMAND_CLRPWRDN)
+
+#define M_MC_CS0                   _SB_MAKEMASK1(4)
+#define M_MC_CS1                   _SB_MAKEMASK1(5)
+#define M_MC_CS2                   _SB_MAKEMASK1(6)
+#define M_MC_CS3                   _SB_MAKEMASK1(7)
 
 /*
  * DRAM Mode Register (Table 6-14)
  */
 
-#define S_MC_EMODE                  0
-#define M_MC_EMODE                  _SB_MAKEMASK(15, S_MC_EMODE)
-#define V_MC_EMODE(x)               _SB_MAKEVALUE(x, S_MC_EMODE)
-#define G_MC_EMODE(x)               _SB_GETVALUE(x, S_MC_EMODE, M_MC_EMODE)
-#define V_MC_EMODE_DEFAULT          V_MC_EMODE(0)
-
-#define S_MC_MODE                   16
-#define M_MC_MODE                   _SB_MAKEMASK(15, S_MC_MODE)
-#define V_MC_MODE(x)                _SB_MAKEVALUE(x, S_MC_MODE)
-#define G_MC_MODE(x)                _SB_GETVALUE(x, S_MC_MODE, M_MC_MODE)
-#define V_MC_MODE_DEFAULT           V_MC_MODE(0x22)
-
-#define S_MC_DRAM_TYPE              32
-#define M_MC_DRAM_TYPE              _SB_MAKEMASK(3, S_MC_DRAM_TYPE)
-#define V_MC_DRAM_TYPE(x)           _SB_MAKEVALUE(x, S_MC_DRAM_TYPE)
-#define G_MC_DRAM_TYPE(x)           _SB_GETVALUE(x, S_MC_DRAM_TYPE, M_MC_DRAM_TYPE)
-
-#define K_MC_DRAM_TYPE_JEDEC        0
-#define K_MC_DRAM_TYPE_FCRAM        1
+#define S_MC_EMODE                 0
+#define M_MC_EMODE                 _SB_MAKEMASK(15, S_MC_EMODE)
+#define V_MC_EMODE(x)              _SB_MAKEVALUE(x, S_MC_EMODE)
+#define G_MC_EMODE(x)              _SB_GETVALUE(x, S_MC_EMODE, M_MC_EMODE)
+#define V_MC_EMODE_DEFAULT         V_MC_EMODE(0)
+
+#define S_MC_MODE                  16
+#define M_MC_MODE                  _SB_MAKEMASK(15, S_MC_MODE)
+#define V_MC_MODE(x)               _SB_MAKEVALUE(x, S_MC_MODE)
+#define G_MC_MODE(x)               _SB_GETVALUE(x, S_MC_MODE, M_MC_MODE)
+#define V_MC_MODE_DEFAULT          V_MC_MODE(0x22)
+
+#define S_MC_DRAM_TYPE             32
+#define M_MC_DRAM_TYPE             _SB_MAKEMASK(3, S_MC_DRAM_TYPE)
+#define V_MC_DRAM_TYPE(x)          _SB_MAKEVALUE(x, S_MC_DRAM_TYPE)
+#define G_MC_DRAM_TYPE(x)          _SB_GETVALUE(x, S_MC_DRAM_TYPE, M_MC_DRAM_TYPE)
+
+#define K_MC_DRAM_TYPE_JEDEC       0
+#define K_MC_DRAM_TYPE_FCRAM       1
 #define K_MC_DRAM_TYPE_SGRAM       2
 
-#define V_MC_DRAM_TYPE_JEDEC        V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_JEDEC)
-#define V_MC_DRAM_TYPE_FCRAM        V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_FCRAM)
-#define V_MC_DRAM_TYPE_SGRAM        V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_SGRAM)
+#define V_MC_DRAM_TYPE_JEDEC       V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_JEDEC)
+#define V_MC_DRAM_TYPE_FCRAM       V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_FCRAM)
+#define V_MC_DRAM_TYPE_SGRAM       V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_SGRAM)
 
 #define M_MC_EXTERNALDECODE        _SB_MAKEMASK1(35)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define M_MC_PRE_ON_A8              _SB_MAKEMASK1(36)
-#define M_MC_RAM_WITH_A13           _SB_MAKEMASK1(37)
+#define M_MC_PRE_ON_A8             _SB_MAKEMASK1(36)
+#define M_MC_RAM_WITH_A13          _SB_MAKEMASK1(37)
 #endif /* 1250 PASS3 || 112x PASS1 */
 
 
 #define M_MC_r2wIDLE_TWOCYCLES   _SB_MAKEMASK1(61)
 #define M_MC_r2rIDLE_TWOCYCLES   _SB_MAKEMASK1(62)
 
-#define S_MC_tFIFO                56
-#define M_MC_tFIFO                _SB_MAKEMASK(4, S_MC_tFIFO)
-#define V_MC_tFIFO(x)             _SB_MAKEVALUE(x, S_MC_tFIFO)
-#define G_MC_tFIFO(x)             _SB_GETVALUE(x, S_MC_tFIFO, M_MC_tFIFO)
-#define K_MC_tFIFO_DEFAULT        1
-#define V_MC_tFIFO_DEFAULT        V_MC_tFIFO(K_MC_tFIFO_DEFAULT)
+#define S_MC_tFIFO               56
+#define M_MC_tFIFO               _SB_MAKEMASK(4, S_MC_tFIFO)
+#define V_MC_tFIFO(x)            _SB_MAKEVALUE(x, S_MC_tFIFO)
+#define G_MC_tFIFO(x)            _SB_GETVALUE(x, S_MC_tFIFO, M_MC_tFIFO)
+#define K_MC_tFIFO_DEFAULT       1
+#define V_MC_tFIFO_DEFAULT       V_MC_tFIFO(K_MC_tFIFO_DEFAULT)
 
-#define S_MC_tRFC                 52
-#define M_MC_tRFC                 _SB_MAKEMASK(4, S_MC_tRFC)
-#define V_MC_tRFC(x)              _SB_MAKEVALUE(x, S_MC_tRFC)
-#define G_MC_tRFC(x)              _SB_GETVALUE(x, S_MC_tRFC, M_MC_tRFC)
-#define K_MC_tRFC_DEFAULT         12
-#define V_MC_tRFC_DEFAULT         V_MC_tRFC(K_MC_tRFC_DEFAULT)
+#define S_MC_tRFC                52
+#define M_MC_tRFC                _SB_MAKEMASK(4, S_MC_tRFC)
+#define V_MC_tRFC(x)             _SB_MAKEVALUE(x, S_MC_tRFC)
+#define G_MC_tRFC(x)             _SB_GETVALUE(x, S_MC_tRFC, M_MC_tRFC)
+#define K_MC_tRFC_DEFAULT        12
+#define V_MC_tRFC_DEFAULT        V_MC_tRFC(K_MC_tRFC_DEFAULT)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3)
-#define M_MC_tRFC_PLUS16          _SB_MAKEMASK1(51)    /* 1250C3 and later.  */
+#define M_MC_tRFC_PLUS16         _SB_MAKEMASK1(51)     /* 1250C3 and later.  */
 #endif
 
-#define S_MC_tCwCr                40
-#define M_MC_tCwCr                _SB_MAKEMASK(4, S_MC_tCwCr)
-#define V_MC_tCwCr(x)             _SB_MAKEVALUE(x, S_MC_tCwCr)
-#define G_MC_tCwCr(x)             _SB_GETVALUE(x, S_MC_tCwCr, M_MC_tCwCr)
-#define K_MC_tCwCr_DEFAULT        4
-#define V_MC_tCwCr_DEFAULT        V_MC_tCwCr(K_MC_tCwCr_DEFAULT)
-
-#define S_MC_tRCr                 28
-#define M_MC_tRCr                 _SB_MAKEMASK(4, S_MC_tRCr)
-#define V_MC_tRCr(x)              _SB_MAKEVALUE(x, S_MC_tRCr)
-#define G_MC_tRCr(x)              _SB_GETVALUE(x, S_MC_tRCr, M_MC_tRCr)
-#define K_MC_tRCr_DEFAULT         9
-#define V_MC_tRCr_DEFAULT         V_MC_tRCr(K_MC_tRCr_DEFAULT)
-
-#define S_MC_tRCw                 24
-#define M_MC_tRCw                 _SB_MAKEMASK(4, S_MC_tRCw)
-#define V_MC_tRCw(x)              _SB_MAKEVALUE(x, S_MC_tRCw)
-#define G_MC_tRCw(x)              _SB_GETVALUE(x, S_MC_tRCw, M_MC_tRCw)
-#define K_MC_tRCw_DEFAULT         10
-#define V_MC_tRCw_DEFAULT         V_MC_tRCw(K_MC_tRCw_DEFAULT)
-
-#define S_MC_tRRD                 20
-#define M_MC_tRRD                 _SB_MAKEMASK(4, S_MC_tRRD)
-#define V_MC_tRRD(x)              _SB_MAKEVALUE(x, S_MC_tRRD)
-#define G_MC_tRRD(x)              _SB_GETVALUE(x, S_MC_tRRD, M_MC_tRRD)
-#define K_MC_tRRD_DEFAULT         2
-#define V_MC_tRRD_DEFAULT         V_MC_tRRD(K_MC_tRRD_DEFAULT)
-
-#define S_MC_tRP                  16
-#define M_MC_tRP                  _SB_MAKEMASK(4, S_MC_tRP)
-#define V_MC_tRP(x)               _SB_MAKEVALUE(x, S_MC_tRP)
-#define G_MC_tRP(x)               _SB_GETVALUE(x, S_MC_tRP, M_MC_tRP)
-#define K_MC_tRP_DEFAULT          4
-#define V_MC_tRP_DEFAULT          V_MC_tRP(K_MC_tRP_DEFAULT)
-
-#define S_MC_tCwD                 8
-#define M_MC_tCwD                 _SB_MAKEMASK(4, S_MC_tCwD)
-#define V_MC_tCwD(x)              _SB_MAKEVALUE(x, S_MC_tCwD)
-#define G_MC_tCwD(x)              _SB_GETVALUE(x, S_MC_tCwD, M_MC_tCwD)
-#define K_MC_tCwD_DEFAULT         1
-#define V_MC_tCwD_DEFAULT         V_MC_tCwD(K_MC_tCwD_DEFAULT)
-
-#define M_tCrDh                   _SB_MAKEMASK1(7)
+#define S_MC_tCwCr               40
+#define M_MC_tCwCr               _SB_MAKEMASK(4, S_MC_tCwCr)
+#define V_MC_tCwCr(x)            _SB_MAKEVALUE(x, S_MC_tCwCr)
+#define G_MC_tCwCr(x)            _SB_GETVALUE(x, S_MC_tCwCr, M_MC_tCwCr)
+#define K_MC_tCwCr_DEFAULT       4
+#define V_MC_tCwCr_DEFAULT       V_MC_tCwCr(K_MC_tCwCr_DEFAULT)
+
+#define S_MC_tRCr                28
+#define M_MC_tRCr                _SB_MAKEMASK(4, S_MC_tRCr)
+#define V_MC_tRCr(x)             _SB_MAKEVALUE(x, S_MC_tRCr)
+#define G_MC_tRCr(x)             _SB_GETVALUE(x, S_MC_tRCr, M_MC_tRCr)
+#define K_MC_tRCr_DEFAULT        9
+#define V_MC_tRCr_DEFAULT        V_MC_tRCr(K_MC_tRCr_DEFAULT)
+
+#define S_MC_tRCw                24
+#define M_MC_tRCw                _SB_MAKEMASK(4, S_MC_tRCw)
+#define V_MC_tRCw(x)             _SB_MAKEVALUE(x, S_MC_tRCw)
+#define G_MC_tRCw(x)             _SB_GETVALUE(x, S_MC_tRCw, M_MC_tRCw)
+#define K_MC_tRCw_DEFAULT        10
+#define V_MC_tRCw_DEFAULT        V_MC_tRCw(K_MC_tRCw_DEFAULT)
+
+#define S_MC_tRRD                20
+#define M_MC_tRRD                _SB_MAKEMASK(4, S_MC_tRRD)
+#define V_MC_tRRD(x)             _SB_MAKEVALUE(x, S_MC_tRRD)
+#define G_MC_tRRD(x)             _SB_GETVALUE(x, S_MC_tRRD, M_MC_tRRD)
+#define K_MC_tRRD_DEFAULT        2
+#define V_MC_tRRD_DEFAULT        V_MC_tRRD(K_MC_tRRD_DEFAULT)
+
+#define S_MC_tRP                 16
+#define M_MC_tRP                 _SB_MAKEMASK(4, S_MC_tRP)
+#define V_MC_tRP(x)              _SB_MAKEVALUE(x, S_MC_tRP)
+#define G_MC_tRP(x)              _SB_GETVALUE(x, S_MC_tRP, M_MC_tRP)
+#define K_MC_tRP_DEFAULT         4
+#define V_MC_tRP_DEFAULT         V_MC_tRP(K_MC_tRP_DEFAULT)
+
+#define S_MC_tCwD                8
+#define M_MC_tCwD                _SB_MAKEMASK(4, S_MC_tCwD)
+#define V_MC_tCwD(x)             _SB_MAKEVALUE(x, S_MC_tCwD)
+#define G_MC_tCwD(x)             _SB_GETVALUE(x, S_MC_tCwD, M_MC_tCwD)
+#define K_MC_tCwD_DEFAULT        1
+#define V_MC_tCwD_DEFAULT        V_MC_tCwD(K_MC_tCwD_DEFAULT)
+
+#define M_tCrDh                          _SB_MAKEMASK1(7)
 #define M_MC_tCrDh               M_tCrDh
 
-#define S_MC_tCrD                 4
-#define M_MC_tCrD                 _SB_MAKEMASK(3, S_MC_tCrD)
-#define V_MC_tCrD(x)              _SB_MAKEVALUE(x, S_MC_tCrD)
-#define G_MC_tCrD(x)              _SB_GETVALUE(x, S_MC_tCrD, M_MC_tCrD)
-#define K_MC_tCrD_DEFAULT         2
-#define V_MC_tCrD_DEFAULT         V_MC_tCrD(K_MC_tCrD_DEFAULT)
-
-#define S_MC_tRCD                 0
-#define M_MC_tRCD                 _SB_MAKEMASK(4, S_MC_tRCD)
-#define V_MC_tRCD(x)              _SB_MAKEVALUE(x, S_MC_tRCD)
-#define G_MC_tRCD(x)              _SB_GETVALUE(x, S_MC_tRCD, M_MC_tRCD)
-#define K_MC_tRCD_DEFAULT         3
-#define V_MC_tRCD_DEFAULT         V_MC_tRCD(K_MC_tRCD_DEFAULT)
-
-#define V_MC_TIMING_DEFAULT     V_MC_tFIFO(K_MC_tFIFO_DEFAULT) | \
-                                V_MC_tRFC(K_MC_tRFC_DEFAULT) | \
-                                V_MC_tCwCr(K_MC_tCwCr_DEFAULT) | \
-                                V_MC_tRCr(K_MC_tRCr_DEFAULT) | \
-                                V_MC_tRCw(K_MC_tRCw_DEFAULT) | \
-                                V_MC_tRRD(K_MC_tRRD_DEFAULT) | \
-                                V_MC_tRP(K_MC_tRP_DEFAULT) | \
-                                V_MC_tCwD(K_MC_tCwD_DEFAULT) | \
-                                V_MC_tCrD(K_MC_tCrD_DEFAULT) | \
-                                V_MC_tRCD(K_MC_tRCD_DEFAULT) | \
-                                M_MC_r2rIDLE_TWOCYCLES
+#define S_MC_tCrD                4
+#define M_MC_tCrD                _SB_MAKEMASK(3, S_MC_tCrD)
+#define V_MC_tCrD(x)             _SB_MAKEVALUE(x, S_MC_tCrD)
+#define G_MC_tCrD(x)             _SB_GETVALUE(x, S_MC_tCrD, M_MC_tCrD)
+#define K_MC_tCrD_DEFAULT        2
+#define V_MC_tCrD_DEFAULT        V_MC_tCrD(K_MC_tCrD_DEFAULT)
+
+#define S_MC_tRCD                0
+#define M_MC_tRCD                _SB_MAKEMASK(4, S_MC_tRCD)
+#define V_MC_tRCD(x)             _SB_MAKEVALUE(x, S_MC_tRCD)
+#define G_MC_tRCD(x)             _SB_GETVALUE(x, S_MC_tRCD, M_MC_tRCD)
+#define K_MC_tRCD_DEFAULT        3
+#define V_MC_tRCD_DEFAULT        V_MC_tRCD(K_MC_tRCD_DEFAULT)
+
+#define V_MC_TIMING_DEFAULT    V_MC_tFIFO(K_MC_tFIFO_DEFAULT) | \
+                               V_MC_tRFC(K_MC_tRFC_DEFAULT) | \
+                               V_MC_tCwCr(K_MC_tCwCr_DEFAULT) | \
+                               V_MC_tRCr(K_MC_tRCr_DEFAULT) | \
+                               V_MC_tRCw(K_MC_tRCw_DEFAULT) | \
+                               V_MC_tRRD(K_MC_tRRD_DEFAULT) | \
+                               V_MC_tRP(K_MC_tRP_DEFAULT) | \
+                               V_MC_tCwD(K_MC_tCwD_DEFAULT) | \
+                               V_MC_tCrD(K_MC_tCrD_DEFAULT) | \
+                               V_MC_tRCD(K_MC_tRCD_DEFAULT) | \
+                               M_MC_r2rIDLE_TWOCYCLES
 
 /*
  * Errata says these are not the default
- *                               M_MC_w2rIDLE_TWOCYCLES | \
- *                               M_MC_r2wIDLE_TWOCYCLES | \
+ *                              M_MC_w2rIDLE_TWOCYCLES | \
+ *                              M_MC_r2wIDLE_TWOCYCLES | \
  */
 
 
  * Chip Select Start Address Register (Table 6-17)
  */
 
-#define S_MC_CS0_START              0
-#define M_MC_CS0_START              _SB_MAKEMASK(16, S_MC_CS0_START)
-#define V_MC_CS0_START(x)           _SB_MAKEVALUE(x, S_MC_CS0_START)
-#define G_MC_CS0_START(x)           _SB_GETVALUE(x, S_MC_CS0_START, M_MC_CS0_START)
+#define S_MC_CS0_START             0
+#define M_MC_CS0_START             _SB_MAKEMASK(16, S_MC_CS0_START)
+#define V_MC_CS0_START(x)          _SB_MAKEVALUE(x, S_MC_CS0_START)
+#define G_MC_CS0_START(x)          _SB_GETVALUE(x, S_MC_CS0_START, M_MC_CS0_START)
 
-#define S_MC_CS1_START              16
-#define M_MC_CS1_START              _SB_MAKEMASK(16, S_MC_CS1_START)
-#define V_MC_CS1_START(x)           _SB_MAKEVALUE(x, S_MC_CS1_START)
-#define G_MC_CS1_START(x)           _SB_GETVALUE(x, S_MC_CS1_START, M_MC_CS1_START)
+#define S_MC_CS1_START             16
+#define M_MC_CS1_START             _SB_MAKEMASK(16, S_MC_CS1_START)
+#define V_MC_CS1_START(x)          _SB_MAKEVALUE(x, S_MC_CS1_START)
+#define G_MC_CS1_START(x)          _SB_GETVALUE(x, S_MC_CS1_START, M_MC_CS1_START)
 
-#define S_MC_CS2_START              32
-#define M_MC_CS2_START              _SB_MAKEMASK(16, S_MC_CS2_START)
-#define V_MC_CS2_START(x)           _SB_MAKEVALUE(x, S_MC_CS2_START)
-#define G_MC_CS2_START(x)           _SB_GETVALUE(x, S_MC_CS2_START, M_MC_CS2_START)
+#define S_MC_CS2_START             32
+#define M_MC_CS2_START             _SB_MAKEMASK(16, S_MC_CS2_START)
+#define V_MC_CS2_START(x)          _SB_MAKEVALUE(x, S_MC_CS2_START)
+#define G_MC_CS2_START(x)          _SB_GETVALUE(x, S_MC_CS2_START, M_MC_CS2_START)
 
-#define S_MC_CS3_START              48
-#define M_MC_CS3_START              _SB_MAKEMASK(16, S_MC_CS3_START)
-#define V_MC_CS3_START(x)           _SB_MAKEVALUE(x, S_MC_CS3_START)
-#define G_MC_CS3_START(x)           _SB_GETVALUE(x, S_MC_CS3_START, M_MC_CS3_START)
+#define S_MC_CS3_START             48
+#define M_MC_CS3_START             _SB_MAKEMASK(16, S_MC_CS3_START)
+#define V_MC_CS3_START(x)          _SB_MAKEVALUE(x, S_MC_CS3_START)
+#define G_MC_CS3_START(x)          _SB_GETVALUE(x, S_MC_CS3_START, M_MC_CS3_START)
 
 /*
  * Chip Select End Address Register (Table 6-18)
  */
 
-#define S_MC_CS0_END                0
-#define M_MC_CS0_END                _SB_MAKEMASK(16, S_MC_CS0_END)
-#define V_MC_CS0_END(x)             _SB_MAKEVALUE(x, S_MC_CS0_END)
-#define G_MC_CS0_END(x)             _SB_GETVALUE(x, S_MC_CS0_END, M_MC_CS0_END)
+#define S_MC_CS0_END               0
+#define M_MC_CS0_END               _SB_MAKEMASK(16, S_MC_CS0_END)
+#define V_MC_CS0_END(x)                    _SB_MAKEVALUE(x, S_MC_CS0_END)
+#define G_MC_CS0_END(x)                    _SB_GETVALUE(x, S_MC_CS0_END, M_MC_CS0_END)
 
-#define S_MC_CS1_END                16
-#define M_MC_CS1_END                _SB_MAKEMASK(16, S_MC_CS1_END)
-#define V_MC_CS1_END(x)             _SB_MAKEVALUE(x, S_MC_CS1_END)
-#define G_MC_CS1_END(x)             _SB_GETVALUE(x, S_MC_CS1_END, M_MC_CS1_END)
+#define S_MC_CS1_END               16
+#define M_MC_CS1_END               _SB_MAKEMASK(16, S_MC_CS1_END)
+#define V_MC_CS1_END(x)                    _SB_MAKEVALUE(x, S_MC_CS1_END)
+#define G_MC_CS1_END(x)                    _SB_GETVALUE(x, S_MC_CS1_END, M_MC_CS1_END)
 
-#define S_MC_CS2_END                32
-#define M_MC_CS2_END                _SB_MAKEMASK(16, S_MC_CS2_END)
-#define V_MC_CS2_END(x)             _SB_MAKEVALUE(x, S_MC_CS2_END)
-#define G_MC_CS2_END(x)             _SB_GETVALUE(x, S_MC_CS2_END, M_MC_CS2_END)
+#define S_MC_CS2_END               32
+#define M_MC_CS2_END               _SB_MAKEMASK(16, S_MC_CS2_END)
+#define V_MC_CS2_END(x)                    _SB_MAKEVALUE(x, S_MC_CS2_END)
+#define G_MC_CS2_END(x)                    _SB_GETVALUE(x, S_MC_CS2_END, M_MC_CS2_END)
 
-#define S_MC_CS3_END                48
-#define M_MC_CS3_END                _SB_MAKEMASK(16, S_MC_CS3_END)
-#define V_MC_CS3_END(x)             _SB_MAKEVALUE(x, S_MC_CS3_END)
-#define G_MC_CS3_END(x)             _SB_GETVALUE(x, S_MC_CS3_END, M_MC_CS3_END)
+#define S_MC_CS3_END               48
+#define M_MC_CS3_END               _SB_MAKEMASK(16, S_MC_CS3_END)
+#define V_MC_CS3_END(x)                    _SB_MAKEVALUE(x, S_MC_CS3_END)
+#define G_MC_CS3_END(x)                    _SB_GETVALUE(x, S_MC_CS3_END, M_MC_CS3_END)
 
 /*
  * Chip Select Interleave Register (Table 6-19)
  */
 
-#define S_MC_INTLV_RESERVED         0
-#define M_MC_INTLV_RESERVED         _SB_MAKEMASK(5, S_MC_INTLV_RESERVED)
+#define S_MC_INTLV_RESERVED        0
+#define M_MC_INTLV_RESERVED        _SB_MAKEMASK(5, S_MC_INTLV_RESERVED)
 
-#define S_MC_INTERLEAVE             7
-#define M_MC_INTERLEAVE             _SB_MAKEMASK(18, S_MC_INTERLEAVE)
-#define V_MC_INTERLEAVE(x)          _SB_MAKEVALUE(x, S_MC_INTERLEAVE)
+#define S_MC_INTERLEAVE                    7
+#define M_MC_INTERLEAVE                    _SB_MAKEMASK(18, S_MC_INTERLEAVE)
+#define V_MC_INTERLEAVE(x)         _SB_MAKEVALUE(x, S_MC_INTERLEAVE)
 
-#define S_MC_INTLV_MBZ              25
-#define M_MC_INTLV_MBZ              _SB_MAKEMASK(39, S_MC_INTLV_MBZ)
+#define S_MC_INTLV_MBZ             25
+#define M_MC_INTLV_MBZ             _SB_MAKEMASK(39, S_MC_INTLV_MBZ)
 
 /*
  * Row Address Bits Register (Table 6-20)
  */
 
-#define S_MC_RAS_RESERVED           0
-#define M_MC_RAS_RESERVED           _SB_MAKEMASK(5, S_MC_RAS_RESERVED)
+#define S_MC_RAS_RESERVED          0
+#define M_MC_RAS_RESERVED          _SB_MAKEMASK(5, S_MC_RAS_RESERVED)
 
-#define S_MC_RAS_SELECT             12
-#define M_MC_RAS_SELECT             _SB_MAKEMASK(25, S_MC_RAS_SELECT)
-#define V_MC_RAS_SELECT(x)          _SB_MAKEVALUE(x, S_MC_RAS_SELECT)
+#define S_MC_RAS_SELECT                    12
+#define M_MC_RAS_SELECT                    _SB_MAKEMASK(25, S_MC_RAS_SELECT)
+#define V_MC_RAS_SELECT(x)         _SB_MAKEVALUE(x, S_MC_RAS_SELECT)
 
-#define S_MC_RAS_MBZ                37
-#define M_MC_RAS_MBZ                _SB_MAKEMASK(27, S_MC_RAS_MBZ)
+#define S_MC_RAS_MBZ               37
+#define M_MC_RAS_MBZ               _SB_MAKEMASK(27, S_MC_RAS_MBZ)
 
 
 /*
  * Column Address Bits Register (Table 6-21)
  */
 
-#define S_MC_CAS_RESERVED           0
-#define M_MC_CAS_RESERVED           _SB_MAKEMASK(5, S_MC_CAS_RESERVED)
+#define S_MC_CAS_RESERVED          0
+#define M_MC_CAS_RESERVED          _SB_MAKEMASK(5, S_MC_CAS_RESERVED)
 
-#define S_MC_CAS_SELECT             5
-#define M_MC_CAS_SELECT             _SB_MAKEMASK(18, S_MC_CAS_SELECT)
-#define V_MC_CAS_SELECT(x)          _SB_MAKEVALUE(x, S_MC_CAS_SELECT)
+#define S_MC_CAS_SELECT                    5
+#define M_MC_CAS_SELECT                    _SB_MAKEMASK(18, S_MC_CAS_SELECT)
+#define V_MC_CAS_SELECT(x)         _SB_MAKEVALUE(x, S_MC_CAS_SELECT)
 
-#define S_MC_CAS_MBZ                23
-#define M_MC_CAS_MBZ                _SB_MAKEMASK(41, S_MC_CAS_MBZ)
+#define S_MC_CAS_MBZ               23
+#define M_MC_CAS_MBZ               _SB_MAKEMASK(41, S_MC_CAS_MBZ)
 
 
 /*
  * Bank Address Address Bits Register (Table 6-22)
  */
 
-#define S_MC_BA_RESERVED            0
-#define M_MC_BA_RESERVED            _SB_MAKEMASK(5, S_MC_BA_RESERVED)
+#define S_MC_BA_RESERVED           0
+#define M_MC_BA_RESERVED           _SB_MAKEMASK(5, S_MC_BA_RESERVED)
 
-#define S_MC_BA_SELECT              5
-#define M_MC_BA_SELECT              _SB_MAKEMASK(20, S_MC_BA_SELECT)
-#define V_MC_BA_SELECT(x)           _SB_MAKEVALUE(x, S_MC_BA_SELECT)
+#define S_MC_BA_SELECT             5
+#define M_MC_BA_SELECT             _SB_MAKEMASK(20, S_MC_BA_SELECT)
+#define V_MC_BA_SELECT(x)          _SB_MAKEVALUE(x, S_MC_BA_SELECT)
 
-#define S_MC_BA_MBZ                 25
-#define M_MC_BA_MBZ                 _SB_MAKEMASK(39, S_MC_BA_MBZ)
+#define S_MC_BA_MBZ                25
+#define M_MC_BA_MBZ                _SB_MAKEMASK(39, S_MC_BA_MBZ)
 
 /*
  * Chip Select Attribute Register (Table 6-23)
  */
 
-#define K_MC_CS_ATTR_CLOSED         0
-#define K_MC_CS_ATTR_CASCHECK       1
-#define K_MC_CS_ATTR_HINT           2
-#define K_MC_CS_ATTR_OPEN           3
+#define K_MC_CS_ATTR_CLOSED        0
+#define K_MC_CS_ATTR_CASCHECK      1
+#define K_MC_CS_ATTR_HINT          2
+#define K_MC_CS_ATTR_OPEN          3
 
-#define S_MC_CS0_PAGE               0
-#define M_MC_CS0_PAGE               _SB_MAKEMASK(2, S_MC_CS0_PAGE)
-#define V_MC_CS0_PAGE(x)            _SB_MAKEVALUE(x, S_MC_CS0_PAGE)
-#define G_MC_CS0_PAGE(x)            _SB_GETVALUE(x, S_MC_CS0_PAGE, M_MC_CS0_PAGE)
+#define S_MC_CS0_PAGE              0
+#define M_MC_CS0_PAGE              _SB_MAKEMASK(2, S_MC_CS0_PAGE)
+#define V_MC_CS0_PAGE(x)           _SB_MAKEVALUE(x, S_MC_CS0_PAGE)
+#define G_MC_CS0_PAGE(x)           _SB_GETVALUE(x, S_MC_CS0_PAGE, M_MC_CS0_PAGE)
 
-#define S_MC_CS1_PAGE               16
-#define M_MC_CS1_PAGE               _SB_MAKEMASK(2, S_MC_CS1_PAGE)
-#define V_MC_CS1_PAGE(x)            _SB_MAKEVALUE(x, S_MC_CS1_PAGE)
-#define G_MC_CS1_PAGE(x)            _SB_GETVALUE(x, S_MC_CS1_PAGE, M_MC_CS1_PAGE)
+#define S_MC_CS1_PAGE              16
+#define M_MC_CS1_PAGE              _SB_MAKEMASK(2, S_MC_CS1_PAGE)
+#define V_MC_CS1_PAGE(x)           _SB_MAKEVALUE(x, S_MC_CS1_PAGE)
+#define G_MC_CS1_PAGE(x)           _SB_GETVALUE(x, S_MC_CS1_PAGE, M_MC_CS1_PAGE)
 
-#define S_MC_CS2_PAGE               32
-#define M_MC_CS2_PAGE               _SB_MAKEMASK(2, S_MC_CS2_PAGE)
-#define V_MC_CS2_PAGE(x)            _SB_MAKEVALUE(x, S_MC_CS2_PAGE)
-#define G_MC_CS2_PAGE(x)            _SB_GETVALUE(x, S_MC_CS2_PAGE, M_MC_CS2_PAGE)
+#define S_MC_CS2_PAGE              32
+#define M_MC_CS2_PAGE              _SB_MAKEMASK(2, S_MC_CS2_PAGE)
+#define V_MC_CS2_PAGE(x)           _SB_MAKEVALUE(x, S_MC_CS2_PAGE)
+#define G_MC_CS2_PAGE(x)           _SB_GETVALUE(x, S_MC_CS2_PAGE, M_MC_CS2_PAGE)
 
-#define S_MC_CS3_PAGE               48
-#define M_MC_CS3_PAGE               _SB_MAKEMASK(2, S_MC_CS3_PAGE)
-#define V_MC_CS3_PAGE(x)            _SB_MAKEVALUE(x, S_MC_CS3_PAGE)
-#define G_MC_CS3_PAGE(x)            _SB_GETVALUE(x, S_MC_CS3_PAGE, M_MC_CS3_PAGE)
+#define S_MC_CS3_PAGE              48
+#define M_MC_CS3_PAGE              _SB_MAKEMASK(2, S_MC_CS3_PAGE)
+#define V_MC_CS3_PAGE(x)           _SB_MAKEVALUE(x, S_MC_CS3_PAGE)
+#define G_MC_CS3_PAGE(x)           _SB_GETVALUE(x, S_MC_CS3_PAGE, M_MC_CS3_PAGE)
 
 /*
  * ECC Test ECC Register (Table 6-25)
  */
 
-#define S_MC_ECC_INVERT             0
-#define M_MC_ECC_INVERT             _SB_MAKEMASK(8, S_MC_ECC_INVERT)
+#define S_MC_ECC_INVERT                    0
+#define M_MC_ECC_INVERT                    _SB_MAKEMASK(8, S_MC_ECC_INVERT)
 
 
 #endif
index 29b9f0b..ee86ca0 100644 (file)
@@ -1,7 +1,7 @@
 /*  *********************************************************************
     *  SB1250 Board Support Package
     *
-    *  Register Definitions                     File: sb1250_regs.h
+    *  Register Definitions                    File: sb1250_regs.h
     *
     *  This module contains the addresses of the on-chip peripherals
     *  on the SB1250.
  */
 
 #if SIBYTE_HDR_FEATURE_1250_112x               /* This MC only on 1250 & 112x */
-#define A_MC_BASE_0                 0x0010051000
-#define A_MC_BASE_1                 0x0010052000
-#define MC_REGISTER_SPACING         0x1000
+#define A_MC_BASE_0                0x0010051000
+#define A_MC_BASE_1                0x0010052000
+#define MC_REGISTER_SPACING        0x1000
 
-#define A_MC_BASE(ctlid)            ((ctlid)*MC_REGISTER_SPACING+A_MC_BASE_0)
+#define A_MC_BASE(ctlid)           ((ctlid)*MC_REGISTER_SPACING+A_MC_BASE_0)
 #define A_MC_REGISTER(ctlid, reg)    (A_MC_BASE(ctlid)+(reg))
 
-#define R_MC_CONFIG                 0x0000000100
-#define R_MC_DRAMCMD                0x0000000120
-#define R_MC_DRAMMODE               0x0000000140
-#define R_MC_TIMING1                0x0000000160
-#define R_MC_TIMING2                0x0000000180
-#define R_MC_CS_START               0x00000001A0
-#define R_MC_CS_END                 0x00000001C0
-#define R_MC_CS_INTERLEAVE          0x00000001E0
-#define S_MC_CS_STARTEND            16
-
-#define R_MC_CSX_BASE               0x0000000200
-#define R_MC_CSX_ROW                0x0000000000       /* relative to CSX_BASE, above */
-#define R_MC_CSX_COL                0x0000000020       /* relative to CSX_BASE, above */
-#define R_MC_CSX_BA                 0x0000000040       /* relative to CSX_BASE, above */
-#define MC_CSX_SPACING              0x0000000060       /* relative to CSX_BASE, above */
-
-#define R_MC_CS0_ROW                0x0000000200
-#define R_MC_CS0_COL                0x0000000220
-#define R_MC_CS0_BA                 0x0000000240
-#define R_MC_CS1_ROW                0x0000000260
-#define R_MC_CS1_COL                0x0000000280
-#define R_MC_CS1_BA                 0x00000002A0
-#define R_MC_CS2_ROW                0x00000002C0
-#define R_MC_CS2_COL                0x00000002E0
-#define R_MC_CS2_BA                 0x0000000300
-#define R_MC_CS3_ROW                0x0000000320
-#define R_MC_CS3_COL                0x0000000340
-#define R_MC_CS3_BA                 0x0000000360
-#define R_MC_CS_ATTR                0x0000000380
-#define R_MC_TEST_DATA              0x0000000400
-#define R_MC_TEST_ECC               0x0000000420
-#define R_MC_MCLK_CFG               0x0000000500
+#define R_MC_CONFIG                0x0000000100
+#define R_MC_DRAMCMD               0x0000000120
+#define R_MC_DRAMMODE              0x0000000140
+#define R_MC_TIMING1               0x0000000160
+#define R_MC_TIMING2               0x0000000180
+#define R_MC_CS_START              0x00000001A0
+#define R_MC_CS_END                0x00000001C0
+#define R_MC_CS_INTERLEAVE         0x00000001E0
+#define S_MC_CS_STARTEND           16
+
+#define R_MC_CSX_BASE              0x0000000200
+#define R_MC_CSX_ROW               0x0000000000        /* relative to CSX_BASE, above */
+#define R_MC_CSX_COL               0x0000000020        /* relative to CSX_BASE, above */
+#define R_MC_CSX_BA                0x0000000040        /* relative to CSX_BASE, above */
+#define MC_CSX_SPACING             0x0000000060        /* relative to CSX_BASE, above */
+
+#define R_MC_CS0_ROW               0x0000000200
+#define R_MC_CS0_COL               0x0000000220
+#define R_MC_CS0_BA                0x0000000240
+#define R_MC_CS1_ROW               0x0000000260
+#define R_MC_CS1_COL               0x0000000280
+#define R_MC_CS1_BA                0x00000002A0
+#define R_MC_CS2_ROW               0x00000002C0
+#define R_MC_CS2_COL               0x00000002E0
+#define R_MC_CS2_BA                0x0000000300
+#define R_MC_CS3_ROW               0x0000000320
+#define R_MC_CS3_COL               0x0000000340
+#define R_MC_CS3_BA                0x0000000360
+#define R_MC_CS_ATTR               0x0000000380
+#define R_MC_TEST_DATA             0x0000000400
+#define R_MC_TEST_ECC              0x0000000420
+#define R_MC_MCLK_CFG              0x0000000500
 
 #endif /* 1250 & 112x */
 
 
 #if SIBYTE_HDR_FEATURE_1250_112x       /* This L2C only on 1250/112x */
 
-#define A_L2_READ_TAG               0x0010040018
-#define A_L2_ECC_TAG                0x0010040038
+#define A_L2_READ_TAG              0x0010040018
+#define A_L2_ECC_TAG               0x0010040038
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define A_L2_READ_MISC              0x0010040058
+#define A_L2_READ_MISC             0x0010040058
 #endif /* 1250 PASS3 || 112x PASS1 */
-#define A_L2_WAY_DISABLE            0x0010041000
-#define A_L2_MAKEDISABLE(x)         (A_L2_WAY_DISABLE | (((~(x))&0x0F) << 8))
-#define A_L2_MGMT_TAG_BASE          0x00D0000000
+#define A_L2_WAY_DISABLE           0x0010041000
+#define A_L2_MAKEDISABLE(x)        (A_L2_WAY_DISABLE | (((~(x))&0x0F) << 8))
+#define A_L2_MGMT_TAG_BASE         0x00D0000000
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define A_L2_CACHE_DISABLE        0x0010042000
 #define A_L2_MISC_CONFIG          0x0010043000
 #endif /* 1250 PASS2 || 112x PASS1 */
 
-/* Backward-compatibility definitions.  */
+/* Backward-compatibility definitions. */
 /* XXX: discourage people from using these constants.  */
-#define A_L2_READ_ADDRESS           A_L2_READ_TAG
-#define A_L2_EEC_ADDRESS            A_L2_ECC_TAG
+#define A_L2_READ_ADDRESS          A_L2_READ_TAG
+#define A_L2_EEC_ADDRESS           A_L2_ECC_TAG
 
 #endif
 
     ********************************************************************* */
 
 #if SIBYTE_HDR_FEATURE_1250_112x       /* This PCI/HT only on 1250/112x */
-#define A_PCI_TYPE00_HEADER         0x00DE000000
-#define A_PCI_TYPE01_HEADER         0x00DE000800
+#define A_PCI_TYPE00_HEADER        0x00DE000000
+#define A_PCI_TYPE01_HEADER        0x00DE000800
 #endif
 
 
     * Ethernet DMA and MACs
     ********************************************************************* */
 
-#define A_MAC_BASE_0                0x0010064000
-#define A_MAC_BASE_1                0x0010065000
+#define A_MAC_BASE_0               0x0010064000
+#define A_MAC_BASE_1               0x0010065000
 #if SIBYTE_HDR_FEATURE_CHIP(1250)
-#define A_MAC_BASE_2                0x0010066000
+#define A_MAC_BASE_2               0x0010066000
 #endif /* 1250 */
 
-#define MAC_SPACING                 0x1000
-#define MAC_DMA_TXRX_SPACING        0x0400
-#define MAC_DMA_CHANNEL_SPACING     0x0100
-#define DMA_RX                      0
-#define DMA_TX                      1
+#define MAC_SPACING                0x1000
+#define MAC_DMA_TXRX_SPACING       0x0400
+#define MAC_DMA_CHANNEL_SPACING            0x0100
+#define DMA_RX                     0
+#define DMA_TX                     1
 #define MAC_NUM_DMACHAN                    2               /* channels per direction */
 
 /* XXX: not correct; depends on SOC type.  */
-#define MAC_NUM_PORTS               3
+#define MAC_NUM_PORTS              3
 
-#define A_MAC_CHANNEL_BASE(macnum)                  \
-            (A_MAC_BASE_0 +                         \
-             MAC_SPACING*(macnum))
+#define A_MAC_CHANNEL_BASE(macnum)                 \
+           (A_MAC_BASE_0 +                         \
+            MAC_SPACING*(macnum))
 
-#define A_MAC_REGISTER(macnum,reg)                  \
-            (A_MAC_BASE_0 +                         \
-             MAC_SPACING*(macnum) + (reg))
+#define A_MAC_REGISTER(macnum,reg)                 \
+           (A_MAC_BASE_0 +                         \
+            MAC_SPACING*(macnum) + (reg))
 
 
 #define R_MAC_DMA_CHANNELS             0x800 /* Relative to A_MAC_CHANNEL_BASE */
 
 #define A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan)  \
-             ((A_MAC_CHANNEL_BASE(macnum)) +        \
-             R_MAC_DMA_CHANNELS +                   \
-             (MAC_DMA_TXRX_SPACING*(txrx)) +        \
-             (MAC_DMA_CHANNEL_SPACING*(chan)))
+            ((A_MAC_CHANNEL_BASE(macnum)) +        \
+            R_MAC_DMA_CHANNELS +                   \
+            (MAC_DMA_TXRX_SPACING*(txrx)) +        \
+            (MAC_DMA_CHANNEL_SPACING*(chan)))
 
 #define R_MAC_DMA_CHANNEL_BASE(txrx, chan)             \
-             (R_MAC_DMA_CHANNELS +                   \
-             (MAC_DMA_TXRX_SPACING*(txrx)) +        \
-             (MAC_DMA_CHANNEL_SPACING*(chan)))
+            (R_MAC_DMA_CHANNELS +                   \
+            (MAC_DMA_TXRX_SPACING*(txrx)) +        \
+            (MAC_DMA_CHANNEL_SPACING*(chan)))
 
-#define A_MAC_DMA_REGISTER(macnum, txrx, chan, reg)           \
-            (A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) +    \
-            (reg))
+#define A_MAC_DMA_REGISTER(macnum, txrx, chan, reg)          \
+           (A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) +    \
+           (reg))
 
-#define R_MAC_DMA_REGISTER(txrx, chan, reg)           \
-            (R_MAC_DMA_CHANNEL_BASE(txrx, chan) +    \
-            (reg))
+#define R_MAC_DMA_REGISTER(txrx, chan, reg)          \
+           (R_MAC_DMA_CHANNEL_BASE(txrx, chan) +    \
+           (reg))
 
 /*
  * DMA channel registers, relative to A_MAC_DMA_CHANNEL_BASE
  */
 
-#define R_MAC_DMA_CONFIG0               0x00000000
-#define R_MAC_DMA_CONFIG1               0x00000008
-#define R_MAC_DMA_DSCR_BASE             0x00000010
-#define R_MAC_DMA_DSCR_CNT              0x00000018
-#define R_MAC_DMA_CUR_DSCRA             0x00000020
-#define R_MAC_DMA_CUR_DSCRB             0x00000028
-#define R_MAC_DMA_CUR_DSCRADDR          0x00000030
+#define R_MAC_DMA_CONFIG0              0x00000000
+#define R_MAC_DMA_CONFIG1              0x00000008
+#define R_MAC_DMA_DSCR_BASE            0x00000010
+#define R_MAC_DMA_DSCR_CNT             0x00000018
+#define R_MAC_DMA_CUR_DSCRA            0x00000020
+#define R_MAC_DMA_CUR_DSCRB            0x00000028
+#define R_MAC_DMA_CUR_DSCRADDR         0x00000030
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define R_MAC_DMA_OODPKTLOST_RX         0x00000038     /* rx only */
+#define R_MAC_DMA_OODPKTLOST_RX                0x00000038      /* rx only */
 #endif /* 1250 PASS3 || 112x PASS1 */
 
 /*
  * RMON Counters
  */
 
-#define R_MAC_RMON_TX_BYTES             0x00000000
-#define R_MAC_RMON_COLLISIONS           0x00000008
-#define R_MAC_RMON_LATE_COL             0x00000010
-#define R_MAC_RMON_EX_COL               0x00000018
-#define R_MAC_RMON_FCS_ERROR            0x00000020
-#define R_MAC_RMON_TX_ABORT             0x00000028
+#define R_MAC_RMON_TX_BYTES            0x00000000
+#define R_MAC_RMON_COLLISIONS          0x00000008
+#define R_MAC_RMON_LATE_COL            0x00000010
+#define R_MAC_RMON_EX_COL              0x00000018
+#define R_MAC_RMON_FCS_ERROR           0x00000020
+#define R_MAC_RMON_TX_ABORT            0x00000028
 /* Counter #6 (0x30) now reserved */
-#define R_MAC_RMON_TX_BAD               0x00000038
-#define R_MAC_RMON_TX_GOOD              0x00000040
-#define R_MAC_RMON_TX_RUNT              0x00000048
-#define R_MAC_RMON_TX_OVERSIZE          0x00000050
-#define R_MAC_RMON_RX_BYTES             0x00000080
-#define R_MAC_RMON_RX_MCAST             0x00000088
-#define R_MAC_RMON_RX_BCAST             0x00000090
-#define R_MAC_RMON_RX_BAD               0x00000098
-#define R_MAC_RMON_RX_GOOD              0x000000A0
-#define R_MAC_RMON_RX_RUNT              0x000000A8
-#define R_MAC_RMON_RX_OVERSIZE          0x000000B0
-#define R_MAC_RMON_RX_FCS_ERROR         0x000000B8
-#define R_MAC_RMON_RX_LENGTH_ERROR      0x000000C0
-#define R_MAC_RMON_RX_CODE_ERROR        0x000000C8
-#define R_MAC_RMON_RX_ALIGN_ERROR       0x000000D0
+#define R_MAC_RMON_TX_BAD              0x00000038
+#define R_MAC_RMON_TX_GOOD             0x00000040
+#define R_MAC_RMON_TX_RUNT             0x00000048
+#define R_MAC_RMON_TX_OVERSIZE         0x00000050
+#define R_MAC_RMON_RX_BYTES            0x00000080
+#define R_MAC_RMON_RX_MCAST            0x00000088
+#define R_MAC_RMON_RX_BCAST            0x00000090
+#define R_MAC_RMON_RX_BAD              0x00000098
+#define R_MAC_RMON_RX_GOOD             0x000000A0
+#define R_MAC_RMON_RX_RUNT             0x000000A8
+#define R_MAC_RMON_RX_OVERSIZE         0x000000B0
+#define R_MAC_RMON_RX_FCS_ERROR                0x000000B8
+#define R_MAC_RMON_RX_LENGTH_ERROR     0x000000C0
+#define R_MAC_RMON_RX_CODE_ERROR       0x000000C8
+#define R_MAC_RMON_RX_ALIGN_ERROR      0x000000D0
 
 /* Updated to spec 0.2 */
-#define R_MAC_CFG                       0x00000100
-#define R_MAC_THRSH_CFG                 0x00000108
-#define R_MAC_VLANTAG                   0x00000110
-#define R_MAC_FRAMECFG                  0x00000118
-#define R_MAC_EOPCNT                    0x00000120
-#define R_MAC_FIFO_PTRS                 0x00000128
-#define R_MAC_ADFILTER_CFG              0x00000200
-#define R_MAC_ETHERNET_ADDR             0x00000208
-#define R_MAC_PKT_TYPE                  0x00000210
+#define R_MAC_CFG                      0x00000100
+#define R_MAC_THRSH_CFG                        0x00000108
+#define R_MAC_VLANTAG                  0x00000110
+#define R_MAC_FRAMECFG                 0x00000118
+#define R_MAC_EOPCNT                   0x00000120
+#define R_MAC_FIFO_PTRS                        0x00000128
+#define R_MAC_ADFILTER_CFG             0x00000200
+#define R_MAC_ETHERNET_ADDR            0x00000208
+#define R_MAC_PKT_TYPE                 0x00000210
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define R_MAC_ADMASK0                  0x00000218
 #define R_MAC_ADMASK1                  0x00000220
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define R_MAC_HASH_BASE                 0x00000240
-#define R_MAC_ADDR_BASE                 0x00000280
-#define R_MAC_CHLO0_BASE                0x00000300
-#define R_MAC_CHUP0_BASE                0x00000320
-#define R_MAC_ENABLE                    0x00000400
-#define R_MAC_STATUS                    0x00000408
-#define R_MAC_INT_MASK                  0x00000410
-#define R_MAC_TXD_CTL                   0x00000420
-#define R_MAC_MDIO                      0x00000428
+#define R_MAC_HASH_BASE                        0x00000240
+#define R_MAC_ADDR_BASE                        0x00000280
+#define R_MAC_CHLO0_BASE               0x00000300
+#define R_MAC_CHUP0_BASE               0x00000320
+#define R_MAC_ENABLE                   0x00000400
+#define R_MAC_STATUS                   0x00000408
+#define R_MAC_INT_MASK                 0x00000410
+#define R_MAC_TXD_CTL                  0x00000420
+#define R_MAC_MDIO                     0x00000428
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define R_MAC_STATUS1                  0x00000430
+#define R_MAC_STATUS1                  0x00000430
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define R_MAC_DEBUG_STATUS              0x00000448
+#define R_MAC_DEBUG_STATUS             0x00000448
 
 #define MAC_HASH_COUNT                 8
 #define MAC_ADDR_COUNT                 8
 
 
 #if SIBYTE_HDR_FEATURE_1250_112x    /* This MC only on 1250 & 112x */
-#define R_DUART_NUM_PORTS           2
+#define R_DUART_NUM_PORTS          2
 
-#define A_DUART                     0x0010060000
+#define A_DUART                            0x0010060000
 
-#define DUART_CHANREG_SPACING       0x100
+#define DUART_CHANREG_SPACING      0x100
 
 #define A_DUART_CHANREG(chan, reg)                                     \
        (A_DUART + DUART_CHANREG_SPACING * ((chan) + 1) + (reg))
  * These constants are the absolute addresses.
  */
 
-#define A_DUART_MODE_REG_1_A        0x0010060100
-#define A_DUART_MODE_REG_2_A        0x0010060110
-#define A_DUART_STATUS_A            0x0010060120
-#define A_DUART_CLK_SEL_A           0x0010060130
-#define A_DUART_CMD_A               0x0010060150
-#define A_DUART_RX_HOLD_A           0x0010060160
-#define A_DUART_TX_HOLD_A           0x0010060170
-
-#define A_DUART_MODE_REG_1_B        0x0010060200
-#define A_DUART_MODE_REG_2_B        0x0010060210
-#define A_DUART_STATUS_B            0x0010060220
-#define A_DUART_CLK_SEL_B           0x0010060230
-#define A_DUART_CMD_B               0x0010060250
-#define A_DUART_RX_HOLD_B           0x0010060260
-#define A_DUART_TX_HOLD_B           0x0010060270
-
-#define A_DUART_INPORT_CHNG         0x0010060300
-#define A_DUART_AUX_CTRL            0x0010060310
-#define A_DUART_ISR_A               0x0010060320
-#define A_DUART_IMR_A               0x0010060330
-#define A_DUART_ISR_B               0x0010060340
-#define A_DUART_IMR_B               0x0010060350
-#define A_DUART_OUT_PORT            0x0010060360
-#define A_DUART_OPCR                0x0010060370
-#define A_DUART_IN_PORT             0x0010060380
-#define A_DUART_ISR                 0x0010060390
-#define A_DUART_IMR                 0x00100603A0
-#define A_DUART_SET_OPR             0x00100603B0
-#define A_DUART_CLEAR_OPR           0x00100603C0
-#define A_DUART_INPORT_CHNG_A       0x00100603D0
-#define A_DUART_INPORT_CHNG_B       0x00100603E0
+#define A_DUART_MODE_REG_1_A       0x0010060100
+#define A_DUART_MODE_REG_2_A       0x0010060110
+#define A_DUART_STATUS_A           0x0010060120
+#define A_DUART_CLK_SEL_A          0x0010060130
+#define A_DUART_CMD_A              0x0010060150
+#define A_DUART_RX_HOLD_A          0x0010060160
+#define A_DUART_TX_HOLD_A          0x0010060170
+
+#define A_DUART_MODE_REG_1_B       0x0010060200
+#define A_DUART_MODE_REG_2_B       0x0010060210
+#define A_DUART_STATUS_B           0x0010060220
+#define A_DUART_CLK_SEL_B          0x0010060230
+#define A_DUART_CMD_B              0x0010060250
+#define A_DUART_RX_HOLD_B          0x0010060260
+#define A_DUART_TX_HOLD_B          0x0010060270
+
+#define A_DUART_INPORT_CHNG        0x0010060300
+#define A_DUART_AUX_CTRL           0x0010060310
+#define A_DUART_ISR_A              0x0010060320
+#define A_DUART_IMR_A              0x0010060330
+#define A_DUART_ISR_B              0x0010060340
+#define A_DUART_IMR_B              0x0010060350
+#define A_DUART_OUT_PORT           0x0010060360
+#define A_DUART_OPCR               0x0010060370
+#define A_DUART_IN_PORT                    0x0010060380
+#define A_DUART_ISR                0x0010060390
+#define A_DUART_IMR                0x00100603A0
+#define A_DUART_SET_OPR                    0x00100603B0
+#define A_DUART_CLEAR_OPR          0x00100603C0
+#define A_DUART_INPORT_CHNG_A      0x00100603D0
+#define A_DUART_INPORT_CHNG_B      0x00100603E0
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define A_DUART_FULL_CTL_A         0x0010060140
 #define A_DUART_FULL_CTL_B         0x0010060240
 
-#define A_DUART_OPCR_A             0x0010060180
-#define A_DUART_OPCR_B             0x0010060280
+#define A_DUART_OPCR_A             0x0010060180
+#define A_DUART_OPCR_B             0x0010060280
 
 #define A_DUART_INPORT_CHNG_DEBUG   0x00100603F0
 #endif /* 1250 PASS2 || 112x PASS1 */
 
 #if SIBYTE_HDR_FEATURE_1250_112x       /* sync serial only on 1250/112x */
 
-#define A_SER_BASE_0                0x0010060400
-#define A_SER_BASE_1                0x0010060800
-#define SER_SPACING                 0x400
+#define A_SER_BASE_0               0x0010060400
+#define A_SER_BASE_1               0x0010060800
+#define SER_SPACING                0x400
 
-#define SER_DMA_TXRX_SPACING        0x80
+#define SER_DMA_TXRX_SPACING       0x80
 
-#define SER_NUM_PORTS               2
+#define SER_NUM_PORTS              2
 
-#define A_SER_CHANNEL_BASE(sernum)                  \
-            (A_SER_BASE_0 +                         \
-             SER_SPACING*(sernum))
+#define A_SER_CHANNEL_BASE(sernum)                 \
+           (A_SER_BASE_0 +                         \
+            SER_SPACING*(sernum))
 
-#define A_SER_REGISTER(sernum,reg)                  \
-            (A_SER_BASE_0 +                         \
-             SER_SPACING*(sernum) + (reg))
+#define A_SER_REGISTER(sernum,reg)                 \
+           (A_SER_BASE_0 +                         \
+            SER_SPACING*(sernum) + (reg))
 
 
 #define R_SER_DMA_CHANNELS             0   /* Relative to A_SER_BASE_x */
 
 #define A_SER_DMA_CHANNEL_BASE(sernum,txrx)    \
-             ((A_SER_CHANNEL_BASE(sernum)) +        \
-             R_SER_DMA_CHANNELS +                   \
-             (SER_DMA_TXRX_SPACING*(txrx)))
+            ((A_SER_CHANNEL_BASE(sernum)) +        \
+            R_SER_DMA_CHANNELS +                   \
+            (SER_DMA_TXRX_SPACING*(txrx)))
 
-#define A_SER_DMA_REGISTER(sernum, txrx, reg)           \
-            (A_SER_DMA_CHANNEL_BASE(sernum, txrx) +    \
-            (reg))
+#define A_SER_DMA_REGISTER(sernum, txrx, reg)          \
+           (A_SER_DMA_CHANNEL_BASE(sernum, txrx) +    \
+           (reg))
 
 
 /*
  * DMA channel registers, relative to A_SER_DMA_CHANNEL_BASE
  */
 
-#define R_SER_DMA_CONFIG0           0x00000000
-#define R_SER_DMA_CONFIG1           0x00000008
-#define R_SER_DMA_DSCR_BASE         0x00000010
-#define R_SER_DMA_DSCR_CNT          0x00000018
-#define R_SER_DMA_CUR_DSCRA         0x00000020
-#define R_SER_DMA_CUR_DSCRB         0x00000028
-#define R_SER_DMA_CUR_DSCRADDR      0x00000030
-
-#define R_SER_DMA_CONFIG0_RX        0x00000000
-#define R_SER_DMA_CONFIG1_RX        0x00000008
-#define R_SER_DMA_DSCR_BASE_RX      0x00000010
-#define R_SER_DMA_DSCR_COUNT_RX     0x00000018
-#define R_SER_DMA_CUR_DSCR_A_RX     0x00000020
-#define R_SER_DMA_CUR_DSCR_B_RX     0x00000028
+#define R_SER_DMA_CONFIG0          0x00000000
+#define R_SER_DMA_CONFIG1          0x00000008
+#define R_SER_DMA_DSCR_BASE        0x00000010
+#define R_SER_DMA_DSCR_CNT         0x00000018
+#define R_SER_DMA_CUR_DSCRA        0x00000020
+#define R_SER_DMA_CUR_DSCRB        0x00000028
+#define R_SER_DMA_CUR_DSCRADDR     0x00000030
+
+#define R_SER_DMA_CONFIG0_RX       0x00000000
+#define R_SER_DMA_CONFIG1_RX       0x00000008
+#define R_SER_DMA_DSCR_BASE_RX     0x00000010
+#define R_SER_DMA_DSCR_COUNT_RX            0x00000018
+#define R_SER_DMA_CUR_DSCR_A_RX            0x00000020
+#define R_SER_DMA_CUR_DSCR_B_RX            0x00000028
 #define R_SER_DMA_CUR_DSCR_ADDR_RX  0x00000030
 
-#define R_SER_DMA_CONFIG0_TX        0x00000080
-#define R_SER_DMA_CONFIG1_TX        0x00000088
-#define R_SER_DMA_DSCR_BASE_TX      0x00000090
-#define R_SER_DMA_DSCR_COUNT_TX     0x00000098
-#define R_SER_DMA_CUR_DSCR_A_TX     0x000000A0
-#define R_SER_DMA_CUR_DSCR_B_TX     0x000000A8
+#define R_SER_DMA_CONFIG0_TX       0x00000080
+#define R_SER_DMA_CONFIG1_TX       0x00000088
+#define R_SER_DMA_DSCR_BASE_TX     0x00000090
+#define R_SER_DMA_DSCR_COUNT_TX            0x00000098
+#define R_SER_DMA_CUR_DSCR_A_TX            0x000000A0
+#define R_SER_DMA_CUR_DSCR_B_TX            0x000000A8
 #define R_SER_DMA_CUR_DSCR_ADDR_TX  0x000000B0
 
-#define R_SER_MODE                  0x00000100
-#define R_SER_MINFRM_SZ             0x00000108
-#define R_SER_MAXFRM_SZ             0x00000110
-#define R_SER_ADDR                  0x00000118
-#define R_SER_USR0_ADDR             0x00000120
-#define R_SER_USR1_ADDR             0x00000128
-#define R_SER_USR2_ADDR             0x00000130
-#define R_SER_USR3_ADDR             0x00000138
-#define R_SER_CMD                   0x00000140
-#define R_SER_TX_RD_THRSH           0x00000160
-#define R_SER_TX_WR_THRSH           0x00000168
-#define R_SER_RX_RD_THRSH           0x00000170
+#define R_SER_MODE                 0x00000100
+#define R_SER_MINFRM_SZ                    0x00000108
+#define R_SER_MAXFRM_SZ                    0x00000110
+#define R_SER_ADDR                 0x00000118
+#define R_SER_USR0_ADDR                    0x00000120
+#define R_SER_USR1_ADDR                    0x00000128
+#define R_SER_USR2_ADDR                    0x00000130
+#define R_SER_USR3_ADDR                    0x00000138
+#define R_SER_CMD                  0x00000140
+#define R_SER_TX_RD_THRSH          0x00000160
+#define R_SER_TX_WR_THRSH          0x00000168
+#define R_SER_RX_RD_THRSH          0x00000170
 #define R_SER_LINE_MODE                    0x00000178
-#define R_SER_DMA_ENABLE            0x00000180
-#define R_SER_INT_MASK              0x00000190
-#define R_SER_STATUS                0x00000188
-#define R_SER_STATUS_DEBUG          0x000001A8
-#define R_SER_RX_TABLE_BASE         0x00000200
-#define SER_RX_TABLE_COUNT          16
-#define R_SER_TX_TABLE_BASE         0x00000300
-#define SER_TX_TABLE_COUNT          16
+#define R_SER_DMA_ENABLE           0x00000180
+#define R_SER_INT_MASK             0x00000190
+#define R_SER_STATUS               0x00000188
+#define R_SER_STATUS_DEBUG         0x000001A8
+#define R_SER_RX_TABLE_BASE        0x00000200
+#define SER_RX_TABLE_COUNT         16
+#define R_SER_TX_TABLE_BASE        0x00000300
+#define SER_TX_TABLE_COUNT         16
 
 /* RMON Counters */
-#define R_SER_RMON_TX_BYTE_LO       0x000001C0
-#define R_SER_RMON_TX_BYTE_HI       0x000001C8
-#define R_SER_RMON_RX_BYTE_LO       0x000001D0
-#define R_SER_RMON_RX_BYTE_HI       0x000001D8
-#define R_SER_RMON_TX_UNDERRUN      0x000001E0
-#define R_SER_RMON_RX_OVERFLOW      0x000001E8
-#define R_SER_RMON_RX_ERRORS        0x000001F0
-#define R_SER_RMON_RX_BADADDR       0x000001F8
+#define R_SER_RMON_TX_BYTE_LO      0x000001C0
+#define R_SER_RMON_TX_BYTE_HI      0x000001C8
+#define R_SER_RMON_RX_BYTE_LO      0x000001D0
+#define R_SER_RMON_RX_BYTE_HI      0x000001D8
+#define R_SER_RMON_TX_UNDERRUN     0x000001E0
+#define R_SER_RMON_RX_OVERFLOW     0x000001E8
+#define R_SER_RMON_RX_ERRORS       0x000001F0
+#define R_SER_RMON_RX_BADADDR      0x000001F8
 
 #endif /* 1250/112x */
 
     * Generic Bus Registers
     ********************************************************************* */
 
-#define IO_EXT_CFG_COUNT            8
+#define IO_EXT_CFG_COUNT           8
 
 #define A_IO_EXT_BASE              0x0010061000
 #define A_IO_EXT_REG(r)                    (A_IO_EXT_BASE + (r))
 
-#define A_IO_EXT_CFG_BASE           0x0010061000
-#define A_IO_EXT_MULT_SIZE_BASE     0x0010061100
+#define A_IO_EXT_CFG_BASE          0x0010061000
+#define A_IO_EXT_MULT_SIZE_BASE            0x0010061100
 #define A_IO_EXT_START_ADDR_BASE    0x0010061200
-#define A_IO_EXT_TIME_CFG0_BASE     0x0010061600
-#define A_IO_EXT_TIME_CFG1_BASE     0x0010061700
+#define A_IO_EXT_TIME_CFG0_BASE            0x0010061600
+#define A_IO_EXT_TIME_CFG1_BASE            0x0010061700
 
 #define IO_EXT_REGISTER_SPACING            8
 #define A_IO_EXT_CS_BASE(cs)       (A_IO_EXT_CFG_BASE+IO_EXT_REGISTER_SPACING*(cs))
 #define R_IO_EXT_REG(reg, cs)      ((cs)*IO_EXT_REGISTER_SPACING + (reg))
 
 #define R_IO_EXT_CFG               0x0000
-#define R_IO_EXT_MULT_SIZE          0x0100
+#define R_IO_EXT_MULT_SIZE         0x0100
 #define R_IO_EXT_START_ADDR        0x0200
-#define R_IO_EXT_TIME_CFG0          0x0600
-#define R_IO_EXT_TIME_CFG1          0x0700
-
-
-#define A_IO_INTERRUPT_STATUS       0x0010061A00
-#define A_IO_INTERRUPT_DATA0        0x0010061A10
-#define A_IO_INTERRUPT_DATA1        0x0010061A18
-#define A_IO_INTERRUPT_DATA2        0x0010061A20
-#define A_IO_INTERRUPT_DATA3        0x0010061A28
-#define A_IO_INTERRUPT_ADDR0        0x0010061A30
-#define A_IO_INTERRUPT_ADDR1        0x0010061A40
-#define A_IO_INTERRUPT_PARITY       0x0010061A50
-#define A_IO_PCMCIA_CFG             0x0010061A60
-#define A_IO_PCMCIA_STATUS          0x0010061A70
+#define R_IO_EXT_TIME_CFG0         0x0600
+#define R_IO_EXT_TIME_CFG1         0x0700
+
+
+#define A_IO_INTERRUPT_STATUS      0x0010061A00
+#define A_IO_INTERRUPT_DATA0       0x0010061A10
+#define A_IO_INTERRUPT_DATA1       0x0010061A18
+#define A_IO_INTERRUPT_DATA2       0x0010061A20
+#define A_IO_INTERRUPT_DATA3       0x0010061A28
+#define A_IO_INTERRUPT_ADDR0       0x0010061A30
+#define A_IO_INTERRUPT_ADDR1       0x0010061A40
+#define A_IO_INTERRUPT_PARITY      0x0010061A50
+#define A_IO_PCMCIA_CFG                    0x0010061A60
+#define A_IO_PCMCIA_STATUS         0x0010061A70
 #define A_IO_DRIVE_0               0x0010061300
 #define A_IO_DRIVE_1               0x0010061308
 #define A_IO_DRIVE_2               0x0010061310
 #define R_IO_DRIVE(x)              ((x)*IO_DRIVE_REGISTER_SPACING)
 #define A_IO_DRIVE(x)              (A_IO_DRIVE_BASE + R_IO_DRIVE(x))
 
-#define R_IO_INTERRUPT_STATUS       0x0A00
-#define R_IO_INTERRUPT_DATA0        0x0A10
-#define R_IO_INTERRUPT_DATA1        0x0A18
-#define R_IO_INTERRUPT_DATA2        0x0A20
-#define R_IO_INTERRUPT_DATA3        0x0A28
-#define R_IO_INTERRUPT_ADDR0        0x0A30
-#define R_IO_INTERRUPT_ADDR1        0x0A40
-#define R_IO_INTERRUPT_PARITY       0x0A50
-#define R_IO_PCMCIA_CFG             0x0A60
-#define R_IO_PCMCIA_STATUS          0x0A70
+#define R_IO_INTERRUPT_STATUS      0x0A00
+#define R_IO_INTERRUPT_DATA0       0x0A10
+#define R_IO_INTERRUPT_DATA1       0x0A18
+#define R_IO_INTERRUPT_DATA2       0x0A20
+#define R_IO_INTERRUPT_DATA3       0x0A28
+#define R_IO_INTERRUPT_ADDR0       0x0A30
+#define R_IO_INTERRUPT_ADDR1       0x0A40
+#define R_IO_INTERRUPT_PARITY      0x0A50
+#define R_IO_PCMCIA_CFG                    0x0A60
+#define R_IO_PCMCIA_STATUS         0x0A70
 
 /*  *********************************************************************
     * GPIO Registers
     ********************************************************************* */
 
-#define A_GPIO_CLR_EDGE             0x0010061A80
-#define A_GPIO_INT_TYPE             0x0010061A88
-#define A_GPIO_INPUT_INVERT         0x0010061A90
-#define A_GPIO_GLITCH               0x0010061A98
-#define A_GPIO_READ                 0x0010061AA0
-#define A_GPIO_DIRECTION            0x0010061AA8
-#define A_GPIO_PIN_CLR              0x0010061AB0
-#define A_GPIO_PIN_SET              0x0010061AB8
+#define A_GPIO_CLR_EDGE                    0x0010061A80
+#define A_GPIO_INT_TYPE                    0x0010061A88
+#define A_GPIO_INPUT_INVERT        0x0010061A90
+#define A_GPIO_GLITCH              0x0010061A98
+#define A_GPIO_READ                0x0010061AA0
+#define A_GPIO_DIRECTION           0x0010061AA8
+#define A_GPIO_PIN_CLR             0x0010061AB0
+#define A_GPIO_PIN_SET             0x0010061AB8
 
 #define A_GPIO_BASE                0x0010061A80
 
-#define R_GPIO_CLR_EDGE             0x00
-#define R_GPIO_INT_TYPE             0x08
-#define R_GPIO_INPUT_INVERT         0x10
-#define R_GPIO_GLITCH               0x18
-#define R_GPIO_READ                 0x20
-#define R_GPIO_DIRECTION            0x28
-#define R_GPIO_PIN_CLR              0x30
-#define R_GPIO_PIN_SET              0x38
+#define R_GPIO_CLR_EDGE                    0x00
+#define R_GPIO_INT_TYPE                    0x08
+#define R_GPIO_INPUT_INVERT        0x10
+#define R_GPIO_GLITCH              0x18
+#define R_GPIO_READ                0x20
+#define R_GPIO_DIRECTION           0x28
+#define R_GPIO_PIN_CLR             0x30
+#define R_GPIO_PIN_SET             0x38
 
 /*  *********************************************************************
     * SMBus Registers
     ********************************************************************* */
 
-#define A_SMB_XTRA_0                0x0010060000
-#define A_SMB_XTRA_1                0x0010060008
-#define A_SMB_FREQ_0                0x0010060010
-#define A_SMB_FREQ_1                0x0010060018
-#define A_SMB_STATUS_0              0x0010060020
-#define A_SMB_STATUS_1              0x0010060028
-#define A_SMB_CMD_0                 0x0010060030
-#define A_SMB_CMD_1                 0x0010060038
-#define A_SMB_START_0               0x0010060040
-#define A_SMB_START_1               0x0010060048
-#define A_SMB_DATA_0                0x0010060050
-#define A_SMB_DATA_1                0x0010060058
-#define A_SMB_CONTROL_0             0x0010060060
-#define A_SMB_CONTROL_1             0x0010060068
-#define A_SMB_PEC_0                 0x0010060070
-#define A_SMB_PEC_1                 0x0010060078
-
-#define A_SMB_0                     0x0010060000
-#define A_SMB_1                     0x0010060008
-#define SMB_REGISTER_SPACING        0x8
-#define A_SMB_BASE(idx)             (A_SMB_0+(idx)*SMB_REGISTER_SPACING)
+#define A_SMB_XTRA_0               0x0010060000
+#define A_SMB_XTRA_1               0x0010060008
+#define A_SMB_FREQ_0               0x0010060010
+#define A_SMB_FREQ_1               0x0010060018
+#define A_SMB_STATUS_0             0x0010060020
+#define A_SMB_STATUS_1             0x0010060028
+#define A_SMB_CMD_0                0x0010060030
+#define A_SMB_CMD_1                0x0010060038
+#define A_SMB_START_0              0x0010060040
+#define A_SMB_START_1              0x0010060048
+#define A_SMB_DATA_0               0x0010060050
+#define A_SMB_DATA_1               0x0010060058
+#define A_SMB_CONTROL_0                    0x0010060060
+#define A_SMB_CONTROL_1                    0x0010060068
+#define A_SMB_PEC_0                0x0010060070
+#define A_SMB_PEC_1                0x0010060078
+
+#define A_SMB_0                            0x0010060000
+#define A_SMB_1                            0x0010060008
+#define SMB_REGISTER_SPACING       0x8
+#define A_SMB_BASE(idx)                    (A_SMB_0+(idx)*SMB_REGISTER_SPACING)
 #define A_SMB_REGISTER(idx, reg)    (A_SMB_BASE(idx)+(reg))
 
-#define R_SMB_XTRA                  0x0000000000
-#define R_SMB_FREQ                  0x0000000010
-#define R_SMB_STATUS                0x0000000020
-#define R_SMB_CMD                   0x0000000030
-#define R_SMB_START                 0x0000000040
-#define R_SMB_DATA                  0x0000000050
-#define R_SMB_CONTROL               0x0000000060
-#define R_SMB_PEC                   0x0000000070
+#define R_SMB_XTRA                 0x0000000000
+#define R_SMB_FREQ                 0x0000000010
+#define R_SMB_STATUS               0x0000000020
+#define R_SMB_CMD                  0x0000000030
+#define R_SMB_START                0x0000000040
+#define R_SMB_DATA                 0x0000000050
+#define R_SMB_CONTROL              0x0000000060
+#define R_SMB_PEC                  0x0000000070
 
 /*  *********************************************************************
     * Timer Registers
  */
 
 #define A_SCD_WDOG_0               0x0010020050
-#define A_SCD_WDOG_1                0x0010020150
-#define SCD_WDOG_SPACING            0x100
+#define A_SCD_WDOG_1               0x0010020150
+#define SCD_WDOG_SPACING           0x100
 #define SCD_NUM_WDOGS              2
-#define A_SCD_WDOG_BASE(w)          (A_SCD_WDOG_0+SCD_WDOG_SPACING*(w))
+#define A_SCD_WDOG_BASE(w)         (A_SCD_WDOG_0+SCD_WDOG_SPACING*(w))
 #define A_SCD_WDOG_REGISTER(w, r)   (A_SCD_WDOG_BASE(w) + (r))
 
 #define R_SCD_WDOG_INIT                    0x0000000000
 #define R_SCD_WDOG_CNT             0x0000000008
 #define R_SCD_WDOG_CFG             0x0000000010
 
-#define A_SCD_WDOG_INIT_0           0x0010020050
-#define A_SCD_WDOG_CNT_0            0x0010020058
-#define A_SCD_WDOG_CFG_0            0x0010020060
+#define A_SCD_WDOG_INIT_0          0x0010020050
+#define A_SCD_WDOG_CNT_0           0x0010020058
+#define A_SCD_WDOG_CFG_0           0x0010020060
 
-#define A_SCD_WDOG_INIT_1           0x0010020150
-#define A_SCD_WDOG_CNT_1            0x0010020158
-#define A_SCD_WDOG_CFG_1            0x0010020160
+#define A_SCD_WDOG_INIT_1          0x0010020150
+#define A_SCD_WDOG_CNT_1           0x0010020158
+#define A_SCD_WDOG_CFG_1           0x0010020160
 
 /*
  * Generic timers
  */
 
 #define A_SCD_TIMER_0              0x0010020070
-#define A_SCD_TIMER_1               0x0010020078
+#define A_SCD_TIMER_1              0x0010020078
 #define A_SCD_TIMER_2              0x0010020170
-#define A_SCD_TIMER_3               0x0010020178
+#define A_SCD_TIMER_3              0x0010020178
 #define SCD_NUM_TIMERS             4
-#define A_SCD_TIMER_BASE(w)         (A_SCD_TIMER_0+0x08*((w)&1)+0x100*(((w)&2)>>1))
+#define A_SCD_TIMER_BASE(w)        (A_SCD_TIMER_0+0x08*((w)&1)+0x100*(((w)&2)>>1))
 #define A_SCD_TIMER_REGISTER(w, r)  (A_SCD_TIMER_BASE(w) + (r))
 
 #define R_SCD_TIMER_INIT           0x0000000000
 #define R_SCD_TIMER_CNT                    0x0000000010
 #define R_SCD_TIMER_CFG                    0x0000000020
 
-#define A_SCD_TIMER_INIT_0          0x0010020070
-#define A_SCD_TIMER_CNT_0           0x0010020080
-#define A_SCD_TIMER_CFG_0           0x0010020090
+#define A_SCD_TIMER_INIT_0         0x0010020070
+#define A_SCD_TIMER_CNT_0          0x0010020080
+#define A_SCD_TIMER_CFG_0          0x0010020090
 
-#define A_SCD_TIMER_INIT_1          0x0010020078
-#define A_SCD_TIMER_CNT_1           0x0010020088
-#define A_SCD_TIMER_CFG_1           0x0010020098
+#define A_SCD_TIMER_INIT_1         0x0010020078
+#define A_SCD_TIMER_CNT_1          0x0010020088
+#define A_SCD_TIMER_CFG_1          0x0010020098
 
-#define A_SCD_TIMER_INIT_2          0x0010020170
-#define A_SCD_TIMER_CNT_2           0x0010020180
-#define A_SCD_TIMER_CFG_2           0x0010020190
+#define A_SCD_TIMER_INIT_2         0x0010020170
+#define A_SCD_TIMER_CNT_2          0x0010020180
+#define A_SCD_TIMER_CFG_2          0x0010020190
 
-#define A_SCD_TIMER_INIT_3          0x0010020178
-#define A_SCD_TIMER_CNT_3           0x0010020188
-#define A_SCD_TIMER_CFG_3           0x0010020198
+#define A_SCD_TIMER_INIT_3         0x0010020178
+#define A_SCD_TIMER_CNT_3          0x0010020188
+#define A_SCD_TIMER_CFG_3          0x0010020198
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define A_SCD_SCRATCH             0x0010020C10
     * System Control Registers
     ********************************************************************* */
 
-#define A_SCD_SYSTEM_REVISION       0x0010020000
-#define A_SCD_SYSTEM_CFG            0x0010020008
-#define A_SCD_SYSTEM_MANUF          0x0010038000
+#define A_SCD_SYSTEM_REVISION      0x0010020000
+#define A_SCD_SYSTEM_CFG           0x0010020008
+#define A_SCD_SYSTEM_MANUF         0x0010038000
 
 /*  *********************************************************************
     * System Address Trap Registers
     ********************************************************************* */
 
-#define A_ADDR_TRAP_INDEX           0x00100200B0
-#define A_ADDR_TRAP_REG             0x00100200B8
-#define A_ADDR_TRAP_UP_0            0x0010020400
-#define A_ADDR_TRAP_UP_1            0x0010020408
-#define A_ADDR_TRAP_UP_2            0x0010020410
-#define A_ADDR_TRAP_UP_3            0x0010020418
-#define A_ADDR_TRAP_DOWN_0          0x0010020420
-#define A_ADDR_TRAP_DOWN_1          0x0010020428
-#define A_ADDR_TRAP_DOWN_2          0x0010020430
-#define A_ADDR_TRAP_DOWN_3          0x0010020438
-#define A_ADDR_TRAP_CFG_0           0x0010020440
-#define A_ADDR_TRAP_CFG_1           0x0010020448
-#define A_ADDR_TRAP_CFG_2           0x0010020450
-#define A_ADDR_TRAP_CFG_3           0x0010020458
+#define A_ADDR_TRAP_INDEX          0x00100200B0
+#define A_ADDR_TRAP_REG                    0x00100200B8
+#define A_ADDR_TRAP_UP_0           0x0010020400
+#define A_ADDR_TRAP_UP_1           0x0010020408
+#define A_ADDR_TRAP_UP_2           0x0010020410
+#define A_ADDR_TRAP_UP_3           0x0010020418
+#define A_ADDR_TRAP_DOWN_0         0x0010020420
+#define A_ADDR_TRAP_DOWN_1         0x0010020428
+#define A_ADDR_TRAP_DOWN_2         0x0010020430
+#define A_ADDR_TRAP_DOWN_3         0x0010020438
+#define A_ADDR_TRAP_CFG_0          0x0010020440
+#define A_ADDR_TRAP_CFG_1          0x0010020448
+#define A_ADDR_TRAP_CFG_2          0x0010020450
+#define A_ADDR_TRAP_CFG_3          0x0010020458
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 #define A_ADDR_TRAP_REG_DEBUG      0x0010020460
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
     * System Interrupt Mapper Registers
     ********************************************************************* */
 
-#define A_IMR_CPU0_BASE                 0x0010020000
-#define A_IMR_CPU1_BASE                 0x0010022000
-#define IMR_REGISTER_SPACING            0x2000
-#define IMR_REGISTER_SPACING_SHIFT      13
+#define A_IMR_CPU0_BASE                        0x0010020000
+#define A_IMR_CPU1_BASE                        0x0010022000
+#define IMR_REGISTER_SPACING           0x2000
+#define IMR_REGISTER_SPACING_SHIFT     13
 
 #define A_IMR_MAPPER(cpu) (A_IMR_CPU0_BASE+(cpu)*IMR_REGISTER_SPACING)
 #define A_IMR_REGISTER(cpu, reg) (A_IMR_MAPPER(cpu)+(reg))
 
-#define R_IMR_INTERRUPT_DIAG            0x0010
-#define R_IMR_INTERRUPT_LDT             0x0018
-#define R_IMR_INTERRUPT_MASK            0x0028
-#define R_IMR_INTERRUPT_TRACE           0x0038
-#define R_IMR_INTERRUPT_SOURCE_STATUS   0x0040
-#define R_IMR_LDT_INTERRUPT_SET         0x0048
-#define R_IMR_LDT_INTERRUPT             0x0018
-#define R_IMR_LDT_INTERRUPT_CLR         0x0020
-#define R_IMR_MAILBOX_CPU               0x00c0
-#define R_IMR_ALIAS_MAILBOX_CPU         0x1000
-#define R_IMR_MAILBOX_SET_CPU           0x00C8
-#define R_IMR_ALIAS_MAILBOX_SET_CPU     0x1008
-#define R_IMR_MAILBOX_CLR_CPU           0x00D0
-#define R_IMR_INTERRUPT_STATUS_BASE     0x0100
-#define R_IMR_INTERRUPT_STATUS_COUNT    7
-#define R_IMR_INTERRUPT_MAP_BASE        0x0200
-#define R_IMR_INTERRUPT_MAP_COUNT       64
+#define R_IMR_INTERRUPT_DIAG           0x0010
+#define R_IMR_INTERRUPT_LDT            0x0018
+#define R_IMR_INTERRUPT_MASK           0x0028
+#define R_IMR_INTERRUPT_TRACE          0x0038
+#define R_IMR_INTERRUPT_SOURCE_STATUS  0x0040
+#define R_IMR_LDT_INTERRUPT_SET                0x0048
+#define R_IMR_LDT_INTERRUPT            0x0018
+#define R_IMR_LDT_INTERRUPT_CLR                0x0020
+#define R_IMR_MAILBOX_CPU              0x00c0
+#define R_IMR_ALIAS_MAILBOX_CPU                0x1000
+#define R_IMR_MAILBOX_SET_CPU          0x00C8
+#define R_IMR_ALIAS_MAILBOX_SET_CPU    0x1008
+#define R_IMR_MAILBOX_CLR_CPU          0x00D0
+#define R_IMR_INTERRUPT_STATUS_BASE    0x0100
+#define R_IMR_INTERRUPT_STATUS_COUNT   7
+#define R_IMR_INTERRUPT_MAP_BASE       0x0200
+#define R_IMR_INTERRUPT_MAP_COUNT      64
 
 /*
  * these macros work together to build the address of a mailbox
     * System Performance Counter Registers
     ********************************************************************* */
 
-#define A_SCD_PERF_CNT_CFG          0x00100204C0
-#define A_SCD_PERF_CNT_0            0x00100204D0
-#define A_SCD_PERF_CNT_1            0x00100204D8
-#define A_SCD_PERF_CNT_2            0x00100204E0
-#define A_SCD_PERF_CNT_3            0x00100204E8
+#define A_SCD_PERF_CNT_CFG         0x00100204C0
+#define A_SCD_PERF_CNT_0           0x00100204D0
+#define A_SCD_PERF_CNT_1           0x00100204D8
+#define A_SCD_PERF_CNT_2           0x00100204E0
+#define A_SCD_PERF_CNT_3           0x00100204E8
 
 #define SCD_NUM_PERF_CNT 4
 #define SCD_PERF_CNT_SPACING 8
     * System Bus Watcher Registers
     ********************************************************************* */
 
-#define A_SCD_BUS_ERR_STATUS        0x0010020880
+#define A_SCD_BUS_ERR_STATUS       0x0010020880
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define A_SCD_BUS_ERR_STATUS_DEBUG  0x00100208D0
-#define A_BUS_ERR_STATUS_DEBUG  0x00100208D0
+#define A_BUS_ERR_STATUS_DEBUG 0x00100208D0
 #endif /* 1250 PASS2 || 112x PASS1 */
-#define A_BUS_ERR_DATA_0            0x00100208A0
-#define A_BUS_ERR_DATA_1            0x00100208A8
-#define A_BUS_ERR_DATA_2            0x00100208B0
-#define A_BUS_ERR_DATA_3            0x00100208B8
-#define A_BUS_L2_ERRORS             0x00100208C0
-#define A_BUS_MEM_IO_ERRORS         0x00100208C8
+#define A_BUS_ERR_DATA_0           0x00100208A0
+#define A_BUS_ERR_DATA_1           0x00100208A8
+#define A_BUS_ERR_DATA_2           0x00100208B0
+#define A_BUS_ERR_DATA_3           0x00100208B8
+#define A_BUS_L2_ERRORS                    0x00100208C0
+#define A_BUS_MEM_IO_ERRORS        0x00100208C8
 
 /*  *********************************************************************
     * System Debug Controller Registers
     ********************************************************************* */
 
-#define A_SCD_JTAG_BASE             0x0010000000
+#define A_SCD_JTAG_BASE                    0x0010000000
 
 /*  *********************************************************************
     * System Trace Buffer Registers
     ********************************************************************* */
 
-#define A_SCD_TRACE_CFG             0x0010020A00
-#define A_SCD_TRACE_READ            0x0010020A08
-#define A_SCD_TRACE_EVENT_0         0x0010020A20
-#define A_SCD_TRACE_EVENT_1         0x0010020A28
-#define A_SCD_TRACE_EVENT_2         0x0010020A30
-#define A_SCD_TRACE_EVENT_3         0x0010020A38
-#define A_SCD_TRACE_SEQUENCE_0      0x0010020A40
-#define A_SCD_TRACE_SEQUENCE_1      0x0010020A48
-#define A_SCD_TRACE_SEQUENCE_2      0x0010020A50
-#define A_SCD_TRACE_SEQUENCE_3      0x0010020A58
-#define A_SCD_TRACE_EVENT_4         0x0010020A60
-#define A_SCD_TRACE_EVENT_5         0x0010020A68
-#define A_SCD_TRACE_EVENT_6         0x0010020A70
-#define A_SCD_TRACE_EVENT_7         0x0010020A78
-#define A_SCD_TRACE_SEQUENCE_4      0x0010020A80
-#define A_SCD_TRACE_SEQUENCE_5      0x0010020A88
-#define A_SCD_TRACE_SEQUENCE_6      0x0010020A90
-#define A_SCD_TRACE_SEQUENCE_7      0x0010020A98
+#define A_SCD_TRACE_CFG                    0x0010020A00
+#define A_SCD_TRACE_READ           0x0010020A08
+#define A_SCD_TRACE_EVENT_0        0x0010020A20
+#define A_SCD_TRACE_EVENT_1        0x0010020A28
+#define A_SCD_TRACE_EVENT_2        0x0010020A30
+#define A_SCD_TRACE_EVENT_3        0x0010020A38
+#define A_SCD_TRACE_SEQUENCE_0     0x0010020A40
+#define A_SCD_TRACE_SEQUENCE_1     0x0010020A48
+#define A_SCD_TRACE_SEQUENCE_2     0x0010020A50
+#define A_SCD_TRACE_SEQUENCE_3     0x0010020A58
+#define A_SCD_TRACE_EVENT_4        0x0010020A60
+#define A_SCD_TRACE_EVENT_5        0x0010020A68
+#define A_SCD_TRACE_EVENT_6        0x0010020A70
+#define A_SCD_TRACE_EVENT_7        0x0010020A78
+#define A_SCD_TRACE_SEQUENCE_4     0x0010020A80
+#define A_SCD_TRACE_SEQUENCE_5     0x0010020A88
+#define A_SCD_TRACE_SEQUENCE_6     0x0010020A90
+#define A_SCD_TRACE_SEQUENCE_7     0x0010020A98
 
 #define TRACE_REGISTER_SPACING 8
 #define TRACE_NUM_REGISTERS    8
     * System Generic DMA Registers
     ********************************************************************* */
 
-#define A_DM_0                     0x0010020B00
-#define A_DM_1                     0x0010020B20
+#define A_DM_0                     0x0010020B00
+#define A_DM_1                     0x0010020B20
 #define A_DM_2                     0x0010020B40
 #define A_DM_3                     0x0010020B60
 #define DM_REGISTER_SPACING        0x20
     ********************************************************************* */
 
 #if SIBYTE_HDR_FEATURE_1250_112x
-#define A_PHYS_MEMORY_0                 _SB_MAKE64(0x0000000000)
-#define A_PHYS_MEMORY_SIZE              _SB_MAKE64((256*1024*1024))
-#define A_PHYS_SYSTEM_CTL               _SB_MAKE64(0x0010000000)
-#define A_PHYS_IO_SYSTEM                _SB_MAKE64(0x0010060000)
+#define A_PHYS_MEMORY_0                        _SB_MAKE64(0x0000000000)
+#define A_PHYS_MEMORY_SIZE             _SB_MAKE64((256*1024*1024))
+#define A_PHYS_SYSTEM_CTL              _SB_MAKE64(0x0010000000)
+#define A_PHYS_IO_SYSTEM               _SB_MAKE64(0x0010060000)
 #define A_PHYS_GENBUS                  _SB_MAKE64(0x0010090000)
 #define A_PHYS_GENBUS_END              _SB_MAKE64(0x0040000000)
 #define A_PHYS_LDTPCI_IO_MATCH_BYTES_32 _SB_MAKE64(0x0040000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BITS_32  _SB_MAKE64(0x0060000000)
-#define A_PHYS_MEMORY_1                 _SB_MAKE64(0x0080000000)
-#define A_PHYS_MEMORY_2                 _SB_MAKE64(0x0090000000)
-#define A_PHYS_MEMORY_3                 _SB_MAKE64(0x00C0000000)
-#define A_PHYS_L2_CACHE_TEST            _SB_MAKE64(0x00D0000000)
-#define A_PHYS_LDT_SPECIAL_MATCH_BYTES  _SB_MAKE64(0x00D8000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BYTES    _SB_MAKE64(0x00DC000000)
-#define A_PHYS_LDTPCI_CFG_MATCH_BYTES   _SB_MAKE64(0x00DE000000)
-#define A_PHYS_LDT_SPECIAL_MATCH_BITS   _SB_MAKE64(0x00F8000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BITS     _SB_MAKE64(0x00FC000000)
-#define A_PHYS_LDTPCI_CFG_MATCH_BITS    _SB_MAKE64(0x00FE000000)
-#define A_PHYS_MEMORY_EXP               _SB_MAKE64(0x0100000000)
-#define A_PHYS_MEMORY_EXP_SIZE          _SB_MAKE64((508*1024*1024*1024))
-#define A_PHYS_LDT_EXP                  _SB_MAKE64(0x8000000000)
-#define A_PHYS_PCI_FULLACCESS_BYTES     _SB_MAKE64(0xF000000000)
-#define A_PHYS_PCI_FULLACCESS_BITS      _SB_MAKE64(0xF100000000)
-#define A_PHYS_RESERVED                 _SB_MAKE64(0xF200000000)
-#define A_PHYS_RESERVED_SPECIAL_LDT     _SB_MAKE64(0xFD00000000)
-
-#define A_PHYS_L2CACHE_WAY_SIZE         _SB_MAKE64(0x0000020000)
-#define PHYS_L2CACHE_NUM_WAYS           4
-#define A_PHYS_L2CACHE_TOTAL_SIZE       _SB_MAKE64(0x0000080000)
-#define A_PHYS_L2CACHE_WAY0             _SB_MAKE64(0x00D0180000)
-#define A_PHYS_L2CACHE_WAY1             _SB_MAKE64(0x00D01A0000)
-#define A_PHYS_L2CACHE_WAY2             _SB_MAKE64(0x00D01C0000)
-#define A_PHYS_L2CACHE_WAY3             _SB_MAKE64(0x00D01E0000)
+#define A_PHYS_LDTPCI_IO_MATCH_BITS_32 _SB_MAKE64(0x0060000000)
+#define A_PHYS_MEMORY_1                        _SB_MAKE64(0x0080000000)
+#define A_PHYS_MEMORY_2                        _SB_MAKE64(0x0090000000)
+#define A_PHYS_MEMORY_3                        _SB_MAKE64(0x00C0000000)
+#define A_PHYS_L2_CACHE_TEST           _SB_MAKE64(0x00D0000000)
+#define A_PHYS_LDT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BYTES   _SB_MAKE64(0x00DC000000)
+#define A_PHYS_LDTPCI_CFG_MATCH_BYTES  _SB_MAKE64(0x00DE000000)
+#define A_PHYS_LDT_SPECIAL_MATCH_BITS  _SB_MAKE64(0x00F8000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BITS    _SB_MAKE64(0x00FC000000)
+#define A_PHYS_LDTPCI_CFG_MATCH_BITS   _SB_MAKE64(0x00FE000000)
+#define A_PHYS_MEMORY_EXP              _SB_MAKE64(0x0100000000)
+#define A_PHYS_MEMORY_EXP_SIZE         _SB_MAKE64((508*1024*1024*1024))
+#define A_PHYS_LDT_EXP                 _SB_MAKE64(0x8000000000)
+#define A_PHYS_PCI_FULLACCESS_BYTES    _SB_MAKE64(0xF000000000)
+#define A_PHYS_PCI_FULLACCESS_BITS     _SB_MAKE64(0xF100000000)
+#define A_PHYS_RESERVED                        _SB_MAKE64(0xF200000000)
+#define A_PHYS_RESERVED_SPECIAL_LDT    _SB_MAKE64(0xFD00000000)
+
+#define A_PHYS_L2CACHE_WAY_SIZE                _SB_MAKE64(0x0000020000)
+#define PHYS_L2CACHE_NUM_WAYS          4
+#define A_PHYS_L2CACHE_TOTAL_SIZE      _SB_MAKE64(0x0000080000)
+#define A_PHYS_L2CACHE_WAY0            _SB_MAKE64(0x00D0180000)
+#define A_PHYS_L2CACHE_WAY1            _SB_MAKE64(0x00D01A0000)
+#define A_PHYS_L2CACHE_WAY2            _SB_MAKE64(0x00D01C0000)
+#define A_PHYS_L2CACHE_WAY3            _SB_MAKE64(0x00D01E0000)
 #endif
 
 
index 615e165..d725f2f 100644 (file)
 
 #define M_SYS_RESERVED             _SB_MAKEMASK(8, 0)
 
-#define S_SYS_REVISION              _SB_MAKE64(8)
-#define M_SYS_REVISION              _SB_MAKEMASK(8, S_SYS_REVISION)
-#define V_SYS_REVISION(x)           _SB_MAKEVALUE(x, S_SYS_REVISION)
-#define G_SYS_REVISION(x)           _SB_GETVALUE(x, S_SYS_REVISION, M_SYS_REVISION)
+#define S_SYS_REVISION             _SB_MAKE64(8)
+#define M_SYS_REVISION             _SB_MAKEMASK(8, S_SYS_REVISION)
+#define V_SYS_REVISION(x)          _SB_MAKEVALUE(x, S_SYS_REVISION)
+#define G_SYS_REVISION(x)          _SB_GETVALUE(x, S_SYS_REVISION, M_SYS_REVISION)
 
 #define K_SYS_REVISION_BCM1250_PASS1   0x01
 
 #define K_SYS_REVISION_BCM1480_B0      0x11
 
 /*Cache size - 23:20  of revision register*/
-#define S_SYS_L2C_SIZE            _SB_MAKE64(20)
-#define M_SYS_L2C_SIZE            _SB_MAKEMASK(4, S_SYS_L2C_SIZE)
-#define V_SYS_L2C_SIZE(x)         _SB_MAKEVALUE(x, S_SYS_L2C_SIZE)
-#define G_SYS_L2C_SIZE(x)         _SB_GETVALUE(x, S_SYS_L2C_SIZE, M_SYS_L2C_SIZE)
+#define S_SYS_L2C_SIZE           _SB_MAKE64(20)
+#define M_SYS_L2C_SIZE           _SB_MAKEMASK(4, S_SYS_L2C_SIZE)
+#define V_SYS_L2C_SIZE(x)        _SB_MAKEVALUE(x, S_SYS_L2C_SIZE)
+#define G_SYS_L2C_SIZE(x)        _SB_GETVALUE(x, S_SYS_L2C_SIZE, M_SYS_L2C_SIZE)
 
 #define K_SYS_L2C_SIZE_1MB     0
 #define K_SYS_L2C_SIZE_512KB   5
 
 
 /* Number of CPU cores, bits 27:24  of revision register*/
-#define S_SYS_NUM_CPUS            _SB_MAKE64(24)
-#define M_SYS_NUM_CPUS            _SB_MAKEMASK(4, S_SYS_NUM_CPUS)
-#define V_SYS_NUM_CPUS(x)         _SB_MAKEVALUE(x, S_SYS_NUM_CPUS)
-#define G_SYS_NUM_CPUS(x)         _SB_GETVALUE(x, S_SYS_NUM_CPUS, M_SYS_NUM_CPUS)
+#define S_SYS_NUM_CPUS           _SB_MAKE64(24)
+#define M_SYS_NUM_CPUS           _SB_MAKEMASK(4, S_SYS_NUM_CPUS)
+#define V_SYS_NUM_CPUS(x)        _SB_MAKEVALUE(x, S_SYS_NUM_CPUS)
+#define G_SYS_NUM_CPUS(x)        _SB_GETVALUE(x, S_SYS_NUM_CPUS, M_SYS_NUM_CPUS)
 
 
 /* XXX: discourage people from using these constants.  */
-#define S_SYS_PART                  _SB_MAKE64(16)
-#define M_SYS_PART                  _SB_MAKEMASK(16, S_SYS_PART)
-#define V_SYS_PART(x)               _SB_MAKEVALUE(x, S_SYS_PART)
-#define G_SYS_PART(x)               _SB_GETVALUE(x, S_SYS_PART, M_SYS_PART)
+#define S_SYS_PART                 _SB_MAKE64(16)
+#define M_SYS_PART                 _SB_MAKEMASK(16, S_SYS_PART)
+#define V_SYS_PART(x)              _SB_MAKEVALUE(x, S_SYS_PART)
+#define G_SYS_PART(x)              _SB_GETVALUE(x, S_SYS_PART, M_SYS_PART)
 
 /* XXX: discourage people from using these constants.  */
-#define K_SYS_PART_SB1250           0x1250
-#define K_SYS_PART_BCM1120          0x1121
-#define K_SYS_PART_BCM1125          0x1123
-#define K_SYS_PART_BCM1125H         0x1124
-#define K_SYS_PART_BCM1122          0x1113
+#define K_SYS_PART_SB1250          0x1250
+#define K_SYS_PART_BCM1120         0x1121
+#define K_SYS_PART_BCM1125         0x1123
+#define K_SYS_PART_BCM1125H        0x1124
+#define K_SYS_PART_BCM1122         0x1113
 
 
 /* The "peripheral set" (SOC type) is the low 4 bits of the "part" field.  */
-#define S_SYS_SOC_TYPE              _SB_MAKE64(16)
-#define M_SYS_SOC_TYPE              _SB_MAKEMASK(4, S_SYS_SOC_TYPE)
-#define V_SYS_SOC_TYPE(x)           _SB_MAKEVALUE(x, S_SYS_SOC_TYPE)
-#define G_SYS_SOC_TYPE(x)           _SB_GETVALUE(x, S_SYS_SOC_TYPE, M_SYS_SOC_TYPE)
-
-#define K_SYS_SOC_TYPE_BCM1250      0x0
-#define K_SYS_SOC_TYPE_BCM1120      0x1
-#define K_SYS_SOC_TYPE_BCM1250_ALT  0x2                /* 1250pass2 w/ 1/4 L2.  */
-#define K_SYS_SOC_TYPE_BCM1125      0x3
-#define K_SYS_SOC_TYPE_BCM1125H     0x4
-#define K_SYS_SOC_TYPE_BCM1250_ALT2 0x5                /* 1250pass2 w/ 1/2 L2.  */
-#define K_SYS_SOC_TYPE_BCM1x80      0x6
-#define K_SYS_SOC_TYPE_BCM1x55      0x7
+#define S_SYS_SOC_TYPE             _SB_MAKE64(16)
+#define M_SYS_SOC_TYPE             _SB_MAKEMASK(4, S_SYS_SOC_TYPE)
+#define V_SYS_SOC_TYPE(x)          _SB_MAKEVALUE(x, S_SYS_SOC_TYPE)
+#define G_SYS_SOC_TYPE(x)          _SB_GETVALUE(x, S_SYS_SOC_TYPE, M_SYS_SOC_TYPE)
+
+#define K_SYS_SOC_TYPE_BCM1250     0x0
+#define K_SYS_SOC_TYPE_BCM1120     0x1
+#define K_SYS_SOC_TYPE_BCM1250_ALT  0x2                /* 1250pass2 w/ 1/4 L2.  */
+#define K_SYS_SOC_TYPE_BCM1125     0x3
+#define K_SYS_SOC_TYPE_BCM1125H            0x4
+#define K_SYS_SOC_TYPE_BCM1250_ALT2 0x5                /* 1250pass2 w/ 1/2 L2.  */
+#define K_SYS_SOC_TYPE_BCM1x80     0x6
+#define K_SYS_SOC_TYPE_BCM1x55     0x7
 
 /*
  * Calculate correct SOC type given a copy of system revision register.
         ? K_SYS_SOC_TYPE_BCM1250 : G_SYS_SOC_TYPE(sysrev))
 #endif
 
-#define S_SYS_WID                   _SB_MAKE64(32)
-#define M_SYS_WID                   _SB_MAKEMASK(32, S_SYS_WID)
-#define V_SYS_WID(x)                _SB_MAKEVALUE(x, S_SYS_WID)
-#define G_SYS_WID(x)                _SB_GETVALUE(x, S_SYS_WID, M_SYS_WID)
+#define S_SYS_WID                  _SB_MAKE64(32)
+#define M_SYS_WID                  _SB_MAKEMASK(32, S_SYS_WID)
+#define V_SYS_WID(x)               _SB_MAKEVALUE(x, S_SYS_WID)
+#define G_SYS_WID(x)               _SB_GETVALUE(x, S_SYS_WID, M_SYS_WID)
 
 /*
  * System Manufacturing Register
 
 #if SIBYTE_HDR_FEATURE_1250_112x
 /* Wafer ID: bits 31:0 */
-#define S_SYS_WAFERID1_200        _SB_MAKE64(0)
-#define M_SYS_WAFERID1_200        _SB_MAKEMASK(32, S_SYS_WAFERID1_200)
-#define V_SYS_WAFERID1_200(x)     _SB_MAKEVALUE(x, S_SYS_WAFERID1_200)
-#define G_SYS_WAFERID1_200(x)     _SB_GETVALUE(x, S_SYS_WAFERID1_200, M_SYS_WAFERID1_200)
+#define S_SYS_WAFERID1_200       _SB_MAKE64(0)
+#define M_SYS_WAFERID1_200       _SB_MAKEMASK(32, S_SYS_WAFERID1_200)
+#define V_SYS_WAFERID1_200(x)    _SB_MAKEVALUE(x, S_SYS_WAFERID1_200)
+#define G_SYS_WAFERID1_200(x)    _SB_GETVALUE(x, S_SYS_WAFERID1_200, M_SYS_WAFERID1_200)
 
-#define S_SYS_BIN                 _SB_MAKE64(32)
-#define M_SYS_BIN                 _SB_MAKEMASK(4, S_SYS_BIN)
-#define V_SYS_BIN(x)              _SB_MAKEVALUE(x, S_SYS_BIN)
-#define G_SYS_BIN(x)              _SB_GETVALUE(x, S_SYS_BIN, M_SYS_BIN)
+#define S_SYS_BIN                _SB_MAKE64(32)
+#define M_SYS_BIN                _SB_MAKEMASK(4, S_SYS_BIN)
+#define V_SYS_BIN(x)             _SB_MAKEVALUE(x, S_SYS_BIN)
+#define G_SYS_BIN(x)             _SB_GETVALUE(x, S_SYS_BIN, M_SYS_BIN)
 
 /* Wafer ID: bits 39:36 */
-#define S_SYS_WAFERID2_200        _SB_MAKE64(36)
-#define M_SYS_WAFERID2_200        _SB_MAKEMASK(4, S_SYS_WAFERID2_200)
-#define V_SYS_WAFERID2_200(x)     _SB_MAKEVALUE(x, S_SYS_WAFERID2_200)
-#define G_SYS_WAFERID2_200(x)     _SB_GETVALUE(x, S_SYS_WAFERID2_200, M_SYS_WAFERID2_200)
+#define S_SYS_WAFERID2_200       _SB_MAKE64(36)
+#define M_SYS_WAFERID2_200       _SB_MAKEMASK(4, S_SYS_WAFERID2_200)
+#define V_SYS_WAFERID2_200(x)    _SB_MAKEVALUE(x, S_SYS_WAFERID2_200)
+#define G_SYS_WAFERID2_200(x)    _SB_GETVALUE(x, S_SYS_WAFERID2_200, M_SYS_WAFERID2_200)
 
 /* Wafer ID: bits 39:0 */
-#define S_SYS_WAFERID_300         _SB_MAKE64(0)
-#define M_SYS_WAFERID_300         _SB_MAKEMASK(40, S_SYS_WAFERID_300)
-#define V_SYS_WAFERID_300(x)      _SB_MAKEVALUE(x, S_SYS_WAFERID_300)
-#define G_SYS_WAFERID_300(x)      _SB_GETVALUE(x, S_SYS_WAFERID_300, M_SYS_WAFERID_300)
-
-#define S_SYS_XPOS                _SB_MAKE64(40)
-#define M_SYS_XPOS                _SB_MAKEMASK(6, S_SYS_XPOS)
-#define V_SYS_XPOS(x)             _SB_MAKEVALUE(x, S_SYS_XPOS)
-#define G_SYS_XPOS(x)             _SB_GETVALUE(x, S_SYS_XPOS, M_SYS_XPOS)
-
-#define S_SYS_YPOS                _SB_MAKE64(46)
-#define M_SYS_YPOS                _SB_MAKEMASK(6, S_SYS_YPOS)
-#define V_SYS_YPOS(x)             _SB_MAKEVALUE(x, S_SYS_YPOS)
-#define G_SYS_YPOS(x)             _SB_GETVALUE(x, S_SYS_YPOS, M_SYS_YPOS)
+#define S_SYS_WAFERID_300        _SB_MAKE64(0)
+#define M_SYS_WAFERID_300        _SB_MAKEMASK(40, S_SYS_WAFERID_300)
+#define V_SYS_WAFERID_300(x)     _SB_MAKEVALUE(x, S_SYS_WAFERID_300)
+#define G_SYS_WAFERID_300(x)     _SB_GETVALUE(x, S_SYS_WAFERID_300, M_SYS_WAFERID_300)
+
+#define S_SYS_XPOS               _SB_MAKE64(40)
+#define M_SYS_XPOS               _SB_MAKEMASK(6, S_SYS_XPOS)
+#define V_SYS_XPOS(x)            _SB_MAKEVALUE(x, S_SYS_XPOS)
+#define G_SYS_XPOS(x)            _SB_GETVALUE(x, S_SYS_XPOS, M_SYS_XPOS)
+
+#define S_SYS_YPOS               _SB_MAKE64(46)
+#define M_SYS_YPOS               _SB_MAKEMASK(6, S_SYS_YPOS)
+#define V_SYS_YPOS(x)            _SB_MAKEVALUE(x, S_SYS_YPOS)
+#define G_SYS_YPOS(x)            _SB_GETVALUE(x, S_SYS_YPOS, M_SYS_YPOS)
 #endif
 
 
  */
 
 #if SIBYTE_HDR_FEATURE_1250_112x
-#define M_SYS_LDT_PLL_BYP           _SB_MAKEMASK1(3)
+#define M_SYS_LDT_PLL_BYP          _SB_MAKEMASK1(3)
 #define M_SYS_PCI_SYNC_TEST_MODE    _SB_MAKEMASK1(4)
-#define M_SYS_IOB0_DIV              _SB_MAKEMASK1(5)
-#define M_SYS_IOB1_DIV              _SB_MAKEMASK1(6)
-
-#define S_SYS_PLL_DIV               _SB_MAKE64(7)
-#define M_SYS_PLL_DIV               _SB_MAKEMASK(5, S_SYS_PLL_DIV)
-#define V_SYS_PLL_DIV(x)            _SB_MAKEVALUE(x, S_SYS_PLL_DIV)
-#define G_SYS_PLL_DIV(x)            _SB_GETVALUE(x, S_SYS_PLL_DIV, M_SYS_PLL_DIV)
-
-#define M_SYS_SER0_ENABLE           _SB_MAKEMASK1(12)
-#define M_SYS_SER0_RSTB_EN          _SB_MAKEMASK1(13)
-#define M_SYS_SER1_ENABLE           _SB_MAKEMASK1(14)
-#define M_SYS_SER1_RSTB_EN          _SB_MAKEMASK1(15)
-#define M_SYS_PCMCIA_ENABLE         _SB_MAKEMASK1(16)
-
-#define S_SYS_BOOT_MODE             _SB_MAKE64(17)
-#define M_SYS_BOOT_MODE             _SB_MAKEMASK(2, S_SYS_BOOT_MODE)
-#define V_SYS_BOOT_MODE(x)          _SB_MAKEVALUE(x, S_SYS_BOOT_MODE)
-#define G_SYS_BOOT_MODE(x)          _SB_GETVALUE(x, S_SYS_BOOT_MODE, M_SYS_BOOT_MODE)
-#define K_SYS_BOOT_MODE_ROM32       0
-#define K_SYS_BOOT_MODE_ROM8        1
+#define M_SYS_IOB0_DIV             _SB_MAKEMASK1(5)
+#define M_SYS_IOB1_DIV             _SB_MAKEMASK1(6)
+
+#define S_SYS_PLL_DIV              _SB_MAKE64(7)
+#define M_SYS_PLL_DIV              _SB_MAKEMASK(5, S_SYS_PLL_DIV)
+#define V_SYS_PLL_DIV(x)           _SB_MAKEVALUE(x, S_SYS_PLL_DIV)
+#define G_SYS_PLL_DIV(x)           _SB_GETVALUE(x, S_SYS_PLL_DIV, M_SYS_PLL_DIV)
+
+#define M_SYS_SER0_ENABLE          _SB_MAKEMASK1(12)
+#define M_SYS_SER0_RSTB_EN         _SB_MAKEMASK1(13)
+#define M_SYS_SER1_ENABLE          _SB_MAKEMASK1(14)
+#define M_SYS_SER1_RSTB_EN         _SB_MAKEMASK1(15)
+#define M_SYS_PCMCIA_ENABLE        _SB_MAKEMASK1(16)
+
+#define S_SYS_BOOT_MODE                    _SB_MAKE64(17)
+#define M_SYS_BOOT_MODE                    _SB_MAKEMASK(2, S_SYS_BOOT_MODE)
+#define V_SYS_BOOT_MODE(x)         _SB_MAKEVALUE(x, S_SYS_BOOT_MODE)
+#define G_SYS_BOOT_MODE(x)         _SB_GETVALUE(x, S_SYS_BOOT_MODE, M_SYS_BOOT_MODE)
+#define K_SYS_BOOT_MODE_ROM32      0
+#define K_SYS_BOOT_MODE_ROM8       1
 #define K_SYS_BOOT_MODE_SMBUS_SMALL 2
 #define K_SYS_BOOT_MODE_SMBUS_BIG   3
 
-#define M_SYS_PCI_HOST              _SB_MAKEMASK1(19)
-#define M_SYS_PCI_ARBITER           _SB_MAKEMASK1(20)
-#define M_SYS_SOUTH_ON_LDT          _SB_MAKEMASK1(21)
-#define M_SYS_BIG_ENDIAN            _SB_MAKEMASK1(22)
-#define M_SYS_GENCLK_EN             _SB_MAKEMASK1(23)
-#define M_SYS_LDT_TEST_EN           _SB_MAKEMASK1(24)
-#define M_SYS_GEN_PARITY_EN         _SB_MAKEMASK1(25)
+#define M_SYS_PCI_HOST             _SB_MAKEMASK1(19)
+#define M_SYS_PCI_ARBITER          _SB_MAKEMASK1(20)
+#define M_SYS_SOUTH_ON_LDT         _SB_MAKEMASK1(21)
+#define M_SYS_BIG_ENDIAN           _SB_MAKEMASK1(22)
+#define M_SYS_GENCLK_EN                    _SB_MAKEMASK1(23)
+#define M_SYS_LDT_TEST_EN          _SB_MAKEMASK1(24)
+#define M_SYS_GEN_PARITY_EN        _SB_MAKEMASK1(25)
 
-#define S_SYS_CONFIG                26
-#define M_SYS_CONFIG                _SB_MAKEMASK(6, S_SYS_CONFIG)
-#define V_SYS_CONFIG(x)             _SB_MAKEVALUE(x, S_SYS_CONFIG)
-#define G_SYS_CONFIG(x)             _SB_GETVALUE(x, S_SYS_CONFIG, M_SYS_CONFIG)
+#define S_SYS_CONFIG               26
+#define M_SYS_CONFIG               _SB_MAKEMASK(6, S_SYS_CONFIG)
+#define V_SYS_CONFIG(x)                    _SB_MAKEVALUE(x, S_SYS_CONFIG)
+#define G_SYS_CONFIG(x)                    _SB_GETVALUE(x, S_SYS_CONFIG, M_SYS_CONFIG)
 
 /* The following bits are writeable by JTAG only. */
 
-#define M_SYS_CLKSTOP               _SB_MAKEMASK1(32)
-#define M_SYS_CLKSTEP               _SB_MAKEMASK1(33)
+#define M_SYS_CLKSTOP              _SB_MAKEMASK1(32)
+#define M_SYS_CLKSTEP              _SB_MAKEMASK1(33)
 
-#define S_SYS_CLKCOUNT              34
-#define M_SYS_CLKCOUNT              _SB_MAKEMASK(8, S_SYS_CLKCOUNT)
-#define V_SYS_CLKCOUNT(x)           _SB_MAKEVALUE(x, S_SYS_CLKCOUNT)
-#define G_SYS_CLKCOUNT(x)           _SB_GETVALUE(x, S_SYS_CLKCOUNT, M_SYS_CLKCOUNT)
+#define S_SYS_CLKCOUNT             34
+#define M_SYS_CLKCOUNT             _SB_MAKEMASK(8, S_SYS_CLKCOUNT)
+#define V_SYS_CLKCOUNT(x)          _SB_MAKEVALUE(x, S_SYS_CLKCOUNT)
+#define G_SYS_CLKCOUNT(x)          _SB_GETVALUE(x, S_SYS_CLKCOUNT, M_SYS_CLKCOUNT)
 
-#define M_SYS_PLL_BYPASS            _SB_MAKEMASK1(42)
+#define M_SYS_PLL_BYPASS           _SB_MAKEMASK1(42)
 
 #define S_SYS_PLL_IREF             43
 #define M_SYS_PLL_IREF             _SB_MAKEMASK(2, S_SYS_PLL_IREF)
 #define S_SYS_PLL_VREG             47
 #define M_SYS_PLL_VREG             _SB_MAKEMASK(2, S_SYS_PLL_VREG)
 
-#define M_SYS_MEM_RESET             _SB_MAKEMASK1(49)
-#define M_SYS_L2C_RESET             _SB_MAKEMASK1(50)
-#define M_SYS_IO_RESET_0            _SB_MAKEMASK1(51)
-#define M_SYS_IO_RESET_1            _SB_MAKEMASK1(52)
-#define M_SYS_SCD_RESET             _SB_MAKEMASK1(53)
+#define M_SYS_MEM_RESET                    _SB_MAKEMASK1(49)
+#define M_SYS_L2C_RESET                    _SB_MAKEMASK1(50)
+#define M_SYS_IO_RESET_0           _SB_MAKEMASK1(51)
+#define M_SYS_IO_RESET_1           _SB_MAKEMASK1(52)
+#define M_SYS_SCD_RESET                    _SB_MAKEMASK1(53)
 
 /* End of bits writable by JTAG only. */
 
-#define M_SYS_CPU_RESET_0           _SB_MAKEMASK1(54)
-#define M_SYS_CPU_RESET_1           _SB_MAKEMASK1(55)
+#define M_SYS_CPU_RESET_0          _SB_MAKEMASK1(54)
+#define M_SYS_CPU_RESET_1          _SB_MAKEMASK1(55)
 
-#define M_SYS_UNICPU0               _SB_MAKEMASK1(56)
-#define M_SYS_UNICPU1               _SB_MAKEMASK1(57)
+#define M_SYS_UNICPU0              _SB_MAKEMASK1(56)
+#define M_SYS_UNICPU1              _SB_MAKEMASK1(57)
 
-#define M_SYS_SB_SOFTRES            _SB_MAKEMASK1(58)
-#define M_SYS_EXT_RESET             _SB_MAKEMASK1(59)
-#define M_SYS_SYSTEM_RESET          _SB_MAKEMASK1(60)
+#define M_SYS_SB_SOFTRES           _SB_MAKEMASK1(58)
+#define M_SYS_EXT_RESET                    _SB_MAKEMASK1(59)
+#define M_SYS_SYSTEM_RESET         _SB_MAKEMASK1(60)
 
-#define M_SYS_MISR_MODE             _SB_MAKEMASK1(61)
-#define M_SYS_MISR_RESET            _SB_MAKEMASK1(62)
+#define M_SYS_MISR_MODE                    _SB_MAKEMASK1(61)
+#define M_SYS_MISR_RESET           _SB_MAKEMASK1(62)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
 #define M_SYS_SW_FLAG              _SB_MAKEMASK1(63)
  * Registers: SCD_MBOX_CPU_x
  */
 
-#define S_MBOX_INT_3                0
-#define M_MBOX_INT_3                _SB_MAKEMASK(16, S_MBOX_INT_3)
-#define S_MBOX_INT_2                16
-#define M_MBOX_INT_2                _SB_MAKEMASK(16, S_MBOX_INT_2)
-#define S_MBOX_INT_1                32
-#define M_MBOX_INT_1                _SB_MAKEMASK(16, S_MBOX_INT_1)
-#define S_MBOX_INT_0                48
-#define M_MBOX_INT_0                _SB_MAKEMASK(16, S_MBOX_INT_0)
+#define S_MBOX_INT_3               0
+#define M_MBOX_INT_3               _SB_MAKEMASK(16, S_MBOX_INT_3)
+#define S_MBOX_INT_2               16
+#define M_MBOX_INT_2               _SB_MAKEMASK(16, S_MBOX_INT_2)
+#define S_MBOX_INT_1               32
+#define M_MBOX_INT_1               _SB_MAKEMASK(16, S_MBOX_INT_1)
+#define S_MBOX_INT_0               48
+#define M_MBOX_INT_0               _SB_MAKEMASK(16, S_MBOX_INT_0)
 
 /*
  * Watchdog Registers (Table 4-8) (Table 4-9) (Table 4-10)
  * Registers: SCD_WDOG_INIT_CNT_x
  */
 
-#define V_SCD_WDOG_FREQ             1000000
+#define V_SCD_WDOG_FREQ                    1000000
 
-#define S_SCD_WDOG_INIT             0
-#define M_SCD_WDOG_INIT             _SB_MAKEMASK(23, S_SCD_WDOG_INIT)
+#define S_SCD_WDOG_INIT                    0
+#define M_SCD_WDOG_INIT                    _SB_MAKEMASK(23, S_SCD_WDOG_INIT)
 
-#define S_SCD_WDOG_CNT              0
-#define M_SCD_WDOG_CNT              _SB_MAKEMASK(23, S_SCD_WDOG_CNT)
+#define S_SCD_WDOG_CNT             0
+#define M_SCD_WDOG_CNT             _SB_MAKEMASK(23, S_SCD_WDOG_CNT)
 
-#define S_SCD_WDOG_ENABLE           0
-#define M_SCD_WDOG_ENABLE           _SB_MAKEMASK1(S_SCD_WDOG_ENABLE)
+#define S_SCD_WDOG_ENABLE          0
+#define M_SCD_WDOG_ENABLE          _SB_MAKEMASK1(S_SCD_WDOG_ENABLE)
 
-#define S_SCD_WDOG_RESET_TYPE       2
-#define M_SCD_WDOG_RESET_TYPE       _SB_MAKEMASK(3, S_SCD_WDOG_RESET_TYPE)
+#define S_SCD_WDOG_RESET_TYPE      2
+#define M_SCD_WDOG_RESET_TYPE      _SB_MAKEMASK(3, S_SCD_WDOG_RESET_TYPE)
 #define V_SCD_WDOG_RESET_TYPE(x)    _SB_MAKEVALUE(x, S_SCD_WDOG_RESET_TYPE)
 #define G_SCD_WDOG_RESET_TYPE(x)    _SB_GETVALUE(x, S_SCD_WDOG_RESET_TYPE, M_SCD_WDOG_RESET_TYPE)
 
-#define K_SCD_WDOG_RESET_FULL       0  /* actually, (x & 1) == 0  */
-#define K_SCD_WDOG_RESET_SOFT       1
-#define K_SCD_WDOG_RESET_CPU0       3
-#define K_SCD_WDOG_RESET_CPU1       5
+#define K_SCD_WDOG_RESET_FULL      0  /* actually, (x & 1) == 0  */
+#define K_SCD_WDOG_RESET_SOFT      1
+#define K_SCD_WDOG_RESET_CPU0      3
+#define K_SCD_WDOG_RESET_CPU1      5
 #define K_SCD_WDOG_RESET_BOTH_CPUS  7
 
 /* This feature is present in 1250 C0 and later, but *not* in 112x A revs.  */
 #if SIBYTE_HDR_FEATURE(1250, PASS3)
-#define S_SCD_WDOG_HAS_RESET        8
-#define M_SCD_WDOG_HAS_RESET        _SB_MAKEMASK1(S_SCD_WDOG_HAS_RESET)
+#define S_SCD_WDOG_HAS_RESET       8
+#define M_SCD_WDOG_HAS_RESET       _SB_MAKEMASK1(S_SCD_WDOG_HAS_RESET)
 #endif
 
 
  * Timer Registers (Table 4-11) (Table 4-12) (Table 4-13)
  */
 
-#define V_SCD_TIMER_FREQ            1000000
+#define V_SCD_TIMER_FREQ           1000000
 
-#define S_SCD_TIMER_INIT            0
-#define M_SCD_TIMER_INIT            _SB_MAKEMASK(23, S_SCD_TIMER_INIT)
-#define V_SCD_TIMER_INIT(x)         _SB_MAKEVALUE(x, S_SCD_TIMER_INIT)
-#define G_SCD_TIMER_INIT(x)         _SB_GETVALUE(x, S_SCD_TIMER_INIT, M_SCD_TIMER_INIT)
+#define S_SCD_TIMER_INIT           0
+#define M_SCD_TIMER_INIT           _SB_MAKEMASK(23, S_SCD_TIMER_INIT)
+#define V_SCD_TIMER_INIT(x)        _SB_MAKEVALUE(x, S_SCD_TIMER_INIT)
+#define G_SCD_TIMER_INIT(x)        _SB_GETVALUE(x, S_SCD_TIMER_INIT, M_SCD_TIMER_INIT)
 
 #define V_SCD_TIMER_WIDTH          23
-#define S_SCD_TIMER_CNT             0
-#define M_SCD_TIMER_CNT             _SB_MAKEMASK(V_SCD_TIMER_WIDTH, S_SCD_TIMER_CNT)
-#define V_SCD_TIMER_CNT(x)         _SB_MAKEVALUE(x, S_SCD_TIMER_CNT)
-#define G_SCD_TIMER_CNT(x)         _SB_GETVALUE(x, S_SCD_TIMER_CNT, M_SCD_TIMER_CNT)
+#define S_SCD_TIMER_CNT                    0
+#define M_SCD_TIMER_CNT                    _SB_MAKEMASK(V_SCD_TIMER_WIDTH, S_SCD_TIMER_CNT)
+#define V_SCD_TIMER_CNT(x)        _SB_MAKEVALUE(x, S_SCD_TIMER_CNT)
+#define G_SCD_TIMER_CNT(x)        _SB_GETVALUE(x, S_SCD_TIMER_CNT, M_SCD_TIMER_CNT)
 
-#define M_SCD_TIMER_ENABLE          _SB_MAKEMASK1(0)
-#define M_SCD_TIMER_MODE            _SB_MAKEMASK1(1)
+#define M_SCD_TIMER_ENABLE         _SB_MAKEMASK1(0)
+#define M_SCD_TIMER_MODE           _SB_MAKEMASK1(1)
 #define M_SCD_TIMER_MODE_CONTINUOUS M_SCD_TIMER_MODE
 
 /*
  * System Performance Counters
  */
 
-#define S_SPC_CFG_SRC0            0
-#define M_SPC_CFG_SRC0            _SB_MAKEMASK(8, S_SPC_CFG_SRC0)
-#define V_SPC_CFG_SRC0(x)         _SB_MAKEVALUE(x, S_SPC_CFG_SRC0)
-#define G_SPC_CFG_SRC0(x)         _SB_GETVALUE(x, S_SPC_CFG_SRC0, M_SPC_CFG_SRC0)
+#define S_SPC_CFG_SRC0           0
+#define M_SPC_CFG_SRC0           _SB_MAKEMASK(8, S_SPC_CFG_SRC0)
+#define V_SPC_CFG_SRC0(x)        _SB_MAKEVALUE(x, S_SPC_CFG_SRC0)
+#define G_SPC_CFG_SRC0(x)        _SB_GETVALUE(x, S_SPC_CFG_SRC0, M_SPC_CFG_SRC0)
 
-#define S_SPC_CFG_SRC1            8
-#define M_SPC_CFG_SRC1            _SB_MAKEMASK(8, S_SPC_CFG_SRC1)
-#define V_SPC_CFG_SRC1(x)         _SB_MAKEVALUE(x, S_SPC_CFG_SRC1)
-#define G_SPC_CFG_SRC1(x)         _SB_GETVALUE(x, S_SPC_CFG_SRC1, M_SPC_CFG_SRC1)
+#define S_SPC_CFG_SRC1           8
+#define M_SPC_CFG_SRC1           _SB_MAKEMASK(8, S_SPC_CFG_SRC1)
+#define V_SPC_CFG_SRC1(x)        _SB_MAKEVALUE(x, S_SPC_CFG_SRC1)
+#define G_SPC_CFG_SRC1(x)        _SB_GETVALUE(x, S_SPC_CFG_SRC1, M_SPC_CFG_SRC1)
 
-#define S_SPC_CFG_SRC2            16
-#define M_SPC_CFG_SRC2            _SB_MAKEMASK(8, S_SPC_CFG_SRC2)
-#define V_SPC_CFG_SRC2(x)         _SB_MAKEVALUE(x, S_SPC_CFG_SRC2)
-#define G_SPC_CFG_SRC2(x)         _SB_GETVALUE(x, S_SPC_CFG_SRC2, M_SPC_CFG_SRC2)
+#define S_SPC_CFG_SRC2           16
+#define M_SPC_CFG_SRC2           _SB_MAKEMASK(8, S_SPC_CFG_SRC2)
+#define V_SPC_CFG_SRC2(x)        _SB_MAKEVALUE(x, S_SPC_CFG_SRC2)
+#define G_SPC_CFG_SRC2(x)        _SB_GETVALUE(x, S_SPC_CFG_SRC2, M_SPC_CFG_SRC2)
 
-#define S_SPC_CFG_SRC3            24
-#define M_SPC_CFG_SRC3            _SB_MAKEMASK(8, S_SPC_CFG_SRC3)
-#define V_SPC_CFG_SRC3(x)         _SB_MAKEVALUE(x, S_SPC_CFG_SRC3)
-#define G_SPC_CFG_SRC3(x)         _SB_GETVALUE(x, S_SPC_CFG_SRC3, M_SPC_CFG_SRC3)
+#define S_SPC_CFG_SRC3           24
+#define M_SPC_CFG_SRC3           _SB_MAKEMASK(8, S_SPC_CFG_SRC3)
+#define V_SPC_CFG_SRC3(x)        _SB_MAKEVALUE(x, S_SPC_CFG_SRC3)
+#define G_SPC_CFG_SRC3(x)        _SB_GETVALUE(x, S_SPC_CFG_SRC3, M_SPC_CFG_SRC3)
 
 #if SIBYTE_HDR_FEATURE_1250_112x
 #define M_SPC_CFG_CLEAR                _SB_MAKEMASK1(32)
  * Bus Watcher
  */
 
-#define S_SCD_BERR_TID            8
-#define M_SCD_BERR_TID            _SB_MAKEMASK(10, S_SCD_BERR_TID)
-#define V_SCD_BERR_TID(x)         _SB_MAKEVALUE(x, S_SCD_BERR_TID)
-#define G_SCD_BERR_TID(x)         _SB_GETVALUE(x, S_SCD_BERR_TID, M_SCD_BERR_TID)
+#define S_SCD_BERR_TID           8
+#define M_SCD_BERR_TID           _SB_MAKEMASK(10, S_SCD_BERR_TID)
+#define V_SCD_BERR_TID(x)        _SB_MAKEVALUE(x, S_SCD_BERR_TID)
+#define G_SCD_BERR_TID(x)        _SB_GETVALUE(x, S_SCD_BERR_TID, M_SCD_BERR_TID)
 
-#define S_SCD_BERR_RID            18
-#define M_SCD_BERR_RID            _SB_MAKEMASK(4, S_SCD_BERR_RID)
-#define V_SCD_BERR_RID(x)         _SB_MAKEVALUE(x, S_SCD_BERR_RID)
-#define G_SCD_BERR_RID(x)         _SB_GETVALUE(x, S_SCD_BERR_RID, M_SCD_BERR_RID)
+#define S_SCD_BERR_RID           18
+#define M_SCD_BERR_RID           _SB_MAKEMASK(4, S_SCD_BERR_RID)
+#define V_SCD_BERR_RID(x)        _SB_MAKEVALUE(x, S_SCD_BERR_RID)
+#define G_SCD_BERR_RID(x)        _SB_GETVALUE(x, S_SCD_BERR_RID, M_SCD_BERR_RID)
 
-#define S_SCD_BERR_DCODE          22
-#define M_SCD_BERR_DCODE          _SB_MAKEMASK(3, S_SCD_BERR_DCODE)
-#define V_SCD_BERR_DCODE(x)       _SB_MAKEVALUE(x, S_SCD_BERR_DCODE)
-#define G_SCD_BERR_DCODE(x)       _SB_GETVALUE(x, S_SCD_BERR_DCODE, M_SCD_BERR_DCODE)
+#define S_SCD_BERR_DCODE         22
+#define M_SCD_BERR_DCODE         _SB_MAKEMASK(3, S_SCD_BERR_DCODE)
+#define V_SCD_BERR_DCODE(x)      _SB_MAKEVALUE(x, S_SCD_BERR_DCODE)
+#define G_SCD_BERR_DCODE(x)      _SB_GETVALUE(x, S_SCD_BERR_DCODE, M_SCD_BERR_DCODE)
 
-#define M_SCD_BERR_MULTERRS       _SB_MAKEMASK1(30)
+#define M_SCD_BERR_MULTERRS      _SB_MAKEMASK1(30)
 
 
-#define S_SCD_L2ECC_CORR_D        0
-#define M_SCD_L2ECC_CORR_D        _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_D)
-#define V_SCD_L2ECC_CORR_D(x)     _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_D)
-#define G_SCD_L2ECC_CORR_D(x)     _SB_GETVALUE(x, S_SCD_L2ECC_CORR_D, M_SCD_L2ECC_CORR_D)
+#define S_SCD_L2ECC_CORR_D       0
+#define M_SCD_L2ECC_CORR_D       _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_D)
+#define V_SCD_L2ECC_CORR_D(x)    _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_D)
+#define G_SCD_L2ECC_CORR_D(x)    _SB_GETVALUE(x, S_SCD_L2ECC_CORR_D, M_SCD_L2ECC_CORR_D)
 
-#define S_SCD_L2ECC_BAD_D         8
-#define M_SCD_L2ECC_BAD_D         _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_D)
-#define V_SCD_L2ECC_BAD_D(x)      _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_D)
-#define G_SCD_L2ECC_BAD_D(x)      _SB_GETVALUE(x, S_SCD_L2ECC_BAD_D, M_SCD_L2ECC_BAD_D)
+#define S_SCD_L2ECC_BAD_D        8
+#define M_SCD_L2ECC_BAD_D        _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_D)
+#define V_SCD_L2ECC_BAD_D(x)     _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_D)
+#define G_SCD_L2ECC_BAD_D(x)     _SB_GETVALUE(x, S_SCD_L2ECC_BAD_D, M_SCD_L2ECC_BAD_D)
 
-#define S_SCD_L2ECC_CORR_T        16
-#define M_SCD_L2ECC_CORR_T        _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_T)
-#define V_SCD_L2ECC_CORR_T(x)     _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_T)
-#define G_SCD_L2ECC_CORR_T(x)     _SB_GETVALUE(x, S_SCD_L2ECC_CORR_T, M_SCD_L2ECC_CORR_T)
+#define S_SCD_L2ECC_CORR_T       16
+#define M_SCD_L2ECC_CORR_T       _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_T)
+#define V_SCD_L2ECC_CORR_T(x)    _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_T)
+#define G_SCD_L2ECC_CORR_T(x)    _SB_GETVALUE(x, S_SCD_L2ECC_CORR_T, M_SCD_L2ECC_CORR_T)
 
-#define S_SCD_L2ECC_BAD_T         24
-#define M_SCD_L2ECC_BAD_T         _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_T)
-#define V_SCD_L2ECC_BAD_T(x)      _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_T)
-#define G_SCD_L2ECC_BAD_T(x)      _SB_GETVALUE(x, S_SCD_L2ECC_BAD_T, M_SCD_L2ECC_BAD_T)
+#define S_SCD_L2ECC_BAD_T        24
+#define M_SCD_L2ECC_BAD_T        _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_T)
+#define V_SCD_L2ECC_BAD_T(x)     _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_T)
+#define G_SCD_L2ECC_BAD_T(x)     _SB_GETVALUE(x, S_SCD_L2ECC_BAD_T, M_SCD_L2ECC_BAD_T)
 
-#define S_SCD_MEM_ECC_CORR        0
-#define M_SCD_MEM_ECC_CORR        _SB_MAKEMASK(8, S_SCD_MEM_ECC_CORR)
-#define V_SCD_MEM_ECC_CORR(x)     _SB_MAKEVALUE(x, S_SCD_MEM_ECC_CORR)
-#define G_SCD_MEM_ECC_CORR(x)     _SB_GETVALUE(x, S_SCD_MEM_ECC_CORR, M_SCD_MEM_ECC_CORR)
+#define S_SCD_MEM_ECC_CORR       0
+#define M_SCD_MEM_ECC_CORR       _SB_MAKEMASK(8, S_SCD_MEM_ECC_CORR)
+#define V_SCD_MEM_ECC_CORR(x)    _SB_MAKEVALUE(x, S_SCD_MEM_ECC_CORR)
+#define G_SCD_MEM_ECC_CORR(x)    _SB_GETVALUE(x, S_SCD_MEM_ECC_CORR, M_SCD_MEM_ECC_CORR)
 
-#define S_SCD_MEM_ECC_BAD         8
-#define M_SCD_MEM_ECC_BAD         _SB_MAKEMASK(8, S_SCD_MEM_ECC_BAD)
-#define V_SCD_MEM_ECC_BAD(x)      _SB_MAKEVALUE(x, S_SCD_MEM_ECC_BAD)
-#define G_SCD_MEM_ECC_BAD(x)      _SB_GETVALUE(x, S_SCD_MEM_ECC_BAD, M_SCD_MEM_ECC_BAD)
+#define S_SCD_MEM_ECC_BAD        8
+#define M_SCD_MEM_ECC_BAD        _SB_MAKEMASK(8, S_SCD_MEM_ECC_BAD)
+#define V_SCD_MEM_ECC_BAD(x)     _SB_MAKEVALUE(x, S_SCD_MEM_ECC_BAD)
+#define G_SCD_MEM_ECC_BAD(x)     _SB_GETVALUE(x, S_SCD_MEM_ECC_BAD, M_SCD_MEM_ECC_BAD)
 
-#define S_SCD_MEM_BUSERR          16
-#define M_SCD_MEM_BUSERR          _SB_MAKEMASK(8, S_SCD_MEM_BUSERR)
-#define V_SCD_MEM_BUSERR(x)       _SB_MAKEVALUE(x, S_SCD_MEM_BUSERR)
-#define G_SCD_MEM_BUSERR(x)       _SB_GETVALUE(x, S_SCD_MEM_BUSERR, M_SCD_MEM_BUSERR)
+#define S_SCD_MEM_BUSERR         16
+#define M_SCD_MEM_BUSERR         _SB_MAKEMASK(8, S_SCD_MEM_BUSERR)
+#define V_SCD_MEM_BUSERR(x)      _SB_MAKEVALUE(x, S_SCD_MEM_BUSERR)
+#define G_SCD_MEM_BUSERR(x)      _SB_GETVALUE(x, S_SCD_MEM_BUSERR, M_SCD_MEM_BUSERR)
 
 
 /*
 #define M_ATRAP_INDEX            _SB_MAKEMASK(4, 0)
 #define M_ATRAP_ADDRESS                  _SB_MAKEMASK(40, 0)
 
-#define S_ATRAP_CFG_CNT            0
-#define M_ATRAP_CFG_CNT            _SB_MAKEMASK(3, S_ATRAP_CFG_CNT)
-#define V_ATRAP_CFG_CNT(x)         _SB_MAKEVALUE(x, S_ATRAP_CFG_CNT)
-#define G_ATRAP_CFG_CNT(x)         _SB_GETVALUE(x, S_ATRAP_CFG_CNT, M_ATRAP_CFG_CNT)
+#define S_ATRAP_CFG_CNT                   0
+#define M_ATRAP_CFG_CNT                   _SB_MAKEMASK(3, S_ATRAP_CFG_CNT)
+#define V_ATRAP_CFG_CNT(x)        _SB_MAKEVALUE(x, S_ATRAP_CFG_CNT)
+#define G_ATRAP_CFG_CNT(x)        _SB_GETVALUE(x, S_ATRAP_CFG_CNT, M_ATRAP_CFG_CNT)
 
 #define M_ATRAP_CFG_WRITE         _SB_MAKEMASK1(3)
-#define M_ATRAP_CFG_ALL                   _SB_MAKEMASK1(4)
-#define M_ATRAP_CFG_INV                   _SB_MAKEMASK1(5)
+#define M_ATRAP_CFG_ALL                   _SB_MAKEMASK1(4)
+#define M_ATRAP_CFG_INV                   _SB_MAKEMASK1(5)
 #define M_ATRAP_CFG_USESRC        _SB_MAKEMASK1(6)
 #define M_ATRAP_CFG_SRCINV        _SB_MAKEMASK1(7)
 
-#define S_ATRAP_CFG_AGENTID     8
-#define M_ATRAP_CFG_AGENTID     _SB_MAKEMASK(4, S_ATRAP_CFG_AGENTID)
-#define V_ATRAP_CFG_AGENTID(x)  _SB_MAKEVALUE(x, S_ATRAP_CFG_AGENTID)
-#define G_ATRAP_CFG_AGENTID(x)  _SB_GETVALUE(x, S_ATRAP_CFG_AGENTID, M_ATRAP_CFG_AGENTID)
+#define S_ATRAP_CFG_AGENTID    8
+#define M_ATRAP_CFG_AGENTID    _SB_MAKEMASK(4, S_ATRAP_CFG_AGENTID)
+#define V_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_AGENTID)
+#define G_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_ATRAP_CFG_AGENTID, M_ATRAP_CFG_AGENTID)
 
 #define K_BUS_AGENT_CPU0       0
 #define K_BUS_AGENT_CPU1       1
 #define K_BUS_AGENT_IOB0       2
 #define K_BUS_AGENT_IOB1       3
-#define K_BUS_AGENT_SCD        4
-#define K_BUS_AGENT_L2C        6
+#define K_BUS_AGENT_SCD 4
+#define K_BUS_AGENT_L2C 6
 #define K_BUS_AGENT_MC 7
 
 #define S_ATRAP_CFG_CATTR     12
 #define G_ATRAP_CFG_CATTR(x)  _SB_GETVALUE(x, S_ATRAP_CFG_CATTR, M_ATRAP_CFG_CATTR)
 
 #define K_ATRAP_CFG_CATTR_IGNORE       0
-#define K_ATRAP_CFG_CATTR_UNC          1
+#define K_ATRAP_CFG_CATTR_UNC          1
 #define K_ATRAP_CFG_CATTR_CACHEABLE    2
-#define K_ATRAP_CFG_CATTR_NONCOH       3
+#define K_ATRAP_CFG_CATTR_NONCOH       3
 #define K_ATRAP_CFG_CATTR_COHERENT     4
 #define K_ATRAP_CFG_CATTR_NOTUNC       5
 #define K_ATRAP_CFG_CATTR_NOTNONCOH    6
-#define K_ATRAP_CFG_CATTR_NOTCOHERENT   7
+#define K_ATRAP_CFG_CATTR_NOTCOHERENT  7
 
 #endif /* 1250/112x */
 
  * Trace Buffer Config register
  */
 
-#define M_SCD_TRACE_CFG_RESET           _SB_MAKEMASK1(0)
-#define M_SCD_TRACE_CFG_START_READ      _SB_MAKEMASK1(1)
-#define M_SCD_TRACE_CFG_START           _SB_MAKEMASK1(2)
-#define M_SCD_TRACE_CFG_STOP            _SB_MAKEMASK1(3)
-#define M_SCD_TRACE_CFG_FREEZE          _SB_MAKEMASK1(4)
-#define M_SCD_TRACE_CFG_FREEZE_FULL     _SB_MAKEMASK1(5)
-#define M_SCD_TRACE_CFG_DEBUG_FULL      _SB_MAKEMASK1(6)
-#define M_SCD_TRACE_CFG_FULL            _SB_MAKEMASK1(7)
+#define M_SCD_TRACE_CFG_RESET          _SB_MAKEMASK1(0)
+#define M_SCD_TRACE_CFG_START_READ     _SB_MAKEMASK1(1)
+#define M_SCD_TRACE_CFG_START          _SB_MAKEMASK1(2)
+#define M_SCD_TRACE_CFG_STOP           _SB_MAKEMASK1(3)
+#define M_SCD_TRACE_CFG_FREEZE         _SB_MAKEMASK1(4)
+#define M_SCD_TRACE_CFG_FREEZE_FULL    _SB_MAKEMASK1(5)
+#define M_SCD_TRACE_CFG_DEBUG_FULL     _SB_MAKEMASK1(6)
+#define M_SCD_TRACE_CFG_FULL           _SB_MAKEMASK1(7)
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_SCD_TRACE_CFG_FORCECNT        _SB_MAKEMASK1(8)
+#define M_SCD_TRACE_CFG_FORCECNT       _SB_MAKEMASK1(8)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 /*
  * a slightly different place in the register.
  */
 #if SIBYTE_HDR_FEATURE_1250_112x
-#define S_SCD_TRACE_CFG_CUR_ADDR        10
+#define S_SCD_TRACE_CFG_CUR_ADDR       10
 #else
 #if SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_SCD_TRACE_CFG_CUR_ADDR        24
+#define S_SCD_TRACE_CFG_CUR_ADDR       24
 #endif /* 1480 */
-#endif  /* 1250/112x */
+#endif /* 1250/112x */
 
-#define M_SCD_TRACE_CFG_CUR_ADDR        _SB_MAKEMASK(8, S_SCD_TRACE_CFG_CUR_ADDR)
-#define V_SCD_TRACE_CFG_CUR_ADDR(x)     _SB_MAKEVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR)
-#define G_SCD_TRACE_CFG_CUR_ADDR(x)     _SB_GETVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR, M_SCD_TRACE_CFG_CUR_ADDR)
+#define M_SCD_TRACE_CFG_CUR_ADDR       _SB_MAKEMASK(8, S_SCD_TRACE_CFG_CUR_ADDR)
+#define V_SCD_TRACE_CFG_CUR_ADDR(x)    _SB_MAKEVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR)
+#define G_SCD_TRACE_CFG_CUR_ADDR(x)    _SB_GETVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR, M_SCD_TRACE_CFG_CUR_ADDR)
 
 /*
  * Trace Event registers
  */
 
-#define S_SCD_TREVT_ADDR_MATCH          0
-#define M_SCD_TREVT_ADDR_MATCH          _SB_MAKEMASK(4, S_SCD_TREVT_ADDR_MATCH)
-#define V_SCD_TREVT_ADDR_MATCH(x)       _SB_MAKEVALUE(x, S_SCD_TREVT_ADDR_MATCH)
-#define G_SCD_TREVT_ADDR_MATCH(x)       _SB_GETVALUE(x, S_SCD_TREVT_ADDR_MATCH, M_SCD_TREVT_ADDR_MATCH)
-
-#define M_SCD_TREVT_REQID_MATCH         _SB_MAKEMASK1(4)
-#define M_SCD_TREVT_DATAID_MATCH        _SB_MAKEMASK1(5)
-#define M_SCD_TREVT_RESPID_MATCH        _SB_MAKEMASK1(6)
-#define M_SCD_TREVT_INTERRUPT           _SB_MAKEMASK1(7)
-#define M_SCD_TREVT_DEBUG_PIN           _SB_MAKEMASK1(9)
-#define M_SCD_TREVT_WRITE               _SB_MAKEMASK1(10)
-#define M_SCD_TREVT_READ                _SB_MAKEMASK1(11)
-
-#define S_SCD_TREVT_REQID               12
-#define M_SCD_TREVT_REQID               _SB_MAKEMASK(4, S_SCD_TREVT_REQID)
-#define V_SCD_TREVT_REQID(x)            _SB_MAKEVALUE(x, S_SCD_TREVT_REQID)
-#define G_SCD_TREVT_REQID(x)            _SB_GETVALUE(x, S_SCD_TREVT_REQID, M_SCD_TREVT_REQID)
-
-#define S_SCD_TREVT_RESPID              16
-#define M_SCD_TREVT_RESPID              _SB_MAKEMASK(4, S_SCD_TREVT_RESPID)
-#define V_SCD_TREVT_RESPID(x)           _SB_MAKEVALUE(x, S_SCD_TREVT_RESPID)
-#define G_SCD_TREVT_RESPID(x)           _SB_GETVALUE(x, S_SCD_TREVT_RESPID, M_SCD_TREVT_RESPID)
-
-#define S_SCD_TREVT_DATAID              20
-#define M_SCD_TREVT_DATAID              _SB_MAKEMASK(4, S_SCD_TREVT_DATAID)
-#define V_SCD_TREVT_DATAID(x)           _SB_MAKEVALUE(x, S_SCD_TREVT_DATAID)
-#define G_SCD_TREVT_DATAID(x)           _SB_GETVALUE(x, S_SCD_TREVT_DATAID, M_SCD_TREVT_DATID)
-
-#define S_SCD_TREVT_COUNT               24
-#define M_SCD_TREVT_COUNT               _SB_MAKEMASK(8, S_SCD_TREVT_COUNT)
-#define V_SCD_TREVT_COUNT(x)            _SB_MAKEVALUE(x, S_SCD_TREVT_COUNT)
-#define G_SCD_TREVT_COUNT(x)            _SB_GETVALUE(x, S_SCD_TREVT_COUNT, M_SCD_TREVT_COUNT)
+#define S_SCD_TREVT_ADDR_MATCH         0
+#define M_SCD_TREVT_ADDR_MATCH         _SB_MAKEMASK(4, S_SCD_TREVT_ADDR_MATCH)
+#define V_SCD_TREVT_ADDR_MATCH(x)      _SB_MAKEVALUE(x, S_SCD_TREVT_ADDR_MATCH)
+#define G_SCD_TREVT_ADDR_MATCH(x)      _SB_GETVALUE(x, S_SCD_TREVT_ADDR_MATCH, M_SCD_TREVT_ADDR_MATCH)
+
+#define M_SCD_TREVT_REQID_MATCH                _SB_MAKEMASK1(4)
+#define M_SCD_TREVT_DATAID_MATCH       _SB_MAKEMASK1(5)
+#define M_SCD_TREVT_RESPID_MATCH       _SB_MAKEMASK1(6)
+#define M_SCD_TREVT_INTERRUPT          _SB_MAKEMASK1(7)
+#define M_SCD_TREVT_DEBUG_PIN          _SB_MAKEMASK1(9)
+#define M_SCD_TREVT_WRITE              _SB_MAKEMASK1(10)
+#define M_SCD_TREVT_READ               _SB_MAKEMASK1(11)
+
+#define S_SCD_TREVT_REQID              12
+#define M_SCD_TREVT_REQID              _SB_MAKEMASK(4, S_SCD_TREVT_REQID)
+#define V_SCD_TREVT_REQID(x)           _SB_MAKEVALUE(x, S_SCD_TREVT_REQID)
+#define G_SCD_TREVT_REQID(x)           _SB_GETVALUE(x, S_SCD_TREVT_REQID, M_SCD_TREVT_REQID)
+
+#define S_SCD_TREVT_RESPID             16
+#define M_SCD_TREVT_RESPID             _SB_MAKEMASK(4, S_SCD_TREVT_RESPID)
+#define V_SCD_TREVT_RESPID(x)          _SB_MAKEVALUE(x, S_SCD_TREVT_RESPID)
+#define G_SCD_TREVT_RESPID(x)          _SB_GETVALUE(x, S_SCD_TREVT_RESPID, M_SCD_TREVT_RESPID)
+
+#define S_SCD_TREVT_DATAID             20
+#define M_SCD_TREVT_DATAID             _SB_MAKEMASK(4, S_SCD_TREVT_DATAID)
+#define V_SCD_TREVT_DATAID(x)          _SB_MAKEVALUE(x, S_SCD_TREVT_DATAID)
+#define G_SCD_TREVT_DATAID(x)          _SB_GETVALUE(x, S_SCD_TREVT_DATAID, M_SCD_TREVT_DATID)
+
+#define S_SCD_TREVT_COUNT              24
+#define M_SCD_TREVT_COUNT              _SB_MAKEMASK(8, S_SCD_TREVT_COUNT)
+#define V_SCD_TREVT_COUNT(x)           _SB_MAKEVALUE(x, S_SCD_TREVT_COUNT)
+#define G_SCD_TREVT_COUNT(x)           _SB_GETVALUE(x, S_SCD_TREVT_COUNT, M_SCD_TREVT_COUNT)
 
 /*
  * Trace Sequence registers
  */
 
-#define S_SCD_TRSEQ_EVENT4              0
-#define M_SCD_TRSEQ_EVENT4              _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT4)
-#define V_SCD_TRSEQ_EVENT4(x)           _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT4)
-#define G_SCD_TRSEQ_EVENT4(x)           _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT4, M_SCD_TRSEQ_EVENT4)
-
-#define S_SCD_TRSEQ_EVENT3              4
-#define M_SCD_TRSEQ_EVENT3              _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT3)
-#define V_SCD_TRSEQ_EVENT3(x)           _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT3)
-#define G_SCD_TRSEQ_EVENT3(x)           _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT3, M_SCD_TRSEQ_EVENT3)
-
-#define S_SCD_TRSEQ_EVENT2              8
-#define M_SCD_TRSEQ_EVENT2              _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT2)
-#define V_SCD_TRSEQ_EVENT2(x)           _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT2)
-#define G_SCD_TRSEQ_EVENT2(x)           _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT2, M_SCD_TRSEQ_EVENT2)
-
-#define S_SCD_TRSEQ_EVENT1              12
-#define M_SCD_TRSEQ_EVENT1              _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT1)
-#define V_SCD_TRSEQ_EVENT1(x)           _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT1)
-#define G_SCD_TRSEQ_EVENT1(x)           _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT1, M_SCD_TRSEQ_EVENT1)
-
-#define K_SCD_TRSEQ_E0                  0
-#define K_SCD_TRSEQ_E1                  1
-#define K_SCD_TRSEQ_E2                  2
-#define K_SCD_TRSEQ_E3                  3
-#define K_SCD_TRSEQ_E0_E1               4
-#define K_SCD_TRSEQ_E1_E2               5
-#define K_SCD_TRSEQ_E2_E3               6
-#define K_SCD_TRSEQ_E0_E1_E2            7
-#define K_SCD_TRSEQ_E0_E1_E2_E3         8
-#define K_SCD_TRSEQ_E0E1                9
-#define K_SCD_TRSEQ_E0E1E2              10
-#define K_SCD_TRSEQ_E0E1E2E3            11
-#define K_SCD_TRSEQ_E0E1_E2             12
-#define K_SCD_TRSEQ_E0E1_E2E3           13
-#define K_SCD_TRSEQ_E0E1_E2_E3          14
-#define K_SCD_TRSEQ_IGNORED             15
-
-#define K_SCD_TRSEQ_TRIGGER_ALL         (V_SCD_TRSEQ_EVENT1(K_SCD_TRSEQ_IGNORED) | \
-                                         V_SCD_TRSEQ_EVENT2(K_SCD_TRSEQ_IGNORED) | \
-                                         V_SCD_TRSEQ_EVENT3(K_SCD_TRSEQ_IGNORED) | \
-                                         V_SCD_TRSEQ_EVENT4(K_SCD_TRSEQ_IGNORED))
-
-#define S_SCD_TRSEQ_FUNCTION            16
-#define M_SCD_TRSEQ_FUNCTION            _SB_MAKEMASK(4, S_SCD_TRSEQ_FUNCTION)
-#define V_SCD_TRSEQ_FUNCTION(x)         _SB_MAKEVALUE(x, S_SCD_TRSEQ_FUNCTION)
-#define G_SCD_TRSEQ_FUNCTION(x)         _SB_GETVALUE(x, S_SCD_TRSEQ_FUNCTION, M_SCD_TRSEQ_FUNCTION)
-
-#define K_SCD_TRSEQ_FUNC_NOP            0
-#define K_SCD_TRSEQ_FUNC_START          1
-#define K_SCD_TRSEQ_FUNC_STOP           2
-#define K_SCD_TRSEQ_FUNC_FREEZE         3
-
-#define V_SCD_TRSEQ_FUNC_NOP            V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_NOP)
-#define V_SCD_TRSEQ_FUNC_START          V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_START)
-#define V_SCD_TRSEQ_FUNC_STOP           V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_STOP)
-#define V_SCD_TRSEQ_FUNC_FREEZE         V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_FREEZE)
-
-#define M_SCD_TRSEQ_ASAMPLE             _SB_MAKEMASK1(18)
-#define M_SCD_TRSEQ_DSAMPLE             _SB_MAKEMASK1(19)
-#define M_SCD_TRSEQ_DEBUGPIN            _SB_MAKEMASK1(20)
-#define M_SCD_TRSEQ_DEBUGCPU            _SB_MAKEMASK1(21)
-#define M_SCD_TRSEQ_CLEARUSE            _SB_MAKEMASK1(22)
-#define M_SCD_TRSEQ_ALLD_A              _SB_MAKEMASK1(23)
-#define M_SCD_TRSEQ_ALL_A               _SB_MAKEMASK1(24)
+#define S_SCD_TRSEQ_EVENT4             0
+#define M_SCD_TRSEQ_EVENT4             _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT4)
+#define V_SCD_TRSEQ_EVENT4(x)          _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT4)
+#define G_SCD_TRSEQ_EVENT4(x)          _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT4, M_SCD_TRSEQ_EVENT4)
+
+#define S_SCD_TRSEQ_EVENT3             4
+#define M_SCD_TRSEQ_EVENT3             _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT3)
+#define V_SCD_TRSEQ_EVENT3(x)          _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT3)
+#define G_SCD_TRSEQ_EVENT3(x)          _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT3, M_SCD_TRSEQ_EVENT3)
+
+#define S_SCD_TRSEQ_EVENT2             8
+#define M_SCD_TRSEQ_EVENT2             _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT2)
+#define V_SCD_TRSEQ_EVENT2(x)          _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT2)
+#define G_SCD_TRSEQ_EVENT2(x)          _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT2, M_SCD_TRSEQ_EVENT2)
+
+#define S_SCD_TRSEQ_EVENT1             12
+#define M_SCD_TRSEQ_EVENT1             _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT1)
+#define V_SCD_TRSEQ_EVENT1(x)          _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT1)
+#define G_SCD_TRSEQ_EVENT1(x)          _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT1, M_SCD_TRSEQ_EVENT1)
+
+#define K_SCD_TRSEQ_E0                 0
+#define K_SCD_TRSEQ_E1                 1
+#define K_SCD_TRSEQ_E2                 2
+#define K_SCD_TRSEQ_E3                 3
+#define K_SCD_TRSEQ_E0_E1              4
+#define K_SCD_TRSEQ_E1_E2              5
+#define K_SCD_TRSEQ_E2_E3              6
+#define K_SCD_TRSEQ_E0_E1_E2           7
+#define K_SCD_TRSEQ_E0_E1_E2_E3                8
+#define K_SCD_TRSEQ_E0E1               9
+#define K_SCD_TRSEQ_E0E1E2             10
+#define K_SCD_TRSEQ_E0E1E2E3           11
+#define K_SCD_TRSEQ_E0E1_E2            12
+#define K_SCD_TRSEQ_E0E1_E2E3          13
+#define K_SCD_TRSEQ_E0E1_E2_E3         14
+#define K_SCD_TRSEQ_IGNORED            15
+
+#define K_SCD_TRSEQ_TRIGGER_ALL                (V_SCD_TRSEQ_EVENT1(K_SCD_TRSEQ_IGNORED) | \
+                                        V_SCD_TRSEQ_EVENT2(K_SCD_TRSEQ_IGNORED) | \
+                                        V_SCD_TRSEQ_EVENT3(K_SCD_TRSEQ_IGNORED) | \
+                                        V_SCD_TRSEQ_EVENT4(K_SCD_TRSEQ_IGNORED))
+
+#define S_SCD_TRSEQ_FUNCTION           16
+#define M_SCD_TRSEQ_FUNCTION           _SB_MAKEMASK(4, S_SCD_TRSEQ_FUNCTION)
+#define V_SCD_TRSEQ_FUNCTION(x)                _SB_MAKEVALUE(x, S_SCD_TRSEQ_FUNCTION)
+#define G_SCD_TRSEQ_FUNCTION(x)                _SB_GETVALUE(x, S_SCD_TRSEQ_FUNCTION, M_SCD_TRSEQ_FUNCTION)
+
+#define K_SCD_TRSEQ_FUNC_NOP           0
+#define K_SCD_TRSEQ_FUNC_START         1
+#define K_SCD_TRSEQ_FUNC_STOP          2
+#define K_SCD_TRSEQ_FUNC_FREEZE                3
+
+#define V_SCD_TRSEQ_FUNC_NOP           V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_NOP)
+#define V_SCD_TRSEQ_FUNC_START         V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_START)
+#define V_SCD_TRSEQ_FUNC_STOP          V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_STOP)
+#define V_SCD_TRSEQ_FUNC_FREEZE                V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_FREEZE)
+
+#define M_SCD_TRSEQ_ASAMPLE            _SB_MAKEMASK1(18)
+#define M_SCD_TRSEQ_DSAMPLE            _SB_MAKEMASK1(19)
+#define M_SCD_TRSEQ_DEBUGPIN           _SB_MAKEMASK1(20)
+#define M_SCD_TRSEQ_DEBUGCPU           _SB_MAKEMASK1(21)
+#define M_SCD_TRSEQ_CLEARUSE           _SB_MAKEMASK1(22)
+#define M_SCD_TRSEQ_ALLD_A             _SB_MAKEMASK1(23)
+#define M_SCD_TRSEQ_ALL_A              _SB_MAKEMASK1(24)
 
 #endif
index 128d6b7..3cb73e8 100644 (file)
@@ -1,7 +1,7 @@
 /*  *********************************************************************
     *  SB1250 Board Support Package
     *
-    *  SMBUS Constants                          File: sb1250_smbus.h
+    *  SMBUS Constants                         File: sb1250_smbus.h
     *
     *  This module contains constants and macros useful for
     *  manipulating the SB1250's SMbus devices.
  * SMBus Clock Frequency Register (Table 14-2)
  */
 
-#define S_SMB_FREQ_DIV              0
-#define M_SMB_FREQ_DIV              _SB_MAKEMASK(13, S_SMB_FREQ_DIV)
-#define V_SMB_FREQ_DIV(x)           _SB_MAKEVALUE(x, S_SMB_FREQ_DIV)
+#define S_SMB_FREQ_DIV             0
+#define M_SMB_FREQ_DIV             _SB_MAKEMASK(13, S_SMB_FREQ_DIV)
+#define V_SMB_FREQ_DIV(x)          _SB_MAKEVALUE(x, S_SMB_FREQ_DIV)
 
 #define K_SMB_FREQ_400KHZ          0x1F
 #define K_SMB_FREQ_100KHZ          0x7D
 #define K_SMB_FREQ_10KHZ           1250
 
-#define S_SMB_CMD                   0
-#define M_SMB_CMD                   _SB_MAKEMASK(8, S_SMB_CMD)
-#define V_SMB_CMD(x)                _SB_MAKEVALUE(x, S_SMB_CMD)
+#define S_SMB_CMD                  0
+#define M_SMB_CMD                  _SB_MAKEMASK(8, S_SMB_CMD)
+#define V_SMB_CMD(x)               _SB_MAKEVALUE(x, S_SMB_CMD)
 
 /*
  * SMBus control register (Table 14-4)
  */
 
-#define M_SMB_ERR_INTR              _SB_MAKEMASK1(0)
-#define M_SMB_FINISH_INTR           _SB_MAKEMASK1(1)
+#define M_SMB_ERR_INTR             _SB_MAKEMASK1(0)
+#define M_SMB_FINISH_INTR          _SB_MAKEMASK1(1)
 
-#define S_SMB_DATA_OUT              4
-#define M_SMB_DATA_OUT              _SB_MAKEMASK1(S_SMB_DATA_OUT)
-#define V_SMB_DATA_OUT(x)           _SB_MAKEVALUE(x, S_SMB_DATA_OUT)
+#define S_SMB_DATA_OUT             4
+#define M_SMB_DATA_OUT             _SB_MAKEMASK1(S_SMB_DATA_OUT)
+#define V_SMB_DATA_OUT(x)          _SB_MAKEVALUE(x, S_SMB_DATA_OUT)
 
-#define M_SMB_DATA_DIR              _SB_MAKEMASK1(5)
-#define M_SMB_DATA_DIR_OUTPUT       M_SMB_DATA_DIR
-#define M_SMB_CLK_OUT               _SB_MAKEMASK1(6)
-#define M_SMB_DIRECT_ENABLE         _SB_MAKEMASK1(7)
+#define M_SMB_DATA_DIR             _SB_MAKEMASK1(5)
+#define M_SMB_DATA_DIR_OUTPUT      M_SMB_DATA_DIR
+#define M_SMB_CLK_OUT              _SB_MAKEMASK1(6)
+#define M_SMB_DIRECT_ENABLE        _SB_MAKEMASK1(7)
 
 /*
  * SMBus status registers (Table 14-5)
  */
 
-#define M_SMB_BUSY                  _SB_MAKEMASK1(0)
-#define M_SMB_ERROR                 _SB_MAKEMASK1(1)
-#define M_SMB_ERROR_TYPE            _SB_MAKEMASK1(2)
+#define M_SMB_BUSY                 _SB_MAKEMASK1(0)
+#define M_SMB_ERROR                _SB_MAKEMASK1(1)
+#define M_SMB_ERROR_TYPE           _SB_MAKEMASK1(2)
 
 #if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_SMB_SCL_IN                5
-#define M_SMB_SCL_IN                _SB_MAKEMASK1(S_SMB_SCL_IN)
-#define V_SMB_SCL_IN(x)             _SB_MAKEVALUE(x, S_SMB_SCL_IN)
-#define G_SMB_SCL_IN(x)             _SB_GETVALUE(x, S_SMB_SCL_IN, M_SMB_SCL_IN)
+#define S_SMB_SCL_IN               5
+#define M_SMB_SCL_IN               _SB_MAKEMASK1(S_SMB_SCL_IN)
+#define V_SMB_SCL_IN(x)                    _SB_MAKEVALUE(x, S_SMB_SCL_IN)
+#define G_SMB_SCL_IN(x)                    _SB_GETVALUE(x, S_SMB_SCL_IN, M_SMB_SCL_IN)
 #endif /* 1250 PASS3 || 112x PASS1 || 1480 */
 
-#define S_SMB_REF                   6
-#define M_SMB_REF                   _SB_MAKEMASK1(S_SMB_REF)
-#define V_SMB_REF(x)                _SB_MAKEVALUE(x, S_SMB_REF)
-#define G_SMB_REF(x)                _SB_GETVALUE(x, S_SMB_REF, M_SMB_REF)
+#define S_SMB_REF                  6
+#define M_SMB_REF                  _SB_MAKEMASK1(S_SMB_REF)
+#define V_SMB_REF(x)               _SB_MAKEVALUE(x, S_SMB_REF)
+#define G_SMB_REF(x)               _SB_GETVALUE(x, S_SMB_REF, M_SMB_REF)
 
-#define S_SMB_DATA_IN               7
-#define M_SMB_DATA_IN               _SB_MAKEMASK1(S_SMB_DATA_IN)
-#define V_SMB_DATA_IN(x)            _SB_MAKEVALUE(x, S_SMB_DATA_IN)
-#define G_SMB_DATA_IN(x)            _SB_GETVALUE(x, S_SMB_DATA_IN, M_SMB_DATA_IN)
+#define S_SMB_DATA_IN              7
+#define M_SMB_DATA_IN              _SB_MAKEMASK1(S_SMB_DATA_IN)
+#define V_SMB_DATA_IN(x)           _SB_MAKEVALUE(x, S_SMB_DATA_IN)
+#define G_SMB_DATA_IN(x)           _SB_GETVALUE(x, S_SMB_DATA_IN, M_SMB_DATA_IN)
 
 /*
  * SMBus Start/Command registers (Table 14-9)
  */
 
-#define S_SMB_ADDR                  0
-#define M_SMB_ADDR                  _SB_MAKEMASK(7, S_SMB_ADDR)
-#define V_SMB_ADDR(x)               _SB_MAKEVALUE(x, S_SMB_ADDR)
-#define G_SMB_ADDR(x)               _SB_GETVALUE(x, S_SMB_ADDR, M_SMB_ADDR)
+#define S_SMB_ADDR                 0
+#define M_SMB_ADDR                 _SB_MAKEMASK(7, S_SMB_ADDR)
+#define V_SMB_ADDR(x)              _SB_MAKEVALUE(x, S_SMB_ADDR)
+#define G_SMB_ADDR(x)              _SB_GETVALUE(x, S_SMB_ADDR, M_SMB_ADDR)
 
-#define M_SMB_QDATA                 _SB_MAKEMASK1(7)
+#define M_SMB_QDATA                _SB_MAKEMASK1(7)
 
-#define S_SMB_TT                    8
-#define M_SMB_TT                    _SB_MAKEMASK(3, S_SMB_TT)
-#define V_SMB_TT(x)                 _SB_MAKEVALUE(x, S_SMB_TT)
-#define G_SMB_TT(x)                 _SB_GETVALUE(x, S_SMB_TT, M_SMB_TT)
+#define S_SMB_TT                   8
+#define M_SMB_TT                   _SB_MAKEMASK(3, S_SMB_TT)
+#define V_SMB_TT(x)                _SB_MAKEVALUE(x, S_SMB_TT)
+#define G_SMB_TT(x)                _SB_GETVALUE(x, S_SMB_TT, M_SMB_TT)
 
-#define K_SMB_TT_WR1BYTE            0
-#define K_SMB_TT_WR2BYTE            1
-#define K_SMB_TT_WR3BYTE            2
-#define K_SMB_TT_CMD_RD1BYTE        3
-#define K_SMB_TT_CMD_RD2BYTE        4
-#define K_SMB_TT_RD1BYTE            5
-#define K_SMB_TT_QUICKCMD           6
-#define K_SMB_TT_EEPROMREAD         7
+#define K_SMB_TT_WR1BYTE           0
+#define K_SMB_TT_WR2BYTE           1
+#define K_SMB_TT_WR3BYTE           2
+#define K_SMB_TT_CMD_RD1BYTE       3
+#define K_SMB_TT_CMD_RD2BYTE       4
+#define K_SMB_TT_RD1BYTE           5
+#define K_SMB_TT_QUICKCMD          6
+#define K_SMB_TT_EEPROMREAD        7
 
 #define V_SMB_TT_WR1BYTE           V_SMB_TT(K_SMB_TT_WR1BYTE)
 #define V_SMB_TT_WR2BYTE           V_SMB_TT(K_SMB_TT_WR2BYTE)
 #define V_SMB_TT_QUICKCMD          V_SMB_TT(K_SMB_TT_QUICKCMD)
 #define V_SMB_TT_EEPROMREAD        V_SMB_TT(K_SMB_TT_EEPROMREAD)
 
-#define M_SMB_PEC                   _SB_MAKEMASK1(15)
+#define M_SMB_PEC                  _SB_MAKEMASK1(15)
 
 /*
  * SMBus Data Register (Table 14-6) and SMBus Extra Register (Table 14-7)
  */
 
-#define S_SMB_LB                    0
-#define M_SMB_LB                    _SB_MAKEMASK(8, S_SMB_LB)
-#define V_SMB_LB(x)                 _SB_MAKEVALUE(x, S_SMB_LB)
+#define S_SMB_LB                   0
+#define M_SMB_LB                   _SB_MAKEMASK(8, S_SMB_LB)
+#define V_SMB_LB(x)                _SB_MAKEVALUE(x, S_SMB_LB)
 
-#define S_SMB_MB                    8
-#define M_SMB_MB                    _SB_MAKEMASK(8, S_SMB_MB)
-#define V_SMB_MB(x)                 _SB_MAKEVALUE(x, S_SMB_MB)
+#define S_SMB_MB                   8
+#define M_SMB_MB                   _SB_MAKEMASK(8, S_SMB_MB)
+#define V_SMB_MB(x)                _SB_MAKEVALUE(x, S_SMB_MB)
 
 
 /*
  * SMBus Packet Error Check register (Table 14-8)
  */
 
-#define S_SPEC_PEC                  0
-#define M_SPEC_PEC                  _SB_MAKEMASK(8, S_SPEC_PEC)
-#define V_SPEC_MB(x)                _SB_MAKEVALUE(x, S_SPEC_PEC)
+#define S_SPEC_PEC                 0
+#define M_SPEC_PEC                 _SB_MAKEMASK(8, S_SPEC_PEC)
+#define V_SPEC_MB(x)               _SB_MAKEVALUE(x, S_SPEC_PEC)
 
 
 #if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
 
-#define S_SMB_CMDH                  8
-#define M_SMB_CMDH                  _SB_MAKEMASK(8, S_SMB_CMDH)
-#define V_SMB_CMDH(x)               _SB_MAKEVALUE(x, S_SMB_CMDH)
+#define S_SMB_CMDH                 8
+#define M_SMB_CMDH                 _SB_MAKEMASK(8, S_SMB_CMDH)
+#define V_SMB_CMDH(x)              _SB_MAKEVALUE(x, S_SMB_CMDH)
 
 #define M_SMB_EXTEND               _SB_MAKEMASK1(14)
 
-#define S_SMB_DFMT                  8
-#define M_SMB_DFMT                  _SB_MAKEMASK(3, S_SMB_DFMT)
-#define V_SMB_DFMT(x)               _SB_MAKEVALUE(x, S_SMB_DFMT)
-#define G_SMB_DFMT(x)               _SB_GETVALUE(x, S_SMB_DFMT, M_SMB_DFMT)
+#define S_SMB_DFMT                 8
+#define M_SMB_DFMT                 _SB_MAKEMASK(3, S_SMB_DFMT)
+#define V_SMB_DFMT(x)              _SB_MAKEVALUE(x, S_SMB_DFMT)
+#define G_SMB_DFMT(x)              _SB_GETVALUE(x, S_SMB_DFMT, M_SMB_DFMT)
 
-#define K_SMB_DFMT_1BYTE            0
-#define K_SMB_DFMT_2BYTE            1
-#define K_SMB_DFMT_3BYTE            2
-#define K_SMB_DFMT_4BYTE            3
-#define K_SMB_DFMT_NODATA           4
-#define K_SMB_DFMT_CMD4BYTE         5
-#define K_SMB_DFMT_CMD5BYTE         6
-#define K_SMB_DFMT_RESERVED         7
+#define K_SMB_DFMT_1BYTE           0
+#define K_SMB_DFMT_2BYTE           1
+#define K_SMB_DFMT_3BYTE           2
+#define K_SMB_DFMT_4BYTE           3
+#define K_SMB_DFMT_NODATA          4
+#define K_SMB_DFMT_CMD4BYTE        5
+#define K_SMB_DFMT_CMD5BYTE        6
+#define K_SMB_DFMT_RESERVED        7
 
 #define V_SMB_DFMT_1BYTE           V_SMB_DFMT(K_SMB_DFMT_1BYTE)
 #define V_SMB_DFMT_2BYTE           V_SMB_DFMT(K_SMB_DFMT_2BYTE)
 #define V_SMB_DFMT_CMD5BYTE        V_SMB_DFMT(K_SMB_DFMT_CMD5BYTE)
 #define V_SMB_DFMT_RESERVED        V_SMB_DFMT(K_SMB_DFMT_RESERVED)
 
-#define S_SMB_AFMT                  11
-#define M_SMB_AFMT                  _SB_MAKEMASK(2, S_SMB_AFMT)
-#define V_SMB_AFMT(x)               _SB_MAKEVALUE(x, S_SMB_AFMT)
-#define G_SMB_AFMT(x)               _SB_GETVALUE(x, S_SMB_AFMT, M_SMB_AFMT)
+#define S_SMB_AFMT                 11
+#define M_SMB_AFMT                 _SB_MAKEMASK(2, S_SMB_AFMT)
+#define V_SMB_AFMT(x)              _SB_MAKEVALUE(x, S_SMB_AFMT)
+#define G_SMB_AFMT(x)              _SB_GETVALUE(x, S_SMB_AFMT, M_SMB_AFMT)
 
-#define K_SMB_AFMT_NONE             0
-#define K_SMB_AFMT_ADDR             1
+#define K_SMB_AFMT_NONE                    0
+#define K_SMB_AFMT_ADDR                    1
 #define K_SMB_AFMT_ADDR_CMD1BYTE    2
 #define K_SMB_AFMT_ADDR_CMD2BYTE    3
 
index 274e917..b3acc75 100644 (file)
@@ -1,7 +1,7 @@
 /*  *********************************************************************
     *  SB1250 Board Support Package
     *
-    *  Synchronous Serial Constants              File: sb1250_syncser.h
+    *  Synchronous Serial Constants             File: sb1250_syncser.h
     *
     *  This module contains constants and macros useful for
     *  manipulating the SB1250's Synchronous Serial
  * Serial Mode Configuration Register
  */
 
-#define M_SYNCSER_CRC_MODE                 _SB_MAKEMASK1(0)
-#define M_SYNCSER_MSB_FIRST                _SB_MAKEMASK1(1)
+#define M_SYNCSER_CRC_MODE                _SB_MAKEMASK1(0)
+#define M_SYNCSER_MSB_FIRST               _SB_MAKEMASK1(1)
 
-#define S_SYNCSER_FLAG_NUM                 2
-#define M_SYNCSER_FLAG_NUM                 _SB_MAKEMASK(4, S_SYNCSER_FLAG_NUM)
-#define V_SYNCSER_FLAG_NUM                 _SB_MAKEVALUE(x, S_SYNCSER_FLAG_NUM)
+#define S_SYNCSER_FLAG_NUM                2
+#define M_SYNCSER_FLAG_NUM                _SB_MAKEMASK(4, S_SYNCSER_FLAG_NUM)
+#define V_SYNCSER_FLAG_NUM                _SB_MAKEVALUE(x, S_SYNCSER_FLAG_NUM)
 
-#define M_SYNCSER_FLAG_EN                  _SB_MAKEMASK1(6)
-#define M_SYNCSER_HDLC_EN                  _SB_MAKEMASK1(7)
-#define M_SYNCSER_LOOP_MODE                _SB_MAKEMASK1(8)
-#define M_SYNCSER_LOOPBACK                 _SB_MAKEMASK1(9)
+#define M_SYNCSER_FLAG_EN                 _SB_MAKEMASK1(6)
+#define M_SYNCSER_HDLC_EN                 _SB_MAKEMASK1(7)
+#define M_SYNCSER_LOOP_MODE               _SB_MAKEMASK1(8)
+#define M_SYNCSER_LOOPBACK                _SB_MAKEMASK1(9)
 
 /*
  * Serial Clock Source and Line Interface Mode Register
  */
 
-#define M_SYNCSER_RXCLK_INV                _SB_MAKEMASK1(0)
-#define M_SYNCSER_RXCLK_EXT                _SB_MAKEMASK1(1)
+#define M_SYNCSER_RXCLK_INV               _SB_MAKEMASK1(0)
+#define M_SYNCSER_RXCLK_EXT               _SB_MAKEMASK1(1)
 
-#define S_SYNCSER_RXSYNC_DLY               2
-#define M_SYNCSER_RXSYNC_DLY               _SB_MAKEMASK(2, S_SYNCSER_RXSYNC_DLY)
-#define V_SYNCSER_RXSYNC_DLY(x)            _SB_MAKEVALUE(x, S_SYNCSER_RXSYNC_DLY)
+#define S_SYNCSER_RXSYNC_DLY              2
+#define M_SYNCSER_RXSYNC_DLY              _SB_MAKEMASK(2, S_SYNCSER_RXSYNC_DLY)
+#define V_SYNCSER_RXSYNC_DLY(x)                   _SB_MAKEVALUE(x, S_SYNCSER_RXSYNC_DLY)
 
-#define M_SYNCSER_RXSYNC_LOW               _SB_MAKEMASK1(4)
-#define M_SYNCSER_RXSTRB_LOW               _SB_MAKEMASK1(5)
+#define M_SYNCSER_RXSYNC_LOW              _SB_MAKEMASK1(4)
+#define M_SYNCSER_RXSTRB_LOW              _SB_MAKEMASK1(5)
 
-#define M_SYNCSER_RXSYNC_EDGE              _SB_MAKEMASK1(6)
-#define M_SYNCSER_RXSYNC_INT               _SB_MAKEMASK1(7)
+#define M_SYNCSER_RXSYNC_EDGE             _SB_MAKEMASK1(6)
+#define M_SYNCSER_RXSYNC_INT              _SB_MAKEMASK1(7)
 
-#define M_SYNCSER_TXCLK_INV                _SB_MAKEMASK1(8)
-#define M_SYNCSER_TXCLK_EXT                _SB_MAKEMASK1(9)
+#define M_SYNCSER_TXCLK_INV               _SB_MAKEMASK1(8)
+#define M_SYNCSER_TXCLK_EXT               _SB_MAKEMASK1(9)
 
-#define S_SYNCSER_TXSYNC_DLY               10
-#define M_SYNCSER_TXSYNC_DLY               _SB_MAKEMASK(2, S_SYNCSER_TXSYNC_DLY)
-#define V_SYNCSER_TXSYNC_DLY(x)            _SB_MAKEVALUE(x, S_SYNCSER_TXSYNC_DLY)
+#define S_SYNCSER_TXSYNC_DLY              10
+#define M_SYNCSER_TXSYNC_DLY              _SB_MAKEMASK(2, S_SYNCSER_TXSYNC_DLY)
+#define V_SYNCSER_TXSYNC_DLY(x)                   _SB_MAKEVALUE(x, S_SYNCSER_TXSYNC_DLY)
 
-#define M_SYNCSER_TXSYNC_LOW               _SB_MAKEMASK1(12)
-#define M_SYNCSER_TXSTRB_LOW               _SB_MAKEMASK1(13)
+#define M_SYNCSER_TXSYNC_LOW              _SB_MAKEMASK1(12)
+#define M_SYNCSER_TXSTRB_LOW              _SB_MAKEMASK1(13)
 
-#define M_SYNCSER_TXSYNC_EDGE              _SB_MAKEMASK1(14)
-#define M_SYNCSER_TXSYNC_INT               _SB_MAKEMASK1(15)
+#define M_SYNCSER_TXSYNC_EDGE             _SB_MAKEMASK1(14)
+#define M_SYNCSER_TXSYNC_INT              _SB_MAKEMASK1(15)
 
 /*
  * Serial Command Register
  */
 
-#define M_SYNCSER_CMD_RX_EN                _SB_MAKEMASK1(0)
-#define M_SYNCSER_CMD_TX_EN                _SB_MAKEMASK1(1)
-#define M_SYNCSER_CMD_RX_RESET             _SB_MAKEMASK1(2)
-#define M_SYNCSER_CMD_TX_RESET             _SB_MAKEMASK1(3)
-#define M_SYNCSER_CMD_TX_PAUSE             _SB_MAKEMASK1(5)
+#define M_SYNCSER_CMD_RX_EN               _SB_MAKEMASK1(0)
+#define M_SYNCSER_CMD_TX_EN               _SB_MAKEMASK1(1)
+#define M_SYNCSER_CMD_RX_RESET            _SB_MAKEMASK1(2)
+#define M_SYNCSER_CMD_TX_RESET            _SB_MAKEMASK1(3)
+#define M_SYNCSER_CMD_TX_PAUSE            _SB_MAKEMASK1(5)
 
 /*
  * Serial DMA Enable Register
  */
 
-#define M_SYNCSER_DMA_RX_EN                _SB_MAKEMASK1(0)
-#define M_SYNCSER_DMA_TX_EN                _SB_MAKEMASK1(4)
+#define M_SYNCSER_DMA_RX_EN               _SB_MAKEMASK1(0)
+#define M_SYNCSER_DMA_TX_EN               _SB_MAKEMASK1(4)
 
 /*
  * Serial Status Register
  */
 
-#define M_SYNCSER_RX_CRCERR                _SB_MAKEMASK1(0)
-#define M_SYNCSER_RX_ABORT                 _SB_MAKEMASK1(1)
-#define M_SYNCSER_RX_OCTET                 _SB_MAKEMASK1(2)
-#define M_SYNCSER_RX_LONGFRM               _SB_MAKEMASK1(3)
-#define M_SYNCSER_RX_SHORTFRM              _SB_MAKEMASK1(4)
-#define M_SYNCSER_RX_OVERRUN               _SB_MAKEMASK1(5)
-#define M_SYNCSER_RX_SYNC_ERR              _SB_MAKEMASK1(6)
-#define M_SYNCSER_TX_CRCERR                _SB_MAKEMASK1(8)
-#define M_SYNCSER_TX_UNDERRUN              _SB_MAKEMASK1(9)
-#define M_SYNCSER_TX_SYNC_ERR              _SB_MAKEMASK1(10)
-#define M_SYNCSER_TX_PAUSE_COMPLETE        _SB_MAKEMASK1(11)
-#define M_SYNCSER_RX_EOP_COUNT             _SB_MAKEMASK1(16)
-#define M_SYNCSER_RX_EOP_TIMER             _SB_MAKEMASK1(17)
-#define M_SYNCSER_RX_EOP_SEEN              _SB_MAKEMASK1(18)
-#define M_SYNCSER_RX_HWM                   _SB_MAKEMASK1(19)
-#define M_SYNCSER_RX_LWM                   _SB_MAKEMASK1(20)
-#define M_SYNCSER_RX_DSCR                  _SB_MAKEMASK1(21)
-#define M_SYNCSER_RX_DERR                  _SB_MAKEMASK1(22)
-#define M_SYNCSER_TX_EOP_COUNT             _SB_MAKEMASK1(24)
-#define M_SYNCSER_TX_EOP_TIMER             _SB_MAKEMASK1(25)
-#define M_SYNCSER_TX_EOP_SEEN              _SB_MAKEMASK1(26)
-#define M_SYNCSER_TX_HWM                   _SB_MAKEMASK1(27)
-#define M_SYNCSER_TX_LWM                   _SB_MAKEMASK1(28)
-#define M_SYNCSER_TX_DSCR                  _SB_MAKEMASK1(29)
-#define M_SYNCSER_TX_DERR                  _SB_MAKEMASK1(30)
-#define M_SYNCSER_TX_DZERO                 _SB_MAKEMASK1(31)
+#define M_SYNCSER_RX_CRCERR               _SB_MAKEMASK1(0)
+#define M_SYNCSER_RX_ABORT                _SB_MAKEMASK1(1)
+#define M_SYNCSER_RX_OCTET                _SB_MAKEMASK1(2)
+#define M_SYNCSER_RX_LONGFRM              _SB_MAKEMASK1(3)
+#define M_SYNCSER_RX_SHORTFRM             _SB_MAKEMASK1(4)
+#define M_SYNCSER_RX_OVERRUN              _SB_MAKEMASK1(5)
+#define M_SYNCSER_RX_SYNC_ERR             _SB_MAKEMASK1(6)
+#define M_SYNCSER_TX_CRCERR               _SB_MAKEMASK1(8)
+#define M_SYNCSER_TX_UNDERRUN             _SB_MAKEMASK1(9)
+#define M_SYNCSER_TX_SYNC_ERR             _SB_MAKEMASK1(10)
+#define M_SYNCSER_TX_PAUSE_COMPLETE       _SB_MAKEMASK1(11)
+#define M_SYNCSER_RX_EOP_COUNT            _SB_MAKEMASK1(16)
+#define M_SYNCSER_RX_EOP_TIMER            _SB_MAKEMASK1(17)
+#define M_SYNCSER_RX_EOP_SEEN             _SB_MAKEMASK1(18)
+#define M_SYNCSER_RX_HWM                  _SB_MAKEMASK1(19)
+#define M_SYNCSER_RX_LWM                  _SB_MAKEMASK1(20)
+#define M_SYNCSER_RX_DSCR                 _SB_MAKEMASK1(21)
+#define M_SYNCSER_RX_DERR                 _SB_MAKEMASK1(22)
+#define M_SYNCSER_TX_EOP_COUNT            _SB_MAKEMASK1(24)
+#define M_SYNCSER_TX_EOP_TIMER            _SB_MAKEMASK1(25)
+#define M_SYNCSER_TX_EOP_SEEN             _SB_MAKEMASK1(26)
+#define M_SYNCSER_TX_HWM                  _SB_MAKEMASK1(27)
+#define M_SYNCSER_TX_LWM                  _SB_MAKEMASK1(28)
+#define M_SYNCSER_TX_DSCR                 _SB_MAKEMASK1(29)
+#define M_SYNCSER_TX_DERR                 _SB_MAKEMASK1(30)
+#define M_SYNCSER_TX_DZERO                _SB_MAKEMASK1(31)
 
 /*
  * Sequencer Table Entry format
  */
 
-#define M_SYNCSER_SEQ_LAST                 _SB_MAKEMASK1(0)
-#define M_SYNCSER_SEQ_BYTE                 _SB_MAKEMASK1(1)
+#define M_SYNCSER_SEQ_LAST                _SB_MAKEMASK1(0)
+#define M_SYNCSER_SEQ_BYTE                _SB_MAKEMASK1(1)
 
-#define S_SYNCSER_SEQ_COUNT                2
-#define M_SYNCSER_SEQ_COUNT                _SB_MAKEMASK(4, S_SYNCSER_SEQ_COUNT)
-#define V_SYNCSER_SEQ_COUNT(x)             _SB_MAKEVALUE(x, S_SYNCSER_SEQ_COUNT)
+#define S_SYNCSER_SEQ_COUNT               2
+#define M_SYNCSER_SEQ_COUNT               _SB_MAKEMASK(4, S_SYNCSER_SEQ_COUNT)
+#define V_SYNCSER_SEQ_COUNT(x)            _SB_MAKEVALUE(x, S_SYNCSER_SEQ_COUNT)
 
-#define M_SYNCSER_SEQ_ENABLE               _SB_MAKEMASK1(6)
-#define M_SYNCSER_SEQ_STROBE               _SB_MAKEMASK1(7)
+#define M_SYNCSER_SEQ_ENABLE              _SB_MAKEMASK1(6)
+#define M_SYNCSER_SEQ_STROBE              _SB_MAKEMASK1(7)
 
 #endif
index bb99eca..a43dc19 100644 (file)
  * Register: DUART_MODE_REG_1_B
  */
 
-#define S_DUART_BITS_PER_CHAR       0
-#define M_DUART_BITS_PER_CHAR       _SB_MAKEMASK(2, S_DUART_BITS_PER_CHAR)
+#define S_DUART_BITS_PER_CHAR      0
+#define M_DUART_BITS_PER_CHAR      _SB_MAKEMASK(2, S_DUART_BITS_PER_CHAR)
 #define V_DUART_BITS_PER_CHAR(x)    _SB_MAKEVALUE(x, S_DUART_BITS_PER_CHAR)
 
 #define K_DUART_BITS_PER_CHAR_RSV0  0
 #define K_DUART_BITS_PER_CHAR_RSV1  1
-#define K_DUART_BITS_PER_CHAR_7     2
-#define K_DUART_BITS_PER_CHAR_8     3
+#define K_DUART_BITS_PER_CHAR_7            2
+#define K_DUART_BITS_PER_CHAR_8            3
 
 #define V_DUART_BITS_PER_CHAR_RSV0  V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_RSV0)
 #define V_DUART_BITS_PER_CHAR_RSV1  V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_RSV1)
-#define V_DUART_BITS_PER_CHAR_7     V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_7)
-#define V_DUART_BITS_PER_CHAR_8     V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_8)
+#define V_DUART_BITS_PER_CHAR_7            V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_7)
+#define V_DUART_BITS_PER_CHAR_8            V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_8)
 
 
 #define M_DUART_PARITY_TYPE_EVEN    0x00
-#define M_DUART_PARITY_TYPE_ODD     _SB_MAKEMASK1(2)
+#define M_DUART_PARITY_TYPE_ODD            _SB_MAKEMASK1(2)
 
-#define S_DUART_PARITY_MODE          3
-#define M_DUART_PARITY_MODE         _SB_MAKEMASK(2, S_DUART_PARITY_MODE)
-#define V_DUART_PARITY_MODE(x)      _SB_MAKEVALUE(x, S_DUART_PARITY_MODE)
+#define S_DUART_PARITY_MODE         3
+#define M_DUART_PARITY_MODE        _SB_MAKEMASK(2, S_DUART_PARITY_MODE)
+#define V_DUART_PARITY_MODE(x)     _SB_MAKEVALUE(x, S_DUART_PARITY_MODE)
 
-#define K_DUART_PARITY_MODE_ADD       0
+#define K_DUART_PARITY_MODE_ADD              0
 #define K_DUART_PARITY_MODE_ADD_FIXED 1
 #define K_DUART_PARITY_MODE_NONE      2
 
-#define V_DUART_PARITY_MODE_ADD       V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD)
+#define V_DUART_PARITY_MODE_ADD              V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD)
 #define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED)
 #define V_DUART_PARITY_MODE_NONE      V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE)
 
@@ -81,7 +81,7 @@
 #define M_DUART_RX_IRQ_SEL_RXRDY    0
 #define M_DUART_RX_IRQ_SEL_RXFULL   _SB_MAKEMASK1(6)
 
-#define M_DUART_RX_RTS_ENA          _SB_MAKEMASK1(7)
+#define M_DUART_RX_RTS_ENA         _SB_MAKEMASK1(7)
 
 /*
  * DUART Mode Register #2 (Table 10-4)
  * Register: DUART_MODE_REG_2_B
  */
 
-#define M_DUART_MODE_RESERVED1      _SB_MAKEMASK(3, 0)   /* ignored */
+#define M_DUART_MODE_RESERVED1     _SB_MAKEMASK(3, 0)   /* ignored */
 
-#define M_DUART_STOP_BIT_LEN_2      _SB_MAKEMASK1(3)
-#define M_DUART_STOP_BIT_LEN_1      0
+#define M_DUART_STOP_BIT_LEN_2     _SB_MAKEMASK1(3)
+#define M_DUART_STOP_BIT_LEN_1     0
 
-#define M_DUART_TX_CTS_ENA          _SB_MAKEMASK1(4)
+#define M_DUART_TX_CTS_ENA         _SB_MAKEMASK1(4)
 
 
-#define M_DUART_MODE_RESERVED2      _SB_MAKEMASK1(5)    /* must be zero */
+#define M_DUART_MODE_RESERVED2     _SB_MAKEMASK1(5)    /* must be zero */
 
 #define S_DUART_CHAN_MODE          6
-#define M_DUART_CHAN_MODE           _SB_MAKEMASK(2, S_DUART_CHAN_MODE)
+#define M_DUART_CHAN_MODE          _SB_MAKEMASK(2, S_DUART_CHAN_MODE)
 #define V_DUART_CHAN_MODE(x)       _SB_MAKEVALUE(x, S_DUART_CHAN_MODE)
 
 #define K_DUART_CHAN_MODE_NORMAL    0
  * Register: DUART_CMD_B
  */
 
-#define M_DUART_RX_EN               _SB_MAKEMASK1(0)
-#define M_DUART_RX_DIS              _SB_MAKEMASK1(1)
-#define M_DUART_TX_EN               _SB_MAKEMASK1(2)
-#define M_DUART_TX_DIS              _SB_MAKEMASK1(3)
+#define M_DUART_RX_EN              _SB_MAKEMASK1(0)
+#define M_DUART_RX_DIS             _SB_MAKEMASK1(1)
+#define M_DUART_TX_EN              _SB_MAKEMASK1(2)
+#define M_DUART_TX_DIS             _SB_MAKEMASK1(3)
 
 #define S_DUART_MISC_CMD           4
-#define M_DUART_MISC_CMD            _SB_MAKEMASK(3, S_DUART_MISC_CMD)
-#define V_DUART_MISC_CMD(x)         _SB_MAKEVALUE(x, S_DUART_MISC_CMD)
-
-#define K_DUART_MISC_CMD_NOACTION0       0
-#define K_DUART_MISC_CMD_NOACTION1       1
-#define K_DUART_MISC_CMD_RESET_RX        2
-#define K_DUART_MISC_CMD_RESET_TX        3
-#define K_DUART_MISC_CMD_NOACTION4       4
+#define M_DUART_MISC_CMD           _SB_MAKEMASK(3, S_DUART_MISC_CMD)
+#define V_DUART_MISC_CMD(x)        _SB_MAKEVALUE(x, S_DUART_MISC_CMD)
+
+#define K_DUART_MISC_CMD_NOACTION0      0
+#define K_DUART_MISC_CMD_NOACTION1      1
+#define K_DUART_MISC_CMD_RESET_RX       2
+#define K_DUART_MISC_CMD_RESET_TX       3
+#define K_DUART_MISC_CMD_NOACTION4      4
 #define K_DUART_MISC_CMD_RESET_BREAK_INT 5
-#define K_DUART_MISC_CMD_START_BREAK     6
-#define K_DUART_MISC_CMD_STOP_BREAK      7
-
-#define V_DUART_MISC_CMD_NOACTION0       V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION0)
-#define V_DUART_MISC_CMD_NOACTION1       V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION1)
-#define V_DUART_MISC_CMD_RESET_RX        V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_RX)
-#define V_DUART_MISC_CMD_RESET_TX        V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_TX)
-#define V_DUART_MISC_CMD_NOACTION4       V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION4)
+#define K_DUART_MISC_CMD_START_BREAK    6
+#define K_DUART_MISC_CMD_STOP_BREAK     7
+
+#define V_DUART_MISC_CMD_NOACTION0      V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION0)
+#define V_DUART_MISC_CMD_NOACTION1      V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION1)
+#define V_DUART_MISC_CMD_RESET_RX       V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_RX)
+#define V_DUART_MISC_CMD_RESET_TX       V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_TX)
+#define V_DUART_MISC_CMD_NOACTION4      V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION4)
 #define V_DUART_MISC_CMD_RESET_BREAK_INT V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_BREAK_INT)
-#define V_DUART_MISC_CMD_START_BREAK     V_DUART_MISC_CMD(K_DUART_MISC_CMD_START_BREAK)
-#define V_DUART_MISC_CMD_STOP_BREAK      V_DUART_MISC_CMD(K_DUART_MISC_CMD_STOP_BREAK)
+#define V_DUART_MISC_CMD_START_BREAK    V_DUART_MISC_CMD(K_DUART_MISC_CMD_START_BREAK)
+#define V_DUART_MISC_CMD_STOP_BREAK     V_DUART_MISC_CMD(K_DUART_MISC_CMD_STOP_BREAK)
 
-#define M_DUART_CMD_RESERVED             _SB_MAKEMASK1(7)
+#define M_DUART_CMD_RESERVED            _SB_MAKEMASK1(7)
 
 /*
  * DUART Status Register (Table 10-6)
  * READ-ONLY
  */
 
-#define M_DUART_RX_RDY              _SB_MAKEMASK1(0)
-#define M_DUART_RX_FFUL             _SB_MAKEMASK1(1)
-#define M_DUART_TX_RDY              _SB_MAKEMASK1(2)
-#define M_DUART_TX_EMT              _SB_MAKEMASK1(3)
-#define M_DUART_OVRUN_ERR           _SB_MAKEMASK1(4)
-#define M_DUART_PARITY_ERR          _SB_MAKEMASK1(5)
-#define M_DUART_FRM_ERR             _SB_MAKEMASK1(6)
-#define M_DUART_RCVD_BRK            _SB_MAKEMASK1(7)
+#define M_DUART_RX_RDY             _SB_MAKEMASK1(0)
+#define M_DUART_RX_FFUL                    _SB_MAKEMASK1(1)
+#define M_DUART_TX_RDY             _SB_MAKEMASK1(2)
+#define M_DUART_TX_EMT             _SB_MAKEMASK1(3)
+#define M_DUART_OVRUN_ERR          _SB_MAKEMASK1(4)
+#define M_DUART_PARITY_ERR         _SB_MAKEMASK1(5)
+#define M_DUART_FRM_ERR                    _SB_MAKEMASK1(6)
+#define M_DUART_RCVD_BRK           _SB_MAKEMASK1(7)
 
 /*
  * DUART Baud Rate Register (Table 10-7)
  * Register: DUART_CLK_SEL_B
  */
 
-#define M_DUART_CLK_COUNTER         _SB_MAKEMASK(12, 0)
-#define V_DUART_BAUD_RATE(x)        (100000000/((x)*20)-1)
+#define M_DUART_CLK_COUNTER        _SB_MAKEMASK(12, 0)
+#define V_DUART_BAUD_RATE(x)       (100000000/((x)*20)-1)
 
 /*
  * DUART Data Registers (Table 10-8 and 10-9)
  * Register: DUART_TX_HOLD_B
  */
 
-#define M_DUART_RX_DATA             _SB_MAKEMASK(8, 0)
-#define M_DUART_TX_DATA             _SB_MAKEMASK(8, 0)
+#define M_DUART_RX_DATA                    _SB_MAKEMASK(8, 0)
+#define M_DUART_TX_DATA                    _SB_MAKEMASK(8, 0)
 
 /*
  * DUART Input Port Register (Table 10-10)
  * Register: DUART_IN_PORT
  */
 
-#define M_DUART_IN_PIN0_VAL         _SB_MAKEMASK1(0)
-#define M_DUART_IN_PIN1_VAL         _SB_MAKEMASK1(1)
-#define M_DUART_IN_PIN2_VAL         _SB_MAKEMASK1(2)
-#define M_DUART_IN_PIN3_VAL         _SB_MAKEMASK1(3)
-#define M_DUART_IN_PIN4_VAL         _SB_MAKEMASK1(4)
-#define M_DUART_IN_PIN5_VAL         _SB_MAKEMASK1(5)
-#define M_DUART_RIN0_PIN            _SB_MAKEMASK1(6)
-#define M_DUART_RIN1_PIN            _SB_MAKEMASK1(7)
+#define M_DUART_IN_PIN0_VAL        _SB_MAKEMASK1(0)
+#define M_DUART_IN_PIN1_VAL        _SB_MAKEMASK1(1)
+#define M_DUART_IN_PIN2_VAL        _SB_MAKEMASK1(2)
+#define M_DUART_IN_PIN3_VAL        _SB_MAKEMASK1(3)
+#define M_DUART_IN_PIN4_VAL        _SB_MAKEMASK1(4)
+#define M_DUART_IN_PIN5_VAL        _SB_MAKEMASK1(5)
+#define M_DUART_RIN0_PIN           _SB_MAKEMASK1(6)
+#define M_DUART_RIN1_PIN           _SB_MAKEMASK1(7)
 
 /*
  * DUART Input Port Change Status Register (Tables 10-11, 10-12, and 10-13)
  * Register: DUART_INPORT_CHNG
  */
 
-#define S_DUART_IN_PIN_VAL          0
-#define M_DUART_IN_PIN_VAL          _SB_MAKEMASK(4, S_DUART_IN_PIN_VAL)
+#define S_DUART_IN_PIN_VAL         0
+#define M_DUART_IN_PIN_VAL         _SB_MAKEMASK(4, S_DUART_IN_PIN_VAL)
 
-#define S_DUART_IN_PIN_CHNG         4
-#define M_DUART_IN_PIN_CHNG         _SB_MAKEMASK(4, S_DUART_IN_PIN_CHNG)
+#define S_DUART_IN_PIN_CHNG        4
+#define M_DUART_IN_PIN_CHNG        _SB_MAKEMASK(4, S_DUART_IN_PIN_CHNG)
 
 
 /*
  * Register: DUART_OPCR
  */
 
-#define M_DUART_OPCR_RESERVED0      _SB_MAKEMASK1(0)   /* must be zero */
-#define M_DUART_OPC2_SEL            _SB_MAKEMASK1(1)
-#define M_DUART_OPCR_RESERVED1      _SB_MAKEMASK1(2)   /* must be zero */
-#define M_DUART_OPC3_SEL            _SB_MAKEMASK1(3)
-#define M_DUART_OPCR_RESERVED2      _SB_MAKEMASK(4, 4)  /* must be zero */
+#define M_DUART_OPCR_RESERVED0     _SB_MAKEMASK1(0)   /* must be zero */
+#define M_DUART_OPC2_SEL           _SB_MAKEMASK1(1)
+#define M_DUART_OPCR_RESERVED1     _SB_MAKEMASK1(2)   /* must be zero */
+#define M_DUART_OPC3_SEL           _SB_MAKEMASK1(3)
+#define M_DUART_OPCR_RESERVED2     _SB_MAKEMASK(4, 4)  /* must be zero */
 
 /*
  * DUART Aux Control Register (Table 10-15)
  * Register: DUART_AUX_CTRL
  */
 
-#define M_DUART_IP0_CHNG_ENA        _SB_MAKEMASK1(0)
-#define M_DUART_IP1_CHNG_ENA        _SB_MAKEMASK1(1)
-#define M_DUART_IP2_CHNG_ENA        _SB_MAKEMASK1(2)
-#define M_DUART_IP3_CHNG_ENA        _SB_MAKEMASK1(3)
-#define M_DUART_ACR_RESERVED        _SB_MAKEMASK(4, 4)
+#define M_DUART_IP0_CHNG_ENA       _SB_MAKEMASK1(0)
+#define M_DUART_IP1_CHNG_ENA       _SB_MAKEMASK1(1)
+#define M_DUART_IP2_CHNG_ENA       _SB_MAKEMASK1(2)
+#define M_DUART_IP3_CHNG_ENA       _SB_MAKEMASK1(3)
+#define M_DUART_ACR_RESERVED       _SB_MAKEMASK(4, 4)
 
-#define M_DUART_CTS_CHNG_ENA        _SB_MAKEMASK1(0)
-#define M_DUART_CIN_CHNG_ENA        _SB_MAKEMASK1(2)
+#define M_DUART_CTS_CHNG_ENA       _SB_MAKEMASK1(0)
+#define M_DUART_CIN_CHNG_ENA       _SB_MAKEMASK1(2)
 
 /*
  * DUART Interrupt Status Register (Table 10-16)
  * Register: DUART_ISR
  */
 
-#define M_DUART_ISR_TX_A            _SB_MAKEMASK1(0)
+#define M_DUART_ISR_TX_A           _SB_MAKEMASK1(0)
 
-#define S_DUART_ISR_RX_A            1
-#define M_DUART_ISR_RX_A            _SB_MAKEMASK1(S_DUART_ISR_RX_A)
-#define V_DUART_ISR_RX_A(x)         _SB_MAKEVALUE(x, S_DUART_ISR_RX_A)
-#define G_DUART_ISR_RX_A(x)         _SB_GETVALUE(x, S_DUART_ISR_RX_A, M_DUART_ISR_RX_A)
+#define S_DUART_ISR_RX_A           1
+#define M_DUART_ISR_RX_A           _SB_MAKEMASK1(S_DUART_ISR_RX_A)
+#define V_DUART_ISR_RX_A(x)        _SB_MAKEVALUE(x, S_DUART_ISR_RX_A)
+#define G_DUART_ISR_RX_A(x)        _SB_GETVALUE(x, S_DUART_ISR_RX_A, M_DUART_ISR_RX_A)
 
-#define M_DUART_ISR_BRK_A           _SB_MAKEMASK1(2)
-#define M_DUART_ISR_IN_A            _SB_MAKEMASK1(3)
+#define M_DUART_ISR_BRK_A          _SB_MAKEMASK1(2)
+#define M_DUART_ISR_IN_A           _SB_MAKEMASK1(3)
 #define M_DUART_ISR_ALL_A          _SB_MAKEMASK(4, 0)
 
-#define M_DUART_ISR_TX_B            _SB_MAKEMASK1(4)
-#define M_DUART_ISR_RX_B            _SB_MAKEMASK1(5)
-#define M_DUART_ISR_BRK_B           _SB_MAKEMASK1(6)
-#define M_DUART_ISR_IN_B            _SB_MAKEMASK1(7)
+#define M_DUART_ISR_TX_B           _SB_MAKEMASK1(4)
+#define M_DUART_ISR_RX_B           _SB_MAKEMASK1(5)
+#define M_DUART_ISR_BRK_B          _SB_MAKEMASK1(6)
+#define M_DUART_ISR_IN_B           _SB_MAKEMASK1(7)
 #define M_DUART_ISR_ALL_B          _SB_MAKEMASK(4, 4)
 
 /*
  * Register: DUART_ISR_B
  */
 
-#define M_DUART_ISR_TX              _SB_MAKEMASK1(0)
-#define M_DUART_ISR_RX              _SB_MAKEMASK1(1)
-#define M_DUART_ISR_BRK             _SB_MAKEMASK1(2)
-#define M_DUART_ISR_IN              _SB_MAKEMASK1(3)
+#define M_DUART_ISR_TX             _SB_MAKEMASK1(0)
+#define M_DUART_ISR_RX             _SB_MAKEMASK1(1)
+#define M_DUART_ISR_BRK                    _SB_MAKEMASK1(2)
+#define M_DUART_ISR_IN             _SB_MAKEMASK1(3)
 #define M_DUART_ISR_ALL                    _SB_MAKEMASK(4, 0)
-#define M_DUART_ISR_RESERVED        _SB_MAKEMASK(4, 4)
+#define M_DUART_ISR_RESERVED       _SB_MAKEMASK(4, 4)
 
 /*
  * DUART Interrupt Mask Register (Table 10-19)
  * Register: DUART_IMR
  */
 
-#define M_DUART_IMR_TX_A            _SB_MAKEMASK1(0)
-#define M_DUART_IMR_RX_A            _SB_MAKEMASK1(1)
-#define M_DUART_IMR_BRK_A           _SB_MAKEMASK1(2)
-#define M_DUART_IMR_IN_A            _SB_MAKEMASK1(3)
+#define M_DUART_IMR_TX_A           _SB_MAKEMASK1(0)
+#define M_DUART_IMR_RX_A           _SB_MAKEMASK1(1)
+#define M_DUART_IMR_BRK_A          _SB_MAKEMASK1(2)
+#define M_DUART_IMR_IN_A           _SB_MAKEMASK1(3)
 #define M_DUART_IMR_ALL_A          _SB_MAKEMASK(4, 0)
 
-#define M_DUART_IMR_TX_B            _SB_MAKEMASK1(4)
-#define M_DUART_IMR_RX_B            _SB_MAKEMASK1(5)
-#define M_DUART_IMR_BRK_B           _SB_MAKEMASK1(6)
-#define M_DUART_IMR_IN_B            _SB_MAKEMASK1(7)
-#define M_DUART_IMR_ALL_B           _SB_MAKEMASK(4, 4)
+#define M_DUART_IMR_TX_B           _SB_MAKEMASK1(4)
+#define M_DUART_IMR_RX_B           _SB_MAKEMASK1(5)
+#define M_DUART_IMR_BRK_B          _SB_MAKEMASK1(6)
+#define M_DUART_IMR_IN_B           _SB_MAKEMASK1(7)
+#define M_DUART_IMR_ALL_B          _SB_MAKEMASK(4, 4)
 
 /*
  * DUART Channel A Interrupt Mask Register (Table 10-20)
  * Register: DUART_IMR_B
  */
 
-#define M_DUART_IMR_TX              _SB_MAKEMASK1(0)
-#define M_DUART_IMR_RX              _SB_MAKEMASK1(1)
-#define M_DUART_IMR_BRK             _SB_MAKEMASK1(2)
-#define M_DUART_IMR_IN              _SB_MAKEMASK1(3)
+#define M_DUART_IMR_TX             _SB_MAKEMASK1(0)
+#define M_DUART_IMR_RX             _SB_MAKEMASK1(1)
+#define M_DUART_IMR_BRK                    _SB_MAKEMASK1(2)
+#define M_DUART_IMR_IN             _SB_MAKEMASK1(3)
 #define M_DUART_IMR_ALL                    _SB_MAKEMASK(4, 0)
-#define M_DUART_IMR_RESERVED        _SB_MAKEMASK(4, 4)
+#define M_DUART_IMR_RESERVED       _SB_MAKEMASK(4, 4)
 
 
 /*
  * Register: DUART_SET_OPR
  */
 
-#define M_DUART_SET_OPR0            _SB_MAKEMASK1(0)
-#define M_DUART_SET_OPR1            _SB_MAKEMASK1(1)
-#define M_DUART_SET_OPR2            _SB_MAKEMASK1(2)
-#define M_DUART_SET_OPR3            _SB_MAKEMASK1(3)
-#define M_DUART_OPSR_RESERVED       _SB_MAKEMASK(4, 4)
+#define M_DUART_SET_OPR0           _SB_MAKEMASK1(0)
+#define M_DUART_SET_OPR1           _SB_MAKEMASK1(1)
+#define M_DUART_SET_OPR2           _SB_MAKEMASK1(2)
+#define M_DUART_SET_OPR3           _SB_MAKEMASK1(3)
+#define M_DUART_OPSR_RESERVED      _SB_MAKEMASK(4, 4)
 
 /*
  * DUART Output Port Clear Register (Table 10-23)
  * Register: DUART_CLEAR_OPR
  */
 
-#define M_DUART_CLR_OPR0            _SB_MAKEMASK1(0)
-#define M_DUART_CLR_OPR1            _SB_MAKEMASK1(1)
-#define M_DUART_CLR_OPR2            _SB_MAKEMASK1(2)
-#define M_DUART_CLR_OPR3            _SB_MAKEMASK1(3)
-#define M_DUART_OPCR_RESERVED       _SB_MAKEMASK(4, 4)
+#define M_DUART_CLR_OPR0           _SB_MAKEMASK1(0)
+#define M_DUART_CLR_OPR1           _SB_MAKEMASK1(1)
+#define M_DUART_CLR_OPR2           _SB_MAKEMASK1(2)
+#define M_DUART_CLR_OPR3           _SB_MAKEMASK1(3)
+#define M_DUART_OPCR_RESERVED      _SB_MAKEMASK(4, 4)
 
 /*
  * DUART Output Port RTS Register (Table 10-24)
  * Register: DUART_OUT_PORT
  */
 
-#define M_DUART_OUT_PIN_SET0        _SB_MAKEMASK1(0)
-#define M_DUART_OUT_PIN_SET1        _SB_MAKEMASK1(1)
-#define M_DUART_OUT_PIN_CLR0        _SB_MAKEMASK1(2)
-#define M_DUART_OUT_PIN_CLR1        _SB_MAKEMASK1(3)
-#define M_DUART_OPRR_RESERVED       _SB_MAKEMASK(4, 4)
+#define M_DUART_OUT_PIN_SET0       _SB_MAKEMASK1(0)
+#define M_DUART_OUT_PIN_SET1       _SB_MAKEMASK1(1)
+#define M_DUART_OUT_PIN_CLR0       _SB_MAKEMASK1(2)
+#define M_DUART_OUT_PIN_CLR1       _SB_MAKEMASK1(3)
+#define M_DUART_OPRR_RESERVED      _SB_MAKEMASK(4, 4)
 
 #define M_DUART_OUT_PIN_SET(chan) \
     (chan == 0 ? M_DUART_OUT_PIN_SET0 : M_DUART_OUT_PIN_SET1)
  * Full Interrupt Control Register
  */
 
-#define S_DUART_SIG_FULL           _SB_MAKE64(0)
-#define M_DUART_SIG_FULL           _SB_MAKEMASK(4, S_DUART_SIG_FULL)
-#define V_DUART_SIG_FULL(x)        _SB_MAKEVALUE(x, S_DUART_SIG_FULL)
-#define G_DUART_SIG_FULL(x)        _SB_GETVALUE(x, S_DUART_SIG_FULL, M_DUART_SIG_FULL)
+#define S_DUART_SIG_FULL          _SB_MAKE64(0)
+#define M_DUART_SIG_FULL          _SB_MAKEMASK(4, S_DUART_SIG_FULL)
+#define V_DUART_SIG_FULL(x)       _SB_MAKEVALUE(x, S_DUART_SIG_FULL)
+#define G_DUART_SIG_FULL(x)       _SB_GETVALUE(x, S_DUART_SIG_FULL, M_DUART_SIG_FULL)
 
-#define S_DUART_INT_TIME           _SB_MAKE64(4)
-#define M_DUART_INT_TIME           _SB_MAKEMASK(4, S_DUART_INT_TIME)
-#define V_DUART_INT_TIME(x)        _SB_MAKEVALUE(x, S_DUART_INT_TIME)
-#define G_DUART_INT_TIME(x)        _SB_GETVALUE(x, S_DUART_INT_TIME, M_DUART_INT_TIME)
+#define S_DUART_INT_TIME          _SB_MAKE64(4)
+#define M_DUART_INT_TIME          _SB_MAKEMASK(4, S_DUART_INT_TIME)
+#define V_DUART_INT_TIME(x)       _SB_MAKEVALUE(x, S_DUART_INT_TIME)
+#define G_DUART_INT_TIME(x)       _SB_GETVALUE(x, S_DUART_INT_TIME, M_DUART_INT_TIME)
 #endif /* 1250 PASS2 || 112x PASS1 || 1480 */
 
 
index 64c4787..0351a46 100644 (file)
 
 /* Generic bus chip selects */
 #ifdef CONFIG_SIBYTE_RHONE
-#define LEDS_CS         6
-#define LEDS_PHYS       0x1d0a0000
+#define LEDS_CS                6
+#define LEDS_PHYS      0x1d0a0000
 #endif
 
 /* GPIOs */
-#define K_GPIO_DBG_LED  0
+#define K_GPIO_DBG_LED 0
 
 #endif /* __ASM_SIBYTE_SENTOSA_H */
index 114d9d2..187cfb1 100644 (file)
 #ifdef CONFIG_SIBYTE_SWARM
 #define SIBYTE_BOARD_NAME "BCM91250A (SWARM)"
 #define SIBYTE_HAVE_PCMCIA 1
-#define SIBYTE_HAVE_IDE    1
+#define SIBYTE_HAVE_IDE           1
 #endif
 #ifdef CONFIG_SIBYTE_LITTLESUR
 #define SIBYTE_BOARD_NAME "BCM91250C2 (LittleSur)"
 #define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE    1
+#define SIBYTE_HAVE_IDE           1
 #define SIBYTE_DEFAULT_CONSOLE "cfe0"
 #endif
 #ifdef CONFIG_SIBYTE_CRHONE
 #define SIBYTE_BOARD_NAME "BCM91125C (CRhone)"
 #define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE    0
+#define SIBYTE_HAVE_IDE           0
 #endif
 #ifdef CONFIG_SIBYTE_CRHINE
 #define SIBYTE_BOARD_NAME "BCM91120C (CRhine)"
 #define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE    0
+#define SIBYTE_HAVE_IDE           0
 #endif
 
 /* Generic bus chip selects */
-#define LEDS_CS         3
-#define LEDS_PHYS       0x100a0000
+#define LEDS_CS                3
+#define LEDS_PHYS      0x100a0000
 
 #ifdef SIBYTE_HAVE_IDE
-#define IDE_CS          4
-#define IDE_PHYS        0x100b0000
-#define K_GPIO_GB_IDE   4
-#define K_INT_GB_IDE    (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define IDE_CS         4
+#define IDE_PHYS       0x100b0000
+#define K_GPIO_GB_IDE  4
+#define K_INT_GB_IDE   (K_INT_GPIO_0 + K_GPIO_GB_IDE)
 #endif
 
 #ifdef SIBYTE_HAVE_PCMCIA
-#define PCMCIA_CS       6
-#define PCMCIA_PHYS     0x11000000
+#define PCMCIA_CS      6
+#define PCMCIA_PHYS    0x11000000
 #define K_GPIO_PC_READY 9
-#define K_INT_PC_READY  (K_INT_GPIO_0 + K_GPIO_PC_READY)
+#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
 #endif
 
 #endif /* __ASM_SIBYTE_SWARM_H */
index f33b5fd..eb60087 100644 (file)
@@ -26,7 +26,7 @@ extern cpumask_t cpu_sibling_map[];
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 /* Map from cpu id to sequential logical cpu number.  This will only
-   not be idempotent when cpus failed to come on-line.  */
+   not be idempotent when cpus failed to come on-line. */
 extern int __cpu_number_map[NR_CPUS];
 #define cpu_number_map(cpu)  __cpu_number_map[cpu]
 
@@ -36,7 +36,7 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define NO_PROC_ID     (-1)
 
-#define SMP_RESCHEDULE_YOURSELF        0x1     /* XXX braindead */
+#define SMP_RESCHEDULE_YOURSELF 0x1    /* XXX braindead */
 #define SMP_CALL_FUNCTION      0x2
 /* Octeon - Tell another core to flush its icache */
 #define SMP_ICACHE_FLUSH       0x4
@@ -62,14 +62,14 @@ static inline void smp_send_reschedule(int cpu)
 #ifdef CONFIG_HOTPLUG_CPU
 static inline int __cpu_disable(void)
 {
-       extern struct plat_smp_ops *mp_ops;     /* private */
+       extern struct plat_smp_ops *mp_ops;     /* private */
 
        return mp_ops->cpu_disable();
 }
 
 static inline void __cpu_die(unsigned int cpu)
 {
-       extern struct plat_smp_ops *mp_ops;     /* private */
+       extern struct plat_smp_ops *mp_ops;     /* private */
 
        mp_ops->cpu_die(cpu);
 }
@@ -81,14 +81,14 @@ extern asmlinkage void smp_call_function_interrupt(void);
 
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
-       extern struct plat_smp_ops *mp_ops;     /* private */
+       extern struct plat_smp_ops *mp_ops;     /* private */
 
        mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
 }
 
 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-       extern struct plat_smp_ops *mp_ops;     /* private */
+       extern struct plat_smp_ops *mp_ops;     /* private */
 
        mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 }
index 8935426..e56b439 100644 (file)
@@ -14,8 +14,8 @@
 
 extern unsigned int smtc_status;
 
-#define SMTC_TLB_SHARED        0x00000001
-#define SMTC_MTC_ACTIVE        0x00000002
+#define SMTC_TLB_SHARED 0x00000001
+#define SMTC_MTC_ACTIVE 0x00000002
 
 /*
  * TLB/ASID Management information
index 2367b56..66814f8 100644 (file)
@@ -88,8 +88,8 @@
 
 #define SWIN_SIZE_BITS         24
 #define SWIN_SIZE              (UINT64_CAST 1 << 24)
-#define        SWIN_SIZEMASK           (SWIN_SIZE - 1)
-#define        SWIN_WIDGET_MASK        0xF
+#define SWIN_SIZEMASK          (SWIN_SIZE - 1)
+#define SWIN_WIDGET_MASK       0xF
 
 /*
  * Convert smallwindow address to xtalk address.
@@ -97,8 +97,8 @@
  * 'addr' can be physical or virtual address, but will be converted
  * to Xtalk address in the range 0 -> SWINZ_SIZEMASK
  */
-#define        SWIN_WIDGETADDR(addr)   ((addr) & SWIN_SIZEMASK)
-#define        SWIN_WIDGETNUM(addr)    (((addr)  >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+#define SWIN_WIDGETADDR(addr)  ((addr) & SWIN_SIZEMASK)
+#define SWIN_WIDGETNUM(addr)   (((addr)  >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
 /*
  * Verify if addr belongs to small window address on node with "nasid"
  *
  *
  *
  */
-#define        NODE_SWIN_ADDR(nasid, addr)     \
+#define NODE_SWIN_ADDR(nasid, addr)    \
                (((addr) >= NODE_SWIN_BASE(nasid, 0))  && \
                 ((addr) <  (NODE_SWIN_BASE(nasid, HUB_NUM_WIDGET) + SWIN_SIZE)\
                 ))
 
 #endif
 
-#define        HUB_REGISTER_WIDGET     1
+#define HUB_REGISTER_WIDGET    1
 #define IALIAS_BASE            NODE_SWIN_BASE(0, HUB_REGISTER_WIDGET)
 #define IALIAS_SIZE            0x800000        /* 8 Megabytes */
 #define IS_IALIAS(_a)          (((_a) >= IALIAS_BASE) &&               \
  *   WARNING: They won't work in assembler.
  *
  *   BDDIR_ENTRY_LO returns the address of the low double-word of the dir
- *                  entry corresponding to a physical (Cac or Uncac) address.
+ *                 entry corresponding to a physical (Cac or Uncac) address.
  *   BDDIR_ENTRY_HI returns the address of the high double-word of the entry.
  *   BDPRT_ENTRY    returns the address of the double-word protection entry
- *                  corresponding to the page containing the physical address.
+ *                 corresponding to the page containing the physical address.
  *   BDPRT_ENTRY_S  Stores the value into the protection entry.
  *   BDPRT_ENTRY_L  Load the value from the protection entry.
  *   BDECC_ENTRY    returns the address of the ECC byte corresponding to a
- *                  double-word at a specified physical address.
+ *                 double-word at a specified physical address.
  *   BDECC_ENTRY_H  returns the address of the two ECC bytes corresponding to a
- *                  quad-word at a specified physical address.
+ *                 quad-word at a specified physical address.
  */
 #define NODE_BDOOR_BASE(_n)    (NODE_HSPEC_BASE(_n) + (NODE_ADDRSPACE_SIZE/2))
 
 #define BDADDR_IS_DIR(_ba)     ((UINT64_CAST  (_ba) & 0x200) != 0)
 #define BDADDR_IS_PRT(_ba)     ((UINT64_CAST  (_ba) & 0x200) == 0)
 
-#define BDDIR_TO_MEM(_ba)      (UINT64_CAST (_ba) & NASID_MASK            | \
+#define BDDIR_TO_MEM(_ba)      (UINT64_CAST (_ba) & NASID_MASK            | \
                                 (UINT64_CAST(_ba) & BDDIR_UPPER_MASK)<<2  | \
                                 (UINT64_CAST(_ba) & 0x1f << 4) << 3)
 
-#define BDPRT_TO_MEM(_ba)      (UINT64_CAST (_ba) & NASID_MASK     | \
+#define BDPRT_TO_MEM(_ba)      (UINT64_CAST (_ba) & NASID_MASK     | \
                                 (UINT64_CAST(_ba) & BDDIR_UPPER_MASK)<<2)
 
 #define BDECC_TO_MEM(_ba)      (UINT64_CAST (_ba) & NASID_MASK     | \
 /*
  * WARNING:
  *     When certain Hub chip workaround are defined, it's not sufficient
- *     to dereference the *_HUB_ADDR() macros.  You should instead use
+ *     to dereference the *_HUB_ADDR() macros.  You should instead use
  *     HUB_L() and HUB_S() if you must deal with pointers to hub registers.
  *     Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
  *     They're always safe.
  */
 #define LOCAL_HUB_ADDR(_x)     (HUBREG_CAST (IALIAS_BASE + (_x)))
-#define REMOTE_HUB_ADDR(_n, _x)        (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) +   \
+#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) +  \
                                              0x800000 + (_x)))
 #ifdef CONFIG_SGI_IP27
-#define REMOTE_HUB_PI_ADDR(_n, _sn, _x)        (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) +   \
+#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) +  \
                                              0x800000 + (_x)))
 #endif /* CONFIG_SGI_IP27 */
 
 #ifndef __ASSEMBLY__
 
 #define HUB_L(_a)                      *(_a)
-#define        HUB_S(_a, _d)                   *(_a) = (_d)
+#define HUB_S(_a, _d)                  *(_a) = (_d)
 
 #define LOCAL_HUB_L(_r)                        HUB_L(LOCAL_HUB_ADDR(_r))
 #define LOCAL_HUB_S(_r, _d)            HUB_S(LOCAL_HUB_ADDR(_r), (_d))
 
 #define KLI_LAUNCH             0               /* Dir. entries */
 #define KLI_KLCONFIG           1
-#define        KLI_NMI                 2
+#define KLI_NMI                        2
 #define KLI_GDA                        3
 #define KLI_FREEMEM            4
-#define        KLI_SYMMON_STK          5
+#define KLI_SYMMON_STK         5
 #define KLI_PI_ERROR           6
 #define KLI_KERN_VARS          7
-#define        KLI_KERN_XP             8
-#define        KLI_KERN_PARTID         9
+#define KLI_KERN_XP            8
+#define KLI_KERN_PARTID                9
 
 #ifndef __ASSEMBLY__
 
 #define KLD_SYMMON_STK(nasid)  (KLD_BASE(nasid) + KLI_SYMMON_STK)
 #define KLD_FREEMEM(nasid)     (KLD_BASE(nasid) + KLI_FREEMEM)
 #define KLD_KERN_VARS(nasid)   (KLD_BASE(nasid) + KLI_KERN_VARS)
-#define        KLD_KERN_XP(nasid)      (KLD_BASE(nasid) + KLI_KERN_XP)
-#define        KLD_KERN_PARTID(nasid)  (KLD_BASE(nasid) + KLI_KERN_PARTID)
+#define KLD_KERN_XP(nasid)     (KLD_BASE(nasid) + KLI_KERN_XP)
+#define KLD_KERN_PARTID(nasid) (KLD_BASE(nasid) + KLI_KERN_PARTID)
 
 #define LAUNCH_OFFSET(nasid, slice)                                    \
        (KLD_LAUNCH(nasid)->offset +                                    \
         KLD_NMI(nasid)->stride * (slice))
 #define NMI_ADDR(nasid, slice)                                         \
        TO_NODE_UNCAC((nasid), SN_NMI_OFFSET(nasid, slice))
-#define NMI_SIZE(nasid)        KLD_NMI(nasid)->size
+#define NMI_SIZE(nasid) KLD_NMI(nasid)->size
 
 #define KLCONFIG_OFFSET(nasid) KLD_KLCONFIG(nasid)->offset
 #define KLCONFIG_ADDR(nasid)                                           \
 /* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a
  * relocatable program
  */
-#define        UNIX_DEBUG_LOADADDR     0x300000
-#define        SYMMON_LOADADDR(nasid)                                          \
+#define UNIX_DEBUG_LOADADDR    0x300000
+#define SYMMON_LOADADDR(nasid)                                         \
        TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000))
 
 #define FREEMEM_OFFSET(nasid)  KLD_FREEMEM(nasid)->offset
 #define KERN_VARS_ADDR(nasid)  KLD_KERN_VARS(nasid)->pointer
 #define KERN_VARS_SIZE(nasid)  KLD_KERN_VARS(nasid)->size
 
-#define        KERN_XP_ADDR(nasid)     KLD_KERN_XP(nasid)->pointer
-#define        KERN_XP_SIZE(nasid)     KLD_KERN_XP(nasid)->size
+#define KERN_XP_ADDR(nasid)    KLD_KERN_XP(nasid)->pointer
+#define KERN_XP_SIZE(nasid)    KLD_KERN_XP(nasid)->size
 
 #define GPDA_ADDR(nasid)       TO_NODE_CAC(nasid, GPDA_OFFSET)
 
index dc81114..e33d092 100644 (file)
  */
 
 #if defined(CONFIG_SGI_IP27)
-#define HUB_NIC_ADDR(_cpuid)                                              \
-       REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cpu_to_node(_cpuid)),       \
+#define HUB_NIC_ADDR(_cpuid)                                              \
+       REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cpu_to_node(_cpuid)),       \
                MD_MLAN_CTL)
 #endif
 
-#define SET_HUB_NIC(_my_cpuid, _val)                                      \
+#define SET_HUB_NIC(_my_cpuid, _val)                                      \
        (HUB_S(HUB_NIC_ADDR(_my_cpuid), (_val)))
 
-#define SET_MY_HUB_NIC(_v)                                                \
+#define SET_MY_HUB_NIC(_v)                                                \
        SET_HUB_NIC(cpuid(), (_v))
 
-#define GET_HUB_NIC(_my_cpuid)                                                    \
+#define GET_HUB_NIC(_my_cpuid)                                            \
        (HUB_L(HUB_NIC_ADDR(_my_cpuid)))
 
-#define GET_MY_HUB_NIC()                                                  \
+#define GET_MY_HUB_NIC()                                                  \
        GET_HUB_NIC(cpuid())
 
 #endif /* _ASM_SGI_SN_AGENT_H */
index bd75945..471e687 100644 (file)
@@ -28,14 +28,14 @@ typedef u64 hubreg_t;
 #define INVALID_CNODEID                (cnodeid_t)-1
 #define INVALID_PNODEID                (pnodeid_t)-1
 #define INVALID_MODULE         (moduleid_t)-1
-#define        INVALID_PARTID          (partid_t)-1
+#define INVALID_PARTID         (partid_t)-1
 
 extern nasid_t get_nasid(void);
 extern cnodeid_t get_cpu_cnode(cpuid_t);
 extern int get_cpu_slice(cpuid_t);
 
 /*
- * NO ONE should access these arrays directly.  The only reason we refer to
+ * NO ONE should access these arrays directly. The only reason we refer to
  * them here is to avoid the procedure call that would be required in the
  * macros below.  (Really want private data members here :-)
  */
@@ -44,12 +44,12 @@ extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
 
 /*
  * These macros are used by various parts of the kernel to convert
- * between the three different kinds of node numbering.   At least some
+ * between the three different kinds of node numbering.          At least some
  * of them may change to procedure calls in the future, but the macros
  * will continue to work.  Don't use the arrays above directly.
  */
 
-#define        NASID_TO_REGION(nnode)          \
+#define NASID_TO_REGION(nnode)         \
     ((nnode) >> \
      (is_fine_dirmode() ? NASID_TO_FINEREG_SHFT : NASID_TO_COARSEREG_SHFT))
 
index b3e3606..bbb8325 100644 (file)
@@ -21,24 +21,24 @@ typedef struct kf_mem_s {
                                     * is this necessary ?
                                     */
        confidence_t km_dimm[MAX_DIMMS];
-                                   /* confidence level that dimm[i] is bad
+                                   /* confidence level that dimm[i] is bad
                                     *I think this is the right number
                                     */
 
 } kf_mem_t;
 
 typedef struct kf_cpu_s {
-       confidence_t    kc_confidence; /* confidence level that cpu is bad */
-       confidence_t    kc_icache; /* confidence level that instr. cache is bad */
-       confidence_t    kc_dcache; /* confidence level that data   cache is bad */
-       confidence_t    kc_scache; /* confidence level that sec.   cache is bad */
+       confidence_t    kc_confidence; /* confidence level that cpu is bad */
+       confidence_t    kc_icache; /* confidence level that instr. cache is bad */
+       confidence_t    kc_dcache; /* confidence level that data   cache is bad */
+       confidence_t    kc_scache; /* confidence level that sec.   cache is bad */
        confidence_t    kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
 } kf_cpu_t;
 
 typedef struct kf_pci_bus_s {
        confidence_t    kpb_belief;     /* confidence level  that the  pci bus is bad */
        confidence_t    kpb_pcidev_belief[MAX_PCIDEV];
-                                       /* confidence level that the pci dev is bad */
+                                       /* confidence level that the pci dev is bad */
 } kf_pci_bus_t;
 
 #endif /* __ASM_SN_FRU_H */
index 9cb6ff7..85fa1b5 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
  *
  * gda.h -- Contains the data structure for the global data area,
- *     The GDA contains information communicated between the
+ *     The GDA contains information communicated between the
  *     PROM, SYMMON, and the kernel.
  */
 #ifndef _ASM_SN_GDA_H
@@ -23,8 +23,8 @@
  *
  * Version #   | Change
  * -------------+-------------------------------------------------------
- *     1       | Initial SN0 version
- *     2       | Prom sets g_partid field to the partition number. 0 IS
+ *     1       | Initial SN0 version
+ *     2       | Prom sets g_partid field to the partition number. 0 IS
  *             | a valid partition #.
  */
 
@@ -60,7 +60,7 @@ typedef struct gda {
                                /* Pointer to a mask of nodes with copies
                                 * of the kernel. */
        char    g_padding[56];  /* pad out to 128 bytes */
-       nasid_t g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
+       nasid_t g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
                                                  * indexed by cnodeid.
                                                  */
 } gda_t;
@@ -74,7 +74,7 @@ typedef struct gda {
  *             revisions assume GDA is NOT set up, and read partition
  *             information from the board info.
  */
-#define        PART_GDA_VERSION        2
+#define PART_GDA_VERSION       2
 
 /*
  * The following requests can be sent to the PROM during startup.
@@ -83,17 +83,17 @@ typedef struct gda {
 #define PROMOP_MAGIC           0x0ead0000
 #define PROMOP_MAGIC_MASK      0x0fff0000
 
-#define PROMOP_BIST_SHIFT       11
-#define PROMOP_BIST_MASK        (0x3 << 11)
+#define PROMOP_BIST_SHIFT      11
+#define PROMOP_BIST_MASK       (0x3 << 11)
 
 #define PROMOP_REG             PI_ERR_STACK_ADDR_A
 
 #define PROMOP_INVALID         (PROMOP_MAGIC | 0x00)
-#define PROMOP_HALT             (PROMOP_MAGIC | 0x10)
-#define PROMOP_POWERDOWN        (PROMOP_MAGIC | 0x20)
-#define PROMOP_RESTART          (PROMOP_MAGIC | 0x30)
-#define PROMOP_REBOOT           (PROMOP_MAGIC | 0x40)
-#define PROMOP_IMODE            (PROMOP_MAGIC | 0x50)
+#define PROMOP_HALT            (PROMOP_MAGIC | 0x10)
+#define PROMOP_POWERDOWN       (PROMOP_MAGIC | 0x20)
+#define PROMOP_RESTART         (PROMOP_MAGIC | 0x30)
+#define PROMOP_REBOOT          (PROMOP_MAGIC | 0x40)
+#define PROMOP_IMODE           (PROMOP_MAGIC | 0x50)
 
 #define PROMOP_CMD_MASK                0x00f0
 #define PROMOP_OPTIONS_MASK    0xfff0
index 6718b64..fc13481 100644 (file)
@@ -14,8 +14,8 @@
 #define INT_PEND0_BASELVL      0
 #define INT_PEND1_BASELVL      64
 
-#define        N_INTPENDJUNK_BITS      8
-#define        INTPENDJUNK_CLRBIT      0x80
+#define N_INTPENDJUNK_BITS     8
+#define INTPENDJUNK_CLRBIT     0x80
 
 /*
  * Macros to manipulate the interrupt register on the calling hub chip.
@@ -32,7 +32,7 @@
  * We do an uncached load of the int_pend0 register to ensure this.
  */
 
-#define LOCAL_HUB_CLR_INTR(level)                              \
+#define LOCAL_HUB_CLR_INTR(level)                              \
 do {                                                           \
        LOCAL_HUB_S(PI_INT_PEND_MOD, (level));                  \
        LOCAL_HUB_L(PI_INT_PEND0);                              \
@@ -40,7 +40,7 @@ do {                                                          \
 
 #define REMOTE_HUB_CLR_INTR(hub, level)                                \
 do {                                                           \
-       nasid_t  __hub = (hub);                                 \
+       nasid_t  __hub = (hub);                                 \
                                                                \
        REMOTE_HUB_S(__hub, PI_INT_PEND_MOD, (level));          \
        REMOTE_HUB_L(__hub, PI_INT_PEND0);                      \
@@ -102,8 +102,8 @@ do {                                                                \
 #define LLP_PFAIL_INTR_A       41      /* see ml/SN/SN0/sysctlr.c */
 #define LLP_PFAIL_INTR_B       42
 
-#define        TLB_INTR_A              43      /* used for tlb flush random */
-#define        TLB_INTR_B              44
+#define TLB_INTR_A             43      /* used for tlb flush random */
+#define TLB_INTR_B             44
 
 #define IP27_INTR_0            45      /* Reserved for PROM use */
 #define IP27_INTR_1            46      /* do not use in Kernel */
@@ -116,8 +116,8 @@ do {                                                                \
 
 #define BRIDGE_ERROR_INTR      53      /* Setup by PROM to catch       */
                                        /* Bridge Errors */
-#define        DEBUG_INTR_A            54
-#define        DEBUG_INTR_B            55      /* Used by symmon to stop all cpus */
+#define DEBUG_INTR_A           54
+#define DEBUG_INTR_B           55      /* Used by symmon to stop all cpus */
 #define IO_ERROR_INTR          57      /* Setup by PROM */
 #define CLK_ERR_INTR           58
 #define COR_ERR_INTR_A         59
index 24c6775..d5174d0 100644 (file)
@@ -31,7 +31,7 @@
 #define HUB_PIO_MAP_TO_MEM     0
 #define HUB_PIO_MAP_TO_IO      1
 
-#define IIO_ITTE_INVALID_WIDGET        3       /* an invalid widget  */
+#define IIO_ITTE_INVALID_WIDGET 3      /* an invalid widget  */
 
 #define IIO_ITTE_PUT(nasid, bigwin, io_or_mem, widget, addr) \
        REMOTE_HUB_S((nasid), IIO_ITTE(bigwin), \
@@ -52,7 +52,7 @@
  * value _x is expected to be a widget number in the range
  * 0, 8 - 0xF
  */
-#define        IIO_IOPRB(_x)   (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
+#define IIO_IOPRB(_x)  (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
                        (_x) : \
                        (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
 
index 0996777..e33f036 100644 (file)
@@ -62,8 +62,8 @@ struct ioc3_sioregs {
 
        volatile u8             fill3[0x170 - 0x169 - 1];
 
-       struct ioc3_uartregs    uartb;  /* 0x20170  */
-       struct ioc3_uartregs    uarta;  /* 0x20178  */
+       struct ioc3_uartregs    uartb;  /* 0x20170  */
+       struct ioc3_uartregs    uarta;  /* 0x20178  */
 };
 
 /* Register layout of IOC3 in configuration space.  */
@@ -106,7 +106,7 @@ struct ioc3 {
        volatile u32    ppbr_l_b;       /* 0x00094  */
        volatile u32    ppcr_b;         /* 0x00098  */
 
-       /* Keyboard and Mouse Registers  */
+       /* Keyboard and Mouse Registers  */
        volatile u32    km_csr;         /* 0x0009c  */
        volatile u32    k_rd;           /* 0x000a0  */
        volatile u32    m_rd;           /* 0x000a4  */
@@ -208,7 +208,7 @@ struct ioc3_erxbuf {
 /*
  * Ethernet TX Descriptor
  */
-#define ETXD_DATALEN    104
+#define ETXD_DATALEN   104
 struct ioc3_etxd {
        u32     cmd;                            /* command field */
        u32     bufcnt;                         /* buffer counts field */
index fe02900..467c313 100644 (file)
@@ -8,8 +8,8 @@
  * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 1999, 2000 by Ralf Baechle
  */
-#ifndef        _ASM_SN_KLCONFIG_H
-#define        _ASM_SN_KLCONFIG_H
+#ifndef _ASM_SN_KLCONFIG_H
+#define _ASM_SN_KLCONFIG_H
 
 /*
  * The KLCONFIG structures store info about the various BOARDs found
 /*
  * WARNING:
  *     Certain assembly language routines (notably xxxxx.s) in the IP27PROM
- *     will depend on the format of the data structures in this file.  In
- *      most cases, rearranging the fields can seriously break things.
- *      Adding fields in the beginning or middle can also break things.
- *      Add fields if necessary, to the end of a struct in such a way
- *      that offsets of existing fields do not change.
+ *     will depend on the format of the data structures in this file.  In
+ *     most cases, rearranging the fields can seriously break things.
+ *     Adding fields in the beginning or middle can also break things.
+ *     Add fields if necessary, to the end of a struct in such a way
+ *     that offsets of existing fields do not change.
  */
 
 #include <linux/types.h>
@@ -35,7 +35,7 @@
 #include <asm/sn/sn0/addrs.h>
 //#include <sys/SN/router.h>
 // XXX Stolen from <sys/SN/router.h>:
-#define MAX_ROUTER_PORTS (6)    /* Max. number of ports on a router */
+#define MAX_ROUTER_PORTS (6)   /* Max. number of ports on a router */
 #include <asm/sn/fru.h>
 //#include <sys/graph.h>
 //#include <sys/xtalk/xbow.h>
 
 typedef u64  nic_t;
 
-#define KLCFGINFO_MAGIC        0xbeedbabe
+#define KLCFGINFO_MAGIC 0xbeedbabe
 
 typedef s32 klconf_off_t;
 
 /*
  * Some IMPORTANT OFFSETS. These are the offsets on all NODES.
  */
-#define        MAX_MODULE_ID           255
+#define MAX_MODULE_ID          255
 #define SIZE_PAD               4096 /* 4k padding for structures */
 /*
  * 1 NODE brd, 2 Router brd (1 8p, 1 meta), 6 Widgets,
@@ -86,25 +86,25 @@ typedef s32 klconf_off_t;
 /* All bits in this field are currently used. Try the pad fields if
    you need more flag bits */
 
-#define ENABLE_BOARD           0x01
-#define FAILED_BOARD           0x02
-#define DUPLICATE_BOARD        0x04    /* Boards like midplanes/routers which
+#define ENABLE_BOARD           0x01
+#define FAILED_BOARD           0x02
+#define DUPLICATE_BOARD                0x04    /* Boards like midplanes/routers which
                                           are discovered twice. Use one of them */
 #define VISITED_BOARD          0x08    /* Used for compact hub numbering. */
-#define LOCAL_MASTER_IO6       0x10    /* master io6 for that node */
+#define LOCAL_MASTER_IO6       0x10    /* master io6 for that node */
 #define GLOBAL_MASTER_IO6      0x20
-#define THIRD_NIC_PRESENT      0x40    /* for future use */
-#define SECOND_NIC_PRESENT     0x80    /* addons like MIO are present */
+#define THIRD_NIC_PRESENT      0x40    /* for future use */
+#define SECOND_NIC_PRESENT     0x80    /* addons like MIO are present */
 
 /* klinfo->flags fields */
 
-#define KLINFO_ENABLE          0x01    /* This component is enabled */
-#define KLINFO_FAILED          0x02    /* This component failed */
-#define KLINFO_DEVICE          0x04    /* This component is a device */
-#define KLINFO_VISITED         0x08    /* This component has been visited */
-#define KLINFO_CONTROLLER      0x10    /* This component is a device controller */
-#define KLINFO_INSTALL         0x20    /* Install a driver */
-#define        KLINFO_HEADLESS         0x40    /* Headless (or hubless) component */
+#define KLINFO_ENABLE          0x01    /* This component is enabled */
+#define KLINFO_FAILED          0x02    /* This component failed */
+#define KLINFO_DEVICE          0x04    /* This component is a device */
+#define KLINFO_VISITED         0x08    /* This component has been visited */
+#define KLINFO_CONTROLLER      0x10    /* This component is a device controller */
+#define KLINFO_INSTALL         0x20    /* Install a driver */
+#define KLINFO_HEADLESS                0x40    /* Headless (or hubless) component */
 #define IS_CONSOLE_IOC3(i)     ((((klinfo_t *)i)->flags) & KLINFO_INSTALL)
 
 #define GB2            0x80000000
@@ -116,30 +116,30 @@ typedef s32 klconf_off_t;
    is used in the code to allocate various areas.
 */
 
-#define BOARD_STRUCT           0
-#define COMPONENT_STRUCT       1
-#define ERRINFO_STRUCT                 2
-#define KLMALLOC_TYPE_MAX      (ERRINFO_STRUCT + 1)
-#define DEVICE_STRUCT          3
+#define BOARD_STRUCT           0
+#define COMPONENT_STRUCT       1
+#define ERRINFO_STRUCT         2
+#define KLMALLOC_TYPE_MAX      (ERRINFO_STRUCT + 1)
+#define DEVICE_STRUCT          3
 
 
 typedef struct console_s {
-       unsigned long   uart_base;
-       unsigned long   config_base;
-       unsigned long   memory_base;
+       unsigned long   uart_base;
+       unsigned long   config_base;
+       unsigned long   memory_base;
        short           baud;
        short           flag;
        int             type;
        nasid_t         nasid;
        char            wid;
-       char            npci;
+       char            npci;
        nic_t           baseio_nic;
 } console_t;
 
 typedef struct klc_malloc_hdr {
-        klconf_off_t km_base;
-        klconf_off_t km_limit;
-        klconf_off_t km_current;
+       klconf_off_t km_base;
+       klconf_off_t km_limit;
+       klconf_off_t km_current;
 } klc_malloc_hdr_t;
 
 /* Functions/macros needed to use this structure */
@@ -148,7 +148,7 @@ typedef struct kl_config_hdr {
        u64             ch_magic;       /* set this to KLCFGINFO_MAGIC */
        u32             ch_version;    /* structure version number */
        klconf_off_t    ch_malloc_hdr_off; /* offset of ch_malloc_hdr */
-       klconf_off_t    ch_cons_off;       /* offset of ch_cons */
+       klconf_off_t    ch_cons_off;       /* offset of ch_cons */
        klconf_off_t    ch_board_info;  /* the link list of boards */
        console_t       ch_cons_info;   /* address info of the console */
        klc_malloc_hdr_t ch_malloc_hdr[KLMALLOC_TYPE_MAX];
@@ -157,27 +157,27 @@ typedef struct kl_config_hdr {
 } kl_config_hdr_t;
 
 
-#define KL_CONFIG_HDR(_nasid)  ((kl_config_hdr_t *)(KLCONFIG_ADDR(_nasid)))
+#define KL_CONFIG_HDR(_nasid)  ((kl_config_hdr_t *)(KLCONFIG_ADDR(_nasid)))
 #define KL_CONFIG_INFO_OFFSET(_nasid)                                  \
-        (KL_CONFIG_HDR(_nasid)->ch_board_info)
+       (KL_CONFIG_HDR(_nasid)->ch_board_info)
 #define KL_CONFIG_INFO_SET_OFFSET(_nasid, _off)                                \
-        (KL_CONFIG_HDR(_nasid)->ch_board_info = (_off))
+       (KL_CONFIG_HDR(_nasid)->ch_board_info = (_off))
 
-#define KL_CONFIG_INFO(_nasid)                                                 \
-        (lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ?          \
+#define KL_CONFIG_INFO(_nasid)                                         \
+       (lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ?           \
         NODE_OFFSET_TO_K1((_nasid), KL_CONFIG_HDR(_nasid)->ch_board_info) : \
         0)
 #define KL_CONFIG_MAGIC(_nasid)                (KL_CONFIG_HDR(_nasid)->ch_magic)
 
 #define KL_CONFIG_CHECK_MAGIC(_nasid)                                  \
-        (KL_CONFIG_HDR(_nasid)->ch_magic == KLCFGINFO_MAGIC)
+       (KL_CONFIG_HDR(_nasid)->ch_magic == KLCFGINFO_MAGIC)
 
 #define KL_CONFIG_HDR_INIT_MAGIC(_nasid)       \
-                  (KL_CONFIG_HDR(_nasid)->ch_magic = KLCFGINFO_MAGIC)
+                 (KL_CONFIG_HDR(_nasid)->ch_magic = KLCFGINFO_MAGIC)
 
 /* --- New Macros for the changed kl_config_hdr_t structure --- */
 
-#define PTR_CH_MALLOC_HDR(_k)   ((klc_malloc_hdr_t *)\
+#define PTR_CH_MALLOC_HDR(_k)  ((klc_malloc_hdr_t *)\
                        ((unsigned long)_k + (_k->ch_malloc_hdr_off)))
 
 #define KL_CONFIG_CH_MALLOC_HDR(_n)   PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
@@ -190,29 +190,29 @@ typedef struct kl_config_hdr {
 /* ------------------------------------------------------------- */
 
 #define KL_CONFIG_INFO_START(_nasid)   \
-        (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
+       (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
 
 #define KL_CONFIG_BOARD_NASID(_brd)    ((_brd)->brd_nasid)
 #define KL_CONFIG_BOARD_SET_NEXT(_brd, _off)   ((_brd)->brd_next = (_off))
 
-#define KL_CONFIG_DUPLICATE_BOARD(_brd)        ((_brd)->brd_flags & DUPLICATE_BOARD)
+#define KL_CONFIG_DUPLICATE_BOARD(_brd) ((_brd)->brd_flags & DUPLICATE_BOARD)
 
-#define XBOW_PORT_TYPE_HUB(_xbowp, _link)      \
-               ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_HUB)
-#define XBOW_PORT_TYPE_IO(_xbowp, _link)       \
-               ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_IO)
+#define XBOW_PORT_TYPE_HUB(_xbowp, _link)      \
+              ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_HUB)
+#define XBOW_PORT_TYPE_IO(_xbowp, _link)       \
+              ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_IO)
 
-#define XBOW_PORT_IS_ENABLED(_xbowp, _link)    \
-               ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_ENABLE)
-#define XBOW_PORT_NASID(_xbowp, _link)         \
-               ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_nasid)
+#define XBOW_PORT_IS_ENABLED(_xbowp, _link)    \
+              ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_ENABLE)
+#define XBOW_PORT_NASID(_xbowp, _link) \
+              ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_nasid)
 
-#define XBOW_PORT_IO     0x1
-#define XBOW_PORT_HUB    0x2
+#define XBOW_PORT_IO    0x1
+#define XBOW_PORT_HUB   0x2
 #define XBOW_PORT_ENABLE 0x4
 
-#define        SN0_PORT_FENCE_SHFT     0
-#define        SN0_PORT_FENCE_MASK     (1 << SN0_PORT_FENCE_SHFT)
+#define SN0_PORT_FENCE_SHFT    0
+#define SN0_PORT_FENCE_MASK    (1 << SN0_PORT_FENCE_SHFT)
 
 /*
  * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
@@ -242,28 +242,28 @@ typedef struct kl_config_hdr {
  *
  KLCONFIG
 
- +------------+      +------------+      +------------+      +------------+
- |  lboard    |  +-->|   lboard   |  +-->|   rboard   |  +-->|   lboard   |
- +------------+  |   +------------+  |   +------------+  |   +------------+
- | board info |  |   | board info |  |   |errinfo,bptr|  |   | board info |
- +------------+  |   +------------+  |   +------------+  |   +------------+
- | offset     |--+   |  offset    |--+   |  offset    |--+   |offset=NULL |
- +------------+      +------------+      +------------+      +------------+
+ +------------+             +------------+      +------------+      +------------+
+ |  lboard    |         +-->|   lboard   |  +-->|   rboard   |  +-->|   lboard   |
+ +------------+         |   +------------+  |   +------------+  |   +------------+
+ | board info |         |   | board info |  |   |errinfo,bptr|  |   | board info |
+ +------------+         |   +------------+  |   +------------+  |   +------------+
+ | offset     |--+   | offset    |--+   |  offset    |--+   |offset=NULL |
+ +------------+             +------------+      +------------+      +------------+
 
 
  +------------+
  | board info |
- +------------+       +--------------------------------+
+ +------------+              +--------------------------------+
  | compt 1    |------>| type, rev, diaginfo, size ...  |  (CPU)
- +------------+       +--------------------------------+
+ +------------+              +--------------------------------+
  | compt 2    |--+
- +------------+  |    +--------------------------------+
- |  ...       |  +--->| type, rev, diaginfo, size ...  |  (MEM_BANK)
- +------------+       +--------------------------------+
+ +------------+         |    +--------------------------------+
+ |  ...              |  +--->| type, rev, diaginfo, size ...  |  (MEM_BANK)
+ +------------+              +--------------------------------+
  | errinfo    |--+
- +------------+  |    +--------------------------------+
-                 +--->|r/l brd errinfo,compt err flags |
-                      +--------------------------------+
+ +------------+         |    +--------------------------------+
+                +--->|r/l brd errinfo,compt err flags |
+                     +--------------------------------+
 
  *
  * Each BOARD consists of COMPONENTs and the BOARD structure has
@@ -311,7 +311,7 @@ typedef struct kl_config_hdr {
  */
 #define KL_CPU_R4000           0x1     /* Standard R4000 */
 #define KL_CPU_TFP             0x2     /* TFP processor */
-#define        KL_CPU_R10000           0x3     /* R10000 (T5) */
+#define KL_CPU_R10000          0x3     /* R10000 (T5) */
 #define KL_CPU_NONE            (-1)    /* no cpu present in slot */
 
 /*
@@ -320,13 +320,13 @@ typedef struct kl_config_hdr {
 
 #define KLCLASS_MASK   0xf0
 #define KLCLASS_NONE   0x00
-#define KLCLASS_NODE   0x10             /* CPU, Memory and HUB board */
+#define KLCLASS_NODE   0x10             /* CPU, Memory and HUB board */
 #define KLCLASS_CPU    KLCLASS_NODE
-#define KLCLASS_IO     0x20             /* BaseIO, 4 ch SCSI, ethernet, FDDI
+#define KLCLASS_IO     0x20             /* BaseIO, 4 ch SCSI, ethernet, FDDI
                                            and the non-graphics widget boards */
-#define KLCLASS_ROUTER 0x30             /* Router board */
-#define KLCLASS_MIDPLANE 0x40            /* We need to treat this as a board
-                                            so that we can record error info */
+#define KLCLASS_ROUTER 0x30             /* Router board */
+#define KLCLASS_MIDPLANE 0x40           /* We need to treat this as a board
+                                           so that we can record error info */
 #define KLCLASS_GFX    0x50            /* graphics boards */
 
 #define KLCLASS_PSEUDO_GFX     0x60    /* HDTV type cards that use a gfx
@@ -336,7 +336,7 @@ typedef struct kl_config_hdr {
 #define KLCLASS_MAX    7               /* Bump this if a new CLASS is added */
 #define KLTYPE_MAX     10              /* Bump this if a new CLASS is added */
 
-#define KLCLASS_UNKNOWN        0xf0
+#define KLCLASS_UNKNOWN 0xf0
 
 #define KLCLASS(_x) ((_x) & KLCLASS_MASK)
 
@@ -353,36 +353,36 @@ typedef struct kl_config_hdr {
 
 #define KLTYPE_WEIRDIO (KLCLASS_IO  | 0x0)
 #define KLTYPE_BASEIO  (KLCLASS_IO  | 0x1) /* IOC3, SuperIO, Bridge, SCSI */
-#define KLTYPE_IO6     KLTYPE_BASEIO       /* Additional name */
+#define KLTYPE_IO6     KLTYPE_BASEIO       /* Additional name */
 #define KLTYPE_4CHSCSI (KLCLASS_IO  | 0x2)
-#define KLTYPE_MSCSI   KLTYPE_4CHSCSI      /* Additional name */
-#define KLTYPE_ETHERNET        (KLCLASS_IO  | 0x3)
-#define KLTYPE_MENET   KLTYPE_ETHERNET     /* Additional name */
-#define KLTYPE_FDDI    (KLCLASS_IO  | 0x4)
+#define KLTYPE_MSCSI   KLTYPE_4CHSCSI      /* Additional name */
+#define KLTYPE_ETHERNET (KLCLASS_IO  | 0x3)
+#define KLTYPE_MENET   KLTYPE_ETHERNET     /* Additional name */
+#define KLTYPE_FDDI    (KLCLASS_IO  | 0x4)
 #define KLTYPE_UNUSED  (KLCLASS_IO  | 0x5) /* XXX UNUSED */
-#define KLTYPE_HAROLD   (KLCLASS_IO  | 0x6) /* PCI SHOE BOX */
+#define KLTYPE_HAROLD  (KLCLASS_IO  | 0x6) /* PCI SHOE BOX */
 #define KLTYPE_PCI     KLTYPE_HAROLD
-#define KLTYPE_VME      (KLCLASS_IO  | 0x7) /* Any 3rd party VME card */
-#define KLTYPE_MIO     (KLCLASS_IO  | 0x8)
-#define KLTYPE_FC      (KLCLASS_IO  | 0x9)
-#define KLTYPE_LINC            (KLCLASS_IO  | 0xA)
-#define KLTYPE_TPU     (KLCLASS_IO  | 0xB) /* Tensor Processing Unit */
-#define KLTYPE_GSN_A           (KLCLASS_IO  | 0xC) /* Main GSN board */
-#define KLTYPE_GSN_B           (KLCLASS_IO  | 0xD) /* Auxiliary GSN board */
+#define KLTYPE_VME     (KLCLASS_IO  | 0x7) /* Any 3rd party VME card */
+#define KLTYPE_MIO     (KLCLASS_IO  | 0x8)
+#define KLTYPE_FC      (KLCLASS_IO  | 0x9)
+#define KLTYPE_LINC    (KLCLASS_IO  | 0xA)
+#define KLTYPE_TPU     (KLCLASS_IO  | 0xB) /* Tensor Processing Unit */
+#define KLTYPE_GSN_A   (KLCLASS_IO  | 0xC) /* Main GSN board */
+#define KLTYPE_GSN_B   (KLCLASS_IO  | 0xD) /* Auxiliary GSN board */
 
 #define KLTYPE_GFX     (KLCLASS_GFX | 0x0) /* unknown graphics type */
 #define KLTYPE_GFX_KONA (KLCLASS_GFX | 0x1) /* KONA graphics on IP27 */
 #define KLTYPE_GFX_MGRA (KLCLASS_GFX | 0x3) /* MGRAS graphics on IP27 */
 
 #define KLTYPE_WEIRDROUTER (KLCLASS_ROUTER | 0x0)
-#define KLTYPE_ROUTER     (KLCLASS_ROUTER | 0x1)
-#define KLTYPE_ROUTER2    KLTYPE_ROUTER                /* Obsolete! */
+#define KLTYPE_ROUTER    (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_ROUTER2   KLTYPE_ROUTER         /* Obsolete! */
 #define KLTYPE_NULL_ROUTER (KLCLASS_ROUTER | 0x2)
 #define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
 
 #define KLTYPE_WEIRDMIDPLANE (KLCLASS_MIDPLANE | 0x0)
 #define KLTYPE_MIDPLANE8  (KLCLASS_MIDPLANE | 0x1) /* 8 slot backplane */
-#define KLTYPE_MIDPLANE    KLTYPE_MIDPLANE8
+#define KLTYPE_MIDPLANE           KLTYPE_MIDPLANE8
 #define KLTYPE_PBRICK_XBOW     (KLCLASS_MIDPLANE | 0x2)
 
 #define KLTYPE_IOBRICK         (KLCLASS_IOBRICK | 0x0)
@@ -398,11 +398,11 @@ typedef struct kl_config_hdr {
  * When bringup started nic names had not standardized and so we
  * had to hard code. (For people interested in history.)
  */
-#define KLTYPE_XTHD    (KLCLASS_PSEUDO_GFX | 0x9)
+#define KLTYPE_XTHD    (KLCLASS_PSEUDO_GFX | 0x9)
 
 #define KLTYPE_UNKNOWN (KLCLASS_UNKNOWN | 0xf)
 
-#define KLTYPE(_x)     ((_x) & KLTYPE_MASK)
+#define KLTYPE(_x)     ((_x) & KLTYPE_MASK)
 #define IS_MIO_PRESENT(l)      ((l->brd_type == KLTYPE_BASEIO) && \
                                 (l->brd_flags & SECOND_NIC_PRESENT))
 #define IS_MIO_IOC3(l, n)      (IS_MIO_PRESENT(l) && (n > 2))
@@ -416,33 +416,33 @@ typedef struct kl_config_hdr {
 #define LOCAL_BOARD 1
 #define REMOTE_BOARD 2
 
-#define LBOARD_STRUCT_VERSION  2
+#define LBOARD_STRUCT_VERSION  2
 
 typedef struct lboard_s {
-       klconf_off_t    brd_next;         /* Next BOARD */
-       unsigned char   struct_type;      /* type of structure, local or remote */
-       unsigned char   brd_type;         /* type+class */
-       unsigned char   brd_sversion;     /* version of this structure */
-        unsigned char  brd_brevision;    /* board revision */
-        unsigned char  brd_promver;      /* board prom version, if any */
-       unsigned char   brd_flags;        /* Enabled, Disabled etc */
-       unsigned char   brd_slot;         /* slot number */
-       unsigned short  brd_debugsw;      /* Debug switches */
-       moduleid_t      brd_module;       /* module to which it belongs */
-       partid_t        brd_partition;    /* Partition number */
-        unsigned short         brd_diagval;      /* diagnostic value */
-        unsigned short         brd_diagparm;     /* diagnostic parameter */
-        unsigned char  brd_inventory;    /* inventory history */
-        unsigned char  brd_numcompts;    /* Number of components */
-        nic_t          brd_nic;          /* Number in CAN */
-       nasid_t         brd_nasid;        /* passed parameter */
-       klconf_off_t    brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
-       klconf_off_t    brd_errinfo;      /* Board's error information */
+       klconf_off_t    brd_next;         /* Next BOARD */
+       unsigned char   struct_type;      /* type of structure, local or remote */
+       unsigned char   brd_type;         /* type+class */
+       unsigned char   brd_sversion;     /* version of this structure */
+       unsigned char   brd_brevision;    /* board revision */
+       unsigned char   brd_promver;      /* board prom version, if any */
+       unsigned char   brd_flags;        /* Enabled, Disabled etc */
+       unsigned char   brd_slot;         /* slot number */
+       unsigned short  brd_debugsw;      /* Debug switches */
+       moduleid_t      brd_module;       /* module to which it belongs */
+       partid_t        brd_partition;    /* Partition number */
+       unsigned short  brd_diagval;      /* diagnostic value */
+       unsigned short  brd_diagparm;     /* diagnostic parameter */
+       unsigned char   brd_inventory;    /* inventory history */
+       unsigned char   brd_numcompts;    /* Number of components */
+       nic_t           brd_nic;          /* Number in CAN */
+       nasid_t         brd_nasid;        /* passed parameter */
+       klconf_off_t    brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+       klconf_off_t    brd_errinfo;      /* Board's error information */
        struct lboard_s *brd_parent;      /* Logical parent for this brd */
-       vertex_hdl_t    brd_graph_link;   /* vertex hdl to connect extern compts */
+       vertex_hdl_t    brd_graph_link;   /* vertex hdl to connect extern compts */
        confidence_t    brd_confidence;   /* confidence that the board is bad */
-       nasid_t         brd_owner;        /* who owns this board */
-       unsigned char   brd_nic_flags;    /* To handle 8 more NICs */
+       nasid_t         brd_owner;        /* who owns this board */
+       unsigned char   brd_nic_flags;    /* To handle 8 more NICs */
        char            brd_name[32];
 } lboard_t;
 
@@ -456,23 +456,23 @@ typedef struct lboard_s {
 
 #define KLCF_CLASS(_brd)       KLCLASS((_brd)->brd_type)
 #define KLCF_TYPE(_brd)                KLTYPE((_brd)->brd_type)
-#define KLCF_REMOTE(_brd)      (((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
+#define KLCF_REMOTE(_brd)      (((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
 #define KLCF_NUM_COMPS(_brd)   ((_brd)->brd_numcompts)
 #define KLCF_MODULE_ID(_brd)   ((_brd)->brd_module)
 
-#define KLCF_NEXT(_brd)        \
-        ((_brd)->brd_next ?    \
+#define KLCF_NEXT(_brd)                \
+       ((_brd)->brd_next ?     \
         (lboard_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), (_brd)->brd_next)):\
         NULL)
-#define KLCF_COMP(_brd, _ndx)   \
-                (klinfo_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd),        \
+#define KLCF_COMP(_brd, _ndx)  \
+               (klinfo_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), \
                                               (_brd)->brd_compts[(_ndx)]))
 
 #define KLCF_COMP_ERROR(_brd, _comp)   \
-               (NODE_OFFSET_TO_K1(NASID_GET(_brd), (_comp)->errinfo))
+              (NODE_OFFSET_TO_K1(NASID_GET(_brd), (_comp)->errinfo))
 
 #define KLCF_COMP_TYPE(_comp)  ((_comp)->struct_type)
-#define KLCF_BRIDGE_W_ID(_comp)        ((_comp)->physid)       /* Widget ID */
+#define KLCF_BRIDGE_W_ID(_comp) ((_comp)->physid)      /* Widget ID */
 
 
 
@@ -481,73 +481,73 @@ typedef struct lboard_s {
  * component.
  */
 
-typedef struct klinfo_s {                  /* Generic info */
-        unsigned char   struct_type;       /* type of this structure */
-        unsigned char   struct_version;    /* version of this structure */
-        unsigned char   flags;            /* Enabled, disabled etc */
-        unsigned char   revision;         /* component revision */
-        unsigned short  diagval;          /* result of diagnostics */
-        unsigned short  diagparm;         /* diagnostic parameter */
-        unsigned char   inventory;        /* previous inventory status */
-       nic_t           nic;              /* MUst be aligned properly */
-        unsigned char   physid;           /* physical id of component */
-        unsigned int    virtid;           /* virtual id as seen by system */
-       unsigned char   widid;            /* Widget id - if applicable */
-       nasid_t         nasid;            /* node number - from parent */
+typedef struct klinfo_s {                 /* Generic info */
+       unsigned char   struct_type;       /* type of this structure */
+       unsigned char   struct_version;    /* version of this structure */
+       unsigned char   flags;            /* Enabled, disabled etc */
+       unsigned char   revision;         /* component revision */
+       unsigned short  diagval;          /* result of diagnostics */
+       unsigned short  diagparm;         /* diagnostic parameter */
+       unsigned char   inventory;        /* previous inventory status */
+       nic_t           nic;              /* MUst be aligned properly */
+       unsigned char   physid;           /* physical id of component */
+       unsigned int    virtid;           /* virtual id as seen by system */
+       unsigned char   widid;            /* Widget id - if applicable */
+       nasid_t         nasid;            /* node number - from parent */
        char            pad1;             /* pad out structure. */
        char            pad2;             /* pad out structure. */
-       COMPONENT       *arcs_compt;      /* ptr to the arcs struct for ease*/
-        klconf_off_t   errinfo;          /* component specific errors */
-        unsigned short  pad3;             /* pci fields have moved over to */
-        unsigned short  pad4;             /* klbri_t */
+       COMPONENT       *arcs_compt;      /* ptr to the arcs struct for ease*/
+       klconf_off_t    errinfo;          /* component specific errors */
+       unsigned short  pad3;             /* pci fields have moved over to */
+       unsigned short  pad4;             /* klbri_t */
 } klinfo_t ;
 
 #define KLCONFIG_INFO_ENABLED(_i)      ((_i)->flags & KLINFO_ENABLE)
 /*
  * Component structures.
  * Following are the currently identified components:
- *     CPU, HUB, MEM_BANK,
- *     XBOW(consists of 16 WIDGETs, each of which can be HUB or GRAPHICS or BRIDGE)
- *     BRIDGE, IOC3, SuperIO, SCSI, FDDI
- *     ROUTER
- *     GRAPHICS
+ *     CPU, HUB, MEM_BANK,
+ *     XBOW(consists of 16 WIDGETs, each of which can be HUB or GRAPHICS or BRIDGE)
+ *     BRIDGE, IOC3, SuperIO, SCSI, FDDI
+ *     ROUTER
+ *     GRAPHICS
  */
 #define KLSTRUCT_UNKNOWN       0
-#define KLSTRUCT_CPU           1
-#define KLSTRUCT_HUB           2
-#define KLSTRUCT_MEMBNK        3
-#define KLSTRUCT_XBOW          4
-#define KLSTRUCT_BRI           5
-#define KLSTRUCT_IOC3          6
-#define KLSTRUCT_PCI           7
-#define KLSTRUCT_VME           8
+#define KLSTRUCT_CPU           1
+#define KLSTRUCT_HUB           2
+#define KLSTRUCT_MEMBNK                3
+#define KLSTRUCT_XBOW          4
+#define KLSTRUCT_BRI           5
+#define KLSTRUCT_IOC3          6
+#define KLSTRUCT_PCI           7
+#define KLSTRUCT_VME           8
 #define KLSTRUCT_ROU           9
-#define KLSTRUCT_GFX           10
-#define KLSTRUCT_SCSI          11
-#define KLSTRUCT_FDDI          12
-#define KLSTRUCT_MIO           13
-#define KLSTRUCT_DISK          14
-#define KLSTRUCT_TAPE          15
-#define KLSTRUCT_CDROM                 16
-#define KLSTRUCT_HUB_UART      17
-#define KLSTRUCT_IOC3ENET      18
-#define KLSTRUCT_IOC3UART      19
+#define KLSTRUCT_GFX           10
+#define KLSTRUCT_SCSI          11
+#define KLSTRUCT_FDDI          12
+#define KLSTRUCT_MIO           13
+#define KLSTRUCT_DISK          14
+#define KLSTRUCT_TAPE          15
+#define KLSTRUCT_CDROM         16
+#define KLSTRUCT_HUB_UART      17
+#define KLSTRUCT_IOC3ENET      18
+#define KLSTRUCT_IOC3UART      19
 #define KLSTRUCT_UNUSED                20 /* XXX UNUSED */
-#define KLSTRUCT_IOC3PCKM       21
-#define KLSTRUCT_RAD           22
-#define KLSTRUCT_HUB_TTY        23
-#define KLSTRUCT_IOC3_TTY      24
+#define KLSTRUCT_IOC3PCKM      21
+#define KLSTRUCT_RAD           22
+#define KLSTRUCT_HUB_TTY       23
+#define KLSTRUCT_IOC3_TTY      24
 
 /* Early Access IO proms are compatible
    only with KLSTRUCT values up to 24. */
 
-#define KLSTRUCT_FIBERCHANNEL  25
+#define KLSTRUCT_FIBERCHANNEL  25
 #define KLSTRUCT_MOD_SERIAL_NUM 26
-#define KLSTRUCT_IOC3MS         27
-#define KLSTRUCT_TPU            28
-#define KLSTRUCT_GSN_A          29
-#define KLSTRUCT_GSN_B          30
-#define KLSTRUCT_XTHD           31
+#define KLSTRUCT_IOC3MS                27
+#define KLSTRUCT_TPU           28
+#define KLSTRUCT_GSN_A         29
+#define KLSTRUCT_GSN_B         30
+#define KLSTRUCT_XTHD          31
 
 /*
  * These are the indices of various components within a lboard structure.
@@ -583,7 +583,7 @@ typedef u64 *router_t;
  * The port info in ip27_cfg area translates to a lboart_t in the
  * KLCONFIG area. But since KLCONFIG does not use pointers, lboart_t
  * is stored in terms of a nasid and a offset from start of KLCONFIG
- * area  on that nasid.
+ * area         on that nasid.
  */
 typedef struct klport_s {
        nasid_t         port_nasid;
@@ -591,20 +591,20 @@ typedef struct klport_s {
        klconf_off_t    port_offset;
 } klport_t;
 
-typedef struct klcpu_s {                          /* CPU */
-       klinfo_t        cpu_info;
-       unsigned short  cpu_prid;       /* Processor PRID value */
-       unsigned short  cpu_fpirr;      /* FPU IRR value */
-       unsigned short  cpu_speed;      /* Speed in MHZ */
-       unsigned short  cpu_scachesz;   /* secondary cache size in MB */
-       unsigned short  cpu_scachespeed;/* secondary cache speed in MHz */
+typedef struct klcpu_s {                         /* CPU */
+       klinfo_t        cpu_info;
+       unsigned short  cpu_prid;       /* Processor PRID value */
+       unsigned short  cpu_fpirr;      /* FPU IRR value */
+       unsigned short  cpu_speed;      /* Speed in MHZ */
+       unsigned short  cpu_scachesz;   /* secondary cache size in MB */
+       unsigned short  cpu_scachespeed;/* secondary cache speed in MHz */
 } klcpu_t ;
 
 #define CPU_STRUCT_VERSION   2
 
 typedef struct klhub_s {                       /* HUB */
-       klinfo_t        hub_info;
-       unsigned int            hub_flags;              /* PCFG_HUB_xxx flags */
+       klinfo_t        hub_info;
+       unsigned int            hub_flags;              /* PCFG_HUB_xxx flags */
        klport_t        hub_port;               /* hub is connected to this */
        nic_t           hub_box_nic;            /* nic of containing box */
        klconf_off_t    hub_mfg_nic;            /* MFG NIC string */
@@ -612,36 +612,36 @@ typedef struct klhub_s {                  /* HUB */
 } klhub_t ;
 
 typedef struct klhub_uart_s {                  /* HUB */
-       klinfo_t        hubuart_info;
-       unsigned int            hubuart_flags;          /* PCFG_HUB_xxx flags */
+       klinfo_t        hubuart_info;
+       unsigned int            hubuart_flags;          /* PCFG_HUB_xxx flags */
        nic_t           hubuart_box_nic;        /* nic of containing box */
 } klhub_uart_t ;
 
-#define MEMORY_STRUCT_VERSION   2
+#define MEMORY_STRUCT_VERSION  2
 
 typedef struct klmembnk_s {                    /* MEMORY BANK */
-       klinfo_t        membnk_info;
-       short           membnk_memsz;           /* Total memory in megabytes */
+       klinfo_t        membnk_info;
+       short           membnk_memsz;           /* Total memory in megabytes */
        short           membnk_dimm_select; /* bank to physical addr mapping*/
        short           membnk_bnksz[MD_MEM_BANKS]; /* Memory bank sizes */
        short           membnk_attr;
 } klmembnk_t ;
 
 #define KLCONFIG_MEMBNK_SIZE(_info, _bank)     \
-                            ((_info)->membnk_bnksz[(_bank)])
+                           ((_info)->membnk_bnksz[(_bank)])
 
 
 #define MEMBNK_PREMIUM 1
 #define KLCONFIG_MEMBNK_PREMIUM(_info, _bank)  \
-                            ((_info)->membnk_attr & (MEMBNK_PREMIUM << (_bank)))
+                           ((_info)->membnk_attr & (MEMBNK_PREMIUM << (_bank)))
 
 #define MAX_SERIAL_NUM_SIZE 10
 
 typedef struct klmod_serial_num_s {
-      klinfo_t        snum_info;
+      klinfo_t       snum_info;
       union {
-              char snum_str[MAX_SERIAL_NUM_SIZE];
-              unsigned long long       snum_int;
+             char snum_str[MAX_SERIAL_NUM_SIZE];
+             unsigned long long       snum_int;
       } snum;
 } klmod_serial_num_t;
 
@@ -650,43 +650,43 @@ typedef struct klmod_serial_num_s {
    serial number struct as a component without losing compatibility
    between prom versions. */
 
-#define GET_SNUM_COMP(_l)      ((klmod_serial_num_t *)\
+#define GET_SNUM_COMP(_l)      ((klmod_serial_num_t *)\
                                KLCF_COMP(_l, _l->brd_numcompts))
 
 #define MAX_XBOW_LINKS 16
 
-typedef struct klxbow_s {                          /* XBOW */
-       klinfo_t        xbow_info ;
+typedef struct klxbow_s {                         /* XBOW */
+       klinfo_t        xbow_info ;
        klport_t        xbow_port_info[MAX_XBOW_LINKS] ; /* Module number */
-        int            xbow_master_hub_link;
-        /* type of brd connected+component struct ptr+flags */
+       int             xbow_master_hub_link;
+       /* type of brd connected+component struct ptr+flags */
 } klxbow_t ;
 
 #define MAX_PCI_SLOTS 8
 
 typedef struct klpci_device_s {
        s32     pci_device_id;  /* 32 bits of vendor/device ID. */
-       s32     pci_device_pad; /* 32 bits of padding. */
+       s32     pci_device_pad; /* 32 bits of padding. */
 } klpci_device_t;
 
 #define BRIDGE_STRUCT_VERSION  2
 
-typedef struct klbri_s {                          /* BRIDGE */
-       klinfo_t        bri_info ;
-       unsigned char   bri_eprominfo ;    /* IO6prom connected to bridge */
-       unsigned char   bri_bustype ;      /* PCI/VME BUS bridge/GIO */
-       pci_t           pci_specific  ;    /* PCI Board config info */
+typedef struct klbri_s {                         /* BRIDGE */
+       klinfo_t        bri_info ;
+       unsigned char   bri_eprominfo ;    /* IO6prom connected to bridge */
+       unsigned char   bri_bustype ;      /* PCI/VME BUS bridge/GIO */
+       pci_t           pci_specific  ;    /* PCI Board config info */
        klpci_device_t  bri_devices[MAX_PCI_DEVS] ;     /* PCI IDs */
        klconf_off_t    bri_mfg_nic ;
 } klbri_t ;
 
 #define MAX_IOC3_TTY   2
 
-typedef struct klioc3_s {                          /* IOC3 */
-       klinfo_t        ioc3_info ;
-       unsigned char   ioc3_ssram ;        /* Info about ssram */
-       unsigned char   ioc3_nvram ;        /* Info about nvram */
-       klinfo_t        ioc3_superio ;      /* Info about superio */
+typedef struct klioc3_s {                         /* IOC3 */
+       klinfo_t        ioc3_info ;
+       unsigned char   ioc3_ssram ;        /* Info about ssram */
+       unsigned char   ioc3_nvram ;        /* Info about nvram */
+       klinfo_t        ioc3_superio ;      /* Info about superio */
        klconf_off_t    ioc3_tty_off ;
        klinfo_t        ioc3_enet ;
        klconf_off_t    ioc3_enet_off ;
@@ -695,27 +695,27 @@ typedef struct klioc3_s {                          /* IOC3 */
 
 #define MAX_VME_SLOTS 8
 
-typedef struct klvmeb_s {                          /* VME BRIDGE - PCI CTLR */
-       klinfo_t        vmeb_info ;
+typedef struct klvmeb_s {                         /* VME BRIDGE - PCI CTLR */
+       klinfo_t        vmeb_info ;
        vmeb_t          vmeb_specific ;
-       klconf_off_t    vmeb_brdinfo[MAX_VME_SLOTS]   ;    /* VME Board config info */
+       klconf_off_t    vmeb_brdinfo[MAX_VME_SLOTS]   ;    /* VME Board config info */
 } klvmeb_t ;
 
-typedef struct klvmed_s {                          /* VME DEVICE - VME BOARD */
+typedef struct klvmed_s {                         /* VME DEVICE - VME BOARD */
        klinfo_t        vmed_info ;
        vmed_t          vmed_specific ;
-       klconf_off_t    vmed_brdinfo[MAX_VME_SLOTS]   ;    /* VME Board config info */
+       klconf_off_t    vmed_brdinfo[MAX_VME_SLOTS]   ;    /* VME Board config info */
 } klvmed_t ;
 
 #define ROUTER_VECTOR_VERS     2
 
 /* XXX - Don't we need the number of ports here?!? */
-typedef struct klrou_s {                          /* ROUTER */
-       klinfo_t        rou_info ;
-       unsigned int            rou_flags ;           /* PCFG_ROUTER_xxx flags */
-       nic_t           rou_box_nic ;         /* nic of the containing module */
-       klport_t        rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
-       klconf_off_t    rou_mfg_nic ;     /* MFG NIC string */
+typedef struct klrou_s {                         /* ROUTER */
+       klinfo_t        rou_info ;
+       unsigned int            rou_flags ;           /* PCFG_ROUTER_xxx flags */
+       nic_t           rou_box_nic ;         /* nic of the containing module */
+       klport_t        rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
+       klconf_off_t    rou_mfg_nic ;     /* MFG NIC string */
        u64     rou_vector;       /* vector from master node */
 } klrou_t ;
 
@@ -732,30 +732,30 @@ typedef struct klrou_s {                          /* ROUTER */
 #define KLGFX_COOKIE   0x0c0de000
 
 typedef struct klgfx_s {               /* GRAPHICS Device */
-       klinfo_t        gfx_info;
-       klconf_off_t    old_gndevs;     /* for compatibility with older proms */
-       klconf_off_t    old_gdoff0;     /* for compatibility with older proms */
+       klinfo_t        gfx_info;
+       klconf_off_t    old_gndevs;     /* for compatibility with older proms */
+       klconf_off_t    old_gdoff0;     /* for compatibility with older proms */
        unsigned int            cookie;         /* for compatibility with older proms */
        unsigned int            moduleslot;
        struct klgfx_s  *gfx_next_pipe;
        graphics_t      gfx_specific;
-       klconf_off_t    pad0;           /* for compatibility with older proms */
-       klconf_off_t    gfx_mfg_nic;
+       klconf_off_t    pad0;           /* for compatibility with older proms */
+       klconf_off_t    gfx_mfg_nic;
 } klgfx_t;
 
 typedef struct klxthd_s {
-       klinfo_t        xthd_info ;
-       klconf_off_t    xthd_mfg_nic ;        /* MFG NIC string */
+       klinfo_t        xthd_info ;
+       klconf_off_t    xthd_mfg_nic ;        /* MFG NIC string */
 } klxthd_t ;
 
-typedef struct kltpu_s {                     /* TPU board */
-       klinfo_t        tpu_info ;
-       klconf_off_t    tpu_mfg_nic ;        /* MFG NIC string */
+typedef struct kltpu_s {                    /* TPU board */
+       klinfo_t        tpu_info ;
+       klconf_off_t    tpu_mfg_nic ;        /* MFG NIC string */
 } kltpu_t ;
 
-typedef struct klgsn_s {                     /* GSN board */
-       klinfo_t        gsn_info ;
-       klconf_off_t    gsn_mfg_nic ;        /* MFG NIC string */
+typedef struct klgsn_s {                    /* GSN board */
+       klinfo_t        gsn_info ;
+       klconf_off_t    gsn_mfg_nic ;        /* MFG NIC string */
 } klgsn_t ;
 
 #define MAX_SCSI_DEVS 16
@@ -767,57 +767,57 @@ typedef struct klgsn_s {                     /* GSN board */
  * that as the size to be klmalloced.
  */
 
-typedef struct klscsi_s {                          /* SCSI Controller */
-       klinfo_t        scsi_info ;
-       scsi_t          scsi_specific   ;
-       unsigned char   scsi_numdevs ;
+typedef struct klscsi_s {                         /* SCSI Controller */
+       klinfo_t        scsi_info ;
+       scsi_t          scsi_specific   ;
+       unsigned char   scsi_numdevs ;
        klconf_off_t    scsi_devinfo[MAX_SCSI_DEVS] ;
 } klscsi_t ;
 
-typedef struct klscdev_s {                          /* SCSI device */
-       klinfo_t        scdev_info ;
+typedef struct klscdev_s {                         /* SCSI device */
+       klinfo_t        scdev_info ;
        struct scsidisk_data *scdev_cfg ; /* driver fills up this */
 } klscdev_t ;
 
-typedef struct klttydev_s {                          /* TTY device */
-       klinfo_t        ttydev_info ;
+typedef struct klttydev_s {                         /* TTY device */
+       klinfo_t        ttydev_info ;
        struct terminal_data *ttydev_cfg ; /* driver fills up this */
 } klttydev_t ;
 
-typedef struct klenetdev_s {                          /* ENET device */
-       klinfo_t        enetdev_info ;
+typedef struct klenetdev_s {                         /* ENET device */
+       klinfo_t        enetdev_info ;
        struct net_data *enetdev_cfg ; /* driver fills up this */
 } klenetdev_t ;
 
-typedef struct klkbddev_s {                          /* KBD device */
-       klinfo_t        kbddev_info ;
+typedef struct klkbddev_s {                         /* KBD device */
+       klinfo_t        kbddev_info ;
        struct keyboard_data *kbddev_cfg ; /* driver fills up this */
 } klkbddev_t ;
 
-typedef struct klmsdev_s {                          /* mouse device */
-        klinfo_t        msdev_info ;
-        void           *msdev_cfg ;
+typedef struct klmsdev_s {                         /* mouse device */
+       klinfo_t        msdev_info ;
+       void            *msdev_cfg ;
 } klmsdev_t ;
 
 #define MAX_FDDI_DEVS 10 /* XXX Is this true */
 
-typedef struct klfddi_s {                          /* FDDI */
-       klinfo_t        fddi_info ;
-       fddi_t          fddi_specific ;
+typedef struct klfddi_s {                         /* FDDI */
+       klinfo_t        fddi_info ;
+       fddi_t          fddi_specific ;
        klconf_off_t    fddi_devinfo[MAX_FDDI_DEVS] ;
 } klfddi_t ;
 
-typedef struct klmio_s {                          /* MIO */
-       klinfo_t        mio_info ;
-       mio_t           mio_specific   ;
+typedef struct klmio_s {                         /* MIO */
+       klinfo_t        mio_info ;
+       mio_t           mio_specific   ;
 } klmio_t ;
 
 
 typedef union klcomp_s {
        klcpu_t         kc_cpu;
        klhub_t         kc_hub;
-       klmembnk_t      kc_mem;
-       klxbow_t        kc_xbow;
+       klmembnk_t      kc_mem;
+       klxbow_t        kc_xbow;
        klbri_t         kc_bri;
        klioc3_t        kc_ioc3;
        klvmeb_t        kc_vmeb;
@@ -831,11 +831,11 @@ typedef union klcomp_s {
        klmod_serial_num_t kc_snum ;
 } klcomp_t;
 
-typedef union kldev_s {      /* for device structure allocation */
+typedef union kldev_s {             /* for device structure allocation */
        klscdev_t       kc_scsi_dev ;
        klttydev_t      kc_tty_dev ;
        klenetdev_t     kc_enet_dev ;
-       klkbddev_t      kc_kbd_dev ;
+       klkbddev_t      kc_kbd_dev ;
 } kldev_t ;
 
 /* Data structure interface routines. TBD */
index 1327e12..bfb3aec 100644 (file)
@@ -16,8 +16,8 @@
  * The kldir memory area resides at a fixed place in each node's memory and
  * provides pointers to most other IP27 memory areas.  This allows us to
  * resize and/or relocate memory areas at a later time without breaking all
- * firmware and kernels that use them.  Indices in the array are
- * permanently dedicated to areas listed below.  Some memory areas (marked
+ * firmware and kernels that use them. Indices in the array are
+ * permanently dedicated to areas listed below.         Some memory areas (marked
  * below) reside at a permanently fixed location, but are included in the
  * directory for completeness.
  */
  * The upper portion of the memory map applies during boot
  * only and is overwritten by IRIX/SYMMON.
  *
- *                                    MEMORY MAP PER NODE
+ *                                   MEMORY MAP PER NODE
  *
- * 0x2000000 (32M)         +-----------------------------------------+
- *                         |      IO6 BUFFERS FOR FLASH ENET IOC3    |
- * 0x1F80000 (31.5M)       +-----------------------------------------+
- *                         |      IO6 TEXT/DATA/BSS/stack            |
- * 0x1C00000 (30M)         +-----------------------------------------+
- *                         |      IO6 PROM DEBUG TEXT/DATA/BSS/stack |
- * 0x0800000 (28M)         +-----------------------------------------+
- *                         |      IP27 PROM TEXT/DATA/BSS/stack      |
- * 0x1B00000 (27M)         +-----------------------------------------+
- *                         |      IP27 CFG                           |
- * 0x1A00000 (26M)         +-----------------------------------------+
- *                         |      Graphics PROM                      |
- * 0x1800000 (24M)         +-----------------------------------------+
- *                         |      3rd Party PROM drivers             |
- * 0x1600000 (22M)         +-----------------------------------------+
- *                         |                                         |
- *                         |      Free                               |
- *                         |                                         |
- *                         +-----------------------------------------+
- *                         |      UNIX DEBUG Version                 |
- * 0x190000 (2M--)         +-----------------------------------------+
- *                         |      SYMMON                             |
- *                         |      (For UNIX Debug only)              |
- * 0x34000 (208K)          +-----------------------------------------+
- *                         |      SYMMON STACK [NUM_CPU_PER_NODE]    |
- *                         |      (For UNIX Debug only)              |
- * 0x25000 (148K)          +-----------------------------------------+
- *                         |      KLCONFIG - II (temp)               |
- *                         |                                         |
- *                         |    ----------------------------         |
- *                         |                                         |
- *                         |      UNIX NON-DEBUG Version             |
- * 0x19000 (100K)          +-----------------------------------------+
+ * 0x2000000 (32M)        +-----------------------------------------+
+ *                        |      IO6 BUFFERS FOR FLASH ENET IOC3    |
+ * 0x1F80000 (31.5M)      +-----------------------------------------+
+ *                        |      IO6 TEXT/DATA/BSS/stack            |
+ * 0x1C00000 (30M)        +-----------------------------------------+
+ *                        |      IO6 PROM DEBUG TEXT/DATA/BSS/stack |
+ * 0x0800000 (28M)        +-----------------------------------------+
+ *                        |      IP27 PROM TEXT/DATA/BSS/stack      |
+ * 0x1B00000 (27M)        +-----------------------------------------+
+ *                        |      IP27 CFG                           |
+ * 0x1A00000 (26M)        +-----------------------------------------+
+ *                        |      Graphics PROM                      |
+ * 0x1800000 (24M)        +-----------------------------------------+
+ *                        |      3rd Party PROM drivers             |
+ * 0x1600000 (22M)        +-----------------------------------------+
+ *                        |                                         |
+ *                        |      Free                               |
+ *                        |                                         |
+ *                        +-----------------------------------------+
+ *                        |      UNIX DEBUG Version                 |
+ * 0x190000 (2M--)        +-----------------------------------------+
+ *                        |      SYMMON                             |
+ *                        |      (For UNIX Debug only)              |
+ * 0x34000 (208K)         +-----------------------------------------+
+ *                        |      SYMMON STACK [NUM_CPU_PER_NODE]    |
+ *                        |      (For UNIX Debug only)              |
+ * 0x25000 (148K)         +-----------------------------------------+
+ *                        |      KLCONFIG - II (temp)               |
+ *                        |                                         |
+ *                        |    ----------------------------         |
+ *                        |                                         |
+ *                        |      UNIX NON-DEBUG Version             |
+ * 0x19000 (100K)         +-----------------------------------------+
  *
  *
  * The lower portion of the memory map contains information that is
  * permanent and is used by the IP27PROM, IO6PROM and IRIX.
  *
- * 0x19000 (100K)          +-----------------------------------------+
- *                         |                                         |
- *                         |      PI Error Spools (32K)              |
- *                         |                                         |
- * 0x12000 (72K)           +-----------------------------------------+
- *                         |      Unused                             |
- * 0x11c00 (71K)           +-----------------------------------------+
- *                         |      CPU 1 NMI Eframe area             |
- * 0x11a00 (70.5K)         +-----------------------------------------+
- *                         |      CPU 0 NMI Eframe area             |
- * 0x11800 (70K)           +-----------------------------------------+
- *                         |      CPU 1 NMI Register save area       |
- * 0x11600 (69.5K)         +-----------------------------------------+
- *                         |      CPU 0 NMI Register save area       |
- * 0x11400 (69K)           +-----------------------------------------+
- *                         |      GDA (1k)                           |
- * 0x11000 (68K)           +-----------------------------------------+
- *                         |      Early cache Exception stack        |
- *                         |             and/or                      |
- *                        |      kernel/io6prom nmi registers       |
+ * 0x19000 (100K)         +-----------------------------------------+
+ *                        |                                         |
+ *                        |      PI Error Spools (32K)              |
+ *                        |                                         |
+ * 0x12000 (72K)          +-----------------------------------------+
+ *                        |      Unused                             |
+ * 0x11c00 (71K)          +-----------------------------------------+
+ *                        |      CPU 1 NMI Eframe area              |
+ * 0x11a00 (70.5K)        +-----------------------------------------+
+ *                        |      CPU 0 NMI Eframe area              |
+ * 0x11800 (70K)          +-----------------------------------------+
+ *                        |      CPU 1 NMI Register save area       |
+ * 0x11600 (69.5K)        +-----------------------------------------+
+ *                        |      CPU 0 NMI Register save area       |
+ * 0x11400 (69K)          +-----------------------------------------+
+ *                        |      GDA (1k)                           |
+ * 0x11000 (68K)          +-----------------------------------------+
+ *                        |      Early cache Exception stack        |
+ *                        |             and/or                      |
+ *                        |      kernel/io6prom nmi registers       |
  * 0x10800  (66k)         +-----------------------------------------+
- *                        |      cache error eframe                 |
- * 0x10400 (65K)           +-----------------------------------------+
- *                         |      Exception Handlers (UALIAS copy)   |
- * 0x10000 (64K)           +-----------------------------------------+
- *                         |                                         |
- *                         |                                         |
- *                         |      KLCONFIG - I (permanent) (48K)     |
- *                         |                                         |
- *                         |                                         |
- *                         |                                         |
- * 0x4000 (16K)            +-----------------------------------------+
- *                         |      NMI Handler (Protected Page)       |
- * 0x3000 (12K)            +-----------------------------------------+
- *                         |      ARCS PVECTORS (master node only)   |
- * 0x2c00 (11K)            +-----------------------------------------+
- *                         |      ARCS TVECTORS (master node only)   |
- * 0x2800 (10K)            +-----------------------------------------+
- *                         |      LAUNCH [NUM_CPU]                   |
- * 0x2400 (9K)             +-----------------------------------------+
- *                         |      Low memory directory (KLDIR)       |
- * 0x2000 (8K)             +-----------------------------------------+
- *                         |      ARCS SPB (1K)                      |
- * 0x1000 (4K)             +-----------------------------------------+
- *                         |      Early cache Exception stack        |
- *                         |             and/or                      |
- *                        |      kernel/io6prom nmi registers       |
- * 0x800  (2k)            +-----------------------------------------+
- *                        |      cache error eframe                 |
- * 0x400 (1K)              +-----------------------------------------+
- *                         |      Exception Handlers                 |
- * 0x0   (0K)              +-----------------------------------------+
+ *                        |      cache error eframe                 |
+ * 0x10400 (65K)          +-----------------------------------------+
+ *                        |      Exception Handlers (UALIAS copy)   |
+ * 0x10000 (64K)          +-----------------------------------------+
+ *                        |                                         |
+ *                        |                                         |
+ *                        |      KLCONFIG - I (permanent) (48K)     |
+ *                        |                                         |
+ *                        |                                         |
+ *                        |                                         |
+ * 0x4000 (16K)                   +-----------------------------------------+
+ *                        |      NMI Handler (Protected Page)       |
+ * 0x3000 (12K)                   +-----------------------------------------+
+ *                        |      ARCS PVECTORS (master node only)   |
+ * 0x2c00 (11K)                   +-----------------------------------------+
+ *                        |      ARCS TVECTORS (master node only)   |
+ * 0x2800 (10K)                   +-----------------------------------------+
+ *                        |      LAUNCH [NUM_CPU]                   |
+ * 0x2400 (9K)            +-----------------------------------------+
+ *                        |      Low memory directory (KLDIR)       |
+ * 0x2000 (8K)            +-----------------------------------------+
+ *                        |      ARCS SPB (1K)                      |
+ * 0x1000 (4K)            +-----------------------------------------+
+ *                        |      Early cache Exception stack        |
+ *                        |             and/or                      |
+ *                        |      kernel/io6prom nmi registers       |
+ * 0x800  (2k)            +-----------------------------------------+
+ *                        |      cache error eframe                 |
+ * 0x400 (1K)             +-----------------------------------------+
+ *                        |      Exception Handlers                 |
+ * 0x0  (0K)              +-----------------------------------------+
  */
 
 #ifdef __ASSEMBLY__
 
 #ifndef __ASSEMBLY__
 typedef struct kldir_ent_s {
-       u64             magic;          /* Indicates validity of entry      */
+       u64             magic;          /* Indicates validity of entry      */
        off_t           offset;         /* Offset from start of node space  */
        unsigned long   pointer;        /* Pointer to area in some cases    */
-       size_t          size;           /* Size in bytes                    */
+       size_t          size;           /* Size in bytes                    */
        u64             count;          /* Repeat count if array, 1 if not  */
-       size_t          stride;         /* Stride if array, 0 if not        */
-       char            rsvd[16];       /* Pad entry to 0x40 bytes          */
+       size_t          stride;         /* Stride if array, 0 if not        */
+       char            rsvd[16];       /* Pad entry to 0x40 bytes          */
        /* NOTE: These 16 bytes are used in the Partition KLDIR
           entry to store partition info. Refer to klpart.h for this. */
 } kldir_ent_t;
index b7c2226..04226d8 100644 (file)
@@ -19,7 +19,7 @@
  *
  * The master stores launch parameters in the launch structure
  * corresponding to a target processor that is in a slave loop, then sends
- * an interrupt to the slave processor.  The slave calls the desired
+ * an interrupt to the slave processor.         The slave calls the desired
  * function, then returns to the slave loop.  The master may poll or wait
  * for the slaves to finish.
  *
@@ -33,7 +33,7 @@
 #define LAUNCH_PADSZ           0xa0
 #endif
 
-#define LAUNCH_OFF_MAGIC       0x00    /* Struct offsets for assembly      */
+#define LAUNCH_OFF_MAGIC       0x00    /* Struct offsets for assembly      */
 #define LAUNCH_OFF_BUSY                0x08
 #define LAUNCH_OFF_CALL                0x10
 #define LAUNCH_OFF_CALLC       0x18
@@ -44,7 +44,7 @@
 #define LAUNCH_OFF_BEVNORMAL   0x40
 #define LAUNCH_OFF_BEVECC      0x48
 
-#define LAUNCH_STATE_DONE      0       /* Return value of LAUNCH_POLL      */
+#define LAUNCH_STATE_DONE      0       /* Return value of LAUNCH_POLL      */
 #define LAUNCH_STATE_SENT      1
 #define LAUNCH_STATE_RECD      2
 
@@ -65,16 +65,16 @@ typedef int launch_state_t;
 typedef void (*launch_proc_t)(u64 call_parm);
 
 typedef struct launch_s {
-       volatile u64            magic;  /* Magic number                     */
-       volatile u64            busy;   /* Slave currently active           */
+       volatile u64            magic;  /* Magic number                     */
+       volatile u64            busy;   /* Slave currently active           */
        volatile launch_proc_t  call_addr;      /* Func. for slave to call  */
        volatile u64            call_addr_c;    /* 1's complement of call_addr*/
        volatile u64            call_parm;      /* Single parm passed to call*/
        volatile void *stack_addr;      /* Stack pointer for slave function */
        volatile void *gp_addr;         /* Global pointer for slave func.   */
-       volatile char           *bevutlb;/* Address of bev utlb ex handler   */
-       volatile char           *bevnormal;/*Address of bev normal ex handler */
-       volatile char           *bevecc;/* Address of bev cache err handler */
+       volatile char           *bevutlb;/* Address of bev utlb ex handler   */
+       volatile char           *bevnormal;/*Address of bev normal ex handler */
+       volatile char           *bevecc;/* Address of bev cache err handler */
        volatile char           pad[160];       /* Pad to LAUNCH_SIZEOF     */
 } launch_t;
 
index 721496a..401f3b0 100644 (file)
@@ -48,7 +48,7 @@
 
 #endif /* CONFIG_MAPPED_KERNEL */
 
-#define MAPPED_KERN_RO_TO_K0(x)        PHYS_TO_K0(MAPPED_KERN_RO_TO_PHYS(x))
-#define MAPPED_KERN_RW_TO_K0(x)        PHYS_TO_K0(MAPPED_KERN_RW_TO_PHYS(x))
+#define MAPPED_KERN_RO_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RO_TO_PHYS(x))
+#define MAPPED_KERN_RW_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RW_TO_PHYS(x))
 
 #endif /* __ASM_SN_MAPPED_KERNEL_H  */
index 1af4989..12ac210 100644 (file)
@@ -19,7 +19,7 @@
  *
  * The master stores launch parameters in the launch structure
  * corresponding to a target processor that is in a slave loop, then sends
- * an interrupt to the slave processor.  The slave calls the desired
+ * an interrupt to the slave processor.         The slave calls the desired
  * function, followed by an optional rendezvous function, then returns to
  * the slave loop.  The master does not wait for the slaves before
  * returning.
@@ -31,7 +31,7 @@
 #define NMI_MAGIC              0x48414d4d455201
 #define NMI_SIZEOF             0x40
 
-#define NMI_OFF_MAGIC          0x00    /* Struct offsets for assembly      */
+#define NMI_OFF_MAGIC          0x00    /* Struct offsets for assembly      */
 #define NMI_OFF_FLAGS          0x08
 #define NMI_OFF_CALL           0x10
 #define NMI_OFF_CALLC          0x18
@@ -53,8 +53,8 @@
 typedef struct nmi_s {
        volatile unsigned long   magic;         /* Magic number */
        volatile unsigned long   flags;         /* Combination of flags above */
-       volatile void *call_addr;       /* Routine for slave to call        */
-       volatile void *call_addr_c;     /* 1's complement of address        */
+       volatile void *call_addr;       /* Routine for slave to call        */
+       volatile void *call_addr_c;     /* 1's complement of address        */
        volatile void *call_parm;       /* Single parm passed to call       */
        volatile unsigned long   gmaster;       /* Flag true only on global master*/
 } nmi_t;
index b061900..6b53070 100644 (file)
@@ -29,7 +29,7 @@
  * chapter of the Hub specification.
  *
  * NOTE: This header file is included both by C and by assembler source
- *      files.  Please bracket any language-dependent definitions
+ *      files.  Please bracket any language-dependent definitions
  *      appropriately.
  */
 
 
 #define BWIN_INDEX_BITS                3
 #define BWIN_SIZE              (UINT64_CAST 1 << BWIN_SIZE_BITS)
-#define        BWIN_SIZEMASK           (BWIN_SIZE - 1)
-#define        BWIN_WIDGET_MASK        0x7
+#define BWIN_SIZEMASK          (BWIN_SIZE - 1)
+#define BWIN_WIDGET_MASK       0x7
 #define NODE_BWIN_BASE0(nasid) (NODE_IO_BASE(nasid) + BWIN_SIZE)
-#define NODE_BWIN_BASE(nasid, bigwin)  (NODE_BWIN_BASE0(nasid) +       \
+#define NODE_BWIN_BASE(nasid, bigwin)  (NODE_BWIN_BASE0(nasid) +       \
                        (UINT64_CAST(bigwin) << BWIN_SIZE_BITS))
 
-#define        BWIN_WIDGETADDR(addr)   ((addr) & BWIN_SIZEMASK)
-#define        BWIN_WINDOWNUM(addr)    (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define BWIN_WIDGETADDR(addr)  ((addr) & BWIN_SIZEMASK)
+#define BWIN_WINDOWNUM(addr)   (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
 /*
  * Verify if addr belongs to large window address of node with "nasid"
  *
  *
  */
 
-#define        NODE_BWIN_ADDR(nasid, addr)     \
+#define NODE_BWIN_ADDR(nasid, addr)    \
                (((addr) >= NODE_BWIN_BASE0(nasid)) && \
                 ((addr) < (NODE_BWIN_BASE(nasid, HUB_NUM_BIG_WINDOW) + \
                                BWIN_SIZE)))
  * The following define the major position-independent aliases used
  * in SN0.
  *     CALIAS -- Varies in size, points to the first n bytes of memory
- *                     on the reader's node.
+ *                     on the reader's node.
  */
 
 #define CALIAS_BASE            CAC_BASE
 
 #ifndef __ASSEMBLY__
 #define KERN_NMI_ADDR(nasid, slice)                                    \
-                    TO_NODE_UNCAC((nasid), IP27_NMI_KREGS_OFFSET +     \
+                   TO_NODE_UNCAC((nasid), IP27_NMI_KREGS_OFFSET +      \
                                  (IP27_NMI_KREGS_CPU_SIZE * (slice)))
 #endif /* !__ASSEMBLY__ */
 
 
 #define IO6PROM_BASE           PHYS_TO_K0(0x01c00000)
 #define IO6PROM_SIZE           0x400000
-#define        IO6PROM_BASE_MAPPED     (UNCAC_BASE | 0x11c00000)
+#define IO6PROM_BASE_MAPPED    (UNCAC_BASE | 0x11c00000)
 #define IO6DPROM_BASE          PHYS_TO_K0(0x01c00000)
 #define IO6DPROM_SIZE          0x200000
 
index f734f20..425a67e 100644 (file)
 #define _ASM_SN_SN0_ARCH_H
 
 
-#ifndef SN0XXL  /* 128 cpu SMP max */
+#ifndef SN0XXL /* 128 cpu SMP max */
 /*
  * This is the maximum number of nodes that can be part of a kernel.
  * Effectively, it's the maximum number of compact node ids (cnodeid_t).
  */
-#define MAX_COMPACT_NODES       64
+#define MAX_COMPACT_NODES      64
 
 /*
  * MAXCPUS refers to the maximum number of CPUs in a single kernel.
  * This is not necessarily the same as MAXNODES * CPUS_PER_NODE
  */
-#define MAXCPUS                 128
+#define MAXCPUS                        128
 
 #else /* SN0XXL system */
 
-#define MAX_COMPACT_NODES       128
-#define MAXCPUS                 256
+#define MAX_COMPACT_NODES      128
+#define MAXCPUS                        256
 
 #endif /* SN0XXL */
 
@@ -41,9 +41,9 @@
 /*
  * MAX_REGIONS refers to the maximum number of hardware partitioned regions.
  */
-#define        MAX_REGIONS             64
-#define MAX_NONPREMIUM_REGIONS  16
-#define MAX_PREMIUM_REGIONS     MAX_REGIONS
+#define MAX_REGIONS            64
+#define MAX_NONPREMIUM_REGIONS 16
+#define MAX_PREMIUM_REGIONS    MAX_REGIONS
 
 /*
  * MAX_PARITIONS refers to the maximum number of logically defined
  * Slot constants for SN0
  */
 #ifdef CONFIG_SGI_SN_N_MODE
-#define MAX_MEM_SLOTS   16                      /* max slots per node */
+#define MAX_MEM_SLOTS  16                      /* max slots per node */
 #else /* !CONFIG_SGI_SN_N_MODE, assume CONFIG_SGI_SN_M_MODE */
-#define MAX_MEM_SLOTS   32                      /* max slots per node */
+#define MAX_MEM_SLOTS  32                      /* max slots per node */
 #endif /* CONFIG_SGI_SN_M_MODE */
 
-#define SLOT_SHIFT             (27)
+#define SLOT_SHIFT             (27)
 #define SLOT_MIN_MEM_SIZE      (32*1024*1024)
 
 #define CPUS_PER_NODE          2       /* CPUs on a single hub */
index 3e228f8..d78dd76 100644 (file)
@@ -19,8 +19,8 @@
 #define HUB_REV_2_0            2
 #define HUB_REV_2_1            3
 #define HUB_REV_2_2            4
-#define HUB_REV_2_3             5
-#define HUB_REV_2_4             6
+#define HUB_REV_2_3            5
+#define HUB_REV_2_4            6
 
 #define MAX_HUB_PATH           80
 
@@ -32,9 +32,9 @@
 //#include <asm/sn/sn0/hubcore.h>
 
 /* Translation of uncached attributes */
-#define        UATTR_HSPEC     0
-#define        UATTR_IO        1
-#define        UATTR_MSPEC     2
-#define        UATTR_UNCAC     3
+#define UATTR_HSPEC    0
+#define UATTR_IO       1
+#define UATTR_MSPEC    2
+#define UATTR_UNCAC    3
 
 #endif /* _ASM_SN_SN0_HUB_H */
index 46286d8..5998b13 100644 (file)
@@ -8,8 +8,8 @@
  * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
  * Copyright (C) 1999 by Ralf Baechle
  */
-#ifndef        _ASM_SGI_SN_SN0_HUBIO_H
-#define        _ASM_SGI_SN_SN0_HUBIO_H
+#ifndef _ASM_SGI_SN_SN0_HUBIO_H
+#define _ASM_SGI_SN_SN0_HUBIO_H
 
 /*
  * Hub I/O interface registers
@@ -22,7 +22,7 @@
  * Slightly friendlier names for some common registers.
  * The hardware definitions follow.
  */
-#define IIO_WIDGET             IIO_WID      /* Widget identification */
+#define IIO_WIDGET             IIO_WID      /* Widget identification */
 #define IIO_WIDGET_STAT                IIO_WSTAT    /* Widget status register */
 #define IIO_WIDGET_CTRL                IIO_WCR      /* Widget control register */
 #define IIO_WIDGET_TOUT                IIO_WRTO     /* Widget request timeout */
 #define IIO_XTALKCC_TOUT       IIO_IXCC     /* Xtalk credit count timeout*/
 #define IIO_XTALKTT_TOUT       IIO_IXTT     /* Xtalk tail timeout */
 #define IIO_IO_ERR_CLR         IIO_IECLR    /* IO error clear */
-#define IIO_BTE_CRB_CNT         IIO_IBCN     /* IO BTE CRB count */
+#define IIO_BTE_CRB_CNT                IIO_IBCN     /* IO BTE CRB count */
 
 #define IIO_LLP_CSR_IS_UP              0x00002000
-#define        IIO_LLP_CSR_LLP_STAT_MASK       0x00003000
-#define        IIO_LLP_CSR_LLP_STAT_SHFT       12
+#define IIO_LLP_CSR_LLP_STAT_MASK      0x00003000
+#define IIO_LLP_CSR_LLP_STAT_SHFT      12
 
 /* key to IIO_PROTECT_OVRRD */
 #define IIO_PROTECT_OVRRD_KEY  0x53474972756c6573ull   /* "SGIrules" */
 
 /* BTE register names */
 #define IIO_BTE_STAT_0         IIO_IBLS_0   /* Also BTE length/status 0 */
-#define IIO_BTE_SRC_0          IIO_IBSA_0   /* Also BTE source address  0 */
+#define IIO_BTE_SRC_0          IIO_IBSA_0   /* Also BTE source address  0 */
 #define IIO_BTE_DEST_0         IIO_IBDA_0   /* Also BTE dest. address 0 */
 #define IIO_BTE_CTRL_0         IIO_IBCT_0   /* Also BTE control/terminate 0 */
-#define IIO_BTE_NOTIFY_0       IIO_IBNA_0   /* Also BTE notification 0 */
+#define IIO_BTE_NOTIFY_0       IIO_IBNA_0   /* Also BTE notification 0 */
 #define IIO_BTE_INT_0          IIO_IBIA_0   /* Also BTE interrupt 0 */
 #define IIO_BTE_OFF_0          0            /* Base offset from BTE 0 regs. */
 #define IIO_BTE_OFF_1  IIO_IBLS_1 - IIO_IBLS_0 /* Offset from base to BTE 1 */
 #define IIO_WSTAT      0x400008        /* Widget status */
 #define IIO_WCR                0x400020        /* Widget control */
 
-#define        IIO_WSTAT_ECRAZY        (1ULL << 32)    /* Hub gone crazy */
-#define        IIO_WSTAT_TXRETRY       (1ULL << 9)     /* Hub Tx Retry timeout */
-#define        IIO_WSTAT_TXRETRY_MASK  (0x7F)
-#define        IIO_WSTAT_TXRETRY_SHFT  (16)
-#define        IIO_WSTAT_TXRETRY_CNT(w)        (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+#define IIO_WSTAT_ECRAZY       (1ULL << 32)    /* Hub gone crazy */
+#define IIO_WSTAT_TXRETRY      (1ULL << 9)     /* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK (0x7F)
+#define IIO_WSTAT_TXRETRY_SHFT (16)
+#define IIO_WSTAT_TXRETRY_CNT(w)       (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
                                          IIO_WSTAT_TXRETRY_MASK)
 
 #define IIO_ILAPR      0x400100        /* Local Access Protection */
 #define IIO_IGFX_INIT(widget, node, cpu, valid)                                (\
        (((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) |     \
        (((node)   & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) |     \
-       (((cpu)    & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) |     \
-       (((valid)  & IIO_IGFX_VLD_MASK)   << IIO_IGFX_VLD_SHIFT)         )
+       (((cpu)    & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) |     \
+       (((valid)  & IIO_IGFX_VLD_MASK)   << IIO_IGFX_VLD_SHIFT)         )
 
 /* Scratch registers (not all bits available) */
 #define IIO_SCRATCH_REG0       0x400150
-#define        IIO_SCRATCH_REG1        0x400158
+#define IIO_SCRATCH_REG1       0x400158
 #define IIO_SCRATCH_MASK       0x0000000f00f11fff
 
 #define IIO_SCRATCH_BIT0_0     0x0000000800000000
 typedef union hubii_wid_u {
        u64     wid_reg_value;
        struct {
-               u64     wid_rsvd:       32,     /* unused */
+               u64     wid_rsvd:       32,     /* unused */
                        wid_rev_num:     4,     /* revision number */
                        wid_part_num:   16,     /* the widget type: hub=c101 */
                        wid_mfg_num:    11,     /* Manufacturer id (IBM) */
                        wid_rsvd1:       1;     /* Reserved */
-        } wid_fields_s;
+       } wid_fields_s;
 } hubii_wid_t;
 
 
 typedef union hubii_wcr_u {
        u64     wcr_reg_value;
        struct {
-               u64     wcr_rsvd:       41,     /* unused */
+               u64     wcr_rsvd:       41,     /* unused */
                        wcr_e_thresh:    5,     /* elasticity threshold */
                        wcr_dir_con:     1,     /* widget direct connect */
                        wcr_f_bad_pkt:   1,     /* Force bad llp pkt enable */
                        wcr_xbar_crd:    3,     /* LLP crossbar credit */
                        wcr_rsvd1:       8,     /* Reserved */
-                       wcr_tag_mode:    1,     /* Tag mode */
+                       wcr_tag_mode:    1,     /* Tag mode */
                        wcr_widget_id:   4;     /* LLP crossbar credit */
-        } wcr_fields_s;
+       } wcr_fields_s;
 } hubii_wcr_t;
 
-#define        iwcr_dir_con    wcr_fields_s.wcr_dir_con
+#define iwcr_dir_con   wcr_fields_s.wcr_dir_con
 
 typedef union hubii_wstat_u {
-       u64      reg_value;
+       u64      reg_value;
        struct {
                u64     rsvd1:          31,
                        crazy:           1,     /* Crazy bit            */
                        rsvd2:           8,
-                       llp_tx_cnt:      8,     /* LLP Xmit retry counter */
+                       llp_tx_cnt:      8,     /* LLP Xmit retry counter */
                        rsvd3:           6,
                        tx_max_rtry:     1,     /* LLP Retry Timeout Signal */
                        rsvd4:           2,
                        xt_tail_to:      1,     /* Xtalk Tail Timeout   */
-                       xt_crd_to:       1,     /* Xtalk Credit Timeout */
+                       xt_crd_to:       1,     /* Xtalk Credit Timeout */
                        pending:         4;     /* Pending Requests     */
        } wstat_fields_s;
 } hubii_wstat_t;
@@ -219,50 +219,50 @@ typedef union hubii_wstat_u {
 typedef union hubii_ilcsr_u {
        u64     icsr_reg_value;
        struct {
-               u64     icsr_rsvd:      22,     /* unused */
-                       icsr_max_burst: 10,     /* max burst */
-                        icsr_rsvd4:     6,     /* reserved */
-                       icsr_max_retry: 10,     /* max retry */
-                        icsr_rsvd3:     2,     /* reserved */
-                        icsr_lnk_stat:  2,     /* link status */
-                        icsr_bm8:       1,     /* Bit mode 8 */
-                        icsr_llp_en:    1,     /* LLP enable bit */
-                       icsr_rsvd2:      1,     /* reserver */
-                        icsr_wrm_reset:         1,     /* Warm reset bit */
+               u64     icsr_rsvd:      22,     /* unused */
+                       icsr_max_burst: 10,     /* max burst */
+                       icsr_rsvd4:      6,     /* reserved */
+                       icsr_max_retry: 10,     /* max retry */
+                       icsr_rsvd3:      2,     /* reserved */
+                       icsr_lnk_stat:   2,     /* link status */
+                       icsr_bm8:        1,     /* Bit mode 8 */
+                       icsr_llp_en:     1,     /* LLP enable bit */
+                       icsr_rsvd2:      1,     /* reserver */
+                       icsr_wrm_reset:  1,     /* Warm reset bit */
                        icsr_rsvd1:      2,     /* Data ready offset */
-                        icsr_null_to:   6;     /* Null timeout   */
+                       icsr_null_to:    6;     /* Null timeout   */
 
-        } icsr_fields_s;
+       } icsr_fields_s;
 } hubii_ilcsr_t;
 
 
 typedef union hubii_iowa_u {
        u64     iowa_reg_value;
        struct {
-               u64     iowa_rsvd:      48,     /* unused */
+               u64     iowa_rsvd:      48,     /* unused */
                        iowa_wxoac:      8,     /* xtalk widget access bits */
                        iowa_rsvd1:      7,     /* xtalk widget access bits */
                        iowa_w0oac:      1;     /* xtalk widget access bits */
-        } iowa_fields_s;
+       } iowa_fields_s;
 } hubii_iowa_t;
 
 typedef union hubii_iiwa_u {
        u64     iiwa_reg_value;
        struct {
-               u64     iiwa_rsvd:      48,     /* unused */
+               u64     iiwa_rsvd:      48,     /* unused */
                        iiwa_wxiac:      8,     /* hub wid access bits */
                        iiwa_rsvd1:      7,     /* reserved */
                        iiwa_w0iac:      1;     /* hub wid0 access */
-        } iiwa_fields_s;
+       } iiwa_fields_s;
 } hubii_iiwa_t;
 
 typedef union  hubii_illr_u {
        u64     illr_reg_value;
        struct {
-               u64     illr_rsvd:      32,     /* unused */
+               u64     illr_rsvd:      32,     /* unused */
                        illr_cb_cnt:    16,     /* checkbit error count */
                        illr_sn_cnt:    16;     /* sequence number count */
-        } illr_fields_s;
+       } illr_fields_s;
 } hubii_illr_t;
 
 /* The structures below are defined to extract and modify the ii
@@ -273,7 +273,7 @@ performance registers */
 typedef union io_perf_sel {
        u64 perf_sel_reg;
        struct {
-               u64     perf_rsvd  : 48,
+               u64     perf_rsvd  : 48,
                        perf_icct  :  8,
                        perf_ippr1 :  4,
                        perf_ippr0 :  4;
@@ -301,7 +301,7 @@ typedef union io_perf_cnt {
 #define IIO_LLP_SN_MAX 0xffff
 
 /* IO PRB Entries */
-#define        IIO_NUM_IPRBS   (9)
+#define IIO_NUM_IPRBS  (9)
 #define IIO_IOPRB_0    0x400198        /* PRB entry 0 */
 #define IIO_IOPRB_8    0x4001a0        /* PRB entry 8 */
 #define IIO_IOPRB_9    0x4001a8        /* PRB entry 9 */
@@ -318,21 +318,21 @@ typedef union io_perf_cnt {
 #define IIO_IMEM       0x4001e8        /* Miscellaneous Enable Mask */
 #define IIO_IXTT       0x4001f0        /* Crosstalk tail timeout */
 #define IIO_IECLR      0x4001f8        /* IO error clear */
-#define IIO_IBCN        0x400200        /* IO BTE CRB count */
+#define IIO_IBCN       0x400200        /* IO BTE CRB count */
 
 /*
  * IIO_IMEM Register fields.
  */
-#define IIO_IMEM_W0ESD  0x1             /* Widget 0 shut down due to error */
-#define IIO_IMEM_B0ESD  (1 << 4)        /* BTE 0 shut down due to error */
-#define IIO_IMEM_B1ESD  (1 << 8)        /* BTE 1 Shut down due to error */
+#define IIO_IMEM_W0ESD 0x1             /* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD (1 << 4)        /* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD (1 << 8)        /* BTE 1 Shut down due to error */
 
 /* PIO Read address Table Entries */
 #define IIO_IPCA       0x400300        /* PRB Counter adjust */
 #define IIO_NUM_PRTES  8               /* Total number of PRB table entries */
 #define IIO_PRTE_0     0x400308        /* PIO Read address table entry 0 */
 #define IIO_PRTE(_x)   (IIO_PRTE_0 + (8 * (_x)))
-#define        IIO_WIDPRTE(x)  IIO_PRTE(((x) - 8)) /* widget ID to its PRTE num */
+#define IIO_WIDPRTE(x) IIO_PRTE(((x) - 8)) /* widget ID to its PRTE num */
 #define IIO_IPDR       0x400388        /* PIO table entry deallocation */
 #define IIO_ICDR       0x400390        /* CRB Entry Deallocation */
 #define IIO_IFDR       0x400398        /* IOQ FIFO Depth */
@@ -369,35 +369,35 @@ typedef union io_perf_cnt {
 /*
  * IIO PIO Deallocation register field masks : (IIO_IPDR)
  */
-#define        IIO_IPDR_PND    (1 << 4)
+#define IIO_IPDR_PND   (1 << 4)
 
 /*
  * IIO CRB deallocation register field masks: (IIO_ICDR)
  */
-#define        IIO_ICDR_PND    (1 << 4)
+#define IIO_ICDR_PND   (1 << 4)
 
 /*
  * IIO CRB control register Fields: IIO_ICCR
  */
-#define        IIO_ICCR_PENDING        (0x10000)
-#define        IIO_ICCR_CMD_MASK       (0xFF)
-#define        IIO_ICCR_CMD_SHFT       (7)
-#define        IIO_ICCR_CMD_NOP        (0x0)   /* No Op */
-#define        IIO_ICCR_CMD_WAKE       (0x100) /* Reactivate CRB entry and process */
-#define        IIO_ICCR_CMD_TIMEOUT    (0x200) /* Make CRB timeout & mark invalid */
-#define        IIO_ICCR_CMD_EJECT      (0x400) /* Contents of entry written to memory
+#define IIO_ICCR_PENDING       (0x10000)
+#define IIO_ICCR_CMD_MASK      (0xFF)
+#define IIO_ICCR_CMD_SHFT      (7)
+#define IIO_ICCR_CMD_NOP       (0x0)   /* No Op */
+#define IIO_ICCR_CMD_WAKE      (0x100) /* Reactivate CRB entry and process */
+#define IIO_ICCR_CMD_TIMEOUT   (0x200) /* Make CRB timeout & mark invalid */
+#define IIO_ICCR_CMD_EJECT     (0x400) /* Contents of entry written to memory
                                         * via a WB
                                         */
-#define        IIO_ICCR_CMD_FLUSH      (0x800)
+#define IIO_ICCR_CMD_FLUSH     (0x800)
 
 /*
  * CRB manipulation macros
  *     The CRB macros are slightly complicated, since there are up to
- *     four registers associated with each CRB entry.
+ *     four registers associated with each CRB entry.
  */
 #define IIO_NUM_CRBS           15      /* Number of CRBs */
-#define IIO_NUM_NORMAL_CRBS     12     /* Number of regular CRB entries */
-#define IIO_NUM_PC_CRBS        4       /* Number of partial cache CRBs */
+#define IIO_NUM_NORMAL_CRBS    12      /* Number of regular CRB entries */
+#define IIO_NUM_PC_CRBS                4       /* Number of partial cache CRBs */
 #define IIO_ICRB_OFFSET                8
 #define IIO_ICRB_0             0x400400
 /* XXX - This is now tuneable:
@@ -405,9 +405,9 @@ typedef union io_perf_cnt {
  */
 
 #define IIO_ICRB_A(_x) (IIO_ICRB_0 + (4 * IIO_ICRB_OFFSET * (_x)))
-#define IIO_ICRB_B(_x)  (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
+#define IIO_ICRB_B(_x) (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
 #define IIO_ICRB_C(_x) (IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)
-#define IIO_ICRB_D(_x)  (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
+#define IIO_ICRB_D(_x) (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
 
 /* XXX - IBUE register coming for Hub 2 */
 
@@ -444,16 +444,16 @@ typedef union io_perf_cnt {
 typedef union icrba_u {
        u64     reg_value;
        struct {
-               u64     resvd:  6,
+               u64     resvd:  6,
                        stall_bte0: 1,  /* Stall BTE 0 */
                        stall_bte1: 1,  /* Stall BTE 1 */
                        error:  1,      /* CRB has an error     */
-                       ecode:  3,      /* Error Code           */
+                       ecode:  3,      /* Error Code           */
                        lnetuce: 1,     /* SN0net Uncorrectable error */
-                       mark:   1,      /* CRB Has been marked  */
+                       mark:   1,      /* CRB Has been marked  */
                        xerr:   1,      /* Error bit set in xtalk header */
                        sidn:   4,      /* SIDN field from xtalk        */
-                       tnum:   5,      /* TNUM field in xtalk          */
+                       tnum:   5,      /* TNUM field in xtalk          */
                        addr:   38,     /* Address of request   */
                        valid:  1,      /* Valid status         */
                        iow:    1;      /* IO Write operation   */
@@ -467,15 +467,15 @@ typedef union h1_icrba_u {
        u64     reg_value;
 
        struct {
-               u64     resvd:  6,
-                       unused: 1,      /* Unused but RW!!      */
+               u64     resvd:  6,
+                       unused: 1,      /* Unused but RW!!      */
                        error:  1,      /* CRB has an error     */
-                       ecode:  4,      /* Error Code           */
+                       ecode:  4,      /* Error Code           */
                        lnetuce: 1,     /* SN0net Uncorrectable error */
-                       mark:   1,      /* CRB Has been marked  */
+                       mark:   1,      /* CRB Has been marked  */
                        xerr:   1,      /* Error bit set in xtalk header */
                        sidn:   4,      /* SIDN field from xtalk        */
-                       tnum:   5,      /* TNUM field in xtalk          */
+                       tnum:   5,      /* TNUM field in xtalk          */
                        addr:   38,     /* Address of request   */
                        valid:  1,      /* Valid status         */
                        iow:    1;      /* IO Write operation   */
@@ -488,21 +488,21 @@ typedef union h1_icrba_u {
 
 #endif /* !__ASSEMBLY__ */
 
-#define        IIO_ICRB_ADDR_SHFT      2       /* Shift to get proper address */
+#define IIO_ICRB_ADDR_SHFT     2       /* Shift to get proper address */
 
 /*
  * values for "ecode" field
  */
-#define        IIO_ICRB_ECODE_DERR     0       /* Directory error due to IIO access */
-#define        IIO_ICRB_ECODE_PERR     1       /* Poison error on IO access */
-#define        IIO_ICRB_ECODE_WERR     2       /* Write error by IIO access
+#define IIO_ICRB_ECODE_DERR    0       /* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR    1       /* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR    2       /* Write error by IIO access
                                         * e.g. WINV to a Read only line.
                                         */
-#define        IIO_ICRB_ECODE_AERR     3       /* Access error caused by IIO access */
-#define        IIO_ICRB_ECODE_PWERR    4       /* Error on partial write       */
-#define        IIO_ICRB_ECODE_PRERR    5       /* Error on partial read        */
-#define        IIO_ICRB_ECODE_TOUT     6       /* CRB timeout before deallocating */
-#define        IIO_ICRB_ECODE_XTERR    7       /* Incoming xtalk pkt had error bit */
+#define IIO_ICRB_ECODE_AERR    3       /* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR   4       /* Error on partial write       */
+#define IIO_ICRB_ECODE_PRERR   5       /* Error on partial read        */
+#define IIO_ICRB_ECODE_TOUT    6       /* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR   7       /* Incoming xtalk pkt had error bit */
 
 
 
@@ -513,10 +513,10 @@ typedef union h1_icrba_u {
 typedef union icrbb_u {
        u64     reg_value;
        struct {
-           u64 rsvd1:  5,
-               btenum: 1,      /* BTE to which entry belongs to */
-               cohtrans: 1,    /* Coherent transaction */
-               xtsize: 2,      /* Xtalk operation size
+           u64 rsvd1:  5,
+               btenum: 1,      /* BTE to which entry belongs to */
+               cohtrans: 1,    /* Coherent transaction */
+               xtsize: 2,      /* Xtalk operation size
                                 * 0: Double Word
                                 * 1: 32 Bytes.
                                 * 2: 128 Bytes,
@@ -526,11 +526,11 @@ typedef union icrbb_u {
                srcinit: 2,     /* Source Initiator:
                                 * See below for field values.
                                 */
-               useold: 1,      /* Use OLD command for processing */
+               useold: 1,      /* Use OLD command for processing */
                imsgtype: 2,    /* Incoming message type
                                 * see below for field values
                                 */
-               imsg:   8,      /* Incoming message     */
+               imsg:   8,      /* Incoming message     */
                initator: 3,    /* Initiator of original request
                                 * See below for field values.
                                 */
@@ -538,12 +538,12 @@ typedef union icrbb_u {
                                 * See below for field values.
                                 */
                rsvd2:  7,
-               ackcnt: 11,     /* Invalidate ack count */
+               ackcnt: 11,     /* Invalidate ack count */
                resp:   1,      /* data response  given to processor */
-               ack:    1,      /* indicates data ack received  */
+               ack:    1,      /* indicates data ack received  */
                hold:   1,      /* entry is gathering inval acks */
                wb_pend:1,      /* waiting for writeback to complete */
-               intvn:  1,      /* Intervention */
+               intvn:  1,      /* Intervention */
                stall_ib: 1,    /* Stall Ibuf (from crosstalk) */
                stall_intr: 1;  /* Stall internal interrupts */
        } icrbb_field_s;
@@ -556,9 +556,9 @@ typedef union h1_icrbb_u {
        u64     reg_value;
        struct {
                u64     rsvd1:  5,
-                       btenum: 1,      /* BTE to which entry belongs to */
-                       cohtrans: 1,    /* Coherent transaction */
-                       xtsize: 2,      /* Xtalk operation size
+                       btenum: 1,      /* BTE to which entry belongs to */
+                       cohtrans: 1,    /* Coherent transaction */
+                       xtsize: 2,      /* Xtalk operation size
                                         * 0: Double Word
                                         * 1: 32 Bytes.
                                         * 2: 128 Bytes,
@@ -568,99 +568,99 @@ typedef union h1_icrbb_u {
                        srcinit: 2,     /* Source Initiator:
                                         * See below for field values.
                                         */
-                       useold: 1,      /* Use OLD command for processing */
+                       useold: 1,      /* Use OLD command for processing */
                        imsgtype: 2,    /* Incoming message type
                                         * see below for field values
                                         */
-                       imsg:   8,      /* Incoming message     */
+                       imsg:   8,      /* Incoming message     */
                        initator: 3,    /* Initiator of original request
                                         * See below for field values.
                                         */
-                       rsvd2:  1,
+                       rsvd2:  1,
                        pcache: 1,      /* entry belongs to partial cache */
                        reqtype: 5,     /* Identifies type of request
                                         * See below for field values.
                                         */
-                       stl_ib: 1,      /* stall Ibus coming from xtalk */
+                       stl_ib: 1,      /* stall Ibus coming from xtalk */
                        stl_intr: 1,    /* Stall internal interrupts */
-                       stl_bte0: 1,    /* Stall BTE 0  */
+                       stl_bte0: 1,    /* Stall BTE 0  */
                        stl_bte1: 1,    /* Stall BTE 1  */
-                       intrvn: 1,      /* Req was target of intervention */
-                       ackcnt: 11,     /* Invalidate ack count */
+                       intrvn: 1,      /* Req was target of intervention */
+                       ackcnt: 11,     /* Invalidate ack count */
                        resp:   1,      /* data response  given to processor */
-                       ack:    1,      /* indicates data ack received  */
+                       ack:    1,      /* indicates data ack received  */
                        hold:   1,      /* entry is gathering inval acks */
                        wb_pend:1,      /* waiting for writeback to complete */
-                       sleep:  1,      /* xtalk req sleeping till IO-sync */
+                       sleep:  1,      /* xtalk req sleeping till IO-sync */
                        pnd_reply: 1,   /* replies not issed due to IOQ full */
                        pnd_req: 1;     /* reqs not issued due to IOQ full */
        } h1_icrbb_field_s;
 } h1_icrbb_t;
 
 
-#define        b_imsgtype      icrbb_field_s.imsgtype
-#define        b_btenum        icrbb_field_s.btenum
-#define        b_cohtrans      icrbb_field_s.cohtrans
-#define        b_xtsize        icrbb_field_s.xtsize
-#define        b_srcnode       icrbb_field_s.srcnode
-#define        b_srcinit       icrbb_field_s.srcinit
-#define        b_imsgtype      icrbb_field_s.imsgtype
-#define        b_imsg          icrbb_field_s.imsg
-#define        b_initiator     icrbb_field_s.initiator
+#define b_imsgtype     icrbb_field_s.imsgtype
+#define b_btenum       icrbb_field_s.btenum
+#define b_cohtrans     icrbb_field_s.cohtrans
+#define b_xtsize       icrbb_field_s.xtsize
+#define b_srcnode      icrbb_field_s.srcnode
+#define b_srcinit      icrbb_field_s.srcinit
+#define b_imsgtype     icrbb_field_s.imsgtype
+#define b_imsg         icrbb_field_s.imsg
+#define b_initiator    icrbb_field_s.initiator
 
 #endif /* !__ASSEMBLY__ */
 
 /*
  * values for field xtsize
  */
-#define        IIO_ICRB_XTSIZE_DW      0       /* Xtalk operation size is 8 bytes  */
-#define        IIO_ICRB_XTSIZE_32      1       /* Xtalk operation size is 32 bytes */
-#define        IIO_ICRB_XTSIZE_128     2       /* Xtalk operation size is 128 bytes */
+#define IIO_ICRB_XTSIZE_DW     0       /* Xtalk operation size is 8 bytes  */
+#define IIO_ICRB_XTSIZE_32     1       /* Xtalk operation size is 32 bytes */
+#define IIO_ICRB_XTSIZE_128    2       /* Xtalk operation size is 128 bytes */
 
 /*
  * values for field srcinit
  */
-#define        IIO_ICRB_PROC0          0       /* Source of request is Proc 0 */
-#define        IIO_ICRB_PROC1          1       /* Source of request is Proc 1 */
-#define        IIO_ICRB_GB_REQ         2       /* Source is Guaranteed BW request */
-#define        IIO_ICRB_IO_REQ         3       /* Source is Normal IO request  */
+#define IIO_ICRB_PROC0         0       /* Source of request is Proc 0 */
+#define IIO_ICRB_PROC1         1       /* Source of request is Proc 1 */
+#define IIO_ICRB_GB_REQ                2       /* Source is Guaranteed BW request */
+#define IIO_ICRB_IO_REQ                3       /* Source is Normal IO request  */
 
 /*
  * Values for field imsgtype
  */
-#define        IIO_ICRB_IMSGT_XTALK    0       /* Incoming Meessage from Xtalk */
-#define        IIO_ICRB_IMSGT_BTE      1       /* Incoming message from BTE    */
-#define        IIO_ICRB_IMSGT_SN0NET   2       /* Incoming message from SN0 net */
-#define        IIO_ICRB_IMSGT_CRB      3       /* Incoming message from CRB ???  */
+#define IIO_ICRB_IMSGT_XTALK   0       /* Incoming Meessage from Xtalk */
+#define IIO_ICRB_IMSGT_BTE     1       /* Incoming message from BTE    */
+#define IIO_ICRB_IMSGT_SN0NET  2       /* Incoming message from SN0 net */
+#define IIO_ICRB_IMSGT_CRB     3       /* Incoming message from CRB ???  */
 
 /*
  * values for field initiator.
  */
-#define        IIO_ICRB_INIT_XTALK     0       /* Message originated in xtalk  */
-#define        IIO_ICRB_INIT_BTE0      0x1     /* Message originated in BTE 0  */
-#define        IIO_ICRB_INIT_SN0NET    0x2     /* Message originated in SN0net */
-#define        IIO_ICRB_INIT_CRB       0x3     /* Message originated in CRB ?  */
-#define        IIO_ICRB_INIT_BTE1      0x5     /* MEssage originated in BTE 1  */
+#define IIO_ICRB_INIT_XTALK    0       /* Message originated in xtalk  */
+#define IIO_ICRB_INIT_BTE0     0x1     /* Message originated in BTE 0  */
+#define IIO_ICRB_INIT_SN0NET   0x2     /* Message originated in SN0net */
+#define IIO_ICRB_INIT_CRB      0x3     /* Message originated in CRB ?  */
+#define IIO_ICRB_INIT_BTE1     0x5     /* MEssage originated in BTE 1  */
 
 /*
  * Values for field reqtype.
  */
 /* XXX - Need to fix this for Hub 2 */
-#define        IIO_ICRB_REQ_DWRD       0       /* Request type double word     */
-#define        IIO_ICRB_REQ_QCLRD      1       /* Request is Qrtr Caceh line Rd */
-#define        IIO_ICRB_REQ_BLKRD      2       /* Request is block read        */
-#define        IIO_ICRB_REQ_RSHU       6       /* Request is BTE block read    */
-#define        IIO_ICRB_REQ_REXU       7       /* request is BTE Excl Read     */
-#define        IIO_ICRB_REQ_RDEX       8       /* Request is Read Exclusive    */
-#define        IIO_ICRB_REQ_WINC       9       /* Request is Write Invalidate  */
-#define        IIO_ICRB_REQ_BWINV      10      /* Request is BTE Winv          */
-#define        IIO_ICRB_REQ_PIORD      11      /* Request is PIO read          */
-#define        IIO_ICRB_REQ_PIOWR      12      /* Request is PIO Write         */
-#define        IIO_ICRB_REQ_PRDM       13      /* Request is Fetch&Op          */
-#define        IIO_ICRB_REQ_PWRM       14      /* Request is Store &Op         */
-#define        IIO_ICRB_REQ_PTPWR      15      /* Request is Peer to peer      */
-#define        IIO_ICRB_REQ_WB         16      /* Request is Write back        */
-#define        IIO_ICRB_REQ_DEX        17      /* Retained DEX Cache line      */
+#define IIO_ICRB_REQ_DWRD      0       /* Request type double word     */
+#define IIO_ICRB_REQ_QCLRD     1       /* Request is Qrtr Caceh line Rd */
+#define IIO_ICRB_REQ_BLKRD     2       /* Request is block read        */
+#define IIO_ICRB_REQ_RSHU      6       /* Request is BTE block read    */
+#define IIO_ICRB_REQ_REXU      7       /* request is BTE Excl Read     */
+#define IIO_ICRB_REQ_RDEX      8       /* Request is Read Exclusive    */
+#define IIO_ICRB_REQ_WINC      9       /* Request is Write Invalidate  */
+#define IIO_ICRB_REQ_BWINV     10      /* Request is BTE Winv          */
+#define IIO_ICRB_REQ_PIORD     11      /* Request is PIO read          */
+#define IIO_ICRB_REQ_PIOWR     12      /* Request is PIO Write         */
+#define IIO_ICRB_REQ_PRDM      13      /* Request is Fetch&Op          */
+#define IIO_ICRB_REQ_PWRM      14      /* Request is Store &Op         */
+#define IIO_ICRB_REQ_PTPWR     15      /* Request is Peer to peer      */
+#define IIO_ICRB_REQ_WB                16      /* Request is Write back        */
+#define IIO_ICRB_REQ_DEX       17      /* Retained DEX Cache line      */
 
 /*
  * Fields in CRB Register C
@@ -674,8 +674,8 @@ typedef union icrbc_s {
                u64     rsvd:   6,
                        sleep:  1,
                        pricnt: 4,      /* Priority count sent with Read req */
-                       pripsc: 4,      /* Priority Pre scalar  */
-                       bteop:  1,      /* BTE Operation        */
+                       pripsc: 4,      /* Priority Pre scalar  */
+                       bteop:  1,      /* BTE Operation        */
                        push_be: 34,    /* Push address Byte enable
                                         * Holds push addr, if CRB is for BTE
                                         * If CRB belongs to Partial cache,
@@ -684,20 +684,20 @@ typedef union icrbc_s {
                                         */
                        suppl:  11,     /* Supplemental field   */
                        barrop: 1,      /* Barrier Op bit set in xtalk req */
-                       doresp: 1,      /* Xtalk req needs a response   */
-                       gbr:    1;      /* GBR bit set in xtalk packet  */
+                       doresp: 1,      /* Xtalk req needs a response   */
+                       gbr:    1;      /* GBR bit set in xtalk packet  */
        } icrbc_field_s;
 } icrbc_t;
 
-#define        c_pricnt        icrbc_field_s.pricnt
-#define        c_pripsc        icrbc_field_s.pripsc
-#define        c_bteop         icrbc_field_s.bteop
-#define        c_bteaddr       icrbc_field_s.push_be   /* push_be field has 2 names */
-#define c_benable      icrbc_field_s.push_be   /* push_be field has 2 names */
-#define        c_suppl         icrbc_field_s.suppl
-#define        c_barrop        icrbc_field_s.barrop
-#define        c_doresp        icrbc_field_s.doresp
-#define        c_gbr   icrbc_field_s.gbr
+#define c_pricnt       icrbc_field_s.pricnt
+#define c_pripsc       icrbc_field_s.pripsc
+#define c_bteop                icrbc_field_s.bteop
+#define c_bteaddr      icrbc_field_s.push_be   /* push_be field has 2 names */
+#define c_benable      icrbc_field_s.push_be   /* push_be field has 2 names */
+#define c_suppl                icrbc_field_s.suppl
+#define c_barrop       icrbc_field_s.barrop
+#define c_doresp       icrbc_field_s.doresp
+#define c_gbr  icrbc_field_s.gbr
 #endif /* !__ASSEMBLY__ */
 
 /*
@@ -708,31 +708,31 @@ typedef union icrbc_s {
 typedef union icrbd_s {
        u64     reg_value;
        struct {
-           u64 rsvd:   38,
+           u64 rsvd:   38,
                toutvld: 1,     /* Timeout in progress for this CRB */
-               ctxtvld: 1,     /* Context field below is valid */
+               ctxtvld: 1,     /* Context field below is valid */
                rsvd2:  1,
-               context: 15,    /* Bit vector:
+               context: 15,    /* Bit vector:
                                 * Has a bit set for each CRB entry
                                 * which needs to be deallocated
                                 * before this CRB entry is processed.
                                 * Set only for barrier operations.
                                 */
-               timeout: 8;     /* Timeout Upper 8 bits */
+               timeout: 8;     /* Timeout Upper 8 bits */
        } icrbd_field_s;
 } icrbd_t;
 
-#define        icrbd_toutvld   icrbd_field_s.toutvld
-#define        icrbd_ctxtvld   icrbd_field_s.ctxtvld
-#define        icrbd_context   icrbd_field_s.context
+#define icrbd_toutvld  icrbd_field_s.toutvld
+#define icrbd_ctxtvld  icrbd_field_s.ctxtvld
+#define icrbd_context  icrbd_field_s.context
 
 
 typedef union hubii_ifdr_u {
        u64     hi_ifdr_value;
        struct {
                u64     ifdr_rsvd:      49,
-                       ifdr_maxrp:      7,
-                       ifdr_rsvd1:      1,
+                       ifdr_maxrp:      7,
+                       ifdr_rsvd1:      1,
                        ifdr_maxrq:      7;
        } hi_ifdr_fields;
 } hubii_ifdr_t;
@@ -789,26 +789,26 @@ typedef union hubii_ifdr_u {
 typedef union iprte_a {
        u64     entry;
        struct {
-           u64 rsvd1     : 7,  /* Reserved field               */
-               valid     : 1,  /* Maps to a timeout entry      */
-               rsvd2     : 1,
-               srcnode   : 9,  /* Node which did this PIO      */
-               initiator : 2,  /* If T5A or T5B or IO          */
-               rsvd3     : 3,
-               addr      : 38, /* Physical address of PIO      */
-               rsvd4     : 3;
+           u64 rsvd1     : 7,  /* Reserved field               */
+               valid     : 1,  /* Maps to a timeout entry      */
+               rsvd2     : 1,
+               srcnode   : 9,  /* Node which did this PIO      */
+               initiator : 2,  /* If T5A or T5B or IO          */
+               rsvd3     : 3,
+               addr      : 38, /* Physical address of PIO      */
+               rsvd4     : 3;
        } iprte_fields;
 } iprte_a_t;
 
-#define        iprte_valid     iprte_fields.valid
-#define        iprte_timeout   iprte_fields.timeout
-#define        iprte_srcnode   iprte_fields.srcnode
-#define        iprte_init      iprte_fields.initiator
-#define        iprte_addr      iprte_fields.addr
+#define iprte_valid    iprte_fields.valid
+#define iprte_timeout  iprte_fields.timeout
+#define iprte_srcnode  iprte_fields.srcnode
+#define iprte_init     iprte_fields.initiator
+#define iprte_addr     iprte_fields.addr
 
 #endif /* !__ASSEMBLY__ */
 
-#define        IPRTE_ADDRSHFT  3
+#define IPRTE_ADDRSHFT 3
 
 /*
  * Hub IIO PRB Register format.
@@ -823,14 +823,14 @@ typedef union iprte_a {
 typedef union iprb_u {
        u64     reg_value;
        struct {
-           u64 rsvd1:  15,
+           u64 rsvd1:  15,
                error:  1,      /* Widget rcvd wr resp pkt w/ error */
-               ovflow: 5,      /* Overflow count. perf measurement */
+               ovflow: 5,      /* Overflow count. perf measurement */
                fire_and_forget: 1, /* Launch Write without response */
                mode:   2,      /* Widget operation Mode        */
                rsvd2:  2,
                bnakctr: 14,
-               rsvd3:  2,
+               rsvd3:  2,
                anakctr: 14,
                xtalkctr: 8;
        } iprb_fields_s;
@@ -838,13 +838,13 @@ typedef union iprb_u {
 
 #define iprb_regval    reg_value
 
-#define        iprb_error      iprb_fields_s.error
-#define        iprb_ovflow     iprb_fields_s.ovflow
-#define        iprb_ff         iprb_fields_s.fire_and_forget
-#define        iprb_mode       iprb_fields_s.mode
-#define        iprb_bnakctr    iprb_fields_s.bnakctr
-#define        iprb_anakctr    iprb_fields_s.anakctr
-#define        iprb_xtalkctr   iprb_fields_s.xtalkctr
+#define iprb_error     iprb_fields_s.error
+#define iprb_ovflow    iprb_fields_s.ovflow
+#define iprb_ff                iprb_fields_s.fire_and_forget
+#define iprb_mode      iprb_fields_s.mode
+#define iprb_bnakctr   iprb_fields_s.bnakctr
+#define iprb_anakctr   iprb_fields_s.anakctr
+#define iprb_xtalkctr  iprb_fields_s.xtalkctr
 
 #endif /* !__ASSEMBLY__ */
 
@@ -853,10 +853,10 @@ typedef union iprb_u {
  * For details of the meanings of NAK and Accept, refer the PIO flow
  * document
  */
-#define        IPRB_MODE_NORMAL        (0)
-#define        IPRB_MODE_COLLECT_A     (1)     /* PRB in collect A mode */
-#define        IPRB_MODE_SERVICE_A     (2)     /* NAK B and Accept A */
-#define        IPRB_MODE_SERVICE_B     (3)     /* NAK A and Accept B */
+#define IPRB_MODE_NORMAL       (0)
+#define IPRB_MODE_COLLECT_A    (1)     /* PRB in collect A mode */
+#define IPRB_MODE_SERVICE_A    (2)     /* NAK B and Accept A */
+#define IPRB_MODE_SERVICE_B    (3)     /* NAK A and Accept B */
 
 /*
  * IO CRB entry C_A to E_A : Partial (cache) CRBS
@@ -865,31 +865,31 @@ typedef union iprb_u {
 typedef union icrbp_a {
        u64   ip_reg;       /* the entire register value        */
        struct {
-            u64 error: 1,  /*    63, error occurred            */
-               ln_uce: 1,  /*    62: uncorrectable memory      */
-               ln_ae:  1,  /*    61: protection violation      */
-               ln_werr:1,  /*    60: write access error        */
-               ln_aerr:1,  /*    59: sn0net: Address error     */
-               ln_perr:1,  /*    58: sn0net: poison error      */
-               timeout:1,  /*    57: CRB timed out             */
-               l_bdpkt:1,  /*    56: truncated pkt on sn0net   */
-               c_bdpkt:1,  /*    55: truncated pkt on xtalk    */
-               c_err:  1,  /*    54: incoming xtalk req, err set*/
+            u64 error: 1,  /*    63, error occurred            */
+               ln_uce: 1,  /*    62: uncorrectable memory      */
+               ln_ae:  1,  /*    61: protection violation      */
+               ln_werr:1,  /*    60: write access error        */
+               ln_aerr:1,  /*    59: sn0net: Address error     */
+               ln_perr:1,  /*    58: sn0net: poison error      */
+               timeout:1,  /*    57: CRB timed out             */
+               l_bdpkt:1,  /*    56: truncated pkt on sn0net   */
+               c_bdpkt:1,  /*    55: truncated pkt on xtalk    */
+               c_err:  1,  /*    54: incoming xtalk req, err set*/
                rsvd1: 12,  /* 53-42: reserved                  */
-               valid:  1,  /*    41: Valid status              */
+               valid:  1,  /*    41: Valid status              */
                sidn:   4,  /* 40-37: SIDN field of xtalk rqst  */
                tnum:   5,  /* 36-32: TNUM of xtalk request     */
-               bo:     1,  /*    31: barrier op set in xtalk rqst*/
-               resprqd:1,  /*    30: xtalk rqst requires response*/
-               gbr:    1,  /*    29: gbr bit set in xtalk rqst */
+               bo:     1,  /*    31: barrier op set in xtalk rqst*/
+               resprqd:1,  /*    30: xtalk rqst requires response*/
+               gbr:    1,  /*    29: gbr bit set in xtalk rqst */
                size:   2,  /* 28-27: size of xtalk request     */
                excl:   4,  /* 26-23: exclusive bit(s)          */
                stall:  3,  /* 22-20: stall (xtalk, bte 0/1)    */
-               intvn:  1,  /*    19: rqst target of intervention*/
-               resp:   1,  /*    18: Data response given to t5 */
-               ack:    1,  /*    17: Data ack received.        */
-               hold:   1,  /*    16: crb gathering invalidate acks*/
-               wb:     1,  /*    15: writeback pending.        */
+               intvn:  1,  /*    19: rqst target of intervention*/
+               resp:   1,  /*    18: Data response given to t5 */
+               ack:    1,  /*    17: Data ack received.        */
+               hold:   1,  /*    16: crb gathering invalidate acks*/
+               wb:     1,  /*    15: writeback pending.        */
                ack_cnt:11, /* 14-04: counter of invalidate acks*/
                tscaler:4;  /* 03-00: Timeout prescaler         */
        } ip_fmt;
@@ -908,13 +908,13 @@ typedef union hubii_idsr {
        u64 iin_reg;
        struct {
                u64 rsvd1 : 35,
-                   isent : 1,
-                   rsvd2 : 3,
-                   ienable: 1,
-                   rsvd  : 7,
-                   node  : 9,
-                   rsvd4 : 1,
-                   level : 7;
+                   isent : 1,
+                   rsvd2 : 3,
+                   ienable: 1,
+                   rsvd  : 7,
+                   node  : 9,
+                   rsvd4 : 1,
+                   level : 7;
        } iin_fmt;
 } hubii_idsr_t;
 #endif /* !__ASSEMBLY__ */
@@ -966,7 +966,7 @@ typedef union hubii_idsr {
  * Value of 3 is required by Xbow 1.1
  * We may be able to increase this to 4 with Xbow 1.2.
  */
-#define       HUBII_XBOW_CREDIT       3
+#define              HUBII_XBOW_CREDIT       3
 #define              HUBII_XBOW_REV2_CREDIT  4
 
 #endif /* _ASM_SGI_SN_SN0_HUBIO_H */
index 14c225d..305d002 100644 (file)
@@ -8,16 +8,16 @@
  * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
  * Copyright (C) 1999 by Ralf Baechle
  */
-#ifndef        _ASM_SN_SN0_HUBMD_H
-#define        _ASM_SN_SN0_HUBMD_H
+#ifndef _ASM_SN_SN0_HUBMD_H
+#define _ASM_SN_SN0_HUBMD_H
 
 
 /*
  * Hub Memory/Directory interface registers
  */
-#define CACHE_SLINE_SIZE        128    /* Secondary cache line size on SN0 */
+#define CACHE_SLINE_SIZE       128     /* Secondary cache line size on SN0 */
 
-#define        MAX_REGIONS             64
+#define MAX_REGIONS            64
 
 /* Hardware page size and shift */
 
 #define MD_IO_PROT_OVRRD       0x200008 /* Clear my bit in MD_IO_PROTECT   */
 #define MD_HSPEC_PROTECT       0x200010 /* BDDIR, LBOOT, RBOOT protection  */
 #define MD_MEMORY_CONFIG       0x200018 /* Memory/Directory DIMM control   */
-#define        MD_REFRESH_CONTROL      0x200020 /* Memory/Directory refresh ctrl   */
-#define        MD_FANDOP_CAC_STAT      0x200028 /* Fetch-and-op cache status       */
-#define        MD_MIG_DIFF_THRESH      0x200030 /* Page migr. count diff thresh.   */
-#define        MD_MIG_VALUE_THRESH     0x200038 /* Page migr. count abs. thresh.   */
-#define        MD_MIG_CANDIDATE        0x200040 /* Latest page migration candidate */
-#define        MD_MIG_CANDIDATE_CLR    0x200048 /* Clear page migration candidate  */
-#define MD_DIR_ERROR           0x200050 /* Directory DIMM error            */
-#define MD_DIR_ERROR_CLR       0x200058 /* Directory DIMM error clear      */
-#define MD_PROTOCOL_ERROR      0x200060 /* Directory protocol error        */
+#define MD_REFRESH_CONTROL     0x200020 /* Memory/Directory refresh ctrl   */
+#define MD_FANDOP_CAC_STAT     0x200028 /* Fetch-and-op cache status       */
+#define MD_MIG_DIFF_THRESH     0x200030 /* Page migr. count diff thresh.   */
+#define MD_MIG_VALUE_THRESH    0x200038 /* Page migr. count abs. thresh.   */
+#define MD_MIG_CANDIDATE       0x200040 /* Latest page migration candidate */
+#define MD_MIG_CANDIDATE_CLR   0x200048 /* Clear page migration candidate  */
+#define MD_DIR_ERROR           0x200050 /* Directory DIMM error            */
+#define MD_DIR_ERROR_CLR       0x200058 /* Directory DIMM error clear      */
+#define MD_PROTOCOL_ERROR      0x200060 /* Directory protocol error        */
 #define MD_PROTOCOL_ERROR_CLR  0x200068 /* Directory protocol error clear  */
-#define MD_MEM_ERROR           0x200070 /* Memory DIMM error               */
-#define MD_MEM_ERROR_CLR       0x200078 /* Memory DIMM error clear         */
-#define MD_MISC_ERROR          0x200080 /* Miscellaneous MD error          */
+#define MD_MEM_ERROR           0x200070 /* Memory DIMM error               */
+#define MD_MEM_ERROR_CLR       0x200078 /* Memory DIMM error clear         */
+#define MD_MISC_ERROR          0x200080 /* Miscellaneous MD error          */
 #define MD_MISC_ERROR_CLR      0x200088 /* Miscellaneous MD error clear    */
 #define MD_MEM_DIMM_INIT       0x200090 /* Memory DIMM mode initization.   */
-#define MD_DIR_DIMM_INIT       0x200098 /* Directory DIMM mode init.       */
-#define MD_MOQ_SIZE            0x2000a0 /* MD outgoing queue size          */
+#define MD_DIR_DIMM_INIT       0x200098 /* Directory DIMM mode init.       */
+#define MD_MOQ_SIZE            0x2000a0 /* MD outgoing queue size          */
 #define MD_MLAN_CTL            0x2000a8 /* NIC (Microlan) control register */
 
-#define MD_PERF_SEL            0x210000 /* Select perf monitor events      */
-#define MD_PERF_CNT0           0x210010 /* Performance counter 0           */
-#define MD_PERF_CNT1           0x210018 /* Performance counter 1           */
-#define MD_PERF_CNT2           0x210020 /* Performance counter 2           */
-#define MD_PERF_CNT3           0x210028 /* Performance counter 3           */
-#define MD_PERF_CNT4           0x210030 /* Performance counter 4           */
-#define MD_PERF_CNT5           0x210038 /* Performance counter 5           */
-
-#define MD_UREG0_0             0x220000 /* uController/UART 0 register     */
-#define MD_UREG0_1             0x220008 /* uController/UART 0 register     */
-#define MD_UREG0_2             0x220010 /* uController/UART 0 register     */
-#define MD_UREG0_3             0x220018 /* uController/UART 0 register     */
-#define MD_UREG0_4             0x220020 /* uController/UART 0 register     */
-#define MD_UREG0_5             0x220028 /* uController/UART 0 register     */
-#define MD_UREG0_6             0x220030 /* uController/UART 0 register     */
-#define MD_UREG0_7             0x220038 /* uController/UART 0 register     */
+#define MD_PERF_SEL            0x210000 /* Select perf monitor events      */
+#define MD_PERF_CNT0           0x210010 /* Performance counter 0           */
+#define MD_PERF_CNT1           0x210018 /* Performance counter 1           */
+#define MD_PERF_CNT2           0x210020 /* Performance counter 2           */
+#define MD_PERF_CNT3           0x210028 /* Performance counter 3           */
+#define MD_PERF_CNT4           0x210030 /* Performance counter 4           */
+#define MD_PERF_CNT5           0x210038 /* Performance counter 5           */
+
+#define MD_UREG0_0             0x220000 /* uController/UART 0 register     */
+#define MD_UREG0_1             0x220008 /* uController/UART 0 register     */
+#define MD_UREG0_2             0x220010 /* uController/UART 0 register     */
+#define MD_UREG0_3             0x220018 /* uController/UART 0 register     */
+#define MD_UREG0_4             0x220020 /* uController/UART 0 register     */
+#define MD_UREG0_5             0x220028 /* uController/UART 0 register     */
+#define MD_UREG0_6             0x220030 /* uController/UART 0 register     */
+#define MD_UREG0_7             0x220038 /* uController/UART 0 register     */
 
 #define MD_SLOTID_USTAT                0x220048 /* Hub slot ID & UART/uCtlr status */
-#define MD_LED0                        0x220050 /* Eight-bit LED for CPU A         */
-#define MD_LED1                        0x220058 /* Eight-bit LED for CPU B         */
-
-#define MD_UREG1_0             0x220080 /* uController/UART 1 register     */
-#define MD_UREG1_1             0x220088 /* uController/UART 1 register     */
-#define MD_UREG1_2             0x220090 /* uController/UART 1 register     */
-#define MD_UREG1_3             0x220098 /* uController/UART 1 register     */
-#define MD_UREG1_4             0x2200a0 /* uController/UART 1 register     */
-#define MD_UREG1_5             0x2200a8 /* uController/UART 1 register     */
-#define MD_UREG1_6             0x2200b0 /* uController/UART 1 register     */
-#define MD_UREG1_7             0x2200b8 /* uController/UART 1 register     */
-#define MD_UREG1_8             0x2200c0 /* uController/UART 1 register     */
-#define MD_UREG1_9             0x2200c8 /* uController/UART 1 register     */
-#define MD_UREG1_10            0x2200d0 /* uController/UART 1 register     */
-#define MD_UREG1_11            0x2200d8 /* uController/UART 1 register     */
-#define MD_UREG1_12            0x2200e0 /* uController/UART 1 register     */
-#define MD_UREG1_13            0x2200e8 /* uController/UART 1 register     */
-#define MD_UREG1_14            0x2200f0 /* uController/UART 1 register     */
-#define MD_UREG1_15            0x2200f8 /* uController/UART 1 register     */
+#define MD_LED0                        0x220050 /* Eight-bit LED for CPU A         */
+#define MD_LED1                        0x220058 /* Eight-bit LED for CPU B         */
+
+#define MD_UREG1_0             0x220080 /* uController/UART 1 register     */
+#define MD_UREG1_1             0x220088 /* uController/UART 1 register     */
+#define MD_UREG1_2             0x220090 /* uController/UART 1 register     */
+#define MD_UREG1_3             0x220098 /* uController/UART 1 register     */
+#define MD_UREG1_4             0x2200a0 /* uController/UART 1 register     */
+#define MD_UREG1_5             0x2200a8 /* uController/UART 1 register     */
+#define MD_UREG1_6             0x2200b0 /* uController/UART 1 register     */
+#define MD_UREG1_7             0x2200b8 /* uController/UART 1 register     */
+#define MD_UREG1_8             0x2200c0 /* uController/UART 1 register     */
+#define MD_UREG1_9             0x2200c8 /* uController/UART 1 register     */
+#define MD_UREG1_10            0x2200d0 /* uController/UART 1 register     */
+#define MD_UREG1_11            0x2200d8 /* uController/UART 1 register     */
+#define MD_UREG1_12            0x2200e0 /* uController/UART 1 register     */
+#define MD_UREG1_13            0x2200e8 /* uController/UART 1 register     */
+#define MD_UREG1_14            0x2200f0 /* uController/UART 1 register     */
+#define MD_UREG1_15            0x2200f8 /* uController/UART 1 register     */
 
 #ifdef CONFIG_SGI_SN_N_MODE
 #define MD_MEM_BANKS           4        /* 4 banks of memory max in N mode */
  *   Bits not used by the MD are used by software.
  */
 
-#define MD_SIZE_EMPTY          0       /* Valid in MEMORY_CONFIG           */
+#define MD_SIZE_EMPTY          0       /* Valid in MEMORY_CONFIG           */
 #define MD_SIZE_8MB            1
 #define MD_SIZE_16MB           2
 #define MD_SIZE_32MB           3       /* Broken in Hub 1                  */
-#define MD_SIZE_64MB           4       /* Valid in MEMORY_CONFIG           */
-#define MD_SIZE_128MB          5       /* Valid in MEMORY_CONFIG           */
+#define MD_SIZE_64MB           4       /* Valid in MEMORY_CONFIG           */
+#define MD_SIZE_128MB          5       /* Valid in MEMORY_CONFIG           */
 #define MD_SIZE_256MB          6
-#define MD_SIZE_512MB          7       /* Valid in MEMORY_CONFIG           */
+#define MD_SIZE_512MB          7       /* Valid in MEMORY_CONFIG           */
 #define MD_SIZE_1GB            8
 #define MD_SIZE_2GB            9
 #define MD_SIZE_4GB            10
 
 /* MD_SLOTID_USTAT bit definitions */
 
-#define MSU_CORECLK_TST_SHFT   7       /* You don't wanna know             */
+#define MSU_CORECLK_TST_SHFT   7       /* You don't wanna know             */
 #define MSU_CORECLK_TST_MASK   (UINT64_CAST 1 << 7)
 #define MSU_CORECLK_TST                (UINT64_CAST 1 << 7)
-#define MSU_CORECLK_SHFT       6       /* You don't wanna know             */
+#define MSU_CORECLK_SHFT       6       /* You don't wanna know             */
 #define MSU_CORECLK_MASK       (UINT64_CAST 1 << 6)
 #define MSU_CORECLK            (UINT64_CAST 1 << 6)
-#define MSU_NETSYNC_SHFT       5       /* You don't wanna know             */
+#define MSU_NETSYNC_SHFT       5       /* You don't wanna know             */
 #define MSU_NETSYNC_MASK       (UINT64_CAST 1 << 5)
 #define MSU_NETSYNC            (UINT64_CAST 1 << 5)
-#define MSU_FPROMRDY_SHFT      4       /* Flash PROM ready bit             */
+#define MSU_FPROMRDY_SHFT      4       /* Flash PROM ready bit             */
 #define MSU_FPROMRDY_MASK      (UINT64_CAST 1 << 4)
 #define MSU_FPROMRDY           (UINT64_CAST 1 << 4)
 #define MSU_I2CINTR_SHFT               3       /* I2C interrupt bit   */
 #define MSU_SN00_SLOTID_SHFT   7
 #define MSU_SN00_SLOTID_MASK   (UINT64_CAST 0x80)
 
-#define        MSU_PIMM_PSC_SHFT       4
-#define        MSU_PIMM_PSC_MASK       (0xf << MSU_PIMM_PSC_SHFT)
+#define MSU_PIMM_PSC_SHFT      4
+#define MSU_PIMM_PSC_MASK      (0xf << MSU_PIMM_PSC_SHFT)
 
 /* MD_MIG_DIFF_THRESH bit definitions */
 
 
 /* Other MD definitions */
 
-#define MD_BANK_SHFT           29                      /* log2(512 MB)     */
+#define MD_BANK_SHFT           29                      /* log2(512 MB)     */
 #define MD_BANK_MASK           (UINT64_CAST 7 << 29)
 #define MD_BANK_SIZE           (UINT64_CAST 1 << MD_BANK_SHFT)   /* 512 MB */
 #define MD_BANK_OFFSET(_b)     (UINT64_CAST (_b) << MD_BANK_SHFT)
  * Format C:  STATE != shared (FINE must be 0)
  */
 
-#define MD_PDIR_MASK           0xffffffffffff          /* Whole entry      */
+#define MD_PDIR_MASK           0xffffffffffff          /* Whole entry      */
 #define MD_PDIR_ECC_SHFT       0                       /* ABC low or high  */
 #define MD_PDIR_ECC_MASK       0x7f
-#define MD_PDIR_PRIO_SHFT      8                       /* ABC low          */
+#define MD_PDIR_PRIO_SHFT      8                       /* ABC low          */
 #define MD_PDIR_PRIO_MASK      (0xf << 8)
-#define MD_PDIR_AX_SHFT                7                       /* ABC low          */
+#define MD_PDIR_AX_SHFT                7                       /* ABC low          */
 #define MD_PDIR_AX_MASK                (1 << 7)
 #define MD_PDIR_AX             (1 << 7)
-#define MD_PDIR_FINE_SHFT      12                      /* ABC low          */
+#define MD_PDIR_FINE_SHFT      12                      /* ABC low          */
 #define MD_PDIR_FINE_MASK      (1 << 12)
 #define MD_PDIR_FINE           (1 << 12)
-#define MD_PDIR_OCT_SHFT       13                      /* A low            */
+#define MD_PDIR_OCT_SHFT       13                      /* A low            */
 #define MD_PDIR_OCT_MASK       (7 << 13)
-#define MD_PDIR_STATE_SHFT     13                      /* BC low           */
+#define MD_PDIR_STATE_SHFT     13                      /* BC low           */
 #define MD_PDIR_STATE_MASK     (7 << 13)
-#define MD_PDIR_ONECNT_SHFT    16                      /* BC low           */
+#define MD_PDIR_ONECNT_SHFT    16                      /* BC low           */
 #define MD_PDIR_ONECNT_MASK    (0x3f << 16)
-#define MD_PDIR_PTR_SHFT       22                      /* C low            */
+#define MD_PDIR_PTR_SHFT       22                      /* C low            */
 #define MD_PDIR_PTR_MASK       (UINT64_CAST 0x7ff << 22)
-#define MD_PDIR_VECMSB_SHFT    22                      /* AB low           */
+#define MD_PDIR_VECMSB_SHFT    22                      /* AB low           */
 #define MD_PDIR_VECMSB_BITMASK 0x3ffffff
 #define MD_PDIR_VECMSB_BITSHFT 27
 #define MD_PDIR_VECMSB_MASK    (UINT64_CAST MD_PDIR_VECMSB_BITMASK << 22)
-#define MD_PDIR_CWOFF_SHFT     7                       /* C high           */
+#define MD_PDIR_CWOFF_SHFT     7                       /* C high           */
 #define MD_PDIR_CWOFF_MASK     (7 << 7)
-#define MD_PDIR_VECLSB_SHFT    10                      /* AB high          */
+#define MD_PDIR_VECLSB_SHFT    10                      /* AB high          */
 #define MD_PDIR_VECLSB_BITMASK (UINT64_CAST 0x3fffffffff)
 #define MD_PDIR_VECLSB_BITSHFT 0
 #define MD_PDIR_VECLSB_MASK    (MD_PDIR_VECLSB_BITMASK << 10)
  * Format C:  STATE != shared
  */
 
-#define MD_SDIR_MASK           0xffff                  /* Whole entry      */
+#define MD_SDIR_MASK           0xffff                  /* Whole entry      */
 #define MD_SDIR_ECC_SHFT       0                       /* AC low or high   */
 #define MD_SDIR_ECC_MASK       0x1f
-#define MD_SDIR_PRIO_SHFT      6                       /* AC low           */
+#define MD_SDIR_PRIO_SHFT      6                       /* AC low           */
 #define MD_SDIR_PRIO_MASK      (1 << 6)
-#define MD_SDIR_AX_SHFT                5                       /* AC low           */
+#define MD_SDIR_AX_SHFT                5                       /* AC low           */
 #define MD_SDIR_AX_MASK                (1 << 5)
 #define MD_SDIR_AX             (1 << 5)
-#define MD_SDIR_STATE_SHFT     7                       /* AC low           */
+#define MD_SDIR_STATE_SHFT     7                       /* AC low           */
 #define MD_SDIR_STATE_MASK     (7 << 7)
-#define MD_SDIR_PTR_SHFT       10                      /* C low            */
+#define MD_SDIR_PTR_SHFT       10                      /* C low            */
 #define MD_SDIR_PTR_MASK       (0x3f << 10)
-#define MD_SDIR_CWOFF_SHFT     5                       /* C high           */
+#define MD_SDIR_CWOFF_SHFT     5                       /* C high           */
 #define MD_SDIR_CWOFF_MASK     (7 << 5)
-#define MD_SDIR_VECMSB_SHFT    11                      /* A low            */
+#define MD_SDIR_VECMSB_SHFT    11                      /* A low            */
 #define MD_SDIR_VECMSB_BITMASK 0x1f
 #define MD_SDIR_VECMSB_BITSHFT 7
 #define MD_SDIR_VECMSB_MASK    (MD_SDIR_VECMSB_BITMASK << 11)
-#define MD_SDIR_VECLSB_SHFT    5                       /* A high           */
+#define MD_SDIR_VECLSB_SHFT    5                       /* A high           */
 #define MD_SDIR_VECLSB_BITMASK 0x7ff
 #define MD_SDIR_VECLSB_BITSHFT 0
 #define MD_SDIR_VECLSB_MASK    (MD_SDIR_VECLSB_BITMASK << 5)
 
 /* Premium SIMM protection entry shifts and masks. */
 
-#define MD_PPROT_SHFT          0                       /* Prot. field      */
+#define MD_PPROT_SHFT          0                       /* Prot. field      */
 #define MD_PPROT_MASK          7
 #define MD_PPROT_MIGMD_SHFT    3                       /* Migration mode   */
 #define MD_PPROT_MIGMD_MASK    (3 << 3)
 
 /* Standard SIMM protection entry shifts and masks. */
 
-#define MD_SPROT_SHFT          0                       /* Prot. field      */
+#define MD_SPROT_SHFT          0                       /* Prot. field      */
 #define MD_SPROT_MASK          7
 #define MD_SPROT_MIGMD_SHFT    3                       /* Migration mode   */
 #define MD_SPROT_MIGMD_MASK    (3 << 3)
 
 #define CPU_LED_ADDR(_nasid, _slice)                                      \
        (private.p_sn00 ?                                                  \
-        REMOTE_HUB_ADDR((_nasid), MD_UREG1_0 + ((_slice) << 5)) :         \
+        REMOTE_HUB_ADDR((_nasid), MD_UREG1_0 + ((_slice) << 5)) :         \
         REMOTE_HUB_ADDR((_nasid), MD_LED0    + ((_slice) << 3)))
 
 #define SET_CPU_LEDS(_nasid, _slice,  _val)                               \
        (HUB_S(CPU_LED_ADDR(_nasid, _slice), (_val)))
 
-#define SET_MY_LEDS(_v)                                                   \
+#define SET_MY_LEDS(_v)                                                           \
        SET_CPU_LEDS(get_nasid(), get_slice(), (_v))
 
 /*
  */
 
 struct dir_error_reg {
-       u64     uce_vld:   1,   /*    63: valid directory uce   */
+       u64     uce_vld:   1,   /*    63: valid directory uce   */
                ae_vld:    1,   /*    62: valid dir prot ecc error */
                ce_vld:    1,   /*    61: valid correctable ECC err*/
                rsvd1:    19,   /* 60-42: reserved              */
@@ -555,13 +555,13 @@ struct dir_error_reg {
 };
 
 typedef union md_dir_error {
-       u64     derr_reg;       /* the entire register          */
+       u64     derr_reg;       /* the entire register          */
        struct dir_error_reg derr_fmt;  /* the register format          */
 } md_dir_error_t;
 
 
 struct mem_error_reg {
-       u64     uce_vld:   1,   /*    63: valid memory uce      */
+       u64     uce_vld:   1,   /*    63: valid memory uce      */
                ce_vld:    1,   /*    62: valid correctable ECC err*/
                rsvd1:    22,   /* 61-40: reserved              */
                bad_syn:   8,   /* 39-32: bad mem ecc syndrome  */
@@ -573,8 +573,8 @@ struct mem_error_reg {
 
 
 typedef union md_mem_error {
-       u64     merr_reg;       /* the entire register          */
-       struct mem_error_reg  merr_fmt; /* format of the mem_error reg  */
+       u64     merr_reg;       /* the entire register          */
+       struct mem_error_reg  merr_fmt; /* format of the mem_error reg  */
 } md_mem_error_t;
 
 
@@ -594,7 +594,7 @@ struct proto_error_reg {
 };
 
 typedef union md_proto_error {
-       u64     perr_reg;       /* the entire register          */
+       u64     perr_reg;       /* the entire register          */
        struct proto_error_reg  perr_fmt; /* format of the register     */
 } md_proto_error_t;
 
@@ -695,33 +695,33 @@ typedef union md_pdir_loent {
  *   represent directory memory information.
  */
 
-typedef        union   md_dir_high     {
-        md_sdir_high_t md_sdir_high;
-        md_pdir_high_t md_pdir_high;
+typedef union  md_dir_high     {
+       md_sdir_high_t  md_sdir_high;
+       md_pdir_high_t  md_pdir_high;
 } md_dir_high_t;
 
-typedef        union   md_dir_low      {
-        md_sdir_low_t  md_sdir_low;
-        md_pdir_low_t  md_pdir_low;
+typedef union  md_dir_low      {
+       md_sdir_low_t   md_sdir_low;
+       md_pdir_low_t   md_pdir_low;
 } md_dir_low_t;
 
-typedef        struct  bddir_entry     {
-        md_dir_low_t   md_dir_low;
-        md_dir_high_t  md_dir_high;
+typedef struct bddir_entry     {
+       md_dir_low_t    md_dir_low;
+       md_dir_high_t   md_dir_high;
 } bddir_entry_t;
 
 typedef struct dir_mem_entry   {
-        u64            prcpf[MAX_REGIONS];
-        bddir_entry_t  directory_words[MD_PAGE_SIZE/CACHE_SLINE_SIZE];
+       u64             prcpf[MAX_REGIONS];
+       bddir_entry_t   directory_words[MD_PAGE_SIZE/CACHE_SLINE_SIZE];
 } dir_mem_entry_t;
 
 
 
 typedef union md_perf_sel {
-       u64     perf_sel_reg;
+       u64     perf_sel_reg;
        struct  {
                u64     perf_rsvd : 60,
-                       perf_en   :  1,
+                       perf_en   :  1,
                        perf_sel  :  3;
        } perf_sel_bits;
 } md_perf_sel_t;
@@ -730,7 +730,7 @@ typedef union md_perf_cnt {
        u64     perf_cnt;
        struct  {
                u64     perf_rsvd : 44,
-                       perf_cnt  : 20;
+                       perf_cnt  : 20;
        } perf_cnt_bits;
 } md_perf_cnt_t;
 
index b40d3ef..b73c4be 100644 (file)
 #define NI_BASE_TABLES         0x630000
 
 #define NI_STATUS_REV_ID       0x600000 /* Hub network status, rev, and ID */
-#define NI_PORT_RESET          0x600008 /* Reset the network interface     */
+#define NI_PORT_RESET          0x600008 /* Reset the network interface     */
 #define NI_PROTECTION          0x600010 /* NI register access permissions  */
-#define NI_GLOBAL_PARMS                0x600018 /* LLP parameters                  */
+#define NI_GLOBAL_PARMS                0x600018 /* LLP parameters                  */
 #define NI_SCRATCH_REG0                0x600100 /* Scratch register 0 (64 bits)    */
 #define NI_SCRATCH_REG1                0x600108 /* Scratch register 1 (64 bits)    */
 #define NI_DIAG_PARMS          0x600110 /* Parameters for diags            */
 
 #define NI_VECTOR_PARMS                0x600200 /* Vector PIO routing parameters   */
-#define NI_VECTOR              0x600208 /* Vector PIO route                */
-#define NI_VECTOR_DATA         0x600210 /* Vector PIO data                 */
-#define NI_VECTOR_STATUS       0x600300 /* Vector PIO return status        */
-#define NI_RETURN_VECTOR       0x600308 /* Vector PIO return vector        */
-#define NI_VECTOR_READ_DATA    0x600310 /* Vector PIO read data            */
+#define NI_VECTOR              0x600208 /* Vector PIO route                */
+#define NI_VECTOR_DATA         0x600210 /* Vector PIO data                 */
+#define NI_VECTOR_STATUS       0x600300 /* Vector PIO return status        */
+#define NI_RETURN_VECTOR       0x600308 /* Vector PIO return vector        */
+#define NI_VECTOR_READ_DATA    0x600310 /* Vector PIO read data            */
 #define NI_VECTOR_CLEAR                0x600380 /* Vector PIO read & clear status  */
 
-#define NI_IO_PROTECT          0x600400 /* PIO protection bits             */
-#define NI_IO_PROT_OVRRD       0x600408 /* PIO protection bit override     */
-
-#define NI_AGE_CPU0_MEMORY     0x600500 /* CPU 0 memory age control        */
-#define NI_AGE_CPU0_PIO                0x600508 /* CPU 0 PIO age control           */
-#define NI_AGE_CPU1_MEMORY     0x600510 /* CPU 1 memory age control        */
-#define NI_AGE_CPU1_PIO                0x600518 /* CPU 1 PIO age control           */
-#define NI_AGE_GBR_MEMORY      0x600520 /* GBR memory age control          */
-#define NI_AGE_GBR_PIO         0x600528 /* GBR PIO age control             */
-#define NI_AGE_IO_MEMORY       0x600530 /* IO memory age control           */
-#define NI_AGE_IO_PIO          0x600538 /* IO PIO age control              */
+#define NI_IO_PROTECT          0x600400 /* PIO protection bits             */
+#define NI_IO_PROT_OVRRD       0x600408 /* PIO protection bit override     */
+
+#define NI_AGE_CPU0_MEMORY     0x600500 /* CPU 0 memory age control        */
+#define NI_AGE_CPU0_PIO                0x600508 /* CPU 0 PIO age control           */
+#define NI_AGE_CPU1_MEMORY     0x600510 /* CPU 1 memory age control        */
+#define NI_AGE_CPU1_PIO                0x600518 /* CPU 1 PIO age control           */
+#define NI_AGE_GBR_MEMORY      0x600520 /* GBR memory age control          */
+#define NI_AGE_GBR_PIO         0x600528 /* GBR PIO age control             */
+#define NI_AGE_IO_MEMORY       0x600530 /* IO memory age control           */
+#define NI_AGE_IO_PIO          0x600538 /* IO PIO age control              */
 #define NI_AGE_REG_MIN         NI_AGE_CPU0_MEMORY
 #define NI_AGE_REG_MAX         NI_AGE_IO_PIO
 
-#define NI_PORT_PARMS          0x608000 /* LLP Parameters                  */
-#define NI_PORT_ERROR          0x608008 /* LLP Errors                      */
-#define NI_PORT_ERROR_CLEAR    0x608088 /* Clear the error bits            */
+#define NI_PORT_PARMS          0x608000 /* LLP Parameters                  */
+#define NI_PORT_ERROR          0x608008 /* LLP Errors                      */
+#define NI_PORT_ERROR_CLEAR    0x608088 /* Clear the error bits            */
 
 #define NI_META_TABLE0         0x638000 /* First meta routing table entry  */
 #define NI_META_TABLE(_x)      (NI_META_TABLE0 + (8 * (_x)))
 #define NSRI_LINKUP_SHFT       29
 #define NSRI_LINKUP_MASK       (UINT64_CAST 0x1 << 29)
 #define NSRI_DOWNREASON_SHFT   28              /* 0=failed, 1=never came   */
-#define NSRI_DOWNREASON_MASK   (UINT64_CAST 0x1 << 28) /*    out of reset. */
+#define NSRI_DOWNREASON_MASK   (UINT64_CAST 0x1 << 28) /*    out of reset. */
 #define NSRI_MORENODES_SHFT    18
 #define NSRI_MORENODES_MASK    (UINT64_CAST 1 << 18)   /* Max. # of nodes  */
 #define         MORE_MEMORY            0
 #define         MORE_NODES             1
 #define NSRI_REGIONSIZE_SHFT   17
-#define NSRI_REGIONSIZE_MASK   (UINT64_CAST 1 << 17)   /* Granularity      */
+#define NSRI_REGIONSIZE_MASK   (UINT64_CAST 1 << 17)   /* Granularity      */
 #define         REGIONSIZE_FINE        1
 #define         REGIONSIZE_COARSE      0
 #define NSRI_NODEID_SHFT       8
 #define NSRI_REV_SHFT          4
 #define NSRI_REV_MASK          (UINT64_CAST 0xf << 4)  /* Chip Revision    */
 #define NSRI_CHIPID_SHFT       0
-#define NSRI_CHIPID_MASK       (UINT64_CAST 0xf)       /* Chip type ID     */
+#define NSRI_CHIPID_MASK       (UINT64_CAST 0xf)       /* Chip type ID     */
 
 /*
- * In fine mode, each node is a region.  In coarse mode, there are
+ * In fine mode, each node is a region.         In coarse mode, there are
  * eight nodes per region.
  */
 #define NASID_TO_FINEREG_SHFT  0
-#define NASID_TO_COARSEREG_SHFT        3
+#define NASID_TO_COARSEREG_SHFT 3
 
 /* NI_PORT_RESET mask definitions */
 
 
 /* NI_GLOBAL_PARMS mask and shift definitions */
 
-#define NGP_MAXRETRY_SHFT      48              /* Maximum retries          */
+#define NGP_MAXRETRY_SHFT      48              /* Maximum retries          */
 #define NGP_MAXRETRY_MASK      (UINT64_CAST 0x3ff << 48)
-#define NGP_TAILTOWRAP_SHFT    32              /* Tail timeout wrap        */
+#define NGP_TAILTOWRAP_SHFT    32              /* Tail timeout wrap        */
 #define NGP_TAILTOWRAP_MASK    (UINT64_CAST 0xffff << 32)
 
-#define NGP_CREDITTOVAL_SHFT   16              /* Tail timeout wrap        */
+#define NGP_CREDITTOVAL_SHFT   16              /* Tail timeout wrap        */
 #define NGP_CREDITTOVAL_MASK   (UINT64_CAST 0xf << 16)
-#define NGP_TAILTOVAL_SHFT     4               /* Tail timeout value       */
+#define NGP_TAILTOVAL_SHFT     4               /* Tail timeout value       */
 #define NGP_TAILTOVAL_MASK     (UINT64_CAST 0xf << 4)
 
 /* NI_DIAG_PARMS mask and shift definitions */
 
 #define NDP_PORTTORESET                (UINT64_CAST 1 << 18)   /* Port tmout reset */
 #define NDP_LLP8BITMODE                (UINT64_CAST 1 << 12)   /* LLP 8-bit mode   */
-#define NDP_PORTDISABLE                (UINT64_CAST 1 <<  6)   /* Port disable     */
+#define NDP_PORTDISABLE                (UINT64_CAST 1 <<  6)   /* Port disable     */
 #define NDP_SENDERROR          (UINT64_CAST 1)         /* Send data error  */
 
 /*
 #define NVP_PIOID_MASK         (UINT64_CAST 0x3ff << 40)
 #define NVP_WRITEID_SHFT       32
 #define NVP_WRITEID_MASK       (UINT64_CAST 0xff << 32)
-#define NVP_ADDRESS_MASK       (UINT64_CAST 0xffff8)   /* Bits 19:3        */
+#define NVP_ADDRESS_MASK       (UINT64_CAST 0xffff8)   /* Bits 19:3        */
 #define NVP_TYPE_SHFT          0
 #define NVP_TYPE_MASK          (UINT64_CAST 0x3)
 
 #define NVS_PIOID_MASK         (UINT64_CAST 0x3ff << 40)
 #define NVS_WRITEID_SHFT       32
 #define NVS_WRITEID_MASK       (UINT64_CAST 0xff << 32)
-#define NVS_ADDRESS_MASK       (UINT64_CAST 0xfffffff8)   /* Bits 31:3     */
+#define NVS_ADDRESS_MASK       (UINT64_CAST 0xfffffff8)   /* Bits 31:3     */
 #define NVS_TYPE_SHFT          0
 #define NVS_TYPE_MASK          (UINT64_CAST 0x7)
 #define NVS_ERROR_MASK         (UINT64_CAST 0x4)  /* bit set means error */
 #define         PIOTYPE_WRITE          1       /* VECTOR_PARMS and VECTOR_STATUS   */
 #define         PIOTYPE_UNDEFINED      2       /* VECTOR_PARMS and VECTOR_STATUS   */
 #define         PIOTYPE_EXCHANGE       3       /* VECTOR_PARMS and VECTOR_STATUS   */
-#define         PIOTYPE_ADDR_ERR       4       /* VECTOR_STATUS only               */
-#define         PIOTYPE_CMD_ERR        5       /* VECTOR_STATUS only               */
-#define         PIOTYPE_PROT_ERR       6       /* VECTOR_STATUS only               */
-#define         PIOTYPE_UNKNOWN        7       /* VECTOR_STATUS only               */
+#define         PIOTYPE_ADDR_ERR       4       /* VECTOR_STATUS only               */
+#define         PIOTYPE_CMD_ERR        5       /* VECTOR_STATUS only               */
+#define         PIOTYPE_PROT_ERR       6       /* VECTOR_STATUS only               */
+#define         PIOTYPE_UNKNOWN        7       /* VECTOR_STATUS only               */
 
 /* NI_AGE_XXX mask and shift definitions */
 
 
 #define NPE_FATAL_ERRORS       (NPE_LINKRESET | NPE_INTERNALERROR |    \
                                 NPE_BADMESSAGE | NPE_BADDEST |         \
-                                NPE_FIFOOVERFLOW | NPE_CREDITTO_MASK | \
+                                NPE_FIFOOVERFLOW | NPE_CREDITTO_MASK | \
                                 NPE_TAILTO_MASK)
 
 /* NI_META_TABLE mask and shift definitions */
 typedef union  hubni_port_error_u {
        u64     nipe_reg_value;
        struct {
-           u64 nipe_rsvd:      26,     /* unused */
+           u64 nipe_rsvd:      26,     /* unused */
                nipe_lnk_reset:  1,     /* link reset */
                nipe_intl_err:   1,     /* internal error */
                nipe_bad_msg:    1,     /* bad message */
index e39f5f9..7b83655 100644 (file)
@@ -8,8 +8,8 @@
  * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
  * Copyright (C) 1999 by Ralf Baechle
  */
-#ifndef        _ASM_SN_SN0_HUBPI_H
-#define        _ASM_SN_SN0_HUBPI_H
+#ifndef _ASM_SN_SN0_HUBPI_H
+#define _ASM_SN_SN0_HUBPI_H
 
 #include <linux/types.h>
 
 
 /* General protection and control registers */
 
-#define PI_CPU_PROTECT         0x000000 /* CPU Protection                  */
-#define PI_PROT_OVERRD         0x000008 /* Clear CPU Protection bit        */
-#define        PI_IO_PROTECT           0x000010 /* Interrupt Pending Protection    */
+#define PI_CPU_PROTECT         0x000000 /* CPU Protection                  */
+#define PI_PROT_OVERRD         0x000008 /* Clear CPU Protection bit        */
+#define PI_IO_PROTECT          0x000010 /* Interrupt Pending Protection    */
 #define PI_REGION_PRESENT      0x000018 /* Indicates whether region exists */
-#define PI_CPU_NUM             0x000020 /* CPU Number ID                   */
-#define PI_CALIAS_SIZE         0x000028 /* Cached Alias Size               */
-#define PI_MAX_CRB_TIMEOUT     0x000030 /* Maximum Timeout for CRB         */
+#define PI_CPU_NUM             0x000020 /* CPU Number ID                   */
+#define PI_CALIAS_SIZE         0x000028 /* Cached Alias Size               */
+#define PI_MAX_CRB_TIMEOUT     0x000030 /* Maximum Timeout for CRB         */
 #define PI_CRB_SFACTOR         0x000038 /* Scale factor for CRB timeout    */
 
 /* CALIAS values */
 
 /* Processor control and status checking */
 
-#define PI_CPU_PRESENT_A       0x000040 /* CPU Present A                   */
-#define PI_CPU_PRESENT_B       0x000048 /* CPU Present B                   */
-#define PI_CPU_ENABLE_A                0x000050 /* CPU Enable A                    */
-#define PI_CPU_ENABLE_B                0x000058 /* CPU Enable B                    */
-#define PI_REPLY_LEVEL         0x000060 /* Reply Level                     */
+#define PI_CPU_PRESENT_A       0x000040 /* CPU Present A                   */
+#define PI_CPU_PRESENT_B       0x000048 /* CPU Present B                   */
+#define PI_CPU_ENABLE_A                0x000050 /* CPU Enable A                    */
+#define PI_CPU_ENABLE_B                0x000058 /* CPU Enable B                    */
+#define PI_REPLY_LEVEL         0x000060 /* Reply Level                     */
 #define PI_HARDRESET_BIT       0x020068 /* Bit cleared by s/w on SR        */
-#define PI_NMI_A               0x000070 /* NMI to CPU A                    */
-#define PI_NMI_B               0x000078 /* NMI to CPU B                    */
+#define PI_NMI_A               0x000070 /* NMI to CPU A                    */
+#define PI_NMI_B               0x000078 /* NMI to CPU B                    */
 #define PI_NMI_OFFSET          (PI_NMI_B - PI_NMI_A)
-#define PI_SOFTRESET           0x000080 /* Softreset (to both CPUs)        */
+#define PI_SOFTRESET           0x000080 /* Softreset (to both CPUs)        */
 
-/* Regular Interrupt register checking.  */
+/* Regular Interrupt register checking.         */
 
 #define PI_INT_PEND_MOD                0x000090 /* Write to set pending ints       */
-#define PI_INT_PEND0           0x000098 /* Read to get pending ints        */
-#define PI_INT_PEND1           0x0000a0 /* Read to get pending ints        */
-#define PI_INT_MASK0_A         0x0000a8 /* Interrupt Mask 0 for CPU A      */
-#define PI_INT_MASK1_A         0x0000b0 /* Interrupt Mask 1 for CPU A      */
-#define PI_INT_MASK0_B         0x0000b8 /* Interrupt Mask 0 for CPU B      */
-#define PI_INT_MASK1_B         0x0000c0 /* Interrupt Mask 1 for CPU B      */
+#define PI_INT_PEND0           0x000098 /* Read to get pending ints        */
+#define PI_INT_PEND1           0x0000a0 /* Read to get pending ints        */
+#define PI_INT_MASK0_A         0x0000a8 /* Interrupt Mask 0 for CPU A      */
+#define PI_INT_MASK1_A         0x0000b0 /* Interrupt Mask 1 for CPU A      */
+#define PI_INT_MASK0_B         0x0000b8 /* Interrupt Mask 0 for CPU B      */
+#define PI_INT_MASK1_B         0x0000c0 /* Interrupt Mask 1 for CPU B      */
 
-#define PI_INT_MASK_OFFSET     0x10     /* Offset from A to B              */
+#define PI_INT_MASK_OFFSET     0x10     /* Offset from A to B              */
 
 /* Crosscall interrupts */
 
 #define PI_CC_PEND_SET_B       0x0000d0 /* CC Interrupt Pending Set, CPU B */
 #define PI_CC_PEND_CLR_A       0x0000d8 /* CC Interrupt Pending Clr, CPU A */
 #define PI_CC_PEND_CLR_B       0x0000e0 /* CC Interrupt Pending Clr, CPU B */
-#define PI_CC_MASK             0x0000e8 /* CC Interrupt mask               */
+#define PI_CC_MASK             0x0000e8 /* CC Interrupt mask               */
 
-#define PI_INT_SET_OFFSET      0x08     /* Offset from A to B              */
+#define PI_INT_SET_OFFSET      0x08     /* Offset from A to B              */
 
 /* Realtime Counter and Profiler control registers */
 
-#define PI_RT_COUNT            0x030100 /* Real Time Counter               */
-#define PI_RT_COMPARE_A                0x000108 /* Real Time Compare A             */
-#define PI_RT_COMPARE_B                0x000110 /* Real Time Compare B             */
+#define PI_RT_COUNT            0x030100 /* Real Time Counter               */
+#define PI_RT_COMPARE_A                0x000108 /* Real Time Compare A             */
+#define PI_RT_COMPARE_B                0x000110 /* Real Time Compare B             */
 #define PI_PROFILE_COMPARE     0x000118 /* L5 int to both cpus when == RTC */
-#define PI_RT_PEND_A           0x000120 /* Set if RT int for A pending     */
-#define PI_RT_PEND_B           0x000128 /* Set if RT int for B pending     */
+#define PI_RT_PEND_A           0x000120 /* Set if RT int for A pending     */
+#define PI_RT_PEND_B           0x000128 /* Set if RT int for B pending     */
 #define PI_PROF_PEND_A         0x000130 /* Set if Prof int for A pending   */
 #define PI_PROF_PEND_B         0x000138 /* Set if Prof int for B pending   */
-#define PI_RT_EN_A             0x000140 /* RT int for CPU A enable         */
-#define PI_RT_EN_B             0x000148 /* RT int for CPU B enable         */
-#define PI_PROF_EN_A           0x000150 /* PROF int for CPU A enable       */
-#define PI_PROF_EN_B           0x000158 /* PROF int for CPU B enable       */
-#define PI_RT_LOCAL_CTRL       0x000160 /* RT control register             */
+#define PI_RT_EN_A             0x000140 /* RT int for CPU A enable         */
+#define PI_RT_EN_B             0x000148 /* RT int for CPU B enable         */
+#define PI_PROF_EN_A           0x000150 /* PROF int for CPU A enable       */
+#define PI_PROF_EN_B           0x000158 /* PROF int for CPU B enable       */
+#define PI_RT_LOCAL_CTRL       0x000160 /* RT control register             */
 #define PI_RT_FILTER_CTRL      0x000168 /* GCLK Filter control register    */
 
 #define PI_COUNT_OFFSET                0x08     /* A to B offset for all counts    */
 
 /* Built-In Self Test support */
 
-#define PI_BIST_WRITE_DATA     0x000200 /* BIST write data                 */
-#define PI_BIST_READ_DATA      0x000208 /* BIST read data                  */
-#define PI_BIST_COUNT_TARG     0x000210 /* BIST Count and Target           */
-#define PI_BIST_READY          0x000218 /* BIST Ready indicator            */
-#define PI_BIST_SHIFT_LOAD     0x000220 /* BIST control                    */
-#define PI_BIST_SHIFT_UNLOAD   0x000228 /* BIST control                    */
-#define PI_BIST_ENTER_RUN      0x000230 /* BIST control                    */
+#define PI_BIST_WRITE_DATA     0x000200 /* BIST write data                 */
+#define PI_BIST_READ_DATA      0x000208 /* BIST read data                  */
+#define PI_BIST_COUNT_TARG     0x000210 /* BIST Count and Target           */
+#define PI_BIST_READY          0x000218 /* BIST Ready indicator            */
+#define PI_BIST_SHIFT_LOAD     0x000220 /* BIST control                    */
+#define PI_BIST_SHIFT_UNLOAD   0x000228 /* BIST control                    */
+#define PI_BIST_ENTER_RUN      0x000230 /* BIST control                    */
 
 /* Graphics control registers */
 
-#define PI_GFX_PAGE_A          0x000300 /* Graphics page A                 */
-#define PI_GFX_CREDIT_CNTR_A   0x000308 /* Graphics credit counter A       */
-#define PI_GFX_BIAS_A          0x000310 /* Graphics bias A                 */
+#define PI_GFX_PAGE_A          0x000300 /* Graphics page A                 */
+#define PI_GFX_CREDIT_CNTR_A   0x000308 /* Graphics credit counter A       */
+#define PI_GFX_BIAS_A          0x000310 /* Graphics bias A                 */
 #define PI_GFX_INT_CNTR_A      0x000318 /* Graphics interrupt counter A    */
 #define PI_GFX_INT_CMP_A       0x000320 /* Graphics interrupt comparator A */
-#define PI_GFX_PAGE_B          0x000328 /* Graphics page B                 */
-#define PI_GFX_CREDIT_CNTR_B   0x000330 /* Graphics credit counter B       */
-#define PI_GFX_BIAS_B          0x000338 /* Graphics bias B                 */
+#define PI_GFX_PAGE_B          0x000328 /* Graphics page B                 */
+#define PI_GFX_CREDIT_CNTR_B   0x000330 /* Graphics credit counter B       */
+#define PI_GFX_BIAS_B          0x000338 /* Graphics bias B                 */
 #define PI_GFX_INT_CNTR_B      0x000340 /* Graphics interrupt counter B    */
 #define PI_GFX_INT_CMP_B       0x000348 /* Graphics interrupt comparator B */
 
 #define PI_ERR_INT_MASK_B      0x000410 /* Error Interrupt mask for CPU B  */
 #define PI_ERR_STACK_ADDR_A    0x000418 /* Error stack address for CPU A   */
 #define PI_ERR_STACK_ADDR_B    0x000420 /* Error stack address for CPU B   */
-#define PI_ERR_STACK_SIZE      0x000428 /* Error Stack Size                */
-#define PI_ERR_STATUS0_A       0x000430 /* Error Status 0A                 */
+#define PI_ERR_STACK_SIZE      0x000428 /* Error Stack Size                */
+#define PI_ERR_STATUS0_A       0x000430 /* Error Status 0A                 */
 #define PI_ERR_STATUS0_A_RCLR  0x000438 /* Error Status 0A clear on read   */
-#define PI_ERR_STATUS1_A       0x000440 /* Error Status 1A                 */
+#define PI_ERR_STATUS1_A       0x000440 /* Error Status 1A                 */
 #define PI_ERR_STATUS1_A_RCLR  0x000448 /* Error Status 1A clear on read   */
-#define PI_ERR_STATUS0_B       0x000450 /* Error Status 0B                 */
+#define PI_ERR_STATUS0_B       0x000450 /* Error Status 0B                 */
 #define PI_ERR_STATUS0_B_RCLR  0x000458 /* Error Status 0B clear on read   */
-#define PI_ERR_STATUS1_B       0x000460 /* Error Status 1B                 */
+#define PI_ERR_STATUS1_B       0x000460 /* Error Status 1B                 */
 #define PI_ERR_STATUS1_B_RCLR  0x000468 /* Error Status 1B clear on read   */
-#define PI_SPOOL_CMP_A         0x000470 /* Spool compare for CPU A         */
-#define PI_SPOOL_CMP_B         0x000478 /* Spool compare for CPU B         */
-#define PI_CRB_TIMEOUT_A       0x000480 /* Timed out CRB entries for A     */
-#define PI_CRB_TIMEOUT_B       0x000488 /* Timed out CRB entries for B     */
+#define PI_SPOOL_CMP_A         0x000470 /* Spool compare for CPU A         */
+#define PI_SPOOL_CMP_B         0x000478 /* Spool compare for CPU B         */
+#define PI_CRB_TIMEOUT_A       0x000480 /* Timed out CRB entries for A     */
+#define PI_CRB_TIMEOUT_B       0x000488 /* Timed out CRB entries for B     */
 #define PI_SYSAD_ERRCHK_EN     0x000490 /* Enables SYSAD error checking    */
-#define PI_BAD_CHECK_BIT_A     0x000498 /* Force SYSAD check bit error     */
-#define PI_BAD_CHECK_BIT_B     0x0004a0 /* Force SYSAD check bit error     */
-#define PI_NACK_CNT_A          0x0004a8 /* Consecutive NACK counter        */
-#define PI_NACK_CNT_B          0x0004b0 /*     "       " for CPU B         */
+#define PI_BAD_CHECK_BIT_A     0x000498 /* Force SYSAD check bit error     */
+#define PI_BAD_CHECK_BIT_B     0x0004a0 /* Force SYSAD check bit error     */
+#define PI_NACK_CNT_A          0x0004a8 /* Consecutive NACK counter        */
+#define PI_NACK_CNT_B          0x0004b0 /*     "       " for CPU B         */
 #define PI_NACK_CMP            0x0004b8 /* NACK count compare              */
 #define PI_STACKADDR_OFFSET    (PI_ERR_STACK_ADDR_B - PI_ERR_STACK_ADDR_A)
 #define PI_ERRSTAT_OFFSET      (PI_ERR_STATUS0_B - PI_ERR_STATUS0_A)
 #define PI_ERR_SPUR_MSG_A      0x00000008
 #define PI_ERR_WRB_TERR_B      0x00000010      /* WRB TERR                 */
 #define PI_ERR_WRB_TERR_A      0x00000020
-#define PI_ERR_WRB_WERR_B      0x00000040      /* WRB WERR                 */
+#define PI_ERR_WRB_WERR_B      0x00000040      /* WRB WERR                 */
 #define PI_ERR_WRB_WERR_A      0x00000080
 #define PI_ERR_SYSSTATE_B      0x00000100      /* SysState parity error    */
 #define PI_ERR_SYSSTATE_A      0x00000200
  * The following three macros define all possible error int pends.
  */
 
-#define PI_FATAL_ERR_CPU_A     (PI_ERR_SYSSTATE_TAG_A  | \
-                                PI_ERR_BAD_SPOOL_A     | \
-                                PI_ERR_SYSCMD_ADDR_A   | \
-                                PI_ERR_SYSCMD_DATA_A   | \
-                                PI_ERR_SYSAD_ADDR_A    | \
+#define PI_FATAL_ERR_CPU_A     (PI_ERR_SYSSTATE_TAG_A  | \
+                                PI_ERR_BAD_SPOOL_A     | \
+                                PI_ERR_SYSCMD_ADDR_A   | \
+                                PI_ERR_SYSCMD_DATA_A   | \
+                                PI_ERR_SYSAD_ADDR_A    | \
                                 PI_ERR_SYSAD_DATA_A    | \
                                 PI_ERR_SYSSTATE_A)
 
-#define PI_MISC_ERR_CPU_A      (PI_ERR_UNCAC_UNCORR_A  | \
-                                PI_ERR_WRB_WERR_A      | \
-                                PI_ERR_WRB_TERR_A      | \
-                                PI_ERR_SPUR_MSG_A      | \
+#define PI_MISC_ERR_CPU_A      (PI_ERR_UNCAC_UNCORR_A  | \
+                                PI_ERR_WRB_WERR_A      | \
+                                PI_ERR_WRB_TERR_A      | \
+                                PI_ERR_SPUR_MSG_A      | \
                                 PI_ERR_SPOOL_CMP_A)
 
-#define PI_FATAL_ERR_CPU_B     (PI_ERR_SYSSTATE_TAG_B  | \
-                                PI_ERR_BAD_SPOOL_B     | \
-                                PI_ERR_SYSCMD_ADDR_B   | \
-                                PI_ERR_SYSCMD_DATA_B   | \
-                                PI_ERR_SYSAD_ADDR_B    | \
+#define PI_FATAL_ERR_CPU_B     (PI_ERR_SYSSTATE_TAG_B  | \
+                                PI_ERR_BAD_SPOOL_B     | \
+                                PI_ERR_SYSCMD_ADDR_B   | \
+                                PI_ERR_SYSCMD_DATA_B   | \
+                                PI_ERR_SYSAD_ADDR_B    | \
                                 PI_ERR_SYSAD_DATA_B    | \
                                 PI_ERR_SYSSTATE_B)
 
-#define PI_MISC_ERR_CPU_B      (PI_ERR_UNCAC_UNCORR_B  | \
-                                PI_ERR_WRB_WERR_B      | \
-                                PI_ERR_WRB_TERR_B      | \
-                                PI_ERR_SPUR_MSG_B      | \
+#define PI_MISC_ERR_CPU_B      (PI_ERR_UNCAC_UNCORR_B  | \
+                                PI_ERR_WRB_WERR_B      | \
+                                PI_ERR_WRB_TERR_B      | \
+                                PI_ERR_SPUR_MSG_B      | \
                                 PI_ERR_SPOOL_CMP_B)
 
 #define PI_ERR_GENERIC (PI_ERR_MD_UNCORR)
 #define PI_ERR_ST0_CMD_SHFT    17
 #define PI_ERR_ST0_ADDR_MASK   0x3ffffffffe000000
 #define PI_ERR_ST0_ADDR_SHFT   25
-#define PI_ERR_ST0_OVERRUN_MASK        0x4000000000000000
-#define PI_ERR_ST0_OVERRUN_SHFT        62
+#define PI_ERR_ST0_OVERRUN_MASK 0x4000000000000000
+#define PI_ERR_ST0_OVERRUN_SHFT 62
 #define PI_ERR_ST0_VALID_MASK  0x8000000000000000
 #define PI_ERR_ST0_VALID_SHFT  63
 
 /* Fields in PI_ERR_STATUS1_[AB] */
 #define PI_ERR_ST1_SPOOL_MASK  0x00000000001fffff
 #define PI_ERR_ST1_SPOOL_SHFT  0
-#define PI_ERR_ST1_TOUTCNT_MASK        0x000000001fe00000
-#define PI_ERR_ST1_TOUTCNT_SHFT        21
+#define PI_ERR_ST1_TOUTCNT_MASK 0x000000001fe00000
+#define PI_ERR_ST1_TOUTCNT_SHFT 21
 #define PI_ERR_ST1_INVCNT_MASK 0x0000007fe0000000
 #define PI_ERR_ST1_INVCNT_SHFT 29
 #define PI_ERR_ST1_CRBNUM_MASK 0x0000038000000000
 #define PI_ERR_ST1_CRBNUM_SHFT 39
 #define PI_ERR_ST1_WRBRRB_MASK 0x0000040000000000
 #define PI_ERR_ST1_WRBRRB_SHFT 42
-#define PI_ERR_ST1_CRBSTAT_MASK        0x001ff80000000000
-#define PI_ERR_ST1_CRBSTAT_SHFT        43
+#define PI_ERR_ST1_CRBSTAT_MASK 0x001ff80000000000
+#define PI_ERR_ST1_CRBSTAT_SHFT 43
 #define PI_ERR_ST1_MSGSRC_MASK 0xffe0000000000000
 #define PI_ERR_ST1_MSGSRC_SHFT 53
 
 #define PI_ERR_STK_CRBNUM_SHFT 9
 #define PI_ERR_STK_WRBRRB_MASK 0x0000000000001000
 #define PI_ERR_STK_WRBRRB_SHFT 12
-#define PI_ERR_STK_CRBSTAT_MASK        0x00000000007fe000
-#define PI_ERR_STK_CRBSTAT_SHFT        13
+#define PI_ERR_STK_CRBSTAT_MASK 0x00000000007fe000
+#define PI_ERR_STK_CRBSTAT_SHFT 13
 #define PI_ERR_STK_CMD_MASK    0x000000007f800000
 #define PI_ERR_STK_CMD_SHFT    23
 #define PI_ERR_STK_ADDR_MASK   0xffffffff80000000
@@ -364,11 +364,11 @@ typedef u64       rtc_time_t;
 
 /* Bits in PI_SYSAD_ERRCHK_EN */
 #define PI_SYSAD_ERRCHK_ECCGEN 0x01    /* Enable ECC generation            */
-#define PI_SYSAD_ERRCHK_QUALGEN        0x02    /* Enable data quality signal gen.  */
-#define PI_SYSAD_ERRCHK_SADP   0x04    /* Enable SysAD parity checking     */
+#define PI_SYSAD_ERRCHK_QUALGEN 0x02   /* Enable data quality signal gen.  */
+#define PI_SYSAD_ERRCHK_SADP   0x04    /* Enable SysAD parity checking     */
 #define PI_SYSAD_ERRCHK_CMDP   0x08    /* Enable SysCmd parity checking    */
 #define PI_SYSAD_ERRCHK_STATE  0x10    /* Enable SysState parity checking  */
-#define PI_SYSAD_ERRCHK_QUAL   0x20    /* Enable data quality checking     */
+#define PI_SYSAD_ERRCHK_QUAL   0x20    /* Enable data quality checking     */
 #define PI_SYSAD_CHECK_ALL     0x3f    /* Generate and check all signals.  */
 
 /* Interrupt pending bits on R10000 */
index 3c97e08..3b5efee 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-#define CAUSE_BERRINTR                 IE_IRQ5
+#define CAUSE_BERRINTR         IE_IRQ5
 
-#define ECCF_CACHE_ERR  0
-#define ECCF_TAGLO      1
-#define ECCF_ECC        2
-#define ECCF_ERROREPC   3
-#define ECCF_PADDR      4
-#define ECCF_SIZE       (5 * sizeof(long))
+#define ECCF_CACHE_ERR 0
+#define ECCF_TAGLO     1
+#define ECCF_ECC       2
+#define ECCF_ERROREPC  3
+#define ECCF_PADDR     4
+#define ECCF_SIZE      (5 * sizeof(long))
 
 #endif /* !__ASSEMBLY__ */
 
@@ -39,8 +39,8 @@
  * the processor number of the calling processor.  The proc parameters
  * must be a register.
  */
-#define KL_GET_CPUNUM(proc)                            \
-       dli     proc, LOCAL_HUB(0);                     \
+#define KL_GET_CPUNUM(proc)                            \
+       dli     proc, LOCAL_HUB(0);                     \
        ld      proc, PI_CPU_NUM(proc)
 
 #endif /* __ASSEMBLY__ */
 
 #define NUM_CAUSE_INTRS                8
 
-#define SCACHE_LINESIZE        128
-#define SCACHE_LINEMASK        (SCACHE_LINESIZE - 1)
+#define SCACHE_LINESIZE 128
+#define SCACHE_LINEMASK (SCACHE_LINESIZE - 1)
 
 #include <asm/sn/addrs.h>
 
-#define LED_CYCLE_MASK  0x0f
-#define LED_CYCLE_SHFT  4
+#define LED_CYCLE_MASK 0x0f
+#define LED_CYCLE_SHFT 4
 
 #define SEND_NMI(_nasid, _slice)       \
-          REMOTE_HUB_S((_nasid),  (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
+         REMOTE_HUB_S((_nasid),  (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
 
 #endif /* _ASM_SN_SN0_IP27_H */
index 74d0bb2..c4813d6 100644 (file)
@@ -11,7 +11,7 @@
 
 #include <linux/types.h>
 
-typedef unsigned long  cpuid_t;
+typedef unsigned long  cpuid_t;
 typedef unsigned long  cnodemask_t;
 typedef signed short   nasid_t;        /* node id in numa-as-id space */
 typedef signed short   cnodeid_t;      /* node id in compact-id space */
@@ -19,7 +19,7 @@ typedef signed char   partid_t;       /* partition ID type */
 typedef signed short   moduleid_t;     /* user-visible module number type */
 typedef signed short   cmoduleid_t;    /* kernel compact module id type */
 typedef unsigned char  clusterid_t;    /* Clusterid of the cell */
-typedef unsigned long  pfn_t;
+typedef unsigned long  pfn_t;
 
 typedef dev_t          vertex_hdl_t;   /* hardware graph vertex handle */
 
index 8c1eb02..a107201 100644 (file)
 
 extern unsigned int sni_brd_type;
 
-#define SNI_BRD_10                 2
-#define SNI_BRD_10NEW              3
-#define SNI_BRD_TOWER_OASIC        4
-#define SNI_BRD_MINITOWER          5
-#define SNI_BRD_PCI_TOWER          6
-#define SNI_BRD_RM200              7
-#define SNI_BRD_PCI_MTOWER         8
-#define SNI_BRD_PCI_DESKTOP        9
-#define SNI_BRD_PCI_TOWER_CPLUS   10
+#define SNI_BRD_10                2
+#define SNI_BRD_10NEW             3
+#define SNI_BRD_TOWER_OASIC       4
+#define SNI_BRD_MINITOWER         5
+#define SNI_BRD_PCI_TOWER         6
+#define SNI_BRD_RM200             7
+#define SNI_BRD_PCI_MTOWER        8
+#define SNI_BRD_PCI_DESKTOP       9
+#define SNI_BRD_PCI_TOWER_CPLUS          10
 #define SNI_BRD_PCI_MTOWER_CPLUS  11
 
 /* RM400 cpu types */
-#define SNI_CPU_M8021           0x01
-#define SNI_CPU_M8030           0x04
-#define SNI_CPU_M8031           0x06
-#define SNI_CPU_M8034           0x0f
-#define SNI_CPU_M8037           0x07
-#define SNI_CPU_M8040           0x05
-#define SNI_CPU_M8043           0x09
-#define SNI_CPU_M8050           0x0b
-#define SNI_CPU_M8053           0x0d
+#define SNI_CPU_M8021          0x01
+#define SNI_CPU_M8030          0x04
+#define SNI_CPU_M8031          0x06
+#define SNI_CPU_M8034          0x0f
+#define SNI_CPU_M8037          0x07
+#define SNI_CPU_M8040          0x05
+#define SNI_CPU_M8043          0x09
+#define SNI_CPU_M8050          0x0b
+#define SNI_CPU_M8053          0x0d
 
 #define SNI_PORT_BASE          CKSEG1ADDR(0xb4000000)
 
@@ -52,14 +52,14 @@ extern unsigned int sni_brd_type;
 #define PCIMT_ERRADDR          CKSEG1ADDR(0xbfff0044)
 #define PCIMT_SYNDROME         CKSEG1ADDR(0xbfff004c)
 #define PCIMT_ITPEND           CKSEG1ADDR(0xbfff0054)
-#define  IT_INT2               0x01
-#define  IT_INTD               0x02
-#define  IT_INTC               0x04
-#define  IT_INTB               0x08
-#define  IT_INTA               0x10
-#define  IT_EISA               0x20
-#define  IT_SCSI               0x40
-#define  IT_ETH                        0x80
+#define         IT_INT2                0x01
+#define         IT_INTD                0x02
+#define         IT_INTC                0x04
+#define         IT_INTB                0x08
+#define         IT_INTA                0x10
+#define         IT_EISA                0x20
+#define         IT_SCSI                0x40
+#define         IT_ETH                 0x80
 #define PCIMT_IRQSEL           CKSEG1ADDR(0xbfff005c)
 #define PCIMT_TESTMEM          CKSEG1ADDR(0xbfff0064)
 #define PCIMT_ECCREG           CKSEG1ADDR(0xbfff006c)
@@ -86,14 +86,14 @@ extern unsigned int sni_brd_type;
 #define PCIMT_ERRADDR          CKSEG1ADDR(0xbfff0040)
 #define PCIMT_SYNDROME         CKSEG1ADDR(0xbfff0048)
 #define PCIMT_ITPEND           CKSEG1ADDR(0xbfff0050)
-#define  IT_INT2               0x01
-#define  IT_INTD               0x02
-#define  IT_INTC               0x04
-#define  IT_INTB               0x08
-#define  IT_INTA               0x10
-#define  IT_EISA               0x20
-#define  IT_SCSI               0x40
-#define  IT_ETH                        0x80
+#define         IT_INT2                0x01
+#define         IT_INTD                0x02
+#define         IT_INTC                0x04
+#define         IT_INTB                0x08
+#define         IT_INTA                0x10
+#define         IT_EISA                0x20
+#define         IT_SCSI                0x40
+#define         IT_ETH                 0x80
 #define PCIMT_IRQSEL           CKSEG1ADDR(0xbfff0058)
 #define PCIMT_TESTMEM          CKSEG1ADDR(0xbfff0060)
 #define PCIMT_ECCREG           CKSEG1ADDR(0xbfff0068)
@@ -137,29 +137,29 @@ extern unsigned int sni_brd_type;
 /*
  * A20R based boards
  */
-#define A20R_PT_CLOCK_BASE      CKSEG1ADDR(0xbc040000)
-#define A20R_PT_TIM0_ACK        CKSEG1ADDR(0xbc050000)
-#define A20R_PT_TIM1_ACK        CKSEG1ADDR(0xbc060000)
+#define A20R_PT_CLOCK_BASE     CKSEG1ADDR(0xbc040000)
+#define A20R_PT_TIM0_ACK       CKSEG1ADDR(0xbc050000)
+#define A20R_PT_TIM1_ACK       CKSEG1ADDR(0xbc060000)
 
-#define SNI_A20R_IRQ_BASE       MIPS_CPU_IRQ_BASE
-#define SNI_A20R_IRQ_TIMER      (SNI_A20R_IRQ_BASE+5)
+#define SNI_A20R_IRQ_BASE      MIPS_CPU_IRQ_BASE
+#define SNI_A20R_IRQ_TIMER     (SNI_A20R_IRQ_BASE+5)
 
-#define SNI_PCIT_INT_REG        CKSEG1ADDR(0xbfff000c)
+#define SNI_PCIT_INT_REG       CKSEG1ADDR(0xbfff000c)
 
-#define SNI_PCIT_INT_START      24
-#define SNI_PCIT_INT_END        30
+#define SNI_PCIT_INT_START     24
+#define SNI_PCIT_INT_END       30
 
-#define PCIT_IRQ_ETHERNET       (MIPS_CPU_IRQ_BASE + 5)
-#define PCIT_IRQ_INTA           (SNI_PCIT_INT_START + 0)
-#define PCIT_IRQ_INTB           (SNI_PCIT_INT_START + 1)
-#define PCIT_IRQ_INTC           (SNI_PCIT_INT_START + 2)
-#define PCIT_IRQ_INTD           (SNI_PCIT_INT_START + 3)
-#define PCIT_IRQ_SCSI0          (SNI_PCIT_INT_START + 4)
-#define PCIT_IRQ_SCSI1          (SNI_PCIT_INT_START + 5)
+#define PCIT_IRQ_ETHERNET      (MIPS_CPU_IRQ_BASE + 5)
+#define PCIT_IRQ_INTA          (SNI_PCIT_INT_START + 0)
+#define PCIT_IRQ_INTB          (SNI_PCIT_INT_START + 1)
+#define PCIT_IRQ_INTC          (SNI_PCIT_INT_START + 2)
+#define PCIT_IRQ_INTD          (SNI_PCIT_INT_START + 3)
+#define PCIT_IRQ_SCSI0         (SNI_PCIT_INT_START + 4)
+#define PCIT_IRQ_SCSI1         (SNI_PCIT_INT_START + 5)
 
 
 /*
- * Interrupt 0-16 are EISA interrupts.  Interrupts from 16 on are assigned
+ * Interrupt 0-16 are EISA interrupts. Interrupts from 16 on are assigned
  * to the other interrupts generated by ASIC PCI.
  *
  * INT2 is a wired-or of the push button interrupt, high temperature interrupt
@@ -204,12 +204,12 @@ extern unsigned int sni_brd_type;
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define __SNI_END 3
 #endif
-#define SNI_IDPROM_BASE        CKSEG1ADDR(0x1ff00000)
+#define SNI_IDPROM_BASE               CKSEG1ADDR(0x1ff00000)
 #define SNI_IDPROM_MEMSIZE     (SNI_IDPROM_BASE + (0x28 ^ __SNI_END))
 #define SNI_IDPROM_BRDTYPE     (SNI_IDPROM_BASE + (0x29 ^ __SNI_END))
 #define SNI_IDPROM_CPUTYPE     (SNI_IDPROM_BASE + (0x30 ^ __SNI_END))
 
-#define SNI_IDPROM_SIZE        0x1000
+#define SNI_IDPROM_SIZE 0x1000
 
 /* board specific init functions */
 extern void sni_a20r_init(void);
index 65900da..d2da53c 100644 (file)
@@ -11,7 +11,7 @@
 #else
 # define SECTION_SIZE_BITS     28
 #endif
-#define MAX_PHYSMEM_BITS        35
+#define MAX_PHYSMEM_BITS       35
 
 #endif /* CONFIG_SPARSEMEM */
 #endif /* _MIPS_SPARSEMEM_H */
index ca61e84..5130c88 100644 (file)
@@ -17,7 +17,7 @@
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
- * Simple spin lock operations.  There are two variants, one clears IRQ's
+ * Simple spin lock operations.         There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * These are fair FIFO ticket locks
@@ -222,7 +222,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define arch_write_can_lock(rw)        (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
index c52f360..9b2528e 100644 (file)
@@ -11,7 +11,7 @@
 
 typedef union {
        /*
-        * bits  0..15 : serving_now
+        * bits  0..15 : serving_now
         * bits 16..31 : ticket
         */
        u32 lock;
index cb41af5..c993840 100644 (file)
                ori     $28, sp, _THREAD_MASK
                xori    $28, _THREAD_MASK
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-               .set    mips64
-               pref    0, 0($28)       /* Prefetch the current pointer */
-               pref    0, PT_R31(sp)   /* Prefetch the $31(ra) */
+               .set    mips64
+               pref    0, 0($28)       /* Prefetch the current pointer */
+               pref    0, PT_R31(sp)   /* Prefetch the $31(ra) */
                /* The Octeon multiplier state is affected by general multiply
                    instructions. It must be saved before and kernel code might
                    corrupt it */
-               jal     octeon_mult_save
-               LONG_L  v1, 0($28)  /* Load the current pointer */
+               jal     octeon_mult_save
+               LONG_L  v1, 0($28)  /* Load the current pointer */
                         /* Restore $31(ra) that was changed by the jal */
-               LONG_L  ra, PT_R31(sp)
-               pref    0, 0(v1)    /* Prefetch the current thread */
+               LONG_L  ra, PT_R31(sp)
+               pref    0, 0(v1)    /* Prefetch the current thread */
 #endif
                .set    pop
                .endm
index 436e3ad..29030cb 100644 (file)
@@ -35,7 +35,7 @@ static __inline__ char *strcpy(char *__dest, __const__ char *__src)
        ".set\tat\n\t"
        ".set\treorder"
        : "=r" (__dest), "=r" (__src)
-        : "0" (__dest), "1" (__src)
+       : "0" (__dest), "1" (__src)
        : "memory");
 
   return __xdest;
@@ -62,9 +62,9 @@ static __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n)
        "2:\n\t"
        ".set\tat\n\t"
        ".set\treorder"
-        : "=r" (__dest), "=r" (__src), "=r" (__n)
-        : "0" (__dest), "1" (__src), "2" (__n)
-        : "memory");
+       : "=r" (__dest), "=r" (__src), "=r" (__n)
+       : "0" (__dest), "1" (__src), "2" (__n)
+       : "memory");
 
   return __xdest;
 }
index 4f8ddba..fd16bcb 100644 (file)
@@ -30,7 +30,7 @@ extern struct task_struct *ll_task;
 #ifdef CONFIG_MIPS_MT_FPAFF
 
 /*
- * Handle the scheduler resume end of FPU affinity management.  We do this
+ * Handle the scheduler resume end of FPU affinity management. We do this
  * inline to try to keep the overhead down. If we have been forced to run on
  * a "CPU" with an FPU because of a previous high level of FP computation,
  * but did not actually use the FPU during the most recent time-slice (CU1
@@ -72,7 +72,7 @@ do {                                                                  \
                __save_dsp(prev);                                       \
        __clear_software_ll_bit();                                      \
        __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);  \
-       (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
+       (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
 } while (0)
 
 #define finish_arch_switch(prev)                                       \
index b2050b9..178f792 100644 (file)
@@ -44,7 +44,7 @@ struct thread_info {
 #define INIT_THREAD_INFO(tsk)                  \
 {                                              \
        .task           = &tsk,                 \
-       .exec_domain    = &default_exec_domain, \
+       .exec_domain    = &default_exec_domain, \
        .flags          = _TIF_FIXADE,          \
        .cpu            = 0,                    \
        .preempt_count  = INIT_PREEMPT_COUNT,   \
index 761f2e9..debc800 100644 (file)
@@ -6,8 +6,8 @@
  * include/asm-mips/time.h
  *     header file for the new style time.c file and time services.
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
@@ -75,7 +75,7 @@ extern int init_r4k_clocksource(void);
 
 static inline int init_mips_clocksource(void)
 {
-#ifdef CONFIG_CSRC_R4K
+#if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC)
        return init_r4k_clocksource();
 #else
        return 0;
index 80d9dfc..c67842b 100644 (file)
@@ -5,7 +5,7 @@
  * MIPS doesn't need any special per-pte or per-vma handling, except
  * we need to flush cache for area to be unmapped.
  */
-#define tlb_start_vma(tlb, vma)                                \
+#define tlb_start_vma(tlb, vma)                                        \
        do {                                                    \
                if (!tlb->fullmm)                               \
                        flush_cache_range(vma, vma->vm_start, vma->vm_end); \
index 259145e..12609a1 100644 (file)
@@ -11,7 +11,7 @@
 #include <topology.h>
 
 #ifdef CONFIG_SMP
-#define smt_capable()   (smp_num_siblings > 1)
+#define smt_capable()  (smp_num_siblings > 1)
 #endif
 
 #endif /* __ASM_TOPOLOGY_H */
index 420ca06..f41cf3e 100644 (file)
@@ -14,7 +14,7 @@
 /*
  * Possible status responses for a board_be_handler backend.
  */
-#define MIPS_BE_DISCARD        0               /* return with no action */
+#define MIPS_BE_DISCARD 0              /* return with no action */
 #define MIPS_BE_FIXUP  1               /* return to the fixup code */
 #define MIPS_BE_FATAL  2               /* treat as an unrecoverable error */
 
index 8808d7f..aab959d 100644 (file)
@@ -40,7 +40,7 @@
 #define JMR3927_PCIIO_BASE     (KSEG1 + JMR3927_PCIIO)
 
 #define JMR3927_IOC_REV_ADDR   (JMR3927_IOC_BASE + 0x00000000)
-#define JMR3927_IOC_NVRAMB_ADDR        (JMR3927_IOC_BASE + 0x00010000)
+#define JMR3927_IOC_NVRAMB_ADDR (JMR3927_IOC_BASE + 0x00010000)
 #define JMR3927_IOC_LED_ADDR   (JMR3927_IOC_BASE + 0x00020000)
 #define JMR3927_IOC_DIPSW_ADDR (JMR3927_IOC_BASE + 0x00030000)
 #define JMR3927_IOC_BREV_ADDR  (JMR3927_IOC_BASE + 0x00040000)
 #define JMR3927_NR_IRQ_IRC     16      /* On-Chip IRC */
 #define JMR3927_NR_IRQ_IOC     8       /* PCI/MODEM/INT[6:7] */
 
-#define JMR3927_IRQ_IRC        TXX9_IRQ_BASE
-#define JMR3927_IRQ_IOC        (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC)
-#define JMR3927_IRQ_END        (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
+#define JMR3927_IRQ_IRC TXX9_IRQ_BASE
+#define JMR3927_IRQ_IOC (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC)
+#define JMR3927_IRQ_END (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
 
 #define JMR3927_IRQ_IRC_INT0   (JMR3927_IRQ_IRC + TX3927_IR_INT0)
 #define JMR3927_IRQ_IRC_INT1   (JMR3927_IRQ_IRC + TX3927_IR_INT1)
 #define JMR3927_IRQ_IRC_INT5   (JMR3927_IRQ_IRC + TX3927_IR_INT5)
 #define JMR3927_IRQ_IRC_SIO0   (JMR3927_IRQ_IRC + TX3927_IR_SIO0)
 #define JMR3927_IRQ_IRC_SIO1   (JMR3927_IRQ_IRC + TX3927_IR_SIO1)
-#define JMR3927_IRQ_IRC_SIO(ch)        (JMR3927_IRQ_IRC + TX3927_IR_SIO(ch))
+#define JMR3927_IRQ_IRC_SIO(ch) (JMR3927_IRQ_IRC + TX3927_IR_SIO(ch))
 #define JMR3927_IRQ_IRC_DMA    (JMR3927_IRQ_IRC + TX3927_IR_DMA)
 #define JMR3927_IRQ_IRC_PIO    (JMR3927_IRQ_IRC + TX3927_IR_PIO)
 #define JMR3927_IRQ_IRC_PCI    (JMR3927_IRQ_IRC + TX3927_IR_PCI)
-#define JMR3927_IRQ_IRC_TMR(ch)        (JMR3927_IRQ_IRC + TX3927_IR_TMR(ch))
+#define JMR3927_IRQ_IRC_TMR(ch) (JMR3927_IRQ_IRC + TX3927_IR_TMR(ch))
 #define JMR3927_IRQ_IOC_PCIA   (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIA)
 #define JMR3927_IRQ_IOC_PCIB   (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIB)
 #define JMR3927_IRQ_IOC_PCIC   (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIC)
 #define JMR3927_IRQ_ETHER0     JMR3927_IRQ_IRC_INT3
 
 /* Clocks */
-#define JMR3927_CORECLK        132710400       /* 132.7MHz */
+#define JMR3927_CORECLK 132710400      /* 132.7MHz */
 
 /*
  * TX3927 Pin Configuration:
index b2adab3..4060ad2 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Author: MontaVista Software, Inc.
- *         source@mvista.com
+ *        source@mvista.com
  *
  * Copyright 2001-2002 MontaVista Software Inc.
  *
@@ -38,7 +38,7 @@
 #define RBTX4927_IMASK_ADDR    (IO_BASE + TXX9_CE(2) + 0x00002000)
 #define RBTX4927_IMSTAT_ADDR   (IO_BASE + TXX9_CE(2) + 0x00002006)
 #define RBTX4927_SOFTINT_ADDR  (IO_BASE + TXX9_CE(2) + 0x00003000)
-#define RBTX4927_SOFTRESET_ADDR        (IO_BASE + TXX9_CE(2) + 0x0000f000)
+#define RBTX4927_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f000)
 #define RBTX4927_SOFTRESETLOCK_ADDR    (IO_BASE + TXX9_CE(2) + 0x0000f002)
 #define RBTX4927_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f006)
 #define RBTX4927_BRAMRTC_BASE  (IO_BASE + TXX9_CE(2) + 0x00010000)
@@ -50,7 +50,7 @@
 #define rbtx4927_imask_addr    ((__u8 __iomem *)RBTX4927_IMASK_ADDR)
 #define rbtx4927_imstat_addr   ((__u8 __iomem *)RBTX4927_IMSTAT_ADDR)
 #define rbtx4927_softint_addr  ((__u8 __iomem *)RBTX4927_SOFTINT_ADDR)
-#define rbtx4927_softreset_addr        ((__u8 __iomem *)RBTX4927_SOFTRESET_ADDR)
+#define rbtx4927_softreset_addr ((__u8 __iomem *)RBTX4927_SOFTRESET_ADDR)
 #define rbtx4927_softresetlock_addr    \
                                ((__u8 __iomem *)RBTX4927_SOFTRESETLOCK_ADDR)
 #define rbtx4927_pcireset_addr ((__u8 __iomem *)RBTX4927_PCIRESET_ADDR)
index 9f0441a..9c969dd 100644 (file)
@@ -36,7 +36,7 @@
 #define RBTX4938_SPICS_ADDR    (IO_BASE + TXX9_CE(2) + 0x00005002)
 #define RBTX4938_SFPWR_ADDR    (IO_BASE + TXX9_CE(2) + 0x00005008)
 #define RBTX4938_SFVOL_ADDR    (IO_BASE + TXX9_CE(2) + 0x0000500a)
-#define RBTX4938_SOFTRESET_ADDR        (IO_BASE + TXX9_CE(2) + 0x00007000)
+#define RBTX4938_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007000)
 #define RBTX4938_SOFTRESETLOCK_ADDR    (IO_BASE + TXX9_CE(2) + 0x00007002)
 #define RBTX4938_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007004)
 #define RBTX4938_ETHER_BASE    (IO_BASE + TXX9_CE(2) + 0x00020000)
@@ -78,7 +78,7 @@
 #define rbtx4938_spics_addr    ((__u8 __iomem *)RBTX4938_SPICS_ADDR)
 #define rbtx4938_sfpwr_addr    ((__u8 __iomem *)RBTX4938_SFPWR_ADDR)
 #define rbtx4938_sfvol_addr    ((__u8 __iomem *)RBTX4938_SFVOL_ADDR)
-#define rbtx4938_softreset_addr        ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
+#define rbtx4938_softreset_addr ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
 #define rbtx4938_softresetlock_addr    \
                                ((__u8 __iomem *)RBTX4938_SOFTRESETLOCK_ADDR)
 #define rbtx4938_pcireset_addr ((__u8 __iomem *)RBTX4938_PCIRESET_ADDR)
@@ -94,7 +94,7 @@
 
 /* These are the virtual IRQ numbers, we divide all IRQ's into
  * 'spaces', the 'space' determines where and how to enable/disable
- * that particular IRQ on an RBTX4938 machine.  Add new 'spaces' as new
+ * that particular IRQ on an RBTX4938 machine. Add new 'spaces' as new
  * IRQ hardware is supported.
  */
 #define RBTX4938_NR_IRQ_IOC    8
 #define RBTX4938_IRQ_IOC       (TXX9_IRQ_BASE + TX4938_NUM_IR)
 #define RBTX4938_IRQ_END       (RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC)
 
-#define RBTX4938_IRQ_IRC_ECCERR        (RBTX4938_IRQ_IRC + TX4938_IR_ECCERR)
-#define RBTX4938_IRQ_IRC_WTOERR        (RBTX4938_IRQ_IRC + TX4938_IR_WTOERR)
-#define RBTX4938_IRQ_IRC_INT(n)        (RBTX4938_IRQ_IRC + TX4938_IR_INT(n))
-#define RBTX4938_IRQ_IRC_SIO(n)        (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n))
+#define RBTX4938_IRQ_IRC_ECCERR (RBTX4938_IRQ_IRC + TX4938_IR_ECCERR)
+#define RBTX4938_IRQ_IRC_WTOERR (RBTX4938_IRQ_IRC + TX4938_IR_WTOERR)
+#define RBTX4938_IRQ_IRC_INT(n) (RBTX4938_IRQ_IRC + TX4938_IR_INT(n))
+#define RBTX4938_IRQ_IRC_SIO(n) (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n))
 #define RBTX4938_IRQ_IRC_DMA(ch, n)    (RBTX4938_IRQ_IRC + TX4938_IR_DMA(ch, n))
 #define RBTX4938_IRQ_IRC_PIO   (RBTX4938_IRQ_IRC + TX4938_IR_PIO)
 #define RBTX4938_IRQ_IRC_PDMAC (RBTX4938_IRQ_IRC + TX4938_IR_PDMAC)
 #define RBTX4938_IRQ_IRC_PCIC  (RBTX4938_IRQ_IRC + TX4938_IR_PCIC)
-#define RBTX4938_IRQ_IRC_TMR(n)        (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n))
+#define RBTX4938_IRQ_IRC_TMR(n) (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n))
 #define RBTX4938_IRQ_IRC_NDFMC (RBTX4938_IRQ_IRC + TX4938_IR_NDFMC)
-#define RBTX4938_IRQ_IRC_PCIERR        (RBTX4938_IRQ_IRC + TX4938_IR_PCIERR)
-#define RBTX4938_IRQ_IRC_PCIPME        (RBTX4938_IRQ_IRC + TX4938_IR_PCIPME)
+#define RBTX4938_IRQ_IRC_PCIERR (RBTX4938_IRQ_IRC + TX4938_IR_PCIERR)
+#define RBTX4938_IRQ_IRC_PCIPME (RBTX4938_IRQ_IRC + TX4938_IR_PCIPME)
 #define RBTX4938_IRQ_IRC_ACLC  (RBTX4938_IRQ_IRC + TX4938_IR_ACLC)
 #define RBTX4938_IRQ_IRC_ACLCPME       (RBTX4938_IRQ_IRC + TX4938_IR_ACLCPME)
 #define RBTX4938_IRQ_IRC_PCIC1 (RBTX4938_IRQ_IRC + TX4938_IR_PCIC1)
index e517899..6157bfd 100644 (file)
@@ -17,7 +17,7 @@
 
 /* Address map */
 #define RBTX4939_IOC_REG_ADDR  (IO_BASE + TXX9_CE(1) + 0x00000000)
-#define RBTX4939_BOARD_REV_ADDR        (IO_BASE + TXX9_CE(1) + 0x00000000)
+#define RBTX4939_BOARD_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
 #define RBTX4939_IOC_REV_ADDR  (IO_BASE + TXX9_CE(1) + 0x00000002)
 #define RBTX4939_CONFIG1_ADDR  (IO_BASE + TXX9_CE(1) + 0x00000004)
 #define RBTX4939_CONFIG2_ADDR  (IO_BASE + TXX9_CE(1) + 0x00000006)
@@ -46,9 +46,9 @@
 #define RBTX4939_VPSIN_ADDR    (IO_BASE + TXX9_CE(1) + 0x0000500c)
 #define RBTX4939_7SEG_ADDR(s, ch)      \
        (IO_BASE + TXX9_CE(1) + 0x00006000 + (s) * 16 + ((ch) & 3) * 2)
-#define RBTX4939_SOFTRESET_ADDR        (IO_BASE + TXX9_CE(1) + 0x00007000)
+#define RBTX4939_SOFTRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00007000)
 #define RBTX4939_RESETEN_ADDR  (IO_BASE + TXX9_CE(1) + 0x00007002)
-#define RBTX4939_RESETSTAT_ADDR        (IO_BASE + TXX9_CE(1) + 0x00007004)
+#define RBTX4939_RESETSTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00007004)
 #define RBTX4939_ETHER_BASE    (IO_BASE + TXX9_CE(1) + 0x00020000)
 
 /* Ethernet port address */
 #define RBTX4939_PE2_CIR       0x08
 #define RBTX4939_PE2_SPI       0x10
 #define RBTX4939_PE2_GPIO      0x20
-#define RBTX4939_PE3_VP        0x01
+#define RBTX4939_PE3_VP 0x01
 #define RBTX4939_PE3_VP_P      0x02
 #define RBTX4939_PE3_VP_S      0x04
 
-#define rbtx4939_board_rev_addr        ((u8 __iomem *)RBTX4939_BOARD_REV_ADDR)
+#define rbtx4939_board_rev_addr ((u8 __iomem *)RBTX4939_BOARD_REV_ADDR)
 #define rbtx4939_ioc_rev_addr  ((u8 __iomem *)RBTX4939_IOC_REV_ADDR)
 #define rbtx4939_config1_addr  ((u8 __iomem *)RBTX4939_CONFIG1_ADDR)
 #define rbtx4939_config2_addr  ((u8 __iomem *)RBTX4939_CONFIG2_ADDR)
 #define rbtx4939_vpsin_addr    ((u8 __iomem *)RBTX4939_VPSIN_ADDR)
 #define rbtx4939_7seg_addr(s, ch) \
                                ((u8 __iomem *)RBTX4939_7SEG_ADDR(s, ch))
-#define rbtx4939_softreset_addr        ((u8 __iomem *)RBTX4939_SOFTRESET_ADDR)
+#define rbtx4939_softreset_addr ((u8 __iomem *)RBTX4939_SOFTRESET_ADDR)
 #define rbtx4939_reseten_addr  ((u8 __iomem *)RBTX4939_RESETEN_ADDR)
-#define rbtx4939_resetstat_addr        ((u8 __iomem *)RBTX4939_RESETSTAT_ADDR)
+#define rbtx4939_resetstat_addr ((u8 __iomem *)RBTX4939_RESETSTAT_ADDR)
 
 /*
  * IRQ mappings
index d1d6332..926d08f 100644 (file)
 /* Common Registers */
 #define SMSC_FDC37M81X_CONFIG_INDEX  0x00
 #define SMSC_FDC37M81X_CONFIG_DATA   0x01
-#define SMSC_FDC37M81X_CONF          0x02
-#define SMSC_FDC37M81X_INDEX         0x03
-#define SMSC_FDC37M81X_DNUM          0x07
-#define SMSC_FDC37M81X_DID           0x20
-#define SMSC_FDC37M81X_DREV          0x21
-#define SMSC_FDC37M81X_PCNT          0x22
-#define SMSC_FDC37M81X_PMGT          0x23
-#define SMSC_FDC37M81X_OSC           0x24
-#define SMSC_FDC37M81X_CONFPA0       0x26
-#define SMSC_FDC37M81X_CONFPA1       0x27
-#define SMSC_FDC37M81X_TEST4         0x2B
-#define SMSC_FDC37M81X_TEST5         0x2C
-#define SMSC_FDC37M81X_TEST1         0x2D
-#define SMSC_FDC37M81X_TEST2         0x2E
-#define SMSC_FDC37M81X_TEST3         0x2F
+#define SMSC_FDC37M81X_CONF         0x02
+#define SMSC_FDC37M81X_INDEX        0x03
+#define SMSC_FDC37M81X_DNUM         0x07
+#define SMSC_FDC37M81X_DID          0x20
+#define SMSC_FDC37M81X_DREV         0x21
+#define SMSC_FDC37M81X_PCNT         0x22
+#define SMSC_FDC37M81X_PMGT         0x23
+#define SMSC_FDC37M81X_OSC          0x24
+#define SMSC_FDC37M81X_CONFPA0      0x26
+#define SMSC_FDC37M81X_CONFPA1      0x27
+#define SMSC_FDC37M81X_TEST4        0x2B
+#define SMSC_FDC37M81X_TEST5        0x2C
+#define SMSC_FDC37M81X_TEST1        0x2D
+#define SMSC_FDC37M81X_TEST2        0x2E
+#define SMSC_FDC37M81X_TEST3        0x2F
 
 /* Logical device numbers */
-#define SMSC_FDC37M81X_FDD           0x00
-#define SMSC_FDC37M81X_PARALLEL      0x03
-#define SMSC_FDC37M81X_SERIAL1       0x04
-#define SMSC_FDC37M81X_SERIAL2       0x05
-#define SMSC_FDC37M81X_KBD           0x07
-#define SMSC_FDC37M81X_AUXIO         0x08
-#define SMSC_FDC37M81X_NONE          0xff
+#define SMSC_FDC37M81X_FDD          0x00
+#define SMSC_FDC37M81X_PARALLEL             0x03
+#define SMSC_FDC37M81X_SERIAL1      0x04
+#define SMSC_FDC37M81X_SERIAL2      0x05
+#define SMSC_FDC37M81X_KBD          0x07
+#define SMSC_FDC37M81X_AUXIO        0x08
+#define SMSC_FDC37M81X_NONE         0xff
 
 /* Logical device Config Registers */
-#define SMSC_FDC37M81X_ACTIVE        0x30
+#define SMSC_FDC37M81X_ACTIVE       0x30
 #define SMSC_FDC37M81X_BASEADDR0     0x60
 #define SMSC_FDC37M81X_BASEADDR1     0x61
-#define SMSC_FDC37M81X_INT           0x70
-#define SMSC_FDC37M81X_INT2          0x72
-#define SMSC_FDC37M81X_LDCR_F0       0xF0
+#define SMSC_FDC37M81X_INT          0x70
+#define SMSC_FDC37M81X_INT2         0x72
+#define SMSC_FDC37M81X_LDCR_F0      0xF0
 
 /* Chip Config Values */
 #define SMSC_FDC37M81X_CONFIG_ENTER  0x55
 #define SMSC_FDC37M81X_CONFIG_EXIT   0xaa
-#define SMSC_FDC37M81X_CHIP_ID       0x4d
+#define SMSC_FDC37M81X_CHIP_ID      0x4d
 
 unsigned long smsc_fdc37m81x_init(unsigned long port);
 
index dc30c8d..149fab4 100644 (file)
@@ -8,8 +8,8 @@
 #ifndef __ASM_TXX9_TX3927_H
 #define __ASM_TXX9_TX3927_H
 
-#define TX3927_REG_BASE        0xfffe0000UL
-#define TX3927_REG_SIZE        0x00010000
+#define TX3927_REG_BASE 0xfffe0000UL
+#define TX3927_REG_SIZE 0x00010000
 #define TX3927_SDRAMC_REG      (TX3927_REG_BASE + 0x8000)
 #define TX3927_ROMC_REG                (TX3927_REG_BASE + 0x9000)
 #define TX3927_DMA_REG         (TX3927_REG_BASE + 0xb000)
@@ -191,8 +191,8 @@ struct tx3927_ccfg_reg {
 #define TX3927_DMA_CCR_XFSZ_1W TX3927_DMA_CCR_XFSZ(2)
 #define TX3927_DMA_CCR_XFSZ_4W TX3927_DMA_CCR_XFSZ(4)
 #define TX3927_DMA_CCR_XFSZ_8W TX3927_DMA_CCR_XFSZ(5)
-#define TX3927_DMA_CCR_XFSZ_16W        TX3927_DMA_CCR_XFSZ(6)
-#define TX3927_DMA_CCR_XFSZ_32W        TX3927_DMA_CCR_XFSZ(7)
+#define TX3927_DMA_CCR_XFSZ_16W TX3927_DMA_CCR_XFSZ(6)
+#define TX3927_DMA_CCR_XFSZ_32W TX3927_DMA_CCR_XFSZ(7)
 #define TX3927_DMA_CCR_MEMIO   0x00000002
 #define TX3927_DMA_CCR_ONEAD   0x00000001
 
@@ -250,7 +250,7 @@ struct tx3927_ccfg_reg {
 /* see PCI_BASE_ADDRESS_XXX in linux/pci.h */
 
 /* bits for PBAPMC */
-#define TX3927_PCIC_PBAPMC_RPBA        0x00000004
+#define TX3927_PCIC_PBAPMC_RPBA 0x00000004
 #define TX3927_PCIC_PBAPMC_PBAEN       0x00000002
 #define TX3927_PCIC_PBAPMC_BMCEN       0x00000001
 
@@ -282,7 +282,7 @@ struct tx3927_ccfg_reg {
 #define TX3927_CCFG_TLBOFF     0x00020000
 #define TX3927_CCFG_BEOW       0x00010000
 #define TX3927_CCFG_WR 0x00008000
-#define TX3927_CCFG_TOE        0x00004000
+#define TX3927_CCFG_TOE 0x00004000
 #define TX3927_CCFG_PCIXARB    0x00002000
 #define TX3927_CCFG_PCI3       0x00001000
 #define TX3927_CCFG_PSNP       0x00000800
@@ -301,8 +301,8 @@ struct tx3927_ccfg_reg {
 #define TX3927_PCFG_SELALL     0x0003ffff
 #define TX3927_PCFG_SELCS      0x00020000
 #define TX3927_PCFG_SELDSF     0x00010000
-#define TX3927_PCFG_SELSIOC_ALL        0x0000c000
-#define TX3927_PCFG_SELSIOC(ch)        (0x00004000<<(ch))
+#define TX3927_PCFG_SELSIOC_ALL 0x0000c000
+#define TX3927_PCFG_SELSIOC(ch) (0x00004000<<(ch))
 #define TX3927_PCFG_SELSIO_ALL 0x00003000
 #define TX3927_PCFG_SELSIO(ch) (0x00001000<<(ch))
 #define TX3927_PCFG_SELTMR_ALL 0x00000e00
index 18c98c5..284eea7 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Author: MontaVista Software, Inc.
- *         source@mvista.com
+ *        source@mvista.com
  *
  * Copyright 2001-2006 MontaVista Software Inc.
  *
 #include <asm/txx9/tx4927pcic.h>
 
 #ifdef CONFIG_64BIT
-#define TX4927_REG_BASE        0xffffffffff1f0000UL
+#define TX4927_REG_BASE 0xffffffffff1f0000UL
 #else
-#define TX4927_REG_BASE        0xff1f0000UL
+#define TX4927_REG_BASE 0xff1f0000UL
 #endif
-#define TX4927_REG_SIZE        0x00010000
+#define TX4927_REG_SIZE 0x00010000
 
 #define TX4927_SDRAMC_REG      (TX4927_REG_BASE + 0x8000)
 #define TX4927_EBUSC_REG       (TX4927_REG_BASE + 0x9000)
@@ -118,10 +118,10 @@ struct tx4927_ccfg_reg {
 #define TX4927_CCFG_DIVMODE_2  (0x4 << 17)
 #define TX4927_CCFG_DIVMODE_3  (0x5 << 17)
 #define TX4927_CCFG_DIVMODE_4  (0x6 << 17)
-#define TX4927_CCFG_DIVMODE_2_5        (0x7 << 17)
+#define TX4927_CCFG_DIVMODE_2_5 (0x7 << 17)
 #define TX4927_CCFG_BEOW       0x00010000
 #define TX4927_CCFG_WR 0x00008000
-#define TX4927_CCFG_TOE        0x00004000
+#define TX4927_CCFG_TOE 0x00004000
 #define TX4927_CCFG_PCIARB     0x00002000
 #define TX4927_CCFG_PCIDIVMODE_MASK    0x00001800
 #define TX4927_CCFG_PCIDIVMODE_2_5     0x00000000
@@ -136,10 +136,10 @@ struct tx4927_ccfg_reg {
 
 /* PCFG : Pin Configuration */
 #define TX4927_PCFG_SDCLKDLY_MASK      0x30000000
-#define TX4927_PCFG_SDCLKDLY(d)        ((d)<<28)
+#define TX4927_PCFG_SDCLKDLY(d) ((d)<<28)
 #define TX4927_PCFG_SYSCLKEN   0x08000000
-#define TX4927_PCFG_SDCLKEN_ALL        0x07800000
-#define TX4927_PCFG_SDCLKEN(ch)        (0x00800000<<(ch))
+#define TX4927_PCFG_SDCLKEN_ALL 0x07800000
+#define TX4927_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
 #define TX4927_PCFG_PCICLKEN_ALL       0x003f0000
 #define TX4927_PCFG_PCICLKEN(ch)       (0x00010000<<(ch))
 #define TX4927_PCFG_SEL2       0x00000200
index c470b8a..9eab269 100644 (file)
@@ -93,7 +93,7 @@ struct tx4927_pcic_reg {
 
 /* bits for PBACFG */
 #define TX4927_PCIC_PBACFG_FIXPA       0x00000008
-#define TX4927_PCIC_PBACFG_RPBA        0x00000004
+#define TX4927_PCIC_PBACFG_RPBA 0x00000004
 #define TX4927_PCIC_PBACFG_PBAEN       0x00000002
 #define TX4927_PCIC_PBACFG_BMCEN       0x00000001
 
@@ -165,7 +165,7 @@ struct tx4927_pcic_reg {
 #define TX4927_PCIC_PDMCFG_CHNEN       0x00000080
 #define TX4927_PCIC_PDMCFG_XFRACT      0x00000040
 #define TX4927_PCIC_PDMCFG_BSWAP       0x00000020
-#define TX4927_PCIC_PDMCFG_XFRSIZE_MASK        0x0000000c
+#define TX4927_PCIC_PDMCFG_XFRSIZE_MASK 0x0000000c
 #define TX4927_PCIC_PDMCFG_XFRSIZE_1DW 0x00000000
 #define TX4927_PCIC_PDMCFG_XFRSIZE_1QW 0x00000004
 #define TX4927_PCIC_PDMCFG_XFRSIZE_4QW 0x00000008
@@ -174,7 +174,7 @@ struct tx4927_pcic_reg {
 
 /* bits for PDMSTS */
 #define TX4927_PCIC_PDMSTS_REQCNT_MASK 0x3f000000
-#define TX4927_PCIC_PDMSTS_FIFOCNT_MASK        0x00f00000
+#define TX4927_PCIC_PDMSTS_FIFOCNT_MASK 0x00f00000
 #define TX4927_PCIC_PDMSTS_FIFOWP_MASK 0x000c0000
 #define TX4927_PCIC_PDMSTS_FIFORP_MASK 0x00030000
 #define TX4927_PCIC_PDMSTS_ERRINT      0x00000800
index 8a178f1..6ca767e 100644 (file)
 #include <asm/txx9/tx4927.h>
 
 #ifdef CONFIG_64BIT
-#define TX4938_REG_BASE        0xffffffffff1f0000UL /* == TX4937_REG_BASE */
+#define TX4938_REG_BASE 0xffffffffff1f0000UL /* == TX4937_REG_BASE */
 #else
-#define TX4938_REG_BASE        0xff1f0000UL /* == TX4937_REG_BASE */
+#define TX4938_REG_BASE 0xff1f0000UL /* == TX4937_REG_BASE */
 #endif
-#define TX4938_REG_SIZE        0x00010000 /* == TX4937_REG_SIZE */
+#define TX4938_REG_SIZE 0x00010000 /* == TX4937_REG_SIZE */
 
 /* NDFMC, SRAMC, PCIC1, SPIC: TX4938 only */
 #define TX4938_NDFMC_REG       (TX4938_REG_BASE + 0x5000)
@@ -72,16 +72,16 @@ struct tx4938_ccfg_reg {
 #define TX4938_NUM_IR_DMA      4
 #define TX4938_IR_DMA(ch, n)   ((ch ? 27 : 10) + (n)) /* 10-13, 27-30 */
 #define TX4938_IR_PIO  14
-#define TX4938_IR_PDMAC        15
+#define TX4938_IR_PDMAC 15
 #define TX4938_IR_PCIC 16
 #define TX4938_NUM_IR_TMR      3
 #define TX4938_IR_TMR(n)       (17 + (n))
-#define TX4938_IR_NDFMC        21
+#define TX4938_IR_NDFMC 21
 #define TX4938_IR_PCIERR       22
 #define TX4938_IR_PCIPME       23
 #define TX4938_IR_ACLC 24
 #define TX4938_IR_ACLCPME      25
-#define TX4938_IR_PCIC1        26
+#define TX4938_IR_PCIC1 26
 #define TX4938_IR_SPI  31
 #define TX4938_NUM_IR  32
 /* multiplex */
@@ -105,10 +105,10 @@ struct tx4938_ccfg_reg {
 #define TX4938_CCFG_PCI1_66    0x00200000
 #define TX4938_CCFG_DIVMODE_MASK       0x001e0000
 #define TX4938_CCFG_DIVMODE_2  (0x4 << 17)
-#define TX4938_CCFG_DIVMODE_2_5        (0xf << 17)
+#define TX4938_CCFG_DIVMODE_2_5 (0xf << 17)
 #define TX4938_CCFG_DIVMODE_3  (0x5 << 17)
 #define TX4938_CCFG_DIVMODE_4  (0x6 << 17)
-#define TX4938_CCFG_DIVMODE_4_5        (0xd << 17)
+#define TX4938_CCFG_DIVMODE_4_5 (0xd << 17)
 #define TX4938_CCFG_DIVMODE_8  (0x0 << 17)
 #define TX4938_CCFG_DIVMODE_10 (0xb << 17)
 #define TX4938_CCFG_DIVMODE_12 (0x1 << 17)
@@ -116,7 +116,7 @@ struct tx4938_ccfg_reg {
 #define TX4938_CCFG_DIVMODE_18 (0x9 << 17)
 #define TX4938_CCFG_BEOW       0x00010000
 #define TX4938_CCFG_WR 0x00008000
-#define TX4938_CCFG_TOE        0x00004000
+#define TX4938_CCFG_TOE 0x00004000
 #define TX4938_CCFG_PCIARB     0x00002000
 #define TX4938_CCFG_PCIDIVMODE_MASK    0x00001c00
 #define TX4938_CCFG_PCIDIVMODE_4       (0x1 << 10)
@@ -141,10 +141,10 @@ struct tx4938_ccfg_reg {
 #define TX4938_PCFG_SPI_SEL    0x0800000000000000ULL
 #define TX4938_PCFG_NDF_SEL    0x0400000000000000ULL
 #define TX4938_PCFG_SDCLKDLY_MASK      0x30000000
-#define TX4938_PCFG_SDCLKDLY(d)        ((d)<<28)
+#define TX4938_PCFG_SDCLKDLY(d) ((d)<<28)
 #define TX4938_PCFG_SYSCLKEN   0x08000000
-#define TX4938_PCFG_SDCLKEN_ALL        0x07800000
-#define TX4938_PCFG_SDCLKEN(ch)        (0x00800000<<(ch))
+#define TX4938_PCFG_SDCLKEN_ALL 0x07800000
+#define TX4938_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
 #define TX4938_PCFG_PCICLKEN_ALL       0x003f0000
 #define TX4938_PCFG_PCICLKEN(ch)       (0x00010000<<(ch))
 #define TX4938_PCFG_SEL2       0x00000200
@@ -230,8 +230,8 @@ struct tx4938_ccfg_reg {
 #define TX4938_DMA_CCR_XFSZ_2W TX4938_DMA_CCR_XFSZ(3)
 #define TX4938_DMA_CCR_XFSZ_4W TX4938_DMA_CCR_XFSZ(4)
 #define TX4938_DMA_CCR_XFSZ_8W TX4938_DMA_CCR_XFSZ(5)
-#define TX4938_DMA_CCR_XFSZ_16W        TX4938_DMA_CCR_XFSZ(6)
-#define TX4938_DMA_CCR_XFSZ_32W        TX4938_DMA_CCR_XFSZ(7)
+#define TX4938_DMA_CCR_XFSZ_16W TX4938_DMA_CCR_XFSZ(6)
+#define TX4938_DMA_CCR_XFSZ_32W TX4938_DMA_CCR_XFSZ(7)
 #define TX4938_DMA_CCR_MEMIO   0x00000002
 #define TX4938_DMA_CCR_SNGAD   0x00000001
 
@@ -263,9 +263,9 @@ struct tx4938_ccfg_reg {
 #define TX4938_REV_PCODE()     \
        ((__u32)__raw_readq(&tx4938_ccfgptr->crir) >> 16)
 
-#define tx4938_ccfg_clear(bits)        tx4927_ccfg_clear(bits)
+#define tx4938_ccfg_clear(bits) tx4927_ccfg_clear(bits)
 #define tx4938_ccfg_set(bits)  tx4927_ccfg_set(bits)
-#define tx4938_ccfg_change(change, new)        tx4927_ccfg_change(change, new)
+#define tx4938_ccfg_change(change, new) tx4927_ccfg_change(change, new)
 
 #define TX4938_SDRAMC_CR(ch)   TX4927_SDRAMC_CR(ch)
 #define TX4938_SDRAMC_BA(ch)   TX4927_SDRAMC_BA(ch)
index d4f342c..6d66708 100644 (file)
 #include <asm/txx9/tx4938.h>
 
 #ifdef CONFIG_64BIT
-#define TX4939_REG_BASE        0xffffffffff1f0000UL /* == TX4938_REG_BASE */
+#define TX4939_REG_BASE 0xffffffffff1f0000UL /* == TX4938_REG_BASE */
 #else
-#define TX4939_REG_BASE        0xff1f0000UL /* == TX4938_REG_BASE */
+#define TX4939_REG_BASE 0xff1f0000UL /* == TX4938_REG_BASE */
 #endif
-#define TX4939_REG_SIZE        0x00010000 /* == TX4938_REG_SIZE */
+#define TX4939_REG_SIZE 0x00010000 /* == TX4938_REG_SIZE */
 
 #define TX4939_ATA_REG(ch)     (TX4939_REG_BASE + 0x3000 + (ch) * 0x1000)
 #define TX4939_NDFMC_REG       (TX4939_REG_BASE + 0x5000)
@@ -189,14 +189,14 @@ struct tx4939_vpc_desc {
 #define TX4939_IR_INT(n)       (3 + (n))
 #define TX4939_NUM_IR_ETH      2
 #define TX4939_IR_ETH(n)       ((n) ? 43 : 6)
-#define TX4939_IR_VIDEO        7
+#define TX4939_IR_VIDEO 7
 #define TX4939_IR_CIR  8
 #define TX4939_NUM_IR_SIO      4
 #define TX4939_IR_SIO(n)       ((n) ? 43 + (n) : 9)    /* 9,44-46 */
 #define TX4939_NUM_IR_DMA      4
 #define TX4939_IR_DMA(ch, n)   (((ch) ? 22 : 10) + (n)) /* 10-13,22-25 */
 #define TX4939_IR_IRC  14
-#define TX4939_IR_PDMAC        15
+#define TX4939_IR_PDMAC 15
 #define TX4939_NUM_IR_TMR      6
 #define TX4939_IR_TMR(n)       (((n) >= 3 ? 45 : 16) + (n)) /* 16-18,48-50 */
 #define TX4939_NUM_IR_ATA      2
@@ -210,10 +210,10 @@ struct tx4939_vpc_desc {
 #define TX4939_IR_I2C  33
 #define TX4939_IR_SPI  34
 #define TX4939_IR_PCIC 35
-#define TX4939_IR_PCIC1        36
+#define TX4939_IR_PCIC1 36
 #define TX4939_IR_PCIERR       37
 #define TX4939_IR_PCIPME       38
-#define TX4939_IR_NDFMC        39
+#define TX4939_IR_NDFMC 39
 #define TX4939_IR_ACLCPME      40
 #define TX4939_IR_RTC  41
 #define TX4939_IR_RND  42
@@ -239,7 +239,7 @@ struct tx4939_vpc_desc {
 #define TX4939_CCFG_PCI66      0x00800000
 #define TX4939_CCFG_PCIMODE    0x00400000
 #define TX4939_CCFG_SSCG       0x00100000
-#define TX4939_CCFG_MULCLK_MASK        0x000e0000
+#define TX4939_CCFG_MULCLK_MASK 0x000e0000
 #define TX4939_CCFG_MULCLK_8   (0x7 << 17)
 #define TX4939_CCFG_MULCLK_9   (0x0 << 17)
 #define TX4939_CCFG_MULCLK_10  (0x1 << 17)
@@ -250,7 +250,7 @@ struct tx4939_vpc_desc {
 #define TX4939_CCFG_MULCLK_15  (0x6 << 17)
 #define TX4939_CCFG_BEOW       0x00010000
 #define TX4939_CCFG_WR 0x00008000
-#define TX4939_CCFG_TOE        0x00004000
+#define TX4939_CCFG_TOE 0x00004000
 #define TX4939_CCFG_PCIARB     0x00002000
 #define TX4939_CCFG_YDIVMODE_MASK      0x00001c00
 #define TX4939_CCFG_YDIVMODE_2 (0x0 << 10)
@@ -275,7 +275,7 @@ struct tx4939_vpc_desc {
 #define TX4939_PCFG_I2CMODE    0x1000000000000000ULL
 #define TX4939_PCFG_I2SMODE_MASK       0x0c00000000000000ULL
 #define TX4939_PCFG_I2SMODE_GPIO       0x0c00000000000000ULL
-#define TX4939_PCFG_I2SMODE_I2S        0x0800000000000000ULL
+#define TX4939_PCFG_I2SMODE_I2S 0x0800000000000000ULL
 #define TX4939_PCFG_I2SMODE_I2S_ALT    0x0400000000000000ULL
 #define TX4939_PCFG_I2SMODE_ACLC       0x0000000000000000ULL
 #define TX4939_PCFG_SIO3MODE   0x0200000000000000ULL
@@ -392,15 +392,15 @@ struct tx4939_vpc_desc {
 /*
  * CRYPTO
  */
-#define TX4939_CRYPTO_CSR_SAESO        0x08000000
-#define TX4939_CRYPTO_CSR_SAESI        0x04000000
-#define TX4939_CRYPTO_CSR_SDESO        0x02000000
-#define TX4939_CRYPTO_CSR_SDESI        0x01000000
+#define TX4939_CRYPTO_CSR_SAESO 0x08000000
+#define TX4939_CRYPTO_CSR_SAESI 0x04000000
+#define TX4939_CRYPTO_CSR_SDESO 0x02000000
+#define TX4939_CRYPTO_CSR_SDESI 0x01000000
 #define TX4939_CRYPTO_CSR_INDXBST_MASK 0x00700000
 #define TX4939_CRYPTO_CSR_INDXBST(n)   ((n) << 20)
-#define TX4939_CRYPTO_CSR_TOINT        0x00080000
-#define TX4939_CRYPTO_CSR_DCINT        0x00040000
-#define TX4939_CRYPTO_CSR_GBINT        0x00010000
+#define TX4939_CRYPTO_CSR_TOINT 0x00080000
+#define TX4939_CRYPTO_CSR_DCINT 0x00040000
+#define TX4939_CRYPTO_CSR_GBINT 0x00010000
 #define TX4939_CRYPTO_CSR_INDXAST_MASK 0x0000e000
 #define TX4939_CRYPTO_CSR_INDXAST(n)   ((n) << 13)
 #define TX4939_CRYPTO_CSR_CSWAP_MASK   0x00001800
@@ -418,7 +418,7 @@ struct tx4939_vpc_desc {
 #define TX4939_CRYPTO_CSR_PDINT_END    0x00000040
 #define TX4939_CRYPTO_CSR_PDINT_NEXT   0x00000080
 #define TX4939_CRYPTO_CSR_PDINT_NONE   0x000000c0
-#define TX4939_CRYPTO_CSR_GINTE        0x00000008
+#define TX4939_CRYPTO_CSR_GINTE 0x00000008
 #define TX4939_CRYPTO_CSR_RSTD 0x00000004
 #define TX4939_CRYPTO_CSR_RSTC 0x00000002
 #define TX4939_CRYPTO_CSR_ENCR 0x00000001
@@ -442,7 +442,7 @@ struct tx4939_vpc_desc {
 #define TX4939_CRYPTO_DESC_START       0x00000200
 #define TX4939_CRYPTO_DESC_END 0x00000100
 #define TX4939_CRYPTO_DESC_XOR 0x00000010
-#define TX4939_CRYPTO_DESC_LAST        0x00000008
+#define TX4939_CRYPTO_DESC_LAST 0x00000008
 #define TX4939_CRYPTO_DESC_ERR_MASK    0x00000006
 #define TX4939_CRYPTO_DESC_ERR_NONE    0x00000000
 #define TX4939_CRYPTO_DESC_ERR_TOUT    0x00000002
@@ -457,7 +457,7 @@ struct tx4939_vpc_desc {
 
 #define TX4939_CRYPTO_NR_SET   6
 
-#define TX4939_CRYPTO_RCSR_INTE        0x00000008
+#define TX4939_CRYPTO_RCSR_INTE 0x00000008
 #define TX4939_CRYPTO_RCSR_RST 0x00000004
 #define TX4939_CRYPTO_RCSR_FIN 0x00000002
 #define TX4939_CRYPTO_RCSR_ST  0x00000001
@@ -480,8 +480,8 @@ struct tx4939_vpc_desc {
 #define TX4939_VPC_CTRLA_PDINT_ALL     0x00000000
 #define TX4939_VPC_CTRLA_PDINT_NEXT    0x00000010
 #define TX4939_VPC_CTRLA_PDINT_NONE    0x00000030
-#define TX4939_VPC_CTRLA_VDVLDP        0x00000008
-#define TX4939_VPC_CTRLA_VDMODE        0x00000004
+#define TX4939_VPC_CTRLA_VDVLDP 0x00000008
+#define TX4939_VPC_CTRLA_VDMODE 0x00000004
 #define TX4939_VPC_CTRLA_VDFOR 0x00000002
 #define TX4939_VPC_CTRLA_ENVPC 0x00000001
 
@@ -512,9 +512,9 @@ struct tx4939_vpc_desc {
        ((__u32)((__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_BCFG_MASK) \
                 >> 32))
 
-#define tx4939_ccfg_clear(bits)        tx4938_ccfg_clear(bits)
+#define tx4939_ccfg_clear(bits) tx4938_ccfg_clear(bits)
 #define tx4939_ccfg_set(bits)  tx4938_ccfg_set(bits)
-#define tx4939_ccfg_change(change, new)        tx4938_ccfg_change(change, new)
+#define tx4939_ccfg_change(change, new) tx4938_ccfg_change(change, new)
 
 #define TX4939_EBUSC_CR(ch)    TX4927_EBUSC_CR(ch)
 #define TX4939_EBUSC_BA(ch)    TX4927_EBUSC_BA(ch)
@@ -522,7 +522,7 @@ struct tx4939_vpc_desc {
 #define TX4939_EBUSC_WIDTH(ch) \
        (16 >> ((__u32)(TX4939_EBUSC_CR(ch) >> 20) & 0x1))
 
-/* SCLK0 = MSTCLK * 429/19 * 16/245 / 2  (14.745MHz for MST 20MHz) */
+/* SCLK0 = MSTCLK * 429/19 * 16/245 / 2         (14.745MHz for MST 20MHz) */
 #define TX4939_SCLK0(mst)      \
        ((((mst) + 245/2) / 245UL * 429 * 16 + 19) / 19 / 2)
 
index 67f70a8..466a3de 100644 (file)
@@ -59,9 +59,9 @@ void txx9_clockevent_init(unsigned long baseaddr, int irq,
 void txx9_tmr_init(unsigned long baseaddr);
 
 #ifdef CONFIG_CPU_TX39XX
-#define TXX9_TIMER_BITS        24
+#define TXX9_TIMER_BITS 24
 #else
-#define TXX9_TIMER_BITS        32
+#define TXX9_TIMER_BITS 32
 #endif
 
 #endif /* __ASM_TXX9TMR_H */
index 3b92efe..bd87e36 100644 (file)
@@ -87,12 +87,12 @@ extern u64 __ua_limit;
 /*
  * access_ok: - Checks if a user space pointer is valid
  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
- *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- *        to write to a block, it is always safe to read from it.
+ *       %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ *       to write to a block, it is always safe to read from it.
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -124,10 +124,10 @@ extern u64 __ua_limit;
 
 /*
  * put_user: - Write a simple value into user space.
- * @x:   Value to copy to user space.
+ * @x:  Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -138,15 +138,15 @@ extern u64 __ua_limit;
  *
  * Returns zero on success, or -EFAULT on error.
  */
-#define put_user(x,ptr)        \
+#define put_user(x,ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))
 
 /*
  * get_user: - Get a simple variable from user space.
- * @x:   Variable to store result.
+ * @x:  Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -163,10 +163,10 @@ extern u64 __ua_limit;
 
 /*
  * __put_user: - Write a simple value into user space, with less checking.
- * @x:   Value to copy to user space.
+ * @x:  Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -185,10 +185,10 @@ extern u64 __ua_limit;
 
 /*
  * __get_user: - Get a simple variable from user space, with less checking.
- * @x:   Variable to store result.
+ * @x:  Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -390,10 +390,10 @@ extern void __put_user_unknown(void);
 
 /*
  * put_user_unaligned: - Write a simple value into user space.
- * @x:   Value to copy to user space.
+ * @x:  Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -409,10 +409,10 @@ extern void __put_user_unknown(void);
 
 /*
  * get_user_unaligned: - Get a simple variable from user space.
- * @x:   Variable to store result.
+ * @x:  Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -429,10 +429,10 @@ extern void __put_user_unknown(void);
 
 /*
  * __put_user_unaligned: - Write a simple value into user space, with less checking.
- * @x:   Value to copy to user space.
+ * @x:  Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -451,10 +451,10 @@ extern void __put_user_unknown(void);
 
 /*
  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
- * @x:   Variable to store result.
+ * @x:  Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -543,7 +543,7 @@ do {                                                                        \
  */
 #define __get_user_unaligned_asm_ll32(val, addr)                       \
 {                                                                      \
-        unsigned long long __gu_tmp;                                   \
+       unsigned long long __gu_tmp;                                    \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     ulw     %1, (%3)                                \n"     \
@@ -631,7 +631,7 @@ do {                                                                        \
 #define __put_user_unaligned_asm_ll32(ptr)                             \
 {                                                                      \
        __asm__ __volatile__(                                           \
-       "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
+       "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
@@ -658,7 +658,7 @@ extern void __put_user_unaligned_unknown(void);
 #ifdef MODULE
 #define __MODULE_JAL(destination)                                      \
        ".set\tnoat\n\t"                                                \
-       __UA_LA "\t$1, " #destination "\n\t"                            \
+       __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
 #else
@@ -694,11 +694,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 
 /*
  * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
+ * @to:          Destination address, in user space.
  * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
+ * @n:   Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -716,7 +716,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
-       __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+       __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len;                                                       \
 })
 
@@ -731,7 +731,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+       __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
        __cu_len;                                                       \
 })
 
@@ -744,18 +744,18 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
-       __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
-                                                   __cu_len);          \
+       __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
+                                                   __cu_len);          \
        __cu_len;                                                       \
 })
 
 /*
  * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
+ * @to:          Destination address, in user space.
  * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
+ * @n:   Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * Copy data from kernel space to user space.
  *
@@ -774,7 +774,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {               \
                might_fault();                                          \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
-                                                __cu_len);             \
+                                                __cu_len);             \
        }                                                               \
        __cu_len;                                                       \
 })
@@ -827,11 +827,11 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 
 /*
  * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
+ * @to:          Destination address, in kernel space.
  * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
+ * @n:   Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * Copy data from user space to kernel space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -853,17 +853,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
-                                          __cu_len);                   \
+                                          __cu_len);                   \
        __cu_len;                                                       \
 })
 
 /*
  * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
+ * @to:          Destination address, in kernel space.
  * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
+ * @n:   Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * Copy data from user space to kernel space.
  *
@@ -885,7 +885,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {              \
                might_fault();                                          \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
-                                                  __cu_len);           \
+                                                  __cu_len);           \
        }                                                               \
        __cu_len;                                                       \
 })
@@ -901,7 +901,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
-                                          __cu_len);                   \
+                                          __cu_len);                   \
        __cu_len;                                                       \
 })
 
@@ -915,18 +915,18 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&       \
-                  access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {       \
+                  access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {       \
                might_fault();                                          \
                __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
-                                                  __cu_len);           \
+                                                  __cu_len);           \
        }                                                               \
        __cu_len;                                                       \
 })
 
 /*
  * __clear_user: - Zero a block of memory in user space, with less checking.
- * @to:   Destination address, in user space.
- * @n:    Number of bytes to zero.
+ * @to:          Destination address, in user space.
+ * @n:   Number of bytes to zero.
  *
  * Zero a block of memory in user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -966,7 +966,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
 /*
  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
  * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
+ *        least @count bytes long.
  * @src:   Source address, in user space.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
  *
@@ -1005,7 +1005,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
 /*
  * strncpy_from_user: - Copy a NUL terminated string from userspace.
  * @dst:   Destination address, in kernel space.  This buffer must be at
- *         least @count bytes long.
+ *        least @count bytes long.
  * @src:   Source address, in user space.
  * @count: Maximum number of bytes to copy, including the trailing NUL.
  *
@@ -1060,7 +1060,7 @@ static inline long __strlen_user(const char __user *s)
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * Get the size of a NUL-terminated string in user space.
  *
@@ -1108,7 +1108,7 @@ static inline long __strnlen_user(const char __user *s, long n)
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep.
  *
  * Get the size of a NUL-terminated string in user space.
  *
index 7e0bf17..058e941 100644 (file)
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
  * Copyright (C) 2005  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2012  MIPS Technologies, Inc.
index afa83a4..6bad61b 100644 (file)
@@ -20,7 +20,7 @@
  *  upage: 1 page consisting of a user struct that tells gdb
  *     what is present in the file.  Directly after this is a
  *     copy of the task_struct, which is currently not used by gdb,
- *     but it may come in handy at some point.  All of the registers
+ *     but it may come in handy at some point.  All of the registers
  *     are stored as part of the upage.  The upage should always be
  *     only one page long.
  *  data: The data segment follows next.  We use current->end_text to
index c231a3d..a866918 100644 (file)
@@ -20,7 +20,7 @@
 #ifndef __NEC_VR41XX_PCI_H
 #define __NEC_VR41XX_PCI_H
 
-#define PCI_MASTER_ADDRESS_MASK        0x7fffffffU
+#define PCI_MASTER_ADDRESS_MASK 0x7fffffffU
 
 struct pci_master_address_conversion {
        uint32_t bus_base_address;
index 61bead6..d58b567 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  tb0287.h, Include file for TANBAC TB0287 mini-ITX board.
  *
- *  Copyright (C) 2005  Media Lab Inc. <ito@mlb.co.jp>
+ *  Copyright (C) 2005 Media Lab Inc. <ito@mlb.co.jp>
  *
  *  This code is largely based on tb0219.h.
  *
index 65e3445..9344e24 100644 (file)
 #endif
 
 /*
- * Pleasures of the R4600 V1.x.  Cite from the IDT R4600 V1.7 errata:
+ * Pleasures of the R4600 V1.x.         Cite from the IDT R4600 V1.7 errata:
  *
  *  18. The CACHE instructions Hit_Writeback_Invalidate_D, Hit_Writeback_D,
- *      Hit_Invalidate_D and Create_Dirty_Excl_D should only be
- *      executed if there is no other dcache activity. If the dcache is
- *      accessed for another instruction immeidately preceding when these
- *      cache instructions are executing, it is possible that the dcache
- *      tag match outputs used by these cache instructions will be
- *      incorrect. These cache instructions should be preceded by at least
- *      four instructions that are not any kind of load or store
- *      instruction.
- *
- *      This is not allowed:    lw
- *                              nop
- *                              nop
- *                              nop
- *                              cache       Hit_Writeback_Invalidate_D
- *
- *      This is allowed:        lw
- *                              nop
- *                              nop
- *                              nop
- *                              nop
- *                              cache       Hit_Writeback_Invalidate_D
+ *     Hit_Invalidate_D and Create_Dirty_Excl_D should only be
+ *     executed if there is no other dcache activity. If the dcache is
+ *     accessed for another instruction immeidately preceding when these
+ *     cache instructions are executing, it is possible that the dcache
+ *     tag match outputs used by these cache instructions will be
+ *     incorrect. These cache instructions should be preceded by at least
+ *     four instructions that are not any kind of load or store
+ *     instruction.
+ *
+ *     This is not allowed:    lw
+ *                             nop
+ *                             nop
+ *                             nop
+ *                             cache       Hit_Writeback_Invalidate_D
+ *
+ *     This is allowed:        lw
+ *                             nop
+ *                             nop
+ *                             nop
+ *                             nop
+ *                             cache       Hit_Writeback_Invalidate_D
  */
 #ifndef R4600_V1_HIT_CACHEOP_WAR
 #error Check setting of R4600_V1_HIT_CACHEOP_WAR for your platform
  *
  * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
  * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only
- * operate correctly if the internal data cache refill buffer is empty.  These
+ * operate correctly if the internal data cache refill buffer is empty.         These
  * CACHE instructions should be separated from any potential data cache miss
  * by a load instruction to an uncached address to empty the response buffer."
  * (Revision 2.0 device errata from IDT available on http://www.idt.com/
index 79bac88..680e7ef 100644 (file)
 /*
  * User-level device driver visible types
  */
-typedef char            xwidgetnum_t;  /* xtalk widget number  (0..15) */
+typedef char           xwidgetnum_t;   /* xtalk widget number  (0..15) */
 
 #define XWIDGET_NONE           -1
 
-typedef int xwidget_part_num_t;        /* xtalk widget part number */
+typedef int xwidget_part_num_t; /* xtalk widget part number */
 
 #define XWIDGET_PART_NUM_NONE  -1
 
-typedef int             xwidget_rev_num_t;     /* xtalk widget revision number */
+typedef int            xwidget_rev_num_t;      /* xtalk widget revision number */
 
 #define XWIDGET_REV_NUM_NONE   -1
 
@@ -37,15 +37,15 @@ typedef struct xtalk_piomap_s *xtalk_piomap_t;
 /* It is often convenient to fold the XIO target port
  * number into the XIO address.
  */
-#define        XIO_NOWHERE     (0xFFFFFFFFFFFFFFFFull)
-#define        XIO_ADDR_BITS   (0x0000FFFFFFFFFFFFull)
-#define        XIO_PORT_BITS   (0xF000000000000000ull)
-#define        XIO_PORT_SHIFT  (60)
-
-#define        XIO_PACKED(x)   (((x)&XIO_PORT_BITS) != 0)
-#define        XIO_ADDR(x)     ((x)&XIO_ADDR_BITS)
-#define        XIO_PORT(x)     ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
-#define        XIO_PACK(p, o)  ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
+#define XIO_NOWHERE    (0xFFFFFFFFFFFFFFFFull)
+#define XIO_ADDR_BITS  (0x0000FFFFFFFFFFFFull)
+#define XIO_PORT_BITS  (0xF000000000000000ull)
+#define XIO_PORT_SHIFT (60)
+
+#define XIO_PACKED(x)  (((x)&XIO_PORT_BITS) != 0)
+#define XIO_ADDR(x)    ((x)&XIO_ADDR_BITS)
+#define XIO_PORT(x)    ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
+#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
 
 #endif /* !__ASSEMBLY__ */
 
index b4a13d7..32e4e88 100644 (file)
 #define WIDGET_PENDING                 0x0000001f
 
 /* WIDGET_ERR_UPPER_ADDR */
-#define        WIDGET_ERR_UPPER_ADDR_ONLY      0x0000ffff
+#define WIDGET_ERR_UPPER_ADDR_ONLY     0x0000ffff
 
 /* WIDGET_CONTROL */
 #define WIDGET_F_BAD_PKT               0x00010000
 #define WIDGET_LLP_XBAR_CRD            0x0000f000
-#define        WIDGET_LLP_XBAR_CRD_SHFT        12
+#define WIDGET_LLP_XBAR_CRD_SHFT       12
 #define WIDGET_CLR_RLLP_CNT            0x00000800
 #define WIDGET_CLR_TLLP_CNT            0x00000400
 #define WIDGET_SYS_END                 0x00000200
@@ -86,8 +86,8 @@
 
 /*
  * according to the crosstalk spec, only 32-bits access to the widget
- * configuration registers is allowed.  some widgets may allow 64-bits
- * access but software should not depend on it.  registers beyond the
+ * configuration registers is allowed. some widgets may allow 64-bits
+ * access but software should not depend on it.         registers beyond the
  * widget target flush register are widget dependent thus will not be
  * defined here
  */
index 77d4fb3..350cccc 100644 (file)
@@ -8,6 +8,7 @@ header-y += byteorder.h
 header-y += cachectl.h
 header-y += errno.h
 header-y += fcntl.h
+header-y += inst.h
 header-y += ioctl.h
 header-y += ioctls.h
 header-y += ipcbuf.h
index 9161e68..002c39e 100644 (file)
@@ -6,8 +6,8 @@
  * Copyright (C) 1995, 2003 by Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
-#ifndef __ASM_BREAK_H
-#define __ASM_BREAK_H
+#ifndef __UAPI_ASM_BREAK_H
+#define __UAPI_ASM_BREAK_H
 
 /*
  * The following break codes are or were in use for specific purposes in
  * non-Linux/MIPS object files or make use of them in the future.
  */
 #define BRK_USERBP     0       /* User bp (used by debuggers) */
-#define BRK_KERNELBP   1       /* Break in the kernel */
-#define BRK_ABORT      2       /* Sometimes used by abort(3) to SIGIOT */
-#define BRK_BD_TAKEN   3       /* For bd slot emulation - not implemented */
-#define BRK_BD_NOTTAKEN        4       /* For bd slot emulation - not implemented */
 #define BRK_SSTEPBP    5       /* User bp (used by debuggers) */
 #define BRK_OVERFLOW   6       /* Overflow check */
 #define BRK_DIVZERO    7       /* Divide by zero check */
 #define BRK_RANGE      8       /* Range error check */
-#define BRK_STACKOVERFLOW 9    /* For Ada stackchecking */
-#define BRK_NORLD      10      /* No rld found - not used by Linux/MIPS */
-#define _BRK_THREADBP  11      /* For threads, user bp (used by debuggers) */
-#define BRK_BUG                512     /* Used by BUG() */
-#define BRK_KDB                513     /* Used in KDB_ENTER() */
+#define BRK_BUG                12      /* Used by BUG() */
 #define BRK_MEMU       514     /* Used by FPU emulator */
 #define BRK_KPROBE_BP  515     /* Kprobe break */
 #define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
 #define BRK_MULOVF     1023    /* Multiply overflow */
 
-#endif /* __ASM_BREAK_H */
+#endif /* __UAPI_ASM_BREAK_H */
index f3ce721..2303909 100644 (file)
@@ -5,15 +5,15 @@
  *
  * Copyright (C) 1994, 1995, 1996 by Ralf Baechle
  */
-#ifndef        _ASM_CACHECTL
-#define        _ASM_CACHECTL
+#ifndef _ASM_CACHECTL
+#define _ASM_CACHECTL
 
 /*
  * Options for cacheflush system call
  */
-#define        ICACHE  (1<<0)          /* flush instruction cache        */
-#define        DCACHE  (1<<1)          /* writeback and flush data cache */
-#define        BCACHE  (ICACHE|DCACHE) /* flush both caches              */
+#define ICACHE (1<<0)          /* flush instruction cache        */
+#define DCACHE (1<<1)          /* writeback and flush data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches              */
 
 /*
  * Caching modes for the cachectl(2) call
index bd67b15..31575e2 100644 (file)
 
 #include <asm-generic/errno-base.h>
 
-#define        ENOMSG          35      /* No message of desired type */
-#define        EIDRM           36      /* Identifier removed */
-#define        ECHRNG          37      /* Channel number out of range */
-#define        EL2NSYNC        38      /* Level 2 not synchronized */
-#define        EL3HLT          39      /* Level 3 halted */
-#define        EL3RST          40      /* Level 3 reset */
-#define        ELNRNG          41      /* Link number out of range */
-#define        EUNATCH         42      /* Protocol driver not attached */
-#define        ENOCSI          43      /* No CSI structure available */
-#define        EL2HLT          44      /* Level 2 halted */
-#define        EDEADLK         45      /* Resource deadlock would occur */
-#define        ENOLCK          46      /* No record locks available */
-#define        EBADE           50      /* Invalid exchange */
-#define        EBADR           51      /* Invalid request descriptor */
-#define        EXFULL          52      /* Exchange full */
-#define        ENOANO          53      /* No anode */
-#define        EBADRQC         54      /* Invalid request code */
-#define        EBADSLT         55      /* Invalid slot */
-#define        EDEADLOCK       56      /* File locking deadlock error */
-#define        EBFONT          59      /* Bad font file format */
-#define        ENOSTR          60      /* Device not a stream */
-#define        ENODATA         61      /* No data available */
-#define        ETIME           62      /* Timer expired */
-#define        ENOSR           63      /* Out of streams resources */
-#define        ENONET          64      /* Machine is not on the network */
-#define        ENOPKG          65      /* Package not installed */
-#define        EREMOTE         66      /* Object is remote */
-#define        ENOLINK         67      /* Link has been severed */
-#define        EADV            68      /* Advertise error */
-#define        ESRMNT          69      /* Srmount error */
-#define        ECOMM           70      /* Communication error on send */
-#define        EPROTO          71      /* Protocol error */
-#define        EDOTDOT         73      /* RFS specific error */
-#define        EMULTIHOP       74      /* Multihop attempted */
-#define        EBADMSG         77      /* Not a data message */
-#define        ENAMETOOLONG    78      /* File name too long */
-#define        EOVERFLOW       79      /* Value too large for defined data type */
-#define        ENOTUNIQ        80      /* Name not unique on network */
-#define        EBADFD          81      /* File descriptor in bad state */
-#define        EREMCHG         82      /* Remote address changed */
-#define        ELIBACC         83      /* Can not access a needed shared library */
-#define        ELIBBAD         84      /* Accessing a corrupted shared library */
-#define        ELIBSCN         85      /* .lib section in a.out corrupted */
-#define        ELIBMAX         86      /* Attempting to link in too many shared libraries */
-#define        ELIBEXEC        87      /* Cannot exec a shared library directly */
-#define        EILSEQ          88      /* Illegal byte sequence */
-#define        ENOSYS          89      /* Function not implemented */
-#define        ELOOP           90      /* Too many symbolic links encountered */
-#define        ERESTART        91      /* Interrupted system call should be restarted */
-#define        ESTRPIPE        92      /* Streams pipe error */
-#define        ENOTEMPTY       93      /* Directory not empty */
-#define        EUSERS          94      /* Too many users */
-#define        ENOTSOCK        95      /* Socket operation on non-socket */
-#define        EDESTADDRREQ    96      /* Destination address required */
-#define        EMSGSIZE        97      /* Message too long */
-#define        EPROTOTYPE      98      /* Protocol wrong type for socket */
-#define        ENOPROTOOPT     99      /* Protocol not available */
-#define        EPROTONOSUPPORT 120     /* Protocol not supported */
-#define        ESOCKTNOSUPPORT 121     /* Socket type not supported */
-#define        EOPNOTSUPP      122     /* Operation not supported on transport endpoint */
-#define        EPFNOSUPPORT    123     /* Protocol family not supported */
-#define        EAFNOSUPPORT    124     /* Address family not supported by protocol */
-#define        EADDRINUSE      125     /* Address already in use */
-#define        EADDRNOTAVAIL   126     /* Cannot assign requested address */
-#define        ENETDOWN        127     /* Network is down */
-#define        ENETUNREACH     128     /* Network is unreachable */
-#define        ENETRESET       129     /* Network dropped connection because of reset */
-#define        ECONNABORTED    130     /* Software caused connection abort */
-#define        ECONNRESET      131     /* Connection reset by peer */
-#define        ENOBUFS         132     /* No buffer space available */
-#define        EISCONN         133     /* Transport endpoint is already connected */
-#define        ENOTCONN        134     /* Transport endpoint is not connected */
-#define        EUCLEAN         135     /* Structure needs cleaning */
-#define        ENOTNAM         137     /* Not a XENIX named type file */
-#define        ENAVAIL         138     /* No XENIX semaphores available */
-#define        EISNAM          139     /* Is a named type file */
-#define        EREMOTEIO       140     /* Remote I/O error */
+#define ENOMSG         35      /* No message of desired type */
+#define EIDRM          36      /* Identifier removed */
+#define ECHRNG         37      /* Channel number out of range */
+#define EL2NSYNC       38      /* Level 2 not synchronized */
+#define EL3HLT         39      /* Level 3 halted */
+#define EL3RST         40      /* Level 3 reset */
+#define ELNRNG         41      /* Link number out of range */
+#define EUNATCH                42      /* Protocol driver not attached */
+#define ENOCSI         43      /* No CSI structure available */
+#define EL2HLT         44      /* Level 2 halted */
+#define EDEADLK                45      /* Resource deadlock would occur */
+#define ENOLCK         46      /* No record locks available */
+#define EBADE          50      /* Invalid exchange */
+#define EBADR          51      /* Invalid request descriptor */
+#define EXFULL         52      /* Exchange full */
+#define ENOANO         53      /* No anode */
+#define EBADRQC                54      /* Invalid request code */
+#define EBADSLT                55      /* Invalid slot */
+#define EDEADLOCK      56      /* File locking deadlock error */
+#define EBFONT         59      /* Bad font file format */
+#define ENOSTR         60      /* Device not a stream */
+#define ENODATA                61      /* No data available */
+#define ETIME          62      /* Timer expired */
+#define ENOSR          63      /* Out of streams resources */
+#define ENONET         64      /* Machine is not on the network */
+#define ENOPKG         65      /* Package not installed */
+#define EREMOTE                66      /* Object is remote */
+#define ENOLINK                67      /* Link has been severed */
+#define EADV           68      /* Advertise error */
+#define ESRMNT         69      /* Srmount error */
+#define ECOMM          70      /* Communication error on send */
+#define EPROTO         71      /* Protocol error */
+#define EDOTDOT                73      /* RFS specific error */
+#define EMULTIHOP      74      /* Multihop attempted */
+#define EBADMSG                77      /* Not a data message */
+#define ENAMETOOLONG   78      /* File name too long */
+#define EOVERFLOW      79      /* Value too large for defined data type */
+#define ENOTUNIQ       80      /* Name not unique on network */
+#define EBADFD         81      /* File descriptor in bad state */
+#define EREMCHG                82      /* Remote address changed */
+#define ELIBACC                83      /* Can not access a needed shared library */
+#define ELIBBAD                84      /* Accessing a corrupted shared library */
+#define ELIBSCN                85      /* .lib section in a.out corrupted */
+#define ELIBMAX                86      /* Attempting to link in too many shared libraries */
+#define ELIBEXEC       87      /* Cannot exec a shared library directly */
+#define EILSEQ         88      /* Illegal byte sequence */
+#define ENOSYS         89      /* Function not implemented */
+#define ELOOP          90      /* Too many symbolic links encountered */
+#define ERESTART       91      /* Interrupted system call should be restarted */
+#define ESTRPIPE       92      /* Streams pipe error */
+#define ENOTEMPTY      93      /* Directory not empty */
+#define EUSERS         94      /* Too many users */
+#define ENOTSOCK       95      /* Socket operation on non-socket */
+#define EDESTADDRREQ   96      /* Destination address required */
+#define EMSGSIZE       97      /* Message too long */
+#define EPROTOTYPE     98      /* Protocol wrong type for socket */
+#define ENOPROTOOPT    99      /* Protocol not available */
+#define EPROTONOSUPPORT 120    /* Protocol not supported */
+#define ESOCKTNOSUPPORT 121    /* Socket type not supported */
+#define EOPNOTSUPP     122     /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT   123     /* Protocol family not supported */
+#define EAFNOSUPPORT   124     /* Address family not supported by protocol */
+#define EADDRINUSE     125     /* Address already in use */
+#define EADDRNOTAVAIL  126     /* Cannot assign requested address */
+#define ENETDOWN       127     /* Network is down */
+#define ENETUNREACH    128     /* Network is unreachable */
+#define ENETRESET      129     /* Network dropped connection because of reset */
+#define ECONNABORTED   130     /* Software caused connection abort */
+#define ECONNRESET     131     /* Connection reset by peer */
+#define ENOBUFS                132     /* No buffer space available */
+#define EISCONN                133     /* Transport endpoint is already connected */
+#define ENOTCONN       134     /* Transport endpoint is not connected */
+#define EUCLEAN                135     /* Structure needs cleaning */
+#define ENOTNAM                137     /* Not a XENIX named type file */
+#define ENAVAIL                138     /* No XENIX semaphores available */
+#define EISNAM         139     /* Is a named type file */
+#define EREMOTEIO      140     /* Remote I/O error */
 #define EINIT          141     /* Reserved */
 #define EREMDEV                142     /* Error 142 */
-#define        ESHUTDOWN       143     /* Cannot send after transport endpoint shutdown */
-#define        ETOOMANYREFS    144     /* Too many references: cannot splice */
-#define        ETIMEDOUT       145     /* Connection timed out */
-#define        ECONNREFUSED    146     /* Connection refused */
-#define        EHOSTDOWN       147     /* Host is down */
-#define        EHOSTUNREACH    148     /* No route to host */
-#define        EWOULDBLOCK     EAGAIN  /* Operation would block */
-#define        EALREADY        149     /* Operation already in progress */
-#define        EINPROGRESS     150     /* Operation now in progress */
-#define        ESTALE          151     /* Stale NFS file handle */
+#define ESHUTDOWN      143     /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS   144     /* Too many references: cannot splice */
+#define ETIMEDOUT      145     /* Connection timed out */
+#define ECONNREFUSED   146     /* Connection refused */
+#define EHOSTDOWN      147     /* Host is down */
+#define EHOSTUNREACH   148     /* No route to host */
+#define EWOULDBLOCK    EAGAIN  /* Operation would block */
+#define EALREADY       149     /* Operation already in progress */
+#define EINPROGRESS    150     /* Operation now in progress */
+#define ESTALE         151     /* Stale NFS file handle */
 #define ECANCELED      158     /* AIO operation canceled */
 
 /*
  */
 #define ENOMEDIUM      159     /* No medium found */
 #define EMEDIUMTYPE    160     /* Wrong medium type */
-#define        ENOKEY          161     /* Required key not available */
-#define        EKEYEXPIRED     162     /* Key has expired */
-#define        EKEYREVOKED     163     /* Key has been revoked */
-#define        EKEYREJECTED    164     /* Key was rejected by service */
+#define ENOKEY         161     /* Required key not available */
+#define EKEYEXPIRED    162     /* Key has expired */
+#define EKEYREVOKED    163     /* Key has been revoked */
+#define EKEYREJECTED   164     /* Key was rejected by service */
 
 /* for robust mutexes */
-#define        EOWNERDEAD      165     /* Owner died */
-#define        ENOTRECOVERABLE 166     /* State not recoverable */
+#define EOWNERDEAD     165     /* Owner died */
+#define ENOTRECOVERABLE 166    /* State not recoverable */
 
-#define        ERFKILL         167     /* Operation not possible due to RF-kill */
+#define ERFKILL                167     /* Operation not possible due to RF-kill */
 
 #define EHWPOISON      168     /* Memory page has hardware error */
 
index 75edded..0bda78f 100644 (file)
@@ -12,7 +12,7 @@
 #define O_APPEND       0x0008
 #define O_DSYNC                0x0010  /* used to be O_SYNC, see below */
 #define O_NONBLOCK     0x0080
-#define O_CREAT         0x0100 /* not fcntl */
+#define O_CREAT                0x0100  /* not fcntl */
 #define O_TRUNC                0x0200  /* not fcntl */
 #define O_EXCL         0x0400  /* not fcntl */
 #define O_NOCTTY       0x0800  /* not fcntl */
@@ -50,7 +50,7 @@
 
 /*
  * The flavours of struct flock.  "struct flock" is the ABI compliant
- * variant.  Finally struct flock64 is the LFS variant of struct flock.  As
+ * variant.  Finally struct flock64 is the LFS variant of struct flock.         As
  * a historic accident and inconsistence with the ABI definition it doesn't
  * contain all the same fields as struct flock.
  */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
new file mode 100644 (file)
index 0000000..4d07881
--- /dev/null
@@ -0,0 +1,331 @@
+/*
+ * Format of an instruction in memory.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 2000 by Ralf Baechle
+ * Copyright (C) 2006 by Thiemo Seufer
+ */
+#ifndef _UAPI_ASM_INST_H
+#define _UAPI_ASM_INST_H
+
+/*
+ * Major opcodes; before MIPS IV cop1x was called cop3.
+ */
+enum major_op {
+       spec_op, bcond_op, j_op, jal_op,
+       beq_op, bne_op, blez_op, bgtz_op,
+       addi_op, addiu_op, slti_op, sltiu_op,
+       andi_op, ori_op, xori_op, lui_op,
+       cop0_op, cop1_op, cop2_op, cop1x_op,
+       beql_op, bnel_op, blezl_op, bgtzl_op,
+       daddi_op, daddiu_op, ldl_op, ldr_op,
+       spec2_op, jalx_op, mdmx_op, spec3_op,
+       lb_op, lh_op, lwl_op, lw_op,
+       lbu_op, lhu_op, lwr_op, lwu_op,
+       sb_op, sh_op, swl_op, sw_op,
+       sdl_op, sdr_op, swr_op, cache_op,
+       ll_op, lwc1_op, lwc2_op, pref_op,
+       lld_op, ldc1_op, ldc2_op, ld_op,
+       sc_op, swc1_op, swc2_op, major_3b_op,
+       scd_op, sdc1_op, sdc2_op, sd_op
+};
+
+/*
+ * func field of spec opcode.
+ */
+enum spec_op {
+       sll_op, movc_op, srl_op, sra_op,
+       sllv_op, pmon_op, srlv_op, srav_op,
+       jr_op, jalr_op, movz_op, movn_op,
+       syscall_op, break_op, spim_op, sync_op,
+       mfhi_op, mthi_op, mflo_op, mtlo_op,
+       dsllv_op, spec2_unused_op, dsrlv_op, dsrav_op,
+       mult_op, multu_op, div_op, divu_op,
+       dmult_op, dmultu_op, ddiv_op, ddivu_op,
+       add_op, addu_op, sub_op, subu_op,
+       and_op, or_op, xor_op, nor_op,
+       spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
+       dadd_op, daddu_op, dsub_op, dsubu_op,
+       tge_op, tgeu_op, tlt_op, tltu_op,
+       teq_op, spec5_unused_op, tne_op, spec6_unused_op,
+       dsll_op, spec7_unused_op, dsrl_op, dsra_op,
+       dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
+};
+
+/*
+ * func field of spec2 opcode.
+ */
+enum spec2_op {
+       madd_op, maddu_op, mul_op, spec2_3_unused_op,
+       msub_op, msubu_op, /* more unused ops */
+       clz_op = 0x20, clo_op,
+       dclz_op = 0x24, dclo_op,
+       sdbpp_op = 0x3f
+};
+
+/*
+ * func field of spec3 opcode.
+ */
+enum spec3_op {
+       ext_op, dextm_op, dextu_op, dext_op,
+       ins_op, dinsm_op, dinsu_op, dins_op,
+       lx_op = 0x0a,
+       bshfl_op = 0x20,
+       dbshfl_op = 0x24,
+       rdhwr_op = 0x3b
+};
+
+/*
+ * rt field of bcond opcodes.
+ */
+enum rt_op {
+       bltz_op, bgez_op, bltzl_op, bgezl_op,
+       spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
+       tgei_op, tgeiu_op, tlti_op, tltiu_op,
+       teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
+       bltzal_op, bgezal_op, bltzall_op, bgezall_op,
+       rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
+       rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
+       bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
+};
+
+/*
+ * rs field of cop opcodes.
+ */
+enum cop_op {
+       mfc_op        = 0x00, dmfc_op       = 0x01,
+       cfc_op        = 0x02, mtc_op        = 0x04,
+       dmtc_op       = 0x05, ctc_op        = 0x06,
+       bc_op         = 0x08, cop_op        = 0x10,
+       copm_op       = 0x18
+};
+
+/*
+ * rt field of cop.bc_op opcodes
+ */
+enum bcop_op {
+       bcf_op, bct_op, bcfl_op, bctl_op
+};
+
+/*
+ * func field of cop0 coi opcodes.
+ */
+enum cop0_coi_func {
+       tlbr_op       = 0x01, tlbwi_op      = 0x02,
+       tlbwr_op      = 0x06, tlbp_op       = 0x08,
+       rfe_op        = 0x10, eret_op       = 0x18
+};
+
+/*
+ * func field of cop0 com opcodes.
+ */
+enum cop0_com_func {
+       tlbr1_op      = 0x01, tlbw_op       = 0x02,
+       tlbp1_op      = 0x08, dctr_op       = 0x09,
+       dctw_op       = 0x0a
+};
+
+/*
+ * fmt field of cop1 opcodes.
+ */
+enum cop1_fmt {
+       s_fmt, d_fmt, e_fmt, q_fmt,
+       w_fmt, l_fmt
+};
+
+/*
+ * func field of cop1 instructions using d, s or w format.
+ */
+enum cop1_sdw_func {
+       fadd_op      =  0x00, fsub_op      =  0x01,
+       fmul_op      =  0x02, fdiv_op      =  0x03,
+       fsqrt_op     =  0x04, fabs_op      =  0x05,
+       fmov_op      =  0x06, fneg_op      =  0x07,
+       froundl_op   =  0x08, ftruncl_op   =  0x09,
+       fceill_op    =  0x0a, ffloorl_op   =  0x0b,
+       fround_op    =  0x0c, ftrunc_op    =  0x0d,
+       fceil_op     =  0x0e, ffloor_op    =  0x0f,
+       fmovc_op     =  0x11, fmovz_op     =  0x12,
+       fmovn_op     =  0x13, frecip_op    =  0x15,
+       frsqrt_op    =  0x16, fcvts_op     =  0x20,
+       fcvtd_op     =  0x21, fcvte_op     =  0x22,
+       fcvtw_op     =  0x24, fcvtl_op     =  0x25,
+       fcmp_op      =  0x30
+};
+
+/*
+ * func field of cop1x opcodes (MIPS IV).
+ */
+enum cop1x_func {
+       lwxc1_op     =  0x00, ldxc1_op     =  0x01,
+       pfetch_op    =  0x07, swxc1_op     =  0x08,
+       sdxc1_op     =  0x09, madd_s_op    =  0x20,
+       madd_d_op    =  0x21, madd_e_op    =  0x22,
+       msub_s_op    =  0x28, msub_d_op    =  0x29,
+       msub_e_op    =  0x2a, nmadd_s_op   =  0x30,
+       nmadd_d_op   =  0x31, nmadd_e_op   =  0x32,
+       nmsub_s_op   =  0x38, nmsub_d_op   =  0x39,
+       nmsub_e_op   =  0x3a
+};
+
+/*
+ * func field for mad opcodes (MIPS IV).
+ */
+enum mad_func {
+       madd_fp_op      = 0x08, msub_fp_op      = 0x0a,
+       nmadd_fp_op     = 0x0c, nmsub_fp_op     = 0x0e
+};
+
+/*
+ * func field for special3 lx opcodes (Cavium Octeon).
+ */
+enum lx_func {
+       lwx_op  = 0x00,
+       lhx_op  = 0x04,
+       lbux_op = 0x06,
+       ldx_op  = 0x08,
+       lwux_op = 0x10,
+       lhux_op = 0x14,
+       lbx_op  = 0x16,
+};
+
+/*
+ * Damn ...  bitfields depend from byteorder :-(
+ */
+#ifdef __MIPSEB__
+#define BITFIELD_FIELD(field, more)                                    \
+       field;                                                          \
+       more
+
+#elif defined(__MIPSEL__)
+
+#define BITFIELD_FIELD(field, more)                                    \
+       more                                                            \
+       field;
+
+#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
+#endif
+
+struct j_format {
+       BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+       BITFIELD_FIELD(unsigned int target : 26,
+       ;))
+};
+
+struct i_format {                      /* signed immediate format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(signed int simmediate : 16,
+       ;))))
+};
+
+struct u_format {                      /* unsigned immediate format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int uimmediate : 16,
+       ;))))
+};
+
+struct c_format {                      /* Cache (>= R6000) format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(unsigned int c_op : 3,
+       BITFIELD_FIELD(unsigned int cache : 2,
+       BITFIELD_FIELD(unsigned int simmediate : 16,
+       ;)))))
+};
+
+struct r_format {                      /* Register format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int re : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct p_format {              /* Performance counter format (R10000) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int re : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct f_format {                      /* FPU register format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int : 1,
+       BITFIELD_FIELD(unsigned int fmt : 4,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int re : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))))))
+};
+
+struct ma_format {             /* FPU multiply and add format (MIPS IV) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fr : 5,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 4,
+       BITFIELD_FIELD(unsigned int fmt : 2,
+       ;)))))))
+};
+
+struct b_format {                      /* BREAK and SYSCALL */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int code : 20,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))
+};
+
+struct ps_format {                     /* MIPS-3D / paired single format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct v_format {                              /* MDMX vector format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int sel : 4,
+       BITFIELD_FIELD(unsigned int fmt : 1,
+       BITFIELD_FIELD(unsigned int vt : 5,
+       BITFIELD_FIELD(unsigned int vs : 5,
+       BITFIELD_FIELD(unsigned int vd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))))))
+};
+
+union mips_instruction {
+       unsigned int word;
+       unsigned short halfword[2];
+       unsigned char byte[4];
+       struct j_format j_format;
+       struct i_format i_format;
+       struct u_format u_format;
+       struct c_format c_format;
+       struct r_format r_format;
+       struct p_format p_format;
+       struct f_format f_format;
+       struct ma_format ma_format;
+       struct b_format b_format;
+       struct ps_format ps_format;
+       struct v_format v_format;
+};
+
+#endif /* _UAPI_ASM_INST_H */
index addd56b..b1e6377 100644 (file)
@@ -41,7 +41,7 @@
 #define         TIOCPKT_START          0x08    /* start output */
 #define         TIOCPKT_NOSTOP         0x10    /* no more ^S, ^Q */
 #define         TIOCPKT_DOSTOP         0x20    /* now do ^S ^Q */
-#define  TIOCPKT_IOCTL         0x40    /* state change of pty driver */
+#define         TIOCPKT_IOCTL          0x40    /* state change of pty driver */
 #define TIOCSWINSZ     _IOW('t', 103, struct winsize)  /* set window size */
 #define TIOCGWINSZ     _IOR('t', 104, struct winsize)  /* get window size */
 #define TIOCNOTTY      0x5471          /* void tty association */
@@ -63,9 +63,9 @@
 #define FIONREAD       0x467f
 #define TIOCINQ                FIONREAD
 
-#define TIOCGETP        0x7408
-#define TIOCSETP        0x7409
-#define TIOCSETN        0x740a                 /* TIOCSETP wo flush */
+#define TIOCGETP       0x7408
+#define TIOCSETP       0x7409
+#define TIOCSETN       0x740a                  /* TIOCSETP wo flush */
 
 /* #define TIOCSETA    _IOW('t', 20, struct termios) set termios struct */
 /* #define TIOCSETAW   _IOW('t', 21, struct termios) drain output, set */
@@ -74,9 +74,9 @@
 /* #define TIOCSETD    _IOW('t', 27, int)      set line discipline */
                                                /* 127-124 compat */
 
-#define TIOCSBRK       0x5427  /* BSD compatibility */
-#define TIOCCBRK       0x5428  /* BSD compatibility */
-#define TIOCGSID       0x7416  /* Return the session ID of FD */
+#define TIOCSBRK       0x5427  /* BSD compatibility */
+#define TIOCCBRK       0x5428  /* BSD compatibility */
+#define TIOCGSID       0x7416  /* Return the session ID of FD */
 #define TCGETS2                _IOR('T', 0x2A, struct termios2)
 #define TCSETS2                _IOW('T', 0x2B, struct termios2)
 #define TCSETSW2       _IOW('T', 0x2C, struct termios2)
 #define TIOCGLCKTRMIOS 0x548b
 #define TIOCSLCKTRMIOS 0x548c
 #define TIOCSERGSTRUCT 0x548d /* For debugging only */
-#define TIOCSERGETLSR   0x548e /* Get line status register */
-#define TIOCSERGETMULTI 0x548f /* Get multiport config  */
+#define TIOCSERGETLSR  0x548e /* Get line status register */
+#define TIOCSERGETMULTI 0x548f /* Get multiport config */
 #define TIOCSERSETMULTI 0x5490 /* Set multiport config */
-#define TIOCMIWAIT      0x5491 /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT     0x5492 /* read serial port inline interrupt counts */
+#define TIOCMIWAIT     0x5491 /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT    0x5492 /* read serial port inline interrupt counts */
 
 #endif /* __ASM_IOCTLS_H */
index 9a936ac..cfcb876 100644 (file)
@@ -64,7 +64,7 @@
 
 #define MADV_NORMAL    0               /* no further special treatment */
 #define MADV_RANDOM    1               /* expect random page references */
-#define MADV_SEQUENTIAL        2               /* expect sequential page references */
+#define MADV_SEQUENTIAL 2              /* expect sequential page references */
 #define MADV_WILLNEED  3               /* will need these pages */
 #define MADV_DONTNEED  4               /* don't need these pages */
 
 #define MADV_DONTFORK  10              /* don't inherit across fork */
 #define MADV_DOFORK    11              /* do inherit across fork */
 
-#define MADV_MERGEABLE   12            /* KSM may merge identical pages */
+#define MADV_MERGEABLE  12             /* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 13            /* KSM may not merge identical pages */
-#define MADV_HWPOISON    100           /* poison a page for testing */
+#define MADV_HWPOISON   100            /* poison a page for testing */
 
 #define MADV_HUGEPAGE  14              /* Worth backing with hugepages */
-#define MADV_NOHUGEPAGE        15              /* Not worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15             /* Not worth backing with hugepages */
 
-#define MADV_DONTDUMP   16             /* Explicity exclude from the core dump,
+#define MADV_DONTDUMP  16              /* Explicity exclude from the core dump,
                                           overrides the coredump filter bits */
 #define MADV_DODUMP    17              /* Clear the MADV_NODUMP flag */
 
index 1bc1f52..4d58d84 100644 (file)
@@ -49,8 +49,8 @@ struct pt_regs {
        unsigned long cp0_tcstatus;
 #endif /* CONFIG_MIPS_MT_SMTC */
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-       unsigned long long mpl[3];        /* MTM{0,1,2} */
-       unsigned long long mtp[3];        /* MTP{0,1,2} */
+       unsigned long long mpl[3];        /* MTM{0,1,2} */
+       unsigned long long mtp[3];        /* MTP{0,1,2} */
 #endif
 } __attribute__ ((aligned (8)));
 
@@ -67,14 +67,14 @@ struct pt_regs {
 #define PTRACE_GET_THREAD_AREA 25
 #define PTRACE_SET_THREAD_AREA 26
 
-/* Calls to trace a 64bit program from a 32bit program.  */
+/* Calls to trace a 64bit program from a 32bit program.         */
 #define PTRACE_PEEKTEXT_3264   0xc0
 #define PTRACE_PEEKDATA_3264   0xc1
 #define PTRACE_POKETEXT_3264   0xc2
 #define PTRACE_POKEDATA_3264   0xc3
 #define PTRACE_GET_THREAD_AREA_3264    0xc4
 
-/* Read and write watchpoint registers.  */
+/* Read and write watchpoint registers.         */
 enum pt_watch_style {
        pt_watch_style_mips32,
        pt_watch_style_mips64
index 7281a4d..e1085ac 100644 (file)
@@ -12,8 +12,8 @@
 
 struct semid64_ds {
        struct ipc64_perm sem_perm;             /* permissions .. see ipc.h */
-       __kernel_time_t sem_otime;              /* last semop time */
-       __kernel_time_t sem_ctime;              /* last change time */
+       __kernel_time_t sem_otime;              /* last semop time */
+       __kernel_time_t sem_ctime;              /* last change time */
        unsigned long   sem_nsems;              /* no. of semaphores in array */
        unsigned long   __unused1;
        unsigned long   __unused2;
index 7344650..6a87141 100644 (file)
@@ -11,7 +11,7 @@
 
 
 #define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int))
-#undef __ARCH_SI_TRAPNO        /* exception code needs to fill this ...  */
+#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ...  */
 
 #define HAVE_ARCH_SIGINFO_T
 
@@ -55,7 +55,7 @@ typedef struct siginfo {
                        int _overrun;           /* overrun count */
                        char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
                        sigval_t _sigval;       /* same as below */
-                       int _sys_private;       /* not to be passed to user */
+                       int _sys_private;       /* not to be passed to user */
                } _timer;
 
                /* POSIX.1b signals */
@@ -91,9 +91,9 @@ typedef struct siginfo {
                        short _addr_lsb;
                } _sigfault;
 
-               /* SIGPOLL, SIGXFSZ (To do ...)  */
+               /* SIGPOLL, SIGXFSZ (To do ...)  */
                struct {
-                       __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+                       __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;
        } _sifields;
index 6783c88..d6b18b4 100644 (file)
@@ -24,28 +24,28 @@ typedef unsigned long old_sigset_t;         /* at least 32 bits */
 #define SIGHUP          1      /* Hangup (POSIX).  */
 #define SIGINT          2      /* Interrupt (ANSI).  */
 #define SIGQUIT                 3      /* Quit (POSIX).  */
-#define SIGILL          4      /* Illegal instruction (ANSI).  */
-#define SIGTRAP                 5      /* Trace trap (POSIX).  */
-#define SIGIOT          6      /* IOT trap (4.2 BSD).  */
-#define SIGABRT                 SIGIOT /* Abort (ANSI).  */
+#define SIGILL          4      /* Illegal instruction (ANSI).  */
+#define SIGTRAP                 5      /* Trace trap (POSIX).  */
+#define SIGIOT          6      /* IOT trap (4.2 BSD).  */
+#define SIGABRT                 SIGIOT /* Abort (ANSI).  */
 #define SIGEMT          7
 #define SIGFPE          8      /* Floating-point exception (ANSI).  */
 #define SIGKILL                 9      /* Kill, unblockable (POSIX).  */
-#define SIGBUS         10      /* BUS error (4.2 BSD).  */
+#define SIGBUS         10      /* BUS error (4.2 BSD).  */
 #define SIGSEGV                11      /* Segmentation violation (ANSI).  */
 #define SIGSYS         12
-#define SIGPIPE                13      /* Broken pipe (POSIX).  */
-#define SIGALRM                14      /* Alarm clock (POSIX).  */
-#define SIGTERM                15      /* Termination (ANSI).  */
+#define SIGPIPE                13      /* Broken pipe (POSIX).  */
+#define SIGALRM                14      /* Alarm clock (POSIX).  */
+#define SIGTERM                15      /* Termination (ANSI).  */
 #define SIGUSR1                16      /* User-defined signal 1 (POSIX).  */
 #define SIGUSR2                17      /* User-defined signal 2 (POSIX).  */
 #define SIGCHLD                18      /* Child status has changed (POSIX).  */
-#define SIGCLD         SIGCHLD /* Same as SIGCHLD (System V).  */
+#define SIGCLD         SIGCHLD /* Same as SIGCHLD (System V).  */
 #define SIGPWR         19      /* Power failure restart (System V).  */
 #define SIGWINCH       20      /* Window size change (4.3 BSD, Sun).  */
 #define SIGURG         21      /* Urgent condition on socket (4.2 BSD).  */
-#define SIGIO          22      /* I/O now possible (4.2 BSD).  */
-#define SIGPOLL                SIGIO   /* Pollable event occurred (System V).  */
+#define SIGIO          22      /* I/O now possible (4.2 BSD).  */
+#define SIGPOLL                SIGIO   /* Pollable event occurred (System V).  */
 #define SIGSTOP                23      /* Stop, unblockable (POSIX).  */
 #define SIGTSTP                24      /* Keyboard stop (POSIX).  */
 #define SIGCONT                25      /* Continue (POSIX).  */
@@ -54,7 +54,7 @@ typedef unsigned long old_sigset_t;           /* at least 32 bits */
 #define SIGVTALRM      28      /* Virtual alarm clock (4.2 BSD).  */
 #define SIGPROF                29      /* Profiling alarm clock (4.2 BSD).  */
 #define SIGXCPU                30      /* CPU limit exceeded (4.2 BSD).  */
-#define SIGXFSZ                31      /* File size limit exceeded (4.2 BSD).  */
+#define SIGXFSZ                31      /* File size limit exceeded (4.2 BSD).  */
 
 /* These should not be considered constants from userland.  */
 #define SIGRTMIN       32
index 3e68bfb..47132f4 100644 (file)
                                   SIGPIPE when they die.  */
 #define SO_DONTROUTE   0x0010  /* Don't do local routing.  */
 #define SO_BROADCAST   0x0020  /* Allow transmission of
-                                  broadcast messages.  */
+                                  broadcast messages.  */
 #define SO_LINGER      0x0080  /* Block on close of a reliable
                                   socket to transmit pending data.  */
 #define SO_OOBINLINE 0x0100    /* Receive out-of-band data in-band.  */
 #define SO_REUSEPORT 0x0200    /* Allow local address and port reuse.  */
 
 #define SO_TYPE                0x1008  /* Compatible name for SO_STYLE.  */
-#define SO_STYLE       SO_TYPE /* Synonym */
+#define SO_STYLE       SO_TYPE /* Synonym */
 #define SO_ERROR       0x1007  /* get error status and clear */
 #define SO_SNDBUF      0x1001  /* Send buffer size. */
 #define SO_RCVBUF      0x1002  /* Receive buffer. */
 #define SO_SNDLOWAT    0x1003  /* send low-water mark */
 #define SO_RCVLOWAT    0x1004  /* receive low-water mark */
 #define SO_SNDTIMEO    0x1005  /* send timeout */
-#define SO_RCVTIMEO    0x1006  /* receive timeout */
+#define SO_RCVTIMEO    0x1006  /* receive timeout */
 #define SO_ACCEPTCONN  0x1009
 #define SO_PROTOCOL    0x1028  /* protocol type */
 #define SO_DOMAIN      0x1029  /* domain/socket family */
 #define SO_BINDTODEVICE                25
 
 /* Socket filtering */
-#define SO_ATTACH_FILTER        26
-#define SO_DETACH_FILTER        27
+#define SO_ATTACH_FILTER       26
+#define SO_DETACH_FILTER       27
 #define SO_GET_FILTER          SO_ATTACH_FILTER
 
-#define SO_PEERNAME             28
+#define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
 #define SCM_TIMESTAMP          SO_TIMESTAMP
 
@@ -79,7 +79,7 @@
 #define SO_TIMESTAMPING                37
 #define SCM_TIMESTAMPING       SO_TIMESTAMPING
 
-#define SO_RXQ_OVFL             40
+#define SO_RXQ_OVFL            40
 
 #define SO_WIFI_STATUS         41
 #define SCM_WIFI_STATUS                SO_WIFI_STATUS
index ed1a5f7..419fbe6 100644 (file)
@@ -14,7 +14,7 @@
 
 /* Socket-level I/O control calls. */
 #define FIOGETOWN      _IOR('f', 123, int)
-#define FIOSETOWN      _IOW('f', 124, int)
+#define FIOSETOWN      _IOW('f', 124, int)
 
 #define SIOCATMARK     _IOR('s', 7, int)
 #define SIOCSPGRP      _IOW('s', 8, pid_t)
index fe9a4c3..b47bc54 100644 (file)
@@ -23,7 +23,7 @@ struct stat {
        __u32           st_nlink;
        uid_t           st_uid;
        gid_t           st_gid;
-       unsigned        st_rdev;
+       unsigned        st_rdev;
        long            st_pad2[2];
        off_t           st_size;
        long            st_pad3;
index 0f805c7..3305c83 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <linux/types.h>
 
-typedef __kernel_fsid_t        fsid_t;
+typedef __kernel_fsid_t               fsid_t;
 
 #endif
 
@@ -31,7 +31,7 @@ struct statfs {
        long            f_bavail;
 
        /* Linux specials */
-       __kernel_fsid_t f_fsid;
+       __kernel_fsid_t f_fsid;
        long            f_namelen;
        long            f_flags;
        long            f_spare[5];
@@ -73,7 +73,7 @@ struct statfs64 {                     /* Same as struct statfs */
        long            f_bavail;
 
        /* Linux specials */
-       __kernel_fsid_t f_fsid;
+       __kernel_fsid_t f_fsid;
        long            f_namelen;
        long            f_flags;
        long            f_spare[5];
index 4f47b7d..ae637e9 100644 (file)
  * sysmips(2) is deprecated - though some existing software uses it.
  * We only support the following commands.
  */
-#define SETNAME                    1   /* set hostname                  */
+#define SETNAME                           1    /* set hostname                  */
 #define FLUSH_CACHE               3    /* writeback and invalidate caches */
-#define MIPS_FIXADE                7   /* control address error fixing  */
-#define MIPS_RDNVRAM              10   /* read NVRAM */
-#define MIPS_ATOMIC_SET                2001    /* atomically set variable       */
+#define MIPS_FIXADE                  /* control address error fixing  */
+#define MIPS_RDNVRAM             10    /* read NVRAM */
+#define MIPS_ATOMIC_SET                2001    /* atomically set variable       */
 
 #endif /* _ASM_SYSMIPS_H */
index 76630b3..2750203 100644 (file)
@@ -53,7 +53,7 @@ struct ktermios {
 };
 
 /* c_cc characters */
-#define VINTR           0              /* Interrupt character [ISIG].  */
+#define VINTR           0              /* Interrupt character [ISIG].  */
 #define VQUIT           1              /* Quit character [ISIG].  */
 #define VERASE          2              /* Erase character [ICANON].  */
 #define VKILL           3              /* Kill-line character [ICANON].  */
@@ -72,7 +72,7 @@ struct ktermios {
 #define VDSUSP         11              /* Delayed suspend character [ISIG].  */
 #endif
 #define VREPRINT       12              /* Reprint-line character [ICANON].  */
-#define VDISCARD       13              /* Discard character [IEXTEN].  */
+#define VDISCARD       13              /* Discard character [IEXTEN].  */
 #define VWERASE                14              /* Word-erase character [ICANON].  */
 #define VLNEXT         15              /* Literal-next character [IEXTEN].  */
 #define VEOF           16              /* End-of-file character [ICANON].  */
@@ -92,7 +92,7 @@ struct ktermios {
 #define IXON   0002000         /* Enable start/stop output control.  */
 #define IXANY  0004000         /* Any character will restart after stop.  */
 #define IXOFF  0010000         /* Enable start/stop input control.  */
-#define IMAXBEL        0020000         /* Ring bell when input queue is full.  */
+#define IMAXBEL 0020000                /* Ring bell when input queue is full.  */
 #define IUTF8  0040000         /* Input is UTF-8 */
 
 /* c_oflag bits */
@@ -105,123 +105,123 @@ struct ktermios {
 #define OFILL  0000100
 #define OFDEL  0000200
 #define NLDLY  0000400
-#define   NL0  0000000
-#define   NL1  0000400
+#define          NL0   0000000
+#define          NL1   0000400
 #define CRDLY  0003000
-#define   CR0  0000000
-#define   CR1  0001000
-#define   CR2  0002000
-#define   CR3  0003000
+#define          CR0   0000000
+#define          CR1   0001000
+#define          CR2   0002000
+#define          CR3   0003000
 #define TABDLY 0014000
-#define   TAB0 0000000
-#define   TAB1 0004000
-#define   TAB2 0010000
-#define   TAB3 0014000
-#define   XTABS        0014000
+#define          TAB0  0000000
+#define          TAB1  0004000
+#define          TAB2  0010000
+#define          TAB3  0014000
+#define          XTABS 0014000
 #define BSDLY  0020000
-#define   BS0  0000000
-#define   BS1  0020000
+#define          BS0   0000000
+#define          BS1   0020000
 #define VTDLY  0040000
-#define   VT0  0000000
-#define   VT1  0040000
+#define          VT0   0000000
+#define          VT1   0040000
 #define FFDLY  0100000
-#define   FF0  0000000
-#define   FF1  0100000
+#define          FF0   0000000
+#define          FF1   0100000
 /*
 #define PAGEOUT ???
-#define WRAP    ???
+#define WRAP   ???
  */
 
 /* c_cflag bit meaning */
 #define CBAUD  0010017
-#define  B0    0000000         /* hang up */
-#define  B50   0000001
-#define  B75   0000002
-#define  B110  0000003
-#define  B134  0000004
-#define  B150  0000005
-#define  B200  0000006
-#define  B300  0000007
-#define  B600  0000010
-#define  B1200 0000011
-#define  B1800 0000012
-#define  B2400 0000013
-#define  B4800 0000014
-#define  B9600 0000015
-#define  B19200        0000016
-#define  B38400        0000017
+#define         B0     0000000         /* hang up */
+#define         B50    0000001
+#define         B75    0000002
+#define         B110   0000003
+#define         B134   0000004
+#define         B150   0000005
+#define         B200   0000006
+#define         B300   0000007
+#define         B600   0000010
+#define         B1200  0000011
+#define         B1800  0000012
+#define         B2400  0000013
+#define         B4800  0000014
+#define         B9600  0000015
+#define         B19200 0000016
+#define         B38400 0000017
 #define EXTA B19200
 #define EXTB B38400
 #define CSIZE  0000060         /* Number of bits per byte (mask).  */
-#define   CS5  0000000         /* 5 bits per byte.  */
-#define   CS6  0000020         /* 6 bits per byte.  */
-#define   CS7  0000040         /* 7 bits per byte.  */
-#define   CS8  0000060         /* 8 bits per byte.  */
+#define          CS5   0000000         /* 5 bits per byte.  */
+#define          CS6   0000020         /* 6 bits per byte.  */
+#define          CS7   0000040         /* 7 bits per byte.  */
+#define          CS8   0000060         /* 8 bits per byte.  */
 #define CSTOPB 0000100         /* Two stop bits instead of one.  */
 #define CREAD  0000200         /* Enable receiver.  */
 #define PARENB 0000400         /* Parity enable.  */
-#define PARODD 0001000         /* Odd parity instead of even.  */
+#define PARODD 0001000         /* Odd parity instead of even.  */
 #define HUPCL  0002000         /* Hang up on last close.  */
 #define CLOCAL 0004000         /* Ignore modem status lines.  */
 #define CBAUDEX 0010000
-#define    BOTHER 0010000
-#define    B57600 0010001
-#define   B115200 0010002
-#define   B230400 0010003
-#define   B460800 0010004
-#define   B500000 0010005
-#define   B576000 0010006
-#define   B921600 0010007
-#define  B1000000 0010010
-#define  B1152000 0010011
-#define  B1500000 0010012
-#define  B2000000 0010013
-#define  B2500000 0010014
-#define  B3000000 0010015
-#define  B3500000 0010016
-#define  B4000000 0010017
+#define           BOTHER 0010000
+#define           B57600 0010001
+#define          B115200 0010002
+#define          B230400 0010003
+#define          B460800 0010004
+#define          B500000 0010005
+#define          B576000 0010006
+#define          B921600 0010007
+#define         B1000000 0010010
+#define         B1152000 0010011
+#define         B1500000 0010012
+#define         B2000000 0010013
+#define         B2500000 0010014
+#define         B3000000 0010015
+#define         B3500000 0010016
+#define         B4000000 0010017
 #define CIBAUD   002003600000  /* input baud rate */
-#define CMSPAR    010000000000 /* mark or space (stick) parity */
+#define CMSPAR   010000000000  /* mark or space (stick) parity */
 #define CRTSCTS          020000000000  /* flow control */
 
-#define IBSHIFT        16              /* Shift from CBAUD to CIBAUD */
+#define IBSHIFT 16             /* Shift from CBAUD to CIBAUD */
 
 /* c_lflag bits */
 #define ISIG   0000001         /* Enable signals.  */
 #define ICANON 0000002         /* Do erase and kill processing.  */
 #define XCASE  0000004
-#define ECHO   0000010         /* Enable echo.  */
+#define ECHO   0000010         /* Enable echo.  */
 #define ECHOE  0000020         /* Visual erase for ERASE.  */
-#define ECHOK  0000040         /* Echo NL after KILL.  */
-#define ECHONL 0000100         /* Echo NL even if ECHO is off.  */
+#define ECHOK  0000040         /* Echo NL after KILL.  */
+#define ECHONL 0000100         /* Echo NL even if ECHO is off.  */
 #define NOFLSH 0000200         /* Disable flush after interrupt.  */
 #define IEXTEN 0000400         /* Enable DISCARD and LNEXT.  */
-#define ECHOCTL        0001000         /* Echo control characters as ^X.  */
-#define ECHOPRT        0002000         /* Hardcopy visual erase.  */
+#define ECHOCTL 0001000                /* Echo control characters as ^X.  */
+#define ECHOPRT 0002000                /* Hardcopy visual erase.  */
 #define ECHOKE 0004000         /* Visual erase for KILL.  */
 #define FLUSHO 0020000
 #define PENDIN 0040000         /* Retype pending input (state).  */
-#define TOSTOP 0100000         /* Send SIGTTOU for background output.  */
-#define ITOSTOP        TOSTOP
-#define EXTPROC        0200000         /* External processing on pty */
+#define TOSTOP 0100000         /* Send SIGTTOU for background output.  */
+#define ITOSTOP TOSTOP
+#define EXTPROC 0200000                /* External processing on pty */
 
 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#define TIOCSER_TEMT    0x01   /* Transmitter physically empty */
+#define TIOCSER_TEMT   0x01    /* Transmitter physically empty */
 
 /* tcflow() and TCXONC use these */
-#define        TCOOFF          0       /* Suspend output.  */
-#define        TCOON           1       /* Restart suspended output.  */
-#define        TCIOFF          2       /* Send a STOP character.  */
-#define        TCION           3       /* Send a START character.  */
+#define TCOOFF         0       /* Suspend output.  */
+#define TCOON          1       /* Restart suspended output.  */
+#define TCIOFF         2       /* Send a STOP character.  */
+#define TCION          3       /* Send a START character.  */
 
 /* tcflush() and TCFLSH use these */
-#define        TCIFLUSH        0       /* Discard data received but not yet read.  */
-#define        TCOFLUSH        1       /* Discard data written but not yet sent.  */
-#define        TCIOFLUSH       2       /* Discard all pending data.  */
+#define TCIFLUSH       0       /* Discard data received but not yet read.  */
+#define TCOFLUSH       1       /* Discard data written but not yet sent.  */
+#define TCIOFLUSH      2       /* Discard all pending data.  */
 
 /* tcsetattr uses these */
-#define        TCSANOW         TCSETS  /* Change immediately.  */
-#define        TCSADRAIN       TCSETSW /* Change when pending output is written.  */
-#define        TCSAFLUSH       TCSETSF /* Flush pending input before changing.  */
+#define TCSANOW                TCSETS  /* Change immediately.  */
+#define TCSADRAIN      TCSETSW /* Change when pending output is written.  */
+#define TCSAFLUSH      TCSETSF /* Flush pending input before changing.  */
 
 #endif /* _ASM_TERMBITS_H */
index 574fbdf..baeb2fa 100644 (file)
@@ -31,12 +31,12 @@ struct tchars {
 };
 
 struct ltchars {
-        char    t_suspc;        /* stop process signal */
-        char    t_dsuspc;       /* delayed stop process signal */
-        char    t_rprntc;       /* reprint line */
-        char    t_flushc;       /* flush output (toggles) */
-        char    t_werasc;       /* word erase */
-        char    t_lnextc;       /* literal next character */
+       char    t_suspc;        /* stop process signal */
+       char    t_dsuspc;       /* delayed stop process signal */
+       char    t_rprntc;       /* reprint line */
+       char    t_flushc;       /* flush output (toggles) */
+       char    t_werasc;       /* word erase */
+       char    t_lnextc;       /* literal next character */
 };
 
 /* TIOCGSIZE, TIOCSSIZE not defined yet.  Only needed for SunOS source
index 0eebf3c..16338b8 100644 (file)
  * Linux o32 style syscalls are in the range from 4000 to 4999.
  */
 #define __NR_Linux                     4000
-#define __NR_syscall                   (__NR_Linux +   0)
-#define __NR_exit                      (__NR_Linux +   1)
-#define __NR_fork                      (__NR_Linux +   2)
-#define __NR_read                      (__NR_Linux +   3)
-#define __NR_write                     (__NR_Linux +   4)
-#define __NR_open                      (__NR_Linux +   5)
-#define __NR_close                     (__NR_Linux +   6)
-#define __NR_waitpid                   (__NR_Linux +   7)
-#define __NR_creat                     (__NR_Linux +   8)
-#define __NR_link                      (__NR_Linux +   9)
+#define __NR_syscall                   (__NR_Linux +   0)
+#define __NR_exit                      (__NR_Linux +   1)
+#define __NR_fork                      (__NR_Linux +   2)
+#define __NR_read                      (__NR_Linux +   3)
+#define __NR_write                     (__NR_Linux +   4)
+#define __NR_open                      (__NR_Linux +   5)
+#define __NR_close                     (__NR_Linux +   6)
+#define __NR_waitpid                   (__NR_Linux +   7)
+#define __NR_creat                     (__NR_Linux +   8)
+#define __NR_link                      (__NR_Linux +   9)
 #define __NR_unlink                    (__NR_Linux +  10)
 #define __NR_execve                    (__NR_Linux +  11)
 #define __NR_chdir                     (__NR_Linux +  12)
  * Linux 64-bit syscalls are in the range from 5000 to 5999.
  */
 #define __NR_Linux                     5000
-#define __NR_read                      (__NR_Linux +   0)
-#define __NR_write                     (__NR_Linux +   1)
-#define __NR_open                      (__NR_Linux +   2)
-#define __NR_close                     (__NR_Linux +   3)
-#define __NR_stat                      (__NR_Linux +   4)
-#define __NR_fstat                     (__NR_Linux +   5)
-#define __NR_lstat                     (__NR_Linux +   6)
-#define __NR_poll                      (__NR_Linux +   7)
-#define __NR_lseek                     (__NR_Linux +   8)
-#define __NR_mmap                      (__NR_Linux +   9)
+#define __NR_read                      (__NR_Linux +   0)
+#define __NR_write                     (__NR_Linux +   1)
+#define __NR_open                      (__NR_Linux +   2)
+#define __NR_close                     (__NR_Linux +   3)
+#define __NR_stat                      (__NR_Linux +   4)
+#define __NR_fstat                     (__NR_Linux +   5)
+#define __NR_lstat                     (__NR_Linux +   6)
+#define __NR_poll                      (__NR_Linux +   7)
+#define __NR_lseek                     (__NR_Linux +   8)
+#define __NR_mmap                      (__NR_Linux +   9)
 #define __NR_mprotect                  (__NR_Linux +  10)
 #define __NR_munmap                    (__NR_Linux +  11)
 #define __NR_brk                       (__NR_Linux +  12)
  * Linux N32 syscalls are in the range from 6000 to 6999.
  */
 #define __NR_Linux                     6000
-#define __NR_read                      (__NR_Linux +   0)
-#define __NR_write                     (__NR_Linux +   1)
-#define __NR_open                      (__NR_Linux +   2)
-#define __NR_close                     (__NR_Linux +   3)
-#define __NR_stat                      (__NR_Linux +   4)
-#define __NR_fstat                     (__NR_Linux +   5)
-#define __NR_lstat                     (__NR_Linux +   6)
-#define __NR_poll                      (__NR_Linux +   7)
-#define __NR_lseek                     (__NR_Linux +   8)
-#define __NR_mmap                      (__NR_Linux +   9)
+#define __NR_read                      (__NR_Linux +   0)
+#define __NR_write                     (__NR_Linux +   1)
+#define __NR_open                      (__NR_Linux +   2)
+#define __NR_close                     (__NR_Linux +   3)
+#define __NR_stat                      (__NR_Linux +   4)
+#define __NR_fstat                     (__NR_Linux +   5)
+#define __NR_lstat                     (__NR_Linux +   6)
+#define __NR_poll                      (__NR_Linux +   7)
+#define __NR_lseek                     (__NR_Linux +   8)
+#define __NR_mmap                      (__NR_Linux +   9)
 #define __NR_mprotect                  (__NR_Linux +  10)
 #define __NR_munmap                    (__NR_Linux +  11)
 #define __NR_brk                       (__NR_Linux +  12)
index dd9d99b..624b0ee 100644 (file)
@@ -2,4 +2,4 @@
 # Makefile for the Jazz family specific parts of the kernel
 #
 
-obj-y          := irq.o jazzdma.o reset.o setup.o
+obj-y          := irq.o jazzdma.o reset.o setup.o
index f21868b..e1ea4f6 100644 (file)
@@ -111,7 +111,7 @@ asmlinkage void plat_irq_dispatch(void)
 }
 
 static void r4030_set_mode(enum clock_event_mode mode,
-                           struct clock_event_device *evt)
+                          struct clock_event_device *evt)
 {
        /* Nothing to do ...  */
 }
@@ -146,7 +146,7 @@ void __init plat_time_init(void)
 
        BUG_ON(HZ != 100);
 
-       cd->cpumask             = cpumask_of(cpu);
+       cd->cpumask             = cpumask_of(cpu);
        clockevents_register_device(cd);
        action->dev_id = cd;
        setup_irq(JAZZ_TIMER_IRQ, action);
index 2d8e447..db6f5af 100644 (file)
@@ -63,7 +63,7 @@ static inline void vdma_pgtbl_init(void)
 static int __init vdma_init(void)
 {
        /*
-        * Allocate 32k of memory for DMA page tables.  This needs to be page
+        * Allocate 32k of memory for DMA page tables.  This needs to be page
         * aligned and should be uncached to avoid cache flushing after every
         * update.
         */
@@ -218,14 +218,14 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
                        printk
                            ("vdma_map: Invalid logical address: %08lx\n",
                             laddr);
-               return -EINVAL; /* invalid logical address */
+               return -EINVAL; /* invalid logical address */
        }
        if (paddr > 0x1fffffff) {
                if (vdma_debug)
                        printk
                            ("vdma_map: Invalid physical address: %08lx\n",
                             paddr);
-               return -EINVAL; /* invalid physical address */
+               return -EINVAL; /* invalid physical address */
        }
 
        pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
index 820e926..e4374a5 100644 (file)
@@ -137,9 +137,9 @@ static struct resource jazz_esp_rsrc[] = {
 };
 
 static struct platform_device jazz_esp_pdev = {
-       .name           = "jazz_esp",
-       .num_resources  = ARRAY_SIZE(jazz_esp_rsrc),
-       .resource       = jazz_esp_rsrc
+       .name           = "jazz_esp",
+       .num_resources  = ARRAY_SIZE(jazz_esp_rsrc),
+       .resource       = jazz_esp_rsrc
 };
 
 static struct resource jazz_sonic_rsrc[] = {
@@ -156,9 +156,9 @@ static struct resource jazz_sonic_rsrc[] = {
 };
 
 static struct platform_device jazz_sonic_pdev = {
-       .name           = "jazzsonic",
-       .num_resources  = ARRAY_SIZE(jazz_sonic_rsrc),
-       .resource       = jazz_sonic_rsrc
+       .name           = "jazzsonic",
+       .num_resources  = ARRAY_SIZE(jazz_sonic_rsrc),
+       .resource       = jazz_sonic_rsrc
 };
 
 static struct resource jazz_cmos_rsrc[] = {
@@ -175,13 +175,13 @@ static struct resource jazz_cmos_rsrc[] = {
 };
 
 static struct platform_device jazz_cmos_pdev = {
-       .name           = "rtc_cmos",
-       .num_resources  = ARRAY_SIZE(jazz_cmos_rsrc),
-       .resource       = jazz_cmos_rsrc
+       .name           = "rtc_cmos",
+       .num_resources  = ARRAY_SIZE(jazz_cmos_rsrc),
+       .resource       = jazz_cmos_rsrc
 };
 
 static struct platform_device pcspeaker_pdev = {
-       .name           = "pcspkr",
+       .name           = "pcspkr",
        .id             = -1,
 };
 
index 43d964d..be2b3de 100644 (file)
@@ -52,7 +52,7 @@ static bool is_avt2;
 static struct nand_ecclayout qi_lb60_ecclayout_1gb = {
        .eccbytes = 36,
        .eccpos = {
-               6,  7,  8,  9,  10, 11, 12, 13,
+               6,  7,  8,  9,  10, 11, 12, 13,
                14, 15, 16, 17, 18, 19, 20, 21,
                22, 23, 24, 25, 26, 27, 28, 29,
                30, 31, 32, 33, 34, 35, 36, 37,
@@ -210,7 +210,7 @@ static const uint32_t qi_lb60_keymap[] = {
        KEY(6, 7, KEY_RIGHT),   /* S57 */
 
        KEY(7, 0, KEY_LEFTSHIFT),       /* S58 */
-       KEY(7, 1, KEY_LEFTALT), /* S59 */
+       KEY(7, 1, KEY_LEFTALT), /* S59 */
        KEY(7, 2, KEY_QI_FN),   /* S60 */
 };
 
@@ -317,7 +317,7 @@ static struct spi_board_info qi_lb60_spi_board_info[] = {
 
 /* Battery */
 static struct jz_battery_platform_data qi_lb60_battery_pdata = {
-       .gpio_charge =  JZ_GPIO_PORTC(27),
+       .gpio_charge =  JZ_GPIO_PORTC(27),
        .gpio_charge_active_low = 1,
        .info = {
                .name = "battery",
@@ -344,7 +344,7 @@ static struct gpio_keys_platform_data qi_lb60_gpio_keys_data = {
 };
 
 static struct platform_device qi_lb60_gpio_keys = {
-       .name = "gpio-keys",
+       .name = "gpio-keys",
        .id =   -1,
        .dev = {
                .platform_data = &qi_lb60_gpio_keys_data,
index 330a0f2..a8acdef 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 SoC clock support debugfs entries
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index 118a8a5..484d38a 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 SoC clock support
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
@@ -31,7 +31,7 @@
 #define JZ_REG_CLOCK_LOW_POWER 0x04
 #define JZ_REG_CLOCK_PLL       0x10
 #define JZ_REG_CLOCK_GATE      0x20
-#define JZ_REG_CLOCK_SLEEP_CTRL        0x24
+#define JZ_REG_CLOCK_SLEEP_CTRL 0x24
 #define JZ_REG_CLOCK_I2S       0x60
 #define JZ_REG_CLOCK_LCD       0x64
 #define JZ_REG_CLOCK_MMC       0x68
index d7feb89..317ec6f 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 SoC DMA support
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index e1ddb95..00b798d 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 platform GPIO support
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index fc57ded..2531da1 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 platform IRQ support
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index f75e39d..0f48720 100644 (file)
@@ -2,7 +2,7 @@
  *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index 6d14dcd..e9348fd 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 platform devices
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
@@ -102,7 +102,7 @@ struct platform_device jz4740_mmc_device = {
                .dma_mask = &jz4740_mmc_device.dev.coherent_dma_mask,
                .coherent_dma_mask = DMA_BIT_MASK(32),
        },
-       .num_resources  = ARRAY_SIZE(jz4740_mmc_resources),
+       .num_resources  = ARRAY_SIZE(jz4740_mmc_resources),
        .resource       = jz4740_mmc_resources,
 };
 
@@ -114,7 +114,7 @@ static struct resource jz4740_rtc_resources[] = {
                .flags  = IORESOURCE_MEM,
        },
        {
-               .start  = JZ4740_IRQ_RTC,
+               .start  = JZ4740_IRQ_RTC,
                .end    = JZ4740_IRQ_RTC,
                .flags  = IORESOURCE_IRQ,
        },
@@ -144,7 +144,7 @@ static struct resource jz4740_i2c_resources[] = {
 struct platform_device jz4740_i2c_device = {
        .name           = "jz4740-i2c",
        .id             = 0,
-       .num_resources  = ARRAY_SIZE(jz4740_i2c_resources),
+       .num_resources  = ARRAY_SIZE(jz4740_i2c_resources),
        .resource       = jz4740_i2c_resources,
 };
 
@@ -318,8 +318,8 @@ static struct resource jz4740_wdt_resources[] = {
 };
 
 struct platform_device jz4740_wdt_device = {
-       .name          = "jz4740-wdt",
-       .id            = -1,
+       .name          = "jz4740-wdt",
+       .id            = -1,
        .num_resources = ARRAY_SIZE(jz4740_wdt_resources),
        .resource      = jz4740_wdt_resources,
 };
index 6744fa7..d8e2130 100644 (file)
@@ -3,7 +3,7 @@
  *     JZ4740 SoC power management support
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index 4a70407..5a93f38 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 SoC prom code
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index 6c0da5a..b6c6343 100644 (file)
@@ -2,7 +2,7 @@
  *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index d97cfbf..76eafcb 100644 (file)
@@ -4,7 +4,7 @@
  *  JZ4740 setup code
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index 39bb4bb..5e430ce 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 platform time support
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index 22f11d7..4992461 100644 (file)
@@ -3,7 +3,7 @@
  *  JZ4740 platform timer support
  *
  *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
+ *  under  the terms of the GNU General         Public License as published by the
  *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  *
index 007c33d..f81d98f 100644 (file)
@@ -27,6 +27,7 @@ obj-$(CONFIG_CSRC_IOASIC)     += csrc-ioasic.o
 obj-$(CONFIG_CSRC_POWERTV)     += csrc-powertv.o
 obj-$(CONFIG_CSRC_R4K)         += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)      += csrc-sb1250.o
+obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_SYNC_R4K)         += sync-r4k.o
 
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
@@ -39,7 +40,7 @@ obj-$(CONFIG_CPU_R4K_FPU)     += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R3000)                += r2300_fpu.o r2300_switch.o
 obj-$(CONFIG_CPU_R6000)                += r6000_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_TX39XX)       += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON)        += octeon_switch.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
 
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_SMP_UP)           += smp-up.o
@@ -53,7 +54,7 @@ obj-$(CONFIG_MIPS_CMP)                += smp-cmp.o
 obj-$(CONFIG_CPU_MIPSR2)       += spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)  += vpe.o
-obj-$(CONFIG_MIPS_VPE_APSP_API)        += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
 
 obj-$(CONFIG_I8259)            += i8259.o
 obj-$(CONFIG_IRQ_CPU)          += irq_cpu.o
@@ -98,4 +99,35 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
 
 obj-$(CONFIG_JUMP_LABEL)       += jump_label.o
 
+#
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
+# to enable DSP assembler support here even if the MIPS Release 2 CPU we
+# are targetting does not support DSP because all code-paths making use of
+# it properly check that the running CPU *actually does* support these
+# instructions.
+#
+ifeq ($(CONFIG_CPU_MIPSR2), y)
+CFLAGS_DSP                     = -DHAVE_AS_DSP
+
+#
+# Check if assembler supports DSP ASE
+#
+ifeq ($(call cc-option-yn,-mdsp), y)
+CFLAGS_DSP                     += -mdsp
+endif
+
+#
+# Check if assembler supports DSP ASE Rev2
+#
+ifeq ($(call cc-option-yn,-mdspr2), y)
+CFLAGS_DSP                     += -mdspr2
+endif
+
+CFLAGS_signal.o                        = $(CFLAGS_DSP)
+CFLAGS_signal32.o              = $(CFLAGS_DSP)
+CFLAGS_process.o               = $(CFLAGS_DSP)
+CFLAGS_branch.o                        = $(CFLAGS_DSP)
+CFLAGS_ptrace.o                        = $(CFLAGS_DSP)
+endif
+
 CPPFLAGS_vmlinux.lds           := $(KBUILD_CFLAGS)
index 9fdd8bc..e06f777 100644 (file)
@@ -6,7 +6,7 @@
  *
  * Heavily inspired by the 32-bit Sparc compat code which is
  * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek  (jj@ultra.linux.cz)
  */
 
 #define ELF_ARCH               EM_MIPS
@@ -48,7 +48,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #define TASK32_SIZE            0x7fff8000UL
 #undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE         (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE                (TASK32_SIZE / 3 * 2)
 
 #include <asm/processor.h>
 #include <linux/module.h>
@@ -67,8 +67,8 @@ struct elf_prstatus32
        pid_t   pr_ppid;
        pid_t   pr_pgrp;
        pid_t   pr_sid;
-       struct compat_timeval pr_utime; /* User time */
-       struct compat_timeval pr_stime; /* System time */
+       struct compat_timeval pr_utime; /* User time */
+       struct compat_timeval pr_stime; /* System time */
        struct compat_timeval pr_cutime;/* Cumulative user time */
        struct compat_timeval pr_cstime;/* Cumulative system time */
        elf_gregset_t pr_reg;   /* GP registers */
@@ -88,7 +88,7 @@ struct elf_prpsinfo32
        pid_t   pr_pid, pr_ppid, pr_pgrp, pr_sid;
        /* Lots missing */
        char    pr_fname[16];   /* filename of executable */
-       char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+       char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
 };
 
 #define elf_caddr_t    u32
index ff44823..556a435 100644 (file)
@@ -6,7 +6,7 @@
  *
  * Heavily inspired by the 32-bit Sparc compat code which is
  * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek  (jj@ultra.linux.cz)
  */
 
 #define ELF_ARCH               EM_MIPS
@@ -50,7 +50,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #define TASK32_SIZE            0x7fff8000UL
 #undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE         (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE                (TASK32_SIZE / 3 * 2)
 
 #include <asm/processor.h>
 
@@ -86,8 +86,8 @@ struct elf_prstatus32
        pid_t   pr_ppid;
        pid_t   pr_pgrp;
        pid_t   pr_sid;
-       struct compat_timeval pr_utime; /* User time */
-       struct compat_timeval pr_stime; /* System time */
+       struct compat_timeval pr_utime; /* User time */
+       struct compat_timeval pr_stime; /* System time */
        struct compat_timeval pr_cutime;/* Cumulative user time */
        struct compat_timeval pr_cstime;/* Cumulative system time */
        elf_gregset_t pr_reg;   /* GP registers */
@@ -107,7 +107,7 @@ struct elf_prpsinfo32
        pid_t   pr_pid, pr_ppid, pr_pgrp, pr_sid;
        /* Lots missing */
        char    pr_fname[16];   /* filename of executable */
-       char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+       char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
 };
 
 #define elf_caddr_t    u32
index e908e81..64c4fd6 100644 (file)
@@ -170,7 +170,7 @@ bmips_smp_entry:
 
        /* switch to permanent stack and continue booting */
 
-       .global bmips_secondary_reentry
+       .global bmips_secondary_reentry
 bmips_secondary_reentry:
        la      k0, bmips_smp_boot_sp
        lw      sp, 0(k0)
@@ -182,7 +182,7 @@ bmips_secondary_reentry:
 #endif /* CONFIG_SMP */
 
        .align  4
-       .global bmips_reset_nmi_vec_end
+       .global bmips_reset_nmi_vec_end
 bmips_reset_nmi_vec_end:
 
 END(bmips_reset_nmi_vec)
@@ -206,7 +206,7 @@ LEAF(bmips_smp_int_vec)
        eret
 
        .align  4
-       .global bmips_smp_int_vec_end
+       .global bmips_smp_int_vec_end
 bmips_smp_int_vec_end:
 
 END(bmips_smp_int_vec)
index 4d735d0..83ffe95 100644 (file)
@@ -57,7 +57,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
         */
        case bcond_op:
                switch (insn.i_format.rt) {
-               case bltz_op:
+               case bltz_op:
                case bltzl_op:
                        if ((long)regs->regs[insn.i_format.rs] < 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -197,8 +197,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                bit += (bit != 0);
                bit += 23;
                switch (insn.i_format.rt & 3) {
-               case 0: /* bc1f */
-               case 2: /* bc1fl */
+               case 0: /* bc1f */
+               case 2: /* bc1fl */
                        if (~fcr31 & (1 << bit)) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == 2)
@@ -208,8 +208,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        regs->cp0_epc = epc;
                        break;
 
-               case 1: /* bc1t */
-               case 3: /* bc1tl */
+               case 1: /* bc1t */
+               case 3: /* bc1tl */
                        if (fcr31 & (1 << bit)) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == 3)
index 69bbfae..15f618b 100644 (file)
@@ -41,7 +41,7 @@
  * the rest of the system
  */
 static void sibyte_set_mode(enum clock_event_mode mode,
-                           struct clock_event_device *evt)
+                          struct clock_event_device *evt)
 {
        unsigned int cpu = smp_processor_id();
        void __iomem *cfg, *init;
@@ -144,7 +144,7 @@ void __cpuinit sb1480_clockevent_init(void)
 
        bcm1480_unmask_irq(cpu, irq);
 
-       action->handler = sibyte_counter_handler;
+       action->handler = sibyte_counter_handler;
        action->flags   = IRQF_PERCPU | IRQF_TIMER;
        action->name    = name;
        action->dev_id  = cd;
index ed648cb..ff1f01b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  DS1287 clockevent driver
  *
- *  Copyright (C) 2008  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -89,7 +89,7 @@ static void ds1287_event_handler(struct clock_event_device *dev)
 static struct clock_event_device ds1287_clockevent = {
        .name           = "ds1287",
        .features       = CLOCK_EVT_FEAT_PERIODIC,
-       .set_next_event = ds1287_set_next_event,
+       .set_next_event = ds1287_set_next_event,
        .set_mode       = ds1287_set_mode,
        .event_handler  = ds1287_event_handler,
 };
index 831b475..f069460 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  GT641xx clockevent routines.
  *
- *  Copyright (C) 2007  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -98,7 +98,7 @@ static struct clock_event_device gt641xx_timer0_clockevent = {
        .name           = "gt641xx-timer0",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .irq            = GT641XX_TIMER0_IRQ,
-       .set_next_event = gt641xx_timer0_set_next_event,
+       .set_next_event = gt641xx_timer0_set_next_event,
        .set_mode       = gt641xx_timer0_set_mode,
        .event_handler  = gt641xx_timer0_event_handler,
 };
index 7532392..07b847d 100644 (file)
@@ -25,7 +25,7 @@
 #ifndef CONFIG_MIPS_MT_SMTC
 
 static int mips_next_event(unsigned long delta,
-                           struct clock_event_device *evt)
+                          struct clock_event_device *evt)
 {
        unsigned int cnt;
        int res;
@@ -66,7 +66,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
                goto out;
 
        /*
-        * The same applies to performance counter interrupts.  But with the
+        * The same applies to performance counter interrupts.  But with the
         * above we now know that the reason we got here must be a timer
         * interrupt.  Being the paranoiacs we are we check anyway.
         */
@@ -119,7 +119,7 @@ int c0_compare_int_usable(void)
        unsigned int cnt;
 
        /*
-        * IP7 already pending?  Try to clear it by acking the timer.
+        * IP7 already pending?  Try to clear it by acking the timer.
         */
        if (c0_compare_int_pending()) {
                cnt = read_c0_count();
index e73439f..200f277 100644 (file)
@@ -39,7 +39,7 @@
  * the rest of the system
  */
 static void sibyte_set_mode(enum clock_event_mode mode,
-                           struct clock_event_device *evt)
+                          struct clock_event_device *evt)
 {
        unsigned int cpu = smp_processor_id();
        void __iomem *cfg, *init;
@@ -143,7 +143,7 @@ void __cpuinit sb1250_clockevent_init(void)
 
        sb1250_unmask_irq(cpu, irq);
 
-       action->handler = sibyte_counter_handler;
+       action->handler = sibyte_counter_handler;
        action->flags   = IRQF_PERCPU | IRQF_TIMER;
        action->name    = name;
        action->dev_id  = cd;
index 2e72d30..9de5ed7 100644 (file)
@@ -49,7 +49,7 @@ static int smtc_nextinvpe[NR_CPUS];
 
 /*
  * Timestamps stored are absolute values to be programmed
- * into Count register.  Valid timestamps will never be zero.
+ * into Count register.         Valid timestamps will never be zero.
  * If a Zero Count value is actually calculated, it is converted
  * to be a 1, which will introduce 1 or two CPU cycles of error
  * roughly once every four billion events, which at 1000 HZ means
index e5c30b1..2ae0846 100644 (file)
@@ -4,7 +4,7 @@
  * for more details.
  *
  * Based on linux/arch/mips/kernel/cevt-r4k.c,
- *          linux/arch/mips/jmr3927/rbhma3100/setup.c
+ *         linux/arch/mips/jmr3927/rbhma3100/setup.c
  *
  * Copyright 2001 MontaVista Software Inc.
  * Copyright (C) 2000-2001 Toshiba Corporation
@@ -129,7 +129,7 @@ static struct txx9_clock_event_device txx9_clock_event_device = {
                                  CLOCK_EVT_FEAT_ONESHOT,
                .rating         = 200,
                .set_mode       = txx9tmr_set_mode,
-               .set_next_event = txx9tmr_set_next_event,
+               .set_next_event = txx9tmr_set_next_event,
        },
 };
 
@@ -139,7 +139,7 @@ static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
        struct clock_event_device *cd = &txx9_cd->cd;
        struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
 
-       __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
+       __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
        cd->event_handler(cd);
        return IRQ_HANDLED;
 }
index d6a1864..de3c25f 100644 (file)
@@ -84,9 +84,9 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
                ".set   noreorder\n\t"
                ".set   nomacro\n\t"
                "mult   %2, %3\n\t"
-               "dsll32 %0, %4, %5\n\t"
+               "dsll32 %0, %4, %5\n\t"
                "mflo   $0\n\t"
-               "dsll32 %1, %4, %5\n\t"
+               "dsll32 %1, %4, %5\n\t"
                "nop\n\t"
                ".set   pop"
                : "=&r" (lv1), "=r" (lw)
@@ -239,7 +239,7 @@ static inline void check_daddi(void)
        panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
 }
 
-int daddiu_bug  = -1;
+int daddiu_bug = -1;
 
 static inline void check_daddiu(void)
 {
@@ -273,7 +273,7 @@ static inline void check_daddiu(void)
 #ifdef HAVE_AS_SET_DADDI
                ".set   daddi\n\t"
 #endif
-               "daddiu %0, %2, %4\n\t"
+               "daddiu %0, %2, %4\n\t"
                "addiu  %1, $0, %4\n\t"
                "daddu  %1, %2\n\t"
                ".set   pop"
@@ -292,7 +292,7 @@ static inline void check_daddiu(void)
        asm volatile(
                "addiu  %2, $0, %3\n\t"
                "dsrl   %2, %2, 1\n\t"
-               "daddiu %0, %2, %4\n\t"
+               "daddiu %0, %2, %4\n\t"
                "addiu  %1, $0, %4\n\t"
                "daddu  %1, %2"
                : "=&r" (v), "=&r" (w), "=&r" (tmp)
index cce3782..6bfccc2 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) xxxx  the Anonymous
  * Copyright (C) 1994 - 2006 Ralf Baechle
  * Copyright (C) 2003, 2004  Maciej W. Rozycki
- * Copyright (C) 2001, 2004, 2011, 2012  MIPS Technologies, Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012         MIPS Technologies, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -69,12 +69,12 @@ void r4k_wait_irqoff(void)
                        "       wait                    \n"
                        "       .set    pop             \n");
        local_irq_enable();
-       __asm__("       .globl __pastwait       \n"
+       __asm__("       .globl __pastwait       \n"
                "__pastwait:                    \n");
 }
 
 /*
- * The RM7000 variant has to handle erratum 38.  The workaround is to not
+ * The RM7000 variant has to handle erratum 38.         The workaround is to not
  * have any pending stores when the WAIT instruction is executed.
  */
 static void rm7k_wait_irqoff(void)
@@ -201,6 +201,7 @@ void __init check_wait(void)
                break;
 
        case CPU_M14KC:
+       case CPU_M14KEC:
        case CPU_24K:
        case CPU_34K:
        case CPU_1004K:
@@ -331,6 +332,34 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
 #endif
 }
 
+static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
+{
+       switch (isa) {
+       case MIPS_CPU_ISA_M64R2:
+               c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
+       case MIPS_CPU_ISA_M64R1:
+               c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
+       case MIPS_CPU_ISA_V:
+               c->isa_level |= MIPS_CPU_ISA_V;
+       case MIPS_CPU_ISA_IV:
+               c->isa_level |= MIPS_CPU_ISA_IV;
+       case MIPS_CPU_ISA_III:
+               c->isa_level |= MIPS_CPU_ISA_I | MIPS_CPU_ISA_II |
+                               MIPS_CPU_ISA_III;
+               break;
+
+       case MIPS_CPU_ISA_M32R2:
+               c->isa_level |= MIPS_CPU_ISA_M32R2;
+       case MIPS_CPU_ISA_M32R1:
+               c->isa_level |= MIPS_CPU_ISA_M32R1;
+       case MIPS_CPU_ISA_II:
+               c->isa_level |= MIPS_CPU_ISA_II;
+       case MIPS_CPU_ISA_I:
+               c->isa_level |= MIPS_CPU_ISA_I;
+               break;
+       }
+}
+
 static char unknown_isa[] __cpuinitdata = KERN_ERR \
        "Unsupported ISA type, c0.config0: %d.";
 
@@ -348,10 +377,10 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
        case 0:
                switch ((config0 & MIPS_CONF_AR) >> 10) {
                case 0:
-                       c->isa_level = MIPS_CPU_ISA_M32R1;
+                       set_isa(c, MIPS_CPU_ISA_M32R1);
                        break;
                case 1:
-                       c->isa_level = MIPS_CPU_ISA_M32R2;
+                       set_isa(c, MIPS_CPU_ISA_M32R2);
                        break;
                default:
                        goto unknown;
@@ -360,10 +389,10 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
        case 2:
                switch ((config0 & MIPS_CONF_AR) >> 10) {
                case 0:
-                       c->isa_level = MIPS_CPU_ISA_M64R1;
+                       set_isa(c, MIPS_CPU_ISA_M64R1);
                        break;
                case 1:
-                       c->isa_level = MIPS_CPU_ISA_M64R2;
+                       set_isa(c, MIPS_CPU_ISA_M64R2);
                        break;
                default:
                        goto unknown;
@@ -439,6 +468,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
                c->ases |= MIPS_ASE_MIPSMT;
        if (config3 & MIPS_CONF3_ULRI)
                c->options |= MIPS_CPU_ULRI;
+       if (config3 & MIPS_CONF3_ISA)
+               c->options |= MIPS_CPU_MICROMIPS;
+       if (config3 & MIPS_CONF3_VZ)
+               c->ases |= MIPS_ASE_VZ;
 
        return config3 & MIPS_CONF_M;
 }
@@ -469,7 +502,7 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
        c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
        ok = decode_config0(c);                 /* Read Config registers.  */
-       BUG_ON(!ok);                            /* Arch spec violation!  */
+       BUG_ON(!ok);                            /* Arch spec violation!  */
        if (ok)
                ok = decode_config1(c);
        if (ok)
@@ -494,7 +527,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R2000:
                c->cputype = CPU_R2000;
                __cpu_name[cpu] = "R2000";
-               c->isa_level = MIPS_CPU_ISA_I;
+               set_isa(c, MIPS_CPU_ISA_I);
                c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
                             MIPS_CPU_NOFPUEX;
                if (__cpu_has_fpu())
@@ -514,7 +547,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        c->cputype = CPU_R3000;
                        __cpu_name[cpu] = "R3000";
                }
-               c->isa_level = MIPS_CPU_ISA_I;
+               set_isa(c, MIPS_CPU_ISA_I);
                c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
                             MIPS_CPU_NOFPUEX;
                if (__cpu_has_fpu())
@@ -540,7 +573,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        }
                }
 
-               c->isa_level = MIPS_CPU_ISA_III;
+               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_WATCH | MIPS_CPU_VCE |
                             MIPS_CPU_LLSC;
@@ -580,14 +613,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        __cpu_name[cpu] = "NEC Vr41xx";
                        break;
                }
-               c->isa_level = MIPS_CPU_ISA_III;
+               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS;
                c->tlbsize = 32;
                break;
        case PRID_IMP_R4300:
                c->cputype = CPU_R4300;
                __cpu_name[cpu] = "R4300";
-               c->isa_level = MIPS_CPU_ISA_III;
+               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_LLSC;
                c->tlbsize = 32;
@@ -595,7 +628,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R4600:
                c->cputype = CPU_R4600;
                __cpu_name[cpu] = "R4600";
-               c->isa_level = MIPS_CPU_ISA_III;
+               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_LLSC;
                c->tlbsize = 48;
@@ -610,13 +643,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                 */
                c->cputype = CPU_R4650;
                __cpu_name[cpu] = "R4650";
-               c->isa_level = MIPS_CPU_ISA_III;
+               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
                c->tlbsize = 48;
                break;
        #endif
        case PRID_IMP_TX39:
-               c->isa_level = MIPS_CPU_ISA_I;
+               set_isa(c, MIPS_CPU_ISA_I);
                c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
 
                if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
@@ -641,7 +674,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R4700:
                c->cputype = CPU_R4700;
                __cpu_name[cpu] = "R4700";
-               c->isa_level = MIPS_CPU_ISA_III;
+               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_LLSC;
                c->tlbsize = 48;
@@ -649,7 +682,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_TX49:
                c->cputype = CPU_TX49XX;
                __cpu_name[cpu] = "R49XX";
-               c->isa_level = MIPS_CPU_ISA_III;
+               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS | MIPS_CPU_LLSC;
                if (!(c->processor_id & 0x08))
                        c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
@@ -658,7 +691,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R5000:
                c->cputype = CPU_R5000;
                __cpu_name[cpu] = "R5000";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_LLSC;
                c->tlbsize = 48;
@@ -666,7 +699,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R5432:
                c->cputype = CPU_R5432;
                __cpu_name[cpu] = "R5432";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_WATCH | MIPS_CPU_LLSC;
                c->tlbsize = 48;
@@ -674,7 +707,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R5500:
                c->cputype = CPU_R5500;
                __cpu_name[cpu] = "R5500";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_WATCH | MIPS_CPU_LLSC;
                c->tlbsize = 48;
@@ -682,7 +715,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_NEVADA:
                c->cputype = CPU_NEVADA;
                __cpu_name[cpu] = "Nevada";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
                c->tlbsize = 48;
@@ -690,7 +723,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R6000:
                c->cputype = CPU_R6000;
                __cpu_name[cpu] = "R6000";
-               c->isa_level = MIPS_CPU_ISA_II;
+               set_isa(c, MIPS_CPU_ISA_II);
                c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
                             MIPS_CPU_LLSC;
                c->tlbsize = 32;
@@ -698,7 +731,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R6000A:
                c->cputype = CPU_R6000A;
                __cpu_name[cpu] = "R6000A";
-               c->isa_level = MIPS_CPU_ISA_II;
+               set_isa(c, MIPS_CPU_ISA_II);
                c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
                             MIPS_CPU_LLSC;
                c->tlbsize = 32;
@@ -706,38 +739,38 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_RM7000:
                c->cputype = CPU_RM7000;
                __cpu_name[cpu] = "RM7000";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_LLSC;
                /*
-                * Undocumented RM7000:  Bit 29 in the info register of
+                * Undocumented RM7000:  Bit 29 in the info register of
                 * the RM7000 v2.0 indicates if the TLB has 48 or 64
                 * entries.
                 *
-                * 29      1 =>    64 entry JTLB
-                *         0 =>    48 entry JTLB
+                * 29      1 =>    64 entry JTLB
+                *         0 =>    48 entry JTLB
                 */
                c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
                break;
        case PRID_IMP_RM9000:
                c->cputype = CPU_RM9000;
                __cpu_name[cpu] = "RM9000";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_LLSC;
                /*
                 * Bit 29 in the info register of the RM9000
                 * indicates if the TLB has 48 or 64 entries.
                 *
-                * 29      1 =>    64 entry JTLB
-                *         0 =>    48 entry JTLB
+                * 29      1 =>    64 entry JTLB
+                *         0 =>    48 entry JTLB
                 */
                c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
                break;
        case PRID_IMP_R8000:
                c->cputype = CPU_R8000;
                __cpu_name[cpu] = "RM8000";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
                             MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_LLSC;
@@ -746,7 +779,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R10000:
                c->cputype = CPU_R10000;
                __cpu_name[cpu] = "R10000";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
                             MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -756,7 +789,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R12000:
                c->cputype = CPU_R12000;
                __cpu_name[cpu] = "R12000";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
                             MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -766,7 +799,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_R14000:
                c->cputype = CPU_R14000;
                __cpu_name[cpu] = "R14000";
-               c->isa_level = MIPS_CPU_ISA_IV;
+               set_isa(c, MIPS_CPU_ISA_IV);
                c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
                             MIPS_CPU_FPU | MIPS_CPU_32FPR |
                             MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -786,7 +819,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                        break;
                }
 
-               c->isa_level = MIPS_CPU_ISA_III;
+               set_isa(c, MIPS_CPU_ISA_III);
                c->options = R4K_OPTS |
                             MIPS_CPU_FPU | MIPS_CPU_LLSC |
                             MIPS_CPU_32FPR;
@@ -838,10 +871,13 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                __cpu_name[cpu] = "MIPS 20Kc";
                break;
        case PRID_IMP_24K:
-       case PRID_IMP_24KE:
                c->cputype = CPU_24K;
                __cpu_name[cpu] = "MIPS 24Kc";
                break;
+       case PRID_IMP_24KE:
+               c->cputype = CPU_24K;
+               __cpu_name[cpu] = "MIPS 24KEc";
+               break;
        case PRID_IMP_25KF:
                c->cputype = CPU_25KF;
                __cpu_name[cpu] = "MIPS 25Kc";
@@ -858,6 +894,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
                c->cputype = CPU_M14KC;
                __cpu_name[cpu] = "MIPS M14Kc";
                break;
+       case PRID_IMP_M14KEC:
+               c->cputype = CPU_M14KEC;
+               __cpu_name[cpu] = "MIPS M14KEc";
+               break;
        case PRID_IMP_1004K:
                c->cputype = CPU_1004K;
                __cpu_name[cpu] = "MIPS 1004Kc";
@@ -946,7 +986,7 @@ static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
        case PRID_IMP_PR4450:
                c->cputype = CPU_PR4450;
                __cpu_name[cpu] = "Philips PR4450";
-               c->isa_level = MIPS_CPU_ISA_M32R1;
+               set_isa(c, MIPS_CPU_ISA_M32R1);
                break;
        }
 }
@@ -1053,12 +1093,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
                return;
        }
 
-       c->options = (MIPS_CPU_TLB       |
-                       MIPS_CPU_4KEX    |
+       c->options = (MIPS_CPU_TLB       |
+                       MIPS_CPU_4KEX    |
                        MIPS_CPU_COUNTER |
-                       MIPS_CPU_DIVEC   |
-                       MIPS_CPU_WATCH   |
-                       MIPS_CPU_EJTAG   |
+                       MIPS_CPU_DIVEC   |
+                       MIPS_CPU_WATCH   |
+                       MIPS_CPU_EJTAG   |
                        MIPS_CPU_LLSC);
 
        switch (c->processor_id & 0xff00) {
@@ -1105,12 +1145,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
        }
 
        if (c->cputype == CPU_XLP) {
-               c->isa_level = MIPS_CPU_ISA_M64R2;
+               set_isa(c, MIPS_CPU_ISA_M64R2);
                c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK);
                /* This will be updated again after all threads are woken up */
                c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
        } else {
-               c->isa_level = MIPS_CPU_ISA_M64R1;
+               set_isa(c, MIPS_CPU_ISA_M64R1);
                c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
        }
 }
@@ -1129,7 +1169,7 @@ __cpuinit void cpu_probe(void)
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int cpu = smp_processor_id();
 
-       c->processor_id = PRID_IMP_UNKNOWN;
+       c->processor_id = PRID_IMP_UNKNOWN;
        c->fpu_id       = FPIR_IMP_NONE;
        c->cputype      = CPU_UNKNOWN;
 
index e7c98e2..3237c52 100644 (file)
@@ -107,6 +107,8 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
 static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
        int i;
+       unsigned long rate;
+       int ret;
 
        if (!cpu_online(policy->cpu))
                return -ENODEV;
@@ -117,15 +119,22 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
                return PTR_ERR(cpuclk);
        }
 
-       cpuclk->rate = cpu_clock_freq / 1000;
-       if (!cpuclk->rate)
+       rate = cpu_clock_freq / 1000;
+       if (!rate) {
+               clk_put(cpuclk);
                return -EINVAL;
+       }
+       ret = clk_set_rate(cpuclk, rate);
+       if (ret) {
+               clk_put(cpuclk);
+               return ret;
+       }
 
        /* clock table init */
        for (i = 2;
             (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
             i++)
-               loongson2_clockmod_table[i].frequency = (cpuclk->rate * i) / 8;
+               loongson2_clockmod_table[i].frequency = (rate * i) / 8;
 
        policy->cur = loongson2_cpufreq_get(policy->cpu);
 
@@ -195,8 +204,8 @@ static void loongson2_cpu_wait(void)
 
        spin_lock_irqsave(&loongson2_wait_lock, flags);
        cpu_freq = LOONGSON_CHIPCFG0;
-       LOONGSON_CHIPCFG0 &= ~0x7;      /* Put CPU into wait mode */
-       LOONGSON_CHIPCFG0 = cpu_freq;   /* Restore CPU state */
+       LOONGSON_CHIPCFG0 &= ~0x7;      /* Put CPU into wait mode */
+       LOONGSON_CHIPCFG0 = cpu_freq;   /* Restore CPU state */
        spin_unlock_irqrestore(&loongson2_wait_lock, flags);
 }
 
index 0f53c39..93aa302 100644 (file)
@@ -59,7 +59,7 @@ static void crash_kexec_prepare_cpus(void)
 
 #else /* !defined(CONFIG_SMP)  */
 static void crash_kexec_prepare_cpus(void) {}
-#endif /* !defined(CONFIG_SMP)  */
+#endif /* !defined(CONFIG_SMP) */
 
 void default_machine_crash_shutdown(struct pt_regs *regs)
 {
index f96f99c..468f3eb 100644 (file)
@@ -35,7 +35,7 @@ static cycle_t bcm1480_hpt_read(struct clocksource *cs)
 
 struct clocksource bcm1480_clocksource = {
        .name   = "zbbus-cycles",
-       .rating = 200,
+       .rating = 200,
        .read   = bcm1480_hpt_read,
        .mask   = CLOCKSOURCE_MASK(64),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
new file mode 100644 (file)
index 0000000..5dca24b
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+
+static cycle_t gic_hpt_read(struct clocksource *cs)
+{
+       unsigned int hi, hi2, lo;
+
+       do {
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+       } while (hi2 != hi);
+
+       return (((cycle_t) hi) << 32) + lo;
+}
+
+static struct clocksource gic_clocksource = {
+       .name   = "GIC",
+       .read   = gic_hpt_read,
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init gic_clocksource_init(unsigned int frequency)
+{
+       unsigned int config, bits;
+
+       /* Calculate the clocksource mask. */
+       GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
+       bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
+               (GIC_SH_CONFIG_COUNTBITS_SHF - 2));
+
+       /* Set clocksource mask. */
+       gic_clocksource.mask = CLOCKSOURCE_MASK(bits);
+
+       /* Calculate a somewhat reasonable rating value. */
+       gic_clocksource.rating = 200 + frequency / 10000000;
+
+       clocksource_register_hz(&gic_clocksource, frequency);
+}
index 46bd7fa..0654bff 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  DEC I/O ASIC's counter clocksource
  *
- *  Copyright (C) 2008  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 2e7c523..abd99ea 100644 (file)
@@ -45,7 +45,7 @@ unsigned int __init mips_get_pll_freq(void)
        m = PLL_GET_M(pll_reg);
        n = PLL_GET_N(pll_reg);
        p = PLL_GET_P(pll_reg);
-       pr_info("MIPS PLL Register:0x%x  M=%d  N=%d  P=%d\n", pll_reg, m, n, p);
+       pr_info("MIPS PLL Register:0x%x  M=%d  N=%d  P=%d\n", pll_reg, m, n, p);
 
        /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */
        fout = ((2 * n * fin) / (m * (0x01 << p)));
@@ -83,8 +83,8 @@ static void __init powertv_c0_hpt_clocksource_init(void)
 
 /**
  * struct tim_c - free running counter
- * @hi:        High 16 bits of the counter
- * @lo:        Low 32 bits of the counter
+ * @hi: High 16 bits of the counter
+ * @lo: Low 32 bits of the counter
  *
  * Lays out the structure of the free running counter in memory. This counter
  * increments at a rate of 27 MHz/8 on all platforms.
index e9606d9..6ecb77d 100644 (file)
@@ -44,7 +44,7 @@ static cycle_t sb1250_hpt_read(struct clocksource *cs)
 
 struct clocksource bcm1250_clocksource = {
        .name   = "bcm1250-counter-3",
-       .rating = 200,
+       .rating = 200,
        .read   = sb1250_hpt_read,
        .mask   = CLOCKSOURCE_MASK(23),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
index 9ae813e..9e6440e 100644 (file)
@@ -14,8 +14,7 @@
 
 extern void prom_putchar(char);
 
-static void __init
-early_console_write(struct console *con, const char *s, unsigned n)
+static void early_console_write(struct console *con, const char *s, unsigned n)
 {
        while (n-- && *s) {
                if (*s == '\n')
@@ -25,7 +24,7 @@ early_console_write(struct console *con, const char *s, unsigned n)
        }
 }
 
-static struct console early_console __initdata = {
+static struct console early_console = {
        .name   = "early",
        .write  = early_console_write,
        .flags  = CON_PRINTBUFFER | CON_BOOT,
index 83fa146..cf5509f 100644 (file)
@@ -125,21 +125,21 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
  *
  * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
  *
- * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
+ * lui v1, hi_16bit_of_mcount       --> b 1f (0x10000005)
  * addiu v1, v1, low_16bit_of_mcount
  * move at, ra
  * move $12, ra_address
  * jalr v1
  *  sub sp, sp, 8
- *                                  1: offset = 5 instructions
+ *                                 1: offset = 5 instructions
  * 2.2 For the Other situations
  *
- * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
+ * lui v1, hi_16bit_of_mcount       --> b 1f (0x10000004)
  * addiu v1, v1, low_16bit_of_mcount
  * move at, ra
  * jalr v1
  *  nop | move $12, ra_address | sub sp, sp, 8
- *                                  1: offset = 4 instructions
+ *                                 1: offset = 4 instructions
  */
 
 #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
@@ -228,8 +228,8 @@ int ftrace_disable_ftrace_graph_caller(void)
 
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
 
-#define S_RA_SP        (0xafbf << 16)  /* s{d,w} ra, offset(sp) */
-#define S_R_SP (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
+#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
+#define S_R_SP (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
 #define OFFSET_MASK    0xffff  /* stack offset range: 0 ~ PT_SIZE */
 
 unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
index 8a0096d..ecb347c 100644 (file)
@@ -160,7 +160,7 @@ LEAF(r4k_wait)
        .set pop
        .endm
 
-       .align  5
+       .align  5
 BUILD_ROLLBACK_PROLOGUE handle_int
 NESTED(handle_int, PT_SIZE, sp)
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -362,7 +362,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        SAVE_ALL
-       move    a0, sp
+       move    a0, sp
        jal     nmi_exception_handler
        RESTORE_ALL
        .set    mips3
@@ -409,7 +409,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
           string escapes and emits bogus warnings if it believes to
           recognize an unknown escape code.  So make the arguments
           start with an n and gas will believe \n is ok ...  */
-       .macro  __BUILD_verbose nexception
+       .macro  __BUILD_verbose nexception
        LONG_L  a1, PT_EPC(sp)
 #ifdef CONFIG_32BIT
        PRINT("Got \nexception at %08lx\012")
@@ -442,7 +442,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .endm
 
        .macro  BUILD_HANDLER exception handler clear verbose
-       __BUILD_HANDLER \exception \handler \clear \verbose _int
+       __BUILD_HANDLER \exception \handler \clear \verbose _int
        .endm
 
        BUILD_HANDLER adel ade ade silent               /* #4  */
@@ -456,7 +456,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        BUILD_HANDLER tr tr sti silent                  /* #13 */
        BUILD_HANDLER fpe fpe fpe silent                /* #15 */
        BUILD_HANDLER mdmx mdmx sti silent              /* #22 */
-#ifdef         CONFIG_HARDWARE_WATCHPOINTS
+#ifdef CONFIG_HARDWARE_WATCHPOINTS
        /*
         * For watch, interrupts will be enabled after the watch
         * registers are read.
@@ -482,8 +482,8 @@ NESTED(nmi_handler, PT_SIZE, sp)
        MFC0    k1, CP0_ENTRYHI
        andi    k1, 0xff        /* ASID_MASK */
        MFC0    k0, CP0_EPC
-       PTR_SRL k0, _PAGE_SHIFT + 1
-       PTR_SLL k0, _PAGE_SHIFT + 1
+       PTR_SRL k0, _PAGE_SHIFT + 1
+       PTR_SLL k0, _PAGE_SHIFT + 1
        or      k1, k0
        MTC0    k1, CP0_ENTRYHI
        mtc0_tlbw_hazard
index fcf9731..c61cdae 100644 (file)
@@ -133,7 +133,7 @@ EXPORT(_stext)
 #ifdef CONFIG_BOOT_RAW
        /*
         * Give us a fighting chance of running if execution beings at the
-        * kernel load address.  This is needed because this platform does
+        * kernel load address.  This is needed because this platform does
         * not have a ELF loader yet.
         */
 FEXPORT(__kernel_entry)
@@ -201,7 +201,7 @@ NESTED(kernel_entry, 16, sp)                        # kernel entry point
 
 #ifdef CONFIG_SMP
 /*
- * SMP slave cpus entry point.  Board specific code for bootstrap calls this
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
  * function after setting up the stack and gp registers.
  */
 NESTED(smp_bootstrap, 16, sp)
index 32b397b..2b91fe8 100644 (file)
@@ -178,7 +178,7 @@ handle_real_irq:
        } else {
                inb(PIC_MASTER_IMR);    /* DUMMY - (do we need this?) */
                outb(cached_master_mask, PIC_MASTER_IMR);
-               outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
+               outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
        }
        smtc_im_ack_irq(irq);
        raw_spin_unlock_irqrestore(&i8259A_lock, flags);
index 883fc6c..44a1f79 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  GT641xx IRQ routines.
  *
- *  Copyright (C) 2007  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -25,7 +25,7 @@
 
 #include <asm/gt64120.h>
 
-#define GT641XX_IRQ_TO_BIT(irq)        (1U << (irq - GT641XX_IRQ_BASE))
+#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
 
 static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
 
index 14ac52c..fab40f7 100644 (file)
@@ -1,6 +1,6 @@
 /*
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  *
@@ -86,7 +86,7 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d)
  */
 void ll_msc_irq(void)
 {
-       unsigned int irq;
+       unsigned int irq;
 
        /* read the interrupt vector register */
        MSCIC_READ(MSC01_IC_VEC, irq);
index b0662cf..26f4e4c 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (C) 2003 Ralf Baechle
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  *
index a5aa43d..d1fea7a 100644 (file)
@@ -48,7 +48,7 @@ again:
 }
 
 /*
- * Allocate the 16 legacy interrupts for i8259 devices.  This happens early
+ * Allocate the 16 legacy interrupts for i8259 devices.         This happens early
  * in the kernel initialization so treating allocation failure as BUG() is
  * ok.
  */
index 972263b..72ef2d2 100644 (file)
@@ -3,13 +3,13 @@
  * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
  *
  * Copyright (C) 2001 Ralf Baechle
- * Copyright (C) 2005  MIPS Technologies, Inc.  All rights reserved.
- *      Author: Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2005  MIPS Technologies, Inc. All rights reserved.
+ *     Author: Maciej W. Rozycki <macro@mips.com>
  *
  * This file define the irq handler for MIPS CPU interrupts.
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 
 #include <asm/irq_cpu.h>
 #include <asm/mipsregs.h>
@@ -113,3 +114,44 @@ void __init mips_cpu_irq_init(void)
                irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
                                         handle_percpu_irq);
 }
+
+#ifdef CONFIG_IRQ_DOMAIN
+static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+                            irq_hw_number_t hw)
+{
+       static struct irq_chip *chip;
+
+       if (hw < 2 && cpu_has_mipsmt) {
+               /* Software interrupts are used for MT/CMT IPI */
+               chip = &mips_mt_cpu_irq_controller;
+       } else {
+               chip = &mips_cpu_irq_controller;
+       }
+
+       irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
+       .map = mips_cpu_intc_map,
+       .xlate = irq_domain_xlate_onecell,
+};
+
+int __init mips_cpu_intc_init(struct device_node *of_node,
+                             struct device_node *parent)
+{
+       struct irq_domain *domain;
+
+       /* Mask interrupts. */
+       clear_c0_status(ST0_IM);
+       clear_c0_cause(CAUSEF_IP);
+
+       domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
+                                      &mips_cpu_intc_irq_domain_ops, NULL);
+       if (!domain)
+               panic("Failed to add irqdomain for MIPS CPU\n");
+
+       return 0;
+}
+#endif /* CONFIG_IRQ_DOMAIN */
index b0c55b5..ab00e49 100644 (file)
@@ -1,12 +1,12 @@
 /*
  * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
- *          linux/arch/mips/tx4927/common/tx4927_irq.c,
- *          linux/arch/mips/tx4938/common/irq.c
+ *         linux/arch/mips/tx4927/common/tx4927_irq.c,
+ *         linux/arch/mips/tx4938/common/irq.c
  *
  * Copyright 2001, 2003-2005 MontaVista Software Inc.
  * Author: MontaVista Software, Inc.
- *         ahennessy@mvista.com
- *         source@mvista.com
+ *        ahennessy@mvista.com
+ *        source@mvista.com
  * Copyright (C) 2000-2001 Toshiba Corporation
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -122,7 +122,7 @@ static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
        switch (flow_type & IRQF_TRIGGER_MASK) {
        case IRQF_TRIGGER_RISING:       mode = TXx9_IRCR_UP;    break;
        case IRQF_TRIGGER_FALLING:      mode = TXx9_IRCR_DOWN;  break;
-       case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH;  break;
+       case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH;  break;
        case IRQF_TRIGGER_LOW:  mode = TXx9_IRCR_LOW;   break;
        default:
                return -EINVAL;
index 23817a6..fcaac2f 100644 (file)
@@ -40,7 +40,7 @@ static struct hard_trap_info {
        { 6, SIGBUS },          /* instruction bus error */
        { 7, SIGBUS },          /* data bus error */
        { 9, SIGTRAP },         /* break */
-/*     { 11, SIGILL }, */      /* CPU unusable */
+/*     { 11, SIGILL }, */      /* CPU unusable */
        { 12, SIGFPE },         /* overflow */
        { 13, SIGTRAP },        /* trap */
        { 14, SIGSEGV },        /* virtual instruction cache coherency */
@@ -321,7 +321,7 @@ int kgdb_ll_trap(int cmd, const char *str,
                .regs   = regs,
                .str    = str,
                .err    = err,
-               .trapnr = trap,
+               .trapnr = trap,
                .signr  = sig,
 
        };
@@ -371,7 +371,7 @@ int kgdb_arch_init(void)
        union mips_instruction insn = {
                .r_format = {
                        .opcode = spec_op,
-                       .func   = break_op,
+                       .func   = break_op,
                }
        };
        memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
index 158467d..12bc4eb 100644 (file)
@@ -307,7 +307,7 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
 /*
  * Called after single-stepping.  p->addr is the address of the
  * instruction whose first byte has been replaced by the "break 0"
- * instruction.  To avoid the SMP problems that can occur when we
+ * instruction.         To avoid the SMP problems that can occur when we
  * temporarily put back the original opcode to single-step, we
  * single-stepped a copy of the instruction.  The address of this
  * copy is p->ainsn.insn.
@@ -535,7 +535,7 @@ void jprobe_return_end(void);
 
 void __kprobes jprobe_return(void)
 {
-       /* Assembler quirk necessitates this '0,code' business.  */
+       /* Assembler quirk necessitates this '0,code' business.  */
        asm volatile(
                "break 0,%0\n\t"
                ".globl jprobe_return_end\n"
@@ -598,7 +598,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
 
@@ -614,11 +614,11 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
-        *       function, the first instance's ret_addr will point to the
-        *       real return address, and all the rest will point to
-        *       kretprobe_trampoline
+        *       function, the first instance's ret_addr will point to the
+        *       real return address, and all the rest will point to
+        *       kretprobe_trampoline
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -645,7 +645,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();
 
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
index 253bd8a..8eeee1c 100644 (file)
@@ -76,7 +76,7 @@ out:
        return error;
 }
 
-#define RLIM_INFINITY32        0x7fffffff
+#define RLIM_INFINITY32 0x7fffffff
 #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
 
 struct rlimit32 {
@@ -105,7 +105,7 @@ SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
 
 /* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
    lseek back to original location.  They fail just like lseek does on
-   non-seekable files.  */
+   non-seekable files. */
 
 SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
        unsigned long, unused, unsigned long, a4, unsigned long, a5)
@@ -247,7 +247,7 @@ SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
 }
 
 asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
-                                   size_t count)
+                                  size_t count)
 {
        return sys_readahead(fd, merge_64(a2, a3), count);
 }
@@ -276,7 +276,7 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
        unsigned offset_a3, unsigned len_a4, unsigned len_a5)
 {
        return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
-                            merge_64(len_a4, len_a5));
+                            merge_64(len_a4, len_a5));
 }
 
 asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
@@ -286,7 +286,7 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
 }
 
 SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
-               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
+               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
 {
        return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
                                 dfd, pathname);
index df1e3e4..6e58e97 100644 (file)
@@ -17,9 +17,9 @@
 
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_user_nocheck_asm(char *__to,
-                                            const char *__from, long __len);
+                                           const char *__from, long __len);
 extern long __strncpy_from_user_asm(char *__to, const char *__from,
-                                    long __len);
+                                   long __len);
 extern long __strlen_user_nocheck_asm(const char *s);
 extern long __strlen_user_asm(const char *s);
 extern long __strnlen_user_nocheck_asm(const char *s);
index 61d6002..2b70723 100644 (file)
@@ -55,7 +55,7 @@ static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
 static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
 {
        *location = (*location & 0xffff0000) |
-                   ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+                   ((((long long) v + 0x8000LL) >> 16) & 0xffff);
 
        return 0;
 }
@@ -78,7 +78,7 @@ static int apply_r_mips_higher_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
        *location = (*location & 0xffff0000) |
-                   ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
+                   ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
 
        return 0;
 }
@@ -87,7 +87,7 @@ static int apply_r_mips_highest_rela(struct module *me, u32 *location,
                                     Elf_Addr v)
 {
        *location = (*location & 0xffff0000) |
-                   ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
+                   ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
 
        return 0;
 }
index 07ff581..977a623 100644 (file)
@@ -79,7 +79,7 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
        }
 
        *location = (*location & ~0x03ffffff) |
-                   ((*location + (v >> 2)) & 0x03ffffff);
+                   ((*location + (v >> 2)) & 0x03ffffff);
 
        return 0;
 }
@@ -122,7 +122,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
        struct mips_hi16 *l;
        Elf_Addr val, vallo;
 
-       /* Sign extend the addend we extract from the lo insn.  */
+       /* Sign extend the addend we extract from the lo insn.  */
        vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
 
        if (me->arch.r_mips_hi16_list != NULL) {
@@ -165,7 +165,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
        }
 
        /*
-        * Ok, we're done with the HI16 relocs.  Now deal with the LO16.
+        * Ok, we're done with the HI16 relocs.  Now deal with the LO16.
         */
        val = v + vallo;
        insnlo = (insnlo & ~0xffff) | (val & 0xffff);
@@ -230,7 +230,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
        }
 
        /*
-        * Normally the hi16 list should be deallocated at this point.  A
+        * Normally the hi16 list should be deallocated at this point.  A
         * malformed binary however could contain a series of R_MIPS_HI16
         * relocations not followed by a R_MIPS_LO16 relocation.  In that
         * case, free up the list and return an error.
@@ -261,7 +261,7 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
        spin_unlock_irqrestore(&dbe_lock, flags);
 
        /* Now, if we found one, we are running inside it now, hence
-           we cannot unload the module, hence no refcnt needed. */
+          we cannot unload the module, hence no refcnt needed. */
        return e;
 }
 
index 207f134..0e23343 100644 (file)
@@ -30,7 +30,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti, int usedfpu)
  */
        .align  7
        LEAF(resume)
@@ -69,7 +69,7 @@
 1:
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        /* Check if we need to store CVMSEG state */
-       mfc0    t0, $11,7       /* CvmMemCtl */
+       mfc0    t0, $11,7       /* CvmMemCtl */
        bbit0   t0, 6, 3f       /* Is user access enabled? */
 
        /* Store the CVMSEG state */
@@ -77,8 +77,8 @@
        andi    t0, 0x3f
        /* Multiply * (cache line size/sizeof(long)/2) */
        sll     t0, 7-LONGLOG-1
-       li      t1, -32768      /* Base address of CVMSEG */
-       LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
+       li      t1, -32768      /* Base address of CVMSEG */
+       LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
        synciobdma
 2:
        .set noreorder
        LONG_S  t8, 0(t2)       /* Store CVMSEG to thread storage */
        LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
        bnez    t0, 2b          /* Loop until we've copied it all */
-        LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
+        LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
        .set reorder
 
        /* Disable access to CVMSEG */
-       mfc0    t0, $11,7       /* CvmMemCtl */
+       mfc0    t0, $11,7       /* CvmMemCtl */
        xori    t0, t0, 0x40    /* Bit 6 is CVMSEG user enable */
-       mtc0    t0, $11,7       /* CvmMemCtl */
+       mtc0    t0, $11,7       /* CvmMemCtl */
 #endif
 3:
        /*
 
        dmfc0   t9, $9,7        /* CvmCtl register. */
 
-        /* Save the COP2 CRC state */
+       /* Save the COP2 CRC state */
        dmfc2   t0, 0x0201
        dmfc2   t1, 0x0202
        dmfc2   t2, 0x0200
        sd      t0, OCTEON_CP2_LLM_DAT(a0)
        sd      t1, OCTEON_CP2_LLM_DAT+8(a0)
 
-1:      bbit1  t9, 26, 3f      /* done if CvmCtl[NOCRYPTO] set */
+1:     bbit1   t9, 26, 3f      /* done if CvmCtl[NOCRYPTO] set */
 
        /* Save the COP2 crypto state */
-        /* this part is mostly common to both pass 1 and later revisions */
-       dmfc2   t0, 0x0084
-       dmfc2   t1, 0x0080
-       dmfc2   t2, 0x0081
-       dmfc2   t3, 0x0082
+       /* this part is mostly common to both pass 1 and later revisions */
+       dmfc2   t0, 0x0084
+       dmfc2   t1, 0x0080
+       dmfc2   t2, 0x0081
+       dmfc2   t3, 0x0082
        sd      t0, OCTEON_CP2_3DES_IV(a0)
-       dmfc2   t0, 0x0088
+       dmfc2   t0, 0x0088
        sd      t1, OCTEON_CP2_3DES_KEY(a0)
-       dmfc2   t1, 0x0111                      /* only necessary for pass 1 */
+       dmfc2   t1, 0x0111                      /* only necessary for pass 1 */
        sd      t2, OCTEON_CP2_3DES_KEY+8(a0)
-       dmfc2   t2, 0x0102
+       dmfc2   t2, 0x0102
        sd      t3, OCTEON_CP2_3DES_KEY+16(a0)
-       dmfc2   t3, 0x0103
+       dmfc2   t3, 0x0103
        sd      t0, OCTEON_CP2_3DES_RESULT(a0)
-       dmfc2   t0, 0x0104
-       sd      t1, OCTEON_CP2_AES_INP0(a0)     /* only necessary for pass 1 */
-       dmfc2   t1, 0x0105
+       dmfc2   t0, 0x0104
+       sd      t1, OCTEON_CP2_AES_INP0(a0)     /* only necessary for pass 1 */
+       dmfc2   t1, 0x0105
        sd      t2, OCTEON_CP2_AES_IV(a0)
        dmfc2   t2, 0x0106
        sd      t3, OCTEON_CP2_AES_IV+8(a0)
-       dmfc2   t3, 0x0107
+       dmfc2   t3, 0x0107
        sd      t0, OCTEON_CP2_AES_KEY(a0)
        dmfc2   t0, 0x0110
        sd      t1, OCTEON_CP2_AES_KEY+8(a0)
        sd      t2, OCTEON_CP2_AES_KEY+16(a0)
        dmfc2   t2, 0x0101
        sd      t3, OCTEON_CP2_AES_KEY+24(a0)
-       mfc0    t3, $15,0       /* Get the processor ID register */
+       mfc0    t3, $15,0       /* Get the processor ID register */
        sd      t0, OCTEON_CP2_AES_KEYLEN(a0)
        li      t0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
        sd      t1, OCTEON_CP2_AES_RESULT(a0)
        /* Skip to the Pass1 version of the remainder of the COP2 state */
        beq     t3, t0, 2f
 
-        /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+       /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
        dmfc2   t1, 0x0240
        dmfc2   t2, 0x0241
        dmfc2   t3, 0x0242
        sd      t2, OCTEON_CP2_HSH_DATW+72(a0)
        dmfc2   t2, 0x024D
        sd      t3, OCTEON_CP2_HSH_DATW+80(a0)
-       dmfc2   t3, 0x024E
+       dmfc2   t3, 0x024E
        sd      t0, OCTEON_CP2_HSH_DATW+88(a0)
        dmfc2   t0, 0x0250
        sd      t1, OCTEON_CP2_HSH_DATW+96(a0)
        sd      t3, OCTEON_CP2_HSH_IVW+24(a0)
        dmfc2   t3, 0x0257
        sd      t0, OCTEON_CP2_HSH_IVW+32(a0)
-       dmfc2   t0, 0x0258
+       dmfc2   t0, 0x0258
        sd      t1, OCTEON_CP2_HSH_IVW+40(a0)
-       dmfc2   t1, 0x0259
+       dmfc2   t1, 0x0259
        sd      t2, OCTEON_CP2_HSH_IVW+48(a0)
        dmfc2   t2, 0x025E
        sd      t3, OCTEON_CP2_HSH_IVW+56(a0)
        sd      t0, OCTEON_CP2_GFM_RESULT+8(a0)
        jr      ra
 
-2:      /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+2:     /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
        dmfc2   t3, 0x0040
        dmfc2   t0, 0x0041
        dmfc2   t1, 0x0042
        sd      t3, OCTEON_CP2_HSH_IVW+8(a0)
        sd      t0, OCTEON_CP2_HSH_IVW+16(a0)
 
-3:      /* pass 1 or CvmCtl[NOCRYPTO] set */
+3:     /* pass 1 or CvmCtl[NOCRYPTO] set */
        jr      ra
        END(octeon_cop2_save)
 
        .set push
        .set noreorder
        LEAF(octeon_cop2_restore)
-        /* First cache line was prefetched before the call */
-        pref    4,  128(a0)
+       /* First cache line was prefetched before the call */
+       pref    4,  128(a0)
        dmfc0   t9, $9,7        /* CvmCtl register. */
 
-        pref    4,  256(a0)
+       pref    4,  256(a0)
        ld      t0, OCTEON_CP2_CRC_IV(a0)
-        pref    4,  384(a0)
+       pref    4,  384(a0)
        ld      t1, OCTEON_CP2_CRC_LENGTH(a0)
        ld      t2, OCTEON_CP2_CRC_POLY(a0)
 
        /* Restore the COP2 CRC state */
        dmtc2   t0, 0x0201
-       dmtc2   t1, 0x1202
+       dmtc2   t1, 0x1202
        bbit1   t9, 28, 2f      /* Skip LLM if CvmCtl[NODFA_CP2] is set */
         dmtc2  t2, 0x4200
 
        ld      t0, OCTEON_CP2_3DES_IV(a0)
        ld      t1, OCTEON_CP2_3DES_KEY(a0)
        ld      t2, OCTEON_CP2_3DES_KEY+8(a0)
-       dmtc2   t0, 0x0084
+       dmtc2   t0, 0x0084
        ld      t0, OCTEON_CP2_3DES_KEY+16(a0)
-       dmtc2   t1, 0x0080
+       dmtc2   t1, 0x0080
        ld      t1, OCTEON_CP2_3DES_RESULT(a0)
-       dmtc2   t2, 0x0081
+       dmtc2   t2, 0x0081
        ld      t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
        dmtc2   t0, 0x0082
        ld      t0, OCTEON_CP2_AES_IV(a0)
-       dmtc2   t1, 0x0098
+       dmtc2   t1, 0x0098
        ld      t1, OCTEON_CP2_AES_IV+8(a0)
-       dmtc2   t2, 0x010A                  /* only really needed for pass 1 */
+       dmtc2   t2, 0x010A                  /* only really needed for pass 1 */
        ld      t2, OCTEON_CP2_AES_KEY(a0)
-       dmtc2   t0, 0x0102
+       dmtc2   t0, 0x0102
        ld      t0, OCTEON_CP2_AES_KEY+8(a0)
        dmtc2   t1, 0x0103
        ld      t1, OCTEON_CP2_AES_KEY+16(a0)
        ld      t1, OCTEON_CP2_AES_RESULT(a0)
        dmtc2   t2, 0x0107
        ld      t2, OCTEON_CP2_AES_RESULT+8(a0)
-       mfc0    t3, $15,0       /* Get the processor ID register */
+       mfc0    t3, $15,0       /* Get the processor ID register */
        dmtc2   t0, 0x0110
        li      t0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
        dmtc2   t1, 0x0100
        bne     t0, t3, 3f      /* Skip the next stuff for non-pass1 */
         dmtc2  t2, 0x0101
 
-        /* this code is specific for pass 1 */
+       /* this code is specific for pass 1 */
        ld      t0, OCTEON_CP2_HSH_DATW(a0)
        ld      t1, OCTEON_CP2_HSH_DATW+8(a0)
        ld      t2, OCTEON_CP2_HSH_DATW+16(a0)
        ld      t0, OCTEON_CP2_HSH_IVW+16(a0)
        dmtc2   t1, 0x0048
        dmtc2   t2, 0x0049
-        b done_restore   /* unconditional branch */
+       b done_restore   /* unconditional branch */
         dmtc2  t0, 0x004A
 
-3:      /* this is post-pass1 code */
+3:     /* this is post-pass1 code */
        ld      t2, OCTEON_CP2_HSH_DATW(a0)
        ld      t0, OCTEON_CP2_HSH_DATW+8(a0)
        ld      t1, OCTEON_CP2_HSH_DATW+16(a0)
@@ -433,7 +433,7 @@ done_restore:
  * sp is assumed to point to a struct pt_regs
  *
  * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
- *       safely modify k0 and k1.
+ *      safely modify k0 and k1.
  */
        .align  7
        .set push
@@ -446,14 +446,14 @@ done_restore:
        /* Save the multiplier state */
        v3mulu  k0, $0, $0
        v3mulu  k1, $0, $0
-       sd      k0, PT_MTP(sp)        /* PT_MTP    has P0 */
+       sd      k0, PT_MTP(sp)        /* PT_MTP    has P0 */
        v3mulu  k0, $0, $0
        sd      k1, PT_MTP+8(sp)      /* PT_MTP+8  has P1 */
        ori     k1, $0, 1
        v3mulu  k1, k1, $0
        sd      k0, PT_MTP+16(sp)     /* PT_MTP+16 has P2 */
        v3mulu  k0, $0, $0
-       sd      k1, PT_MPL(sp)        /* PT_MPL    has MPL0 */
+       sd      k1, PT_MPL(sp)        /* PT_MPL    has MPL0 */
        v3mulu  k1, $0, $0
        sd      k0, PT_MPL+8(sp)      /* PT_MPL+8  has MPL1 */
        jr      ra
@@ -475,19 +475,19 @@ done_restore:
        .set noreorder
        LEAF(octeon_mult_restore)
        dmfc0   k1, $9,7                /* CvmCtl register. */
-       ld      v0, PT_MPL(sp)          /* MPL0 */
-       ld      v1, PT_MPL+8(sp)        /* MPL1 */
-       ld      k0, PT_MPL+16(sp)       /* MPL2 */
+       ld      v0, PT_MPL(sp)          /* MPL0 */
+       ld      v1, PT_MPL+8(sp)        /* MPL1 */
+       ld      k0, PT_MPL+16(sp)       /* MPL2 */
        bbit1   k1, 27, 1f              /* Skip CvmCtl[NOMUL] */
        /* Normally falls through, so no time wasted here */
        nop
 
        /* Restore the multiplier state */
-       ld      k1, PT_MTP+16(sp)       /* P2 */
+       ld      k1, PT_MTP+16(sp)       /* P2 */
        MTM0    v0                      /* MPL0 */
        ld      v0, PT_MTP+8(sp)        /* P1 */
        MTM1    v1                      /* MPL1 */
-       ld      v1, PT_MTP(sp)          /* P0 */
+       ld      v1, PT_MTP(sp)          /* P0 */
        MTM2    k0                      /* MPL2 */
        MTP2    k1                      /* P2 */
        MTP1    v0                      /* P1 */
index d9c81c5..45f1ffc 100644 (file)
@@ -103,13 +103,13 @@ static struct mips_pmu mipspmu;
 
 #define M_CONFIG1_PC   (1 << 4)
 
-#define M_PERFCTL_EXL                  (1      <<  0)
-#define M_PERFCTL_KERNEL               (1      <<  1)
-#define M_PERFCTL_SUPERVISOR           (1      <<  2)
-#define M_PERFCTL_USER                 (1      <<  3)
-#define M_PERFCTL_INTERRUPT_ENABLE     (1      <<  4)
+#define M_PERFCTL_EXL                  (1      <<  0)
+#define M_PERFCTL_KERNEL               (1      <<  1)
+#define M_PERFCTL_SUPERVISOR           (1      <<  2)
+#define M_PERFCTL_USER                 (1      <<  3)
+#define M_PERFCTL_INTERRUPT_ENABLE     (1      <<  4)
 #define M_PERFCTL_EVENT(event)         (((event) & 0x3ff)  << 5)
-#define M_PERFCTL_VPEID(vpe)           ((vpe)    << 16)
+#define M_PERFCTL_VPEID(vpe)           ((vpe)    << 16)
 
 #ifdef CONFIG_CPU_BMIPS5000
 #define M_PERFCTL_MT_EN(filter)                0
@@ -117,13 +117,13 @@ static struct mips_pmu mipspmu;
 #define M_PERFCTL_MT_EN(filter)                ((filter) << 20)
 #endif /* CONFIG_CPU_BMIPS5000 */
 
-#define    M_TC_EN_ALL                 M_PERFCTL_MT_EN(0)
-#define    M_TC_EN_VPE                 M_PERFCTL_MT_EN(1)
-#define    M_TC_EN_TC                  M_PERFCTL_MT_EN(2)
-#define M_PERFCTL_TCID(tcid)           ((tcid)   << 22)
-#define M_PERFCTL_WIDE                 (1      << 30)
-#define M_PERFCTL_MORE                 (1      << 31)
-#define M_PERFCTL_TC                   (1      << 30)
+#define           M_TC_EN_ALL                  M_PERFCTL_MT_EN(0)
+#define           M_TC_EN_VPE                  M_PERFCTL_MT_EN(1)
+#define           M_TC_EN_TC                   M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid)           ((tcid)   << 22)
+#define M_PERFCTL_WIDE                 (1      << 30)
+#define M_PERFCTL_MORE                 (1      << 31)
+#define M_PERFCTL_TC                   (1      << 30)
 
 #define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL |                \
                                        M_PERFCTL_KERNEL |              \
@@ -827,7 +827,7 @@ static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
-       [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL  },
+       [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL  },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
        [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
@@ -1371,7 +1371,7 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
         (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||           \
         (r) == 176 || ((b) >= 50 && (b) <= 55) ||                      \
         ((b) >= 64 && (b) <= 67))
-#define IS_RANGE_V_34K_EVENT(r)        ((r) == 47)
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
 #endif
 
 /* 74K */
index 07dff54..135c4aa 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  Copyright (C) 1995, 1996, 2001  Ralf Baechle
  *  Copyright (C) 2001, 2004  MIPS Technologies, Inc.
- *  Copyright (C) 2004  Maciej W. Rozycki
+ *  Copyright (C) 2004 Maciej W. Rozycki
  */
 #include <linux/delay.h>
 #include <linux/kernel.h>
@@ -64,6 +64,28 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                                cpu_data[n].watch_reg_masks[i]);
                seq_printf(m, "]\n");
        }
+       if (cpu_has_mips_r) {
+               seq_printf(m, "isa\t\t\t:");
+               if (cpu_has_mips_1)
+                       seq_printf(m, "%s", "mips1");
+               if (cpu_has_mips_2)
+                       seq_printf(m, "%s", " mips2");
+               if (cpu_has_mips_3)
+                       seq_printf(m, "%s", " mips3");
+               if (cpu_has_mips_4)
+                       seq_printf(m, "%s", " mips4");
+               if (cpu_has_mips_5)
+                       seq_printf(m, "%s", " mips5");
+               if (cpu_has_mips32r1)
+                       seq_printf(m, "%s", " mips32r1");
+               if (cpu_has_mips32r2)
+                       seq_printf(m, "%s", " mips32r2");
+               if (cpu_has_mips64r1)
+                       seq_printf(m, "%s", " mips64r1");
+               if (cpu_has_mips64r2)
+                       seq_printf(m, "%s", " mips64r2");
+               seq_printf(m, "\n");
+       }
 
        seq_printf(m, "ASEs implemented\t:");
        if (cpu_has_mips16)     seq_printf(m, "%s", " mips16");
@@ -73,6 +95,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_dsp)        seq_printf(m, "%s", " dsp");
        if (cpu_has_dsp2)       seq_printf(m, "%s", " dsp2");
        if (cpu_has_mipsmt)     seq_printf(m, "%s", " mt");
+       if (cpu_has_mmips)      seq_printf(m, "%s", " micromips");
+       if (cpu_has_vz)         seq_printf(m, "%s", " vz");
        seq_printf(m, "\n");
 
        seq_printf(m, "shadow register sets\t: %d\n",
index a33d2ef..3be4405 100644 (file)
@@ -154,8 +154,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                return 0;
        }
        *childregs = *regs;
-       childregs->regs[7] = 0; /* Clear error flag */
-       childregs->regs[2] = 0; /* Child gets zero as return value */
+       childregs->regs[7] = 0; /* Clear error flag */
+       childregs->regs[2] = 0; /* Child gets zero as return value */
        if (usp)
                childregs->regs[29] = usp;
        ti->addr_limit = USER_DS;
index 4812c6d..9c6299c 100644 (file)
@@ -50,7 +50,7 @@ void ptrace_disable(struct task_struct *child)
 }
 
 /*
- * Read a general register set.  We always use the 64-bit format, even
+ * Read a general register set.         We always use the 64-bit format, even
  * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  * Registers are sign extended to fill the available space.
  */
@@ -326,7 +326,7 @@ long arch_ptrace(struct task_struct *child, long request,
                case FPC_CSR:
                        tmp = child->thread.fpu.fcr31;
                        break;
-               case FPC_EIR: { /* implementation / version register */
+               case FPC_EIR: { /* implementation / version register */
                        unsigned int flags;
 #ifdef CONFIG_MIPS_MT_SMTC
                        unsigned long irqflags;
@@ -520,10 +520,10 @@ static inline int audit_arch(void)
 {
        int arch = EM_MIPS;
 #ifdef CONFIG_64BIT
-       arch |=  __AUDIT_ARCH_64BIT;
+       arch |=  __AUDIT_ARCH_64BIT;
 #endif
 #if defined(__LITTLE_ENDIAN)
-       arch |=  __AUDIT_ARCH_LE;
+       arch |=  __AUDIT_ARCH_LE;
 #endif
        return arch;
 }
@@ -546,7 +546,7 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
        /* The 0x80 provides a way for the tracing parent to distinguish
           between a syscall stop and SIGTRAP delivery */
        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
-                                0x80 : 0));
+                                0x80 : 0));
 
        /*
         * this isn't the same as continuing with a signal, but it will do
@@ -581,7 +581,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
        /* The 0x80 provides a way for the tracing parent to distinguish
           between a syscall stop and SIGTRAP delivery */
        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
-                                0x80 : 0));
+                                0x80 : 0));
 
        /*
         * this isn't the same as continuing with a signal, but it will do
index a3b0178..9486055 100644 (file)
@@ -124,7 +124,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                case FPC_CSR:
                        tmp = child->thread.fpu.fcr31;
                        break;
-               case FPC_EIR: { /* implementation / version register */
+               case FPC_EIR: { /* implementation / version register */
                        unsigned int flags;
 #ifdef CONFIG_MIPS_MT_SMTC
                        unsigned int irqflags;
index 61c8a0f..f31063d 100644 (file)
 LEAF(_save_fp_context)
        li      v0, 0                                   # assume success
        cfc1    t1,fcr31
-       EX(swc1 $f0,(SC_FPREGS+0)(a0))
-       EX(swc1 $f1,(SC_FPREGS+8)(a0))
-       EX(swc1 $f2,(SC_FPREGS+16)(a0))
-       EX(swc1 $f3,(SC_FPREGS+24)(a0))
-       EX(swc1 $f4,(SC_FPREGS+32)(a0))
-       EX(swc1 $f5,(SC_FPREGS+40)(a0))
-       EX(swc1 $f6,(SC_FPREGS+48)(a0))
-       EX(swc1 $f7,(SC_FPREGS+56)(a0))
-       EX(swc1 $f8,(SC_FPREGS+64)(a0))
-       EX(swc1 $f9,(SC_FPREGS+72)(a0))
-       EX(swc1 $f10,(SC_FPREGS+80)(a0))
-       EX(swc1 $f11,(SC_FPREGS+88)(a0))
-       EX(swc1 $f12,(SC_FPREGS+96)(a0))
-       EX(swc1 $f13,(SC_FPREGS+104)(a0))
-       EX(swc1 $f14,(SC_FPREGS+112)(a0))
-       EX(swc1 $f15,(SC_FPREGS+120)(a0))
-       EX(swc1 $f16,(SC_FPREGS+128)(a0))
-       EX(swc1 $f17,(SC_FPREGS+136)(a0))
-       EX(swc1 $f18,(SC_FPREGS+144)(a0))
-       EX(swc1 $f19,(SC_FPREGS+152)(a0))
-       EX(swc1 $f20,(SC_FPREGS+160)(a0))
-       EX(swc1 $f21,(SC_FPREGS+168)(a0))
-       EX(swc1 $f22,(SC_FPREGS+176)(a0))
-       EX(swc1 $f23,(SC_FPREGS+184)(a0))
-       EX(swc1 $f24,(SC_FPREGS+192)(a0))
-       EX(swc1 $f25,(SC_FPREGS+200)(a0))
-       EX(swc1 $f26,(SC_FPREGS+208)(a0))
-       EX(swc1 $f27,(SC_FPREGS+216)(a0))
-       EX(swc1 $f28,(SC_FPREGS+224)(a0))
-       EX(swc1 $f29,(SC_FPREGS+232)(a0))
-       EX(swc1 $f30,(SC_FPREGS+240)(a0))
-       EX(swc1 $f31,(SC_FPREGS+248)(a0))
+       EX(swc1 $f0,(SC_FPREGS+0)(a0))
+       EX(swc1 $f1,(SC_FPREGS+8)(a0))
+       EX(swc1 $f2,(SC_FPREGS+16)(a0))
+       EX(swc1 $f3,(SC_FPREGS+24)(a0))
+       EX(swc1 $f4,(SC_FPREGS+32)(a0))
+       EX(swc1 $f5,(SC_FPREGS+40)(a0))
+       EX(swc1 $f6,(SC_FPREGS+48)(a0))
+       EX(swc1 $f7,(SC_FPREGS+56)(a0))
+       EX(swc1 $f8,(SC_FPREGS+64)(a0))
+       EX(swc1 $f9,(SC_FPREGS+72)(a0))
+       EX(swc1 $f10,(SC_FPREGS+80)(a0))
+       EX(swc1 $f11,(SC_FPREGS+88)(a0))
+       EX(swc1 $f12,(SC_FPREGS+96)(a0))
+       EX(swc1 $f13,(SC_FPREGS+104)(a0))
+       EX(swc1 $f14,(SC_FPREGS+112)(a0))
+       EX(swc1 $f15,(SC_FPREGS+120)(a0))
+       EX(swc1 $f16,(SC_FPREGS+128)(a0))
+       EX(swc1 $f17,(SC_FPREGS+136)(a0))
+       EX(swc1 $f18,(SC_FPREGS+144)(a0))
+       EX(swc1 $f19,(SC_FPREGS+152)(a0))
+       EX(swc1 $f20,(SC_FPREGS+160)(a0))
+       EX(swc1 $f21,(SC_FPREGS+168)(a0))
+       EX(swc1 $f22,(SC_FPREGS+176)(a0))
+       EX(swc1 $f23,(SC_FPREGS+184)(a0))
+       EX(swc1 $f24,(SC_FPREGS+192)(a0))
+       EX(swc1 $f25,(SC_FPREGS+200)(a0))
+       EX(swc1 $f26,(SC_FPREGS+208)(a0))
+       EX(swc1 $f27,(SC_FPREGS+216)(a0))
+       EX(swc1 $f28,(SC_FPREGS+224)(a0))
+       EX(swc1 $f29,(SC_FPREGS+232)(a0))
+       EX(swc1 $f30,(SC_FPREGS+240)(a0))
+       EX(swc1 $f31,(SC_FPREGS+248)(a0))
        EX(sw   t1,(SC_FPC_CSR)(a0))
        cfc1    t0,$0                           # implementation/version
        jr      ra
@@ -82,38 +82,38 @@ LEAF(_save_fp_context)
 LEAF(_restore_fp_context)
        li      v0, 0                                   # assume success
        EX(lw t0,(SC_FPC_CSR)(a0))
-       EX(lwc1 $f0,(SC_FPREGS+0)(a0))
-       EX(lwc1 $f1,(SC_FPREGS+8)(a0))
-       EX(lwc1 $f2,(SC_FPREGS+16)(a0))
-       EX(lwc1 $f3,(SC_FPREGS+24)(a0))
-       EX(lwc1 $f4,(SC_FPREGS+32)(a0))
-       EX(lwc1 $f5,(SC_FPREGS+40)(a0))
-       EX(lwc1 $f6,(SC_FPREGS+48)(a0))
-       EX(lwc1 $f7,(SC_FPREGS+56)(a0))
-       EX(lwc1 $f8,(SC_FPREGS+64)(a0))
-       EX(lwc1 $f9,(SC_FPREGS+72)(a0))
-       EX(lwc1 $f10,(SC_FPREGS+80)(a0))
-       EX(lwc1 $f11,(SC_FPREGS+88)(a0))
-       EX(lwc1 $f12,(SC_FPREGS+96)(a0))
-       EX(lwc1 $f13,(SC_FPREGS+104)(a0))
-       EX(lwc1 $f14,(SC_FPREGS+112)(a0))
-       EX(lwc1 $f15,(SC_FPREGS+120)(a0))
-       EX(lwc1 $f16,(SC_FPREGS+128)(a0))
-       EX(lwc1 $f17,(SC_FPREGS+136)(a0))
-       EX(lwc1 $f18,(SC_FPREGS+144)(a0))
-       EX(lwc1 $f19,(SC_FPREGS+152)(a0))
-       EX(lwc1 $f20,(SC_FPREGS+160)(a0))
-       EX(lwc1 $f21,(SC_FPREGS+168)(a0))
-       EX(lwc1 $f22,(SC_FPREGS+176)(a0))
-       EX(lwc1 $f23,(SC_FPREGS+184)(a0))
-       EX(lwc1 $f24,(SC_FPREGS+192)(a0))
-       EX(lwc1 $f25,(SC_FPREGS+200)(a0))
-       EX(lwc1 $f26,(SC_FPREGS+208)(a0))
-       EX(lwc1 $f27,(SC_FPREGS+216)(a0))
-       EX(lwc1 $f28,(SC_FPREGS+224)(a0))
-       EX(lwc1 $f29,(SC_FPREGS+232)(a0))
-       EX(lwc1 $f30,(SC_FPREGS+240)(a0))
-       EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+       EX(lwc1 $f0,(SC_FPREGS+0)(a0))
+       EX(lwc1 $f1,(SC_FPREGS+8)(a0))
+       EX(lwc1 $f2,(SC_FPREGS+16)(a0))
+       EX(lwc1 $f3,(SC_FPREGS+24)(a0))
+       EX(lwc1 $f4,(SC_FPREGS+32)(a0))
+       EX(lwc1 $f5,(SC_FPREGS+40)(a0))
+       EX(lwc1 $f6,(SC_FPREGS+48)(a0))
+       EX(lwc1 $f7,(SC_FPREGS+56)(a0))
+       EX(lwc1 $f8,(SC_FPREGS+64)(a0))
+       EX(lwc1 $f9,(SC_FPREGS+72)(a0))
+       EX(lwc1 $f10,(SC_FPREGS+80)(a0))
+       EX(lwc1 $f11,(SC_FPREGS+88)(a0))
+       EX(lwc1 $f12,(SC_FPREGS+96)(a0))
+       EX(lwc1 $f13,(SC_FPREGS+104)(a0))
+       EX(lwc1 $f14,(SC_FPREGS+112)(a0))
+       EX(lwc1 $f15,(SC_FPREGS+120)(a0))
+       EX(lwc1 $f16,(SC_FPREGS+128)(a0))
+       EX(lwc1 $f17,(SC_FPREGS+136)(a0))
+       EX(lwc1 $f18,(SC_FPREGS+144)(a0))
+       EX(lwc1 $f19,(SC_FPREGS+152)(a0))
+       EX(lwc1 $f20,(SC_FPREGS+160)(a0))
+       EX(lwc1 $f21,(SC_FPREGS+168)(a0))
+       EX(lwc1 $f22,(SC_FPREGS+176)(a0))
+       EX(lwc1 $f23,(SC_FPREGS+184)(a0))
+       EX(lwc1 $f24,(SC_FPREGS+192)(a0))
+       EX(lwc1 $f25,(SC_FPREGS+200)(a0))
+       EX(lwc1 $f26,(SC_FPREGS+208)(a0))
+       EX(lwc1 $f27,(SC_FPREGS+216)(a0))
+       EX(lwc1 $f28,(SC_FPREGS+224)(a0))
+       EX(lwc1 $f29,(SC_FPREGS+232)(a0))
+       EX(lwc1 $f30,(SC_FPREGS+240)(a0))
+       EX(lwc1 $f31,(SC_FPREGS+248)(a0))
        jr      ra
         ctc1   t0,fcr31
        END(_restore_fp_context)
index 8d32d5a..5266c6e 100644 (file)
@@ -42,7 +42,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti, int usedfpu)
  */
 LEAF(resume)
        mfc0    t1, CP0_STATUS
index 8decdfa..5e51219 100644 (file)
@@ -40,7 +40,7 @@
 
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                     struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti, int usedfpu)
  */
        .align  5
        LEAF(resume)
@@ -53,7 +53,7 @@
         * check if we need to save FPU registers
         */
 
-       beqz    a3, 1f
+       beqz    a3, 1f
 
        PTR_L   t3, TASK_THREAD_INFO(a0)
        /*
index 804ebb2..43d2d78 100644 (file)
@@ -33,7 +33,7 @@ process_entry:
        b               process_entry
 
 1:
-       /* indirection page, update s0  */
+       /* indirection page, update s0  */
        and             s3, s2, 0x2
        beq             s3, zero, 1f
        and             s0, s2, ~0x2
@@ -69,7 +69,7 @@ done:
           of kexec_flag.  */
 
        bal             1f
- 1:    move            t1,ra;
+ 1:    move            t1,ra;
        PTR_LA          t2,1b
        PTR_LA          t0,kexec_flag
        PTR_SUB         t0,t0,t2;
@@ -158,10 +158,10 @@ arg3:     PTR             0x0
  */
 secondary_kexec_args:
        EXPORT(secondary_kexec_args)
-s_arg0:        PTR             0x0
-s_arg1:        PTR             0x0
-s_arg2:        PTR             0x0
-s_arg3:        PTR             0x0
+s_arg0: PTR            0x0
+s_arg1: PTR            0x0
+s_arg2: PTR            0x0
+s_arg3: PTR            0x0
        .size   secondary_kexec_args,PTRSIZE*4
 kexec_flag:
        LONG            0x1
index b8c18dc..93c070b 100644 (file)
@@ -252,12 +252,12 @@ int rtlx_release(int index)
 
 unsigned int rtlx_read_poll(int index, int can_sleep)
 {
-       struct rtlx_channel *chan;
+       struct rtlx_channel *chan;
 
-       if (rtlx == NULL)
-               return 0;
+       if (rtlx == NULL)
+               return 0;
 
-       chan = &rtlx->channel[index];
+       chan = &rtlx->channel[index];
 
        /* data available to read? */
        if (chan->lx_read == chan->lx_write) {
@@ -399,11 +399,9 @@ static int file_release(struct inode *inode, struct file *filp)
 
 static unsigned int file_poll(struct file *file, poll_table * wait)
 {
-       int minor;
+       int minor = iminor(file_inode(file));
        unsigned int mask = 0;
 
-       minor = iminor(file->f_path.dentry->d_inode);
-
        poll_wait(file, &channel_wqs[minor].rt_queue, wait);
        poll_wait(file, &channel_wqs[minor].lx_queue, wait);
 
@@ -424,7 +422,7 @@ static unsigned int file_poll(struct file *file, poll_table * wait)
 static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
                         loff_t * ppos)
 {
-       int minor = iminor(file->f_path.dentry->d_inode);
+       int minor = iminor(file_inode(file));
 
        /* data available? */
        if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
@@ -437,11 +435,8 @@ static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
 static ssize_t file_write(struct file *file, const char __user * buffer,
                          size_t count, loff_t * ppos)
 {
-       int minor;
-       struct rtlx_channel *rt;
-
-       minor = iminor(file->f_path.dentry->d_inode);
-       rt = &rtlx->channel[minor];
+       int minor = iminor(file_inode(file));
+       struct rtlx_channel *rt = &rtlx->channel[minor];
 
        /* any space left... */
        if (!rtlx_write_poll(minor)) {
@@ -451,8 +446,8 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
                        return -EAGAIN;
 
                __wait_event_interruptible(channel_wqs[minor].rt_queue,
-                                          rtlx_write_poll(minor),
-                                          ret);
+                                          rtlx_write_poll(minor),
+                                          ret);
                if (ret)
                        return ret;
        }
@@ -462,11 +457,11 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
 
 static const struct file_operations rtlx_fops = {
        .owner =   THIS_MODULE,
-       .open =    file_open,
+       .open =    file_open,
        .release = file_release,
        .write =   file_write,
-       .read =    file_read,
-       .poll =    file_poll,
+       .read =    file_read,
+       .poll =    file_poll,
        .llseek =  noop_llseek,
 };
 
index 80ff942..9ea2964 100644 (file)
@@ -24,7 +24,7 @@
 /* Highest syscall used of any syscall flavour */
 #define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls
 
-       .align  5
+       .align  5
 NESTED(handle_sys, PT_SIZE, sp)
        .set    noat
        SAVE_SOME
@@ -54,7 +54,7 @@ stack_done:
        lw      t0, TI_FLAGS($28)       # syscall tracing enabled?
        li      t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
        and     t0, t1
-       bnez    t0, syscall_trace_entry # -> yes
+       bnez    t0, syscall_trace_entry # -> yes
 
        jalr    t2                      # Do The Real Thing (TM)
 
@@ -126,8 +126,8 @@ stackargs:
        la      t1, 5f                  # load up to 3 arguments
        subu    t1, t3
 1:     lw      t5, 16(t0)              # argument #5 from usp
-       .set    push
-       .set    noreorder
+       .set    push
+       .set    noreorder
        .set    nomacro
        jr      t1
         addiu  t1, 6f - 5f
@@ -205,7 +205,7 @@ illegal_syscall:
        jr      t2
        /* Unreached */
 
-einval:        li      v0, -ENOSYS
+einval: li     v0, -ENOSYS
        jr      ra
        END(sys_syscall)
 
@@ -354,7 +354,7 @@ einval:     li      v0, -ENOSYS
        sys     sys_ni_syscall          0       /* was create_module */
        sys     sys_init_module         5
        sys     sys_delete_module       1
-       sys     sys_ni_syscall          0       /* 4130 was get_kernel_syms */
+       sys     sys_ni_syscall          0       /* 4130 was get_kernel_syms */
        sys     sys_quotactl            4
        sys     sys_getpgid             1
        sys     sys_fchdir              1
@@ -589,7 +589,7 @@ einval:     li      v0, -ENOSYS
        /* We pre-compute the number of _instruction_ bytes needed to
           load or store the arguments 6-8. Negative values are ignored. */
 
-       .macro  sys function, nargs
+       .macro  sys function, nargs
        PTR     \function
        LONG    (\nargs << 2) - (5 << 2)
        .endm
index 9444ad9..36cfd40 100644 (file)
@@ -25,7 +25,7 @@
 #define handle_sys64 handle_sys
 #endif
 
-       .align  5
+       .align  5
 NESTED(handle_sys64, PT_SIZE, sp)
 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
        /*
@@ -40,7 +40,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
 #endif
 
        dsubu   t0, v0, __NR_64_Linux   # check syscall number
-       sltiu   t0, t0, __NR_64_Linux_syscalls + 1
+       sltiu   t0, t0, __NR_64_Linux_syscalls + 1
 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
        ld      t1, PT_EPC(sp)          # skip syscall on return
        daddiu  t1, 4                   # skip to next instruction
@@ -290,7 +290,7 @@ sys_call_table:
        PTR     sys_quotactl
        PTR     sys_ni_syscall                  /* was nfsservctl */
        PTR     sys_ni_syscall                  /* res. for getpmsg */
-       PTR     sys_ni_syscall                  /* 5175  for putpmsg */
+       PTR     sys_ni_syscall                  /* 5175  for putpmsg */
        PTR     sys_ni_syscall                  /* res. for afs_syscall */
        PTR     sys_ni_syscall                  /* res. for security */
        PTR     sys_gettid
index 3b18a8e..693d60b 100644 (file)
@@ -22,7 +22,7 @@
 #define handle_sysn32 handle_sys
 #endif
 
-       .align  5
+       .align  5
 NESTED(handle_sysn32, PT_SIZE, sp)
 #ifndef CONFIG_MIPS32_O32
        .set    noat
@@ -33,7 +33,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
 #endif
 
        dsubu   t0, v0, __NR_N32_Linux  # check syscall number
-       sltiu   t0, t0, __NR_N32_Linux_syscalls + 1
+       sltiu   t0, t0, __NR_N32_Linux_syscalls + 1
 
 #ifndef CONFIG_MIPS32_O32
        ld      t1, PT_EPC(sp)          # skip syscall on return
@@ -279,7 +279,7 @@ EXPORT(sysn32_call_table)
        PTR     sys_quotactl
        PTR     sys_ni_syscall                  /* was nfsservctl */
        PTR     sys_ni_syscall                  /* res. for getpmsg */
-       PTR     sys_ni_syscall                  /* 6175  for putpmsg */
+       PTR     sys_ni_syscall                  /* 6175  for putpmsg */
        PTR     sys_ni_syscall                  /* res. for afs_syscall */
        PTR     sys_ni_syscall                  /* res. for security */
        PTR     sys_gettid
@@ -402,8 +402,8 @@ EXPORT(sysn32_call_table)
        PTR     compat_sys_rt_tgsigqueueinfo    /* 6295 */
        PTR     sys_perf_event_open
        PTR     sys_accept4
-       PTR     compat_sys_recvmmsg
-       PTR     sys_getdents64
+       PTR     compat_sys_recvmmsg
+       PTR     sys_getdents64
        PTR     sys_fanotify_init               /* 6300 */
        PTR     sys_fanotify_mark
        PTR     sys_prlimit64
index 063cd0d..af8887f 100644 (file)
@@ -10,7 +10,7 @@
  *
  * Hairy, the userspace application uses a different argument passing
  * convention than the kernel, so we have to translate things from o32
- * to ABI64 calling convention.  64-bit syscalls are also processed
+ * to ABI64 calling convention.         64-bit syscalls are also processed
  * here for now.
  */
 #include <linux/errno.h>
@@ -24,7 +24,7 @@
 #include <asm/unistd.h>
 #include <asm/sysmips.h>
 
-       .align  5
+       .align  5
 NESTED(handle_sys, PT_SIZE, sp)
        .set    noat
        SAVE_SOME
@@ -185,7 +185,7 @@ LEAF(sys32_syscall)
        jr      t2
        /* Unreached */
 
-einval:        li      v0, -ENOSYS
+einval: li     v0, -ENOSYS
        jr      ra
        END(sys32_syscall)
 
@@ -284,8 +284,8 @@ sys_call_table:
        PTR     compat_sys_old_readdir
        PTR     sys_mips_mmap                   /* 4090 */
        PTR     sys_munmap
-       PTR     sys_truncate
-       PTR     sys_ftruncate
+       PTR     compat_sys_truncate
+       PTR     compat_sys_ftruncate
        PTR     sys_fchmod
        PTR     sys_fchown                      /* 4095 */
        PTR     sys_getpriority
@@ -329,7 +329,7 @@ sys_call_table:
        PTR     sys_bdflush
        PTR     sys_sysfs                       /* 4135 */
        PTR     sys_32_personality
-       PTR     sys_ni_syscall                  /* for afs_syscall */
+       PTR     sys_ni_syscall                  /* for afs_syscall */
        PTR     sys_setfsuid
        PTR     sys_setfsgid
        PTR     sys_32_llseek                   /* 4140 */
@@ -352,12 +352,12 @@ sys_call_table:
        PTR     sys_munlockall
        PTR     sys_sched_setparam
        PTR     sys_sched_getparam
-       PTR     sys_sched_setscheduler          /* 4160 */
+       PTR     sys_sched_setscheduler          /* 4160 */
        PTR     sys_sched_getscheduler
        PTR     sys_sched_yield
        PTR     sys_sched_get_priority_max
        PTR     sys_sched_get_priority_min
-       PTR     compat_sys_sched_rr_get_interval        /* 4165 */
+       PTR     compat_sys_sched_rr_get_interval        /* 4165 */
        PTR     compat_sys_nanosleep
        PTR     sys_mremap
        PTR     sys_accept
@@ -387,7 +387,7 @@ sys_call_table:
        PTR     sys_prctl
        PTR     sys32_rt_sigreturn
        PTR     compat_sys_rt_sigaction
-       PTR     compat_sys_rt_sigprocmask       /* 4195 */
+       PTR     compat_sys_rt_sigprocmask       /* 4195 */
        PTR     compat_sys_rt_sigpending
        PTR     compat_sys_rt_sigtimedwait
        PTR     compat_sys_rt_sigqueueinfo
index 8c41187..4c774d5 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
  * Copyright (C) 1996 Stoned Elipot
  * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2002, 2007         Maciej W. Rozycki
  */
 #include <linux/init.h>
 #include <linux/ioport.h>
@@ -449,7 +449,7 @@ static void __init bootmem_init(void)
  * At this stage the bootmem allocator is ready to use.
  *
  * NOTE: historically plat_mem_setup did the entire platform initialization.
- *       This was rather impractical because it meant plat_mem_setup had to
+ *      This was rather impractical because it meant plat_mem_setup had to
  * get away without any kind of memory allocator.  To keep old code from
  * breaking plat_setup was just renamed to plat_setup and a second platform
  * initialization hook for anything else was introduced.
@@ -469,7 +469,7 @@ static int __init early_parse_mem(char *p)
        if (usermem == 0) {
                boot_mem_map.nr_map = 0;
                usermem = 1;
-       }
+       }
        start = 0;
        size = memparse(p, &p);
        if (*p == '@')
@@ -480,34 +480,75 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
-static void __init arch_mem_init(char **cmdline_p)
+#ifdef CONFIG_PROC_VMCORE
+unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
+static int __init early_parse_elfcorehdr(char *p)
+{
+       int i;
+
+       setup_elfcorehdr = memparse(p, &p);
+
+       for (i = 0; i < boot_mem_map.nr_map; i++) {
+               unsigned long start = boot_mem_map.map[i].addr;
+               unsigned long end = (boot_mem_map.map[i].addr +
+                                    boot_mem_map.map[i].size);
+               if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
+                       /*
+                        * Reserve from the elf core header to the end of
+                        * the memory segment, that should all be kdump
+                        * reserved memory.
+                        */
+                       setup_elfcorehdr_size = end - setup_elfcorehdr;
+                       break;
+               }
+       }
+       /*
+        * If we don't find it in the memory map, then we shouldn't
+        * have to worry about it, as the new kernel won't use it.
+        */
+       return 0;
+}
+early_param("elfcorehdr", early_parse_elfcorehdr);
+#endif
+
+static void __init arch_mem_addpart(phys_t mem, phys_t end, int type)
 {
-       phys_t init_mem, init_end, init_size;
+       phys_t size;
+       int i;
+
+       size = end - mem;
+       if (!size)
+               return;
+
+       /* Make sure it is in the boot_mem_map */
+       for (i = 0; i < boot_mem_map.nr_map; i++) {
+               if (mem >= boot_mem_map.map[i].addr &&
+                   mem < (boot_mem_map.map[i].addr +
+                          boot_mem_map.map[i].size))
+                       return;
+       }
+       add_memory_region(mem, size, type);
+}
 
+static void __init arch_mem_init(char **cmdline_p)
+{
        extern void plat_mem_setup(void);
 
        /* call board setup routine */
        plat_mem_setup();
 
-       init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT;
-       init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT;
-       init_size = init_end - init_mem;
-       if (init_size) {
-               /* Make sure it is in the boot_mem_map */
-               int i, found;
-               found = 0;
-               for (i = 0; i < boot_mem_map.nr_map; i++) {
-                       if (init_mem >= boot_mem_map.map[i].addr &&
-                           init_mem < (boot_mem_map.map[i].addr +
-                                       boot_mem_map.map[i].size)) {
-                               found = 1;
-                               break;
-                       }
-               }
-               if (!found)
-                       add_memory_region(init_mem, init_size,
-                                         BOOT_MEM_INIT_RAM);
-       }
+       /*
+        * Make sure all kernel memory is in the maps.  The "UP" and
+        * "DOWN" are opposite for initdata since if it crosses over
+        * into another memory section you don't want that to be
+        * freed when the initdata is freed.
+        */
+       arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+                        PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+                        BOOT_MEM_RAM);
+       arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+                        PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+                        BOOT_MEM_INIT_RAM);
 
        pr_info("Determined physical RAM map:\n");
        print_memory_map();
@@ -537,6 +578,14 @@ static void __init arch_mem_init(char **cmdline_p)
        }
 
        bootmem_init();
+#ifdef CONFIG_PROC_VMCORE
+       if (setup_elfcorehdr && setup_elfcorehdr_size) {
+               printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
+                      setup_elfcorehdr, setup_elfcorehdr_size);
+               reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
+                               BOOTMEM_DEFAULT);
+       }
+#endif
 #ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                reserve_bootmem(crashk_res.start,
@@ -571,7 +620,7 @@ static void __init mips_parse_crashkernel(void)
                return;
 
        crashk_res.start = crash_base;
-       crashk_res.end   = crash_base + crash_size - 1;
+       crashk_res.end   = crash_base + crash_size - 1;
 }
 
 static void __init request_crashkernel(struct resource *res)
@@ -585,7 +634,7 @@ static void __init request_crashkernel(struct resource *res)
                                crashk_res.start + 1) >> 20),
                        (unsigned long)(crashk_res.start  >> 20));
 }
-#else /* !defined(CONFIG_KEXEC)  */
+#else /* !defined(CONFIG_KEXEC)         */
 static void __init mips_parse_crashkernel(void)
 {
 }
index 95b019d..b5e88fd 100644 (file)
@@ -412,7 +412,7 @@ give_sigsegv:
 #endif
 
 static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
-                         struct pt_regs *regs, int signr, sigset_t *set,
+                         struct pt_regs *regs, int signr, sigset_t *set,
                          siginfo_t *info)
 {
        struct rt_sigframe __user *frame;
@@ -425,7 +425,7 @@ static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
        /* Create siginfo.  */
        err |= copy_siginfo_to_user(&frame->rs_info, info);
 
-       /* Create the ucontext.  */
+       /* Create the ucontext.  */
        err |= __put_user(0, &frame->rs_uc.uc_flags);
        err |= __put_user(NULL, &frame->rs_uc.uc_link);
        err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -468,7 +468,7 @@ struct mips_abi mips_abi = {
        .setup_frame    = setup_frame,
        .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
 #endif
-       .setup_rt_frame = setup_rt_frame,
+       .setup_rt_frame = setup_rt_frame,
        .rt_signal_return_offset =
                offsetof(struct mips_vdso, rt_signal_trampoline),
        .restart        = __NR_restart_syscall
@@ -500,7 +500,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
                        regs->cp0_epc -= 4;
                }
 
-               regs->regs[0] = 0;              /* Don't deal with this again.  */
+               regs->regs[0] = 0;              /* Don't deal with this again.  */
        }
 
        if (sig_uses_siginfo(ka))
@@ -524,7 +524,7 @@ static void do_signal(struct pt_regs *regs)
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
-               /* Whee!  Actually deliver the signal.  */
+               /* Whee!  Actually deliver the signal.  */
                handle_signal(signr, &info, &ka, regs);
                return;
        }
@@ -545,7 +545,7 @@ static void do_signal(struct pt_regs *regs)
                        regs->cp0_epc -= 4;
                        break;
                }
-               regs->regs[0] = 0;      /* Don't deal with this again.  */
+               regs->regs[0] = 0;      /* Don't deal with this again.  */
        }
 
        /*
index ad7c2be..57de8b7 100644 (file)
@@ -48,7 +48,7 @@ extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user
 /*
  * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
  */
-#define __NR_O32_restart_syscall        4253
+#define __NR_O32_restart_syscall       4253
 
 /* 32-bit compatibility types */
 
@@ -56,11 +56,11 @@ typedef unsigned int __sighandler32_t;
 typedef void (*vfptr_t)(void);
 
 struct ucontext32 {
-       u32                 uc_flags;
-       s32                 uc_link;
+       u32                 uc_flags;
+       s32                 uc_link;
        compat_stack_t      uc_stack;
        struct sigcontext32 uc_mcontext;
-       compat_sigset_t     uc_sigmask;   /* mask last for extensibility */
+       compat_sigset_t     uc_sigmask;   /* mask last for extensibility */
 };
 
 struct sigframe32 {
@@ -302,7 +302,7 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *,
                        return -EFAULT;
                err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
                err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
-                                 &oact->sa_handler);
+                                 &oact->sa_handler);
                err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
                err |= __put_user(0, &oact->sa_mask.sig[1]);
                err |= __put_user(0, &oact->sa_mask.sig[2]);
@@ -507,7 +507,7 @@ static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka,
        /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
        err |= copy_siginfo_to_user32(&frame->rs_info, info);
 
-       /* Create the ucontext.  */
+       /* Create the ucontext.  */
        err |= __put_user(0, &frame->rs_uc.uc_flags);
        err |= __put_user(0, &frame->rs_uc.uc_link);
        err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -552,7 +552,7 @@ struct mips_abi mips_abi_32 = {
        .setup_frame    = setup_frame_32,
        .signal_return_offset =
                offsetof(struct mips_vdso, o32_signal_trampoline),
-       .setup_rt_frame = setup_rt_frame_32,
+       .setup_rt_frame = setup_rt_frame_32,
        .rt_signal_return_offset =
                offsetof(struct mips_vdso, o32_rt_signal_trampoline),
        .restart        = __NR_O32_restart_syscall
index 5f4ef2a..b2241bb 100644 (file)
@@ -51,11 +51,11 @@ extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
 extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
 
 struct ucontextn32 {
-       u32                 uc_flags;
-       s32                 uc_link;
+       u32                 uc_flags;
+       s32                 uc_link;
        compat_stack_t      uc_stack;
        struct sigcontext   uc_mcontext;
-       compat_sigset_t     uc_sigmask;   /* mask last for extensibility */
+       compat_sigset_t     uc_sigmask;   /* mask last for extensibility */
 };
 
 struct rt_sigframe_n32 {
@@ -115,7 +115,7 @@ static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka,
        /* Create siginfo.  */
        err |= copy_siginfo_to_user32(&frame->rs_info, info);
 
-       /* Create the ucontext.  */
+       /* Create the ucontext.  */
        err |= __put_user(0, &frame->rs_uc.uc_flags);
        err |= __put_user(0, &frame->rs_uc.uc_link);
        err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -154,7 +154,7 @@ give_sigsegv:
 }
 
 struct mips_abi mips_abi_n32 = {
-       .setup_rt_frame = setup_rt_frame_n32,
+       .setup_rt_frame = setup_rt_frame_n32,
        .rt_signal_return_offset =
                offsetof(struct mips_vdso, n32_rt_signal_trampoline),
        .restart        = __NR_N32_restart_syscall
index 06cd0c6..c2e5d74 100644 (file)
@@ -172,7 +172,7 @@ void __init cmp_smp_setup(void)
                if (amon_cpu_avail(i)) {
                        set_cpu_possible(i, true);
                        __cpu_number_map[i]     = ++ncpu;
-                       __cpu_logical_map[ncpu] = i;
+                       __cpu_logical_map[ncpu] = i;
                }
        }
 
index 2defa2b..bfede06 100644 (file)
@@ -71,7 +71,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
                /* Record this as available CPU */
                set_cpu_possible(tc, true);
                __cpu_number_map[tc]    = ++ncpu;
-               __cpu_logical_map[ncpu] = tc;
+               __cpu_logical_map[ncpu] = tc;
        }
 
        /* Disable multi-threading with TC's */
@@ -215,7 +215,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
        write_tc_gpr_gp((unsigned long)gp);
 
        flush_icache_range((unsigned long)gp,
-                          (unsigned long)(gp + sizeof(struct thread_info)));
+                          (unsigned long)(gp + sizeof(struct thread_info)));
 
        /* finally out of configuration and into chaos */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);
index 20938a4..76016ac 100644 (file)
@@ -65,7 +65,7 @@ FEXPORT(__smtc_ipi_vector)
 1:
        /*
         * The IPI sender has put some information on the anticipated
-        * kernel stack frame.  If we were in user mode, this will be
+        * kernel stack frame.  If we were in user mode, this will be
         * built above the saved kernel SP.  If we were already in the
         * kernel, it will be built above the current CPU SP.
         *
index 145771c..aee7c81 100644 (file)
@@ -35,7 +35,7 @@ static struct proc_dir_entry *smtc_stats;
 atomic_t smtc_fpu_recoveries;
 
 static int proc_read_smtc(char *page, char **start, off_t off,
-                          int count, int *eof, void *data)
+                         int count, int *eof, void *data)
 {
        int totalen = 0;
        int len;
@@ -68,7 +68,7 @@ static int proc_read_smtc(char *page, char **start, off_t off,
                page += len;
        }
        len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
-                     atomic_read(&smtc_fpu_recoveries));
+                     atomic_read(&smtc_fpu_recoveries));
        totalen += len;
        page += len;
 
@@ -87,5 +87,5 @@ void init_smtc_stats(void)
        atomic_set(&smtc_fpu_recoveries, 0);
 
        smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
-                                           proc_read_smtc, NULL);
+                                           proc_read_smtc, NULL);
 }
index 1d47843..7186222 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/addrspace.h>
 #include <asm/smtc.h>
 #include <asm/smtc_proc.h>
+#include <asm/setup.h>
 
 /*
  * SMTC Kernel needs to manipulate low-level CPU interrupt mask
@@ -235,7 +236,7 @@ static void smtc_configure_tlb(void)
                    mips_ihb();
                    /* No need to un-Halt - that happens later anyway */
                    for (i=0; i < vpes; i++) {
-                       write_tc_c0_tcbind(i);
+                       write_tc_c0_tcbind(i);
                        /*
                         * To be 100% sure we're really getting the right
                         * information, we exit the configuration state
@@ -286,7 +287,7 @@ static void smtc_configure_tlb(void)
 
 /*
  * Incrementally build the CPU map out of constituent MIPS MT cores,
- * using the specified available VPEs and TCs.  Plaform code needs
+ * using the specified available VPEs and TCs. Plaform code needs
  * to ensure that each MIPS MT core invokes this routine on reset,
  * one at a time(!).
  *
@@ -348,7 +349,7 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
        {
                /*
                 * FIXME: Multi-core SMTC hasn't been tested and the
-                *        maximum number of VPEs may change.
+                *        maximum number of VPEs may change.
                 */
                cp1contexts[0] = smtc_nconf1[0] - 1;
                cp1contexts[1] = smtc_nconf1[1];
@@ -761,9 +762,9 @@ void smtc_forward_irq(struct irq_data *d)
         * mask has been purged of bits corresponding to nonexistent and
         * offline "CPUs", and to TCs bound to VPEs other than the VPE
         * connected to the physical interrupt input for the interrupt
-        * in question.  Otherwise we have a nasty problem with interrupt
+        * in question.  Otherwise we have a nasty problem with interrupt
         * mask management.  This is best handled in non-performance-critical
-        * platform IRQ affinity setting code,  to minimize interrupt-time
+        * platform IRQ affinity setting code,  to minimize interrupt-time
         * checks.
         */
 
@@ -899,10 +900,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
                mips_ihb();
 
                /*
-                * Inspect TCStatus - if IXMT is set, we have to queue
+                * Inspect TCStatus - if IXMT is set, we have to queue
                 * a message. Otherwise, we set up the "interrupt"
                 * of the other TC
-                */
+                */
                tcstatus = read_tc_c0_tcstatus();
 
                if ((tcstatus & TCSTATUS_IXMT) != 0) {
@@ -964,7 +965,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
         * CU bit of Status is indicator that TC was
         * already running on a kernel stack...
         */
-       if (tcstatus & ST0_CU0)  {
+       if (tcstatus & ST0_CU0)  {
                /* Note that this "- 1" is pointer arithmetic */
                kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
        } else {
@@ -1288,7 +1289,7 @@ void smtc_idle_loop_hook(void)
                        for (tc = 0; tc < hook_ntcs; tc++) {
                                tcnoprog[tc] = 0;
                                clock_hang_reported[tc] = 0;
-                       }
+                       }
                        for (vpe = 0; vpe < 2; vpe++)
                                for (im = 0; im < 8; im++)
                                        imstuckcount[vpe][im] = 0;
@@ -1485,7 +1486,7 @@ static int halt_state_save[NR_CPUS];
 
 /*
  * To really, really be sure that nothing is being done
- * by other TCs, halt them all.  This code assumes that
+ * by other TCs, halt them all.         This code assumes that
  * a DVPE has already been done, so while their Halted
  * state is theoretically architecturally unstable, in
  * practice, it's not going to change while we're looking
index 7f1eca3..1ff43d5 100644 (file)
@@ -25,7 +25,7 @@ static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
 static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
 static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
 
-#define COUNTON        100
+#define COUNTON 100
 #define NR_LOOPS 5
 
 void __cpuinit synchronise_count_master(int cpu)
index b32466a..b79d13f 100644 (file)
@@ -41,9 +41,9 @@
 
 /*
  * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
- * convention.  It returns results in registers $v0 / $v1 which means there
+ * convention. It returns results in registers $v0 / $v1 which means there
  * is no need for it to do verify the validity of a userspace pointer
- * argument.  Historically that used to be expensive in Linux.  These days
+ * argument.  Historically that used to be expensive in Linux. These days
  * the performance advantage is negligible.
  */
 asmlinkage int sysm_pipe(void)
index 99d73b7..9d686bf 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Common time service routines for MIPS machines.
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
@@ -62,8 +62,8 @@ EXPORT_SYMBOL(perf_irq);
  * time_init() - it does the following things.
  *
  * 1) plat_time_init() -
- *     a) (optional) set up RTC routines,
- *      b) (optional) calibrate and set the mips_hpt_frequency
+ *     a) (optional) set up RTC routines,
+ *     b) (optional) calibrate and set the mips_hpt_frequency
  *         (only needed if you intended to use cpu counter as timer interrupt
  *          source)
  * 2) calculate a couple of cached variables for later usage
@@ -75,7 +75,7 @@ unsigned int mips_hpt_frequency;
  * This function exists in order to cause an error due to a duplicate
  * definition if platform code should have its own implementation.  The hook
  * to use instead is plat_time_init.  plat_time_init does not receive the
- * irqaction pointer argument anymore.  This is because any function which
+ * irqaction pointer argument anymore. This is because any function which
  * initializes an interrupt timer now takes care of its own request_irq rsp.
  * setup_irq calls and each clock_event_device should use its own
  * struct irqrequest.
@@ -93,7 +93,7 @@ static __init int cpu_has_mfc0_count_bug(void)
        case CPU_R4000MC:
                /*
                 * V3.0 is documented as suffering from the mfc0 from count bug.
-                * Afaik this is the last version of the R4000.  Later versions
+                * Afaik this is the last version of the R4000.  Later versions
                 * were marketed as R4400.
                 */
                return 1;
index 9007966..a200b5b 100644 (file)
@@ -164,7 +164,7 @@ static void show_stacktrace(struct task_struct *task,
        i = 0;
        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % (64 / field)) == 0))
-                       printk("\n       ");
+                       printk("\n       ");
                if (i > 39) {
                        printk(" ...");
                        break;
@@ -279,7 +279,7 @@ static void __show_regs(const struct pt_regs *regs)
        printk("ra    : %0*lx %pS\n", field, regs->regs[31],
               (void *) regs->regs[31]);
 
-       printk("Status: %08x    ", (uint32_t) regs->cp0_status);
+       printk("Status: %08x    ", (uint32_t) regs->cp0_status);
 
        if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
                if (regs->cp0_status & ST0_KUO)
@@ -441,7 +441,7 @@ asmlinkage void do_be(struct pt_regs *regs)
        int data = regs->cp0_cause & 4;
        int action = MIPS_BE_FATAL;
 
-       /* XXX For now.  Fixme, this searches the wrong table ...  */
+       /* XXX For now.  Fixme, this searches the wrong table ...  */
        if (data && !user_mode(regs))
                fixup = search_dbe_tables(exception_epc(regs));
 
@@ -518,7 +518,7 @@ static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
        offset >>= 16;
 
        vaddr = (unsigned long __user *)
-               ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+               ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 
        if ((unsigned long)vaddr & 3)
                return SIGBUS;
@@ -558,7 +558,7 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
        offset >>= 16;
 
        vaddr = (unsigned long __user *)
-               ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+               ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
        reg = (opcode & RT) >> 16;
 
        if ((unsigned long)vaddr & 3)
@@ -739,7 +739,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 
                /* Restore the hardware register state */
-               own_fpu(1);     /* Using the FPU again.  */
+               own_fpu(1);     /* Using the FPU again.  */
 
                /* If something went wrong, signal */
                process_fpemu_return(sig, fault_addr);
@@ -966,7 +966,7 @@ int cu2_notifier_call_chain(unsigned long val, void *v)
 }
 
 static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
-        void *data)
+       void *data)
 {
        struct pt_regs *regs = data;
 
@@ -974,7 +974,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
        default:
                die_if_kernel("Unhandled kernel unaligned access or invalid "
                              "instruction", regs);
-               /* Fall through  */
+               /* Fall through  */
 
        case CU2_EXCEPTION:
                force_sig(SIGILL, current);
@@ -1029,10 +1029,10 @@ asmlinkage void do_cpu(struct pt_regs *regs)
                /*
                 * Old (MIPS I and MIPS II) processors will set this code
                 * for COP1X opcode instructions that replaced the original
-                * COP3 space.  We don't limit COP1 space instructions in
+                * COP3 space.  We don't limit COP1 space instructions in
                 * the emulator according to the CPU ISA, so we want to
                 * treat COP1X instructions consistently regardless of which
-                * code the CPU chose.  Therefore we redirect this trap to
+                * code the CPU chose.  Therefore we redirect this trap to
                 * the FP emulator too.
                 *
                 * Then some newer FPU-less processors use this code
@@ -1044,9 +1044,9 @@ asmlinkage void do_cpu(struct pt_regs *regs)
                /* Fall through.  */
 
        case 1:
-               if (used_math())        /* Using the FPU again.  */
+               if (used_math())        /* Using the FPU again.  */
                        own_fpu(1);
-               else {                  /* First time FPU user.  */
+               else {                  /* First time FPU user.  */
                        init_fpu();
                        set_used_math();
                }
@@ -1114,7 +1114,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
        show_regs(regs);
 
        if (multi_match) {
-               printk("Index   : %0x\n", read_c0_index());
+               printk("Index   : %0x\n", read_c0_index());
                printk("Pagemask: %0x\n", read_c0_pagemask());
                printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
                printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
@@ -1181,7 +1181,7 @@ asmlinkage void do_dsp(struct pt_regs *regs)
 asmlinkage void do_reserved(struct pt_regs *regs)
 {
        /*
-        * Game over - no way to handle this if it ever occurs.  Most probably
+        * Game over - no way to handle this if it ever occurs.  Most probably
         * caused by a new unknown cpu type or after another deadly
         * hard/software error.
         */
@@ -1705,7 +1705,7 @@ void __init trap_init(void)
 
 #if defined(CONFIG_KGDB)
        if (kgdb_early_setup)
-               return; /* Already done */
+               return; /* Already done */
 #endif
 
        if (cpu_has_veic || cpu_has_vint) {
@@ -1799,7 +1799,7 @@ void __init trap_init(void)
                 * The R6000 is the only R-series CPU that features a machine
                 * check exception (similar to the R4000 cache error) and
                 * unaligned ldc1/sdc1 exception.  The handlers have not been
-                * written yet.  Well, anyway there is no R6000 machine on the
+                * written yet.  Well, anyway there is no R6000 machine on the
                 * current list of targets for Linux/MIPS.
                 * (Duh, crap, there is someone with a triple R6k machine)
                 */
index 9c58bdf..6087a54 100644 (file)
  *
  * For now I enable fixing of address errors by default to make life easier.
  * I however intend to disable this somewhen in the future when the alignment
- * problems with user programs have been fixed.  For programmers this is the
+ * problems with user programs have been fixed.         For programmers this is the
  * right way to go.
  *
  * Fixing address errors is a per process option.  The option is inherited
- * across fork(2) and execve(2) calls.  If you really want to use the
+ * across fork(2) and execve(2) calls. If you really want to use the
  * option in your user programs - I discourage the use of the software
  * emulation strongly - use the following code in your userland stuff:
  *
  * #include <sys/sysmips.h>
  *
  * struct foo {
- *         unsigned char bar[8];
+ *        unsigned char bar[8];
  * };
  *
  * main(int argc, char *argv[])
  * {
- *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
- *         unsigned int *p = (unsigned int *) (x.bar + 3);
- *         int i;
+ *        struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
+ *        unsigned int *p = (unsigned int *) (x.bar + 3);
+ *        int i;
  *
- *         if (argc > 1)
- *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
+ *        if (argc > 1)
+ *                sysmips(MIPS_FIXADE, atoi(argv[1]));
  *
- *         printf("*p = %08lx\n", *p);
+ *        printf("*p = %08lx\n", *p);
  *
- *         *p = 0xdeadface;
+ *        *p = 0xdeadface;
  *
- *         for(i = 0; i <= 7; i++)
- *         printf("%02x ", x.bar[i]);
- *         printf("\n");
+ *        for(i = 0; i <= 7; i++)
+ *        printf("%02x ", x.bar[i]);
+ *        printf("\n");
  * }
  *
  * Coprocessor loads are not supported; I think this case is unimportant
  * in the practice.
  *
  * TODO: Handle ndc (attempted store to doubleword in uncached memory)
- *       exception for the R6000.
- *       A store crossing a page boundary might be executed only partially.
- *       Undo the partial store in this case.
+ *      exception for the R6000.
+ *      A store crossing a page boundary might be executed only partially.
+ *      Undo the partial store in this case.
  */
 #include <linux/mm.h>
 #include <linux/signal.h>
@@ -86,7 +86,7 @@
 #include <asm/inst.h>
 #include <asm/uaccess.h>
 
-#define STR(x)  __STR(x)
+#define STR(x) __STR(x)
 #define __STR(x)  #x
 
 enum {
index 0a4336b..05826d2 100644 (file)
@@ -22,12 +22,12 @@ PHDRS {
 
 #ifdef CONFIG_32BIT
        #ifdef CONFIG_CPU_LITTLE_ENDIAN
-               jiffies  = jiffies_64;
+               jiffies  = jiffies_64;
        #else
-               jiffies  = jiffies_64 + 4;
+               jiffies  = jiffies_64 + 4;
        #endif
 #else
-       jiffies  = jiffies_64;
+       jiffies  = jiffies_64;
 #endif
 
 SECTIONS
@@ -139,7 +139,7 @@ SECTIONS
 
        /*
         * Force .bss to 64K alignment so that .bss..swapper_pg_dir
-        * gets that alignment.  .sbss should be empty, so there will be
+        * gets that alignment.  .sbss should be empty, so there will be
         * no holes after __init_end. */
        BSS_SECTION(0, 0x10000, 0)
 
index 147cec1..1765bab 100644 (file)
@@ -254,7 +254,7 @@ static void __maybe_unused dump_mtregs(void)
               val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
 }
 
-/* Find some VPE program space  */
+/* Find some VPE program space */
 static void *alloc_progmem(unsigned long len)
 {
        void *addr;
@@ -292,7 +292,7 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
 }
 
 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
-   might -- code, read-only data, read-write data, small data.  Tally
+   might -- code, read-only data, read-write data, small data. Tally
    sizes, and place the offsets into sh_entsize fields: high bit means it
    belongs in init. */
 static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
@@ -386,7 +386,7 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
 
        if( (rel > 32768) || (rel < -32768) ) {
                printk(KERN_DEBUG "VPE loader: "
-                      "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
+                      "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
                return -ENOEXEC;
        }
 
@@ -458,7 +458,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
        Elf32_Addr val, vallo;
        struct mips_hi16 *l, *next;
 
-       /* Sign extend the addend we extract from the lo insn.  */
+       /* Sign extend the addend we extract from the lo insn.  */
        vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
 
        if (mips_hi16_list != NULL) {
@@ -470,7 +470,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
                        /*
                         * The value for the HI16 had best be the same.
                         */
-                       if (v != l->value) {
+                       if (v != l->value) {
                                printk(KERN_DEBUG "VPE loader: "
                                       "apply_r_mips_lo16/hi16: \t"
                                       "inconsistent value information\n");
@@ -505,7 +505,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
        }
 
        /*
-        * Ok, we're done with the HI16 relocs.  Now deal with the LO16.
+        * Ok, we're done with the HI16 relocs.  Now deal with the LO16.
         */
        val = v + vallo;
        insnlo = (insnlo & ~0xffff) | (val & 0xffff);
@@ -579,7 +579,7 @@ static int apply_relocations(Elf32_Shdr *sechdrs,
                res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
                if( res ) {
                        char *r = rstrs[ELF32_R_TYPE(r_info)];
-                       printk(KERN_WARNING "VPE loader: .text+0x%x "
+                       printk(KERN_WARNING "VPE loader: .text+0x%x "
                               "relocation type %s for symbol \"%s\" failed\n",
                               rel[i].r_offset, r ? r : "UNKNOWN",
                               strtab + sym->st_name);
@@ -697,18 +697,7 @@ static int vpe_run(struct vpe * v)
        dmt_flag = dmt();
        vpeflags = dvpe();
 
-       if (!list_empty(&v->tc)) {
-               if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
-                       evpe(vpeflags);
-                       emt(dmt_flag);
-                       local_irq_restore(flags);
-
-                       printk(KERN_WARNING
-                              "VPE loader: TC %d is already in use.\n",
-                              v->tc->index);
-                       return -ENOEXEC;
-               }
-       } else {
+       if (list_empty(&v->tc)) {
                evpe(vpeflags);
                emt(dmt_flag);
                local_irq_restore(flags);
@@ -720,6 +709,8 @@ static int vpe_run(struct vpe * v)
                return -ENOEXEC;
        }
 
+       t = list_first_entry(&v->tc, struct tc, tc);
+
        /* Put MVPE's into 'configuration state' */
        set_c0_mvpcontrol(MVPCONTROL_VPC);
 
@@ -772,7 +763,7 @@ static int vpe_run(struct vpe * v)
 
        /* Set up the XTC bit in vpeconf0 to point at our tc */
        write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
-                             | (t->index << VPECONF0_XTC_SHIFT));
+                             | (t->index << VPECONF0_XTC_SHIFT));
 
        back_to_back_c0_hazard();
 
@@ -926,34 +917,34 @@ static int vpe_elfload(struct vpe * v)
                               secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
                }
 
-               /* Fix up syms, so that st_value is a pointer to location. */
-               simplify_symbols(sechdrs, symindex, strtab, secstrings,
-                                hdr->e_shnum, &mod);
-
-               /* Now do relocations. */
-               for (i = 1; i < hdr->e_shnum; i++) {
-                       const char *strtab = (char *)sechdrs[strindex].sh_addr;
-                       unsigned int info = sechdrs[i].sh_info;
-
-                       /* Not a valid relocation section? */
-                       if (info >= hdr->e_shnum)
-                               continue;
-
-                       /* Don't bother with non-allocated sections */
-                       if (!(sechdrs[info].sh_flags & SHF_ALLOC))
-                               continue;
-
-                       if (sechdrs[i].sh_type == SHT_REL)
-                               err = apply_relocations(sechdrs, strtab, symindex, i,
-                                                       &mod);
-                       else if (sechdrs[i].sh_type == SHT_RELA)
-                               err = apply_relocate_add(sechdrs, strtab, symindex, i,
-                                                        &mod);
-                       if (err < 0)
-                               return err;
-
-               }
-       } else {
+               /* Fix up syms, so that st_value is a pointer to location. */
+               simplify_symbols(sechdrs, symindex, strtab, secstrings,
+                                hdr->e_shnum, &mod);
+
+               /* Now do relocations. */
+               for (i = 1; i < hdr->e_shnum; i++) {
+                       const char *strtab = (char *)sechdrs[strindex].sh_addr;
+                       unsigned int info = sechdrs[i].sh_info;
+
+                       /* Not a valid relocation section? */
+                       if (info >= hdr->e_shnum)
+                               continue;
+
+                       /* Don't bother with non-allocated sections */
+                       if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+                               continue;
+
+                       if (sechdrs[i].sh_type == SHT_REL)
+                               err = apply_relocations(sechdrs, strtab, symindex, i,
+                                                       &mod);
+                       else if (sechdrs[i].sh_type == SHT_RELA)
+                               err = apply_relocate_add(sechdrs, strtab, symindex, i,
+                                                        &mod);
+                       if (err < 0)
+                               return err;
+
+               }
+       } else {
                struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
 
                for (i = 0; i < hdr->e_phnum; i++) {
@@ -968,16 +959,16 @@ static int vpe_elfload(struct vpe * v)
                }
 
                for (i = 0; i < hdr->e_shnum; i++) {
-                       /* Internal symbols and strings. */
-                       if (sechdrs[i].sh_type == SHT_SYMTAB) {
-                               symindex = i;
-                               strindex = sechdrs[i].sh_link;
-                               strtab = (char *)hdr + sechdrs[strindex].sh_offset;
-
-                               /* mark the symtab's address for when we try to find the
-                                  magic symbols */
-                               sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
-                       }
+                       /* Internal symbols and strings. */
+                       if (sechdrs[i].sh_type == SHT_SYMTAB) {
+                               symindex = i;
+                               strindex = sechdrs[i].sh_link;
+                               strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+
+                               /* mark the symtab's address for when we try to find the
+                                  magic symbols */
+                               sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+                       }
                }
        }
 
@@ -1049,7 +1040,7 @@ static int getcwd(char *buff, int size)
        return ret;
 }
 
-/* checks VPE is unused and gets ready to load program  */
+/* checks VPE is unused and gets ready to load program */
 static int vpe_open(struct inode *inode, struct file *filp)
 {
        enum vpe_state state;
@@ -1121,11 +1112,11 @@ static int vpe_release(struct inode *inode, struct file *filp)
                if (vpe_elfload(v) >= 0) {
                        vpe_run(v);
                } else {
-                       printk(KERN_WARNING "VPE loader: ELF load failed.\n");
+                       printk(KERN_WARNING "VPE loader: ELF load failed.\n");
                        ret = -ENOEXEC;
                }
        } else {
-               printk(KERN_WARNING "VPE loader: only elf files are supported\n");
+               printk(KERN_WARNING "VPE loader: only elf files are supported\n");
                ret = -ENOEXEC;
        }
 
@@ -1149,7 +1140,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
        size_t ret = count;
        struct vpe *v;
 
-       if (iminor(file->f_path.dentry->d_inode) != minor)
+       if (iminor(file_inode(file)) != minor)
                return -ENODEV;
 
        v = get_vpe(tclimit);
index c154069..7726f61 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/watch.h>
 
 /*
- * Install the watch registers for the current thread.  A maximum of
+ * Install the watch registers for the current thread. A maximum of
  * four registers are installed although the machine may have more.
  */
 void mips_install_watch_registers(void)
@@ -72,7 +72,7 @@ void mips_read_watch_registers(void)
  }
 
 /*
- * Disable all watch registers.  Although only four registers are
+ * Disable all watch registers.         Although only four registers are
  * installed, all are cleared to eliminate the possibility of endless
  * looping in the watch handler.
  */
index ce2f129..3fc2e6d 100644 (file)
 #include "prom.h"
 
 /* lantiq socs have 3 static clocks */
-static struct clk cpu_clk_generic[3];
+static struct clk cpu_clk_generic[4];
 
-void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io)
+void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+                       unsigned long io, unsigned long ppe)
 {
        cpu_clk_generic[0].rate = cpu;
        cpu_clk_generic[1].rate = fpi;
        cpu_clk_generic[2].rate = io;
+       cpu_clk_generic[3].rate = ppe;
 }
 
 struct clk *clk_get_cpu(void)
@@ -51,6 +53,12 @@ struct clk *clk_get_io(void)
        return &cpu_clk_generic[2];
 }
 
+struct clk *clk_get_ppe(void)
+{
+       return &cpu_clk_generic[3];
+}
+EXPORT_SYMBOL_GPL(clk_get_ppe);
+
 static inline int clk_good(struct clk *clk)
 {
        return clk && !IS_ERR(clk);
@@ -145,9 +153,9 @@ static inline u32 get_counter_resolution(void)
        u32 res;
 
        __asm__ __volatile__(
-               ".set   push\n"
-               ".set   mips32r2\n"
-               "rdhwr  %0, $3\n"
+               ".set   push\n"
+               ".set   mips32r2\n"
+               "rdhwr  %0, $3\n"
                ".set pop\n"
                : "=&r" (res)
                : /* no input */
index fa67060..77e4bdb 100644 (file)
 #define CLOCK_167M     166666667
 #define CLOCK_196_608M 196608000
 #define CLOCK_200M     200000000
+#define CLOCK_222M     222000000
+#define CLOCK_240M     240000000
 #define CLOCK_250M     250000000
 #define CLOCK_266M     266666666
 #define CLOCK_300M     300000000
 #define CLOCK_333M     333333333
 #define CLOCK_393M     393215332
 #define CLOCK_400M     400000000
+#define CLOCK_450M     450000000
 #define CLOCK_500M     500000000
 #define CLOCK_600M     600000000
 
@@ -64,15 +67,17 @@ struct clk {
 };
 
 extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
-                               unsigned long io);
+                               unsigned long io, unsigned long ppe);
 
 extern unsigned long ltq_danube_cpu_hz(void);
 extern unsigned long ltq_danube_fpi_hz(void);
+extern unsigned long ltq_danube_pp32_hz(void);
 
 extern unsigned long ltq_ar9_cpu_hz(void);
 extern unsigned long ltq_ar9_fpi_hz(void);
 
 extern unsigned long ltq_vr9_cpu_hz(void);
 extern unsigned long ltq_vr9_fpi_hz(void);
+extern unsigned long ltq_vr9_pp32_hz(void);
 
 #endif
index 3a4520f..d4c59e0 100644 (file)
@@ -97,7 +97,7 @@
                        compatible = "lantiq,pci-xway";
                        bus-range = <0x0 0x0>;
                        ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000   /* pci memory */
-                                 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
+                                 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
                        reg = <0x7000000 0x8000         /* config space */
                                0xE105400 0x400>;       /* pci bridge */
                };
index 68c1731..fac1f5b 100644 (file)
                        lantiq,bus-clock = <33333333>;
                        interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
                        interrupt-map = <
-                                0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
+                               0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
                        >;
                        gpios-reset = <&gpio 21 0>;
                        req-mask = <0x1>;               /* GNT1 */
index 2d4ced3..ff4894a 100644 (file)
@@ -241,9 +241,9 @@ void __init ltq_soc_init(void)
 
        /* get our 3 static rates for cpu, fpi and io clocks */
        if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
-               clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
+               clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M, 0);
        else
-               clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
+               clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M, 0);
 
        /* add our clock domains */
        clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
index a7935bf..5119487 100644 (file)
 /* register definitions - external irqs */
 #define LTQ_EIU_EXIN_C         0x0000
 #define LTQ_EIU_EXIN_INIC      0x0004
+#define LTQ_EIU_EXIN_INC       0x0008
 #define LTQ_EIU_EXIN_INEN      0x000C
 
-/* irq numbers used by the external interrupt unit (EIU) */
-#define LTQ_EIU_IR0            (INT_NUM_IM4_IRL0 + 30)
-#define LTQ_EIU_IR1            (INT_NUM_IM3_IRL0 + 31)
-#define LTQ_EIU_IR2            (INT_NUM_IM1_IRL0 + 26)
-#define LTQ_EIU_IR3            INT_NUM_IM1_IRL0
-#define LTQ_EIU_IR4            (INT_NUM_IM1_IRL0 + 1)
-#define LTQ_EIU_IR5            (INT_NUM_IM1_IRL0 + 2)
-#define LTQ_EIU_IR6            (INT_NUM_IM2_IRL0 + 30)
-#define XWAY_EXIN_COUNT                3
+/* number of external interrupts */
 #define MAX_EIU                        6
 
 /* the performance counter */
 int gic_present;
 #endif
 
-static unsigned short ltq_eiu_irq[MAX_EIU] = {
-       LTQ_EIU_IR0,
-       LTQ_EIU_IR1,
-       LTQ_EIU_IR2,
-       LTQ_EIU_IR3,
-       LTQ_EIU_IR4,
-       LTQ_EIU_IR5,
-};
-
 static int exin_avail;
+static struct resource ltq_eiu_irq[MAX_EIU];
 static void __iomem *ltq_icu_membase[MAX_IM];
 static void __iomem *ltq_eiu_membase;
 static struct irq_domain *ltq_domain;
 
+int ltq_eiu_get_irq(int exin)
+{
+       if (exin < exin_avail)
+               return ltq_eiu_irq[exin].start;
+       return -1;
+}
+
 void ltq_disable_irq(struct irq_data *d)
 {
        u32 ier = LTQ_ICU_IM0_IER;
@@ -128,19 +120,65 @@ void ltq_enable_irq(struct irq_data *d)
        ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
 }
 
+static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
+{
+       int i;
+
+       for (i = 0; i < MAX_EIU; i++) {
+               if (d->hwirq == ltq_eiu_irq[i].start) {
+                       int val = 0;
+                       int edge = 0;
+
+                       switch (type) {
+                       case IRQF_TRIGGER_NONE:
+                               break;
+                       case IRQF_TRIGGER_RISING:
+                               val = 1;
+                               edge = 1;
+                               break;
+                       case IRQF_TRIGGER_FALLING:
+                               val = 2;
+                               edge = 1;
+                               break;
+                       case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
+                               val = 3;
+                               edge = 1;
+                               break;
+                       case IRQF_TRIGGER_HIGH:
+                               val = 5;
+                               break;
+                       case IRQF_TRIGGER_LOW:
+                               val = 6;
+                               break;
+                       default:
+                               pr_err("invalid type %d for irq %ld\n",
+                                       type, d->hwirq);
+                               return -EINVAL;
+                       }
+
+                       if (edge)
+                               irq_set_handler(d->hwirq, handle_edge_irq);
+
+                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
+                               (val << (i * 4)), LTQ_EIU_EXIN_C);
+               }
+       }
+
+       return 0;
+}
+
 static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
 {
        int i;
 
        ltq_enable_irq(d);
        for (i = 0; i < MAX_EIU; i++) {
-               if (d->hwirq == ltq_eiu_irq[i]) {
-                       /* low level - we should really handle set_type */
-                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
-                               (0x6 << (i * 4)), LTQ_EIU_EXIN_C);
+               if (d->hwirq == ltq_eiu_irq[i].start) {
+                       /* by default we are low level triggered */
+                       ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
                        /* clear all pending */
-                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
-                               LTQ_EIU_EXIN_INIC);
+                       ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
+                               LTQ_EIU_EXIN_INC);
                        /* enable */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
                                LTQ_EIU_EXIN_INEN);
@@ -157,7 +195,7 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
 
        ltq_disable_irq(d);
        for (i = 0; i < MAX_EIU; i++) {
-               if (d->hwirq == ltq_eiu_irq[i]) {
+               if (d->hwirq == ltq_eiu_irq[i].start) {
                        /* disable */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
                                LTQ_EIU_EXIN_INEN);
@@ -186,6 +224,7 @@ static struct irq_chip ltq_eiu_type = {
        .irq_ack = ltq_ack_irq,
        .irq_mask = ltq_disable_irq,
        .irq_mask_ack = ltq_mask_and_ack_irq,
+       .irq_set_type = ltq_eiu_settype,
 };
 
 static void ltq_hw_irqdispatch(int module)
@@ -301,7 +340,7 @@ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
                return 0;
 
        for (i = 0; i < exin_avail; i++)
-               if (hw == ltq_eiu_irq[i])
+               if (hw == ltq_eiu_irq[i].start)
                        chip = &ltq_eiu_type;
 
        irq_set_chip_and_handler(hw, chip, handle_level_irq);
@@ -323,7 +362,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 {
        struct device_node *eiu_node;
        struct resource res;
-       int i;
+       int i, ret;
 
        for (i = 0; i < MAX_IM; i++) {
                if (of_address_to_resource(node, i, &res))
@@ -340,17 +379,19 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
        }
 
        /* the external interrupts are optional and xway only */
-       eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
+       eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
        if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
                /* find out how many external irq sources we have */
-               const __be32 *count = of_get_property(node,
-                                                       "lantiq,count", NULL);
+               exin_avail = of_irq_count(eiu_node);
 
-               if (count)
-                       exin_avail = *count;
                if (exin_avail > MAX_EIU)
                        exin_avail = MAX_EIU;
 
+               ret = of_irq_to_resource_table(eiu_node,
+                                               ltq_eiu_irq, exin_avail);
+               if (ret != exin_avail)
+                       panic("failed to load external irq resources\n");
+
                if (request_mem_region(res.start, resource_size(&res),
                                                        res.name) < 0)
                        pr_err("Failed to request eiu memory");
index a3fa1a2..8e07b5f 100644 (file)
@@ -10,7 +10,7 @@
 #define _LTQ_PROM_H__
 
 #define LTQ_SYS_TYPE_LEN       0x100
-#define LTQ_SYS_REV_LEN         0x10
+#define LTQ_SYS_REV_LEN                0x10
 
 struct ltq_soc_info {
        unsigned char *name;
index 9aa17f7..1ab576d 100644 (file)
@@ -53,6 +53,29 @@ unsigned long ltq_danube_cpu_hz(void)
        }
 }
 
+unsigned long ltq_danube_pp32_hz(void)
+{
+       unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 7) & 3;
+       unsigned long clk;
+
+       switch (clksys) {
+       case 1:
+               clk = CLOCK_240M;
+               break;
+       case 2:
+               clk = CLOCK_222M;
+               break;
+       case 3:
+               clk = CLOCK_133M;
+               break;
+       default:
+               clk = CLOCK_266M;
+               break;
+       }
+
+       return clk;
+}
+
 unsigned long ltq_ar9_sys_hz(void)
 {
        if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
@@ -149,3 +172,23 @@ unsigned long ltq_vr9_fpi_hz(void)
 
        return clk;
 }
+
+unsigned long ltq_vr9_pp32_hz(void)
+{
+       unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 3;
+       unsigned long clk;
+
+       switch (clksys) {
+       case 1:
+               clk = CLOCK_450M;
+               break;
+       case 2:
+               clk = CLOCK_300M;
+               break;
+       default:
+               clk = CLOCK_500M;
+               break;
+       }
+
+       return clk;
+}
index 544dbb7..1fa0f17 100644 (file)
@@ -78,10 +78,19 @@ static struct ltq_xrx200_gphy_reset {
 /* reset and boot a gphy. these phys only exist on xrx200 SoC */
 int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr)
 {
+       struct clk *clk;
+
        if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) {
                dev_err(dev, "this SoC has no GPHY\n");
                return -EINVAL;
        }
+
+       clk = clk_get_sys("1f203000.rcu", "gphy");
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       clk_enable(clk);
+
        if (id > 1) {
                dev_err(dev, "%u is an invalid gphy id\n", id);
                return -EINVAL;
index 3925e66..c24924f 100644 (file)
@@ -305,7 +305,7 @@ void __init ltq_soc_init(void)
 
        /* check if all the core register ranges are available */
        if (!np_pmu || !np_cgu || !np_ebu)
-               panic("Failed to load core nodess from devicetree");
+               panic("Failed to load core nodes from devicetree");
 
        if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
                        of_address_to_resource(np_cgu, 0, &res_cgu) ||
@@ -356,14 +356,16 @@ void __init ltq_soc_init(void)
 
        if (of_machine_is_compatible("lantiq,ase")) {
                if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
-                       clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
+                       clkdev_add_static(CLOCK_266M, CLOCK_133M,
+                                               CLOCK_133M, CLOCK_266M);
                else
-                       clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
+                       clkdev_add_static(CLOCK_133M, CLOCK_133M,
+                                               CLOCK_133M, CLOCK_133M);
                clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY),
                clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
        } else if (of_machine_is_compatible("lantiq,vr9")) {
                clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
-                               ltq_vr9_fpi_hz());
+                               ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz());
                clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
                clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
                clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
@@ -374,12 +376,13 @@ void __init ltq_soc_init(void)
                                PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
                                PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
                                PMU_PPE_QSB | PMU_PPE_TOP);
+               clkdev_add_pmu("1f203000.rcu", "gphy", 0, PMU_GPHY);
        } else if (of_machine_is_compatible("lantiq,ar9")) {
                clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
-                               ltq_ar9_fpi_hz());
+                               ltq_ar9_fpi_hz(), CLOCK_250M);
                clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
        } else {
                clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
-                               ltq_danube_fpi_hz());
+                               ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
        }
 }
index 9cc4e4d..869bd3b 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the LASAT specific kernel interface routines under Linux.
 #
 
-obj-y                          += reset.o setup.o prom.o lasat_board.o \
+obj-y                          += reset.o setup.o prom.o lasat_board.o \
                                   at93c.o interrupt.o serial.o
 
 obj-$(CONFIG_LASAT_SYSCTL)     += sysctl.o
index 2da3704..3e718b1 100644 (file)
@@ -25,7 +25,7 @@ void ds1603_enable(void);
 void ds1603_disable(void);
 void ds1603_init(struct ds_defs *);
 
-#define TRIMMER_DEFAULT        3
+#define TRIMMER_DEFAULT 3
 #define TRIMMER_DISABLE_RTC 0
 
 #endif
index 460626b..dfb509d 100644 (file)
@@ -28,7 +28,7 @@ $(obj)/head.o: $(obj)/head.S $(KERNEL_IMAGE)
 
 OBJECTS = head.o kImage.o
 
-rom.sw:        $(obj)/rom.sw
+rom.sw: $(obj)/rom.sw
 rom.bin:       $(obj)/rom.bin
 
 $(obj)/rom.sw: $(obj)/rom.bin
index e0ecda9..41babbe 100644 (file)
@@ -7,7 +7,7 @@
 
        /* Magic words identifying a software image */
        .word   LASAT_K_MAGIC0_VAL
-       .word   LASAT_K_MAGIC1_VAL
+       .word   LASAT_K_MAGIC1_VAL
 
        /* Image header version */
        .word   0x00000002
index d3d04c3..7eb3348 100644 (file)
@@ -163,12 +163,12 @@ int pvc_program_cg(int charnum, u8 bitmap[BM_SIZE])
 }
 
 #define FUNC_SET_CMD   0x20
-#define  EIGHT_BYTE    (1 << 4)
-#define  FOUR_BYTE     0
-#define  TWO_LINES     (1 << 3)
-#define  ONE_LINE      0
-#define  LARGE_FONT    (1 << 2)
-#define  SMALL_FONT    0
+#define         EIGHT_BYTE     (1 << 4)
+#define         FOUR_BYTE      0
+#define         TWO_LINES      (1 << 3)
+#define         ONE_LINE       0
+#define         LARGE_FONT     (1 << 2)
+#define         SMALL_FONT     0
 
 static void pvc_funcset(u8 cmd)
 {
@@ -177,9 +177,9 @@ static void pvc_funcset(u8 cmd)
 }
 
 #define ENTRYMODE_CMD          0x4
-#define  AUTO_INC              (1 << 1)
-#define  AUTO_DEC              0
-#define  CURSOR_FOLLOWS_DISP   (1 << 0)
+#define         AUTO_INC               (1 << 1)
+#define         AUTO_DEC               0
+#define         CURSOR_FOLLOWS_DISP    (1 << 0)
 
 static void pvc_entrymode(u8 cmd)
 {
@@ -188,20 +188,20 @@ static void pvc_entrymode(u8 cmd)
 }
 
 #define DISP_CNT_CMD   0x08
-#define  DISP_OFF      0
-#define  DISP_ON       (1 << 2)
-#define  CUR_ON                (1 << 1)
-#define  CUR_BLINK     (1 << 0)
+#define         DISP_OFF       0
+#define         DISP_ON        (1 << 2)
+#define         CUR_ON         (1 << 1)
+#define         CUR_BLINK      (1 << 0)
 void pvc_dispcnt(u8 cmd)
 {
        pvc_write(DISP_CNT_CMD | (cmd & (DISP_ON|CUR_ON|CUR_BLINK)), MODE_INST);
 }
 
 #define MOVE_CMD       0x10
-#define  DISPLAY       (1 << 3)
-#define  CURSOR                0
-#define  RIGHT         (1 << 2)
-#define  LEFT          0
+#define         DISPLAY        (1 << 3)
+#define         CURSOR         0
+#define         RIGHT          (1 << 2)
+#define         LEFT           0
 void pvc_move(u8 cmd)
 {
        pvc_write(MOVE_CMD | (cmd & (DISPLAY|RIGHT)), MODE_INST);
index 2f07577..d0119fc 100644 (file)
@@ -29,16 +29,16 @@ void pvc_dump_string(const unsigned char *str);
 int pvc_program_cg(int charnum, u8 bitmap[BM_SIZE]);
 
 void pvc_dispcnt(u8 cmd);
-#define  DISP_OFF      0
-#define  DISP_ON       (1 << 2)
-#define  CUR_ON                (1 << 1)
-#define  CUR_BLINK     (1 << 0)
+#define         DISP_OFF       0
+#define         DISP_ON        (1 << 2)
+#define         CUR_ON         (1 << 1)
+#define         CUR_BLINK      (1 << 0)
 
 void pvc_move(u8 cmd);
-#define  DISPLAY       (1 << 3)
-#define  CURSOR                0
-#define  RIGHT         (1 << 2)
-#define  LEFT          0
+#define         DISPLAY        (1 << 3)
+#define         CURSOR         0
+#define         RIGHT          (1 << 2)
+#define         LEFT           0
 
 void pvc_clear(void);
 void pvc_home(void);
index 8e388da..c592bc8 100644 (file)
@@ -64,7 +64,7 @@ static int pvc_line_proc_open(struct inode *inode, struct file *file)
 static ssize_t pvc_line_proc_write(struct file *file, const char __user *buf,
                                   size_t count, loff_t *pos)
 {
-       int lineno = *(int *)PDE(file->f_path.dentry->d_inode)->data;
+       int lineno = *(int *)PDE(file_inode(file))->data;
        char kbuf[PVC_LINELEN];
        size_t len;
 
index 5bcb6e8..2e5fbed 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  Registration of Lasat UART platform device.
  *
- *  Copyright (C) 2007  Brian Murphy <brian@murphy.dk>
+ *  Copyright (C) 2007 Brian Murphy <brian@murphy.dk>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index d87ffd0..f27694f 100644 (file)
@@ -134,8 +134,8 @@ int proc_lasat_ip(ctl_table *table, int write,
        } else {
                ip = *(unsigned int *)(table->data);
                sprintf(ipbuf, "%d.%d.%d.%d",
-                       (ip)       & 0xff,
-                       (ip >>  8) & 0xff,
+                       (ip)       & 0xff,
+                       (ip >>  8) & 0xff,
                        (ip >> 16) & 0xff,
                        (ip >> 24) & 0xff);
                len = strlen(ipbuf);
index 239a9c9..81f1dcf 100644 (file)
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
 
 
 /**
- * __mips_change_bit - Toggle a bit in memory.  This is called by change_bit()
+ * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
  * if it cannot find a faster solution.
  * @nr: Bit to change
  * @addr: Address to start counting from
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
 
 
 /**
- * __mips_test_and_change_bit - Change a bit and return its old value.  This is
+ * __mips_test_and_change_bit - Change a bit and return its old value. This is
  * called by test_and_change_bit() if it cannot find a faster solution.
  * @nr: Bit to change
  * @addr: Address to count from
index 6b876ca..507147a 100644 (file)
@@ -67,8 +67,8 @@
 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)   \
        LOAD    _t0, (offset + UNIT(0))(src);                   \
        LOAD    _t1, (offset + UNIT(1))(src);                   \
-       LOAD    _t2, (offset + UNIT(2))(src);                   \
-       LOAD    _t3, (offset + UNIT(3))(src);                   \
+       LOAD    _t2, (offset + UNIT(2))(src);                   \
+       LOAD    _t3, (offset + UNIT(3))(src);                   \
        ADDC(sum, _t0);                                         \
        ADDC(sum, _t1);                                         \
        ADDC(sum, _t2);                                         \
@@ -285,7 +285,7 @@ LEAF(csum_partial)
 1:
 #endif
        .set    reorder
-       /* Add the passed partial csum.  */
+       /* Add the passed partial csum.  */
        ADDC32(sum, a2)
        jr      ra
        .set    noreorder
@@ -298,7 +298,7 @@ LEAF(csum_partial)
  *     csum_partial_copy_nocheck(src, dst, len, sum)
  *     __csum_partial_copy_user(src, dst, len, sum, errp)
  *
- * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
+ * See "Spec" in memcpy.S for details. Unlike __copy_user, all
  * function in this file use the standard calling convention.
  */
 
@@ -371,16 +371,16 @@ LEAF(csum_partial)
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define LDFIRST LOADR
-#define LDREST  LOADL
+#define LDREST LOADL
 #define STFIRST STORER
-#define STREST  STOREL
+#define STREST STOREL
 #define SHIFT_DISCARD SLLV
 #define SHIFT_DISCARD_REVERT SRLV
 #else
 #define LDFIRST LOADL
-#define LDREST  LOADR
+#define LDREST LOADR
 #define STFIRST STOREL
-#define STREST  STORER
+#define STREST STORER
 #define SHIFT_DISCARD SRLV
 #define SHIFT_DISCARD_REVERT SLLV
 #endif
@@ -430,7 +430,7 @@ FEXPORT(csum_partial_copy_nocheck)
         * src and dst are aligned; need to compute rem
         */
 .Lboth_aligned:
-        SRL    t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
+        SRL    t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
        beqz    t0, .Lcleanup_both_aligned # len < 8*NBYTES
         nop
        SUB     len, 8*NBYTES           # subtract here for bgez loop
@@ -518,7 +518,7 @@ EXC(        STORE   t0, 0(dst),             .Ls_exc)
        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
-        * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
+        * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
         * because can't assume read-access to dst.  Instead, use
         * STREST dst, which doesn't require read access to dst.
         *
@@ -532,7 +532,7 @@ EXC(        STORE   t0, 0(dst),             .Ls_exc)
        li      bits, 8*NBYTES
        SLL     rem, len, 3     # rem = number of bits to keep
 EXC(   LOAD    t0, 0(src),             .Ll_exc)
-       SUB     bits, bits, rem # bits = number of bits to discard
+       SUB     bits, bits, rem # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
 EXC(   STREST  t0, -1(t1),             .Ls_exc)
        SHIFT_DISCARD_REVERT t0, t0, bits
@@ -551,7 +551,7 @@ EXC(        STREST  t0, -1(t1),             .Ls_exc)
         * Set match = (src and dst have same alignment)
         */
 #define match rem
-EXC(   LDFIRST t3, FIRST(0)(src),      .Ll_exc)
+EXC(   LDFIRST t3, FIRST(0)(src),      .Ll_exc)
        ADD     t2, zero, NBYTES
 EXC(   LDREST  t3, REST(0)(src),       .Ll_exc_copy)
        SUB     t2, t2, t1      # t2 = number of bytes copied
@@ -568,9 +568,9 @@ EXC(        STFIRST t3, FIRST(0)(dst),      .Ls_exc)
         ADD    src, src, t2
 
 .Lsrc_unaligned_dst_aligned:
-       SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
+       SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
        beqz    t0, .Lcleanup_src_unaligned
-        and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
+        and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
 1:
 /*
  * Avoid consecutive LD*'s to the same register since some mips
@@ -578,13 +578,13 @@ EXC(      STFIRST t3, FIRST(0)(dst),      .Ls_exc)
  * It's OK to load FIRST(N+1) before REST(N) because the two addresses
  * are to the same unit (unless src is aligned, but it's not).
  */
-EXC(   LDFIRST t0, FIRST(0)(src),      .Ll_exc)
-EXC(   LDFIRST t1, FIRST(1)(src),      .Ll_exc_copy)
-       SUB     len, len, 4*NBYTES
+EXC(   LDFIRST t0, FIRST(0)(src),      .Ll_exc)
+EXC(   LDFIRST t1, FIRST(1)(src),      .Ll_exc_copy)
+       SUB     len, len, 4*NBYTES
 EXC(   LDREST  t0, REST(0)(src),       .Ll_exc_copy)
 EXC(   LDREST  t1, REST(1)(src),       .Ll_exc_copy)
-EXC(   LDFIRST t2, FIRST(2)(src),      .Ll_exc_copy)
-EXC(   LDFIRST t3, FIRST(3)(src),      .Ll_exc_copy)
+EXC(   LDFIRST t2, FIRST(2)(src),      .Ll_exc_copy)
+EXC(   LDFIRST t3, FIRST(3)(src),      .Ll_exc_copy)
 EXC(   LDREST  t2, REST(2)(src),       .Ll_exc_copy)
 EXC(   LDREST  t3, REST(3)(src),       .Ll_exc_copy)
        ADD     src, src, 4*NBYTES
@@ -634,7 +634,7 @@ EXC(        STORE   t0, 0(dst),             .Ls_exc)
 #define SHIFT_INC -8
 #endif
        move    t2, zero        # partial word
-       li      t3, SHIFT_START # shift
+       li      t3, SHIFT_START # shift
 /* use .Ll_exc_copy here to return correct sum on fault */
 #define COPY_BYTE(N)                   \
 EXC(   lbu     t0, N(src), .Ll_exc_copy);      \
@@ -642,7 +642,7 @@ EXC(        lbu     t0, N(src), .Ll_exc_copy);      \
 EXC(   sb      t0, N(dst), .Ls_exc);   \
        SLLV    t0, t0, t3;             \
        addu    t3, SHIFT_INC;          \
-       beqz    len, .Lcopy_bytes_done; \
+       beqz    len, .Lcopy_bytes_done; \
         or     t2, t0
 
        COPY_BYTE(0)
index 288f795..44713af 100644 (file)
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(__delay);
  * Division by multiplication: you don't have to worry about
  * loss of precision.
  *
- * Use only for very small delays ( < 1 msec).  Should probably use a
+ * Use only for very small delays ( < 1 msec). Should probably use a
  * lookup table, really, as the multiplications take much too long with
  * short delays.  This is a "reasonable" implementation, though (and the
  * first constant multiplications gets optimized away if the delay is
index a99c1d3..32b9f21 100644 (file)
@@ -63,7 +63,7 @@ static void dump_tlb(int first, int last)
                tlb_read();
                BARRIER();
                pagemask = read_c0_pagemask();
-               entryhi  = read_c0_entryhi();
+               entryhi  = read_c0_entryhi();
                entrylo0 = read_c0_entrylo0();
                entrylo1 = read_c0_entrylo1();
 
index 65192c0..c5c40da 100644 (file)
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define LDFIRST LOADR
-#define LDREST  LOADL
+#define LDREST LOADL
 #define STFIRST STORER
-#define STREST  STOREL
+#define STREST STOREL
 #define SHIFT_DISCARD SLLV
 #else
 #define LDFIRST LOADL
-#define LDREST  LOADR
+#define LDREST LOADR
 #define STFIRST STOREL
-#define STREST  STORER
+#define STREST STORER
 #define SHIFT_DISCARD SRLV
 #endif
 
@@ -235,7 +235,7 @@ __copy_user_common:
         * src and dst are aligned; need to compute rem
         */
 .Lboth_aligned:
-        SRL    t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
+        SRL    t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
        beqz    t0, .Lcleanup_both_aligned # len < 8*NBYTES
         and    rem, len, (8*NBYTES-1)   # rem = len % (8*NBYTES)
        PREF(   0, 3*32(src) )
@@ -313,7 +313,7 @@ EXC(        STORE   t0, 0(dst),             .Ls_exc_p1u)
        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
-        * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
+        * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
         * because can't assume read-access to dst.  Instead, use
         * STREST dst, which doesn't require read access to dst.
         *
@@ -327,7 +327,7 @@ EXC(        STORE   t0, 0(dst),             .Ls_exc_p1u)
        li      bits, 8*NBYTES
        SLL     rem, len, 3     # rem = number of bits to keep
 EXC(   LOAD    t0, 0(src),             .Ll_exc)
-       SUB     bits, bits, rem # bits = number of bits to discard
+       SUB     bits, bits, rem # bits = number of bits to discard
        SHIFT_DISCARD t0, t0, bits
 EXC(   STREST  t0, -1(t1),             .Ls_exc)
        jr      ra
@@ -343,7 +343,7 @@ EXC(        STREST  t0, -1(t1),             .Ls_exc)
         * Set match = (src and dst have same alignment)
         */
 #define match rem
-EXC(   LDFIRST t3, FIRST(0)(src),      .Ll_exc)
+EXC(   LDFIRST t3, FIRST(0)(src),      .Ll_exc)
        ADD     t2, zero, NBYTES
 EXC(   LDREST  t3, REST(0)(src),       .Ll_exc_copy)
        SUB     t2, t2, t1      # t2 = number of bytes copied
@@ -357,10 +357,10 @@ EXC(      STFIRST t3, FIRST(0)(dst),      .Ls_exc)
         ADD    src, src, t2
 
 .Lsrc_unaligned_dst_aligned:
-       SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
+       SRL     t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
        PREF(   0, 3*32(src) )
        beqz    t0, .Lcleanup_src_unaligned
-        and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
+        and    rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
        PREF(   1, 3*32(dst) )
 1:
 /*
@@ -370,13 +370,13 @@ EXC(      STFIRST t3, FIRST(0)(dst),      .Ls_exc)
  * are to the same unit (unless src is aligned, but it's not).
  */
        R10KCBARRIER(0(ra))
-EXC(   LDFIRST t0, FIRST(0)(src),      .Ll_exc)
-EXC(   LDFIRST t1, FIRST(1)(src),      .Ll_exc_copy)
-       SUB     len, len, 4*NBYTES
+EXC(   LDFIRST t0, FIRST(0)(src),      .Ll_exc)
+EXC(   LDFIRST t1, FIRST(1)(src),      .Ll_exc_copy)
+       SUB     len, len, 4*NBYTES
 EXC(   LDREST  t0, REST(0)(src),       .Ll_exc_copy)
 EXC(   LDREST  t1, REST(1)(src),       .Ll_exc_copy)
-EXC(   LDFIRST t2, FIRST(2)(src),      .Ll_exc_copy)
-EXC(   LDFIRST t3, FIRST(3)(src),      .Ll_exc_copy)
+EXC(   LDFIRST t2, FIRST(2)(src),      .Ll_exc_copy)
+EXC(   LDFIRST t3, FIRST(3)(src),      .Ll_exc_copy)
 EXC(   LDREST  t2, REST(2)(src),       .Ll_exc_copy)
 EXC(   LDREST  t3, REST(3)(src),       .Ll_exc_copy)
        PREF(   0, 9*32(src) )          # 0 is PREF_LOAD  (not streamed)
@@ -388,7 +388,7 @@ EXC(        STORE   t0, UNIT(0)(dst),       .Ls_exc_p4u)
 EXC(   STORE   t1, UNIT(1)(dst),       .Ls_exc_p3u)
 EXC(   STORE   t2, UNIT(2)(dst),       .Ls_exc_p2u)
 EXC(   STORE   t3, UNIT(3)(dst),       .Ls_exc_p1u)
-       PREF(   1, 9*32(dst) )          # 1 is PREF_STORE (not streamed)
+       PREF(   1, 9*32(dst) )          # 1 is PREF_STORE (not streamed)
        .set    reorder                         /* DADDI_WAR */
        ADD     dst, dst, 4*NBYTES
        bne     len, rem, 1b
@@ -502,7 +502,7 @@ EXC(        lb      t1, 0(src),     .Ll_exc)
 
 
 #define SEXC(n)                                                        \
-       .set    reorder;                        /* DADDI_WAR */ \
+       .set    reorder;                        /* DADDI_WAR */ \
 .Ls_exc_p ## n ## u:                                           \
        ADD     len, len, n*NBYTES;                             \
        jr      ra;                                             \
index 606c8a9..053d3b0 100644 (file)
@@ -21,8 +21,8 @@
 
 #define EX(insn,reg,addr,handler)                      \
 9:     insn    reg, addr;                              \
-       .section __ex_table,"a";                        \
-       PTR     9b, handler;                            \
+       .section __ex_table,"a";                        \
+       PTR     9b, handler;                            \
        .previous
 
        .macro  f_fill64 dst, offset, val, fixup
index 9cee907..91615c2 100644 (file)
@@ -30,7 +30,7 @@ static void dump_tlb(int first, int last)
                        "tlbr\n\t"
                        "nop\n\t"
                        ".set\treorder");
-               entryhi  = read_c0_entryhi();
+               entryhi  = read_c0_entryhi();
                entrylo0 = read_c0_entrylo0();
 
                /* Unused entries have a virtual address of KSEG0.  */
index 7201b2f..bad5394 100644 (file)
@@ -23,7 +23,7 @@
 
 /*
  * Ugly special case have to check: we might get passed a user space
- * pointer which wraps into the kernel space.  We don't deal with that.  If
+ * pointer which wraps into the kernel space.  We don't deal with that.         If
  * it happens at most some bytes of the exceptions handlers will be copied.
  */
 
index 6445716..beea03c 100644 (file)
@@ -21,9 +21,9 @@
  * maximum of a1 or 0 in case of error.
  *
  * Note: for performance reasons we deliberately accept that a user may
- *       make strlen_user and strnlen_user access the first few KSEG0
- *       bytes.  There's nothing secret there.  On 64-bit accessing beyond
- *       the maximum is a tad hairier ...
+ *      make strlen_user and strnlen_user access the first few KSEG0
+ *      bytes.  There's nothing secret there.  On 64-bit accessing beyond
+ *      the maximum is a tad hairier ...
  */
 LEAF(__strnlen_user_asm)
        LONG_L          v0, TI_ADDR_LIMIT($28)  # pointer ok?
index a6d1c77..65e3dfc 100644 (file)
@@ -4,7 +4,7 @@
  * for more details.
  *
  * Copyright (C) 2005 Thiemo Seufer
- * Copyright (C) 2005  MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2005  MIPS Technologies, Inc. All rights reserved.
  *     Author: Maciej W. Rozycki <macro@mips.com>
  */
 
index 2b76cb0..0dc0055 100644 (file)
@@ -8,7 +8,7 @@ obj-$(CONFIG_MACH_LOONGSON) += common/
 # Lemote Fuloong mini-PC (Loongson 2E-based)
 #
 
-obj-$(CONFIG_LEMOTE_FULOONG2E)  += fuloong-2e/
+obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
 
 #
 # Lemote loongson2f family machines
index f27d7cc..cc0e4fd 100644 (file)
@@ -6,9 +6,9 @@
  * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 #include <linux/interrupt.h>
index 353e1d2..72fed00 100644 (file)
@@ -12,8 +12,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index b3fd5ea..ab4d6cc 100644 (file)
@@ -7,8 +7,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 5b5cbba..ec2e360 100644 (file)
@@ -7,8 +7,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 681d129..a73414d 100644 (file)
@@ -7,8 +7,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 4d9f65a..a6eb2e8 100644 (file)
@@ -7,8 +7,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 5d1f48f..c639b9d 100644 (file)
@@ -9,9 +9,9 @@
  *
  * Reference: AMD Geode(TM) CS5536 Companion Device Data Book
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 
index bdedf51..f7c905e 100644 (file)
@@ -7,8 +7,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 6dfeab1..81bed9d 100644 (file)
@@ -7,8 +7,8 @@
  * Copyright (C) 2009 Lemote, Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  *
index a71736f..ced461b 100644 (file)
@@ -4,9 +4,9 @@
  *  Copyright (c) 2009 Lemote Inc.
  *  Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 #include <linux/serial_reg.h>
index d93830a..0a18fcf 100644 (file)
@@ -12,8 +12,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index e8a0ffa..2186990 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  STLS2F GPIO Support
  *
- *  Copyright (c) 2008 Richard Liu,  STMicroelectronics  <richard.liu@st.com>
+ *  Copyright (c) 2008 Richard Liu,  STMicroelectronics         <richard.liu@st.com>
  *  Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -123,13 +123,13 @@ static void ls2f_gpio_set_value(struct gpio_chip *chip,
 }
 
 static struct gpio_chip ls2f_chip = {
-       .label                  = "ls2f",
-       .direction_input        = ls2f_gpio_direction_input,
-       .get                    = ls2f_gpio_get_value,
-       .direction_output       = ls2f_gpio_direction_output,
-       .set                    = ls2f_gpio_set_value,
-       .base                   = 0,
-       .ngpio                  = STLS2F_N_GPIO,
+       .label                  = "ls2f",
+       .direction_input        = ls2f_gpio_direction_input,
+       .get                    = ls2f_gpio_get_value,
+       .direction_output       = ls2f_gpio_direction_output,
+       .set                    = ls2f_gpio_set_value,
+       .base                   = 0,
+       .ngpio                  = STLS2F_N_GPIO,
 };
 
 static int __init ls2f_gpio_setup(void)
index 19d3415..ae7af1f 100644 (file)
@@ -2,8 +2,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 5897471..687003b 100644 (file)
@@ -2,9 +2,9 @@
  * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 #include <linux/delay.h>
index 2efd5d9..4becd4f 100644 (file)
@@ -4,8 +4,8 @@
  *
  * Copyright (c) 2009 Zhang Le <r0bertz@gentoo.org>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
 #define MACHTYPE_LEN 50
 
 static const char *system_types[] = {
-       [MACH_LOONGSON_UNKNOWN]         "unknown loongson machine",
-       [MACH_LEMOTE_FL2E]              "lemote-fuloong-2e-box",
-       [MACH_LEMOTE_FL2F]              "lemote-fuloong-2f-box",
-       [MACH_LEMOTE_ML2F7]             "lemote-mengloong-2f-7inches",
-       [MACH_LEMOTE_YL2F89]            "lemote-yeeloong-2f-8.9inches",
-       [MACH_DEXXON_GDIUM2F10]         "dexxon-gdium-2f",
+       [MACH_LOONGSON_UNKNOWN]         "unknown loongson machine",
+       [MACH_LEMOTE_FL2E]              "lemote-fuloong-2e-box",
+       [MACH_LEMOTE_FL2F]              "lemote-fuloong-2f-box",
+       [MACH_LEMOTE_ML2F7]             "lemote-mengloong-2f-7inches",
+       [MACH_LEMOTE_YL2F89]            "lemote-yeeloong-2f-8.9inches",
+       [MACH_DEXXON_GDIUM2F10]         "dexxon-gdium-2f",
        [MACH_LEMOTE_NAS]               "lemote-nas-2f",
-       [MACH_LEMOTE_LL2F]              "lemote-lynloong-2f",
-       [MACH_LOONGSON_END]             NULL,
+       [MACH_LEMOTE_LL2F]              "lemote-lynloong-2f",
+       [MACH_LOONGSON_END]             NULL,
 };
 
 const char *get_system_type(void)
index 30eba60..8626a42 100644 (file)
@@ -1,6 +1,6 @@
 /*
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 31d8c5e..fa77844 100644 (file)
@@ -2,9 +2,9 @@
  * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 #include <linux/pci.h>
 #include <loongson.h>
 
 static struct resource loongson_pci_mem_resource = {
-       .name   = "pci memory space",
-       .start  = LOONGSON_PCI_MEM_START,
-       .end    = LOONGSON_PCI_MEM_END,
-       .flags  = IORESOURCE_MEM,
+       .name   = "pci memory space",
+       .start  = LOONGSON_PCI_MEM_START,
+       .end    = LOONGSON_PCI_MEM_END,
+       .flags  = IORESOURCE_MEM,
 };
 
 static struct resource loongson_pci_io_resource = {
-       .name   = "pci io space",
-       .start  = LOONGSON_PCI_IO_START,
-       .end    = IO_SPACE_LIMIT,
-       .flags  = IORESOURCE_IO,
+       .name   = "pci io space",
+       .start  = LOONGSON_PCI_IO_START,
+       .end    = IO_SPACE_LIMIT,
+       .flags  = IORESOURCE_IO,
 };
 
 static struct pci_controller  loongson_pci_controller = {
-       .pci_ops        = &loongson_pci_ops,
-       .io_resource    = &loongson_pci_io_resource,
-       .mem_resource   = &loongson_pci_mem_resource,
-       .mem_offset     = 0x00000000UL,
-       .io_offset      = 0x00000000UL,
+       .pci_ops        = &loongson_pci_ops,
+       .io_resource    = &loongson_pci_io_resource,
+       .mem_resource   = &loongson_pci_mem_resource,
+       .mem_offset     = 0x00000000UL,
+       .io_offset      = 0x00000000UL,
 };
 
 static void __init setup_pcimap(void)
@@ -42,7 +42,7 @@ static void __init setup_pcimap(void)
         * we set pcimap_lo[0,1,2] to map it to pci space[0M,64M], [320M,448M]
         *
         * pcimap: PCI_MAP2  PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0
-        *           [<2G]   [384M,448M] [320M,384M] [0M,64M]
+        *           [<2G]   [384M,448M] [320M,384M] [0M,64M]
         */
        LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 |
                LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) |
index 502b059..0ed3832 100644 (file)
@@ -2,8 +2,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 9e10d62..35c8c64 100644 (file)
@@ -1,6 +1,6 @@
 /*
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  *
@@ -26,9 +26,9 @@ static inline void loongson_reboot(void)
        func = (void *)ioremap_nocache(LOONGSON_BOOT_BASE, 4);
 
        __asm__ __volatile__(
-       "       .set    noat                                            \n"
-       "       jr      %[func]                                         \n"
-       "       .set    at                                              \n"
+       "       .set    noat                                            \n"
+       "       jr      %[func]                                         \n"
+       "       .set    at                                              \n"
        : /* No outputs */
        : [func] "r" (func));
 #endif
index 7580873..5f2b78a 100644 (file)
 }
 
 static struct plat_serial8250_port uart8250_data[][2] = {
-       [MACH_LOONGSON_UNKNOWN]         {},
-       [MACH_LEMOTE_FL2E]              {PORT(4), {} },
-       [MACH_LEMOTE_FL2F]              {PORT(3), {} },
-       [MACH_LEMOTE_ML2F7]             {PORT_M(3), {} },
-       [MACH_LEMOTE_YL2F89]            {PORT_M(3), {} },
-       [MACH_DEXXON_GDIUM2F10]         {PORT_M(3), {} },
-       [MACH_LEMOTE_NAS]               {PORT_M(3), {} },
-       [MACH_LEMOTE_LL2F]              {PORT(3), {} },
-       [MACH_LOONGSON_END]             {},
+       [MACH_LOONGSON_UNKNOWN]         {},
+       [MACH_LEMOTE_FL2E]              {PORT(4), {} },
+       [MACH_LEMOTE_FL2F]              {PORT(3), {} },
+       [MACH_LEMOTE_ML2F7]             {PORT_M(3), {} },
+       [MACH_LEMOTE_YL2F89]            {PORT_M(3), {} },
+       [MACH_DEXXON_GDIUM2F10]         {PORT_M(3), {} },
+       [MACH_LEMOTE_NAS]               {PORT_M(3), {} },
+       [MACH_LEMOTE_LL2F]              {PORT(3), {} },
+       [MACH_LOONGSON_END]             {},
 };
 
 static struct platform_device uart8250_device = {
index 27d826b..8223f8a 100644 (file)
@@ -2,9 +2,9 @@
  * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 #include <linux/module.h>
index 9fdd01f..262a1f6 100644 (file)
@@ -5,9 +5,9 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 #include <asm/mc146818-time.h>
index d69ea54..e192ad0 100644 (file)
@@ -2,8 +2,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 3cf1fef..ef5ec8f 100644 (file)
@@ -2,9 +2,9 @@
  * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 #include <linux/interrupt.h>
@@ -48,9 +48,9 @@ static struct irqaction cascade_irqaction = {
 void __init mach_init_irq(void)
 {
        /* init all controller
-        *   0-15         ------> i8259 interrupt
-        *   16-23        ------> mips cpu interrupt
-        *   32-63        ------> bonito irq
+        *   0-15         ------> i8259 interrupt
+        *   16-23        ------> mips cpu interrupt
+        *   32-63        ------> bonito irq
         */
 
        /* most bonito irq should be level triggered */
index bc39ec6..da4d2ae 100644 (file)
@@ -4,8 +4,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 1595a21..5a3f186 100644 (file)
@@ -30,141 +30,141 @@ extern sci_handler yeeloong_report_lid_status;
  * 2, fill the PORT_LOW as EC register low part.
  * 3, fill the PORT_DATA as EC register write data or get the data from it.
  */
-#define        EC_IO_PORT_HIGH 0x0381
-#define        EC_IO_PORT_LOW  0x0382
-#define        EC_IO_PORT_DATA 0x0383
+#define EC_IO_PORT_HIGH 0x0381
+#define EC_IO_PORT_LOW 0x0382
+#define EC_IO_PORT_DATA 0x0383
 
 /*
  * EC delay time is 500us for register and status access
  */
-#define        EC_REG_DELAY    500     /* unit : us */
-#define        EC_CMD_TIMEOUT  0x1000
+#define EC_REG_DELAY   500     /* unit : us */
+#define EC_CMD_TIMEOUT 0x1000
 
 /*
  * EC access port for SCI communication
  */
-#define        EC_CMD_PORT             0x66
-#define        EC_STS_PORT             0x66
-#define        EC_DAT_PORT             0x62
-#define        CMD_INIT_IDLE_MODE      0xdd
-#define        CMD_EXIT_IDLE_MODE      0xdf
-#define        CMD_INIT_RESET_MODE     0xd8
-#define        CMD_REBOOT_SYSTEM       0x8c
-#define        CMD_GET_EVENT_NUM       0x84
-#define        CMD_PROGRAM_PIECE       0xda
+#define EC_CMD_PORT            0x66
+#define EC_STS_PORT            0x66
+#define EC_DAT_PORT            0x62
+#define CMD_INIT_IDLE_MODE     0xdd
+#define CMD_EXIT_IDLE_MODE     0xdf
+#define CMD_INIT_RESET_MODE    0xd8
+#define CMD_REBOOT_SYSTEM      0x8c
+#define CMD_GET_EVENT_NUM      0x84
+#define CMD_PROGRAM_PIECE      0xda
 
 /* temperature & fan registers */
-#define        REG_TEMPERATURE_VALUE   0xF458
-#define        REG_FAN_AUTO_MAN_SWITCH 0xF459
-#define        BIT_FAN_AUTO            0
-#define        BIT_FAN_MANUAL          1
-#define        REG_FAN_CONTROL         0xF4D2
-#define        BIT_FAN_CONTROL_ON      (1 << 0)
-#define        BIT_FAN_CONTROL_OFF     (0 << 0)
-#define        REG_FAN_STATUS          0xF4DA
-#define        BIT_FAN_STATUS_ON       (1 << 0)
-#define        BIT_FAN_STATUS_OFF      (0 << 0)
-#define        REG_FAN_SPEED_HIGH      0xFE22
-#define        REG_FAN_SPEED_LOW       0xFE23
-#define        REG_FAN_SPEED_LEVEL     0xF4CC
+#define REG_TEMPERATURE_VALUE  0xF458
+#define REG_FAN_AUTO_MAN_SWITCH 0xF459
+#define BIT_FAN_AUTO           0
+#define BIT_FAN_MANUAL         1
+#define REG_FAN_CONTROL                0xF4D2
+#define BIT_FAN_CONTROL_ON     (1 << 0)
+#define BIT_FAN_CONTROL_OFF    (0 << 0)
+#define REG_FAN_STATUS         0xF4DA
+#define BIT_FAN_STATUS_ON      (1 << 0)
+#define BIT_FAN_STATUS_OFF     (0 << 0)
+#define REG_FAN_SPEED_HIGH     0xFE22
+#define REG_FAN_SPEED_LOW      0xFE23
+#define REG_FAN_SPEED_LEVEL    0xF4CC
 /* fan speed divider */
-#define        FAN_SPEED_DIVIDER       480000  /* (60*1000*1000/62.5/2)*/
+#define FAN_SPEED_DIVIDER      480000  /* (60*1000*1000/62.5/2)*/
 
 /* battery registers */
-#define        REG_BAT_DESIGN_CAP_HIGH         0xF77D
-#define        REG_BAT_DESIGN_CAP_LOW          0xF77E
-#define        REG_BAT_FULLCHG_CAP_HIGH        0xF780
-#define        REG_BAT_FULLCHG_CAP_LOW         0xF781
-#define        REG_BAT_DESIGN_VOL_HIGH         0xF782
-#define        REG_BAT_DESIGN_VOL_LOW          0xF783
-#define        REG_BAT_CURRENT_HIGH            0xF784
-#define        REG_BAT_CURRENT_LOW             0xF785
-#define        REG_BAT_VOLTAGE_HIGH            0xF786
-#define        REG_BAT_VOLTAGE_LOW             0xF787
-#define        REG_BAT_TEMPERATURE_HIGH        0xF788
-#define        REG_BAT_TEMPERATURE_LOW         0xF789
-#define        REG_BAT_RELATIVE_CAP_HIGH       0xF492
-#define        REG_BAT_RELATIVE_CAP_LOW        0xF493
-#define        REG_BAT_VENDOR                  0xF4C4
-#define        FLAG_BAT_VENDOR_SANYO           0x01
-#define        FLAG_BAT_VENDOR_SIMPLO          0x02
-#define        REG_BAT_CELL_COUNT              0xF4C6
-#define        FLAG_BAT_CELL_3S1P              0x03
-#define        FLAG_BAT_CELL_3S2P              0x06
-#define        REG_BAT_CHARGE                  0xF4A2
-#define        FLAG_BAT_CHARGE_DISCHARGE       0x01
-#define        FLAG_BAT_CHARGE_CHARGE          0x02
-#define        FLAG_BAT_CHARGE_ACPOWER         0x00
-#define        REG_BAT_STATUS                  0xF4B0
-#define        BIT_BAT_STATUS_LOW              (1 << 5)
-#define        BIT_BAT_STATUS_DESTROY          (1 << 2)
-#define        BIT_BAT_STATUS_FULL             (1 << 1)
-#define        BIT_BAT_STATUS_IN               (1 << 0)
-#define        REG_BAT_CHARGE_STATUS           0xF4B1
-#define        BIT_BAT_CHARGE_STATUS_OVERTEMP  (1 << 2)
-#define        BIT_BAT_CHARGE_STATUS_PRECHG    (1 << 1)
-#define        REG_BAT_STATE                   0xF482
-#define        BIT_BAT_STATE_CHARGING          (1 << 1)
-#define        BIT_BAT_STATE_DISCHARGING       (1 << 0)
-#define        REG_BAT_POWER                   0xF440
-#define        BIT_BAT_POWER_S3                (1 << 2)
-#define        BIT_BAT_POWER_ON                (1 << 1)
-#define        BIT_BAT_POWER_ACIN              (1 << 0)
+#define REG_BAT_DESIGN_CAP_HIGH                0xF77D
+#define REG_BAT_DESIGN_CAP_LOW         0xF77E
+#define REG_BAT_FULLCHG_CAP_HIGH       0xF780
+#define REG_BAT_FULLCHG_CAP_LOW                0xF781
+#define REG_BAT_DESIGN_VOL_HIGH                0xF782
+#define REG_BAT_DESIGN_VOL_LOW         0xF783
+#define REG_BAT_CURRENT_HIGH           0xF784
+#define REG_BAT_CURRENT_LOW            0xF785
+#define REG_BAT_VOLTAGE_HIGH           0xF786
+#define REG_BAT_VOLTAGE_LOW            0xF787
+#define REG_BAT_TEMPERATURE_HIGH       0xF788
+#define REG_BAT_TEMPERATURE_LOW                0xF789
+#define REG_BAT_RELATIVE_CAP_HIGH      0xF492
+#define REG_BAT_RELATIVE_CAP_LOW       0xF493
+#define REG_BAT_VENDOR                 0xF4C4
+#define FLAG_BAT_VENDOR_SANYO          0x01
+#define FLAG_BAT_VENDOR_SIMPLO         0x02
+#define REG_BAT_CELL_COUNT             0xF4C6
+#define FLAG_BAT_CELL_3S1P             0x03
+#define FLAG_BAT_CELL_3S2P             0x06
+#define REG_BAT_CHARGE                 0xF4A2
+#define FLAG_BAT_CHARGE_DISCHARGE      0x01
+#define FLAG_BAT_CHARGE_CHARGE         0x02
+#define FLAG_BAT_CHARGE_ACPOWER                0x00
+#define REG_BAT_STATUS                 0xF4B0
+#define BIT_BAT_STATUS_LOW             (1 << 5)
+#define BIT_BAT_STATUS_DESTROY         (1 << 2)
+#define BIT_BAT_STATUS_FULL            (1 << 1)
+#define BIT_BAT_STATUS_IN              (1 << 0)
+#define REG_BAT_CHARGE_STATUS          0xF4B1
+#define BIT_BAT_CHARGE_STATUS_OVERTEMP (1 << 2)
+#define BIT_BAT_CHARGE_STATUS_PRECHG   (1 << 1)
+#define REG_BAT_STATE                  0xF482
+#define BIT_BAT_STATE_CHARGING         (1 << 1)
+#define BIT_BAT_STATE_DISCHARGING      (1 << 0)
+#define REG_BAT_POWER                  0xF440
+#define BIT_BAT_POWER_S3               (1 << 2)
+#define BIT_BAT_POWER_ON               (1 << 1)
+#define BIT_BAT_POWER_ACIN             (1 << 0)
 
 /* other registers */
 /* Audio: rd/wr */
-#define        REG_AUDIO_VOLUME        0xF46C
-#define        REG_AUDIO_MUTE          0xF4E7
-#define        REG_AUDIO_BEEP          0xF4D0
+#define REG_AUDIO_VOLUME       0xF46C
+#define REG_AUDIO_MUTE         0xF4E7
+#define REG_AUDIO_BEEP         0xF4D0
 /* USB port power or not: rd/wr */
-#define        REG_USB0_FLAG           0xF461
-#define        REG_USB1_FLAG           0xF462
-#define        REG_USB2_FLAG           0xF463
-#define        BIT_USB_FLAG_ON         1
-#define        BIT_USB_FLAG_OFF        0
+#define REG_USB0_FLAG          0xF461
+#define REG_USB1_FLAG          0xF462
+#define REG_USB2_FLAG          0xF463
+#define BIT_USB_FLAG_ON                1
+#define BIT_USB_FLAG_OFF       0
 /* LID */
-#define        REG_LID_DETECT          0xF4BD
-#define        BIT_LID_DETECT_ON       1
-#define        BIT_LID_DETECT_OFF      0
+#define REG_LID_DETECT         0xF4BD
+#define BIT_LID_DETECT_ON      1
+#define BIT_LID_DETECT_OFF     0
 /* CRT */
-#define        REG_CRT_DETECT          0xF4AD
-#define        BIT_CRT_DETECT_PLUG     1
-#define        BIT_CRT_DETECT_UNPLUG   0
+#define REG_CRT_DETECT         0xF4AD
+#define BIT_CRT_DETECT_PLUG    1
+#define BIT_CRT_DETECT_UNPLUG  0
 /* LCD backlight brightness adjust: 9 levels */
-#define        REG_DISPLAY_BRIGHTNESS  0xF4F5
+#define REG_DISPLAY_BRIGHTNESS 0xF4F5
 /* Black screen Status */
-#define        BIT_DISPLAY_LCD_ON      1
-#define        BIT_DISPLAY_LCD_OFF     0
+#define BIT_DISPLAY_LCD_ON     1
+#define BIT_DISPLAY_LCD_OFF    0
 /* LCD backlight control: off/restore */
-#define        REG_BACKLIGHT_CTRL      0xF7BD
-#define        BIT_BACKLIGHT_ON        1
-#define        BIT_BACKLIGHT_OFF       0
+#define REG_BACKLIGHT_CTRL     0xF7BD
+#define BIT_BACKLIGHT_ON       1
+#define BIT_BACKLIGHT_OFF      0
 /* Reset the machine auto-clear: rd/wr */
-#define        REG_RESET               0xF4EC
-#define        BIT_RESET_ON            1
+#define REG_RESET              0xF4EC
+#define BIT_RESET_ON           1
 /* Light the led: rd/wr */
-#define        REG_LED                 0xF4C8
-#define        BIT_LED_RED_POWER       (1 << 0)
-#define        BIT_LED_ORANGE_POWER    (1 << 1)
-#define        BIT_LED_GREEN_CHARGE    (1 << 2)
-#define        BIT_LED_RED_CHARGE      (1 << 3)
-#define        BIT_LED_NUMLOCK         (1 << 4)
+#define REG_LED                        0xF4C8
+#define BIT_LED_RED_POWER      (1 << 0)
+#define BIT_LED_ORANGE_POWER   (1 << 1)
+#define BIT_LED_GREEN_CHARGE   (1 << 2)
+#define BIT_LED_RED_CHARGE     (1 << 3)
+#define BIT_LED_NUMLOCK                (1 << 4)
 /* Test led mode, all led on/off */
-#define        REG_LED_TEST            0xF4C2
-#define        BIT_LED_TEST_IN         1
-#define        BIT_LED_TEST_OUT        0
+#define REG_LED_TEST           0xF4C2
+#define BIT_LED_TEST_IN                1
+#define BIT_LED_TEST_OUT       0
 /* Camera on/off */
-#define        REG_CAMERA_STATUS       0xF46A
-#define        BIT_CAMERA_STATUS_ON    1
-#define        BIT_CAMERA_STATUS_OFF   0
-#define        REG_CAMERA_CONTROL      0xF7B7
-#define        BIT_CAMERA_CONTROL_OFF  0
-#define        BIT_CAMERA_CONTROL_ON   1
+#define REG_CAMERA_STATUS      0xF46A
+#define BIT_CAMERA_STATUS_ON   1
+#define BIT_CAMERA_STATUS_OFF  0
+#define REG_CAMERA_CONTROL     0xF7B7
+#define BIT_CAMERA_CONTROL_OFF 0
+#define BIT_CAMERA_CONTROL_ON  1
 /* Wlan Status */
-#define        REG_WLAN                0xF4FA
-#define        BIT_WLAN_ON             1
-#define        BIT_WLAN_OFF            0
-#define        REG_DISPLAY_LCD         0xF79F
+#define REG_WLAN               0xF4FA
+#define BIT_WLAN_ON            1
+#define BIT_WLAN_OFF           0
+#define REG_DISPLAY_LCD                0xF79F
 
 /* SCI Event Number from EC */
 enum {
index 14b0818..6f8682e 100644 (file)
@@ -2,9 +2,9 @@
  * Copyright (C) 2007 Lemote Inc.
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 
 #include <loongson.h>
 #include <machine.h>
 
-#define LOONGSON_TIMER_IRQ     (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
-#define LOONGSON_NORTH_BRIDGE_IRQ      (MIPS_CPU_IRQ_BASE + 6) /* bonito */
-#define LOONGSON_UART_IRQ      (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
-#define LOONGSON_SOUTH_BRIDGE_IRQ      (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
+#define LOONGSON_TIMER_IRQ     (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
+#define LOONGSON_NORTH_BRIDGE_IRQ      (MIPS_CPU_IRQ_BASE + 6) /* bonito */
+#define LOONGSON_UART_IRQ      (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
+#define LOONGSON_SOUTH_BRIDGE_IRQ      (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
 
 #define LOONGSON_INT_BIT_INT0          (1 << 11)
 #define LOONGSON_INT_BIT_INT1          (1 << 12)
@@ -108,9 +108,9 @@ struct irqaction cascade_irqaction = {
 void __init mach_init_irq(void)
 {
        /* init all controller
-        *   0-15         ------> i8259 interrupt
-        *   16-23        ------> mips cpu interrupt
-        *   32-63        ------> bonito irq
+        *   0-15         ------> i8259 interrupt
+        *   16-23        ------> mips cpu interrupt
+        *   32-63        ------> bonito irq
         */
 
        /* setup cs5536 as high level trigger */
index e860a27..b55e6ee 100644 (file)
@@ -2,8 +2,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
@@ -22,11 +22,11 @@ void __init mach_prom_init_machtype(void)
         * machines, this will help the users a lot.
         *
         * If no "machtype=" passed, get machine type from "PMON_VER=".
-        *      PMON_VER=LM8089         Lemote 8.9'' netbook
-        *               LM8101         Lemote 10.1'' netbook
-        *      (The above two netbooks have the same kernel support)
-        *               LM6XXX         Lemote FuLoong(2F) box series
-        *               LM9XXX         Lemote LynLoong PC series
+        *      PMON_VER=LM8089         Lemote 8.9'' netbook
+        *               LM8101         Lemote 10.1'' netbook
+        *      (The above two netbooks have the same kernel support)
+        *               LM6XXX         Lemote FuLoong(2F) box series
+        *               LM9XXX         Lemote LynLoong PC series
         */
        if (strstr(arcs_cmdline, "PMON_VER=LM")) {
                if (strstr(arcs_cmdline, "PMON_VER=LM8"))
index 36020a0..90962a3 100644 (file)
@@ -5,8 +5,8 @@
  * Copyright (C) 2009 Lemote Inc.
  * Author: Wu Zhangjin, wuzhangjin@gmail.com
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
@@ -90,9 +90,9 @@ void ml2f_reboot(void)
 #define EC_SHUTDOWN_IO_PORT_HIGH 0xff2d
 #define EC_SHUTDOWN_IO_PORT_LOW         0xff2e
 #define EC_SHUTDOWN_IO_PORT_DATA 0xff2f
-#define REG_SHUTDOWN_HIGH        0xFC
-#define REG_SHUTDOWN_LOW         0x29
-#define BIT_SHUTDOWN_ON          (1 << 1)
+#define REG_SHUTDOWN_HIGH       0xFC
+#define REG_SHUTDOWN_LOW        0x29
+#define BIT_SHUTDOWN_ON                 (1 << 1)
 
 static void ml2f_shutdown(void)
 {
index 99bdefe..1186344 100644 (file)
@@ -1,4 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1)  += \
+cflags-$(CONFIG_CPU_LOONGSON1) += \
        $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
        -Wa,-mips32r2 -Wa,--trap
 
index 07133de..b4437f1 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 41bc8ff..455a770 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 69dad4c..fdf8cb5 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
@@ -23,7 +23,7 @@
                .mapbase        = LS1X_UART ## _id ## _BASE,    \
                .irq            = LS1X_UART ## _id ## _IRQ,     \
                .iotype         = UPIO_MEM,                     \
-               .flags          = UPF_IOREMAP | UPF_FIXED_TYPE, \
+               .flags          = UPF_IOREMAP | UPF_FIXED_TYPE, \
                .type           = PORT_16550A,                  \
        }
 
index 1f8e49f..2a47af5 100644 (file)
@@ -3,8 +3,8 @@
  *
  * Modified from arch/mips/pnx833x/common/prom.c.
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
@@ -73,7 +73,7 @@ void __init prom_free_prom_memory(void)
 
 #define PORT(offset)   (u8 *)(KSEG1ADDR(LS1X_UART0_BASE + offset))
 
-void __init prom_putchar(char c)
+void prom_putchar(char c)
 {
        int timeout;
 
index fb979a7..d4f610f 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 62128cc..62f41af 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 1fbd526..b26b10d 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 9660723..121a848 100644 (file)
@@ -9,4 +9,3 @@ obj-y   := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
           sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
           sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
           dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
-
index 47c77e7..afb5a0b 100644 (file)
@@ -169,7 +169,7 @@ static int isBranchInstr(mips_instruction * i)
 
 /*
  * In the Linux kernel, we support selection of FPR format on the
- * basis of the Status.FR bit.  If an FPU is not present, the FR bit
+ * basis of the Status.FR bit. If an FPU is not present, the FR bit
  * is hardwired to zero, which would imply a 32-bit FPU even for
  * 64-bit CPUs so we rather look at TIF_32BIT_REGS.
  * FPU emu is slow and bulky and optimizing this function offers fairly
@@ -234,7 +234,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
        if (xcp->cp0_cause & CAUSEF_BD) {
                /*
                 * The instruction to be emulated is in a branch delay slot
-                * which means that we have to  emulate the branch instruction
+                * which means that we have to  emulate the branch instruction
                 * BEFORE we do the cop1 instruction.
                 *
                 * This branch could be a COP1 branch, but in that case we
@@ -1335,8 +1335,8 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                else {
                        /*
                         * The 'ieee754_csr' is an alias of
-                        * ctx->fcr31.  No need to copy ctx->fcr31 to
-                        * ieee754_csr.  But ieee754_csr.rm is ieee
+                        * ctx->fcr31.  No need to copy ctx->fcr31 to
+                        * ieee754_csr.  But ieee754_csr.rm is ieee
                         * library modes. (not mips rounding mode)
                         */
                        /* convert to ieee library modes */
index b422fca..c57c8ad 100644 (file)
@@ -153,7 +153,7 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
                xe = xe;
                xs = xs;
 
-               if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
+               if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
                        xm = XDPSRS1(xm);
                        xe++;
                }
index a2a51b8..b874d60 100644 (file)
@@ -87,7 +87,7 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
        if (xe > 512) {         /* x > 2**-512? */
                xe -= 512;      /* x = x / 2**512 */
                scalx += 256;
-       } else if (xe < -512) { /* x < 2**-512? */
+       } else if (xe < -512) { /* x < 2**-512? */
                xe += 512;      /* x = x * 2**512 */
                scalx -= 256;
        }
@@ -108,13 +108,13 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
        y.bits &= 0xffffffff00000000LL;
 
        /* triple to almost 56 sig. bits: y ~= sqrt(x) to within 1 ulp */
-       /* t=y*y; z=t;  pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
+       /* t=y*y; z=t;  pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
        z = t = ieee754dp_mul(y, y);
        t.parts.bexp += 0x001;
        t = ieee754dp_add(t, z);
        z = ieee754dp_mul(ieee754dp_sub(x, z), y);
 
-       /* t=z/(t+x) ;  pt[n0]+=0x00100000; y+=t; */
+       /* t=z/(t+x) ;  pt[n0]+=0x00100000; y+=t; */
        t = ieee754dp_div(z, ieee754dp_add(t, x));
        t.parts.bexp += 0x001;
        y = ieee754dp_add(y, t);
index 0de098c..91e0a4b 100644 (file)
@@ -158,7 +158,7 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
                xe = xe;
                xs = xs;
 
-               if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
+               if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
                        xm = XDPSRS1(xm);       /* shift preserving sticky */
                        xe++;
                }
index 30554e1..0015cf1 100644 (file)
 #endif
 
 const struct ieee754dp_konst __ieee754dp_spcvals[] = {
-       DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero   */
-       DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero   */
+       DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero   */
+       DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero   */
        DPSTR(0, DP_EBIAS, 0, 0),       /* + 1.0   */
        DPSTR(1, DP_EBIAS, 0, 0),       /* - 1.0   */
        DPSTR(0, 3 + DP_EBIAS, 0x40000, 0),     /* + 10.0   */
        DPSTR(1, 3 + DP_EBIAS, 0x40000, 0),     /* - 10.0   */
-       DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */
-       DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */
+       DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */
+       DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */
        DPSTR(0, DP_EMAX+1+DP_EBIAS, 0x7FFFF, 0xFFFFFFFF), /* + indef quiet Nan */
        DPSTR(0, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF),      /* + max */
        DPSTR(1, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF),      /* - max */
        DPSTR(0, DP_EMIN + DP_EBIAS, 0, 0),     /* + min normal */
        DPSTR(1, DP_EMIN + DP_EBIAS, 0, 0),     /* - min normal */
-       DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */
-       DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */
+       DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */
+       DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */
        DPSTR(0, 31 + DP_EBIAS, 0, 0),  /* + 1.0e31 */
        DPSTR(0, 63 + DP_EBIAS, 0, 0),  /* + 1.0e63 */
 };
@@ -84,9 +84,9 @@ const struct ieee754sp_konst __ieee754sp_spcvals[] = {
        SPSTR(1, 3 + SP_EBIAS, 0x200000),       /* - 10.0   */
        SPSTR(0, SP_EMAX + 1 + SP_EBIAS, 0),    /* + infinity */
        SPSTR(1, SP_EMAX + 1 + SP_EBIAS, 0),    /* - infinity */
-       SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF),     /* + indef quiet Nan  */
-       SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */
-       SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */
+       SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF),     /* + indef quiet Nan  */
+       SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */
+       SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */
        SPSTR(0, SP_EMIN + SP_EBIAS, 0),        /* + min normal */
        SPSTR(1, SP_EMIN + SP_EBIAS, 0),        /* - min normal */
        SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 1),    /* + min denormal */
index 080b5ca..068e56b 100644 (file)
@@ -116,7 +116,7 @@ static u64 get_rounding(int sn, u64 xm)
                                xm += 0x8;
                        break;
                case IEEE754_RD:        /* toward -Infinity */
-                       if (sn) /* ?? */
+                       if (sn) /* ?? */
                                xm += 0x8;
                        break;
                }
index 2a7d43f..4b6c6fb 100644 (file)
@@ -56,7 +56,7 @@
 
 #define CLPAIR(x, y)   ((x)*6+(y))
 
-#define CLEARCX        \
+#define CLEARCX \
   (ieee754_csr.cx = 0)
 
 #define SETCX(x) \
index 271d00d..15d1e36 100644 (file)
@@ -117,7 +117,7 @@ static unsigned get_rounding(int sn, unsigned xm)
                                xm += 0x8;
                        break;
                case IEEE754_RD:        /* toward -Infinity */
-                       if (sn) /* ?? */
+                       if (sn) /* ?? */
                                xm += 0x8;
                        break;
                }
index b99a693..9671671 100644 (file)
@@ -25,7 +25,7 @@
  *  Added preprocessor hacks to map to Linux kernel diagnostics.
  *
  *  Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- *  Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ *  Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
  *************************************************************************/
 
 #include <linux/kernel.h>
index 52e6c58..1c58657 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  Kevin D. Kissell, kevink@mips and Carsten Langgaard, carstenl@mips.com
- *  Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ *  Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
  *
  *  This program is free software; you can distribute it and/or modify it
  *  under the terms of the GNU General Public License (Version 2) as
index ae1a327..c446e64 100644 (file)
@@ -148,7 +148,7 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
                xe = xe;
                xs = xs;
 
-               if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
+               if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
                        SPXSRSX1();
                }
        } else {
index 2722a25..fa4675c 100644 (file)
@@ -131,7 +131,7 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
                        hrm = hxm * hym;        /* 16 * 16 => 32 */
 
                        {
-                               unsigned t = lxm * hym; /* 16 * 16 => 32 */
+                               unsigned t = lxm * hym; /* 16 * 16 => 32 */
                                {
                                        unsigned at = lrm + (t << 16);
                                        hrm += at < lrm;
@@ -141,7 +141,7 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
                        }
 
                        {
-                               unsigned t = hxm * lym; /* 16 * 16 => 32 */
+                               unsigned t = hxm * lym; /* 16 * 16 => 32 */
                                {
                                        unsigned at = lrm + (t << 16);
                                        hrm += at < lrm;
index 886ed5b..e595c6f 100644 (file)
@@ -153,7 +153,7 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
                xe = xe;
                xs = xs;
 
-               if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
+               if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
                        SPXSRSX1();     /* shift preserving sticky */
                }
        } else {
index 90ceb96..1dcec30 100644 (file)
@@ -16,9 +16,9 @@ obj-$(CONFIG_CPU_R3000)               += c-r3k.o tlb-r3k.o
 obj-$(CONFIG_CPU_R8000)                += c-r4k.o cex-gen.o tlb-r8k.o
 obj-$(CONFIG_CPU_SB1)          += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
 obj-$(CONFIG_CPU_TX39XX)       += c-tx39.o tlb-r3k.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON)        += c-octeon.o cex-oct.o tlb-r4k.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
 
 obj-$(CONFIG_IP22_CPU_SCACHE)  += sc-ip22.o
-obj-$(CONFIG_R5000_CPU_SCACHE)  += sc-r5k.o
-obj-$(CONFIG_RM7000_CPU_SCACHE)        += sc-rm7k.o
+obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
+obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)  += sc-mips.o
index 6ec04da..8557fb5 100644 (file)
@@ -106,7 +106,7 @@ static void octeon_flush_icache_all(void)
  * Called to flush all memory associated with a memory
  * context.
  *
- * @mm:     Memory context to flush
+ * @mm:            Memory context to flush
  */
 static void octeon_flush_cache_mm(struct mm_struct *mm)
 {
index 031c4c2..704dc73 100644 (file)
@@ -119,7 +119,7 @@ static void r3k_flush_icache_range(unsigned long start, unsigned long end)
        write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
 
        for (i = 0; i < size; i += 0x080) {
-               asm(    "sb\t$0, 0x000(%0)\n\t"
+               asm(    "sb\t$0, 0x000(%0)\n\t"
                        "sb\t$0, 0x004(%0)\n\t"
                        "sb\t$0, 0x008(%0)\n\t"
                        "sb\t$0, 0x00c(%0)\n\t"
@@ -176,7 +176,7 @@ static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
        write_c0_status((ST0_ISC|flags)&~ST0_IEC);
 
        for (i = 0; i < size; i += 0x080) {
-               asm(    "sb\t$0, 0x000(%0)\n\t"
+               asm(    "sb\t$0, 0x000(%0)\n\t"
                        "sb\t$0, 0x004(%0)\n\t"
                        "sb\t$0, 0x008(%0)\n\t"
                        "sb\t$0, 0x00c(%0)\n\t"
@@ -285,13 +285,13 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
        write_c0_status(flags&~ST0_IEC);
 
        /* Fill the TLB to avoid an exception with caches isolated. */
-       asm(    "lw\t$0, 0x000(%0)\n\t"
+       asm(    "lw\t$0, 0x000(%0)\n\t"
                "lw\t$0, 0x004(%0)\n\t"
                : : "r" (addr) );
 
        write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
 
-       asm(    "sb\t$0, 0x000(%0)\n\t"
+       asm(    "sb\t$0, 0x000(%0)\n\t"
                "sb\t$0, 0x004(%0)\n\t"
                : : "r" (addr) );
 
index 0f7d788..ecca559 100644 (file)
@@ -160,7 +160,7 @@ static void __cpuinit r4k_blast_dcache_setup(void)
                "1:\n\t" \
                )
 #define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
-#define CACHE32_UNROLL32_ALIGN2        JUMP_TO_ALIGN(11)
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
 
 static inline void blast_r4600_v1_icache32(void)
 {
@@ -177,7 +177,7 @@ static inline void tx49_blast_icache32(void)
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
-                              current_cpu_data.icache.waybit;
+                              current_cpu_data.icache.waybit;
        unsigned long ws, addr;
 
        CACHE32_UNROLL32_ALIGN2;
@@ -208,7 +208,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
-                              current_cpu_data.icache.waybit;
+                              current_cpu_data.icache.waybit;
        unsigned long ws, addr;
 
        CACHE32_UNROLL32_ALIGN2;
@@ -637,7 +637,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
                         * for the cache instruction on MIPS processors and
                         * some processors, among them the RM5200 and RM7000
                         * QED processors will throw an address error for cache
-                        * hit ops with insufficient alignment.  Solved by
+                        * hit ops with insufficient alignment.  Solved by
                         * aligning the address to cache line size.
                         */
                        blast_inv_scache_range(addr, addr + size);
@@ -864,7 +864,7 @@ static void __cpuinit probe_pcache(void)
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
-               c->icache.waybit = 0;   /* doesn't matter */
+               c->icache.waybit = 0;   /* doesn't matter */
 
                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -923,7 +923,7 @@ static void __cpuinit probe_pcache(void)
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
-               c->icache.waybit = 0;   /* doesn't matter */
+               c->icache.waybit = 0;   /* doesn't matter */
 
                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -986,8 +986,8 @@ static void __cpuinit probe_pcache(void)
                c->icache.ways = 1 + ((config1 >> 16) & 7);
 
                icache_size = c->icache.sets *
-                             c->icache.ways *
-                             c->icache.linesz;
+                             c->icache.ways *
+                             c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);
 
                if (config & 0x8)               /* VI bit */
@@ -1006,8 +1006,8 @@ static void __cpuinit probe_pcache(void)
                c->dcache.ways = 1 + ((config1 >> 7) & 7);
 
                dcache_size = c->dcache.sets *
-                             c->dcache.ways *
-                             c->dcache.linesz;
+                             c->dcache.ways *
+                             c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
 
                c->options |= MIPS_CPU_PREFETCH;
@@ -1016,7 +1016,7 @@ static void __cpuinit probe_pcache(void)
 
        /*
         * Processor configuration sanity check for the R4000SC erratum
-        * #5.  With page sizes larger than 32kB there is no possibility
+        * #5.  With page sizes larger than 32kB there is no possibility
         * to get a VCE exception anymore so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping his hardware in the "bad"
@@ -1057,6 +1057,7 @@ static void __cpuinit probe_pcache(void)
                break;
 
        case CPU_M14KC:
+       case CPU_M14KEC:
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
@@ -1088,7 +1089,7 @@ static void __cpuinit probe_pcache(void)
                break;
        }
 
-#ifdef  CONFIG_CPU_LOONGSON2
+#ifdef CONFIG_CPU_LOONGSON2
        /*
         * LOONGSON2 has 4 way icache, but when using indexed cache op,
         * one op will act on all 4 ways
@@ -1228,7 +1229,7 @@ static void __cpuinit setup_scache(void)
 #ifdef CONFIG_R5000_CPU_SCACHE
                r5k_sc_init();
 #endif
-                return;
+               return;
 
        case CPU_RM7000:
 #ifdef CONFIG_RM7000_CPU_SCACHE
index 87d23ca..ba9da27 100644 (file)
@@ -33,9 +33,9 @@ extern int r3k_have_wired_reg;        /* in r3k-tlb.c */
 /* This sequence is required to ensure icache is disabled immediately */
 #define TX39_STOP_STREAMING() \
 __asm__ __volatile__( \
-       ".set    push\n\t" \
-       ".set    noreorder\n\t" \
-       "b       1f\n\t" \
+       ".set    push\n\t" \
+       ".set    noreorder\n\t" \
+       "b       1f\n\t" \
        "nop\n\t" \
        "1:\n\t" \
        ".set pop" \
@@ -361,7 +361,7 @@ void __cpuinit tx39_cache_init(void)
                /* TX39/H core (writethru direct-map cache) */
                __flush_cache_vmap      = tx39__flush_cache_vmap;
                __flush_cache_vunmap    = tx39__flush_cache_vunmap;
-               flush_cache_all = tx39h_flush_icache_all;
+               flush_cache_all = tx39h_flush_icache_all;
                __flush_cache_all       = tx39h_flush_icache_all;
                flush_cache_mm          = (void *) tx39h_flush_icache_all;
                flush_cache_range       = (void *) tx39h_flush_icache_all;
@@ -409,8 +409,8 @@ void __cpuinit tx39_cache_init(void)
                _dma_cache_inv = tx39_dma_cache_inv;
 
                shm_align_mask = max_t(unsigned long,
-                                      (dcache_size / current_cpu_data.dcache.ways) - 1,
-                                      PAGE_SIZE - 1);
+                                      (dcache_size / current_cpu_data.dcache.ways) - 1,
+                                      PAGE_SIZE - 1);
 
                break;
        }
index 3571090..576add3 100644 (file)
@@ -27,7 +27,7 @@
 
 /*
  * We'd like to dump the L2_ECC_TAG register on errors, but errata make
- * that unsafe... So for now we don't.  (BCM1250/BCM112x erratum SOC-48.)
+ * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
  */
 #undef DUMP_L2_ECC_TAG_ON_ERROR
 
@@ -48,7 +48,7 @@
 #define CP0_CERRI_EXTERNAL     (1 << 26)
 
 #define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
-#define CP0_CERRI_DATA         (CP0_CERRI_DATA_PARITY)
+#define CP0_CERRI_DATA        (CP0_CERRI_DATA_PARITY)
 
 #define CP0_CERRD_MULTIPLE     (1 << 31)
 #define CP0_CERRD_TAG_STATE    (1 << 30)
@@ -56,8 +56,8 @@
 #define CP0_CERRD_DATA_SBE     (1 << 28)
 #define CP0_CERRD_DATA_DBE     (1 << 27)
 #define CP0_CERRD_EXTERNAL     (1 << 26)
-#define CP0_CERRD_LOAD         (1 << 25)
-#define CP0_CERRD_STORE        (1 << 24)
+#define CP0_CERRD_LOAD        (1 << 25)
+#define CP0_CERRD_STORE               (1 << 24)
 #define CP0_CERRD_FILLWB       (1 << 23)
 #define CP0_CERRD_COHERENCY    (1 << 22)
 #define CP0_CERRD_DUPTAG       (1 << 21)
    (CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
 #define CP0_CERRD_TYPES \
    (CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
-#define CP0_CERRD_DATA         (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
+#define CP0_CERRD_DATA        (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
 
-static uint32_t        extract_ic(unsigned short addr, int data);
-static uint32_t        extract_dc(unsigned short addr, int data);
+static uint32_t extract_ic(unsigned short addr, int data);
+static uint32_t extract_dc(unsigned short addr, int data);
 
 static inline void breakout_errctl(unsigned int val)
 {
@@ -209,11 +209,11 @@ asmlinkage void sb1_cache_error(void)
          "=r" (dpahi), "=r" (dpalo), "=r" (eepc));
 
        cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
-       printk(" c0_errorepc ==   %08x\n", eepc);
-       printk(" c0_errctl   ==   %08x", errctl);
+       printk(" c0_errorepc ==   %08x\n", eepc);
+       printk(" c0_errctl   ==   %08x", errctl);
        breakout_errctl(errctl);
        if (errctl & CP0_ERRCTL_ICACHE) {
-               printk(" c0_cerr_i   ==   %08x", cerr_i);
+               printk(" c0_cerr_i   ==   %08x", cerr_i);
                breakout_cerri(cerr_i);
                if (CP0_CERRI_IDX_VALID(cerr_i)) {
                        /* Check index of EPC, allowing for delay slot */
@@ -229,7 +229,7 @@ asmlinkage void sb1_cache_error(void)
                }
        }
        if (errctl & CP0_ERRCTL_DCACHE) {
-               printk(" c0_cerr_d   ==   %08x", cerr_d);
+               printk(" c0_cerr_d   ==   %08x", cerr_d);
                breakout_cerrd(cerr_d);
                if (CP0_CERRD_DPA_VALID(cerr_d)) {
                        printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
@@ -256,7 +256,7 @@ asmlinkage void sb1_cache_error(void)
        /*
         * Calling panic() when a fatal cache error occurs scrambles the
         * state of the system (and the cache), making it difficult to
-        * investigate after the fact.  However, if you just stall the CPU,
+        * investigate after the fact.  However, if you just stall the CPU,
         * the other CPU may keep on running, which is typically very
         * undesirable.
         */
@@ -411,7 +411,7 @@ static uint32_t extract_ic(unsigned short addr, int data)
                                "       dmfc0  $1, $28, 1\n\t"
                                "       dsrl32 %1, $1, 0 \n\t"
                                "       sll    %2, $1, 0 \n\t"
-                               "       .set    pop         \n"
+                               "       .set    pop         \n"
                                : "=r" (datahi), "=r" (insta), "=r" (instb)
                                : "r" ((way << 13) | addr | (offset << 3)));
                                predecode = (datahi >> 8) & 0xff;
@@ -441,8 +441,8 @@ static uint8_t dc_ecc(uint64_t dword)
 {
        uint64_t t;
        uint32_t w;
-       uint8_t  p;
-       int      i;
+       uint8_t  p;
+       int      i;
 
        p = 0;
        for (i = 7; i >= 0; i--)
index e743622..45dff5c 100644 (file)
 #include <asm/stackframe.h>
 
 /*
- * Game over.  Go to the button.  Press gently.  Swear where allowed by
+ * Game over.  Go to the button.  Press gently.         Swear where allowed by
  * legislation.
  */
        LEAF(except_vec2_generic)
        .set    noreorder
        .set    noat
-       .set    mips0
+       .set    mips0
        /*
         * This is a very bad place to be.  Our cache error
         * detection has triggered.  If we have write-back data
-        * in the cache, we may not be able to recover.  As a
+        * in the cache, we may not be able to recover.  As a
         * first-order desperate measure, turn off KSEG0 cacheing.
         */
        mfc0    k0,CP0_CONFIG
index 3db8553..9029092 100644 (file)
@@ -18,7 +18,7 @@
  */
        LEAF(except_vec2_octeon)
 
-       .set    push
+       .set    push
        .set    mips64r2
        .set    noreorder
        .set    noat
        /* due to an errata we need to read the COP0 CacheErr (Dcache)
         * before any cache/DRAM access  */
 
-       rdhwr   k0, $0        /* get core_id */
-       PTR_LA  k1, cache_err_dcache
-       sll     k0, k0, 3
+       rdhwr   k0, $0        /* get core_id */
+       PTR_LA  k1, cache_err_dcache
+       sll     k0, k0, 3
        PTR_ADDU k1, k0, k1    /* k1 = &cache_err_dcache[core_id] */
 
-       dmfc0   k0, CP0_CACHEERR, 1
-       sd      k0, (k1)
-       dmtc0   $0, CP0_CACHEERR, 1
+       dmfc0   k0, CP0_CACHEERR, 1
+       sd      k0, (k1)
+       dmtc0   $0, CP0_CACHEERR, 1
 
-        /* check whether this is a nested exception */
-       mfc0    k1, CP0_STATUS
-       andi    k1, k1, ST0_EXL
-       beqz    k1, 1f
+       /* check whether this is a nested exception */
+       mfc0    k1, CP0_STATUS
+       andi    k1, k1, ST0_EXL
+       beqz    k1, 1f
         nop
        j       cache_parity_error_octeon_non_recoverable
         nop
 1:     j       handle_cache_err
         nop
 
-       .set    pop
+       .set    pop
        END(except_vec2_octeon)
 
  /* We need to jump to handle_cache_err so that the previous handler
   * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
-  * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached).  */
+  * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
        LEAF(handle_cache_err)
-       .set    push
-        .set    noreorder
-        .set    noat
+       .set    push
+       .set    noreorder
+       .set    noat
 
        SAVE_ALL
        KMODE
-       jal     cache_parity_error_octeon_recoverable
+       jal     cache_parity_error_octeon_recoverable
        nop
-       j       ret_from_exception
+       j       ret_from_exception
        nop
 
        .set pop
index 89c412b..fe1d887 100644 (file)
@@ -24,9 +24,9 @@
 #include <asm/cacheops.h>
 #include <asm/sibyte/board.h>
 
-#define C0_ERRCTL     $26             /* CP0: Error info */
-#define C0_CERR_I     $27             /* CP0: Icache error */
-#define C0_CERR_D     $27,1           /* CP0: Dcache error */
+#define C0_ERRCTL     $26            /* CP0: Error info */
+#define C0_CERR_I     $27            /* CP0: Icache error */
+#define C0_CERR_D     $27,1          /* CP0: Dcache error */
 
        /*
         * Based on SiByte sample software cache-err/cerr.S
@@ -88,7 +88,7 @@ attempt_recovery:
        /*
         * k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31.  Any
         * Dcache errors we can recover from will take more extensive
-        * processing.  For now, they are considered "unrecoverable".
+        * processing.  For now, they are considered "unrecoverable".
         * Note that 'DC' becoming set (outside of ERL mode) will
         * cause 'IC' to clear; so if there's an Icache error, we'll
         * only find out about it if we recover from this error and
index 3fab204..f9ef838 100644 (file)
@@ -4,7 +4,7 @@
  * for more details.
  *
  * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2000, 2001, 06         Ralf Baechle <ralf@linux-mips.org>
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
 
index ddcec1e..0fead53 100644 (file)
@@ -52,7 +52,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
 
 #ifdef CONFIG_KPROBES
        /*
-        * This is to notify the fault handler of the kprobes.  The
+        * This is to notify the fault handler of the kprobes.  The
         * exception code is redundant as it is also carried in REGS,
         * but we pass it anyhow.
         */
@@ -216,7 +216,7 @@ bad_area_nosemaphore:
        }
 
 no_context:
-       /* Are we prepared to handle this kernel fault?  */
+       /* Are we prepared to handle this kernel fault?  */
        if (fixup_exception(regs)) {
                current->thread.cp0_baduaddr = address;
                return;
index dcfd573..d4ea5c9 100644 (file)
@@ -249,7 +249,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * @nr_pages:  number of pages from start to pin
  * @write:     whether pages will be written to
  * @pages:     array that receives pointers to the pages pinned.
- *             Should be at least nr_pages long.
+ *             Should be at least nr_pages long.
  *
  * Attempt to pin user pages in memory without taking mm->mmap_sem.
  * If not successful, it will fall back to taking the lock and
index be9acb2..6792925 100644 (file)
@@ -66,7 +66,7 @@
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
+ * when needed.         This is necessary only on R4000 / R4400 SC and MC versions
  * where we have to avoid VCED / VECI exceptions for good performance at
  * any price.  Since page is never written to after the initialization we
  * don't have to care about aliases on other CPUs.
@@ -380,7 +380,7 @@ void __init mem_init(void)
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
        totalram_pages += free_all_bootmem();
-       totalram_pages -= setup_zero_pages();   /* Setup zeroed pages.  */
+       totalram_pages -= setup_zero_pages();   /* Setup zeroed pages.  */
 
        reservedpages = ram = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
index cacfd31..7f840bc 100644 (file)
@@ -22,7 +22,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
        phys_t end;
        unsigned long pfn;
        pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
-                                  | __WRITEABLE | flags);
+                                  | __WRITEABLE | flags);
 
        address &= ~PMD_MASK;
        end = address + size;
@@ -185,7 +185,7 @@ void __iounmap(const volatile void __iomem *addr)
        if (!p)
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
 
-        kfree(p);
+       kfree(p);
 }
 
 EXPORT_SYMBOL(__ioremap);
index 8e666c5..a29fba5 100644 (file)
@@ -271,7 +271,7 @@ void __cpuinit build_clear_page(void)
                uasm_i_lui(&buf, AT, 0xa000);
 
        off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
-                               * cache_line_size : 0;
+                               * cache_line_size : 0;
        while (off) {
                build_clear_pref(&buf, -off);
                off -= cache_line_size;
@@ -417,13 +417,13 @@ void __cpuinit build_copy_page(void)
                uasm_i_lui(&buf, AT, 0xa000);
 
        off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
-                               cache_line_size : 0;
+                               cache_line_size : 0;
        while (off) {
                build_copy_load_pref(&buf, -off);
                off -= cache_line_size;
        }
        off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
-                               cache_line_size : 0;
+                               cache_line_size : 0;
        while (off) {
                build_copy_store_pref(&buf, -off);
                off -= cache_line_size;
index ee331bb..e8adc00 100644 (file)
@@ -24,7 +24,7 @@ void pgd_init(unsigned long page)
        entry = (unsigned long)invalid_pmd_table;
 #endif
 
-       p = (unsigned long *) page;
+       p = (unsigned long *) page;
        end = p + PTRS_PER_PGD;
 
        do {
@@ -45,7 +45,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
 {
        unsigned long *p, *end;
 
-       p = (unsigned long *) addr;
+       p = (unsigned long *) addr;
        end = p + PTRS_PER_PMD;
 
        do {
index 1eb708e..c6aaed9 100644 (file)
@@ -159,7 +159,7 @@ static inline int __init indy_sc_probe(void)
 }
 
 /* XXX Check with wje if the Indy caches can differenciate between
-   writeback + invalidate and just invalidate.  */
+   writeback + invalidate and just invalidate. */
 static struct bcache_ops indy_sc_ops = {
        .bc_enable = indy_sc_enable,
        .bc_disable = indy_sc_disable,
index 8d90ff2..8bc6772 100644 (file)
@@ -58,7 +58,7 @@ static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
 
 static void r5k_sc_enable(void)
 {
-        unsigned long flags;
+       unsigned long flags;
 
        local_irq_save(flags);
        set_c0_config(R5K_CONF_SE);
@@ -68,7 +68,7 @@ static void r5k_sc_enable(void)
 
 static void r5k_sc_disable(void)
 {
-        unsigned long flags;
+       unsigned long flags;
 
        local_irq_save(flags);
        blast_r5000_scache();
index 2a7c972..493131c 100644 (file)
@@ -424,7 +424,7 @@ void __cpuinit tlb_init(void)
                write_c0_pagegrain(pg);
        }
 
-        /* From this point on the ARC firmware is dead.  */
+       /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();
 
        /* Did I tell you that ARC SUCKS?  */
index 1c8ac49..820e661 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Synthesize TLB refill handlers at runtime.
  *
- * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
- * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
+ * Copyright (C) 2005, 2007, 2008, 2009         Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  * Copyright (C) 2011  MIPS Technologies, Inc.
@@ -212,7 +212,7 @@ static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
 /*
  * pgtable bits are assigned dynamically depending on processor feature
  * and statically based on kernel configuration.  This spits out the actual
- * values the kernel is using.  Required to make sense from disassembled
+ * values the kernel is using. Required to make sense from disassembled
  * TLB exception handlers.
  */
 static void output_pgtable_bits_defines(void)
@@ -464,8 +464,8 @@ static u32 final_handler[64] __cpuinitdata;
  * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
  * 2. A timing hazard exists for the TLBP instruction.
  *
- *      stalling_instruction
- *      TLBP
+ *     stalling_instruction
+ *     TLBP
  *
  * The JTLB is being read for the TLBP throughout the stall generated by the
  * previous instruction. This is not really correct as the stalling instruction
@@ -476,7 +476,7 @@ static u32 final_handler[64] __cpuinitdata;
  * The software work-around is to not allow the instruction preceding the TLBP
  * to stall - make it an NOP or some other instruction guaranteed not to stall.
  *
- * Errata 2 will not be fixed.  This errata is also on the R5000.
+ * Errata 2 will not be fixed. This errata is also on the R5000.
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
@@ -581,6 +581,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case CPU_4KC:
        case CPU_4KEC:
        case CPU_M14KC:
+       case CPU_M14KEC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
@@ -748,7 +749,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
         */
        small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
 
-       /* We can clobber tmp.  It isn't used after this.*/
+       /* We can clobber tmp.  It isn't used after this.*/
        if (!small_sequence)
                uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
@@ -830,12 +831,12 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                /* Clear lower 23 bits of context. */
                uasm_i_dins(p, ptr, 0, 0, 23);
 
-               /* 1 0  1 0 1  << 6  xkphys cached */
+               /* 1 0  1 0 1  << 6  xkphys cached */
                uasm_i_ori(p, ptr, ptr, 0x540);
                uasm_i_drotr(p, ptr, ptr, 11);
        }
 #elif defined(CONFIG_SMP)
-# ifdef  CONFIG_MIPS_MT_SMTC
+# ifdef         CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
@@ -955,7 +956,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
        /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
-#ifdef  CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
@@ -965,7 +966,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 #else
        /*
         * smp_processor_id() << 3 is stored in CONTEXT.
-         */
+        */
        uasm_i_mfc0(p, ptr, C0_CONTEXT);
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_srl(p, ptr, ptr, 23);
@@ -1153,7 +1154,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 
        if (pgd_reg == -1) {
                vmalloc_branch_delay_filled = 1;
-               /* 1 0  1 0 1  << 6  xkphys cached */
+               /* 1 0  1 0 1  << 6  xkphys cached */
                uasm_i_ori(p, ptr, ptr, 0x540);
                uasm_i_drotr(p, ptr, ptr, 11);
        }
@@ -1171,9 +1172,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
        uasm_l_vmalloc_done(l, *p);
 
        /*
-        *                         tmp          ptr
-        * fall-through case =   badvaddr  *pgd_current
-        * vmalloc case      =   badvaddr  swapper_pg_dir
+        *                         tmp          ptr
+        * fall-through case =   badvaddr  *pgd_current
+        * vmalloc case      =   badvaddr  swapper_pg_dir
         */
 
        if (vmalloc_branch_delay_filled)
@@ -1212,7 +1213,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
        uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
        /*
         * The in the LWX case we don't want to do the load in the
-        * delay slot.  It cannot issue in the same cycle and may be
+        * delay slot.  It cannot issue in the same cycle and may be
         * speculative and unneeded.
         */
        if (use_lwx_insns())
index 39b8910..942ff6c 100644 (file)
@@ -7,7 +7,7 @@
  * support a subset of instructions, and does not try to hide pipeline
  * effects like branch delay slots.
  *
- * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
  * Copyright (C) 2005, 2007  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  */
@@ -119,30 +119,30 @@ static struct insn insn_table[] __uasminitdata = {
        { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
        { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
        { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
        { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
        { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
        { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-       { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
+       { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
        { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
        { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
        { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
-       { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+       { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
        { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
        { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
        { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
-       { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
        { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
        { insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
-       { insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
+       { insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
        { insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
        { insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
@@ -345,7 +345,7 @@ Ip_u2u1msbu3(op)                                    \
 }                                                      \
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
-#define I_u2u1msbdu3(op)                               \
+#define I_u2u1msbdu3(op)                               \
 Ip_u2u1msbu3(op)                                       \
 {                                                      \
        build_insn(buf, insn##op, b, a, d-1, c);        \
index 469d9b0..1e47844 100644 (file)
@@ -70,12 +70,12 @@ void amon_cpu_start(int cpu,
        launch->sp = sp;
        launch->a0 = a0;
 
-       smp_wmb();              /* Target must see parameters before go */
+       smp_wmb();              /* Target must see parameters before go */
        launch->flags |= LAUNCH_FGO;
-       smp_wmb();              /* Target must see go before we poll  */
+       smp_wmb();              /* Target must see go before we poll  */
 
        while ((launch->flags & LAUNCH_FGONE) == 0)
                ;
-       smp_rmb();      /* Target will be updating flags soon */
+       smp_rmb();      /* Target will be updating flags soon */
        pr_debug("launch: cpu%d gone!\n", cpu);
 }
index 1871c30..5576a30 100644 (file)
@@ -46,7 +46,7 @@ void  __init prom_init_cmdline(void)
 
        cp = &(arcs_cmdline[0]);
        while(actr < prom_argc) {
-               strcpy(cp, prom_argv(actr));
+               strcpy(cp, prom_argv(actr));
                cp += strlen(prom_argv(actr));
                *cp++ = ' ';
                actr++;
index 7c8828f..9bc58a2 100644 (file)
@@ -37,10 +37,10 @@ void mips_display_message(const char *str)
                display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
 
        for (i = 0; i <= 14; i=i+2) {
-                if (*str)
-                        __raw_writel(*str++, display + i);
+                if (*str)
+                        __raw_writel(*str++, display + i);
                 else
-                        __raw_writel(' ', display + i);
+                        __raw_writel(' ', display + i);
        }
 }
 
index 27a6cdb..c2cbce9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1999, 2000, 2004, 2005  MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
  *     All rights reserved.
  *     Authors: Carsten Langgaard <carstenl@mips.com>
  *              Maciej W. Rozycki <macro@mips.com>
@@ -110,20 +110,20 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
 
 int get_ethernet_addr(char *ethernet_addr)
 {
-        char *ethaddr_str;
+       char *ethaddr_str;
 
-        ethaddr_str = prom_getenv("ethaddr");
+       ethaddr_str = prom_getenv("ethaddr");
        if (!ethaddr_str) {
-               printk("ethaddr not set in boot prom\n");
+               printk("ethaddr not set in boot prom\n");
                return -1;
        }
        str2eaddr(ethernet_addr, ethaddr_str);
 
        if (init_debug > 1) {
-               int i;
+               int i;
                printk("get_ethernet_addr: ");
-               for (i=0; i<5; i++)
-                       printk("%02x:", (unsigned char)*(ethernet_addr+i));
+               for (i=0; i<5; i++)
+                       printk("%02x:", (unsigned char)*(ethernet_addr+i));
                printk("%02x\n", *(ethernet_addr+i));
        }
 
index 647b863..e364af7 100644 (file)
@@ -84,10 +84,10 @@ static inline int mips_pcibios_iack(void)
 
                /* Flush Bonito register block */
                (void) BONITO_PCIMAP_CFG;
-               iob();    /* sync */
+               iob();    /* sync */
 
                irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
-               iob();    /* sync */
+               iob();    /* sync */
                irq &= 0xff;
                BONITO_PCIMAP_CFG = 0;
                break;
@@ -136,7 +136,7 @@ static void malta_ipi_irqdispatch(void)
 
        irq = gic_get_int();
        if (irq < 0)
-               return;  /* interrupt has already been cleared */
+               return;  /* interrupt has already been cleared */
 
        do_IRQ(MIPS_GIC_IRQ_BASE + irq);
 }
@@ -149,7 +149,7 @@ static void corehi_irqdispatch(void)
        struct pt_regs *regs = get_irq_regs();
 
        printk(KERN_EMERG "CoreHI interrupt, shouldn't happen, we die here!\n");
-       printk(KERN_EMERG "epc   : %08lx\nStatus: %08lx\n"
+       printk(KERN_EMERG "epc   : %08lx\nStatus: %08lx\n"
                        "Cause : %08lx\nbadVaddr : %08lx\n",
                        regs->cp0_epc, regs->cp0_status,
                        regs->cp0_cause, regs->cp0_badvaddr);
@@ -249,20 +249,20 @@ static inline unsigned int irq_ffs(unsigned int pending)
  * on hardware interrupt 0 (MIPS IRQ 2)) like:
  *
  *     MIPS IRQ        Source
- *      --------        ------
- *             0       Software (ignored)
- *             1        Software (ignored)
- *             2        Combined hardware interrupt (hw0)
- *             3        Hardware (ignored)
- *             4        Hardware (ignored)
- *             5        Hardware (ignored)
- *             6        Hardware (ignored)
- *             7        R4k timer (what we use)
+ *     --------        ------
+ *            0        Software (ignored)
+ *            1        Software (ignored)
+ *            2        Combined hardware interrupt (hw0)
+ *            3        Hardware (ignored)
+ *            4        Hardware (ignored)
+ *            5        Hardware (ignored)
+ *            6        Hardware (ignored)
+ *            7        R4k timer (what we use)
  *
  * We handle the IRQ according to _our_ priority which is:
  *
- * Highest ----     R4k Timer
- * Lowest  ----     Combined hardware interrupt
+ * Highest ----            R4k Timer
+ * Lowest  ----            Combined hardware interrupt
  *
  * then we just return, if multiple IRQs are pending then we will just take
  * another exception, big deal.
@@ -396,7 +396,7 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
 
 static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { X, X,            X,           X,              0 },
-       { X, X,            X,           X,              0 },
+       { X, X,            X,           X,              0 },
        { X, X,            X,           X,              0 },
        { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
        { 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
@@ -410,7 +410,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
        { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
        { 0, GIC_CPU_NMI,  GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
        { 0, GIC_CPU_NMI,  GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
-       { X, X,            X,           X,              0 },
+       { X, X,            X,           X,              0 },
        /* The remainder of this table is initialised by fill_ipi_map */
 };
 #undef X
@@ -634,7 +634,7 @@ void malta_be_init(void)
 
 static char *tr[8] = {
        "mem",  "gcr",  "gic",  "mmio",
-       "0x04", "0x05", "0x06", "0x07"
+       "0x04", "0x05", "0x06", "0x07"
 };
 
 static char *mcmd[32] = {
@@ -673,10 +673,10 @@ static char *mcmd[32] = {
 };
 
 static char *core[8] = {
-       "Invalid/OK",   "Invalid/Data",
+       "Invalid/OK",   "Invalid/Data",
        "Shared/OK",    "Shared/Data",
        "Modified/OK",  "Modified/Data",
-       "Exclusive/OK", "Exclusive/Data"
+       "Exclusive/OK", "Exclusive/Data"
 };
 
 static char *causes[32] = {
index a96d281..f3d43aa 100644 (file)
@@ -47,7 +47,7 @@ static char *mtypes[3] = {
 };
 #endif
 
-/* determined physical memory size, not overridden by command line args  */
+/* determined physical memory size, not overridden by command line args         */
 unsigned long physical_memsize = 0L;
 
 static struct prom_pmemblock * __init prom_getmdesc(void)
@@ -158,7 +158,7 @@ void __init prom_meminit(void)
                size = p->size;
 
                add_memory_region(base, size, type);
-                p++;
+               p++;
        }
 }
 
index 2147cb3..37134dd 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1999, 2000, 2004, 2005  MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
  *     All rights reserved.
  *     Authors: Carsten Langgaard <carstenl@mips.com>
  *              Maciej W. Rozycki <macro@mips.com>
@@ -127,7 +127,7 @@ void __init mips_pcibios_init(void)
                        map = map1;
                }
                mask = ~(start ^ end);
-                /* We don't support remapping with a discontiguous mask.  */
+               /* We don't support remapping with a discontiguous mask.  */
                BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
                       mask != ~((mask & -mask) - 1));
                gt64120_mem_resource.start = start;
@@ -144,7 +144,7 @@ void __init mips_pcibios_init(void)
                map = GT_READ(GT_PCI0IOREMAP_OFS);
                end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
                mask = ~(start ^ end);
-                /* We don't support remapping with a discontiguous mask.  */
+               /* We don't support remapping with a discontiguous mask.  */
                BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
                       mask != ~((mask & -mask) - 1));
                gt64120_io_resource.start = map & mask;
index 7473217..132f866 100644 (file)
@@ -93,7 +93,7 @@ static struct mtd_partition malta_mtd_partitions[] = {
                .mask_flags =   MTD_WRITEABLE
        }, {
                .name =         "User FS",
-               .offset =       0x100000,
+               .offset =       0x100000,
                .size =         0x2e0000
        }, {
                .name =         "Board Config",
index 2e28f65..200f64d 100644 (file)
@@ -78,9 +78,9 @@ const char *get_system_type(void)
 }
 
 #if defined(CONFIG_MIPS_MT_SMTC)
-const char display_string[] = "       SMTC LINUX ON MALTA       ";
+const char display_string[] = "              SMTC LINUX ON MALTA       ";
 #else
-const char display_string[] = "        LINUX ON MALTA       ";
+const char display_string[] = "               LINUX ON MALTA       ";
 #endif /* CONFIG_MIPS_MT_SMTC */
 
 #ifdef CONFIG_BLK_DEV_FD
index 1efc8c3..becbf47 100644 (file)
@@ -126,7 +126,7 @@ int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
         * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
         * that signal is brought to IP2 of both VPEs. To avoid racing
         * concurrent interrupt service events, IP2 is enabled only on
-        * one VPE, by convention VPE0.  So long as no bits are ever
+        * one VPE, by convention VPE0.  So long as no bits are ever
         * cleared in the affinity mask, there will never be any
         * interrupt forwarding.  But as soon as a program or operator
         * sets affinity for one of the related IRQs, we need to make
index 115f5bc..a144b89 100644 (file)
@@ -17,7 +17,6 @@
  *
  * Setting up the clock on the MIPS boards.
  */
-
 #include <linux/types.h>
 #include <linux/i8253.h>
 #include <linux/init.h>
@@ -25,7 +24,6 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/mc146818rtc.h>
 
 #include <asm/hardirq.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
-#include <asm/cpu.h>
 #include <asm/setup.h>
 #include <asm/time.h>
 #include <asm/mc146818-time.h>
 #include <asm/msc01_ic.h>
+#include <asm/gic.h>
 
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/prom.h>
@@ -46,6 +44,7 @@
 #include <asm/mips-boards/maltaint.h>
 
 unsigned long cpu_khz;
+int gic_frequency;
 
 static int mips_cpu_timer_irq;
 static int mips_cpu_perf_irq;
@@ -61,44 +60,50 @@ static void mips_perf_dispatch(void)
        do_IRQ(mips_cpu_perf_irq);
 }
 
+static unsigned int freqround(unsigned int freq, unsigned int amount)
+{
+       freq += amount;
+       freq -= freq % (amount*2);
+       return freq;
+}
+
 /*
- * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect
+ * Estimate CPU and GIC frequencies.
  */
-static unsigned int __init estimate_cpu_frequency(void)
+static void __init estimate_frequencies(void)
 {
-       unsigned int prid = read_c0_prid() & 0xffff00;
-       unsigned int count;
-
        unsigned long flags;
-       unsigned int start;
+       unsigned int count, start;
+       unsigned int giccount = 0, gicstart = 0;
 
        local_irq_save(flags);
 
-       /* Start counter exactly on falling edge of update flag */
+       /* Start counter exactly on falling edge of update flag. */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
 
-       /* Start r4k counter. */
+       /* Initialize counters. */
        start = read_c0_count();
+       if (gic_present)
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
 
-       /* Read counter exactly on falling edge of update flag */
+       /* Read counter exactly on falling edge of update flag. */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
 
-       count = read_c0_count() - start;
+       count = read_c0_count();
+       if (gic_present)
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
 
-       /* restore interrupts */
        local_irq_restore(flags);
 
-       mips_hpt_frequency = count;
-       if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
-           (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
-               count *= 2;
-
-       count += 5000;    /* round */
-       count -= count%10000;
+       count -= start;
+       if (gic_present)
+               giccount -= gicstart;
 
-       return count;
+       mips_hpt_frequency = count;
+       if (gic_present)
+               gic_frequency = giccount;
 }
 
 void read_persistent_clock(struct timespec *ts)
@@ -144,22 +149,34 @@ unsigned int __cpuinit get_c0_compare_int(void)
 
 void __init plat_time_init(void)
 {
-       unsigned int est_freq;
-
-        /* Set Data mode - binary. */
-        CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL);
-
-       est_freq = estimate_cpu_frequency();
+       unsigned int prid = read_c0_prid() & 0xffff00;
+       unsigned int freq;
 
-       printk("CPU frequency %d.%02d MHz\n", est_freq/1000000,
-              (est_freq%1000000)*100/1000000);
+       estimate_frequencies();
 
-        cpu_khz = est_freq / 1000;
+       freq = mips_hpt_frequency;
+       if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+           (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+               freq *= 2;
+       freq = freqround(freq, 5000);
+       pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000,
+              (freq%1000000)*100/1000000);
+       cpu_khz = freq / 1000;
+
+       if (gic_present) {
+               freq = freqround(gic_frequency, 5000);
+               pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
+                      (freq%1000000)*100/1000000);
+               gic_clocksource_init(gic_frequency);
+       } else
+               init_r4k_clocksource();
 
-       mips_scroll_message();
-#ifdef CONFIG_I8253            /* Only Malta has a PIT */
+#ifdef CONFIG_I8253
+       /* Only Malta has a PIT. */
        setup_pit_timer();
 #endif
 
+       mips_scroll_message();
+
        plat_perf_setup();
 }
index 626afea..10ec701 100644 (file)
@@ -5,10 +5,12 @@
 # Copyright (C) 2008 Wind River Systems, Inc.
 #   written by Ralf Baechle <ralf@linux-mips.org>
 #
+# Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+# Steven J. Hill <sjhill@mips.com>
+#
 obj-y                          := sead3-lcd.o sead3-cmdline.o \
                                   sead3-display.o sead3-init.o sead3-int.o \
-                                  sead3-mtd.o sead3-net.o \
-                                  sead3-memory.o sead3-platform.o \
+                                  sead3-mtd.o sead3-net.o sead3-platform.o \
                                   sead3-reset.o sead3-setup.o sead3-time.o
 
 obj-y                          += sead3-i2c-dev.o sead3-i2c.o \
@@ -17,3 +19,7 @@ obj-y                         += sead3-i2c-dev.o sead3-i2c.o \
 
 obj-$(CONFIG_EARLY_PRINTK)     += sead3-console.o
 obj-$(CONFIG_USB_EHCI_HCD)     += sead3-ehci.o
+obj-$(CONFIG_OF)               += sead3.dtb.o
+
+$(obj)/%.dtb: $(obj)/%.dts
+       $(call if_changed,dtc)
index a95ac59..322148c 100644 (file)
@@ -33,12 +33,12 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
 
 static struct led_classdev sead3_pled = {
        .name           = "sead3::pled",
-       .brightness_set = sead3_pled_set,
+       .brightness_set = sead3_pled_set,
 };
 
 static struct led_classdev sead3_fled = {
        .name           = "sead3::fled",
-       .brightness_set = sead3_fled_set,
+       .brightness_set = sead3_fled_set,
 };
 
 #ifdef CONFIG_PM
@@ -125,4 +125,3 @@ module_exit(sead3_led_exit);
 MODULE_AUTHOR("Kristian Kielhofner <kris@krisk.org>");
 MODULE_DESCRIPTION("SEAD3 LED driver");
 MODULE_LICENSE("GPL");
-
index b367391..2ddef19 100644 (file)
@@ -10,8 +10,8 @@
 #include <linux/serial_reg.h>
 #include <linux/io.h>
 
-#define SEAD_UART1_REGS_BASE    0xbf000800   /* ttyS1 = DB9 port */
-#define SEAD_UART0_REGS_BASE    0xbf000900   /* ttyS0 = USB port   */
+#define SEAD_UART1_REGS_BASE   0xbf000800   /* ttyS1 = DB9 port */
+#define SEAD_UART0_REGS_BASE   0xbf000900   /* ttyS0 = USB port   */
 #define PORT(base_addr, offset) ((unsigned int __iomem *)(base_addr+(offset)*4))
 
 static char console_port = 1;
index 8308c7f..e389326 100644 (file)
@@ -21,7 +21,7 @@ static unsigned int max_display_count;
 #define LCD_SETDDRAM                   0x80
 #define LCD_IR_BF                      0x80
 
-const char display_string[] = "               LINUX ON SEAD3               ";
+const char display_string[] = "                      LINUX ON SEAD3               ";
 
 static void scroll_display_message(unsigned long data);
 static DEFINE_TIMER(mips_scroll_timer, scroll_display_message, HZ, 0);
index 7aa2225..1f787a6 100644 (file)
 #include <linux/platform_device.h>
 
 #define PIC32_I2CxCON          0x0000
-#define  PIC32_I2CCON_ON       (1<<15)
-#define  PIC32_I2CCON_ACKDT    (1<<5)
-#define  PIC32_I2CCON_ACKEN    (1<<4)
-#define  PIC32_I2CCON_RCEN     (1<<3)
-#define  PIC32_I2CCON_PEN      (1<<2)
-#define  PIC32_I2CCON_RSEN     (1<<1)
-#define  PIC32_I2CCON_SEN      (1<<0)
+#define         PIC32_I2CCON_ON        (1<<15)
+#define         PIC32_I2CCON_ACKDT     (1<<5)
+#define         PIC32_I2CCON_ACKEN     (1<<4)
+#define         PIC32_I2CCON_RCEN      (1<<3)
+#define         PIC32_I2CCON_PEN       (1<<2)
+#define         PIC32_I2CCON_RSEN      (1<<1)
+#define         PIC32_I2CCON_SEN       (1<<0)
 #define PIC32_I2CxCONCLR       0x0004
 #define PIC32_I2CxCONSET       0x0008
 #define PIC32_I2CxSTAT         0x0010
 #define PIC32_I2CxSTATCLR      0x0014
-#define  PIC32_I2CSTAT_ACKSTAT (1<<15)
-#define  PIC32_I2CSTAT_TRSTAT  (1<<14)
-#define  PIC32_I2CSTAT_BCL     (1<<10)
-#define  PIC32_I2CSTAT_IWCOL   (1<<7)
-#define  PIC32_I2CSTAT_I2COV   (1<<6)
+#define         PIC32_I2CSTAT_ACKSTAT  (1<<15)
+#define         PIC32_I2CSTAT_TRSTAT   (1<<14)
+#define         PIC32_I2CSTAT_BCL      (1<<10)
+#define         PIC32_I2CSTAT_IWCOL    (1<<7)
+#define         PIC32_I2CSTAT_I2COV    (1<<6)
 #define PIC32_I2CxBRG          0x0040
 #define PIC32_I2CxTRN          0x0050
 #define PIC32_I2CxRCV          0x0060
 
 static DEFINE_SPINLOCK(pic32_bus_lock);
 
-static void __iomem *bus_xfer   = (void __iomem *)0xbf000600;
+static void __iomem *bus_xfer  = (void __iomem *)0xbf000600;
 static void __iomem *bus_status = (void __iomem *)0xbf000060;
 
-#define DELAY()        udelay(100)
+#define DELAY() udelay(100)
 
 static inline unsigned int ioready(void)
 {
index a958cad..f95abaa 100644 (file)
@@ -77,7 +77,6 @@ void __init prom_init(void)
        board_ejtag_handler_setup = mips_ejtag_setup;
 
        prom_init_cmdline();
-       prom_meminit();
 #ifdef CONFIG_EARLY_PRINTK
        if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL)
                prom_init_early_console(0);
@@ -89,3 +88,7 @@ void __init prom_init(void)
                strcat(prom_getcmdline(), " console=ttyS0,38400n8r");
 #endif
 }
+
+void prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/mti-sead3/sead3-memory.c b/arch/mips/mti-sead3/sead3-memory.c
deleted file mode 100644 (file)
index da92441..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/bootmem.h>
-
-#include <asm/bootinfo.h>
-#include <asm/sections.h>
-#include <asm/mips-boards/prom.h>
-
-enum yamon_memtypes {
-       yamon_dontuse,
-       yamon_prom,
-       yamon_free,
-};
-
-static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-/* determined physical memory size, not overridden by command line args  */
-unsigned long physical_memsize = 0L;
-
-struct prom_pmemblock * __init prom_getmdesc(void)
-{
-       char *memsize_str, *ptr;
-       unsigned int memsize;
-       static char cmdline[COMMAND_LINE_SIZE] __initdata;
-       long val;
-       int tmp;
-
-       /* otherwise look in the environment */
-       memsize_str = prom_getenv("memsize");
-       if (!memsize_str) {
-               pr_warn("memsize not set in boot prom, set to default 32Mb\n");
-               physical_memsize = 0x02000000;
-       } else {
-               tmp = kstrtol(memsize_str, 0, &val);
-               physical_memsize = (unsigned long)val;
-       }
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
-       /* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
-          word of physical memory */
-       physical_memsize -= PAGE_SIZE;
-#endif
-
-       /* Check the command line for a memsize directive that overrides
-          the physical/default amount */
-       strcpy(cmdline, arcs_cmdline);
-       ptr = strstr(cmdline, "memsize=");
-       if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
-               ptr = strstr(ptr, " memsize=");
-
-       if (ptr)
-               memsize = memparse(ptr + 8, &ptr);
-       else
-               memsize = physical_memsize;
-
-       memset(mdesc, 0, sizeof(mdesc));
-
-       mdesc[0].type = yamon_dontuse;
-       mdesc[0].base = 0x00000000;
-       mdesc[0].size = 0x00001000;
-
-       mdesc[1].type = yamon_prom;
-       mdesc[1].base = 0x00001000;
-       mdesc[1].size = 0x000ef000;
-
-       /*
-        * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
-        * south bridge and PCI access always forwarded to the ISA Bus and
-        * BIOSCS# is always generated.
-        * This mean that this area can't be used as DMA memory for PCI
-        * devices.
-        */
-       mdesc[2].type = yamon_dontuse;
-       mdesc[2].base = 0x000f0000;
-       mdesc[2].size = 0x00010000;
-
-       mdesc[3].type = yamon_dontuse;
-       mdesc[3].base = 0x00100000;
-       mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
-               mdesc[3].base;
-
-       mdesc[4].type = yamon_free;
-       mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
-       mdesc[4].size = memsize - mdesc[4].base;
-
-       return &mdesc[0];
-}
-
-static int __init prom_memtype_classify(unsigned int type)
-{
-       switch (type) {
-       case yamon_free:
-               return BOOT_MEM_RAM;
-       case yamon_prom:
-               return BOOT_MEM_ROM_DATA;
-       default:
-               return BOOT_MEM_RESERVED;
-       }
-}
-
-void __init prom_meminit(void)
-{
-       struct prom_pmemblock *p;
-
-       p = prom_getmdesc();
-
-       while (p->size) {
-               long type;
-               unsigned long base, size;
-
-               type = prom_memtype_classify(p->type);
-               base = p->base;
-               size = p->size;
-
-               add_memory_region(base, size, type);
-               p++;
-       }
-}
-
-void __init prom_free_prom_memory(void)
-{
-       unsigned long addr;
-       int i;
-
-       for (i = 0; i < boot_mem_map.nr_map; i++) {
-               if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
-                       continue;
-
-               addr = boot_mem_map.map[i].addr;
-               free_init_pages("prom memory",
-                               addr, addr + boot_mem_map.map[i].size);
-       }
-}
index 04d704d..dd11e7e 100644 (file)
@@ -19,8 +19,8 @@ static struct smsc911x_platform_config sead3_smsc911x_data = {
 
 struct resource sead3_net_resourcess[] = {
        {
-               .start                  = 0x1f010000,
-               .end                    = 0x1f01ffff,
+               .start                  = 0x1f010000,
+               .end                    = 0x1f01ffff,
                .flags                  = IORESOURCE_MEM
        },
        {
index 9f0d89b..eb2bf93 100644 (file)
 #define PIC32_SYSRD    0x02
 #define PIC32_WR       0x10
 #define PIC32_SYSWR    0x20
-#define PIC32_IRQ_CLR   0x40
+#define PIC32_IRQ_CLR  0x40
 #define PIC32_STATUS   0x80
 
-#define DELAY()        udelay(100)     /* FIXME: needed? */
+#define DELAY() udelay(100)    /* FIXME: needed? */
 
 /* spinlock to ensure atomic access to PIC32 */
 static DEFINE_SPINLOCK(pic32_bus_lock);
 
 /* FIXME: io_remap these */
-static void __iomem *bus_xfer   = (void __iomem *)0xbf000600;
+static void __iomem *bus_xfer  = (void __iomem *)0xbf000600;
 static void __iomem *bus_status = (void __iomem *)0xbf000060;
 
 static inline unsigned int ioready(void)
index 514675e..b921e5e 100644 (file)
 #define PIC32_I2CxCONCLR       0x0004
 #define PIC32_I2CxCONSET       0x0008
 #define PIC32_I2CxCONINV       0x000C
-#define  I2CCON_ON             (1<<15)
-#define  I2CCON_FRZ            (1<<14)
-#define  I2CCON_SIDL           (1<<13)
-#define  I2CCON_SCLREL         (1<<12)
-#define  I2CCON_STRICT         (1<<11)
-#define  I2CCON_A10M           (1<<10)
-#define  I2CCON_DISSLW         (1<<9)
-#define  I2CCON_SMEN           (1<<8)
-#define  I2CCON_GCEN           (1<<7)
-#define  I2CCON_STREN          (1<<6)
-#define  I2CCON_ACKDT          (1<<5)
-#define  I2CCON_ACKEN          (1<<4)
-#define  I2CCON_RCEN           (1<<3)
-#define  I2CCON_PEN            (1<<2)
-#define  I2CCON_RSEN           (1<<1)
-#define  I2CCON_SEN            (1<<0)
+#define         I2CCON_ON              (1<<15)
+#define         I2CCON_FRZ             (1<<14)
+#define         I2CCON_SIDL            (1<<13)
+#define         I2CCON_SCLREL          (1<<12)
+#define         I2CCON_STRICT          (1<<11)
+#define         I2CCON_A10M            (1<<10)
+#define         I2CCON_DISSLW          (1<<9)
+#define         I2CCON_SMEN            (1<<8)
+#define         I2CCON_GCEN            (1<<7)
+#define         I2CCON_STREN           (1<<6)
+#define         I2CCON_ACKDT           (1<<5)
+#define         I2CCON_ACKEN           (1<<4)
+#define         I2CCON_RCEN            (1<<3)
+#define         I2CCON_PEN             (1<<2)
+#define         I2CCON_RSEN            (1<<1)
+#define         I2CCON_SEN             (1<<0)
 
 #define PIC32_I2CxSTAT         0x0010
 #define PIC32_I2CxSTATCLR      0x0014
 #define PIC32_I2CxSTATSET      0x0018
 #define PIC32_I2CxSTATINV      0x001C
-#define  I2CSTAT_ACKSTAT       (1<<15)
-#define  I2CSTAT_TRSTAT                (1<<14)
-#define  I2CSTAT_BCL           (1<<10)
-#define  I2CSTAT_GCSTAT                (1<<9)
-#define  I2CSTAT_ADD10         (1<<8)
-#define  I2CSTAT_IWCOL         (1<<7)
-#define  I2CSTAT_I2COV         (1<<6)
-#define  I2CSTAT_DA            (1<<5)
-#define  I2CSTAT_P             (1<<4)
-#define  I2CSTAT_S             (1<<3)
-#define  I2CSTAT_RW            (1<<2)
-#define  I2CSTAT_RBF           (1<<1)
-#define  I2CSTAT_TBF           (1<<0)
+#define         I2CSTAT_ACKSTAT        (1<<15)
+#define         I2CSTAT_TRSTAT         (1<<14)
+#define         I2CSTAT_BCL            (1<<10)
+#define         I2CSTAT_GCSTAT         (1<<9)
+#define         I2CSTAT_ADD10          (1<<8)
+#define         I2CSTAT_IWCOL          (1<<7)
+#define         I2CSTAT_I2COV          (1<<6)
+#define         I2CSTAT_DA             (1<<5)
+#define         I2CSTAT_P              (1<<4)
+#define         I2CSTAT_S              (1<<3)
+#define         I2CSTAT_RW             (1<<2)
+#define         I2CSTAT_RBF            (1<<1)
+#define         I2CSTAT_TBF            (1<<0)
 
 #define PIC32_I2CxADD          0x0020
 #define PIC32_I2CxADDCLR       0x0024
index 8ad46ad..f012fd1 100644 (file)
@@ -6,6 +6,12 @@
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/bootmem.h>
+
+#include <asm/mips-boards/generic.h>
+#include <asm/prom.h>
 
 int coherentio;                /* 0 => no DMA cache coherency (may be set by user) */
 int hw_coherentio;     /* 0 => no HW DMA cache coherency (reflects real HW) */
@@ -17,4 +23,25 @@ const char *get_system_type(void)
 
 void __init plat_mem_setup(void)
 {
+       /*
+        * Load the builtin devicetree. This causes the chosen node to be
+        * parsed resulting in our memory appearing
+        */
+       __dt_setup_arch(&__dtb_start);
+}
+
+void __init device_tree_init(void)
+{
+       unsigned long base, size;
+
+       if (!initial_boot_params)
+               return;
+
+       base = virt_to_phys((void *)initial_boot_params);
+       size = be32_to_cpu(initial_boot_params->totalsize);
+
+       /* Before we do anything, lets reserve the dt blob */
+       reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+
+       unflatten_device_tree();
 }
index 048e781..239e4e3 100644 (file)
@@ -43,11 +43,11 @@ static unsigned int __init estimate_cpu_frequency(void)
 
        local_irq_save(flags);
 
-       orig = readl(status_reg) & 0x2;               /* get original sample */
+       orig = readl(status_reg) & 0x2;               /* get original sample */
        /* wait for transition */
        while ((readl(status_reg) & 0x2) == orig)
                ;
-       orig = orig ^ 0x2;                            /* flip the bit */
+       orig = orig ^ 0x2;                            /* flip the bit */
 
        write_c0_count(0);
 
@@ -56,7 +56,7 @@ static unsigned int __init estimate_cpu_frequency(void)
                /* wait for transition */
                while ((readl(status_reg) & 0x2) == orig)
                        ;
-               orig = orig ^ 0x2;                            /* flip the bit */
+               orig = orig ^ 0x2;                            /* flip the bit */
                tick++;
        }
 
@@ -71,7 +71,7 @@ static unsigned int __init estimate_cpu_frequency(void)
                (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
                freq *= 2;
 
-       freq += 5000;        /* rounding */
+       freq += 5000;        /* rounding */
        freq -= freq%10000;
 
        return freq ;
diff --git a/arch/mips/mti-sead3/sead3.dts b/arch/mips/mti-sead3/sead3.dts
new file mode 100644 (file)
index 0000000..658f437
--- /dev/null
@@ -0,0 +1,26 @@
+/dts-v1/;
+
+/memreserve/ 0x00000000 0x00001000;    // reserved
+/memreserve/ 0x00001000 0x000ef000;    // ROM data
+/memreserve/ 0x000f0000 0x004cc000;    // reserved
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "mti,sead-3";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mti,mips14KEc", "mti,mips14Kc";
+               };
+       };
+
+       chosen {
+               bootargs = "console=ttyS1,38400 rootdelay=10 root=/dev/sda3";
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x0 0x08000000>;
+       };
+};
index cdfc9ab..fb8eb4c 100644 (file)
@@ -13,5 +13,5 @@ cflags-$(CONFIG_CPU_XLP)      += $(call cc-option,-march=xlp,-march=mips64r2)
 #
 # NETLOGIC processor support
 #
-platform-$(CONFIG_NLM_COMMON)          += netlogic/
-load-$(CONFIG_NLM_COMMON)      += 0xffffffff80100000
+platform-$(CONFIG_NLM_COMMON)  += netlogic/
+load-$(CONFIG_NLM_COMMON)      += 0xffffffff80100000
index 00dcc7a..9f84c60 100644 (file)
@@ -69,7 +69,7 @@
 #else
 #define SMP_IRQ_MASK   0
 #endif
-#define PERCPU_IRQ_MASK        (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
                                (1ull << IRQ_FMN))
 
 struct nlm_pic_irq {
@@ -105,21 +105,23 @@ static void xlp_pic_disable(struct irq_data *d)
 static void xlp_pic_mask_ack(struct irq_data *d)
 {
        struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
-       uint64_t mask = 1ull << pd->picirq;
 
-       write_c0_eirr(mask);            /* ack by writing EIRR */
+       clear_c0_eimr(pd->picirq);
+       ack_c0_eirr(pd->picirq);
 }
 
 static void xlp_pic_unmask(struct irq_data *d)
 {
        struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
 
-       if (!pd)
-               return;
+       BUG_ON(!pd);
 
        if (pd->extra_ack)
                pd->extra_ack(d);
 
+       /* re-enable the intr on this cpu */
+       set_c0_eimr(pd->picirq);
+
        /* Ack is a single write, no need to lock */
        nlm_pic_ack(pd->node->picbase, pd->irt);
 }
@@ -134,32 +136,17 @@ static struct irq_chip xlp_pic = {
 
 static void cpuintr_disable(struct irq_data *d)
 {
-       uint64_t eimr;
-       uint64_t mask = 1ull << d->irq;
-
-       eimr = read_c0_eimr();
-       write_c0_eimr(eimr & ~mask);
+       clear_c0_eimr(d->irq);
 }
 
 static void cpuintr_enable(struct irq_data *d)
 {
-       uint64_t eimr;
-       uint64_t mask = 1ull << d->irq;
-
-       eimr = read_c0_eimr();
-       write_c0_eimr(eimr | mask);
+       set_c0_eimr(d->irq);
 }
 
 static void cpuintr_ack(struct irq_data *d)
 {
-       uint64_t mask = 1ull << d->irq;
-
-       write_c0_eirr(mask);
-}
-
-static void cpuintr_nop(struct irq_data *d)
-{
-       WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq);
+       ack_c0_eirr(d->irq);
 }
 
 /*
@@ -170,9 +157,9 @@ struct irq_chip nlm_cpu_intr = {
        .name           = "XLP-CPU-INTR",
        .irq_enable     = cpuintr_enable,
        .irq_disable    = cpuintr_disable,
-       .irq_mask       = cpuintr_nop,
-       .irq_ack        = cpuintr_nop,
-       .irq_eoi        = cpuintr_ack,
+       .irq_mask       = cpuintr_disable,
+       .irq_ack        = cpuintr_ack,
+       .irq_eoi        = cpuintr_enable,
 };
 
 static void __init nlm_init_percpu_irqs(void)
@@ -230,7 +217,7 @@ static void nlm_init_node_irqs(int node)
                nlm_setup_pic_irq(node, i, i, irt);
                /* set interrupts to first cpu in node */
                nlm_pic_init_irt(nodep->picbase, irt, i,
-                                       node * NLM_CPUS_PER_NODE);
+                                       node * NLM_CPUS_PER_NODE, 0);
                irqmask |= (1ull << i);
        }
        nodep->irqmask = irqmask;
@@ -265,7 +252,7 @@ asmlinkage void plat_irq_dispatch(void)
        int i, node;
 
        node = nlm_nodeid();
-       eirr = read_c0_eirr() & read_c0_eimr();
+       eirr = read_c0_eirr_and_eimr();
 
        i = __ilog2_u64(eirr);
        if (i == -1)
index a080d9e..2bb95dc 100644 (file)
@@ -84,15 +84,19 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 /* IRQ_IPI_SMP_FUNCTION Handler */
 void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
 {
-       write_c0_eirr(1ull << irq);
+       clear_c0_eimr(irq);
+       ack_c0_eirr(irq);
        smp_call_function_interrupt();
+       set_c0_eimr(irq);
 }
 
 /* IRQ_IPI_SMP_RESCHEDULE  handler */
 void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
 {
-       write_c0_eirr(1ull << irq);
+       clear_c0_eimr(irq);
+       ack_c0_eirr(irq);
        scheduler_ipi();
+       set_c0_eimr(irq);
 }
 
 /*
index a0b7487..0265174 100644 (file)
 #include <asm/netlogic/xlp-hal/sys.h>
 #include <asm/netlogic/xlp-hal/cpucontrol.h>
 
-#define        CP0_EBASE       $15
+#define CP0_EBASE      $15
 #define SYS_CPU_COHERENT_BASE(node)    CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
                        XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
                        SYS_CPU_NONCOHERENT_MODE * 4
 
-#define        XLP_AX_WORKAROUND       /* enable Ax silicon workarounds */
+#define XLP_AX_WORKAROUND      /* enable Ax silicon workarounds */
 
 /* Enable XLP features and workarounds in the LSU */
 .macro xlp_config_lsu
 #endif
        mtcr    t1, t0
 
+       li      t0, ICU_DEFEATURE
+       mfcr    t1, t0
+       ori     t1, 0x1000      /* Enable Icache partitioning */
+       mtcr    t1, t0
+
+
 #ifdef XLP_AX_WORKAROUND
        li      t0, SCHED_DEFEATURE
        lui     t1, 0x0100      /* Disable BRU accepting ALU ops */
@@ -85,7 +91,7 @@
        li      t0, LSU_DEBUG_DATA0
        li      t1, LSU_DEBUG_ADDR
        li      t2, 0           /* index */
-       li      t3, 0x1000      /* loop count */
+       li      t3, 0x1000      /* loop count */
 1:
        sll     v0, t2, 5
        mtcr    zero, t0
@@ -134,7 +140,7 @@ FEXPORT(nlm_reset_entry)
        and     k1, k0, k1
        beqz    k1, 1f          /* go to real reset entry */
        nop
-       li      k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+       li      k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
        ld      k0, BOOT_NMI_HANDLER(k1)
        jr      k0
        nop
@@ -235,7 +241,7 @@ EXPORT(nlm_reset_entry_end)
 
 FEXPORT(xlp_boot_core0_siblings)       /* "Master" cpu starts from here */
        xlp_config_lsu
-       dmtc0   sp, $4, 2               /* SP saved in UserLocal */
+       dmtc0   sp, $4, 2               /* SP saved in UserLocal */
        SAVE_ALL
        sync
        /* find the location to which nlm_boot_siblings was relocated */
@@ -301,13 +307,13 @@ NESTED(nlm_rmiboot_preboot, 16, sp)
         */
        li      t0, 0x400
        mfcr    t1, t0
-       li      t2, 6           /* XLR thread mode mask */
+       li      t2, 6           /* XLR thread mode mask */
        nor     t3, t2, zero
        and     t2, t1, t2      /* t2 - current thread mode */
        li      v0, CKSEG1ADDR(RESET_DATA_PHYS)
        lw      v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
        sll     v1, 1
-       beq     v1, t2, 1f      /* same as request value */
+       beq     v1, t2, 1f      /* same as request value */
        nop                     /* nothing to do */
 
        and     t2, t1, t3      /* mask out old thread mode */
index bd3e498..5c56555 100644 (file)
 #include <linux/init.h>
 
 #include <asm/time.h>
+#include <asm/cpu-features.h>
+
 #include <asm/netlogic/interrupt.h>
 #include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
 
 unsigned int __cpuinit get_c0_compare_int(void)
 {
        return IRQ_TIMER;
 }
 
+static cycle_t nlm_get_pic_timer(struct clocksource *cs)
+{
+       uint64_t picbase = nlm_get_node(0)->picbase;
+
+       return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
+}
+
+static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
+{
+       uint64_t picbase = nlm_get_node(0)->picbase;
+
+       return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER);
+}
+
+static struct clocksource csrc_pic = {
+       .name           = "PIC",
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void nlm_init_pic_timer(void)
+{
+       uint64_t picbase = nlm_get_node(0)->picbase;
+
+       nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
+       if (current_cpu_data.cputype == CPU_XLR) {
+               csrc_pic.mask   = CLOCKSOURCE_MASK(32);
+               csrc_pic.read   = nlm_get_pic_timer32;
+       } else {
+               csrc_pic.mask   = CLOCKSOURCE_MASK(64);
+               csrc_pic.read   = nlm_get_pic_timer;
+       }
+       csrc_pic.rating = 1000;
+       clocksource_register_hz(&csrc_pic, PIC_CLK_HZ);
+}
+
 void __init plat_time_init(void)
 {
+       nlm_init_pic_timer();
        mips_hpt_frequency = nlm_get_cpu_frequency();
+       if (current_cpu_type() == CPU_XLR)
+               preset_lpj = mips_hpt_frequency / (3 * HZ);
+       else
+               preset_lpj = mips_hpt_frequency / (2 * HZ);
        pr_info("MIPS counter frequency [%ld]\n",
                        (unsigned long)mips_hpt_frequency);
 }
index e14f423..7628b54 100644 (file)
@@ -20,7 +20,7 @@
                #address-cells = <2>;
                #size-cells = <1>;
                compatible = "simple-bus";
-               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
                          1 0  0 0x16000000  0x01000000>; // GBU chipselects
 
                serial0: serial@30000 {
index 529e747..c68fd40 100644 (file)
@@ -111,8 +111,8 @@ unsigned int nlm_get_core_frequency(int node, int core)
        dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
        pll_divf = ((rstval >> 10) & 0x7f) + 1;
        pll_divr = ((rstval >> 8)  & 0x3) + 1;
-       ext_div  = ((rstval >> 30) & 0x3) + 1;
-       dfs_div  = ((dfsval >> (core * 4)) & 0xf) + 1;
+       ext_div  = ((rstval >> 30) & 0x3) + 1;
+       dfs_div  = ((dfsval >> (core * 4)) & 0xf) + 1;
 
        num = 800000000ULL * pll_divf;
        denom = 3 * pll_divr * ext_div * dfs_div;
index dbe083a..1d0b66c 100644 (file)
@@ -52,7 +52,7 @@ static void nlm_usb_intr_en(int node, int port)
        port_addr = nlm_get_usb_regbase(node, port);
        val = nlm_read_usb_reg(port_addr, USB_INT_EN);
        val = USB_CTRL_INTERRUPT_EN  | USB_OHCI_INTERRUPT_EN |
-               USB_OHCI_INTERRUPT1_EN | USB_CTRL_INTERRUPT_EN  |
+               USB_OHCI_INTERRUPT1_EN | USB_CTRL_INTERRUPT_EN  |
                USB_OHCI_INTERRUPT_EN | USB_OHCI_INTERRUPT2_EN;
        nlm_write_usb_reg(port_addr, USB_INT_EN, val);
 }
index cb90106..abb3e08 100644 (file)
@@ -51,7 +51,7 @@
 #include <asm/netlogic/xlp-hal/xlp.h>
 #include <asm/netlogic/xlp-hal/sys.h>
 
-static int xlp_wakeup_core(uint64_t sysbase, int core)
+static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
 {
        uint32_t coremask, value;
        int count;
@@ -82,36 +82,51 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
        struct nlm_soc_info *nodep;
        uint64_t syspcibase;
        uint32_t syscoremask;
-       int core, n, cpu;
+       int core, n, cpu, count, val;
 
        for (n = 0; n < NLM_NR_NODES; n++) {
                syspcibase = nlm_get_sys_pcibase(n);
                if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
                        break;
 
-               /* read cores in reset from SYS and account for boot cpu */
-               nlm_node_init(n);
+               /* read cores in reset from SYS */
+               if (n != 0)
+                       nlm_node_init(n);
                nodep = nlm_get_node(n);
                syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
-               if (n == 0)
+               /* The boot cpu */
+               if (n == 0) {
                        syscoremask |= 1;
+                       nodep->coremask = 1;
+               }
 
                for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+                       /* we will be on node 0 core 0 */
+                       if (n == 0 && core == 0)
+                               continue;
+
                        /* see if the core exists */
                        if ((syscoremask & (1 << core)) == 0)
                                continue;
 
-                       /* see if at least the first thread is enabled */
+                       /* see if at least the first hw thread is enabled */
                        cpu = (n * NLM_CORES_PER_NODE + core)
                                                * NLM_THREADS_PER_CORE;
                        if (!cpumask_test_cpu(cpu, wakeup_mask))
                                continue;
 
                        /* wake up the core */
-                       if (xlp_wakeup_core(nodep->sysbase, core))
-                               nodep->coremask |= 1u << core;
-                       else
-                               pr_err("Failed to enable core %d\n", core);
+                       if (!xlp_wakeup_core(nodep->sysbase, n, core))
+                               continue;
+
+                       /* core is up */
+                       nodep->coremask |= 1u << core;
+
+                       /* spin until the first hw thread sets its ready */
+                       count = 0x20000000;
+                       do {
+                               val = *(volatile int *)&nlm_cpu_ready[cpu];
+                       } while (val == 0 && --count > 0);
                }
        }
 }
index bed2cff..ed3bf0e 100644 (file)
@@ -164,8 +164,8 @@ static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
        int i, j;
 
        for (i = 0; i < num_core; i++) {
-               cpu[i].start_stn_id     = (8 * i);
-               cpu[i].end_stn_id       = (8 * i + 8);
+               cpu[i].start_stn_id     = (8 * i);
+               cpu[i].end_stn_id       = (8 * i + 8);
 
                for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
                        xlr_board_fmn_config.bucket_size[j] = 32;
@@ -216,6 +216,8 @@ void xlr_board_info_setup(void)
        case PRID_IMP_NETLOGIC_XLS404B:
        case PRID_IMP_NETLOGIC_XLS408B:
        case PRID_IMP_NETLOGIC_XLS416B:
+       case PRID_IMP_NETLOGIC_XLS608B:
+       case PRID_IMP_NETLOGIC_XLS616B:
                setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
                                        FMN_STNID_GMAC0_TX3, 8, 8, 32);
                setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
index 340ab16..6d3c727 100644 (file)
@@ -36,7 +36,7 @@ static struct mtd_partition xlr_nor_parts[] = {
        {
                .name = "User FS",
                .offset = 0x800000,
-               .size   = MTDPART_SIZ_FULL,
+               .size   = MTDPART_SIZ_FULL,
        }
 };
 
@@ -46,13 +46,13 @@ static struct mtd_partition xlr_nor_parts[] = {
 static struct mtd_partition xlr_nand_parts[] = {
        {
                .name   = "Root Filesystem",
-               .offset = 64 * 64 * 2048,
+               .offset = 64 * 64 * 2048,
                .size   = 432 * 64 * 2048,
        },
        {
                .name   = "Home Filesystem",
-               .offset = MTDPART_OFS_APPEND,
-               .size   = MTDPART_SIZ_FULL,
+               .offset = MTDPART_OFS_APPEND,
+               .size   = MTDPART_SIZ_FULL,
        },
 };
 
@@ -74,8 +74,8 @@ static struct platform_device xlr_nor_dev = {
        .dev    = {
                .platform_data  = &xlr_nor_data,
        },
-       .num_resources  = ARRAY_SIZE(xlr_nor_res),
-       .resource       = xlr_nor_res,
+       .num_resources  = ARRAY_SIZE(xlr_nor_res),
+       .resource       = xlr_nor_res,
 };
 
 const char *xlr_part_probes[] = { "cmdlinepart", NULL };
index 507230e..7b96a91 100644 (file)
@@ -64,7 +64,7 @@ void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
                .iotype         = UPIO_MEM32,           \
                .flags          = (UPF_SKIP_TEST |      \
                         UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\
-               .uartclk        = PIC_CLKS_PER_SEC,     \
+               .uartclk        = PIC_CLK_HZ,           \
                .type           = PORT_16550A,          \
                .serial_in      = nlm_xlr_uart_in,      \
                .serial_out     = nlm_xlr_uart_out,     \
@@ -162,18 +162,18 @@ int xls_platform_usb_init(void)
        nlm_write_reg(usb_mmio, 50, 0x1f000000);
 
        /* Enable ports */
-       nlm_write_reg(usb_mmio,  1, 0x07000500);
+       nlm_write_reg(usb_mmio,  1, 0x07000500);
 
        val = nlm_read_reg(gpio_mmio, 21);
        if (((val >> 22) & 0x01) == 0) {
                pr_info("Detected USB Device mode - Not supported!\n");
-               nlm_write_reg(usb_mmio,  0, 0x01000000);
+               nlm_write_reg(usb_mmio,  0, 0x01000000);
                return 0;
        }
 
        pr_info("Detected USB Host mode - Adding XLS USB devices.\n");
        /* Clear reset, host mode */
-       nlm_write_reg(usb_mmio,  0, 0x02000000);
+       nlm_write_reg(usb_mmio,  0, 0x02000000);
 
        /* Memory resource for various XLS usb ports */
        usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_0_OFFSET);
@@ -221,8 +221,8 @@ static struct resource i2c_resources[] = {
 };
 
 static struct platform_device nlm_xlr_i2c_1 = {
-       .name           = "xlr-i2cbus",
-       .id             = 1,
+       .name           = "xlr-i2cbus",
+       .id             = 1,
        .num_resources  = 1,
        .resource       = i2c_resources,
 };
index c5ce699..e3e0941 100644 (file)
@@ -70,7 +70,7 @@ static void __init nlm_early_serial_setup(void)
        s.iotype        = UPIO_MEM32;
        s.regshift      = 2;
        s.irq           = PIC_UART_0_IRQ;
-       s.uartclk       = PIC_CLKS_PER_SEC;
+       s.uartclk       = PIC_CLK_HZ;
        s.serial_in     = nlm_xlr_uart_in;
        s.serial_out    = nlm_xlr_uart_out;
        s.mapbase       = uart_base;
@@ -163,7 +163,7 @@ static void prom_add_memory(void)
 {
        struct nlm_boot_mem_map *bootm;
        u64 start, size;
-       u64 pref_backup = 512;  /* avoid pref walking beyond end */
+       u64 pref_backup = 512;  /* avoid pref walking beyond end */
        int i;
 
        bootm = (void *)(long)nlm_prom_info.psb_mem_map;
index e32db1f..af763e8 100644 (file)
@@ -27,10 +27,10 @@ static int op_mips_setup(void)
        /* Pre-compute the values to stuff in the hardware registers.  */
        model->reg_setup(ctr);
 
-       /* Configure the registers on all cpus.  */
+       /* Configure the registers on all cpus.  */
        on_each_cpu(model->cpu_setup, NULL, 1);
 
-        return 0;
+       return 0;
 }
 
 static int op_mips_create_files(struct super_block *sb, struct dentry *root)
@@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        switch (current_cpu_type()) {
        case CPU_5KC:
        case CPU_M14KC:
+       case CPU_M14KEC:
        case CPU_20KC:
        case CPU_24K:
        case CPU_25KF:
@@ -110,7 +111,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
        ops->create_files       = op_mips_create_files;
        ops->setup              = op_mips_setup;
-       //ops->shutdown         = op_mips_shutdown;
+       //ops->shutdown         = op_mips_shutdown;
        ops->start              = op_mips_start;
        ops->stop               = op_mips_stop;
        ops->cpu_type           = lmodel->cpu_type;
index 60d3ea6..b249ec0 100644 (file)
 
 #define LOONGSON2_CPU_TYPE     "mips/loongson2"
 
-#define LOONGSON2_PERFCNT_OVERFLOW             (1ULL   << 31)
+#define LOONGSON2_PERFCNT_OVERFLOW             (1ULL   << 31)
 
 #define LOONGSON2_PERFCTRL_EXL                 (1UL    <<  0)
-#define LOONGSON2_PERFCTRL_KERNEL              (1UL    <<  1)
-#define LOONGSON2_PERFCTRL_SUPERVISOR          (1UL    <<  2)
-#define LOONGSON2_PERFCTRL_USER                        (1UL    <<  3)
-#define LOONGSON2_PERFCTRL_ENABLE              (1UL    <<  4)
+#define LOONGSON2_PERFCTRL_KERNEL              (1UL    <<  1)
+#define LOONGSON2_PERFCTRL_SUPERVISOR          (1UL    <<  2)
+#define LOONGSON2_PERFCTRL_USER                        (1UL    <<  3)
+#define LOONGSON2_PERFCTRL_ENABLE              (1UL    <<  4)
 #define LOONGSON2_PERFCTRL_EVENT(idx, event) \
        (((event) & 0x0f) << ((idx) ? 9 : 5))
 
index 7862546..1fd3614 100644 (file)
 
 #include "op_impl.h"
 
-#define M_PERFCTL_EXL                  (1UL      <<  0)
-#define M_PERFCTL_KERNEL               (1UL      <<  1)
-#define M_PERFCTL_SUPERVISOR           (1UL      <<  2)
-#define M_PERFCTL_USER                 (1UL      <<  3)
-#define M_PERFCTL_INTERRUPT_ENABLE     (1UL      <<  4)
+#define M_PERFCTL_EXL                  (1UL      <<  0)
+#define M_PERFCTL_KERNEL               (1UL      <<  1)
+#define M_PERFCTL_SUPERVISOR           (1UL      <<  2)
+#define M_PERFCTL_USER                 (1UL      <<  3)
+#define M_PERFCTL_INTERRUPT_ENABLE     (1UL      <<  4)
 #define M_PERFCTL_EVENT(event)         (((event) & 0x3ff)  << 5)
-#define M_PERFCTL_VPEID(vpe)           ((vpe)    << 16)
+#define M_PERFCTL_VPEID(vpe)           ((vpe)    << 16)
 #define M_PERFCTL_MT_EN(filter)                ((filter) << 20)
-#define    M_TC_EN_ALL                 M_PERFCTL_MT_EN(0)
-#define    M_TC_EN_VPE                 M_PERFCTL_MT_EN(1)
-#define    M_TC_EN_TC                  M_PERFCTL_MT_EN(2)
-#define M_PERFCTL_TCID(tcid)           ((tcid)   << 22)
-#define M_PERFCTL_WIDE                 (1UL      << 30)
-#define M_PERFCTL_MORE                 (1UL      << 31)
+#define           M_TC_EN_ALL                  M_PERFCTL_MT_EN(0)
+#define           M_TC_EN_VPE                  M_PERFCTL_MT_EN(1)
+#define           M_TC_EN_TC                   M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid)           ((tcid)   << 22)
+#define M_PERFCTL_WIDE                 (1UL      << 30)
+#define M_PERFCTL_MORE                 (1UL      << 31)
 
-#define M_COUNTER_OVERFLOW             (1UL      << 31)
+#define M_COUNTER_OVERFLOW             (1UL      << 31)
 
 /* Netlogic XLR specific, count events in all threads in a core */
-#define M_PERFCTL_COUNT_ALL_THREADS    (1UL      << 13)
+#define M_PERFCTL_COUNT_ALL_THREADS    (1UL      << 13)
 
 static int (*save_perf_irq)(void);
 
@@ -143,7 +143,7 @@ static struct mipsxx_register_config {
        unsigned int counter[4];
 } reg;
 
-/* Compute all of the registers in preparation for enabling profiling.  */
+/* Compute all of the registers in preparation for enabling profiling. */
 
 static void mipsxx_reg_setup(struct op_counter_config *ctr)
 {
@@ -159,7 +159,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
                        continue;
 
                reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
-                                M_PERFCTL_INTERRUPT_ENABLE;
+                                M_PERFCTL_INTERRUPT_ENABLE;
                if (ctr[i].kernel)
                        reg.control[i] |= M_PERFCTL_KERNEL;
                if (ctr[i].user)
@@ -172,7 +172,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
        }
 }
 
-/* Program all of the registers in preparation for enabling profiling.  */
+/* Program all of the registers in preparation for enabling profiling. */
 
 static void mipsxx_cpu_setup(void *args)
 {
@@ -351,6 +351,10 @@ static int __init mipsxx_init(void)
                op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
                break;
 
+       case CPU_M14KEC:
+               op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
+               break;
+
        case CPU_20KC:
                op_model_mipsxx_ops.cpu_type = "mips/20K";
                break;
index ce995d3..2cb1d31 100644 (file)
@@ -27,7 +27,6 @@ obj-$(CONFIG_PCI_AR724X)      += pci-ar724x.o
 #
 obj-$(CONFIG_LASAT)            += pci-lasat.o
 obj-$(CONFIG_MIPS_COBALT)      += fixup-cobalt.o
-obj-$(CONFIG_SOC_PNX8550)      += fixup-pnx8550.o ops-pnx8550.o
 obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
 obj-$(CONFIG_LEMOTE_MACH2F)    += fixup-lemote2f.o ops-loongson2.o
 obj-$(CONFIG_MIPS_MALTA)       += fixup-malta.o
@@ -55,10 +54,10 @@ obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o
 obj-$(CONFIG_ZAO_CAPCELLA)     += fixup-capcella.o
 obj-$(CONFIG_WR_PPMC)          += fixup-wrppmc.o
 obj-$(CONFIG_MIKROTIK_RB532)   += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON)        += pci-octeon.o pcie-octeon.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o
 obj-$(CONFIG_CPU_XLR)          += pci-xlr.o
 obj-$(CONFIG_CPU_XLP)          += pci-xlp.o
 
 ifdef CONFIG_PCI_MSI
-obj-$(CONFIG_CPU_CAVIUM_OCTEON)        += msi-octeon.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o
 endif
index 9553b14..a138e8e 100644 (file)
@@ -94,14 +94,14 @@ static void qube_raq_galileo_fixup(struct pci_dev *dev)
         * --x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--
         *
         * On all machines prior to Q2, we had the STOP line disconnected
-        * from Galileo to VIA on PCI.  The new Galileo does not function
+        * from Galileo to VIA on PCI.  The new Galileo does not function
         * correctly unless we have it connected.
         *
         * Therefore we must set the disconnect/retry cycle values to
         * something sensible when using the new Galileo.
         */
 
-       printk(KERN_INFO "Galileo: revision %u\n", dev->revision);
+       printk(KERN_INFO "Galileo: revision %u\n", dev->revision);
 
 #if 0
        if (dev->revision >= 0x10) {
@@ -149,30 +149,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
         qube_raq_via_board_id_fixup);
 
 static char irq_tab_qube1[] __initdata = {
-  [COBALT_PCICONF_CPU]     = 0,
-  [COBALT_PCICONF_ETH0]    = QUBE1_ETH0_IRQ,
+  [COBALT_PCICONF_CPU]    = 0,
+  [COBALT_PCICONF_ETH0]           = QUBE1_ETH0_IRQ,
   [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
-  [COBALT_PCICONF_VIA]     = 0,
+  [COBALT_PCICONF_VIA]    = 0,
   [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
-  [COBALT_PCICONF_ETH1]    = 0
+  [COBALT_PCICONF_ETH1]           = 0
 };
 
 static char irq_tab_cobalt[] __initdata = {
-  [COBALT_PCICONF_CPU]     = 0,
-  [COBALT_PCICONF_ETH0]    = ETH0_IRQ,
+  [COBALT_PCICONF_CPU]    = 0,
+  [COBALT_PCICONF_ETH0]           = ETH0_IRQ,
   [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
-  [COBALT_PCICONF_VIA]     = 0,
+  [COBALT_PCICONF_VIA]    = 0,
   [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
-  [COBALT_PCICONF_ETH1]    = ETH1_IRQ
+  [COBALT_PCICONF_ETH1]           = ETH1_IRQ
 };
 
 static char irq_tab_raq2[] __initdata = {
-  [COBALT_PCICONF_CPU]     = 0,
-  [COBALT_PCICONF_ETH0]    = ETH0_IRQ,
+  [COBALT_PCICONF_CPU]    = 0,
+  [COBALT_PCICONF_ETH0]           = ETH0_IRQ,
   [COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ,
-  [COBALT_PCICONF_VIA]     = 0,
+  [COBALT_PCICONF_VIA]    = 0,
   [COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
-  [COBALT_PCICONF_ETH1]    = ETH1_IRQ
+  [COBALT_PCICONF_ETH1]           = ETH1_IRQ
 };
 
 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
index beaec32..19caf77 100644 (file)
@@ -42,7 +42,7 @@
  *
  */
 
-#define        MAX_SLOT_NUM 10
+#define MAX_SLOT_NUM 10
 static unsigned char irq_map[][5] __initdata = {
        [3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC,
               MARKEINS_PCI_IRQ_INTD, 0,},
index 63ab4a0..50da773 100644 (file)
@@ -6,9 +6,9 @@
  * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  This program is free software; you can redistribute         it and/or modify it
+ *  under  the terms of         the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
  *  option) any later version.
  */
 #include <linux/init.h>
@@ -152,7 +152,7 @@ static void loongson2e_686b_func1_fixup(struct pci_dev *pdev)
        /* disable read prefetch/write post buffers */
        pci_write_config_byte(pdev, 0x41, 0x02);
 
-       /* use 3/4 as fifo thresh hold  */
+       /* use 3/4 as fifo thresh hold  */
        pci_write_config_byte(pdev, 0x43, 0x0a);
        pci_write_config_byte(pdev, 0x44, 0x00);
 
index 190fffd..133685e 100644 (file)
 #define INTC   MACEPCI_SHARED1_IRQ
 #define INTD   MACEPCI_SHARED2_IRQ
 static char irq_tab_mace[][5] __initdata = {
-      /* Dummy  INT#A  INT#B  INT#C  INT#D */
-       {0,         0,     0,     0,     0}, /* This is placeholder row - never used */
-       {0,     SCSI0, SCSI0, SCSI0, SCSI0},
-       {0,     SCSI1, SCSI1, SCSI1, SCSI1},
-       {0,     INTA0,  INTB,  INTC,  INTD},
-       {0,     INTA1,  INTC,  INTD,  INTB},
-       {0,     INTA2,  INTD,  INTB,  INTC},
+      /* Dummy INT#A  INT#B  INT#C  INT#D */
+       {0,         0,     0,     0,     0}, /* This is placeholder row - never used */
+       {0,     SCSI0, SCSI0, SCSI0, SCSI0},
+       {0,     SCSI1, SCSI1, SCSI1, SCSI1},
+       {0,     INTA0,  INTB,  INTC,  INTD},
+       {0,     INTA1,  INTC,  INTD,  INTB},
+       {0,     INTA2,  INTD,  INTB,  INTC},
 };
 
 
index 519daae..95ab9a1 100644 (file)
@@ -31,7 +31,7 @@
 
 /* all the pci device has the PCIA pin, check the datasheet. */
 static char irq_tab[][5] __initdata = {
-       /*      INTA    INTB    INTC    INTD */
+       /*      INTA    INTB    INTC    INTD */
        {0, 0, 0, 0, 0},        /*  11: Unused */
        {0, 0, 0, 0, 0},        /*  12: Unused */
        {0, 0, 0, 0, 0},        /*  13: Unused */
@@ -69,15 +69,15 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
                case 2:
                        pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
                                              CS5536_IDE_INTR);
-                       return CS5536_IDE_INTR; /*  for IDE */
+                       return CS5536_IDE_INTR; /*  for IDE */
                case 3:
                        pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
                                              CS5536_ACC_INTR);
-                       return CS5536_ACC_INTR; /*  for AUDIO */
-               case 4: /*  for OHCI */
-               case 5: /*  for EHCI */
-               case 6: /*  for UDC */
-               case 7: /*  for OTG */
+                       return CS5536_ACC_INTR; /*  for AUDIO */
+               case 4: /*  for OHCI */
+               case 5: /*  for EHCI */
+               case 6: /*  for UDC */
+               case 7: /*  for OTG */
                        pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
                                              CS5536_USB_INTR);
                        return CS5536_USB_INTR;
index 75d03f6..07ada7f 100644 (file)
@@ -12,7 +12,7 @@ static char pci_irq[5] = {
 };
 
 static char irq_tab[][5] __initdata = {
-       /*      INTA    INTB    INTC    INTD */
+       /*      INTA    INTB    INTC    INTD */
        {0,     0,      0,      0,      0 },    /*  0: GT64120 PCI bridge */
        {0,     0,      0,      0,      0 },    /*  1: Unused */
        {0,     0,      0,      0,      0 },    /*  2: Unused */
@@ -23,7 +23,7 @@ static char irq_tab[][5] __initdata = {
        {0,     0,      0,      0,      0 },    /*  7: Unused */
        {0,     0,      0,      0,      0 },    /*  8: Unused */
        {0,     0,      0,      0,      0 },    /*  9: Unused */
-       {0,     0,      0,      0,      PCID }, /* 10: PIIX4 USB */
+       {0,     0,      0,      0,      PCID }, /* 10: PIIX4 USB */
        {0,     PCIB,   0,      0,      0 },    /* 11: AMD 79C973 Ethernet */
        {0,     PCIC,   0,      0,      0 },    /* 12: Crystal 4281 Sound */
        {0,     0,      0,      0,      0 },    /* 13: Unused */
@@ -31,9 +31,9 @@ static char irq_tab[][5] __initdata = {
        {0,     0,      0,      0,      0 },    /* 15: Unused */
        {0,     0,      0,      0,      0 },    /* 16: Unused */
        {0,     0,      0,      0,      0 },    /* 17: Bonito/SOC-it PCI Bridge*/
-       {0,     PCIA,   PCIB,   PCIC,   PCID }, /* 18: PCI Slot 1 */
-       {0,     PCIB,   PCIC,   PCID,   PCIA }, /* 19: PCI Slot 2 */
-       {0,     PCIC,   PCID,   PCIA,   PCIB }, /* 20: PCI Slot 3 */
+       {0,     PCIA,   PCIB,   PCIC,   PCID }, /* 18: PCI Slot 1 */
+       {0,     PCIB,   PCIC,   PCID,   PCIA }, /* 19: PCI Slot 2 */
+       {0,     PCIC,   PCID,   PCIA,   PCIB }, /* 20: PCI Slot 3 */
        {0,     PCID,   PCIA,   PCIB,   PCIC }  /* 21: PCI Slot 4 */
 };
 
@@ -54,8 +54,8 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
        static int piixirqmap[16] = {  /* PIIX PIRQC[A:D] irq mappings */
-               0,  0,  0,  3,
-               4,  5,  6,  7,
+               0,  0,  0,  3,
+               4,  5,  6,  7,
                0,  9, 10, 11,
                12, 0, 14, 15
        };
index 65735b1..fab405c 100644 (file)
 #if defined(CONFIG_PMC_MSP7120_GW)
 /* Garibaldi Board IRQ wiring to PCI slots */
 static char irq_tab[][5] __initdata = {
-       /* INTA    INTB    INTC    INTD */
-       {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[2]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[3]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[4]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[5]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[6]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[7]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[8]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[9]): Unused */
-       {0,     0,      0,      0,      0 },    /*  0 (AD[10]): Unused */
-       {0,     0,      0,      0,      0 },    /*  1 (AD[11]): Unused */
-       {0,     0,      0,      0,      0 },    /*  2 (AD[12]): Unused */
-       {0,     0,      0,      0,      0 },    /*  3 (AD[13]): Unused */
-       {0,     0,      0,      0,      0 },    /*  4 (AD[14]): Unused */
-       {0,     0,      0,      0,      0 },    /*  5 (AD[15]): Unused */
-       {0,     0,      0,      0,      0 },    /*  6 (AD[16]): Unused */
-       {0,     0,      0,      0,      0 },    /*  7 (AD[17]): Unused */
-       {0,     0,      0,      0,      0 },    /*  8 (AD[18]): Unused */
-       {0,     0,      0,      0,      0 },    /*  9 (AD[19]): Unused */
-       {0,     0,      0,      0,      0 },    /* 10 (AD[20]): Unused */
-       {0,     0,      0,      0,      0 },    /* 11 (AD[21]): Unused */
-       {0,     0,      0,      0,      0 },    /* 12 (AD[22]): Unused */
-       {0,     0,      0,      0,      0 },    /* 13 (AD[23]): Unused */
-       {0,     0,      0,      0,      0 },    /* 14 (AD[24]): Unused */
-       {0,     0,      0,      0,      0 },    /* 15 (AD[25]): Unused */
-       {0,     0,      0,      0,      0 },    /* 16 (AD[26]): Unused */
-       {0,     0,      0,      0,      0 },    /* 17 (AD[27]): Unused */
-       {0,     IRQ4,   IRQ4,   0,      0 },    /* 18 (AD[28]): slot 0 */
-       {0,     0,      0,      0,      0 },    /* 19 (AD[29]): Unused */
-       {0,     IRQ5,   IRQ5,   0,      0 },    /* 20 (AD[30]): slot 1 */
-       {0,     IRQ6,   IRQ6,   0,      0 }     /* 21 (AD[31]): slot 2 */
+       /* INTA    INTB    INTC    INTD */
+       {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[2]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[3]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[4]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[5]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[6]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[7]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[8]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[9]): Unused */
+       {0,     0,      0,      0,      0 },    /*  0 (AD[10]): Unused */
+       {0,     0,      0,      0,      0 },    /*  1 (AD[11]): Unused */
+       {0,     0,      0,      0,      0 },    /*  2 (AD[12]): Unused */
+       {0,     0,      0,      0,      0 },    /*  3 (AD[13]): Unused */
+       {0,     0,      0,      0,      0 },    /*  4 (AD[14]): Unused */
+       {0,     0,      0,      0,      0 },    /*  5 (AD[15]): Unused */
+       {0,     0,      0,      0,      0 },    /*  6 (AD[16]): Unused */
+       {0,     0,      0,      0,      0 },    /*  7 (AD[17]): Unused */
+       {0,     0,      0,      0,      0 },    /*  8 (AD[18]): Unused */
+       {0,     0,      0,      0,      0 },    /*  9 (AD[19]): Unused */
+       {0,     0,      0,      0,      0 },    /* 10 (AD[20]): Unused */
+       {0,     0,      0,      0,      0 },    /* 11 (AD[21]): Unused */
+       {0,     0,      0,      0,      0 },    /* 12 (AD[22]): Unused */
+       {0,     0,      0,      0,      0 },    /* 13 (AD[23]): Unused */
+       {0,     0,      0,      0,      0 },    /* 14 (AD[24]): Unused */
+       {0,     0,      0,      0,      0 },    /* 15 (AD[25]): Unused */
+       {0,     0,      0,      0,      0 },    /* 16 (AD[26]): Unused */
+       {0,     0,      0,      0,      0 },    /* 17 (AD[27]): Unused */
+       {0,     IRQ4,   IRQ4,   0,      0 },    /* 18 (AD[28]): slot 0 */
+       {0,     0,      0,      0,      0 },    /* 19 (AD[29]): Unused */
+       {0,     IRQ5,   IRQ5,   0,      0 },    /* 20 (AD[30]): slot 1 */
+       {0,     IRQ6,   IRQ6,   0,      0 }     /* 21 (AD[31]): slot 2 */
 };
 
 #elif defined(CONFIG_PMC_MSP7120_EVAL)
 
 /* MSP7120 Eval Board IRQ wiring to PCI slots */
 static char irq_tab[][5] __initdata = {
-       /* INTA    INTB    INTC    INTD */
-       {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[2]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[3]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[4]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[5]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[6]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[7]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[8]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[9]): Unused */
-       {0,     0,      0,      0,      0 },    /*  0 (AD[10]): Unused */
-       {0,     0,      0,      0,      0 },    /*  1 (AD[11]): Unused */
-       {0,     0,      0,      0,      0 },    /*  2 (AD[12]): Unused */
-       {0,     0,      0,      0,      0 },    /*  3 (AD[13]): Unused */
-       {0,     0,      0,      0,      0 },    /*  4 (AD[14]): Unused */
-       {0,     0,      0,      0,      0 },    /*  5 (AD[15]): Unused */
-       {0,     IRQ6,   IRQ6,   0,      0 },    /*  6 (AD[16]): slot 3 (mini) */
-       {0,     IRQ5,   IRQ5,   0,      0 },    /*  7 (AD[17]): slot 2 (mini) */
-       {0,     IRQ4,   IRQ4,   IRQ4,   IRQ4},  /*  8 (AD[18]): slot 0 (PCI) */
-       {0,     IRQ5,   IRQ5,   IRQ5,   IRQ5},  /*  9 (AD[19]): slot 1 (PCI) */
-       {0,     0,      0,      0,      0 },    /* 10 (AD[20]): Unused */
-       {0,     0,      0,      0,      0 },    /* 11 (AD[21]): Unused */
-       {0,     0,      0,      0,      0 },    /* 12 (AD[22]): Unused */
-       {0,     0,      0,      0,      0 },    /* 13 (AD[23]): Unused */
-       {0,     0,      0,      0,      0 },    /* 14 (AD[24]): Unused */
-       {0,     0,      0,      0,      0 },    /* 15 (AD[25]): Unused */
-       {0,     0,      0,      0,      0 },    /* 16 (AD[26]): Unused */
-       {0,     0,      0,      0,      0 },    /* 17 (AD[27]): Unused */
-       {0,     0,      0,      0,      0 },    /* 18 (AD[28]): Unused */
-       {0,     0,      0,      0,      0 },    /* 19 (AD[29]): Unused */
-       {0,     0,      0,      0,      0 },    /* 20 (AD[30]): Unused */
-       {0,     0,      0,      0,      0 }     /* 21 (AD[31]): Unused */
+       /* INTA    INTB    INTC    INTD */
+       {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[2]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[3]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[4]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[5]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[6]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[7]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[8]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[9]): Unused */
+       {0,     0,      0,      0,      0 },    /*  0 (AD[10]): Unused */
+       {0,     0,      0,      0,      0 },    /*  1 (AD[11]): Unused */
+       {0,     0,      0,      0,      0 },    /*  2 (AD[12]): Unused */
+       {0,     0,      0,      0,      0 },    /*  3 (AD[13]): Unused */
+       {0,     0,      0,      0,      0 },    /*  4 (AD[14]): Unused */
+       {0,     0,      0,      0,      0 },    /*  5 (AD[15]): Unused */
+       {0,     IRQ6,   IRQ6,   0,      0 },    /*  6 (AD[16]): slot 3 (mini) */
+       {0,     IRQ5,   IRQ5,   0,      0 },    /*  7 (AD[17]): slot 2 (mini) */
+       {0,     IRQ4,   IRQ4,   IRQ4,   IRQ4},  /*  8 (AD[18]): slot 0 (PCI) */
+       {0,     IRQ5,   IRQ5,   IRQ5,   IRQ5},  /*  9 (AD[19]): slot 1 (PCI) */
+       {0,     0,      0,      0,      0 },    /* 10 (AD[20]): Unused */
+       {0,     0,      0,      0,      0 },    /* 11 (AD[21]): Unused */
+       {0,     0,      0,      0,      0 },    /* 12 (AD[22]): Unused */
+       {0,     0,      0,      0,      0 },    /* 13 (AD[23]): Unused */
+       {0,     0,      0,      0,      0 },    /* 14 (AD[24]): Unused */
+       {0,     0,      0,      0,      0 },    /* 15 (AD[25]): Unused */
+       {0,     0,      0,      0,      0 },    /* 16 (AD[26]): Unused */
+       {0,     0,      0,      0,      0 },    /* 17 (AD[27]): Unused */
+       {0,     0,      0,      0,      0 },    /* 18 (AD[28]): Unused */
+       {0,     0,      0,      0,      0 },    /* 19 (AD[29]): Unused */
+       {0,     0,      0,      0,      0 },    /* 20 (AD[30]): Unused */
+       {0,     0,      0,      0,      0 }     /* 21 (AD[31]): Unused */
 };
 
 #else
 
 /* Unknown board -- don't assign any IRQs */
 static char irq_tab[][5] __initdata = {
-       /* INTA    INTB    INTC    INTD */
-       {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[2]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[3]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[4]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[5]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[6]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[7]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[8]): Unused */
-       {0,     0,      0,      0,      0 },    /*    (AD[9]): Unused */
-       {0,     0,      0,      0,      0 },    /*  0 (AD[10]): Unused */
-       {0,     0,      0,      0,      0 },    /*  1 (AD[11]): Unused */
-       {0,     0,      0,      0,      0 },    /*  2 (AD[12]): Unused */
-       {0,     0,      0,      0,      0 },    /*  3 (AD[13]): Unused */
-       {0,     0,      0,      0,      0 },    /*  4 (AD[14]): Unused */
-       {0,     0,      0,      0,      0 },    /*  5 (AD[15]): Unused */
-       {0,     0,      0,      0,      0 },    /*  6 (AD[16]): Unused */
-       {0,     0,      0,      0,      0 },    /*  7 (AD[17]): Unused */
-       {0,     0,      0,      0,      0 },    /*  8 (AD[18]): Unused */
-       {0,     0,      0,      0,      0 },    /*  9 (AD[19]): Unused */
-       {0,     0,      0,      0,      0 },    /* 10 (AD[20]): Unused */
-       {0,     0,      0,      0,      0 },    /* 11 (AD[21]): Unused */
-       {0,     0,      0,      0,      0 },    /* 12 (AD[22]): Unused */
-       {0,     0,      0,      0,      0 },    /* 13 (AD[23]): Unused */
-       {0,     0,      0,      0,      0 },    /* 14 (AD[24]): Unused */
-       {0,     0,      0,      0,      0 },    /* 15 (AD[25]): Unused */
-       {0,     0,      0,      0,      0 },    /* 16 (AD[26]): Unused */
-       {0,     0,      0,      0,      0 },    /* 17 (AD[27]): Unused */
-       {0,     0,      0,      0,      0 },    /* 18 (AD[28]): Unused */
-       {0,     0,      0,      0,      0 },    /* 19 (AD[29]): Unused */
-       {0,     0,      0,      0,      0 },    /* 20 (AD[30]): Unused */
-       {0,     0,      0,      0,      0 }     /* 21 (AD[31]): Unused */
+       /* INTA    INTB    INTC    INTD */
+       {0,     0,      0,      0,      0 },    /*    (AD[0]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[1]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[2]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[3]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[4]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[5]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[6]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[7]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[8]): Unused */
+       {0,     0,      0,      0,      0 },    /*    (AD[9]): Unused */
+       {0,     0,      0,      0,      0 },    /*  0 (AD[10]): Unused */
+       {0,     0,      0,      0,      0 },    /*  1 (AD[11]): Unused */
+       {0,     0,      0,      0,      0 },    /*  2 (AD[12]): Unused */
+       {0,     0,      0,      0,      0 },    /*  3 (AD[13]): Unused */
+       {0,     0,      0,      0,      0 },    /*  4 (AD[14]): Unused */
+       {0,     0,      0,      0,      0 },    /*  5 (AD[15]): Unused */
+       {0,     0,      0,      0,      0 },    /*  6 (AD[16]): Unused */
+       {0,     0,      0,      0,      0 },    /*  7 (AD[17]): Unused */
+       {0,     0,      0,      0,      0 },    /*  8 (AD[18]): Unused */
+       {0,     0,      0,      0,      0 },    /*  9 (AD[19]): Unused */
+       {0,     0,      0,      0,      0 },    /* 10 (AD[20]): Unused */
+       {0,     0,      0,      0,      0 },    /* 11 (AD[21]): Unused */
+       {0,     0,      0,      0,      0 },    /* 12 (AD[22]): Unused */
+       {0,     0,      0,      0,      0 },    /* 13 (AD[23]): Unused */
+       {0,     0,      0,      0,      0 },    /* 14 (AD[24]): Unused */
+       {0,     0,      0,      0,      0 },    /* 15 (AD[25]): Unused */
+       {0,     0,      0,      0,      0 },    /* 16 (AD[26]): Unused */
+       {0,     0,      0,      0,      0 },    /* 17 (AD[27]): Unused */
+       {0,     0,      0,      0,      0 },    /* 18 (AD[28]): Unused */
+       {0,     0,      0,      0,      0 },    /* 19 (AD[29]): Unused */
+       {0,     0,      0,      0,      0 },    /* 20 (AD[30]): Unused */
+       {0,     0,      0,      0,      0 }     /* 21 (AD[31]): Unused */
 };
 #endif
 
@@ -168,14 +168,14 @@ static char irq_tab[][5] __initdata = {
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Perform platform specific device initialization at
- *               pci_enable_device() time.
- *               None are needed for the MSP7120 PCI Controller.
+ *              pci_enable_device() time.
+ *              None are needed for the MSP7120 PCI Controller.
  *
- *  INPUTS:      dev     - structure describing the PCI device
+ *  INPUTS:     dev     - structure describing the PCI device
  *
- *  OUTPUTS:     none
+ *  OUTPUTS:    none
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL
+ *  RETURNS:    PCIBIOS_SUCCESSFUL
  *
  ****************************************************************************/
 int pcibios_plat_dev_init(struct pci_dev *dev)
@@ -190,16 +190,16 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
  *
  *  DESCRIPTION: Perform board supplied PCI IRQ mapping routine.
  *
- *  INPUTS:      dev     - unused
- *               slot    - PCI slot. Identified by which bit of the AD[] bus
- *                         drives the IDSEL line. AD[10] is 0, AD[31] is
- *                         slot 21.
- *               pin     - numbered using the scheme of the PCI_INTERRUPT_PIN
- *                         field of the config header.
+ *  INPUTS:     dev     - unused
+ *              slot    - PCI slot. Identified by which bit of the AD[] bus
+ *                        drives the IDSEL line. AD[10] is 0, AD[31] is
+ *                        slot 21.
+ *              pin     - numbered using the scheme of the PCI_INTERRUPT_PIN
+ *                        field of the config header.
  *
- *  OUTPUTS:     none
+ *  OUTPUTS:    none
  *
- *  RETURNS:     IRQ number
+ *  RETURNS:    IRQ number
  *
  ****************************************************************************/
 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/mips/pci/fixup-pnx8550.c b/arch/mips/pci/fixup-pnx8550.c
deleted file mode 100644 (file)
index 96857ac..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *  Philips PNX8550 pci fixups.
- *
- *  Copyright 2005 Embedded Alley Solutions, Inc
- *  source@embeddealley.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/mach-pnx8550/pci.h>
-#include <asm/mach-pnx8550/int.h>
-
-
-#undef DEBUG
-#ifdef         DEBUG
-#define        DBG(x...)       printk(x)
-#else
-#define        DBG(x...)
-#endif
-
-extern char pnx8550_irq_tab[][5];
-
-void __init pcibios_fixup_resources(struct pci_dev *dev)
-{
-       /* no need to fixup IO resources */
-}
-
-void __init pcibios_fixup(void)
-{
-       /* nothing to do here */
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-       return pnx8550_irq_tab[slot][pin];
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
-       return 0;
-}
index 5c8a79b..f67ebee 100644 (file)
  * Logic CL-GD5434 VGA is device 3.
  */
 static char irq_tab_rm200[8][5] __initdata = {
-       /*       INTA  INTB  INTC  INTD */
-       {     0,    0,    0,    0,    0 },      /* EISA bridge */
+       /*       INTA  INTB  INTC  INTD */
+       {     0,    0,    0,    0,    0 },      /* EISA bridge */
        {  SCSI, SCSI, SCSI, SCSI, SCSI },      /* SCSI */
-       {   ETH,  ETH,  ETH,  ETH,  ETH },      /* Ethernet */
+       {   ETH,  ETH,  ETH,  ETH,  ETH },      /* Ethernet */
        {  INTB, INTB, INTB, INTB, INTB },      /* VGA */
-       {     0,    0,    0,    0,    0 },      /* Unused */
+       {     0,    0,    0,    0,    0 },      /* Unused */
        {     0, INTB, INTC, INTD, INTA },      /* Slot 2 */
        {     0, INTC, INTD, INTA, INTB },      /* Slot 3 */
        {     0, INTD, INTA, INTB, INTC },      /* Slot 4 */
@@ -58,20 +58,20 @@ static char irq_tab_rm200[8][5] __initdata = {
  * The VGA card is optional for RM300 systems.
  */
 static char irq_tab_rm300d[8][5] __initdata = {
-       /*       INTA  INTB  INTC  INTD */
-       {     0,    0,    0,    0,    0 },      /* EISA bridge */
+       /*       INTA  INTB  INTC  INTD */
+       {     0,    0,    0,    0,    0 },      /* EISA bridge */
        {  SCSI, SCSI, SCSI, SCSI, SCSI },      /* SCSI */
        {     0, INTC, INTD, INTA, INTB },      /* Slot 1 */
        {  INTB, INTB, INTB, INTB, INTB },      /* VGA */
-       {     0,    0,    0,    0,    0 },      /* Unused */
+       {     0,    0,    0,    0,    0 },      /* Unused */
        {     0, INTB, INTC, INTD, INTA },      /* Slot 2 */
        {     0, INTC, INTD, INTA, INTB },      /* Slot 3 */
        {     0, INTD, INTA, INTB, INTC },      /* Slot 4 */
 };
 
 static char irq_tab_rm300e[5][5] __initdata = {
-       /*       INTA  INTB  INTC  INTD */
-       {     0,    0,    0,    0,    0 },      /* HOST bridge */
+       /*       INTA  INTB  INTC  INTD */
+       {     0,    0,    0,    0,    0 },      /* HOST bridge */
        {  SCSI, SCSI, SCSI, SCSI, SCSI },      /* SCSI */
        {     0, INTC, INTD, INTA, INTB },      /* Bridge/i960 */
        {     0, INTD, INTA, INTB, INTC },      /* Slot 1 */
@@ -97,30 +97,30 @@ static char irq_tab_rm300e[5][5] __initdata = {
 #define INTD   PCIT_IRQ_INTD
 
 static char irq_tab_pcit[13][5] __initdata = {
-       /*       INTA  INTB  INTC  INTD */
-       {     0,     0,     0,     0,     0 },  /* HOST bridge */
+       /*       INTA  INTB  INTC  INTD */
+       {     0,     0,     0,     0,     0 },  /* HOST bridge */
        { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 },  /* SCSI */
        { SCSI1, SCSI1, SCSI1, SCSI1, SCSI1 },  /* SCSI */
-       {   ETH,   ETH,   ETH,   ETH,   ETH },  /* Ethernet */
-       {     0,  INTA,  INTB,  INTC,  INTD },  /* PCI-PCI bridge */
-       {     0,     0,     0,     0,     0 },  /* Unused */
-       {     0,     0,     0,     0,     0 },  /* Unused */
-       {     0,     0,     0,     0,     0 },  /* Unused */
-       {     0,  INTA,  INTB,  INTC,  INTD },  /* Slot 1 */
-       {     0,  INTB,  INTC,  INTD,  INTA },  /* Slot 2 */
-       {     0,  INTC,  INTD,  INTA,  INTB },  /* Slot 3 */
-       {     0,  INTD,  INTA,  INTB,  INTC },  /* Slot 4 */
-       {     0,  INTA,  INTB,  INTC,  INTD },  /* Slot 5 */
+       {   ETH,   ETH,   ETH,   ETH,   ETH },  /* Ethernet */
+       {     0,  INTA,  INTB,  INTC,  INTD },  /* PCI-PCI bridge */
+       {     0,     0,     0,     0,     0 },  /* Unused */
+       {     0,     0,     0,     0,     0 },  /* Unused */
+       {     0,     0,     0,     0,     0 },  /* Unused */
+       {     0,  INTA,  INTB,  INTC,  INTD },  /* Slot 1 */
+       {     0,  INTB,  INTC,  INTD,  INTA },  /* Slot 2 */
+       {     0,  INTC,  INTD,  INTA,  INTB },  /* Slot 3 */
+       {     0,  INTD,  INTA,  INTB,  INTC },  /* Slot 4 */
+       {     0,  INTA,  INTB,  INTC,  INTD },  /* Slot 5 */
 };
 
 static char irq_tab_pcit_cplus[13][5] __initdata = {
-       /*       INTA  INTB  INTC  INTD */
-       {     0,     0,     0,     0,     0 },  /* HOST bridge */
-       {     0,  INTB,  INTC,  INTD,  INTA },  /* PCI Slot 9 */
-       {     0,     0,     0,     0,     0 },  /* PCI-EISA */
-       {     0,     0,     0,     0,     0 },  /* Unused */
-       {     0,  INTA,  INTB,  INTC,  INTD },  /* PCI-PCI bridge */
-       {     0,  INTB,  INTC,  INTD,  INTA },  /* fixup */
+       /*       INTA  INTB  INTC  INTD */
+       {     0,     0,     0,     0,     0 },  /* HOST bridge */
+       {     0,  INTB,  INTC,  INTD,  INTA },  /* PCI Slot 9 */
+       {     0,     0,     0,     0,     0 },  /* PCI-EISA */
+       {     0,     0,     0,     0,     0 },  /* Unused */
+       {     0,  INTA,  INTB,  INTC,  INTD },  /* PCI-PCI bridge */
+       {     0,  INTB,  INTC,  INTD,  INTA },  /* fixup */
 };
 
 static inline int is_rm300_revd(void)
@@ -146,18 +146,18 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
                }
                return irq_tab_pcit_cplus[slot][pin];
        case SNI_BRD_PCI_TOWER:
-               return irq_tab_pcit[slot][pin];
+               return irq_tab_pcit[slot][pin];
 
        case SNI_BRD_PCI_MTOWER:
-               if (is_rm300_revd())
-                       return irq_tab_rm300d[slot][pin];
-               /* fall through */
+               if (is_rm300_revd())
+                       return irq_tab_rm300d[slot][pin];
+               /* fall through */
 
        case SNI_BRD_PCI_DESKTOP:
-               return irq_tab_rm200[slot][pin];
+               return irq_tab_rm200[slot][pin];
 
        case SNI_BRD_PCI_MTOWER_CPLUS:
-               return irq_tab_rm300e[slot][pin];
+               return irq_tab_rm300e[slot][pin];
        }
 
        return 0;
index 8084b17..d0b0083 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
  *
- *  Copyright (C) 2003  Megasolution Inc. <matsu@megasolution.jp>
+ *  Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
  *  Copyright (C) 2004-2005  Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
index 2fe29db..8c5039e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  fixup-tb0287.c, The TANBAC TB0287 specific PCI fixups.
  *
- *  Copyright (C) 2005  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 3d27754..29737ed 100644 (file)
@@ -20,7 +20,7 @@
 #define PCI_SLOT_MAXNR 32 /* Each PCI bus has 32 physical slots */
 
 static char pci_irq_tab[PCI_SLOT_MAXNR][5] __initdata = {
-       /* 0    INTA   INTB   INTC   INTD */
+       /* 0    INTA   INTB   INTC   INTD */
        [0] = {0, 0, 0, 0, 0},          /* Slot 0: GT64120 PCI bridge */
        [6] = {0, WRPPMC_PCI_INTA_IRQ, 0, 0, 0},
 };
index 4a15662..6144bb3 100644 (file)
@@ -174,8 +174,8 @@ static int bcm63xx_pci_write(struct pci_bus *bus, unsigned int devfn,
 }
 
 struct pci_ops bcm63xx_pci_ops = {
-       .read   = bcm63xx_pci_read,
-       .write  = bcm63xx_pci_write
+       .read   = bcm63xx_pci_read,
+       .write  = bcm63xx_pci_write
 };
 
 #ifdef CONFIG_CARDBUS
@@ -370,8 +370,8 @@ static int bcm63xx_cb_read(struct pci_bus *bus, unsigned int devfn,
                return fake_cb_bridge_read(where, size, val);
        }
 
-       /* a  configuration  cycle for  the  device  behind the  cardbus
-        * bridge is  actually done as a  type 0 cycle  on the primary
+       /* a  configuration  cycle for  the  device  behind the  cardbus
+        * bridge is  actually done as a  type 0 cycle  on the primary
         * bus. This means that only  one device can be on the cardbus
         * bus */
        if (fake_cb_bridge_regs.bus_assigned &&
@@ -403,8 +403,8 @@ static int bcm63xx_cb_write(struct pci_bus *bus, unsigned int devfn,
 }
 
 struct pci_ops bcm63xx_cb_ops = {
-       .read   = bcm63xx_cb_read,
-       .write   = bcm63xx_cb_write,
+       .read   = bcm63xx_cb_read,
+       .write   = bcm63xx_cb_write,
 };
 
 /*
@@ -523,6 +523,6 @@ static int bcm63xx_pcie_write(struct pci_bus *bus, unsigned int devfn,
 
 
 struct pci_ops bcm63xx_pcie_ops = {
-       .read   = bcm63xx_pcie_read,
-       .write  = bcm63xx_pcie_write
+       .read   = bcm63xx_pcie_read,
+       .write  = bcm63xx_pcie_write
 };
index 1b3e03f..830352e 100644 (file)
@@ -26,7 +26,7 @@
 
 #include <asm/mips-boards/bonito64.h>
 
-#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_READ         0
 #define PCI_ACCESS_WRITE 1
 
 #define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(_pcictrl_bonito_pcicfg + (offset))
@@ -137,7 +137,7 @@ static int bonito64_pcibios_write(struct pci_bus *bus, unsigned int devfn,
                data = val;
        else {
                if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
-                                              where, &data))
+                                              where, &data))
                        return -1;
 
                if (size == 1)
index 3d896c5..effcbda 100644 (file)
 
 #include <asm/gt64120.h>
 
-#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_READ         0
 #define PCI_ACCESS_WRITE 1
 
 /*
  *  PCI configuration cycle AD bus definition
  */
 /* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF           0
-#define PCI_CFG_TYPE0_FUNC_SHF          8
+#define PCI_CFG_TYPE0_REG_SHF          0
+#define PCI_CFG_TYPE0_FUNC_SHF         8
 
 /* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF           0
-#define PCI_CFG_TYPE1_FUNC_SHF          8
-#define PCI_CFG_TYPE1_DEV_SHF           11
-#define PCI_CFG_TYPE1_BUS_SHF           16
+#define PCI_CFG_TYPE1_REG_SHF          0
+#define PCI_CFG_TYPE1_FUNC_SHF         8
+#define PCI_CFG_TYPE1_DEV_SHF          11
+#define PCI_CFG_TYPE1_BUS_SHF          16
 
 static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
                struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
@@ -50,7 +50,7 @@ static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
 
        /* Clear cause register bits */
        GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
-                                    GT_INTRCAUSE_TARABORT0_BIT));
+                                    GT_INTRCAUSE_TARABORT0_BIT));
 
        /* Setup address */
        GT_WRITE(GT_PCI0_CFGADDR_OFS,
@@ -87,7 +87,7 @@ static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
 
                /* Clear bits */
                GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
-                                            GT_INTRCAUSE_TARABORT0_BIT));
+                                            GT_INTRCAUSE_TARABORT0_BIT));
 
                return -1;
        }
@@ -106,7 +106,7 @@ static int gt64xxx_pci0_pcibios_read(struct pci_bus *bus, unsigned int devfn,
        u32 data = 0;
 
        if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
-                                              where, &data))
+                                              where, &data))
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        if (size == 1)
@@ -128,7 +128,7 @@ static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
                data = val;
        else {
                if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus,
-                                                      devfn, where, &data))
+                                                      devfn, where, &data))
                        return PCIBIOS_DEVICE_NOT_FOUND;
 
                if (size == 1)
@@ -140,7 +140,7 @@ static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
        }
 
        if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn,
-                                              where, &data))
+                                              where, &data))
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        return PCIBIOS_SUCCESSFUL;
index 1f2afb5..16e7c25 100644 (file)
@@ -23,7 +23,7 @@
 #define LTQ_PCI_CFG_DEVNUM_SHF 11
 #define LTQ_PCI_CFG_FUNNUM_SHF 8
 
-#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_READ         0
 #define PCI_ACCESS_WRITE 1
 
 static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus,
index afd2211..98254af 100644 (file)
@@ -24,7 +24,7 @@
 #include <cs5536/cs5536.h>
 #endif
 
-#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_READ         0
 #define PCI_ACCESS_WRITE 1
 
 #define CFG_SPACE_REG(offset) \
index 5d9fbb0..92a8543 100644 (file)
@@ -1,8 +1,8 @@
 /*
- * Copyright (C) 1999, 2000, 2004, 2005  MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
  *    All rights reserved.
  *    Authors: Carsten Langgaard <carstenl@mips.com>
- *             Maciej W. Rozycki <macro@mips.com>
+ *            Maciej W. Rozycki <macro@mips.com>
  * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
  *
  *  This program is free software; you can distribute it and/or modify it
 
 #include <asm/mips-boards/msc01_pci.h>
 
-#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_READ         0
 #define PCI_ACCESS_WRITE 1
 
 /*
  *  PCI configuration cycle AD bus definition
  */
 /* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF           0
-#define PCI_CFG_TYPE0_FUNC_SHF          8
+#define PCI_CFG_TYPE0_REG_SHF          0
+#define PCI_CFG_TYPE0_FUNC_SHF         8
 
 /* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF           0
-#define PCI_CFG_TYPE1_FUNC_SHF          8
-#define PCI_CFG_TYPE1_DEV_SHF           11
-#define PCI_CFG_TYPE1_BUS_SHF           16
+#define PCI_CFG_TYPE1_REG_SHF          0
+#define PCI_CFG_TYPE1_FUNC_SHF         8
+#define PCI_CFG_TYPE1_DEV_SHF          11
+#define PCI_CFG_TYPE1_BUS_SHF          16
 
 static int msc_pcibios_config_access(unsigned char access_type,
        struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
@@ -97,7 +97,7 @@ static int msc_pcibios_read(struct pci_bus *bus, unsigned int devfn,
                return PCIBIOS_BAD_REGISTER_NUMBER;
 
        if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
-                                     &data))
+                                     &data))
                return -1;
 
        if (size == 1)
@@ -124,7 +124,7 @@ static int msc_pcibios_write(struct pci_bus *bus, unsigned int devfn,
                data = val;
        else {
                if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
-                                             where, &data))
+                                             where, &data))
                        return -1;
 
                if (size == 1)
index 99929cf..499e35c 100644 (file)
@@ -6,7 +6,7 @@
 #include <asm/lasat/lasat.h>
 #include <asm/nile4.h>
 
-#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_READ         0
 #define PCI_ACCESS_WRITE 1
 
 #define LO(reg) (reg / 4)
index 389bf66..d0b6f83 100644 (file)
@@ -9,8 +9,8 @@
  * Much of the code is derived from the original DDB5074 port by
  * Geert Uytterhoeven <geert@sonycom.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  *
@@ -57,18 +57,18 @@ static void pci_proc_init(void);
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Prints the count of how many times each PCI
- *               interrupt has asserted. Can be invoked by the
- *               /proc filesystem.
+ *              interrupt has asserted. Can be invoked by the
+ *              /proc filesystem.
  *
- *  INPUTS:      page    - part of STDOUT calculation
- *               off     - part of STDOUT calculation
- *               count   - part of STDOUT calculation
- *               data    - unused
+ *  INPUTS:     page    - part of STDOUT calculation
+ *              off     - part of STDOUT calculation
+ *              count   - part of STDOUT calculation
+ *              data    - unused
  *
- *  OUTPUTS:     start   - new start location
- *               eof     - end of file pointer
+ *  OUTPUTS:    start   - new start location
+ *              eof     - end of file pointer
  *
- *  RETURNS:     len     - STDOUT length
+ *  RETURNS:    len     - STDOUT length
  *
  ****************************************************************************/
 static int read_msp_pci_counts(char *page, char **start, off_t off,
@@ -106,21 +106,21 @@ static int read_msp_pci_counts(char *page, char **start, off_t off,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Generates a configuration write cycle for debug purposes.
- *               The IDSEL line asserted and location and data written are
- *               immaterial. Just want to be able to prove that a
- *               configuration write can be correctly generated on the
- *               PCI bus.  Intent is that this function by invocable from
- *               the /proc filesystem.
+ *              The IDSEL line asserted and location and data written are
+ *              immaterial. Just want to be able to prove that a
+ *              configuration write can be correctly generated on the
+ *              PCI bus.  Intent is that this function by invocable from
+ *              the /proc filesystem.
  *
- *  INPUTS:      page    - part of STDOUT calculation
- *               off     - part of STDOUT calculation
- *               count   - part of STDOUT calculation
- *               data    - unused
+ *  INPUTS:     page    - part of STDOUT calculation
+ *              off     - part of STDOUT calculation
+ *              count   - part of STDOUT calculation
+ *              data    - unused
  *
- *  OUTPUTS:     start   - new start location
- *               eof     - end of file pointer
+ *  OUTPUTS:    start   - new start location
+ *              eof     - end of file pointer
  *
- *  RETURNS:     len     - STDOUT length
+ *  RETURNS:    len     - STDOUT length
  *
  ****************************************************************************/
 static int gen_pci_cfg_wr(char *page, char **start, off_t off,
@@ -190,11 +190,11 @@ static int gen_pci_cfg_wr(char *page, char **start, off_t off,
  *
  *  DESCRIPTION: Create entries in the /proc filesystem for debug access.
  *
- *  INPUTS:      none
+ *  INPUTS:     none
  *
- *  OUTPUTS:     none
+ *  OUTPUTS:    none
  *
- *  RETURNS:     none
+ *  RETURNS:    none
  *
  ****************************************************************************/
 static void pci_proc_init(void)
@@ -214,44 +214,44 @@ static DEFINE_SPINLOCK(bpci_lock);
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Defines the address range that pciauto() will use to
- *               assign to the I/O BARs of PCI devices.
- *
- *               Use the start and end addresses of the MSP7120 PCI Host
- *               Controller I/O space, in the form that they appear on the
- *               PCI bus AFTER MSP7120 has performed address translation.
- *
- *               For I/O accesses, MSP7120 ignores OATRAN and maps I/O
- *               accesses into the bottom 0xFFF region of address space,
- *               so that is the range to put into the pci_io_resource
- *               struct.
- *
- *               In MSP4200, the start address was 0x04 instead of the
- *              expected 0x00. Will just assume there was a good reason
- *              for this!
- *
- *  NOTES:       Linux, by default, will assign I/O space to the lowest
- *               region of address space. Since MSP7120 and Linux,
- *               by default, have no offset in between how they map, the
- *               io_offset element of pci_controller struct should be set
- *               to zero.
+ *              assign to the I/O BARs of PCI devices.
+ *
+ *              Use the start and end addresses of the MSP7120 PCI Host
+ *              Controller I/O space, in the form that they appear on the
+ *              PCI bus AFTER MSP7120 has performed address translation.
+ *
+ *              For I/O accesses, MSP7120 ignores OATRAN and maps I/O
+ *              accesses into the bottom 0xFFF region of address space,
+ *              so that is the range to put into the pci_io_resource
+ *              struct.
+ *
+ *              In MSP4200, the start address was 0x04 instead of the
+ *              expected 0x00. Will just assume there was a good reason
+ *              for this!
+ *
+ *  NOTES:      Linux, by default, will assign I/O space to the lowest
+ *              region of address space. Since MSP7120 and Linux,
+ *              by default, have no offset in between how they map, the
+ *              io_offset element of pci_controller struct should be set
+ *              to zero.
  *  ELEMENTS:
- *    name       - String used for a meaningful name.
+ *    name      - String used for a meaningful name.
  *
- *    start      - Start address of MSP7120's I/O space, as MSP7120 presents
- *                 the address on the PCI bus.
+ *    start     - Start address of MSP7120's I/O space, as MSP7120 presents
+ *                the address on the PCI bus.
  *
- *    end        - End address of MSP7120's I/O space, as MSP7120 presents
- *                 the address on the PCI bus.
+ *    end       - End address of MSP7120's I/O space, as MSP7120 presents
+ *                the address on the PCI bus.
  *
- *    flags      - Attributes indicating the type of resource. In this case,
- *                 indicate I/O space.
+ *    flags     - Attributes indicating the type of resource. In this case,
+ *                indicate I/O space.
  *
  ****************************************************************************/
 static struct resource pci_io_resource = {
        .name   = "pci IO space",
        .start  = 0x04,
        .end    = 0x0FFF,
-       .flags  = IORESOURCE_IO /* I/O space */
+       .flags  = IORESOURCE_IO /* I/O space */
 };
 
 /*****************************************************************************
@@ -260,26 +260,26 @@ static struct resource pci_io_resource = {
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Defines the address range that pciauto() will use to
- *               assign to the memory BARs of PCI devices.
+ *              assign to the memory BARs of PCI devices.
  *
- *               The .start and .end values are dependent upon how address
- *               translation is performed by the OATRAN regiser.
+ *              The .start and .end values are dependent upon how address
+ *              translation is performed by the OATRAN regiser.
  *
- *               The values to use for .start and .end are the values
- *               in the form they appear on the PCI bus AFTER MSP7120 has
- *               performed OATRAN address translation.
+ *              The values to use for .start and .end are the values
+ *              in the form they appear on the PCI bus AFTER MSP7120 has
+ *              performed OATRAN address translation.
  *
  *  ELEMENTS:
- *    name       - String used for a meaningful name.
+ *    name      - String used for a meaningful name.
  *
- *    start      - Start address of MSP7120's memory space, as MSP7120 presents
- *                 the address on the PCI bus.
+ *    start     - Start address of MSP7120's memory space, as MSP7120 presents
+ *                the address on the PCI bus.
  *
- *    end        - End address of MSP7120's memory space, as MSP7120 presents
- *                 the address on the PCI bus.
+ *    end       - End address of MSP7120's memory space, as MSP7120 presents
+ *                the address on the PCI bus.
  *
- *    flags      - Attributes indicating the type of resource. In this case,
- *                 indicate memory space.
+ *    flags     - Attributes indicating the type of resource. In this case,
+ *                indicate memory space.
  *
  ****************************************************************************/
 static struct resource pci_mem_resource = {
@@ -295,17 +295,17 @@ static struct resource pci_mem_resource = {
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: PCI status interrupt handler. Updates the count of how
- *               many times each status bit has been set, then clears
- *               the status bits. If the appropriate macros are defined,
- *               these counts can be viewed via the /proc filesystem.
+ *              many times each status bit has been set, then clears
+ *              the status bits. If the appropriate macros are defined,
+ *              these counts can be viewed via the /proc filesystem.
  *
- *  INPUTS:      irq     - unused
- *               dev_id  - unused
- *               pt_regs - unused
+ *  INPUTS:     irq     - unused
+ *              dev_id  - unused
+ *              pt_regs - unused
  *
- *  OUTPUTS:     none
+ *  OUTPUTS:    none
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
+ *  RETURNS:    PCIBIOS_SUCCESSFUL  - success
  *
  ****************************************************************************/
 static irqreturn_t bpci_interrupt(int irq, void *dev_id)
@@ -335,41 +335,41 @@ static irqreturn_t bpci_interrupt(int irq, void *dev_id)
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Performs a PCI configuration access (rd or wr), then
- *               checks that the access succeeded by querying MSP7120's
- *               PCI status bits.
+ *              checks that the access succeeded by querying MSP7120's
+ *              PCI status bits.
  *
  *  INPUTS:
- *               access_type  - kind of PCI configuration cycle to perform
- *                              (read or write). Legal values are
- *                              PCI_ACCESS_WRITE and PCI_ACCESS_READ.
- *
- *               bus          - pointer to the bus number of the device to
- *                              be targeted for the configuration cycle.
- *                              The only element of the pci_bus structure
- *                              used is bus->number. This argument determines
- *                              if the configuration access will be Type 0 or
- *                              Type 1. Since MSP7120 assumes itself to be the
- *                              PCI Host, any non-zero bus->number generates
- *                              a Type 1 access.
- *
- *               devfn        - this is an 8-bit field. The lower three bits
- *                              specify the function number of the device to
- *                              be targeted for the configuration cycle, with
- *                              all three-bit combinations being legal. The
- *                              upper five bits specify the device number,
- *                              with legal values being 10 to 31.
- *
- *               where        - address within the Configuration Header
- *                              space to access.
- *
- *               data         - for write accesses, contains the data to
- *                              write.
+ *              access_type  - kind of PCI configuration cycle to perform
+ *                             (read or write). Legal values are
+ *                             PCI_ACCESS_WRITE and PCI_ACCESS_READ.
+ *
+ *              bus          - pointer to the bus number of the device to
+ *                             be targeted for the configuration cycle.
+ *                             The only element of the pci_bus structure
+ *                             used is bus->number. This argument determines
+ *                             if the configuration access will be Type 0 or
+ *                             Type 1. Since MSP7120 assumes itself to be the
+ *                             PCI Host, any non-zero bus->number generates
+ *                             a Type 1 access.
+ *
+ *              devfn        - this is an 8-bit field. The lower three bits
+ *                             specify the function number of the device to
+ *                             be targeted for the configuration cycle, with
+ *                             all three-bit combinations being legal. The
+ *                             upper five bits specify the device number,
+ *                             with legal values being 10 to 31.
+ *
+ *              where        - address within the Configuration Header
+ *                             space to access.
+ *
+ *              data         - for write accesses, contains the data to
+ *                             write.
  *
  *  OUTPUTS:
- *               data         - for read accesses, contains the value read.
+ *              data         - for read accesses, contains the value read.
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
- *               -1                  - access failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL  - success
+ *              -1                  - access failure
  *
  ****************************************************************************/
 int msp_pcibios_config_access(unsigned char access_type,
@@ -429,7 +429,7 @@ int msp_pcibios_config_access(unsigned char access_type,
         * for this Block Copy, called Block Copy 0 Fault (BC0F) and
         * Block Copy 1 Fault (BC1F). MSP4200 and MSP7120 don't have this
         * dedicated Block Copy block, so these two interrupts are now
-        * marked reserved. In case the  Block Copy is resurrected in a
+        * marked reserved. In case the  Block Copy is resurrected in a
         * future design, maintain the code that treats these two interrupts
         * specially.
         *
@@ -439,7 +439,7 @@ int msp_pcibios_config_access(unsigned char access_type,
        preg->if_status = ~(BPCI_IFSTATUS_BC0F | BPCI_IFSTATUS_BC1F);
 
        /* Setup address that is to appear on PCI bus */
-       preg->config_addr = BPCI_CFGADDR_ENABLE |
+       preg->config_addr = BPCI_CFGADDR_ENABLE |
                (bus_num << BPCI_CFGADDR_BUSNUM_SHF) |
                (dev_fn << BPCI_CFGADDR_FUNCTNUM_SHF) |
                (where & 0xFC);
@@ -494,21 +494,21 @@ int msp_pcibios_config_access(unsigned char access_type,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Read a byte from PCI configuration address spac
- *               Since the hardware can't address 8 bit chunks
- *               directly, read a 32-bit chunk, then mask off extraneous
- *               bits.
+ *              Since the hardware can't address 8 bit chunks
+ *              directly, read a 32-bit chunk, then mask off extraneous
+ *              bits.
  *
- *  INPUTS       bus    - structure containing attributes for the PCI bus
- *                        that the read is destined for.
- *               devfn  - device/function combination that the read is
- *                        destined for.
- *               where  - register within the Configuration Header space
- *                        to access.
+ *  INPUTS      bus    - structure containing attributes for the PCI bus
+ *                       that the read is destined for.
+ *              devfn  - device/function combination that the read is
+ *                       destined for.
+ *              where  - register within the Configuration Header space
+ *                       to access.
  *
- *  OUTPUTS      val    - read data
+ *  OUTPUTS     val    - read data
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
- *               -1                  - read access failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL  - success
+ *              -1                  - read access failure
  *
  ****************************************************************************/
 static int
@@ -541,22 +541,22 @@ msp_pcibios_read_config_byte(struct pci_bus *bus,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Read a word (16 bits) from PCI configuration address space.
- *               Since the hardware can't address 16 bit chunks
- *               directly, read a 32-bit chunk, then mask off extraneous
- *               bits.
+ *              Since the hardware can't address 16 bit chunks
+ *              directly, read a 32-bit chunk, then mask off extraneous
+ *              bits.
  *
- *  INPUTS       bus    - structure containing attributes for the PCI bus
- *                        that the read is destined for.
- *               devfn  - device/function combination that the read is
- *                        destined for.
- *               where  - register within the Configuration Header space
- *                        to access.
+ *  INPUTS      bus    - structure containing attributes for the PCI bus
+ *                       that the read is destined for.
+ *              devfn  - device/function combination that the read is
+ *                       destined for.
+ *              where  - register within the Configuration Header space
+ *                       to access.
  *
- *  OUTPUTS      val    - read data
+ *  OUTPUTS     val    - read data
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL           - success
- *               PCIBIOS_BAD_REGISTER_NUMBER  - bad register address
- *               -1                           - read access failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL           - success
+ *              PCIBIOS_BAD_REGISTER_NUMBER  - bad register address
+ *              -1                           - read access failure
  *
  ****************************************************************************/
 static int
@@ -600,20 +600,20 @@ msp_pcibios_read_config_word(struct pci_bus *bus,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Read a double word (32 bits) from PCI configuration
- *               address space.
+ *              address space.
  *
- *  INPUTS       bus    - structure containing attributes for the PCI bus
- *                        that the read is destined for.
- *               devfn  - device/function combination that the read is
- *                        destined for.
- *               where  - register within the Configuration Header space
- *                        to access.
+ *  INPUTS      bus    - structure containing attributes for the PCI bus
+ *                       that the read is destined for.
+ *              devfn  - device/function combination that the read is
+ *                       destined for.
+ *              where  - register within the Configuration Header space
+ *                       to access.
  *
- *  OUTPUTS      val    - read data
+ *  OUTPUTS     val    - read data
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL           - success
- *               PCIBIOS_BAD_REGISTER_NUMBER  - bad register address
- *               -1                           - read access failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL           - success
+ *              PCIBIOS_BAD_REGISTER_NUMBER  - bad register address
+ *              -1                           - read access failure
  *
  ****************************************************************************/
 static int
@@ -652,21 +652,21 @@ msp_pcibios_read_config_dword(struct pci_bus *bus,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Write a byte to PCI configuration address space.
- *               Since the hardware can't address 8 bit chunks
- *               directly, a read-modify-write is performed.
+ *              Since the hardware can't address 8 bit chunks
+ *              directly, a read-modify-write is performed.
  *
- *  INPUTS       bus    - structure containing attributes for the PCI bus
- *                        that the write is destined for.
- *               devfn  - device/function combination that the write is
- *                        destined for.
- *               where  - register within the Configuration Header space
- *                        to access.
- *               val    - value to write
+ *  INPUTS      bus    - structure containing attributes for the PCI bus
+ *                       that the write is destined for.
+ *              devfn  - device/function combination that the write is
+ *                       destined for.
+ *              where  - register within the Configuration Header space
+ *                       to access.
+ *              val    - value to write
  *
- *  OUTPUTS      none
+ *  OUTPUTS     none
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
- *               -1                  - write access failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL  - success
+ *              -1                  - write access failure
  *
  ****************************************************************************/
 static int
@@ -700,22 +700,22 @@ msp_pcibios_write_config_byte(struct pci_bus *bus,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Write a word (16-bits) to PCI configuration address space.
- *               Since the hardware can't address 16 bit chunks
- *               directly, a read-modify-write is performed.
+ *              Since the hardware can't address 16 bit chunks
+ *              directly, a read-modify-write is performed.
  *
- *  INPUTS       bus    - structure containing attributes for the PCI bus
- *                        that the write is destined for.
- *               devfn  - device/function combination that the write is
- *                        destined for.
- *               where  - register within the Configuration Header space
- *                        to access.
- *               val    - value to write
+ *  INPUTS      bus    - structure containing attributes for the PCI bus
+ *                       that the write is destined for.
+ *              devfn  - device/function combination that the write is
+ *                       destined for.
+ *              where  - register within the Configuration Header space
+ *                       to access.
+ *              val    - value to write
  *
- *  OUTPUTS      none
+ *  OUTPUTS     none
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL           - success
- *               PCIBIOS_BAD_REGISTER_NUMBER  - bad register address
- *               -1                           - write access failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL           - success
+ *              PCIBIOS_BAD_REGISTER_NUMBER  - bad register address
+ *              -1                           - write access failure
  *
  ****************************************************************************/
 static int
@@ -753,21 +753,21 @@ msp_pcibios_write_config_word(struct pci_bus *bus,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Write a double word (32-bits) to PCI configuration address
- *               space.
+ *              space.
  *
- *  INPUTS       bus    - structure containing attributes for the PCI bus
- *                        that the write is destined for.
- *               devfn  - device/function combination that the write is
- *                        destined for.
- *               where  - register within the Configuration Header space
- *                        to access.
- *               val    - value to write
+ *  INPUTS      bus    - structure containing attributes for the PCI bus
+ *                       that the write is destined for.
+ *              devfn  - device/function combination that the write is
+ *                       destined for.
+ *              where  - register within the Configuration Header space
+ *                       to access.
+ *              val    - value to write
  *
- *  OUTPUTS      none
+ *  OUTPUTS     none
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL           - success
- *               PCIBIOS_BAD_REGISTER_NUMBER  - bad register address
- *               -1                           - write access failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL           - success
+ *              PCIBIOS_BAD_REGISTER_NUMBER  - bad register address
+ *              -1                           - write access failure
  *
  ****************************************************************************/
 static int
@@ -794,22 +794,22 @@ msp_pcibios_write_config_dword(struct pci_bus *bus,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Interface the PCI configuration read request with
- *               the appropriate function, based on how many bytes
- *               the read request is.
+ *              the appropriate function, based on how many bytes
+ *              the read request is.
  *
- *  INPUTS       bus    - structure containing attributes for the PCI bus
- *                        that the write is destined for.
- *               devfn  - device/function combination that the write is
- *                        destined for.
- *               where  - register within the Configuration Header space
- *                        to access.
- *               size   - in units of bytes, should be 1, 2, or 4.
+ *  INPUTS      bus    - structure containing attributes for the PCI bus
+ *                       that the write is destined for.
+ *              devfn  - device/function combination that the write is
+ *                       destined for.
+ *              where  - register within the Configuration Header space
+ *                       to access.
+ *              size   - in units of bytes, should be 1, 2, or 4.
  *
- *  OUTPUTS      val    - value read, with any extraneous bytes masked
- *                        to zero.
+ *  OUTPUTS     val    - value read, with any extraneous bytes masked
+ *                       to zero.
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL   - success
- *               -1                   - failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL   - success
+ *              -1                   - failure
  *
  ****************************************************************************/
 int
@@ -845,22 +845,22 @@ msp_pcibios_read_config(struct pci_bus *bus,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Interface the PCI configuration write request with
- *               the appropriate function, based on how many bytes
- *               the read request is.
+ *              the appropriate function, based on how many bytes
+ *              the read request is.
  *
- *  INPUTS       bus    - structure containing attributes for the PCI bus
- *                        that the write is destined for.
- *               devfn  - device/function combination that the write is
- *                        destined for.
- *               where  - register within the Configuration Header space
- *                        to access.
- *               size   - in units of bytes, should be 1, 2, or 4.
- *               val    - value to write
+ *  INPUTS      bus    - structure containing attributes for the PCI bus
+ *                       that the write is destined for.
+ *              devfn  - device/function combination that the write is
+ *                       destined for.
+ *              where  - register within the Configuration Header space
+ *                       to access.
+ *              size   - in units of bytes, should be 1, 2, or 4.
+ *              val    - value to write
  *
- *  OUTPUTS:     none
+ *  OUTPUTS:    none
  *
- *  RETURNS:     PCIBIOS_SUCCESSFUL   - success
- *               -1                   - failure
+ *  RETURNS:    PCIBIOS_SUCCESSFUL   - success
+ *              -1                   - failure
  *
  ****************************************************************************/
 int
@@ -897,11 +897,11 @@ msp_pcibios_write_config(struct pci_bus *bus,
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: structure to abstract the hardware specific PCI
- *               configuration accesses.
+ *              configuration accesses.
  *
  *  ELEMENTS:
- *    read      - function for Linux to generate PCI Configuration reads.
- *    write     - function for Linux to generate PCI Configuration writes.
+ *    read     - function for Linux to generate PCI Configuration reads.
+ *    write    - function for Linux to generate PCI Configuration writes.
  *
  ****************************************************************************/
 struct pci_ops msp_pci_ops = {
@@ -917,27 +917,27 @@ struct pci_ops msp_pci_ops = {
  *  Describes the attributes of the MSP7120 PCI Host Controller
  *
  *  ELEMENTS:
- *    pci_ops      - abstracts the hardware specific PCI configuration
- *                   accesses.
+ *    pci_ops     - abstracts the hardware specific PCI configuration
+ *                  accesses.
  *
  *    mem_resource - address range pciauto() uses to assign to PCI device
- *                   memory BARs.
+ *                  memory BARs.
  *
  *    mem_offset   - offset between how MSP7120 outbound PCI memory
- *                   transaction addresses appear on the PCI bus and how Linux
- *                   wants to configure memory BARs of the PCI devices.
- *                   MSP7120 does nothing funky, so just set to zero.
+ *                  transaction addresses appear on the PCI bus and how Linux
+ *                  wants to configure memory BARs of the PCI devices.
+ *                  MSP7120 does nothing funky, so just set to zero.
  *
  *    io_resource  - address range pciauto() uses to assign to PCI device
- *                   I/O BARs.
+ *                  I/O BARs.
  *
- *    io_offset    - offset between how MSP7120 outbound PCI I/O
- *                   transaction addresses appear on the PCI bus and how
- *                   Linux defaults to configure I/O BARs of the PCI devices.
- *                   MSP7120 maps outbound I/O accesses into the bottom
- *                   bottom 4K of PCI address space (and ignores OATRAN).
- *                   Since the Linux default is to configure I/O BARs to the
- *                   bottom 4K, no special offset is needed. Just set to zero.
+ *    io_offset           - offset between how MSP7120 outbound PCI I/O
+ *                  transaction addresses appear on the PCI bus and how
+ *                  Linux defaults to configure I/O BARs of the PCI devices.
+ *                  MSP7120 maps outbound I/O accesses into the bottom
+ *                  bottom 4K of PCI address space (and ignores OATRAN).
+ *                  Since the Linux default is to configure I/O BARs to the
+ *                  bottom 4K, no special offset is needed. Just set to zero.
  *
  ****************************************************************************/
 static struct pci_controller msp_pci_controller = {
@@ -955,7 +955,7 @@ static struct pci_controller msp_pci_controller = {
  *  _________________________________________________________________________
  *
  *  DESCRIPTION: Initialize the PCI Host Controller and register it with
- *               Linux so Linux can seize control of the PCI bus.
+ *              Linux so Linux can seize control of the PCI bus.
  *
  ****************************************************************************/
 void __init msp_pci_init(void)
@@ -979,7 +979,7 @@ void __init msp_pci_init(void)
        *(unsigned long *)QFLUSH_REG_1 = 3;
 
        /* Configure PCI Host Controller. */
-       preg->if_status = ~0;           /* Clear cause register bits */
+       preg->if_status = ~0;           /* Clear cause register bits */
        preg->config_addr = 0;          /* Clear config access */
        preg->oatran    = MSP_PCI_OATRAN; /* PCI outbound addr translation */
        preg->if_mask   = 0xF8BF87C0;   /* Enable all PCI status interrupts */
diff --git a/arch/mips/pci/ops-pnx8550.c b/arch/mips/pci/ops-pnx8550.c
deleted file mode 100644 (file)
index 1e6213f..0000000
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- *
- *  BRIEF MODULE DESCRIPTION
- *
- *  2.6 port, Embedded Alley Solutions, Inc
- *
- *  Based on:
- *  Author: source@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-
-#include <asm/mach-pnx8550/pci.h>
-#include <asm/mach-pnx8550/glb.h>
-
-static inline void clear_status(void)
-{
-       unsigned long pci_stat;
-
-       pci_stat = inl(PCI_BASE | PCI_GPPM_STATUS);
-       outl(pci_stat, PCI_BASE | PCI_GPPM_ICLR);
-}
-
-static inline unsigned int
-calc_cfg_addr(struct pci_bus *bus, unsigned int devfn, int where)
-{
-       unsigned int addr;
-
-       addr = ((bus->number > 0) ? (((bus->number & 0xff) << PCI_CFG_BUS_SHIFT) | 1) : 0);
-       addr |= ((devfn & 0xff) << PCI_CFG_FUNC_SHIFT) | (where & 0xfc);
-
-       return addr;
-}
-
-static int
-config_access(unsigned int pci_cmd, struct pci_bus *bus, unsigned int devfn, int where, unsigned int pci_mode, unsigned int *val)
-{
-       unsigned int flags;
-       unsigned long loops = 0;
-       unsigned long ioaddr = calc_cfg_addr(bus, devfn, where);
-
-       local_irq_save(flags);
-       /*Clear pending interrupt status */
-       if (inl(PCI_BASE | PCI_GPPM_STATUS)) {
-               clear_status();
-               while (!(inl(PCI_BASE | PCI_GPPM_STATUS) == 0)) ;
-       }
-
-       outl(ioaddr, PCI_BASE | PCI_GPPM_ADDR);
-
-       if ((pci_cmd == PCI_CMD_IOW) || (pci_cmd == PCI_CMD_CONFIG_WRITE))
-               outl(*val, PCI_BASE | PCI_GPPM_WDAT);
-
-       outl(INIT_PCI_CYCLE | pci_cmd | (pci_mode & PCI_BYTE_ENABLE_MASK),
-            PCI_BASE | PCI_GPPM_CTRL);
-
-       loops =
-           ((loops_per_jiffy *
-             PCI_IO_JIFFIES_TIMEOUT) >> (PCI_IO_JIFFIES_SHIFT));
-       while (1) {
-               if (inl(PCI_BASE | PCI_GPPM_STATUS) & GPPM_DONE) {
-                       if ((pci_cmd == PCI_CMD_IOR) ||
-                           (pci_cmd == PCI_CMD_CONFIG_READ))
-                               *val = inl(PCI_BASE | PCI_GPPM_RDAT);
-                       clear_status();
-                       local_irq_restore(flags);
-                       return PCIBIOS_SUCCESSFUL;
-               } else if (inl(PCI_BASE | PCI_GPPM_STATUS) & GPPM_R_MABORT) {
-                       break;
-               }
-
-               loops--;
-               if (loops == 0) {
-                       printk("%s : Arbiter Locked.\n", __func__);
-               }
-       }
-
-       clear_status();
-       if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_IOW)) {
-               printk("%s timeout (GPPM_CTRL=%X) ioaddr %lX pci_cmd %X\n",
-                      __func__, inl(PCI_BASE | PCI_GPPM_CTRL), ioaddr,
-                      pci_cmd);
-       }
-
-       if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_CONFIG_READ))
-               *val = 0xffffffff;
-       local_irq_restore(flags);
-       return PCIBIOS_DEVICE_NOT_FOUND;
-}
-
-/*
- * We can't address 8 and 16 bit words directly.  Instead we have to
- * read/write a 32bit word and mask/modify the data we actually want.
- */
-static int
-read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val)
-{
-       unsigned int data = 0;
-       int err;
-
-       if (bus == NULL)
-               return -1;
-
-       err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(1 << (where & 3)), &data);
-       switch (where & 0x03) {
-       case 0:
-               *val = (unsigned char)(data & 0x000000ff);
-               break;
-       case 1:
-               *val = (unsigned char)((data & 0x0000ff00) >> 8);
-               break;
-       case 2:
-               *val = (unsigned char)((data & 0x00ff0000) >> 16);
-               break;
-       case 3:
-               *val = (unsigned char)((data & 0xff000000) >> 24);
-               break;
-       }
-
-       return err;
-}
-
-static int
-read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val)
-{
-       unsigned int data = 0;
-       int err;
-
-       if (bus == NULL)
-               return -1;
-
-       if (where & 0x01)
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(3 << (where & 3)), &data);
-       switch (where & 0x02) {
-       case 0:
-               *val = (unsigned short)(data & 0x0000ffff);
-               break;
-       case 2:
-               *val = (unsigned short)((data & 0xffff0000) >> 16);
-               break;
-       }
-
-       return err;
-}
-
-static int
-read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val)
-{
-       int err;
-       if (bus == NULL)
-               return -1;
-
-       if (where & 0x03)
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, 0, val);
-
-       return err;
-}
-
-static int
-write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val)
-{
-       unsigned int data = (unsigned int)val;
-       int err;
-
-       if (bus == NULL)
-               return -1;
-
-       switch (where & 0x03) {
-       case 1:
-               data = (data << 8);
-               break;
-       case 2:
-               data = (data << 16);
-               break;
-       case 3:
-               data = (data << 24);
-               break;
-       default:
-               break;
-       }
-
-       err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(1 << (where & 3)), &data);
-
-       return err;
-}
-
-static int
-write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val)
-{
-       unsigned int data = (unsigned int)val;
-       int err;
-
-       if (bus == NULL)
-               return -1;
-
-       if (where & 0x01)
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       switch (where & 0x02) {
-       case 2:
-               data = (data << 16);
-               break;
-       default:
-               break;
-       }
-       err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(3 << (where & 3)), &data);
-
-       return err;
-}
-
-static int
-write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val)
-{
-       int err;
-       if (bus == NULL)
-               return -1;
-
-       if (where & 0x03)
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, 0, &val);
-
-       return err;
-}
-
-static int config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val)
-{
-       switch (size) {
-       case 1: {
-                       u8 _val;
-                       int rc = read_config_byte(bus, devfn, where, &_val);
-                       *val = _val;
-                       return rc;
-               }
-       case 2: {
-                       u16 _val;
-                       int rc = read_config_word(bus, devfn, where, &_val);
-                       *val = _val;
-                       return rc;
-               }
-       default:
-               return read_config_dword(bus, devfn, where, val);
-       }
-}
-
-static int config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
-{
-       switch (size) {
-       case 1:
-               return write_config_byte(bus, devfn, where, (u8) val);
-       case 2:
-               return write_config_word(bus, devfn, where, (u16) val);
-       default:
-               return write_config_dword(bus, devfn, where, val);
-       }
-}
-
-struct pci_ops pnx8550_pci_ops = {
-       config_read,
-       config_write
-};
index d1f8fa2..7c7182e 100644 (file)
@@ -35,7 +35,7 @@
 #include <asm/mach-rc32434/rc32434.h>
 #include <asm/mach-rc32434/pci.h>
 
-#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_READ         0
 #define PCI_ACCESS_WRITE 1
 
 
index 97ed25b..35daa7f 100644 (file)
@@ -14,8 +14,8 @@
 
 /*
  * It seems that on the RM200 only lower 3 bits of the 5 bit PCI device
- * address are decoded.  We therefore manually have to reject attempts at
- * reading outside this range.  Being on the paranoid side we only do this
+ * address are decoded.         We therefore manually have to reject attempts at
+ * reading outside this range. Being on the paranoid side we only do this
  * test for bus 0 and hope forwarding and decoding work properly for any
  * subordinated busses.
  *
@@ -31,8 +31,8 @@ static int set_config_address(unsigned int busno, unsigned int devfn, int reg)
 
        *(volatile u32 *)PCIMT_CONFIG_ADDRESS =
                 ((busno    & 0xff) << 16) |
-                ((devfn    & 0xff) <<  8) |
-                 (reg      & 0xfc);
+                ((devfn    & 0xff) <<  8) |
+                 (reg      & 0xfc);
 
        return PCIBIOS_SUCCESSFUL;
 }
index 0d69d6f..3d5df51 100644 (file)
@@ -2,16 +2,16 @@
  * Define the pci_ops for the PCIC on Toshiba TX4927, TX4938, etc.
  *
  * Based on linux/arch/mips/pci/ops-tx4938.c,
- *          linux/arch/mips/pci/fixup-rbtx4938.c,
- *          linux/arch/mips/txx9/rbtx4938/setup.c,
+ *         linux/arch/mips/pci/fixup-rbtx4938.c,
+ *         linux/arch/mips/txx9/rbtx4938/setup.c,
  *         and RBTX49xx patch from CELF patch archive.
  *
  * 2003-2005 (c) MontaVista Software, Inc.
  * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
  * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 28962a7..551128c 100644 (file)
@@ -33,7 +33,7 @@
 #define PCICONFAREG    (void __iomem *)KSEG1ADDR(0x0f000c18)
 
 static inline int set_pci_configuration_address(unsigned char number,
-                                                unsigned int devfn, int where)
+                                               unsigned int devfn, int where)
 {
        if (number == 0) {
                /*
@@ -59,7 +59,7 @@ static inline int set_pci_configuration_address(unsigned char number,
 }
 
 static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
-                           int size, uint32_t *val)
+                          int size, uint32_t *val)
 {
        uint32_t data;
 
@@ -87,7 +87,7 @@ static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
 }
 
 static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where,
-                            int size, uint32_t val)
+                           int size, uint32_t val)
 {
        uint32_t data;
        int shift;
index c4ea6cc..38a80c8 100644 (file)
@@ -29,7 +29,7 @@
 #define PCI_ACCESS_WRITE       1
 
 struct alchemy_pci_context {
-       struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
+       struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
        void __iomem *regs;                     /* ctrl base */
        /* tools for wired entry for config space access */
        unsigned long last_elo0;
@@ -381,7 +381,7 @@ static int alchemy_pci_probe(struct platform_device *pdev)
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
-               dev_err(&pdev->dev, "no  pcictl ctrl regs resource\n");
+               dev_err(&pdev->dev, "no  pcictl ctrl regs resource\n");
                ret = -ENODEV;
                goto out1;
        }
@@ -482,7 +482,7 @@ out:
 
 static struct platform_driver alchemy_pcictl_driver = {
        .probe          = alchemy_pci_probe,
-       .driver = {
+       .driver = {
                .name   = "alchemy-pci",
                .owner  = THIS_MODULE,
        },
index 6eaa4f2..412ec02 100644 (file)
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
 
 #include <asm/mach-ath79/ar71xx_regs.h>
 #include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/pci.h>
-
-#define AR71XX_PCI_MEM_BASE    0x10000000
-#define AR71XX_PCI_MEM_SIZE    0x07000000
-
-#define AR71XX_PCI_WIN0_OFFS           0x10000000
-#define AR71XX_PCI_WIN1_OFFS           0x11000000
-#define AR71XX_PCI_WIN2_OFFS           0x12000000
-#define AR71XX_PCI_WIN3_OFFS           0x13000000
-#define AR71XX_PCI_WIN4_OFFS           0x14000000
-#define AR71XX_PCI_WIN5_OFFS           0x15000000
-#define AR71XX_PCI_WIN6_OFFS           0x16000000
-#define AR71XX_PCI_WIN7_OFFS           0x07000000
-
-#define AR71XX_PCI_CFG_BASE            \
-       (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
-#define AR71XX_PCI_CFG_SIZE            0x100
 
 #define AR71XX_PCI_REG_CRP_AD_CBE      0x00
 #define AR71XX_PCI_REG_CRP_WRDATA      0x04
 
 #define AR71XX_PCI_IRQ_COUNT           5
 
-static DEFINE_SPINLOCK(ar71xx_pci_lock);
-static void __iomem *ar71xx_pcicfg_base;
+struct ar71xx_pci_controller {
+       void __iomem *cfg_base;
+       spinlock_t lock;
+       int irq;
+       int irq_base;
+       struct pci_controller pci_ctrl;
+       struct resource io_res;
+       struct resource mem_res;
+};
 
 /* Byte lane enable bits */
 static const u8 ar71xx_pci_ble_table[4][4] = {
@@ -107,9 +99,18 @@ static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
        return ret;
 }
 
-static int ar71xx_pci_check_error(int quiet)
+static inline struct ar71xx_pci_controller *
+pci_bus_to_ar71xx_controller(struct pci_bus *bus)
 {
-       void __iomem *base = ar71xx_pcicfg_base;
+       struct pci_controller *hose;
+
+       hose = (struct pci_controller *) bus->sysdata;
+       return container_of(hose, struct ar71xx_pci_controller, pci_ctrl);
+}
+
+static int ar71xx_pci_check_error(struct ar71xx_pci_controller *apc, int quiet)
+{
+       void __iomem *base = apc->cfg_base;
        u32 pci_err;
        u32 ahb_err;
 
@@ -144,9 +145,10 @@ static int ar71xx_pci_check_error(int quiet)
        return !!(ahb_err | pci_err);
 }
 
-static inline void ar71xx_pci_local_write(int where, int size, u32 value)
+static inline void ar71xx_pci_local_write(struct ar71xx_pci_controller *apc,
+                                         int where, int size, u32 value)
 {
-       void __iomem *base = ar71xx_pcicfg_base;
+       void __iomem *base = apc->cfg_base;
        u32 ad_cbe;
 
        value = value << (8 * (where & 3));
@@ -162,7 +164,8 @@ static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
                                         unsigned int devfn,
                                         int where, int size, u32 cmd)
 {
-       void __iomem *base = ar71xx_pcicfg_base;
+       struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+       void __iomem *base = apc->cfg_base;
        u32 addr;
 
        addr = ar71xx_pci_bus_addr(bus, devfn, where);
@@ -171,13 +174,14 @@ static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
        __raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
                     base + AR71XX_PCI_REG_CFG_CBE);
 
-       return ar71xx_pci_check_error(1);
+       return ar71xx_pci_check_error(apc, 1);
 }
 
 static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
                                  int where, int size, u32 *value)
 {
-       void __iomem *base = ar71xx_pcicfg_base;
+       struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+       void __iomem *base = apc->cfg_base;
        unsigned long flags;
        u32 data;
        int err;
@@ -186,7 +190,7 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
        ret = PCIBIOS_SUCCESSFUL;
        data = ~0;
 
-       spin_lock_irqsave(&ar71xx_pci_lock, flags);
+       spin_lock_irqsave(&apc->lock, flags);
 
        err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
                                     AR71XX_PCI_CFG_CMD_READ);
@@ -195,7 +199,7 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
        else
                data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
 
-       spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+       spin_unlock_irqrestore(&apc->lock, flags);
 
        *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
 
@@ -205,7 +209,8 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
 static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
                                   int where, int size, u32 value)
 {
-       void __iomem *base = ar71xx_pcicfg_base;
+       struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+       void __iomem *base = apc->cfg_base;
        unsigned long flags;
        int err;
        int ret;
@@ -213,7 +218,7 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
        value = value << (8 * (where & 3));
        ret = PCIBIOS_SUCCESSFUL;
 
-       spin_lock_irqsave(&ar71xx_pci_lock, flags);
+       spin_lock_irqsave(&apc->lock, flags);
 
        err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
                                     AR71XX_PCI_CFG_CMD_WRITE);
@@ -222,7 +227,7 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
        else
                __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
 
-       spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+       spin_unlock_irqrestore(&apc->lock, flags);
 
        return ret;
 }
@@ -232,45 +237,28 @@ static struct pci_ops ar71xx_pci_ops = {
        .write  = ar71xx_pci_write_config,
 };
 
-static struct resource ar71xx_pci_io_resource = {
-       .name           = "PCI IO space",
-       .start          = 0,
-       .end            = 0,
-       .flags          = IORESOURCE_IO,
-};
-
-static struct resource ar71xx_pci_mem_resource = {
-       .name           = "PCI memory space",
-       .start          = AR71XX_PCI_MEM_BASE,
-       .end            = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1,
-       .flags          = IORESOURCE_MEM
-};
-
-static struct pci_controller ar71xx_pci_controller = {
-       .pci_ops        = &ar71xx_pci_ops,
-       .mem_resource   = &ar71xx_pci_mem_resource,
-       .io_resource    = &ar71xx_pci_io_resource,
-};
-
 static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
+       struct ar71xx_pci_controller *apc;
        void __iomem *base = ath79_reset_base;
        u32 pending;
 
+       apc = irq_get_handler_data(irq);
+
        pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
                  __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
 
        if (pending & AR71XX_PCI_INT_DEV0)
-               generic_handle_irq(ATH79_PCI_IRQ(0));
+               generic_handle_irq(apc->irq_base + 0);
 
        else if (pending & AR71XX_PCI_INT_DEV1)
-               generic_handle_irq(ATH79_PCI_IRQ(1));
+               generic_handle_irq(apc->irq_base + 1);
 
        else if (pending & AR71XX_PCI_INT_DEV2)
-               generic_handle_irq(ATH79_PCI_IRQ(2));
+               generic_handle_irq(apc->irq_base + 2);
 
        else if (pending & AR71XX_PCI_INT_CORE)
-               generic_handle_irq(ATH79_PCI_IRQ(4));
+               generic_handle_irq(apc->irq_base + 4);
 
        else
                spurious_interrupt();
@@ -278,10 +266,14 @@ static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
 
 static void ar71xx_pci_irq_unmask(struct irq_data *d)
 {
-       unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+       struct ar71xx_pci_controller *apc;
+       unsigned int irq;
        void __iomem *base = ath79_reset_base;
        u32 t;
 
+       apc = irq_data_get_irq_chip_data(d);
+       irq = d->irq - apc->irq_base;
+
        t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
        __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
 
@@ -291,10 +283,14 @@ static void ar71xx_pci_irq_unmask(struct irq_data *d)
 
 static void ar71xx_pci_irq_mask(struct irq_data *d)
 {
-       unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+       struct ar71xx_pci_controller *apc;
+       unsigned int irq;
        void __iomem *base = ath79_reset_base;
        u32 t;
 
+       apc = irq_data_get_irq_chip_data(d);
+       irq = d->irq - apc->irq_base;
+
        t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
        __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
 
@@ -309,7 +305,7 @@ static struct irq_chip ar71xx_pci_irq_chip = {
        .irq_mask_ack   = ar71xx_pci_irq_mask,
 };
 
-static __init void ar71xx_pci_irq_init(void)
+static void ar71xx_pci_irq_init(struct ar71xx_pci_controller *apc)
 {
        void __iomem *base = ath79_reset_base;
        int i;
@@ -319,15 +315,19 @@ static __init void ar71xx_pci_irq_init(void)
 
        BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
 
-       for (i = ATH79_PCI_IRQ_BASE;
-            i < ATH79_PCI_IRQ_BASE + AR71XX_PCI_IRQ_COUNT; i++)
+       apc->irq_base = ATH79_PCI_IRQ_BASE;
+       for (i = apc->irq_base;
+            i < apc->irq_base + AR71XX_PCI_IRQ_COUNT; i++) {
                irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
                                         handle_level_irq);
+               irq_set_chip_data(i, apc);
+       }
 
-       irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler);
+       irq_set_handler_data(apc->irq, apc);
+       irq_set_chained_handler(apc->irq, ar71xx_pci_irq_handler);
 }
 
-static __init void ar71xx_pci_reset(void)
+static void ar71xx_pci_reset(void)
 {
        void __iomem *ddr_base = ath79_ddr_base;
 
@@ -349,27 +349,83 @@ static __init void ar71xx_pci_reset(void)
        mdelay(100);
 }
 
-__init int ar71xx_pcibios_init(void)
+static int ar71xx_pci_probe(struct platform_device *pdev)
 {
+       struct ar71xx_pci_controller *apc;
+       struct resource *res;
        u32 t;
 
-       ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE);
-       if (ar71xx_pcicfg_base == NULL)
+       apc = devm_kzalloc(&pdev->dev, sizeof(struct ar71xx_pci_controller),
+                          GFP_KERNEL);
+       if (!apc)
+               return -ENOMEM;
+
+       spin_lock_init(&apc->lock);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
+       if (!res)
+               return -EINVAL;
+
+       apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res);
+       if (!apc->cfg_base)
                return -ENOMEM;
 
+       apc->irq = platform_get_irq(pdev, 0);
+       if (apc->irq < 0)
+               return -EINVAL;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
+       if (!res)
+               return -EINVAL;
+
+       apc->io_res.parent = res;
+       apc->io_res.name = "PCI IO space";
+       apc->io_res.start = res->start;
+       apc->io_res.end = res->end;
+       apc->io_res.flags = IORESOURCE_IO;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
+       if (!res)
+               return -EINVAL;
+
+       apc->mem_res.parent = res;
+       apc->mem_res.name = "PCI memory space";
+       apc->mem_res.start = res->start;
+       apc->mem_res.end = res->end;
+       apc->mem_res.flags = IORESOURCE_MEM;
+
        ar71xx_pci_reset();
 
        /* setup COMMAND register */
        t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
          | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
-       ar71xx_pci_local_write(PCI_COMMAND, 4, t);
+       ar71xx_pci_local_write(apc, PCI_COMMAND, 4, t);
 
        /* clear bus errors */
-       ar71xx_pci_check_error(1);
+       ar71xx_pci_check_error(apc, 1);
+
+       ar71xx_pci_irq_init(apc);
 
-       ar71xx_pci_irq_init();
+       apc->pci_ctrl.pci_ops = &ar71xx_pci_ops;
+       apc->pci_ctrl.mem_resource = &apc->mem_res;
+       apc->pci_ctrl.io_resource = &apc->io_res;
 
-       register_pci_controller(&ar71xx_pci_controller);
+       register_pci_controller(&apc->pci_ctrl);
 
        return 0;
 }
+
+static struct platform_driver ar71xx_pci_driver = {
+       .probe = ar71xx_pci_probe,
+       .driver = {
+               .name = "ar71xx-pci",
+               .owner = THIS_MODULE,
+       },
+};
+
+static int __init ar71xx_pci_init(void)
+{
+       return platform_driver_register(&ar71xx_pci_driver);
+}
+
+postcore_initcall(ar71xx_pci_init);
index c11c75b..8a0700d 100644 (file)
@@ -9,19 +9,13 @@
  *  by the Free Software Foundation.
  */
 
+#include <linux/spinlock.h>
 #include <linux/irq.h>
 #include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
 #include <asm/mach-ath79/ath79.h>
 #include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/pci.h>
-
-#define AR724X_PCI_CFG_BASE    0x14000000
-#define AR724X_PCI_CFG_SIZE    0x1000
-#define AR724X_PCI_CTRL_BASE   (AR71XX_APB_BASE + 0x000f0000)
-#define AR724X_PCI_CTRL_SIZE   0x100
-
-#define AR724X_PCI_MEM_BASE    0x10000000
-#define AR724X_PCI_MEM_SIZE    0x04000000
 
 #define AR724X_PCI_REG_RESET           0x18
 #define AR724X_PCI_REG_INT_STATUS      0x4c
 
 #define AR7240_BAR0_WAR_VALUE  0xffff
 
-static DEFINE_SPINLOCK(ar724x_pci_lock);
-static void __iomem *ar724x_pci_devcfg_base;
-static void __iomem *ar724x_pci_ctrl_base;
+#define AR724X_PCI_CMD_INIT    (PCI_COMMAND_MEMORY |           \
+                                PCI_COMMAND_MASTER |           \
+                                PCI_COMMAND_INVALIDATE |       \
+                                PCI_COMMAND_PARITY |           \
+                                PCI_COMMAND_SERR |             \
+                                PCI_COMMAND_FAST_BACK)
+
+struct ar724x_pci_controller {
+       void __iomem *devcfg_base;
+       void __iomem *ctrl_base;
+       void __iomem *crp_base;
+
+       int irq;
+       int irq_base;
+
+       bool link_up;
+       bool bar0_is_cached;
+       u32  bar0_value;
 
-static u32 ar724x_pci_bar0_value;
-static bool ar724x_pci_bar0_is_cached;
-static bool ar724x_pci_link_up;
+       spinlock_t lock;
 
-static inline bool ar724x_pci_check_link(void)
+       struct pci_controller pci_controller;
+       struct resource io_res;
+       struct resource mem_res;
+};
+
+static inline bool ar724x_pci_check_link(struct ar724x_pci_controller *apc)
 {
        u32 reset;
 
-       reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
+       reset = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_RESET);
        return reset & AR724X_PCI_RESET_LINK_UP;
 }
 
+static inline struct ar724x_pci_controller *
+pci_bus_to_ar724x_controller(struct pci_bus *bus)
+{
+       struct pci_controller *hose;
+
+       hose = (struct pci_controller *) bus->sysdata;
+       return container_of(hose, struct ar724x_pci_controller, pci_controller);
+}
+
+static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
+                                 int where, int size, u32 value)
+{
+       unsigned long flags;
+       void __iomem *base;
+       u32 data;
+       int s;
+
+       WARN_ON(where & (size - 1));
+
+       if (!apc->link_up)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       base = apc->crp_base;
+
+       spin_lock_irqsave(&apc->lock, flags);
+       data = __raw_readl(base + (where & ~3));
+
+       switch (size) {
+       case 1:
+               s = ((where & 3) * 8);
+               data &= ~(0xff << s);
+               data |= ((value & 0xff) << s);
+               break;
+       case 2:
+               s = ((where & 2) * 8);
+               data &= ~(0xffff << s);
+               data |= ((value & 0xffff) << s);
+               break;
+       case 4:
+               data = value;
+               break;
+       default:
+               spin_unlock_irqrestore(&apc->lock, flags);
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+       }
+
+       __raw_writel(data, base + (where & ~3));
+       /* flush write */
+       __raw_readl(base + (where & ~3));
+       spin_unlock_irqrestore(&apc->lock, flags);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
 static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                            int size, uint32_t *value)
 {
+       struct ar724x_pci_controller *apc;
        unsigned long flags;
        void __iomem *base;
        u32 data;
 
-       if (!ar724x_pci_link_up)
+       apc = pci_bus_to_ar724x_controller(bus);
+       if (!apc->link_up)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        if (devfn)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
-       base = ar724x_pci_devcfg_base;
+       base = apc->devcfg_base;
 
-       spin_lock_irqsave(&ar724x_pci_lock, flags);
+       spin_lock_irqsave(&apc->lock, flags);
        data = __raw_readl(base + (where & ~3));
 
        switch (size) {
@@ -85,17 +153,17 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
        case 4:
                break;
        default:
-               spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+               spin_unlock_irqrestore(&apc->lock, flags);
 
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }
 
-       spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+       spin_unlock_irqrestore(&apc->lock, flags);
 
        if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
-           ar724x_pci_bar0_is_cached) {
+           apc->bar0_is_cached) {
                /* use the cached value */
-               *value = ar724x_pci_bar0_value;
+               *value = apc->bar0_value;
        } else {
                *value = data;
        }
@@ -106,12 +174,14 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
 static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                             int size, uint32_t value)
 {
+       struct ar724x_pci_controller *apc;
        unsigned long flags;
        void __iomem *base;
        u32 data;
        int s;
 
-       if (!ar724x_pci_link_up)
+       apc = pci_bus_to_ar724x_controller(bus);
+       if (!apc->link_up)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        if (devfn)
@@ -129,18 +199,18 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                         * BAR0 register in order to make the device memory
                         * accessible.
                         */
-                       ar724x_pci_bar0_is_cached = true;
-                       ar724x_pci_bar0_value = value;
+                       apc->bar0_is_cached = true;
+                       apc->bar0_value = value;
 
                        value = AR7240_BAR0_WAR_VALUE;
                } else {
-                       ar724x_pci_bar0_is_cached = false;
+                       apc->bar0_is_cached = false;
                }
        }
 
-       base = ar724x_pci_devcfg_base;
+       base = apc->devcfg_base;
 
-       spin_lock_irqsave(&ar724x_pci_lock, flags);
+       spin_lock_irqsave(&apc->lock, flags);
        data = __raw_readl(base + (where & ~3));
 
        switch (size) {
@@ -158,7 +228,7 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                data = value;
                break;
        default:
-               spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+               spin_unlock_irqrestore(&apc->lock, flags);
 
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }
@@ -166,7 +236,7 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
        __raw_writel(data, base + (where & ~3));
        /* flush write */
        __raw_readl(base + (where & ~3));
-       spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+       spin_unlock_irqrestore(&apc->lock, flags);
 
        return PCIBIOS_SUCCESSFUL;
 }
@@ -176,38 +246,20 @@ static struct pci_ops ar724x_pci_ops = {
        .write  = ar724x_pci_write,
 };
 
-static struct resource ar724x_io_resource = {
-       .name   = "PCI IO space",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_IO,
-};
-
-static struct resource ar724x_mem_resource = {
-       .name   = "PCI memory space",
-       .start  = AR724X_PCI_MEM_BASE,
-       .end    = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1,
-       .flags  = IORESOURCE_MEM,
-};
-
-static struct pci_controller ar724x_pci_controller = {
-       .pci_ops        = &ar724x_pci_ops,
-       .io_resource    = &ar724x_io_resource,
-       .mem_resource   = &ar724x_mem_resource,
-};
-
 static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
+       struct ar724x_pci_controller *apc;
        void __iomem *base;
        u32 pending;
 
-       base = ar724x_pci_ctrl_base;
+       apc = irq_get_handler_data(irq);
+       base = apc->ctrl_base;
 
        pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
                  __raw_readl(base + AR724X_PCI_REG_INT_MASK);
 
        if (pending & AR724X_PCI_INT_DEV0)
-               generic_handle_irq(ATH79_PCI_IRQ(0));
+               generic_handle_irq(apc->irq_base + 0);
 
        else
                spurious_interrupt();
@@ -215,13 +267,17 @@ static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
 
 static void ar724x_pci_irq_unmask(struct irq_data *d)
 {
+       struct ar724x_pci_controller *apc;
        void __iomem *base;
+       int offset;
        u32 t;
 
-       base = ar724x_pci_ctrl_base;
+       apc = irq_data_get_irq_chip_data(d);
+       base = apc->ctrl_base;
+       offset = apc->irq_base - d->irq;
 
-       switch (d->irq) {
-       case ATH79_PCI_IRQ(0):
+       switch (offset) {
+       case 0:
                t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
                __raw_writel(t | AR724X_PCI_INT_DEV0,
                             base + AR724X_PCI_REG_INT_MASK);
@@ -232,13 +288,17 @@ static void ar724x_pci_irq_unmask(struct irq_data *d)
 
 static void ar724x_pci_irq_mask(struct irq_data *d)
 {
+       struct ar724x_pci_controller *apc;
        void __iomem *base;
+       int offset;
        u32 t;
 
-       base = ar724x_pci_ctrl_base;
+       apc = irq_data_get_irq_chip_data(d);
+       base = apc->ctrl_base;
+       offset = apc->irq_base - d->irq;
 
-       switch (d->irq) {
-       case ATH79_PCI_IRQ(0):
+       switch (offset) {
+       case 0:
                t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
                __raw_writel(t & ~AR724X_PCI_INT_DEV0,
                             base + AR724X_PCI_REG_INT_MASK);
@@ -262,53 +322,123 @@ static struct irq_chip ar724x_pci_irq_chip = {
        .irq_mask_ack   = ar724x_pci_irq_mask,
 };
 
-static void __init ar724x_pci_irq_init(int irq)
+static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
+                               int id)
 {
        void __iomem *base;
        int i;
 
-       base = ar724x_pci_ctrl_base;
+       base = apc->ctrl_base;
 
        __raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
        __raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
 
-       BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT);
+       apc->irq_base = ATH79_PCI_IRQ_BASE + (id * AR724X_PCI_IRQ_COUNT);
 
-       for (i = ATH79_PCI_IRQ_BASE;
-            i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++)
+       for (i = apc->irq_base;
+            i < apc->irq_base + AR724X_PCI_IRQ_COUNT; i++) {
                irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
                                         handle_level_irq);
+               irq_set_chip_data(i, apc);
+       }
 
-       irq_set_chained_handler(irq, ar724x_pci_irq_handler);
+       irq_set_handler_data(apc->irq, apc);
+       irq_set_chained_handler(apc->irq, ar724x_pci_irq_handler);
 }
 
-int __init ar724x_pcibios_init(int irq)
+static int ar724x_pci_probe(struct platform_device *pdev)
 {
-       int ret;
+       struct ar724x_pci_controller *apc;
+       struct resource *res;
+       int id;
 
-       ret = -ENOMEM;
+       id = pdev->id;
+       if (id == -1)
+               id = 0;
 
-       ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE,
-                                        AR724X_PCI_CFG_SIZE);
-       if (ar724x_pci_devcfg_base == NULL)
-               goto err;
+       apc = devm_kzalloc(&pdev->dev, sizeof(struct ar724x_pci_controller),
+                           GFP_KERNEL);
+       if (!apc)
+               return -ENOMEM;
 
-       ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE,
-                                      AR724X_PCI_CTRL_SIZE);
-       if (ar724x_pci_ctrl_base == NULL)
-               goto err_unmap_devcfg;
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base");
+       if (!res)
+               return -EINVAL;
 
-       ar724x_pci_link_up = ar724x_pci_check_link();
-       if (!ar724x_pci_link_up)
-               pr_warn("ar724x: PCIe link is down\n");
+       apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
+       if (apc->ctrl_base == NULL)
+               return -EBUSY;
 
-       ar724x_pci_irq_init(irq);
-       register_pci_controller(&ar724x_pci_controller);
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
+       if (!res)
+               return -EINVAL;
 
-       return PCIBIOS_SUCCESSFUL;
+       apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res);
+       if (!apc->devcfg_base)
+               return -EBUSY;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
+       if (!res)
+               return -EINVAL;
 
-err_unmap_devcfg:
-       iounmap(ar724x_pci_devcfg_base);
-err:
-       return ret;
+       apc->crp_base = devm_request_and_ioremap(&pdev->dev, res);
+       if (apc->crp_base == NULL)
+               return -EBUSY;
+
+       apc->irq = platform_get_irq(pdev, 0);
+       if (apc->irq < 0)
+               return -EINVAL;
+
+       spin_lock_init(&apc->lock);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
+       if (!res)
+               return -EINVAL;
+
+       apc->io_res.parent = res;
+       apc->io_res.name = "PCI IO space";
+       apc->io_res.start = res->start;
+       apc->io_res.end = res->end;
+       apc->io_res.flags = IORESOURCE_IO;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
+       if (!res)
+               return -EINVAL;
+
+       apc->mem_res.parent = res;
+       apc->mem_res.name = "PCI memory space";
+       apc->mem_res.start = res->start;
+       apc->mem_res.end = res->end;
+       apc->mem_res.flags = IORESOURCE_MEM;
+
+       apc->pci_controller.pci_ops = &ar724x_pci_ops;
+       apc->pci_controller.io_resource = &apc->io_res;
+       apc->pci_controller.mem_resource = &apc->mem_res;
+
+       apc->link_up = ar724x_pci_check_link(apc);
+       if (!apc->link_up)
+               dev_warn(&pdev->dev, "PCIe link is down\n");
+
+       ar724x_pci_irq_init(apc, id);
+
+       ar724x_pci_local_write(apc, PCI_COMMAND, 4, AR724X_PCI_CMD_INIT);
+
+       register_pci_controller(&apc->pci_controller);
+
+       return 0;
 }
+
+static struct platform_driver ar724x_pci_driver = {
+       .probe = ar724x_pci_probe,
+       .driver = {
+               .name = "ar724x-pci",
+               .owner = THIS_MODULE,
+       },
+};
+
+static int __init ar724x_pci_init(void)
+{
+       return platform_driver_register(&ar724x_pci_driver);
+}
+
+postcore_initcall(ar724x_pci_init);
index 37b52dc..e2e69e1 100644 (file)
@@ -54,8 +54,8 @@
 
 static void *cfg_space;
 
-#define PCI_BUS_ENABLED        1
-#define PCI_DEVICE_MODE        2
+#define PCI_BUS_ENABLED 1
+#define PCI_DEVICE_MODE 2
 
 static int bcm1480_bus_status;
 
@@ -194,7 +194,7 @@ struct pci_controller bcm1480_controller = {
        .pci_ops        = &bcm1480_pci_ops,
        .mem_resource   = &bcm1480_mem_resource,
        .io_resource    = &bcm1480_io_resource,
-       .io_offset      = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
+       .io_offset      = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
 };
 
 
@@ -227,7 +227,7 @@ static int __init bcm1480_pcibios_init(void)
                                             PCI_COMMAND));
                if (!(cmdreg & PCI_COMMAND_MASTER)) {
                        printk
-                           ("PCI: Skipping PCI probe.  Bus is not initialized.\n");
+                           ("PCI: Skipping PCI probe.  Bus is not initialized.\n");
                        iounmap(cfg_space);
                        return 1; /* XXX */
                }
index 50cc6e9..1263c5e 100644 (file)
@@ -53,8 +53,8 @@
 
 static void *ht_cfg_space;
 
-#define PCI_BUS_ENABLED        1
-#define PCI_DEVICE_MODE        2
+#define PCI_BUS_ENABLED 1
+#define PCI_DEVICE_MODE 2
 
 static int bcm1480ht_bus_status;
 
@@ -191,7 +191,7 @@ struct pci_controller bcm1480ht_controller = {
        .io_resource    = &bcm1480ht_io_resource,
        .index          = 1,
        .get_busno      = bcm1480ht_pcibios_get_busno,
-       .io_offset      = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
+       .io_offset      = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
 };
 
 static int __init bcm1480ht_pcibios_init(void)
index c682468..76f16ea 100644 (file)
@@ -91,7 +91,7 @@ static int bcm47xx_pcibios_plat_dev_init_bcma(struct pci_dev *dev)
 int pcibios_plat_dev_init(struct pci_dev *dev)
 {
 #ifdef CONFIG_BCM47XX_SSB
-       if (bcm47xx_bus_type ==  BCM47XX_BUS_TYPE_SSB)
+       if (bcm47xx_bus_type ==  BCM47XX_BUS_TYPE_SSB)
                return bcm47xx_pcibios_plat_dev_init_ssb(dev);
        else
 #endif
index ca179b6..88e781c 100644 (file)
 int bcm63xx_pci_enabled;
 
 static struct resource bcm_pci_mem_resource = {
-       .name   = "bcm63xx PCI memory space",
-       .start  = BCM_PCI_MEM_BASE_PA,
-       .end    = BCM_PCI_MEM_END_PA,
-       .flags  = IORESOURCE_MEM
+       .name   = "bcm63xx PCI memory space",
+       .start  = BCM_PCI_MEM_BASE_PA,
+       .end    = BCM_PCI_MEM_END_PA,
+       .flags  = IORESOURCE_MEM
 };
 
 static struct resource bcm_pci_io_resource = {
-       .name   = "bcm63xx PCI IO space",
-       .start  = BCM_PCI_IO_BASE_PA,
+       .name   = "bcm63xx PCI IO space",
+       .start  = BCM_PCI_IO_BASE_PA,
 #ifdef CONFIG_CARDBUS
-       .end    = BCM_PCI_IO_HALF_PA,
+       .end    = BCM_PCI_IO_HALF_PA,
 #else
-       .end    = BCM_PCI_IO_END_PA,
+       .end    = BCM_PCI_IO_END_PA,
 #endif
-       .flags  = IORESOURCE_IO
+       .flags  = IORESOURCE_IO
 };
 
 struct pci_controller bcm63xx_controller = {
@@ -55,17 +55,17 @@ struct pci_controller bcm63xx_controller = {
  */
 #ifdef CONFIG_CARDBUS
 static struct resource bcm_cb_mem_resource = {
-       .name   = "bcm63xx Cardbus memory space",
-       .start  = BCM_CB_MEM_BASE_PA,
-       .end    = BCM_CB_MEM_END_PA,
-       .flags  = IORESOURCE_MEM
+       .name   = "bcm63xx Cardbus memory space",
+       .start  = BCM_CB_MEM_BASE_PA,
+       .end    = BCM_CB_MEM_END_PA,
+       .flags  = IORESOURCE_MEM
 };
 
 static struct resource bcm_cb_io_resource = {
-       .name   = "bcm63xx Cardbus IO space",
-       .start  = BCM_PCI_IO_HALF_PA + 1,
-       .end    = BCM_PCI_IO_END_PA,
-       .flags  = IORESOURCE_IO
+       .name   = "bcm63xx Cardbus IO space",
+       .start  = BCM_PCI_IO_HALF_PA + 1,
+       .end    = BCM_PCI_IO_END_PA,
+       .flags  = IORESOURCE_IO
 };
 
 struct pci_controller bcm63xx_cb_controller = {
@@ -76,17 +76,17 @@ struct pci_controller bcm63xx_cb_controller = {
 #endif
 
 static struct resource bcm_pcie_mem_resource = {
-       .name   = "bcm63xx PCIe memory space",
-       .start  = BCM_PCIE_MEM_BASE_PA,
-       .end    = BCM_PCIE_MEM_END_PA,
-       .flags  = IORESOURCE_MEM,
+       .name   = "bcm63xx PCIe memory space",
+       .start  = BCM_PCIE_MEM_BASE_PA,
+       .end    = BCM_PCIE_MEM_END_PA,
+       .flags  = IORESOURCE_MEM,
 };
 
 static struct resource bcm_pcie_io_resource = {
-       .name   = "bcm63xx PCIe IO space",
-       .start  = 0,
-       .end    = 0,
-       .flags  = 0,
+       .name   = "bcm63xx PCIe IO space",
+       .start  = 0,
+       .end    = 0,
+       .flags  = 0,
 };
 
 struct pci_controller bcm63xx_pcie_controller = {
@@ -111,7 +111,7 @@ static void bcm63xx_int_cfg_writel(u32 val, u32 reg)
        u32 tmp;
 
        tmp = reg & MPI_PCICFGCTL_CFGADDR_MASK;
-       tmp |=  MPI_PCICFGCTL_WRITEEN_MASK;
+       tmp |=  MPI_PCICFGCTL_WRITEEN_MASK;
        bcm_mpi_writel(tmp, MPI_PCICFGCTL_REG);
        bcm_mpi_writel(val, MPI_PCICFGDATA_REG);
 }
@@ -211,7 +211,7 @@ static int __init bcm63xx_register_pci(void)
         * first bytes to access it from CPU.
         *
         * this means that  no io access from CPU  should happen while
-        * we do a configuration cycle,  but there's no way we can add
+        * we do a configuration cycle,  but there's no way we can add
         * a spinlock for each io access, so this is currently kind of
         * broken on SMP.
         */
@@ -244,9 +244,9 @@ static int __init bcm63xx_register_pci(void)
        bcm_mpi_writel(0, MPI_L2PMEMREMAP2_REG);
 #endif
 
-       /* setup local bus  to PCI access (IO memory),  we have only 1
-        * IO window  for both PCI  and cardbus, but it  cannot handle
-        * both  at the  same time,  assume standard  PCI for  now, if
+       /* setup local bus  to PCI access (IO memory),  we have only 1
+        * IO window  for both PCI  and cardbus, but it  cannot handle
+        * both  at the  same time,  assume standard  PCI for  now, if
         * cardbus card has  IO zone, PCI fixup will  change window to
         * cardbus */
        val = BCM_PCI_IO_BASE_PA & MPI_L2P_BASE_MASK;
@@ -284,7 +284,7 @@ static int __init bcm63xx_register_pci(void)
                bcm_mpi_writel(0, MPI_SP1_RANGE_REG);
        }
 
-       /* change  host bridge  retry  counter to  infinite number  of
+       /* change  host bridge  retry  counter to  infinite number  of
         * retry,  needed for  some broadcom  wifi cards  with Silicon
         * Backplane bus where access to srom seems very slow  */
        val = bcm63xx_int_cfg_readl(BCMPCI_REG_TIMERS);
index e6736d5..ffab4da 100644 (file)
@@ -7,7 +7,7 @@
 #include <bcm63xx_dev_pci.h>
 
 /*
- * Cardbus shares  the PCI bus, but has  no IDSEL, so a  special id is
+ * Cardbus shares  the PCI bus, but has         no IDSEL, so a  special id is
  * reserved for it.  If you have a standard PCI device at this id, you
  * need to change the following definition.
  */
index 7f4f49b..6eb65e4 100644 (file)
@@ -30,7 +30,7 @@
 
 /*
  * XXX: No kmalloc available when we do our crosstalk scan,
- *     we should try to move it later in the boot process.
+ *     we should try to move it later in the boot process.
  */
 static struct bridge_controller bridges[MAX_PCI_BUSSES];
 
@@ -103,7 +103,7 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
         * swap pio's to pci mem and io space (big windows)
         */
        bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP |
-                                BRIDGE_CTRL_MEM_SWAP;
+                                BRIDGE_CTRL_MEM_SWAP;
 #ifdef CONFIG_PAGE_SIZE_4KB
        bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
 #else /* 16kB or larger */
@@ -123,7 +123,7 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
                bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
                bc->pci_int[slot] = -1;
        }
-       bridge->b_wid_tflush;     /* wait until Bridge PIO complete */
+       bridge->b_wid_tflush;     /* wait until Bridge PIO complete */
 
        bc->base = bridge;
 
@@ -184,7 +184,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 }
 
 /*
- * Device might live on a subordinate PCI bus.  XXX Walk up the chain of buses
+ * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
  * to find the slot number in sense of the bridge device register.
  * XXX This also means multiple devices might rely on conflicting bridge
  * settings.
index 532b561..b1e061f 100644 (file)
@@ -18,9 +18,9 @@
 
 /*
  * Handle errors from the bridge.  This includes master and target aborts,
- * various command and address errors, and the interrupt test.  This gets
- * registered on the bridge error irq.  It's conceivable that some of these
- * conditions warrant a panic.  Anybody care to say which ones?
+ * various command and address errors, and the interrupt test. This gets
+ * registered on the bridge error irq. It's conceivable that some of these
+ * conditions warrant a panic. Anybody care to say which ones?
  */
 static irqreturn_t macepci_error(int irq, void *dev)
 {
index 910fb4c..879077b 100644 (file)
@@ -129,8 +129,16 @@ static int ltq_pci_startup(struct platform_device *pdev)
 
        /* setup reset gpio used by pci */
        reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
-       if (gpio_is_valid(reset_gpio))
-               devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
+       if (gpio_is_valid(reset_gpio)) {
+               int ret = devm_gpio_request(&pdev->dev,
+                                               reset_gpio, "pci-reset");
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "failed to request gpio %d\n", reset_gpio);
+                       return ret;
+               }
+               gpio_direction_output(reset_gpio, 1);
+       }
 
        /* enable auto-switching between PCI and EBU */
        ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
index a98e543..40d2797 100644 (file)
@@ -51,15 +51,15 @@ static int __init lasat_pci_setup(void)
 
 arch_initcall(lasat_pci_setup);
 
-#define LASAT_IRQ_ETH1   (LASAT_IRQ_BASE + 0)
-#define LASAT_IRQ_ETH0   (LASAT_IRQ_BASE + 1)
-#define LASAT_IRQ_HDC    (LASAT_IRQ_BASE + 2)
-#define LASAT_IRQ_COMP   (LASAT_IRQ_BASE + 3)
-#define LASAT_IRQ_HDLC   (LASAT_IRQ_BASE + 4)
-#define LASAT_IRQ_PCIA   (LASAT_IRQ_BASE + 5)
-#define LASAT_IRQ_PCIB   (LASAT_IRQ_BASE + 6)
-#define LASAT_IRQ_PCIC   (LASAT_IRQ_BASE + 7)
-#define LASAT_IRQ_PCID   (LASAT_IRQ_BASE + 8)
+#define LASAT_IRQ_ETH1  (LASAT_IRQ_BASE + 0)
+#define LASAT_IRQ_ETH0  (LASAT_IRQ_BASE + 1)
+#define LASAT_IRQ_HDC   (LASAT_IRQ_BASE + 2)
+#define LASAT_IRQ_COMP  (LASAT_IRQ_BASE + 3)
+#define LASAT_IRQ_HDLC  (LASAT_IRQ_BASE + 4)
+#define LASAT_IRQ_PCIA  (LASAT_IRQ_BASE + 5)
+#define LASAT_IRQ_PCIB  (LASAT_IRQ_BASE + 6)
+#define LASAT_IRQ_PCIC  (LASAT_IRQ_BASE + 7)
+#define LASAT_IRQ_PCID  (LASAT_IRQ_BASE + 8)
 
 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
@@ -69,13 +69,13 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        case 3:
                return LASAT_IRQ_PCIA + (((slot-1) + (pin-1)) % 4);
        case 4:
-               return LASAT_IRQ_ETH1;   /* Ethernet 1 (LAN 2) */
+               return LASAT_IRQ_ETH1;   /* Ethernet 1 (LAN 2) */
        case 5:
-               return LASAT_IRQ_ETH0;   /* Ethernet 0 (LAN 1) */
+               return LASAT_IRQ_ETH0;   /* Ethernet 0 (LAN 1) */
        case 6:
-               return LASAT_IRQ_HDC;    /* IDE controller */
+               return LASAT_IRQ_HDC;    /* IDE controller */
        default:
-               return 0xff;            /* Illegal */
+               return 0xff;            /* Illegal */
        }
 
        return -1;
index 5b5ed76..95c2ea8 100644 (file)
@@ -30,8 +30,8 @@
  * addresses. Use PCI endian swapping 1 so no address swapping is
  * necessary. The Linux io routines will endian swap the data.
  */
-#define OCTEON_PCI_IOSPACE_BASE     0x80011a0400000000ull
-#define OCTEON_PCI_IOSPACE_SIZE     (1ull<<32)
+#define OCTEON_PCI_IOSPACE_BASE            0x80011a0400000000ull
+#define OCTEON_PCI_IOSPACE_SIZE            (1ull<<32)
 
 /* Octeon't PCI controller uses did=3, subdid=3 for PCI memory. */
 #define OCTEON_PCI_MEMSPACE_OFFSET  (0x00011b0000000000ull)
@@ -68,10 +68,10 @@ enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
  *
  * @dev:    The Linux PCI device structure for the device to map
  * @slot:   The slot number for this device on __BUS 0__. Linux
- *               enumerates through all the bridges and figures out the
- *               slot on Bus 0 where this device eventually hooks to.
+ *              enumerates through all the bridges and figures out the
+ *              slot on Bus 0 where this device eventually hooks to.
  * @pin:    The PCI interrupt pin read from the device, then swizzled
- *               as it goes through each bridge.
+ *              as it goes through each bridge.
  * Returns Interrupt number for the device
  */
 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -120,8 +120,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        /* Enable the PCIe normal error reporting */
        config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
        config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
-       config |= PCI_EXP_DEVCTL_FERE;  /* Fatal Error Reporting */
-       config |= PCI_EXP_DEVCTL_URRE;  /* Unsupported Request */
+       config |= PCI_EXP_DEVCTL_FERE;  /* Fatal Error Reporting */
+       config |= PCI_EXP_DEVCTL_URRE;  /* Unsupported Request */
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
 
        /* Find the Advanced Error Reporting capability */
@@ -226,10 +226,10 @@ const char *octeon_get_pci_interrupts(void)
  *
  * @dev:    The Linux PCI device structure for the device to map
  * @slot:   The slot number for this device on __BUS 0__. Linux
- *               enumerates through all the bridges and figures out the
- *               slot on Bus 0 where this device eventually hooks to.
+ *              enumerates through all the bridges and figures out the
+ *              slot on Bus 0 where this device eventually hooks to.
  * @pin:    The PCI interrupt pin read from the device, then swizzled
- *               as it goes through each bridge.
+ *              as it goes through each bridge.
  * Returns Interrupt number for the device
  */
 int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
@@ -404,8 +404,8 @@ static void octeon_pci_initialize(void)
                ctl_status_2.s.bb1_siz = 1;  /* BAR1 is 2GB */
                ctl_status_2.s.bb_ca = 1;    /* Don't use L2 with big bars */
                ctl_status_2.s.bb_es = 1;    /* Big bar in byte swap mode */
-               ctl_status_2.s.bb1 = 1;      /* BAR1 is big */
-               ctl_status_2.s.bb0 = 1;      /* BAR0 is big */
+               ctl_status_2.s.bb1 = 1;      /* BAR1 is big */
+               ctl_status_2.s.bb0 = 1;      /* BAR0 is big */
        }
 
        octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
@@ -446,7 +446,7 @@ static void octeon_pci_initialize(void)
                 * count. [1..31] and 0=32.  NOTE: If the user
                 * programs these bits beyond the Designed Maximum
                 * outstanding count, then the designed maximum table
-                * depth will be used instead.  No additional
+                * depth will be used instead.  No additional
                 * Deferred/Split transactions will be accepted if
                 * this outstanding maximum count is
                 * reached. Furthermore, no additional deferred/split
@@ -456,7 +456,7 @@ static void octeon_pci_initialize(void)
                cfg19.s.tdomc = 4;
                /*
                 * Master Deferred Read Request Outstanding Max Count
-                * (PCI only).  CR4C[26:24] Max SAC cycles MAX DAC
+                * (PCI only).  CR4C[26:24] Max SAC cycles MAX DAC
                 * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
                 * 5 2 110 6 3 111 7 3 For example, if these bits are
                 * programmed to 100, the core can support 2 DAC
@@ -550,7 +550,7 @@ static void octeon_pci_initialize(void)
 
        /*
         * Affects PCI performance when OCTEON services reads to its
-        * BAR1/BAR2. Refer to Section 10.6.1.  The recommended values are
+        * BAR1/BAR2. Refer to Section 10.6.1.  The recommended values are
         * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
         * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
         * these values need to be changed so they won't possibly prefetch off
index 5f3a69c..b128cb9 100644 (file)
@@ -33,7 +33,7 @@
 #include <asm/mach-rc32434/rc32434.h>
 #include <asm/mach-rc32434/pci.h>
 
-#define PCI_ACCESS_READ  0
+#define PCI_ACCESS_READ         0
 #define PCI_ACCESS_WRITE 1
 
 /* define an unsigned array for the PCI registers */
@@ -82,11 +82,11 @@ extern struct pci_ops rc32434_pci_ops;
 #define PCI_MEM2_START (PCI_ADDR_START + CPUTOPCI_MEM_WIN)
 #define PCI_MEM2_END   (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN)  - 1)
 #define PCI_IO1_START  (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN))
-#define PCI_IO1_END                                                    \
+#define PCI_IO1_END                                                    \
        (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN - 1)
 #define PCI_IO2_START                                                  \
        (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN)
-#define PCI_IO2_END                                                    \
+#define PCI_IO2_END                                                    \
        (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + (2 * CPUTOPCI_IO_WIN) - 1)
 
 struct pci_controller rc32434_controller2;
index dd97f3a..cdefcc4 100644 (file)
@@ -55,9 +55,9 @@
 
 static void *cfg_space;
 
-#define PCI_BUS_ENABLED        1
-#define LDT_BUS_ENABLED        2
-#define PCI_DEVICE_MODE        4
+#define PCI_BUS_ENABLED 1
+#define LDT_BUS_ENABLED 2
+#define PCI_DEVICE_MODE 4
 
 static int sb1250_bus_status;
 
@@ -239,7 +239,7 @@ static int __init sb1250_pcibios_init(void)
                               PCI_COMMAND));
                if (!(cmdreg & PCI_COMMAND_MASTER)) {
                        printk
-                           ("PCI: Skipping PCI probe.  Bus is not initialized.\n");
+                           ("PCI: Skipping PCI probe.  Bus is not initialized.\n");
                        iounmap(cfg_space);
                        return 0;
                }
index 444b8d8..157c771 100644 (file)
@@ -69,17 +69,17 @@ static struct pci_target_address_window pci_target_window1 = {
 };
 
 static struct resource pci_mem_resource = {
-       .name   = "PCI Memory resources",
-       .start  = PCI_MEM_RESOURCE_START,
-       .end    = PCI_MEM_RESOURCE_END,
-       .flags  = IORESOURCE_MEM,
+       .name   = "PCI Memory resources",
+       .start  = PCI_MEM_RESOURCE_START,
+       .end    = PCI_MEM_RESOURCE_END,
+       .flags  = IORESOURCE_MEM,
 };
 
 static struct resource pci_io_resource = {
-       .name   = "PCI I/O resources",
-       .start  = PCI_IO_RESOURCE_START,
-       .end    = PCI_IO_RESOURCE_END,
-       .flags  = IORESOURCE_IO,
+       .name   = "PCI I/O resources",
+       .start  = PCI_IO_RESOURCE_START,
+       .end    = PCI_IO_RESOURCE_END,
+       .flags  = IORESOURCE_IO,
 };
 
 static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = {
@@ -97,7 +97,7 @@ static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = {
 };
 
 static struct pci_controller vr41xx_pci_controller = {
-       .pci_ops        = &vr41xx_pci_ops,
+       .pci_ops        = &vr41xx_pci_ops,
        .mem_resource   = &pci_mem_resource,
        .io_resource    = &pci_io_resource,
 };
@@ -148,7 +148,7 @@ static int __init vr41xx_pciu_init(void)
        else if ((vtclock / 2) < pci_clock_max)
                pciu_write(PCICLKSELREG, HALF_VTCLOCK);
        else if (current_cpu_data.processor_id >= PRID_VR4131_REV2_1 &&
-                (vtclock / 3) < pci_clock_max)
+                (vtclock / 3) < pci_clock_max)
                pciu_write(PCICLKSELREG, ONE_THIRD_VTCLOCK);
        else if ((vtclock / 4) < pci_clock_max)
                pciu_write(PCICLKSELREG, QUARTER_VTCLOCK);
@@ -281,7 +281,7 @@ static int __init vr41xx_pciu_init(void)
        pciu_write(PCIAPCNTREG, val);
 
        pciu_write(COMMANDREG, PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
-                              PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
+                              PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
                               PCI_COMMAND_SERR);
 
        /* Clear bus error */
index 6b1ae2e..e6b4a1b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  pci-vr41xx.h, Include file for PCI Control Unit of the NEC VR4100 series.
  *
- *  Copyright (C) 2002  MontaVista Software Inc.
+ *  Copyright (C) 2002 MontaVista Software Inc.
  *    Author: Yoichi Yuasa <source@mvista.com>
  *  Copyright (C) 2004-2005  Yoichi Yuasa <yuasa@linux-mips.org>
  *
index 140557a..653d2db 100644 (file)
@@ -46,6 +46,7 @@
 
 #include <asm/netlogic/interrupt.h>
 #include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
 
 #include <asm/netlogic/xlp-hal/iomap.h>
 #include <asm/netlogic/xlp-hal/pic.h>
@@ -55,7 +56,7 @@
 
 static void *pci_config_base;
 
-#define        pci_cfg_addr(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
 
 /* PCI ops */
 static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
@@ -64,8 +65,12 @@ static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
        u32 data;
        u32 *cfgaddr;
 
+       where &= ~3;
+       if (bus->number == 0 && PCI_SLOT(devfn) == 1 && where == 0x954)
+               return 0xffffffff;
+
        cfgaddr = (u32 *)(pci_config_base +
-                       pci_cfg_addr(bus->number, devfn, where & ~3));
+                       pci_cfg_addr(bus->number, devfn, where));
        data = *cfgaddr;
        return data;
 }
@@ -135,54 +140,60 @@ struct pci_ops nlm_pci_ops = {
 };
 
 static struct resource nlm_pci_mem_resource = {
-       .name           = "XLP PCI MEM",
-       .start          = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
-       .end            = 0xdfffffffUL,
-       .flags          = IORESOURCE_MEM,
+       .name           = "XLP PCI MEM",
+       .start          = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+       .end            = 0xdfffffffUL,
+       .flags          = IORESOURCE_MEM,
 };
 
 static struct resource nlm_pci_io_resource = {
-       .name           = "XLP IO MEM",
-       .start          = 0x14000000UL, /* 64MB PCI IO @ 0x1000_0000 */
-       .end            = 0x17ffffffUL,
-       .flags          = IORESOURCE_IO,
+       .name           = "XLP IO MEM",
+       .start          = 0x14000000UL, /* 64MB PCI IO @ 0x1000_0000 */
+       .end            = 0x17ffffffUL,
+       .flags          = IORESOURCE_IO,
 };
 
 struct pci_controller nlm_pci_controller = {
-       .index          = 0,
-       .pci_ops        = &nlm_pci_ops,
-       .mem_resource   = &nlm_pci_mem_resource,
-       .mem_offset     = 0x00000000UL,
-       .io_resource    = &nlm_pci_io_resource,
-       .io_offset      = 0x00000000UL,
+       .index          = 0,
+       .pci_ops        = &nlm_pci_ops,
+       .mem_resource   = &nlm_pci_mem_resource,
+       .mem_offset     = 0x00000000UL,
+       .io_resource    = &nlm_pci_io_resource,
+       .io_offset      = 0x00000000UL,
 };
 
-static int get_irq_vector(const struct pci_dev *dev)
+static struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev)
 {
-       /*
-        * For XLP PCIe, there is an IRQ per Link, find out which
-        * link the device is on to assign interrupts
-       */
-       if (dev->bus->self == NULL)
-               return 0;
+       struct pci_bus *bus, *p;
 
-       switch  (dev->bus->self->devfn) {
-       case 0x8:
-               return PIC_PCIE_LINK_0_IRQ;
-       case 0x9:
-               return PIC_PCIE_LINK_1_IRQ;
-       case 0xa:
-               return PIC_PCIE_LINK_2_IRQ;
-       case 0xb:
-               return PIC_PCIE_LINK_3_IRQ;
-       }
-       WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
-       return 0;
+       /* Find the bridge on bus 0 */
+       bus = dev->bus;
+       for (p = bus->parent; p && p->number != 0; p = p->parent)
+               bus = p;
+
+       return p ? bus->self : NULL;
+}
+
+static inline int nlm_pci_link_to_irq(int link)
+{
+       return PIC_PCIE_LINK_0_IRQ + link;
 }
 
 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-       return get_irq_vector(dev);
+       struct pci_dev *lnkdev;
+       int lnkslot, lnkfunc;
+
+       /*
+        * For XLP PCIe, there is an IRQ per Link, find out which
+        * link the device is on to assign interrupts
+       */
+       lnkdev = xlp_get_pcie_link(dev);
+       if (lnkdev == NULL)
+               return 0;
+       lnkfunc = PCI_FUNC(lnkdev->devfn);
+       lnkslot = PCI_SLOT(lnkdev->devfn);
+       return nlm_irq_to_xirq(lnkslot / 8, nlm_pci_link_to_irq(lnkfunc));
 }
 
 /* Do platform specific device initialization at pci_enable_device() time */
@@ -191,51 +202,76 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
        return 0;
 }
 
-static int xlp_enable_pci_bswap(void)
+/*
+ * If big-endian, enable hardware byteswap on the PCIe bridges.
+ * This will make both the SoC and PCIe devices behave consistently with
+ * readl/writel.
+ */
+#ifdef __BIG_ENDIAN
+static void xlp_config_pci_bswap(int node, int link)
 {
-       uint64_t pciebase, sysbase;
-       int node, i;
+       uint64_t nbubase, lnkbase;
        u32 reg;
 
-       /* Chip-0 so node set to 0 */
-       node = 0;
-       sysbase = nlm_get_bridge_regbase(node);
+       nbubase = nlm_get_bridge_regbase(node);
+       lnkbase = nlm_get_pcie_base(node, link);
+
        /*
         *  Enable byte swap in hardware. Program each link's PCIe SWAP regions
         * from the link's address ranges.
         */
-       for (i = 0; i < 4; i++) {
-               pciebase = nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, i));
-               if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
-                       continue;
+       reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_BASE0 + link);
+       nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_BASE, reg);
 
-               reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_BASE0 + i);
-               nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_BASE, reg);
+       reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_LIMIT0 + link);
+       nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_LIM, reg | 0xfff);
 
-               reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_LIMIT0 + i);
-               nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_LIM,
-                       reg | 0xfff);
+       reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_BASE0 + link);
+       nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_BASE, reg);
 
-               reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_BASE0 + i);
-               nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_BASE, reg);
-
-               reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_LIMIT0 + i);
-               nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
-       }
-       return 0;
+       reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_LIMIT0 + link);
+       nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
 }
+#else
+/* Swap configuration not needed in little-endian mode */
+static inline void xlp_config_pci_bswap(int node, int link) {}
+#endif /* __BIG_ENDIAN */
 
 static int __init pcibios_init(void)
 {
+       struct nlm_soc_info *nodep;
+       uint64_t pciebase;
+       int link, n;
+       u32 reg;
+
        /* Firmware assigns PCI resources */
        pci_set_flags(PCI_PROBE_ONLY);
        pci_config_base = ioremap(XLP_DEFAULT_PCI_ECFG_BASE, 64 << 20);
 
        /* Extend IO port for memory mapped io */
-       ioport_resource.start =  0;
+       ioport_resource.start =  0;
        ioport_resource.end   = ~0;
 
-       xlp_enable_pci_bswap();
+       for (n = 0; n < NLM_NR_NODES; n++) {
+               nodep = nlm_get_node(n);
+               if (!nodep->coremask)
+                       continue;       /* node does not exist */
+
+               for (link = 0; link < 4; link++) {
+                       pciebase = nlm_get_pcie_base(n, link);
+                       if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
+                               continue;
+                       xlp_config_pci_bswap(n, link);
+
+                       /* put in intpin and irq - u-boot does not */
+                       reg = nlm_read_pci_reg(pciebase, 0xf);
+                       reg &= ~0x1fu;
+                       reg |= (1 << 8) | nlm_pci_link_to_irq(link);
+                       nlm_write_pci_reg(pciebase, 0xf, reg);
+                       pr_info("XLP PCIe: Link %d-%d initialized.\n", n, link);
+               }
+       }
+
        set_io_port_base(CKSEG1);
        nlm_pci_controller.io_map_base = CKSEG1;
 
index 0c18ccc..4427abb 100644 (file)
@@ -56,7 +56,7 @@
 
 static void *pci_config_base;
 
-#define        pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))
 
 /* PCI ops */
 static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
@@ -136,26 +136,26 @@ struct pci_ops nlm_pci_ops = {
 };
 
 static struct resource nlm_pci_mem_resource = {
-       .name           = "XLR PCI MEM",
-       .start          = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
-       .end            = 0xdfffffffUL,
-       .flags          = IORESOURCE_MEM,
+       .name           = "XLR PCI MEM",
+       .start          = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+       .end            = 0xdfffffffUL,
+       .flags          = IORESOURCE_MEM,
 };
 
 static struct resource nlm_pci_io_resource = {
-       .name           = "XLR IO MEM",
-       .start          = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */
-       .end            = 0x100fffffUL,
-       .flags          = IORESOURCE_IO,
+       .name           = "XLR IO MEM",
+       .start          = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */
+       .end            = 0x100fffffUL,
+       .flags          = IORESOURCE_IO,
 };
 
 struct pci_controller nlm_pci_controller = {
-       .index          = 0,
-       .pci_ops        = &nlm_pci_ops,
-       .mem_resource   = &nlm_pci_mem_resource,
-       .mem_offset     = 0x00000000UL,
-       .io_resource    = &nlm_pci_io_resource,
-       .io_offset      = 0x00000000UL,
+       .index          = 0,
+       .pci_ops        = &nlm_pci_ops,
+       .mem_resource   = &nlm_pci_mem_resource,
+       .mem_offset     = 0x00000000UL,
+       .io_resource    = &nlm_pci_io_resource,
+       .io_offset      = 0x00000000UL,
 };
 
 /*
@@ -259,7 +259,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
                MSI_ADDR_REDIRECTION_CPU;
 
        msg.data = MSI_DATA_TRIGGER_EDGE |
-               MSI_DATA_LEVEL_ASSERT    |
+               MSI_DATA_LEVEL_ASSERT    |
                MSI_DATA_DELIVERY_FIXED;
 
        ret = irq_set_msi_desc(irq, desc);
@@ -344,7 +344,7 @@ static int __init pcibios_init(void)
        pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);
 
        /* Extend IO port for memory mapped io */
-       ioport_resource.start =  0;
+       ioport_resource.start =  0;
        ioport_resource.end   = ~0;
 
        set_io_port_base(CKSEG1);
index a184344..0872f12 100644 (file)
@@ -1,6 +1,6 @@
 /*
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  *
@@ -175,9 +175,20 @@ static DEFINE_MUTEX(pci_scan_mutex);
 
 void register_pci_controller(struct pci_controller *hose)
 {
-       if (request_resource(&iomem_resource, hose->mem_resource) < 0)
+       struct resource *parent;
+
+       parent = hose->mem_resource->parent;
+       if (!parent)
+               parent = &iomem_resource;
+
+       if (request_resource(parent, hose->mem_resource) < 0)
                goto out;
-       if (request_resource(&ioport_resource, hose->io_resource) < 0) {
+
+       parent = hose->io_resource->parent;
+       if (!parent)
+               parent = &ioport_resource;
+
+       if (request_resource(parent, hose->io_resource) < 0) {
                release_resource(hose->mem_resource);
                goto out;
        }
index fdb4d55..5e36c33 100644 (file)
@@ -43,7 +43,7 @@ union cvmx_pcie_address {
                uint64_t upper:2;       /* Normally 2 for XKPHYS */
                uint64_t reserved_49_61:13;     /* Must be zero */
                uint64_t io:1;  /* 1 for IO space access */
-               uint64_t did:5; /* PCIe DID = 3 */
+               uint64_t did:5; /* PCIe DID = 3 */
                uint64_t subdid:3;      /* PCIe SubDID = 1 */
                uint64_t reserved_36_39:4;      /* Must be zero */
                uint64_t es:2;  /* Endian swap = 1 */
@@ -74,7 +74,7 @@ union cvmx_pcie_address {
                uint64_t upper:2;       /* Normally 2 for XKPHYS */
                uint64_t reserved_49_61:13;     /* Must be zero */
                uint64_t io:1;  /* 1 for IO space access */
-               uint64_t did:5; /* PCIe DID = 3 */
+               uint64_t did:5; /* PCIe DID = 3 */
                uint64_t subdid:3;      /* PCIe SubDID = 2 */
                uint64_t reserved_36_39:4;      /* Must be zero */
                uint64_t es:2;  /* Endian swap = 1 */
@@ -85,7 +85,7 @@ union cvmx_pcie_address {
                uint64_t upper:2;       /* Normally 2 for XKPHYS */
                uint64_t reserved_49_61:13;     /* Must be zero */
                uint64_t io:1;  /* 1 for IO space access */
-               uint64_t did:5; /* PCIe DID = 3 */
+               uint64_t did:5; /* PCIe DID = 3 */
                uint64_t subdid:3;      /* PCIe SubDID = 3-6 */
                uint64_t reserved_36_39:4;      /* Must be zero */
                uint64_t address:36;    /* PCIe Mem address */
@@ -166,7 +166,7 @@ static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
  * Read a PCIe config space register indirectly. This is used for
  * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
  *
- * @pcie_port:  PCIe port to read from
+ * @pcie_port: PCIe port to read from
  * @cfg_offset: Address to read
  *
  * Returns Value read
@@ -194,9 +194,9 @@ static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
  * Write a PCIe config space register indirectly. This is used for
  * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
  *
- * @pcie_port:  PCIe port to write to
+ * @pcie_port: PCIe port to write to
  * @cfg_offset: Address to write
- * @val:        Value to write
+ * @val:       Value to write
  */
 static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
                                 uint32_t val)
@@ -222,7 +222,7 @@ static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
  * @pcie_port: PCIe port to access
  * @bus:       Sub bus
  * @dev:       Device ID
- * @fn:        Device sub function
+ * @fn:               Device sub function
  * @reg:       Register to access
  *
  * Returns 64bit Octeon IO address
@@ -259,7 +259,7 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
  * @pcie_port: PCIe port the device is on
  * @bus:       Sub bus
  * @dev:       Device ID
- * @fn:        Device sub function
+ * @fn:               Device sub function
  * @reg:       Register to access
  *
  * Returns Result of the read
@@ -281,7 +281,7 @@ static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
  * @pcie_port: PCIe port the device is on
  * @bus:       Sub bus
  * @dev:       Device ID
- * @fn:        Device sub function
+ * @fn:               Device sub function
  * @reg:       Register to access
  *
  * Returns Result of the read
@@ -303,7 +303,7 @@ static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
  * @pcie_port: PCIe port the device is on
  * @bus:       Sub bus
  * @dev:       Device ID
- * @fn:        Device sub function
+ * @fn:               Device sub function
  * @reg:       Register to access
  *
  * Returns Result of the read
@@ -325,7 +325,7 @@ static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
  * @pcie_port: PCIe port the device is on
  * @bus:       Sub bus
  * @dev:       Device ID
- * @fn:        Device sub function
+ * @fn:               Device sub function
  * @reg:       Register to access
  * @val:       Value to write
  */
@@ -344,7 +344,7 @@ static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
  * @pcie_port: PCIe port the device is on
  * @bus:       Sub bus
  * @dev:       Device ID
- * @fn:        Device sub function
+ * @fn:               Device sub function
  * @reg:       Register to access
  * @val:       Value to write
  */
@@ -363,7 +363,7 @@ static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
  * @pcie_port: PCIe port the device is on
  * @bus:       Sub bus
  * @dev:       Device ID
- * @fn:        Device sub function
+ * @fn:               Device sub function
  * @reg:       Register to access
  * @val:       Value to write
  */
@@ -883,14 +883,14 @@ retry:
 
        /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
        npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
-       npei_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
-       npei_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
+       npei_mem_access_ctl.s.max_word = 0;     /* Allow 16 words to combine */
+       npei_mem_access_ctl.s.timer = 127;      /* Wait up to 127 cycles for more data */
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
 
        /* Setup Mem access SubDIDs */
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
-       mem_access_subid.s.nmerge = 1;  /* Due to an errata on pass 1 chips, no merging is allowed. */
+       mem_access_subid.s.nmerge = 1;  /* Due to an errata on pass 1 chips, no merging is allowed. */
        mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
        mem_access_subid.s.nsr = 0;     /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
@@ -926,7 +926,7 @@ retry:
 
        bar1_index.u32 = 0;
        bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
-       bar1_index.s.ca = 1;       /* Not Cached */
+       bar1_index.s.ca = 1;       /* Not Cached */
        bar1_index.s.end_swp = 1;  /* Endian Swap mode */
        bar1_index.s.addr_v = 1;   /* Valid entry */
 
@@ -1342,11 +1342,11 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
        /* Setup Mem access SubDIDs */
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
-       mem_access_subid.s.nmerge = 0;  /* Allow merging as it works on CN6XXX. */
-       mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
-       mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
-       mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
-       mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
+       mem_access_subid.s.nmerge = 0;  /* Allow merging as it works on CN6XXX. */
+       mem_access_subid.s.esr = 1;     /* Endian-swap for Reads. */
+       mem_access_subid.s.esw = 1;     /* Endian-swap for Writes. */
+       mem_access_subid.s.wtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
+       mem_access_subid.s.rtype = 0;   /* "No snoop" and "Relaxed ordering" are not set */
        /* PCIe Adddress Bits <63:34>. */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                mem_access_subid.cn68xx.ba = 0;
@@ -1409,7 +1409,7 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
 
        bar1_index.u64 = 0;
        bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
-       bar1_index.s.ca = 1;       /* Not Cached */
+       bar1_index.s.ca = 1;       /* Not Cached */
        bar1_index.s.end_swp = 1;  /* Endian Swap mode */
        bar1_index.s.addr_v = 1;   /* Valid entry */
 
@@ -1458,10 +1458,10 @@ static int cvmx_pcie_rc_initialize(int pcie_port)
  *
  * @dev:    The Linux PCI device structure for the device to map
  * @slot:   The slot number for this device on __BUS 0__. Linux
- *               enumerates through all the bridges and figures out the
- *               slot on Bus 0 where this device eventually hooks to.
+ *              enumerates through all the bridges and figures out the
+ *              slot on Bus 0 where this device eventually hooks to.
  * @pin:    The PCI interrupt pin read from the device, then swizzled
- *               as it goes through each bridge.
+ *              as it goes through each bridge.
  * Returns Interrupt number for the device
  */
 int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
@@ -1503,7 +1503,7 @@ int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
        return pin - 1 + OCTEON_IRQ_PCI_INT0;
 }
 
-static  void set_cfg_read_retry(u32 retry_cnt)
+static void set_cfg_read_retry(u32 retry_cnt)
 {
        union cvmx_pemx_ctl_status pemx_ctl;
        pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
@@ -1931,7 +1931,7 @@ static int __init octeon_pcie_setup(void)
                        OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
                        sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
                        if (sriox_status_reg.s.srio) {
-                               srio_war15205 += 1;      /* Port is SRIO */
+                               srio_war15205 += 1;      /* Port is SRIO */
                                port = 0;
                        }
                }
@@ -2004,7 +2004,7 @@ static int __init octeon_pcie_setup(void)
                        OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
                        sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
                        if (sriox_status_reg.s.srio) {
-                               srio_war15205 += 1;      /* Port is SRIO */
+                               srio_war15205 += 1;      /* Port is SRIO */
                                port = 1;
                        }
                }
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
deleted file mode 100644 (file)
index 3482b8c..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-choice
-       prompt "PMC-Sierra MSP SOC type"
-       depends on PMC_MSP
-
-config PMC_MSP4200_EVAL
-       bool "PMC-Sierra MSP4200 Eval Board"
-       select IRQ_MSP_SLP
-       select HW_HAS_PCI
-
-config PMC_MSP4200_GW
-       bool "PMC-Sierra MSP4200 VoIP Gateway"
-       select IRQ_MSP_SLP
-       select HW_HAS_PCI
-
-config PMC_MSP7120_EVAL
-       bool "PMC-Sierra MSP7120 Eval Board"
-       select SYS_SUPPORTS_MULTITHREADING
-       select IRQ_MSP_CIC
-       select HW_HAS_PCI
-
-config PMC_MSP7120_GW
-       bool "PMC-Sierra MSP7120 Residential Gateway"
-       select SYS_SUPPORTS_MULTITHREADING
-       select IRQ_MSP_CIC
-       select HW_HAS_PCI
-       select MSP_HAS_USB
-       select MSP_ETH
-
-config PMC_MSP7120_FPGA
-       bool "PMC-Sierra MSP7120 FPGA"
-       select SYS_SUPPORTS_MULTITHREADING
-       select IRQ_MSP_CIC
-       select HW_HAS_PCI
-
-endchoice
-
-config MSP_HAS_USB
-       boolean
-       depends on PMC_MSP
-
-config MSP_ETH
-       boolean
-       select MSP_HAS_MAC
-       depends on PMC_MSP
-
-config MSP_HAS_MAC
-       boolean
-       depends on PMC_MSP
diff --git a/arch/mips/pmc-sierra/Platform b/arch/mips/pmc-sierra/Platform
deleted file mode 100644 (file)
index 387fda6..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# PMC-Sierra MSP SOCs
-#
-platform-$(CONFIG_PMC_MSP)     += pmc-sierra/msp71xx/
-cflags-$(CONFIG_PMC_MSP)       += -I$(srctree)/arch/mips/include/asm/pmc-sierra/msp71xx \
-                                       -mno-branch-likely
-load-$(CONFIG_PMC_MSP)         += 0xffffffff80100000
diff --git a/arch/mips/pmc-sierra/msp71xx/Makefile b/arch/mips/pmc-sierra/msp71xx/Makefile
deleted file mode 100644 (file)
index cefba77..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the PMC-Sierra MSP SOCs
-#
-obj-y += msp_prom.o msp_setup.o msp_irq.o \
-        msp_time.o msp_serial.o msp_elb.o
-obj-$(CONFIG_HAVE_GPIO_LIB) += gpio.o gpio_extended.o
-obj-$(CONFIG_PMC_MSP7120_GW) += msp_hwbutton.o
-obj-$(CONFIG_IRQ_MSP_SLP) += msp_irq_slp.o
-obj-$(CONFIG_IRQ_MSP_CIC) += msp_irq_cic.o msp_irq_per.o
-obj-$(CONFIG_PCI) += msp_pci.o
-obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o
-obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o
-obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o
-obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o
diff --git a/arch/mips/pmc-sierra/msp71xx/gpio.c b/arch/mips/pmc-sierra/msp71xx/gpio.c
deleted file mode 100644 (file)
index aaccbe5..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Generic PMC MSP71xx GPIO handling. These base gpio are controlled by two
- * types of registers. The data register sets the output level when in output
- * mode and when in input mode will contain the value at the input. The config
- * register sets the various modes for each gpio.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * @author Patrick Glass <patrickglass@gmail.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-
-#define MSP71XX_CFG_OFFSET(gpio)       (4 * (gpio))
-#define CONF_MASK                      0x0F
-#define MSP71XX_GPIO_INPUT             0x01
-#define MSP71XX_GPIO_OUTPUT            0x08
-
-#define MSP71XX_GPIO_BASE              0x0B8400000L
-
-#define to_msp71xx_gpio_chip(c) container_of(c, struct msp71xx_gpio_chip, chip)
-
-static spinlock_t gpio_lock;
-
-/*
- * struct msp71xx_gpio_chip - container for gpio chip and registers
- * @chip: chip structure for the specified gpio bank
- * @data_reg: register for reading and writing the gpio pin value
- * @config_reg: register to set the mode for the gpio pin bank
- * @out_drive_reg: register to set the output drive mode for the gpio pin bank
- */
-struct msp71xx_gpio_chip {
-       struct gpio_chip chip;
-       void __iomem *data_reg;
-       void __iomem *config_reg;
-       void __iomem *out_drive_reg;
-};
-
-/*
- * msp71xx_gpio_get() - return the chip's gpio value
- * @chip: chip structure which controls the specified gpio
- * @offset: gpio whose value will be returned
- *
- * It will return 0 if gpio value is low and other if high.
- */
-static int msp71xx_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-       struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip);
-
-       return __raw_readl(msp_chip->data_reg) & (1 << offset);
-}
-
-/*
- * msp71xx_gpio_set() - set the output value for the gpio
- * @chip: chip structure who controls the specified gpio
- * @offset: gpio whose value will be assigned
- * @value: logic level to assign to the gpio initially
- *
- * This will set the gpio bit specified to the desired value. It will set the
- * gpio pin low if value is 0 otherwise it will be high.
- */
-static void msp71xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-       struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip);
-       unsigned long flags;
-       u32 data;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-
-       data = __raw_readl(msp_chip->data_reg);
-       if (value)
-               data |= (1 << offset);
-       else
-               data &= ~(1 << offset);
-       __raw_writel(data, msp_chip->data_reg);
-
-       spin_unlock_irqrestore(&gpio_lock, flags);
-}
-
-/*
- * msp71xx_set_gpio_mode() - declare the mode for a gpio
- * @chip: chip structure which controls the specified gpio
- * @offset: gpio whose value will be assigned
- * @mode: desired configuration for the gpio (see datasheet)
- *
- * It will set the gpio pin config to the @mode value passed in.
- */
-static int msp71xx_set_gpio_mode(struct gpio_chip *chip,
-                                unsigned offset, int mode)
-{
-       struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip);
-       const unsigned bit_offset = MSP71XX_CFG_OFFSET(offset);
-       unsigned long flags;
-       u32 cfg;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-
-       cfg = __raw_readl(msp_chip->config_reg);
-       cfg &= ~(CONF_MASK << bit_offset);
-       cfg |= (mode << bit_offset);
-       __raw_writel(cfg, msp_chip->config_reg);
-
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
-       return 0;
-}
-
-/*
- * msp71xx_direction_output() - declare the direction mode for a gpio
- * @chip: chip structure which controls the specified gpio
- * @offset: gpio whose value will be assigned
- * @value: logic level to assign to the gpio initially
- *
- * This call will set the mode for the @gpio to output. It will set the
- * gpio pin low if value is 0 otherwise it will be high.
- */
-static int msp71xx_direction_output(struct gpio_chip *chip,
-                                   unsigned offset, int value)
-{
-       msp71xx_gpio_set(chip, offset, value);
-
-       return msp71xx_set_gpio_mode(chip, offset, MSP71XX_GPIO_OUTPUT);
-}
-
-/*
- * msp71xx_direction_input() - declare the direction mode for a gpio
- * @chip: chip structure which controls the specified gpio
- * @offset: gpio whose to which the value will be assigned
- *
- * This call will set the mode for the @gpio to input.
- */
-static int msp71xx_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       return msp71xx_set_gpio_mode(chip, offset, MSP71XX_GPIO_INPUT);
-}
-
-/*
- * msp71xx_set_output_drive() - declare the output drive for the gpio line
- * @gpio: gpio pin whose output drive you wish to modify
- * @value: zero for active drain 1 for open drain drive
- *
- * This call will set the output drive mode for the @gpio to output.
- */
-int msp71xx_set_output_drive(unsigned gpio, int value)
-{
-       unsigned long flags;
-       u32 data;
-
-       if (gpio > 15 || gpio < 0)
-               return -EINVAL;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-
-       data = __raw_readl((void __iomem *)(MSP71XX_GPIO_BASE + 0x190));
-       if (value)
-               data |= (1 << gpio);
-       else
-               data &= ~(1 << gpio);
-       __raw_writel(data, (void __iomem *)(MSP71XX_GPIO_BASE + 0x190));
-
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
-       return 0;
-}
-EXPORT_SYMBOL(msp71xx_set_output_drive);
-
-#define MSP71XX_GPIO_BANK(name, dr, cr, base_gpio, num_gpio) \
-{ \
-       .chip = { \
-               .label            = name, \
-               .direction_input  = msp71xx_direction_input, \
-               .direction_output = msp71xx_direction_output, \
-               .get              = msp71xx_gpio_get, \
-               .set              = msp71xx_gpio_set, \
-               .base             = base_gpio, \
-               .ngpio            = num_gpio \
-       }, \
-       .data_reg       = (void __iomem *)(MSP71XX_GPIO_BASE + dr), \
-       .config_reg     = (void __iomem *)(MSP71XX_GPIO_BASE + cr), \
-       .out_drive_reg  = (void __iomem *)(MSP71XX_GPIO_BASE + 0x190), \
-}
-
-/*
- * struct msp71xx_gpio_banks[] - container array of gpio banks
- * @chip: chip structure for the specified gpio bank
- * @data_reg: register for reading and writing the gpio pin value
- * @config_reg: register to set the mode for the gpio pin bank
- *
- * This array structure defines the gpio banks for the PMC MIPS Processor.
- * We specify the bank name, the data register, the config register, base
- * starting gpio number, and the number of gpios exposed by the bank.
- */
-static struct msp71xx_gpio_chip msp71xx_gpio_banks[] = {
-
-       MSP71XX_GPIO_BANK("GPIO_1_0", 0x170, 0x180, 0, 2),
-       MSP71XX_GPIO_BANK("GPIO_5_2", 0x174, 0x184, 2, 4),
-       MSP71XX_GPIO_BANK("GPIO_9_6", 0x178, 0x188, 6, 4),
-       MSP71XX_GPIO_BANK("GPIO_15_10", 0x17C, 0x18C, 10, 6),
-};
-
-void __init msp71xx_init_gpio(void)
-{
-       int i;
-
-       spin_lock_init(&gpio_lock);
-
-       for (i = 0; i < ARRAY_SIZE(msp71xx_gpio_banks); i++)
-               gpiochip_add(&msp71xx_gpio_banks[i].chip);
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/gpio_extended.c b/arch/mips/pmc-sierra/msp71xx/gpio_extended.c
deleted file mode 100644 (file)
index 2a99f36..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Generic PMC MSP71xx EXTENDED (EXD) GPIO handling. The extended gpio is
- * a set of hardware registers that have no need for explicit locking as
- * it is handled by unique method of writing individual set/clr bits.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * @author Patrick Glass <patrickglass@gmail.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#define MSP71XX_DATA_OFFSET(gpio)      (2 * (gpio))
-#define MSP71XX_READ_OFFSET(gpio)      (MSP71XX_DATA_OFFSET(gpio) + 1)
-#define MSP71XX_CFG_OUT_OFFSET(gpio)   (MSP71XX_DATA_OFFSET(gpio) + 16)
-#define MSP71XX_CFG_IN_OFFSET(gpio)    (MSP71XX_CFG_OUT_OFFSET(gpio) + 1)
-
-#define MSP71XX_EXD_GPIO_BASE  0x0BC000000L
-
-#define to_msp71xx_exd_gpio_chip(c) \
-                       container_of(c, struct msp71xx_exd_gpio_chip, chip)
-
-/*
- * struct msp71xx_exd_gpio_chip - container for gpio chip and registers
- * @chip: chip structure for the specified gpio bank
- * @reg: register for control and data of gpio pin
- */
-struct msp71xx_exd_gpio_chip {
-       struct gpio_chip chip;
-       void __iomem *reg;
-};
-
-/*
- * msp71xx_exd_gpio_get() - return the chip's gpio value
- * @chip: chip structure which controls the specified gpio
- * @offset: gpio whose value will be returned
- *
- * It will return 0 if gpio value is low and other if high.
- */
-static int msp71xx_exd_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-       struct msp71xx_exd_gpio_chip *msp71xx_chip =
-           to_msp71xx_exd_gpio_chip(chip);
-       const unsigned bit = MSP71XX_READ_OFFSET(offset);
-
-       return __raw_readl(msp71xx_chip->reg) & (1 << bit);
-}
-
-/*
- * msp71xx_exd_gpio_set() - set the output value for the gpio
- * @chip: chip structure who controls the specified gpio
- * @offset: gpio whose value will be assigned
- * @value: logic level to assign to the gpio initially
- *
- * This will set the gpio bit specified to the desired value. It will set the
- * gpio pin low if value is 0 otherwise it will be high.
- */
-static void msp71xx_exd_gpio_set(struct gpio_chip *chip,
-                                unsigned offset, int value)
-{
-       struct msp71xx_exd_gpio_chip *msp71xx_chip =
-           to_msp71xx_exd_gpio_chip(chip);
-       const unsigned bit = MSP71XX_DATA_OFFSET(offset);
-
-       __raw_writel(1 << (bit + (value ? 1 : 0)), msp71xx_chip->reg);
-}
-
-/*
- * msp71xx_exd_direction_output() - declare the direction mode for a gpio
- * @chip: chip structure which controls the specified gpio
- * @offset: gpio whose value will be assigned
- * @value: logic level to assign to the gpio initially
- *
- * This call will set the mode for the @gpio to output. It will set the
- * gpio pin low if value is 0 otherwise it will be high.
- */
-static int msp71xx_exd_direction_output(struct gpio_chip *chip,
-                                       unsigned offset, int value)
-{
-       struct msp71xx_exd_gpio_chip *msp71xx_chip =
-           to_msp71xx_exd_gpio_chip(chip);
-
-       msp71xx_exd_gpio_set(chip, offset, value);
-       __raw_writel(1 << MSP71XX_CFG_OUT_OFFSET(offset), msp71xx_chip->reg);
-       return 0;
-}
-
-/*
- * msp71xx_exd_direction_input() - declare the direction mode for a gpio
- * @chip: chip structure which controls the specified gpio
- * @offset: gpio whose to which the value will be assigned
- *
- * This call will set the mode for the @gpio to input.
- */
-static int msp71xx_exd_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       struct msp71xx_exd_gpio_chip *msp71xx_chip =
-           to_msp71xx_exd_gpio_chip(chip);
-
-       __raw_writel(1 << MSP71XX_CFG_IN_OFFSET(offset), msp71xx_chip->reg);
-       return 0;
-}
-
-#define MSP71XX_EXD_GPIO_BANK(name, exd_reg, base_gpio, num_gpio) \
-{ \
-       .chip = { \
-               .label            = name, \
-               .direction_input  = msp71xx_exd_direction_input, \
-               .direction_output = msp71xx_exd_direction_output, \
-               .get              = msp71xx_exd_gpio_get, \
-               .set              = msp71xx_exd_gpio_set, \
-               .base             = base_gpio, \
-               .ngpio            = num_gpio, \
-       }, \
-       .reg    = (void __iomem *)(MSP71XX_EXD_GPIO_BASE + exd_reg), \
-}
-
-/*
- * struct msp71xx_exd_gpio_banks[] - container array of gpio banks
- * @chip: chip structure for the specified gpio bank
- * @reg: register for reading and writing the gpio pin value
- *
- * This array structure defines the extended gpio banks for the
- * PMC MIPS Processor. We specify the bank name, the data/config
- * register,the base starting gpio number, and the number of
- * gpios exposed by the bank of gpios.
- */
-static struct msp71xx_exd_gpio_chip msp71xx_exd_gpio_banks[] = {
-
-       MSP71XX_EXD_GPIO_BANK("GPIO_23_16", 0x188, 16, 8),
-       MSP71XX_EXD_GPIO_BANK("GPIO_27_24", 0x18C, 24, 4),
-};
-
-void __init msp71xx_init_gpio_extended(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(msp71xx_exd_gpio_banks); i++)
-               gpiochip_add(&msp71xx_exd_gpio_banks[i].chip);
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_elb.c b/arch/mips/pmc-sierra/msp71xx/msp_elb.c
deleted file mode 100644 (file)
index 3e96410..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Sets up the proper Chip Select configuration registers.  It is assumed that
- * PMON sets up the ADDR and MASK registers properly.
- *
- * Copyright 2005-2006 PMC-Sierra, Inc.
- * Author: Marc St-Jean, Marc_St-Jean@pmc-sierra.com
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <msp_regs.h>
-
-static int __init msp_elb_setup(void)
-{
-#if defined(CONFIG_PMC_MSP7120_GW) \
- || defined(CONFIG_PMC_MSP7120_EVAL)
-       /*
-        * Force all CNFG to be identical and equal to CS0,
-        * according to OPS doc
-        */
-       *CS1_CNFG_REG = *CS2_CNFG_REG = *CS3_CNFG_REG = *CS0_CNFG_REG;
-#endif
-       return 0;
-}
-
-subsys_initcall(msp_elb_setup);
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_eth.c b/arch/mips/pmc-sierra/msp71xx/msp_eth.c
deleted file mode 100644 (file)
index c584df3..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * The setup file for ethernet related hardware on PMC-Sierra MSP processors.
- *
- * Copyright 2010 PMC-Sierra, Inc.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <msp_regs.h>
-#include <msp_int.h>
-#include <msp_gpio_macros.h>
-
-
-#define MSP_ETHERNET_GPIO0     14
-#define MSP_ETHERNET_GPIO1     15
-#define MSP_ETHERNET_GPIO2     16
-
-#ifdef CONFIG_MSP_HAS_TSMAC
-#define MSP_TSMAC_SIZE 0x10020
-#define MSP_TSMAC_ID   "pmc_tsmac"
-
-static struct resource msp_tsmac0_resources[] = {
-       [0] = {
-               .start  = MSP_MAC0_BASE,
-               .end    = MSP_MAC0_BASE + MSP_TSMAC_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_MAC0,
-               .end    = MSP_INT_MAC0,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static struct resource msp_tsmac1_resources[] = {
-       [0] = {
-               .start  = MSP_MAC1_BASE,
-               .end    = MSP_MAC1_BASE + MSP_TSMAC_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_MAC1,
-               .end    = MSP_INT_MAC1,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-static struct resource msp_tsmac2_resources[] = {
-       [0] = {
-               .start  = MSP_MAC2_BASE,
-               .end    = MSP_MAC2_BASE + MSP_TSMAC_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_SAR,
-               .end    = MSP_INT_SAR,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-
-static struct platform_device tsmac_device[] = {
-       [0] = {
-               .name   = MSP_TSMAC_ID,
-               .id     = 0,
-               .num_resources = ARRAY_SIZE(msp_tsmac0_resources),
-               .resource = msp_tsmac0_resources,
-       },
-       [1] = {
-               .name   = MSP_TSMAC_ID,
-               .id     = 1,
-               .num_resources = ARRAY_SIZE(msp_tsmac1_resources),
-               .resource = msp_tsmac1_resources,
-       },
-       [2] = {
-               .name   = MSP_TSMAC_ID,
-               .id     = 2,
-               .num_resources = ARRAY_SIZE(msp_tsmac2_resources),
-               .resource = msp_tsmac2_resources,
-       },
-};
-#define msp_eth_devs   tsmac_device
-
-#else
-/* If it is not TSMAC assume MSP_ETH (100Mbps) */
-#define MSP_ETH_ID     "pmc_mspeth"
-#define MSP_ETH_SIZE   0xE0
-static struct resource msp_eth0_resources[] = {
-       [0] = {
-               .start  = MSP_MAC0_BASE,
-               .end    = MSP_MAC0_BASE + MSP_ETH_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_MAC0,
-               .end    = MSP_INT_MAC0,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static struct resource msp_eth1_resources[] = {
-       [0] = {
-               .start  = MSP_MAC1_BASE,
-               .end    = MSP_MAC1_BASE + MSP_ETH_SIZE - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_MAC1,
-               .end    = MSP_INT_MAC1,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-
-
-static struct platform_device mspeth_device[] = {
-       [0] = {
-               .name   = MSP_ETH_ID,
-               .id     = 0,
-               .num_resources = ARRAY_SIZE(msp_eth0_resources),
-               .resource = msp_eth0_resources,
-       },
-       [1] = {
-               .name   = MSP_ETH_ID,
-               .id     = 1,
-               .num_resources = ARRAY_SIZE(msp_eth1_resources),
-               .resource = msp_eth1_resources,
-       },
-
-};
-#define msp_eth_devs   mspeth_device
-
-#endif
-int __init msp_eth_setup(void)
-{
-       int i, ret = 0;
-
-       /* Configure the GPIO and take the ethernet PHY out of reset */
-       msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO0);
-       msp_gpio_pin_hi(MSP_ETHERNET_GPIO0);
-
-#ifdef CONFIG_MSP_HAS_TSMAC
-       /* 3 phys on boards with TSMAC */
-       msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO1);
-       msp_gpio_pin_hi(MSP_ETHERNET_GPIO1);
-
-       msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO2);
-       msp_gpio_pin_hi(MSP_ETHERNET_GPIO2);
-#endif
-       for (i = 0; i < ARRAY_SIZE(msp_eth_devs); i++) {
-               ret = platform_device_register(&msp_eth_devs[i]);
-               printk(KERN_INFO "device: %d, return value = %d\n", i, ret);
-               if (ret) {
-                       platform_device_unregister(&msp_eth_devs[i]);
-                       break;
-               }
-       }
-
-       if (ret)
-               printk(KERN_WARNING "Could not initialize "
-                                               "MSPETH device structures.\n");
-
-       return ret;
-}
-subsys_initcall(msp_eth_setup);
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c b/arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c
deleted file mode 100644 (file)
index bb57ed9..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Sets up interrupt handlers for various hardware switches which are
- * connected to interrupt lines.
- *
- * Copyright 2005-2207 PMC-Sierra, Inc.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <msp_int.h>
-#include <msp_regs.h>
-#include <msp_regops.h>
-
-/* For hwbutton_interrupt->initial_state */
-#define HWBUTTON_HI    0x1
-#define HWBUTTON_LO    0x2
-
-/*
- * This struct describes a hardware button
- */
-struct hwbutton_interrupt {
-       char *name;                     /* Name of button */
-       int irq;                        /* Actual LINUX IRQ */
-       int eirq;                       /* Extended IRQ number (0-7) */
-       int initial_state;              /* The "normal" state of the switch */
-       void (*handle_hi)(void *);      /* Handler: switch input has gone HI */
-       void (*handle_lo)(void *);      /* Handler: switch input has gone LO */
-       void *data;                     /* Optional data to pass to handler */
-};
-
-#ifdef CONFIG_PMC_MSP7120_GW
-extern void msp_restart(char *);
-
-static void softreset_push(void *data)
-{
-       printk(KERN_WARNING "SOFTRESET switch was pushed\n");
-
-       /*
-        * In the future you could move this to the release handler,
-        * timing the difference between the 'push' and 'release', and only
-        * doing this ungraceful restart if the button has been down for
-        * a certain amount of time; otherwise doing a graceful restart.
-        */
-
-       msp_restart(NULL);
-}
-
-static void softreset_release(void *data)
-{
-       printk(KERN_WARNING "SOFTRESET switch was released\n");
-
-       /* Do nothing */
-}
-
-static void standby_on(void *data)
-{
-       printk(KERN_WARNING "STANDBY switch was set to ON (not implemented)\n");
-
-       /* TODO: Put board in standby mode */
-}
-
-static void standby_off(void *data)
-{
-       printk(KERN_WARNING
-               "STANDBY switch was set to OFF (not implemented)\n");
-
-       /* TODO: Take out of standby mode */
-}
-
-static struct hwbutton_interrupt softreset_sw = {
-       .name = "Softreset button",
-       .irq = MSP_INT_EXT0,
-       .eirq = 0,
-       .initial_state = HWBUTTON_HI,
-       .handle_hi = softreset_release,
-       .handle_lo = softreset_push,
-       .data = NULL,
-};
-
-static struct hwbutton_interrupt standby_sw = {
-       .name = "Standby switch",
-       .irq = MSP_INT_EXT1,
-       .eirq = 1,
-       .initial_state = HWBUTTON_HI,
-       .handle_hi = standby_off,
-       .handle_lo = standby_on,
-       .data = NULL,
-};
-#endif /* CONFIG_PMC_MSP7120_GW */
-
-static irqreturn_t hwbutton_handler(int irq, void *data)
-{
-       struct hwbutton_interrupt *hirq = data;
-       unsigned long cic_ext = *CIC_EXT_CFG_REG;
-
-       if (CIC_EXT_IS_ACTIVE_HI(cic_ext, hirq->eirq)) {
-               /* Interrupt: pin is now HI */
-               CIC_EXT_SET_ACTIVE_LO(cic_ext, hirq->eirq);
-               hirq->handle_hi(hirq->data);
-       } else {
-               /* Interrupt: pin is now LO */
-               CIC_EXT_SET_ACTIVE_HI(cic_ext, hirq->eirq);
-               hirq->handle_lo(hirq->data);
-       }
-
-       /*
-        * Invert the POLARITY of this level interrupt to ack the interrupt
-        * Thus next state change will invoke the opposite message
-        */
-       *CIC_EXT_CFG_REG = cic_ext;
-
-       return IRQ_HANDLED;
-}
-
-static int msp_hwbutton_register(struct hwbutton_interrupt *hirq)
-{
-       unsigned long cic_ext;
-
-       if (hirq->handle_hi == NULL || hirq->handle_lo == NULL)
-               return -EINVAL;
-
-       cic_ext = *CIC_EXT_CFG_REG;
-       CIC_EXT_SET_TRIGGER_LEVEL(cic_ext, hirq->eirq);
-       if (hirq->initial_state == HWBUTTON_HI)
-               CIC_EXT_SET_ACTIVE_LO(cic_ext, hirq->eirq);
-       else
-               CIC_EXT_SET_ACTIVE_HI(cic_ext, hirq->eirq);
-       *CIC_EXT_CFG_REG = cic_ext;
-
-       return request_irq(hirq->irq, hwbutton_handler, 0,
-                          hirq->name, hirq);
-}
-
-static int __init msp_hwbutton_setup(void)
-{
-#ifdef CONFIG_PMC_MSP7120_GW
-       msp_hwbutton_register(&softreset_sw);
-       msp_hwbutton_register(&standby_sw);
-#endif
-       return 0;
-}
-
-subsys_initcall(msp_hwbutton_setup);
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
deleted file mode 100644 (file)
index d3c3d81..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * IRQ vector handles
- *
- * Copyright (C) 1995, 1996, 1997, 2003 by Ralf Baechle
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/time.h>
-
-#include <asm/irq_cpu.h>
-
-#include <msp_int.h>
-
-/* SLP bases systems */
-extern void msp_slp_irq_init(void);
-extern void msp_slp_irq_dispatch(void);
-
-/* CIC based systems */
-extern void msp_cic_irq_init(void);
-extern void msp_cic_irq_dispatch(void);
-
-/* VSMP support init */
-extern void msp_vsmp_int_init(void);
-
-/* vectored interrupt implementation */
-
-/* SW0/1 interrupts are used for SMP/SMTC */
-static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); }
-static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); }
-static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); }
-static inline void usb_int_dispatch(void)  { do_IRQ(MSP_INT_USB);  }
-static inline void sec_int_dispatch(void)  { do_IRQ(MSP_INT_SEC);  }
-
-/*
- * The PMC-Sierra MSP interrupts are arranged in a 3 level cascaded
- * hierarchical system.  The first level are the direct MIPS interrupts
- * and are assigned the interrupt range 0-7.  The second level is the SLM
- * interrupt controller and is assigned the range 8-39.  The third level
- * comprises the Peripherial block, the PCI block, the PCI MSI block and
- * the SLP.  The PCI interrupts and the SLP errors are handled by the
- * relevant subsystems so the core interrupt code needs only concern
- * itself with the Peripheral block.  These are assigned interrupts in
- * the range 40-71.
- */
-
-asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
-{
-       u32 pending;
-
-       pending = read_c0_status() & read_c0_cause();
-
-       /*
-        * jump to the correct interrupt routine
-        * These are arranged in priority order and the timer
-        * comes first!
-        */
-
-#ifdef CONFIG_IRQ_MSP_CIC      /* break out the CIC stuff for now */
-       if (pending & C_IRQ4)   /* do the peripherals first, that's the timer */
-               msp_cic_irq_dispatch();
-
-       else if (pending & C_IRQ0)
-               do_IRQ(MSP_INT_MAC0);
-
-       else if (pending & C_IRQ1)
-               do_IRQ(MSP_INT_MAC1);
-
-       else if (pending & C_IRQ2)
-               do_IRQ(MSP_INT_USB);
-
-       else if (pending & C_IRQ3)
-               do_IRQ(MSP_INT_SAR);
-
-       else if (pending & C_IRQ5)
-               do_IRQ(MSP_INT_SEC);
-
-#else
-       if (pending & C_IRQ5)
-               do_IRQ(MSP_INT_TIMER);
-
-       else if (pending & C_IRQ0)
-               do_IRQ(MSP_INT_MAC0);
-
-       else if (pending & C_IRQ1)
-               do_IRQ(MSP_INT_MAC1);
-
-       else if (pending & C_IRQ3)
-               do_IRQ(MSP_INT_VE);
-
-       else if (pending & C_IRQ4)
-               msp_slp_irq_dispatch();
-#endif
-
-       else if (pending & C_SW0)       /* do software after hardware */
-               do_IRQ(MSP_INT_SW0);
-
-       else if (pending & C_SW1)
-               do_IRQ(MSP_INT_SW1);
-}
-
-static struct irqaction cic_cascade_msp = {
-       .handler = no_action,
-       .name    = "MSP CIC cascade",
-       .flags   = IRQF_NO_THREAD,
-};
-
-static struct irqaction per_cascade_msp = {
-       .handler = no_action,
-       .name    = "MSP PER cascade",
-       .flags   = IRQF_NO_THREAD,
-};
-
-void __init arch_init_irq(void)
-{
-       /* assume we'll be using vectored interrupt mode except in UP mode*/
-#ifdef CONFIG_MIPS_MT
-       BUG_ON(!cpu_has_vint);
-#endif
-       /* initialize the 1st-level CPU based interrupt controller */
-       mips_cpu_irq_init();
-
-#ifdef CONFIG_IRQ_MSP_CIC
-       msp_cic_irq_init();
-#ifdef CONFIG_MIPS_MT
-       set_vi_handler(MSP_INT_CIC, msp_cic_irq_dispatch);
-       set_vi_handler(MSP_INT_MAC0, mac0_int_dispatch);
-       set_vi_handler(MSP_INT_MAC1, mac1_int_dispatch);
-       set_vi_handler(MSP_INT_SAR, mac2_int_dispatch);
-       set_vi_handler(MSP_INT_USB, usb_int_dispatch);
-       set_vi_handler(MSP_INT_SEC, sec_int_dispatch);
-#ifdef CONFIG_MIPS_MT_SMP
-       msp_vsmp_int_init();
-#elif defined CONFIG_MIPS_MT_SMTC
-       /*Set hwmask for all platform devices */
-       irq_hwmask[MSP_INT_MAC0] = C_IRQ0;
-       irq_hwmask[MSP_INT_MAC1] = C_IRQ1;
-       irq_hwmask[MSP_INT_USB] = C_IRQ2;
-       irq_hwmask[MSP_INT_SAR] = C_IRQ3;
-       irq_hwmask[MSP_INT_SEC] = C_IRQ5;
-
-#endif /* CONFIG_MIPS_MT_SMP */
-#endif /* CONFIG_MIPS_MT */
-       /* setup the cascaded interrupts */
-       setup_irq(MSP_INT_CIC, &cic_cascade_msp);
-       setup_irq(MSP_INT_PER, &per_cascade_msp);
-
-#else
-       /* setup the 2nd-level SLP register based interrupt controller */
-       /* VSMP /SMTC support support is not enabled for SLP */
-       msp_slp_irq_init();
-
-       /* setup the cascaded SLP/PER interrupts */
-       setup_irq(MSP_INT_SLP, &cic_cascade_msp);
-       setup_irq(MSP_INT_PER, &per_cascade_msp);
-#endif
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
deleted file mode 100644 (file)
index 2e6f7ca..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
- *
- * This file define the irq handler for MSP CIC subsystem interrupts.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/irq.h>
-
-#include <asm/mipsregs.h>
-
-#include <msp_cic_int.h>
-#include <msp_regs.h>
-
-/*
- * External API
- */
-extern void msp_per_irq_init(void);
-extern void msp_per_irq_dispatch(void);
-
-
-/*
- * Convenience Macro.  Should be somewhere generic.
- */
-#define get_current_vpe()   \
-       ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
-
-#ifdef CONFIG_SMP
-
-#define LOCK_VPE(flags, mtflags) \
-do {                           \
-       local_irq_save(flags);  \
-       mtflags = dmt();        \
-} while (0)
-
-#define UNLOCK_VPE(flags, mtflags) \
-do {                           \
-       emt(mtflags);           \
-       local_irq_restore(flags);\
-} while (0)
-
-#define LOCK_CORE(flags, mtflags) \
-do {                           \
-       local_irq_save(flags);  \
-       mtflags = dvpe();       \
-} while (0)
-
-#define UNLOCK_CORE(flags, mtflags)            \
-do {                           \
-       evpe(mtflags);          \
-       local_irq_restore(flags);\
-} while (0)
-
-#else
-
-#define LOCK_VPE(flags, mtflags)
-#define UNLOCK_VPE(flags, mtflags)
-#endif
-
-/* ensure writes to cic are completed */
-static inline void cic_wmb(void)
-{
-       const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
-       volatile u32 dummy_read;
-
-       wmb();
-       dummy_read = __raw_readl(cic_mem);
-       dummy_read++;
-}
-
-static void unmask_cic_irq(struct irq_data *d)
-{
-       volatile u32   *cic_msk_reg = CIC_VPE0_MSK_REG;
-       int vpe;
-#ifdef CONFIG_SMP
-       unsigned int mtflags;
-       unsigned long  flags;
-
-       /*
-       * Make sure we have IRQ affinity.  It may have changed while
-       * we were processing the IRQ.
-       */
-       if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
-               return;
-#endif
-
-       vpe = get_current_vpe();
-       LOCK_VPE(flags, mtflags);
-       cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
-       UNLOCK_VPE(flags, mtflags);
-       cic_wmb();
-}
-
-static void mask_cic_irq(struct irq_data *d)
-{
-       volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
-       int     vpe = get_current_vpe();
-#ifdef CONFIG_SMP
-       unsigned long flags, mtflags;
-#endif
-       LOCK_VPE(flags, mtflags);
-       cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
-       UNLOCK_VPE(flags, mtflags);
-       cic_wmb();
-}
-static void msp_cic_irq_ack(struct irq_data *d)
-{
-       mask_cic_irq(d);
-       /*
-       * Only really necessary for 18, 16-14 and sometimes 3:0
-       * (since these can be edge sensitive) but it doesn't
-       * hurt for the others
-       */
-       *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
-       smtc_im_ack_irq(d->irq);
-}
-
-/*Note: Limiting to VSMP . Not tested in SMTC */
-
-#ifdef CONFIG_MIPS_MT_SMP
-static int msp_cic_irq_set_affinity(struct irq_data *d,
-                                   const struct cpumask *cpumask, bool force)
-{
-       int cpu;
-       unsigned long flags;
-       unsigned int  mtflags;
-       unsigned long imask = (1 << (irq - MSP_CIC_INTBASE));
-       volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;
-
-       /* timer balancing should be disabled in kernel code */
-       BUG_ON(irq == MSP_INT_VPE0_TIMER || irq == MSP_INT_VPE1_TIMER);
-
-       LOCK_CORE(flags, mtflags);
-       /* enable if any of each VPE's TCs require this IRQ */
-       for_each_online_cpu(cpu) {
-               if (cpumask_test_cpu(cpu, cpumask))
-                       cic_mask[cpu] |= imask;
-               else
-                       cic_mask[cpu] &= ~imask;
-
-       }
-
-       UNLOCK_CORE(flags, mtflags);
-       return 0;
-
-}
-#endif
-
-static struct irq_chip msp_cic_irq_controller = {
-       .name = "MSP_CIC",
-       .irq_mask = mask_cic_irq,
-       .irq_mask_ack = msp_cic_irq_ack,
-       .irq_unmask = unmask_cic_irq,
-       .irq_ack = msp_cic_irq_ack,
-#ifdef CONFIG_MIPS_MT_SMP
-       .irq_set_affinity = msp_cic_irq_set_affinity,
-#endif
-};
-
-void __init msp_cic_irq_init(void)
-{
-       int i;
-       /* Mask/clear interrupts. */
-       *CIC_VPE0_MSK_REG = 0x00000000;
-       *CIC_VPE1_MSK_REG = 0x00000000;
-       *CIC_STS_REG      = 0xFFFFFFFF;
-       /*
-       * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
-       * These inputs map to EXT_INT_POL[6:4] inside the CIC.
-       * They are to be active low, level sensitive.
-       */
-       *CIC_EXT_CFG_REG &= 0xFFFF8F8F;
-
-       /* initialize all the IRQ descriptors */
-       for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
-               irq_set_chip_and_handler(i, &msp_cic_irq_controller,
-                                        handle_level_irq);
-#ifdef CONFIG_MIPS_MT_SMTC
-               /* Mask of CIC interrupt */
-               irq_hwmask[i] = C_IRQ4;
-#endif
-       }
-
-       /* Initialize the PER interrupt sub-system */
-        msp_per_irq_init();
-}
-
-/* CIC masked by CIC vector processing before dispatch called */
-void msp_cic_irq_dispatch(void)
-{
-       volatile u32    *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
-       u32     cic_mask;
-       u32      pending;
-       int     cic_status = *CIC_STS_REG;
-       cic_mask = cic_msk_reg[get_current_vpe()];
-       pending = cic_status & cic_mask;
-       if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
-               do_IRQ(MSP_INT_VPE0_TIMER);
-       } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
-               do_IRQ(MSP_INT_VPE1_TIMER);
-       } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
-               msp_per_irq_dispatch();
-       } else if (pending) {
-               do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
-       } else{
-               spurious_interrupt();
-       }
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
deleted file mode 100644 (file)
index 598b6a6..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
- *
- * This file define the irq handler for MSP PER subsystem interrupts.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-
-#include <asm/mipsregs.h>
-
-#include <msp_cic_int.h>
-#include <msp_regs.h>
-
-
-/*
- * Convenience Macro.  Should be somewhere generic.
- */
-#define get_current_vpe()      \
-       ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
-
-#ifdef CONFIG_SMP
-/*
- * The PER registers must be protected from concurrent access.
- */
-
-static DEFINE_SPINLOCK(per_lock);
-#endif
-
-/* ensure writes to per are completed */
-
-static inline void per_wmb(void)
-{
-       const volatile void __iomem *per_mem = PER_INT_MSK_REG;
-       volatile u32 dummy_read;
-
-       wmb();
-       dummy_read = __raw_readl(per_mem);
-       dummy_read++;
-}
-
-static inline void unmask_per_irq(struct irq_data *d)
-{
-#ifdef CONFIG_SMP
-       unsigned long flags;
-       spin_lock_irqsave(&per_lock, flags);
-       *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
-       spin_unlock_irqrestore(&per_lock, flags);
-#else
-       *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
-#endif
-       per_wmb();
-}
-
-static inline void mask_per_irq(struct irq_data *d)
-{
-#ifdef CONFIG_SMP
-       unsigned long flags;
-       spin_lock_irqsave(&per_lock, flags);
-       *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
-       spin_unlock_irqrestore(&per_lock, flags);
-#else
-       *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
-#endif
-       per_wmb();
-}
-
-static inline void msp_per_irq_ack(struct irq_data *d)
-{
-       mask_per_irq(d);
-       /*
-        * In the PER interrupt controller, only bits 11 and 10
-        * are write-to-clear, (SPI TX complete, SPI RX complete).
-        * It does nothing for any others.
-        */
-       *PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
-}
-
-#ifdef CONFIG_SMP
-static int msp_per_irq_set_affinity(struct irq_data *d,
-                                   const struct cpumask *affinity, bool force)
-{
-       /* WTF is this doing ????? */
-       unmask_per_irq(d);
-       return 0;
-}
-#endif
-
-static struct irq_chip msp_per_irq_controller = {
-       .name = "MSP_PER",
-       .irq_enable = unmask_per_irq,
-       .irq_disable = mask_per_irq,
-       .irq_ack = msp_per_irq_ack,
-#ifdef CONFIG_SMP
-       .irq_set_affinity = msp_per_irq_set_affinity,
-#endif
-};
-
-void __init msp_per_irq_init(void)
-{
-       int i;
-       /* Mask/clear interrupts. */
-       *PER_INT_MSK_REG  = 0x00000000;
-       *PER_INT_STS_REG  = 0xFFFFFFFF;
-       /* initialize all the IRQ descriptors */
-       for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
-               irq_set_chip(i, &msp_per_irq_controller);
-#ifdef CONFIG_MIPS_MT_SMTC
-               irq_hwmask[i] = C_IRQ4;
-#endif
-       }
-}
-
-void msp_per_irq_dispatch(void)
-{
-       u32     per_mask = *PER_INT_MSK_REG;
-       u32     per_status = *PER_INT_STS_REG;
-       u32     pending;
-
-       pending = per_status & per_mask;
-       if (pending) {
-               do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
-       } else {
-               spurious_interrupt();
-       }
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
deleted file mode 100644 (file)
index 83a1c5e..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * This file define the irq handler for MSP SLM subsystem interrupts.
- *
- * Copyright 2005-2006 PMC-Sierra, Inc, derived from irq_cpu.c
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-
-#include <asm/mipsregs.h>
-
-#include <msp_slp_int.h>
-#include <msp_regs.h>
-
-static inline void unmask_msp_slp_irq(struct irq_data *d)
-{
-       unsigned int irq = d->irq;
-
-       /* check for PER interrupt range */
-       if (irq < MSP_PER_INTBASE)
-               *SLP_INT_MSK_REG |= (1 << (irq - MSP_SLP_INTBASE));
-       else
-               *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
-}
-
-static inline void mask_msp_slp_irq(struct irq_data *d)
-{
-       unsigned int irq = d->irq;
-
-       /* check for PER interrupt range */
-       if (irq < MSP_PER_INTBASE)
-               *SLP_INT_MSK_REG &= ~(1 << (irq - MSP_SLP_INTBASE));
-       else
-               *PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE));
-}
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues.  Same for msp_slp_irq_end.
- */
-static inline void ack_msp_slp_irq(struct irq_data *d)
-{
-       unsigned int irq = d->irq;
-
-       /* check for PER interrupt range */
-       if (irq < MSP_PER_INTBASE)
-               *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE));
-       else
-               *PER_INT_STS_REG = (1 << (irq - MSP_PER_INTBASE));
-}
-
-static struct irq_chip msp_slp_irq_controller = {
-       .name = "MSP_SLP",
-       .irq_ack = ack_msp_slp_irq,
-       .irq_mask = mask_msp_slp_irq,
-       .irq_unmask = unmask_msp_slp_irq,
-};
-
-void __init msp_slp_irq_init(void)
-{
-       int i;
-
-       /* Mask/clear interrupts. */
-       *SLP_INT_MSK_REG = 0x00000000;
-       *PER_INT_MSK_REG = 0x00000000;
-       *SLP_INT_STS_REG = 0xFFFFFFFF;
-       *PER_INT_STS_REG = 0xFFFFFFFF;
-
-       /* initialize all the IRQ descriptors */
-       for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++)
-               irq_set_chip_and_handler(i, &msp_slp_irq_controller,
-                                        handle_level_irq);
-}
-
-void msp_slp_irq_dispatch(void)
-{
-       u32 pending;
-       int intbase;
-
-       intbase = MSP_SLP_INTBASE;
-       pending = *SLP_INT_STS_REG & *SLP_INT_MSK_REG;
-
-       /* check for PER interrupt */
-       if (pending == (1 << (MSP_INT_PER - MSP_SLP_INTBASE))) {
-               intbase = MSP_PER_INTBASE;
-               pending = *PER_INT_STS_REG & *PER_INT_MSK_REG;
-       }
-
-       /* check for spurious interrupt */
-       if (pending == 0x00000000) {
-               printk(KERN_ERR "Spurious %s interrupt?\n",
-                       (intbase == MSP_SLP_INTBASE) ? "SLP" : "PER");
-               return;
-       }
-
-       /* dispatch the irq */
-       do_IRQ(ffs(pending) + intbase - 1);
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_pci.c b/arch/mips/pmc-sierra/msp71xx/msp_pci.c
deleted file mode 100644 (file)
index f764fe7..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * The setup file for PCI related hardware on PMC-Sierra MSP processors.
- *
- * Copyright 2005-2006 PMC-Sierra, Inc.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <msp_prom.h>
-#include <msp_regs.h>
-
-extern void msp_pci_init(void);
-
-static int __init msp_pci_setup(void)
-{
-#if 0 /* Linux 2.6 initialization code to be completed */
-       if (getdeviceid() & DEV_ID_SINGLE_PC) {
-               /* If single card mode */
-               slmRegs *sreg = (slmRegs *) SREG_BASE;
-
-               sreg->single_pc_enable = SINGLE_PCCARD;
-       }
-#endif
-
-       msp_pci_init();
-
-       return 0;
-}
-
-subsys_initcall(msp_pci_setup);
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_prom.c b/arch/mips/pmc-sierra/msp71xx/msp_prom.c
deleted file mode 100644 (file)
index db00deb..0000000
+++ /dev/null
@@ -1,503 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *    PROM library initialisation code, assuming a version of
- *    pmon is the boot code.
- *
- * Copyright 2000,2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- *             ppopov@mvista.com or source@mvista.com
- *
- * This file was derived from Carsten Langgaard's
- * arch/mips/mips-boards/xx files.
- *
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-#include <asm/addrspace.h>
-#include <asm/bootinfo.h>
-#include <asm-generic/sections.h>
-#include <asm/page.h>
-
-#include <msp_prom.h>
-#include <msp_regs.h>
-
-/* global PROM environment variables and pointers */
-int prom_argc;
-char **prom_argv, **prom_envp;
-int *prom_vec;
-
-/* debug flag */
-int init_debug = 1;
-
-/* memory blocks */
-struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-/* default feature sets */
-static char msp_default_features[] =
-#if defined(CONFIG_PMC_MSP4200_EVAL) \
- || defined(CONFIG_PMC_MSP4200_GW)
-       "ERER";
-#elif defined(CONFIG_PMC_MSP7120_EVAL) \
- || defined(CONFIG_PMC_MSP7120_GW)
-       "EMEMSP";
-#elif defined(CONFIG_PMC_MSP7120_FPGA)
-       "EMEM";
-#endif
-
-/* conversion functions */
-static inline unsigned char str2hexnum(unsigned char c)
-{
-       if (c >= '0' && c <= '9')
-               return c - '0';
-       if (c >= 'a' && c <= 'f')
-               return c - 'a' + 10;
-       return 0; /* foo */
-}
-
-static inline int str2eaddr(unsigned char *ea, unsigned char *str)
-{
-       int index = 0;
-       unsigned char num = 0;
-
-       while (*str != '\0') {
-               if ((*str == '.') || (*str == ':')) {
-                       ea[index++] = num;
-                       num = 0;
-                       str++;
-               } else {
-                       num = num << 4;
-                       num |= str2hexnum(*str++);
-               }
-       }
-
-       if (index == 5) {
-               ea[index++] = num;
-               return 0;
-       } else
-               return -1;
-}
-EXPORT_SYMBOL(str2eaddr);
-
-static inline unsigned long str2hex(unsigned char *str)
-{
-       int value = 0;
-
-       while (*str) {
-               value = value << 4;
-               value |= str2hexnum(*str++);
-       }
-
-       return value;
-}
-
-/* function to query the system information */
-const char *get_system_type(void)
-{
-#if defined(CONFIG_PMC_MSP4200_EVAL)
-       return "PMC-Sierra MSP4200 Eval Board";
-#elif defined(CONFIG_PMC_MSP4200_GW)
-       return "PMC-Sierra MSP4200 VoIP Gateway";
-#elif defined(CONFIG_PMC_MSP7120_EVAL)
-       return "PMC-Sierra MSP7120 Eval Board";
-#elif defined(CONFIG_PMC_MSP7120_GW)
-       return "PMC-Sierra MSP7120 Residential Gateway";
-#elif defined(CONFIG_PMC_MSP7120_FPGA)
-       return "PMC-Sierra MSP7120 FPGA";
-#else
-       #error "What is the type of *your* MSP?"
-#endif
-}
-
-int get_ethernet_addr(char *ethaddr_name, char *ethernet_addr)
-{
-       char *ethaddr_str;
-
-       ethaddr_str = prom_getenv(ethaddr_name);
-       if (!ethaddr_str) {
-               printk(KERN_WARNING "%s not set in boot prom\n", ethaddr_name);
-               return -1;
-       }
-
-       if (str2eaddr(ethernet_addr, ethaddr_str) == -1) {
-               printk(KERN_WARNING "%s badly formatted-<%s>\n",
-                       ethaddr_name, ethaddr_str);
-               return -1;
-       }
-
-       if (init_debug > 1) {
-               int i;
-               printk(KERN_DEBUG "get_ethernet_addr: for %s ", ethaddr_name);
-               for (i = 0; i < 5; i++)
-                       printk(KERN_DEBUG "%02x:",
-                               (unsigned char)*(ethernet_addr+i));
-               printk(KERN_DEBUG "%02x\n", *(ethernet_addr+i));
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(get_ethernet_addr);
-
-static char *get_features(void)
-{
-       char *feature = prom_getenv(FEATURES);
-
-       if (feature == NULL) {
-               /* default features based on MACHINE_TYPE */
-               feature = msp_default_features;
-       }
-
-       return feature;
-}
-
-static char test_feature(char c)
-{
-       char *feature = get_features();
-
-       while (*feature) {
-               if (*feature++ == c)
-                       return *feature;
-               feature++;
-       }
-
-       return FEATURE_NOEXIST;
-}
-
-unsigned long get_deviceid(void)
-{
-       char *deviceid = prom_getenv(DEVICEID);
-
-       if (deviceid == NULL)
-               return *DEV_ID_REG;
-       else
-               return str2hex(deviceid);
-}
-
-char identify_pci(void)
-{
-       return test_feature(PCI_KEY);
-}
-EXPORT_SYMBOL(identify_pci);
-
-char identify_pcimux(void)
-{
-       return test_feature(PCIMUX_KEY);
-}
-
-char identify_sec(void)
-{
-       return test_feature(SEC_KEY);
-}
-EXPORT_SYMBOL(identify_sec);
-
-char identify_spad(void)
-{
-       return test_feature(SPAD_KEY);
-}
-EXPORT_SYMBOL(identify_spad);
-
-char identify_tdm(void)
-{
-       return test_feature(TDM_KEY);
-}
-EXPORT_SYMBOL(identify_tdm);
-
-char identify_zsp(void)
-{
-       return test_feature(ZSP_KEY);
-}
-EXPORT_SYMBOL(identify_zsp);
-
-static char identify_enetfeature(char key, unsigned long interface_num)
-{
-       char *feature = get_features();
-
-       while (*feature) {
-               if (*feature++ == key && interface_num-- == 0)
-                       return *feature;
-               feature++;
-       }
-
-       return FEATURE_NOEXIST;
-}
-
-char identify_enet(unsigned long interface_num)
-{
-       return identify_enetfeature(ENET_KEY, interface_num);
-}
-EXPORT_SYMBOL(identify_enet);
-
-char identify_enetTxD(unsigned long interface_num)
-{
-       return identify_enetfeature(ENETTXD_KEY, interface_num);
-}
-EXPORT_SYMBOL(identify_enetTxD);
-
-unsigned long identify_family(void)
-{
-       unsigned long deviceid;
-
-       deviceid = get_deviceid();
-
-       return deviceid & CPU_DEVID_FAMILY;
-}
-EXPORT_SYMBOL(identify_family);
-
-unsigned long identify_revision(void)
-{
-       unsigned long deviceid;
-
-       deviceid = get_deviceid();
-
-       return deviceid & CPU_DEVID_REVISION;
-}
-EXPORT_SYMBOL(identify_revision);
-
-/* PROM environment functions */
-char *prom_getenv(char *env_name)
-{
-       /*
-        * Return a pointer to the given environment variable.  prom_envp
-        * points to a null terminated array of pointers to variables.
-        * Environment variables are stored in the form of "memsize=64"
-        */
-
-       char **var = prom_envp;
-       int i = strlen(env_name);
-
-       while (*var) {
-               if (strncmp(env_name, *var, i) == 0) {
-                       return (*var + strlen(env_name) + 1);
-               }
-               var++;
-       }
-
-       return NULL;
-}
-
-/* PROM commandline functions */
-void  __init prom_init_cmdline(void)
-{
-       char *cp;
-       int actr;
-
-       actr = 1; /* Always ignore argv[0] */
-
-       cp = &(arcs_cmdline[0]);
-       while (actr < prom_argc) {
-               strcpy(cp, prom_argv[actr]);
-               cp += strlen(prom_argv[actr]);
-               *cp++ = ' ';
-               actr++;
-       }
-       if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */
-               --cp;
-       *cp = '\0';
-}
-
-/* memory allocation functions */
-static int __init prom_memtype_classify(unsigned int type)
-{
-       switch (type) {
-       case yamon_free:
-               return BOOT_MEM_RAM;
-       case yamon_prom:
-               return BOOT_MEM_ROM_DATA;
-       default:
-               return BOOT_MEM_RESERVED;
-       }
-}
-
-void __init prom_meminit(void)
-{
-       struct prom_pmemblock *p;
-
-       p = prom_getmdesc();
-
-       while (p->size) {
-               long type;
-               unsigned long base, size;
-
-               type = prom_memtype_classify(p->type);
-               base = p->base;
-               size = p->size;
-
-               add_memory_region(base, size, type);
-               p++;
-       }
-}
-
-void __init prom_free_prom_memory(void)
-{
-       int     argc;
-       char    **argv;
-       char    **envp;
-       char    *ptr;
-       int     len = 0;
-       int     i;
-       unsigned long addr;
-
-       /*
-        * preserve environment variables and command line from pmon/bbload
-        * first preserve the command line
-        */
-       for (argc = 0; argc < prom_argc; argc++) {
-               len += sizeof(char *);                  /* length of pointer */
-               len += strlen(prom_argv[argc]) + 1;     /* length of string */
-       }
-       len += sizeof(char *);          /* plus length of null pointer */
-
-       argv = kmalloc(len, GFP_KERNEL);
-       ptr = (char *) &argv[prom_argc + 1];    /* strings follow array */
-
-       for (argc = 0; argc < prom_argc; argc++) {
-               argv[argc] = ptr;
-               strcpy(ptr, prom_argv[argc]);
-               ptr += strlen(prom_argv[argc]) + 1;
-       }
-       argv[prom_argc] = NULL;         /* end array with null pointer */
-       prom_argv = argv;
-
-       /* next preserve the environment variables */
-       len = 0;
-       i = 0;
-       for (envp = prom_envp; *envp != NULL; envp++) {
-               i++;            /* count number of environment variables */
-               len += sizeof(char *);          /* length of pointer */
-               len += strlen(*envp) + 1;       /* length of string */
-       }
-       len += sizeof(char *);          /* plus length of null pointer */
-
-       envp = kmalloc(len, GFP_KERNEL);
-       ptr = (char *) &envp[i+1];
-
-       for (argc = 0; argc < i; argc++) {
-               envp[argc] = ptr;
-               strcpy(ptr, prom_envp[argc]);
-               ptr += strlen(prom_envp[argc]) + 1;
-       }
-       envp[i] = NULL;                 /* end array with null pointer */
-       prom_envp = envp;
-
-       for (i = 0; i < boot_mem_map.nr_map; i++) {
-               if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
-                       continue;
-
-               addr = boot_mem_map.map[i].addr;
-               free_init_pages("prom memory",
-                               addr, addr + boot_mem_map.map[i].size);
-       }
-}
-
-struct prom_pmemblock *__init prom_getmdesc(void)
-{
-       static char     memsz_env[] __initdata = "memsize";
-       static char     heaptop_env[] __initdata = "heaptop";
-       char            *str;
-       unsigned int    memsize;
-       unsigned int    heaptop;
-       int i;
-
-       str = prom_getenv(memsz_env);
-       if (!str) {
-               ppfinit("memsize not set in boot prom, "
-                       "set to default (32Mb)\n");
-               memsize = 0x02000000;
-       } else {
-               memsize = simple_strtol(str, NULL, 0);
-
-               if (memsize == 0) {
-                       /* if memsize is a bad size, use reasonable default */
-                       memsize = 0x02000000;
-               }
-
-               /* convert to physical address (removing caching bits, etc) */
-               memsize = CPHYSADDR(memsize);
-       }
-
-       str = prom_getenv(heaptop_env);
-       if (!str) {
-               heaptop = CPHYSADDR((u32)&_text);
-               ppfinit("heaptop not set in boot prom, "
-                       "set to default 0x%08x\n", heaptop);
-       } else {
-               heaptop = simple_strtol(str, NULL, 16);
-               if (heaptop == 0) {
-                       /* heaptop conversion bad, might have 0xValue */
-                       heaptop = simple_strtol(str, NULL, 0);
-
-                       if (heaptop == 0) {
-                               /* heaptop still bad, use reasonable default */
-                               heaptop = CPHYSADDR((u32)&_text);
-                       }
-               }
-
-               /* convert to physical address (removing caching bits, etc) */
-               heaptop = CPHYSADDR((u32)heaptop);
-       }
-
-       /* the base region */
-       i = 0;
-       mdesc[i].type = BOOT_MEM_RESERVED;
-       mdesc[i].base = 0x00000000;
-       mdesc[i].size = PAGE_ALIGN(0x300 + 0x80);
-               /* jtag interrupt vector + sizeof vector */
-
-       /* PMON data */
-       if (heaptop > mdesc[i].base + mdesc[i].size) {
-               i++;                    /* 1 */
-               mdesc[i].type = BOOT_MEM_ROM_DATA;
-               mdesc[i].base = mdesc[i-1].base + mdesc[i-1].size;
-               mdesc[i].size = heaptop - mdesc[i].base;
-       }
-
-       /* end of PMON data to start of kernel -- probably zero .. */
-       if (heaptop != CPHYSADDR((u32)_text)) {
-               i++;    /* 2 */
-               mdesc[i].type = BOOT_MEM_RAM;
-               mdesc[i].base = heaptop;
-               mdesc[i].size = CPHYSADDR((u32)_text) - mdesc[i].base;
-       }
-
-       /*  kernel proper */
-       i++;                    /* 3 */
-       mdesc[i].type = BOOT_MEM_RESERVED;
-       mdesc[i].base = CPHYSADDR((u32)_text);
-       mdesc[i].size = CPHYSADDR(PAGE_ALIGN((u32)_end)) - mdesc[i].base;
-
-       /* Remainder of RAM -- under memsize */
-       i++;                    /* 5 */
-       mdesc[i].type = yamon_free;
-       mdesc[i].base = mdesc[i-1].base + mdesc[i-1].size;
-       mdesc[i].size = memsize - mdesc[i].base;
-
-       return &mdesc[0];
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_serial.c b/arch/mips/pmc-sierra/msp71xx/msp_serial.c
deleted file mode 100644 (file)
index a1c7c7d..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * The setup file for serial related hardware on PMC-Sierra MSP processors.
- *
- * Copyright 2005 PMC-Sierra, Inc.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/serial_reg.h>
-#include <linux/slab.h>
-
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/serial.h>
-#include <linux/serial_8250.h>
-
-#include <msp_prom.h>
-#include <msp_int.h>
-#include <msp_regs.h>
-
-struct msp_uart_data {
-       int     last_lcr;
-};
-
-static void msp_serial_out(struct uart_port *p, int offset, int value)
-{
-       struct msp_uart_data *d = p->private_data;
-
-       if (offset == UART_LCR)
-               d->last_lcr = value;
-
-       offset <<= p->regshift;
-       writeb(value, p->membase + offset);
-}
-
-static unsigned int msp_serial_in(struct uart_port *p, int offset)
-{
-       offset <<= p->regshift;
-
-       return readb(p->membase + offset);
-}
-
-static int msp_serial_handle_irq(struct uart_port *p)
-{
-       struct msp_uart_data *d = p->private_data;
-       unsigned int iir = readb(p->membase + (UART_IIR << p->regshift));
-
-       if (serial8250_handle_irq(p, iir)) {
-               return 1;
-       } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
-               /*
-                * The DesignWare APB UART has an Busy Detect (0x07) interrupt
-                * meaning an LCR write attempt occurred while the UART was
-                * busy. The interrupt must be cleared by reading the UART
-                * status register (USR) and the LCR re-written.
-                *
-                * Note: MSP reserves 0x20 bytes of address space for the UART
-                * and the USR is mapped in a separate block at an offset of
-                * 0xc0 from the start of the UART.
-                */
-               (void)readb(p->membase + 0xc0);
-               writeb(d->last_lcr, p->membase + (UART_LCR << p->regshift));
-
-               return 1;
-       }
-
-       return 0;
-}
-
-void __init msp_serial_setup(void)
-{
-       char    *s;
-       char    *endp;
-       struct uart_port up;
-       unsigned int uartclk;
-
-       memset(&up, 0, sizeof(up));
-
-       /* Check if clock was specified in environment */
-       s = prom_getenv("uartfreqhz");
-       if(!(s && *s && (uartclk = simple_strtoul(s, &endp, 10)) && *endp == 0))
-               uartclk = MSP_BASE_BAUD;
-       ppfinit("UART clock set to %d\n", uartclk);
-
-       /* Initialize first serial port */
-       up.mapbase      = MSP_UART0_BASE;
-       up.membase      = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
-       up.irq          = MSP_INT_UART0;
-       up.uartclk      = uartclk;
-       up.regshift     = 2;
-       up.iotype       = UPIO_MEM;
-       up.flags        = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
-       up.type         = PORT_16550A;
-       up.line         = 0;
-       up.serial_out   = msp_serial_out;
-       up.serial_in    = msp_serial_in;
-       up.handle_irq   = msp_serial_handle_irq;
-       up.private_data = kzalloc(sizeof(struct msp_uart_data), GFP_KERNEL);
-       if (!up.private_data) {
-               pr_err("failed to allocate uart private data\n");
-               return;
-       }
-       if (early_serial_setup(&up)) {
-               kfree(up.private_data);
-               pr_err("Early serial init of port 0 failed\n");
-       }
-
-       /* Initialize the second serial port, if one exists */
-       switch (mips_machtype) {
-               case MACH_MSP4200_EVAL:
-               case MACH_MSP4200_GW:
-               case MACH_MSP4200_FPGA:
-               case MACH_MSP7120_EVAL:
-               case MACH_MSP7120_GW:
-               case MACH_MSP7120_FPGA:
-                       /* Enable UART1 on MSP4200 and MSP7120 */
-                       *GPIO_CFG2_REG = 0x00002299;
-                       break;
-
-               default:
-                       return; /* No second serial port, good-bye. */
-       }
-
-       up.mapbase      = MSP_UART1_BASE;
-       up.membase      = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
-       up.irq          = MSP_INT_UART1;
-       up.line         = 1;
-       up.private_data         = (void*)UART1_STATUS_REG;
-       if (early_serial_setup(&up)) {
-               kfree(up.private_data);
-               pr_err("Early serial init of port 1 failed\n");
-       }
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_setup.c b/arch/mips/pmc-sierra/msp71xx/msp_setup.c
deleted file mode 100644 (file)
index 7a834b2..0000000
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * The generic setup file for PMC-Sierra MSP processors
- *
- * Copyright 2005-2007 PMC-Sierra, Inc,
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <asm/bootinfo.h>
-#include <asm/cacheflush.h>
-#include <asm/r4kcache.h>
-#include <asm/reboot.h>
-#include <asm/smp-ops.h>
-#include <asm/time.h>
-
-#include <msp_prom.h>
-#include <msp_regs.h>
-
-#if defined(CONFIG_PMC_MSP7120_GW)
-#include <msp_regops.h>
-#define MSP_BOARD_RESET_GPIO   9
-#endif
-
-extern void msp_serial_setup(void);
-extern void pmctwiled_setup(void);
-
-#if defined(CONFIG_PMC_MSP7120_EVAL) || \
-    defined(CONFIG_PMC_MSP7120_GW) || \
-    defined(CONFIG_PMC_MSP7120_FPGA)
-/*
- * Performs the reset for MSP7120-based boards
- */
-void msp7120_reset(void)
-{
-       void *start, *end, *iptr;
-       register int i;
-
-       /* Diasble all interrupts */
-       local_irq_disable();
-#ifdef CONFIG_SYS_SUPPORTS_MULTITHREADING
-       dvpe();
-#endif
-
-       /* Cache the reset code of this function */
-       __asm__ __volatile__ (
-               "       .set    push                            \n"
-               "       .set    mips3                           \n"
-               "       la      %0,startpoint                   \n"
-               "       la      %1,endpoint                     \n"
-               "       .set    pop                             \n"
-               : "=r" (start), "=r" (end)
-               :
-       );
-
-       for (iptr = (void *)((unsigned int)start & ~(L1_CACHE_BYTES - 1));
-            iptr < end; iptr += L1_CACHE_BYTES)
-               cache_op(Fill, iptr);
-
-       __asm__ __volatile__ (
-               "startpoint:                                    \n"
-       );
-
-       /* Put the DDRC into self-refresh mode */
-       DDRC_INDIRECT_WRITE(DDRC_CTL(10), 0xb, 1 << 16);
-
-       /*
-        * IMPORTANT!
-        * DO NOT do anything from here on out that might even
-        * think about fetching from RAM - i.e., don't call any
-        * non-inlined functions, and be VERY sure that any inline
-        * functions you do call do NOT access any sort of RAM
-        * anywhere!
-        */
-
-       /* Wait a bit for the DDRC to settle */
-       for (i = 0; i < 100000000; i++);
-
-#if defined(CONFIG_PMC_MSP7120_GW)
-       /*
-        * Set GPIO 9 HI, (tied to board reset logic)
-        * GPIO 9 is the 4th GPIO of register 3
-        *
-        * NOTE: We cannot use the higher-level msp_gpio_mode()/out()
-        * as GPIO char driver may not be enabled and it would look up
-        * data inRAM!
-        */
-       set_value_reg32(GPIO_CFG3_REG, 0xf000, 0x8000);
-       set_reg32(GPIO_DATA3_REG, 8);
-
-       /*
-        * In case GPIO9 doesn't reset the board (jumper configurable!)
-        * fallback to device reset below.
-        */
-#endif
-       /* Set bit 1 of the MSP7120 reset register */
-       *RST_SET_REG = 0x00000001;
-
-       __asm__ __volatile__ (
-               "endpoint:                                      \n"
-       );
-}
-#endif
-
-void msp_restart(char *command)
-{
-       printk(KERN_WARNING "Now rebooting .......\n");
-
-#if defined(CONFIG_PMC_MSP7120_EVAL) || \
-    defined(CONFIG_PMC_MSP7120_GW) || \
-    defined(CONFIG_PMC_MSP7120_FPGA)
-       msp7120_reset();
-#else
-       /* No chip-specific reset code, just jump to the ROM reset vector */
-       set_c0_status(ST0_BEV | ST0_ERL);
-       change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
-       flush_cache_all();
-       write_c0_wired(0);
-
-       __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
-#endif
-}
-
-void msp_halt(void)
-{
-       printk(KERN_WARNING "\n** You can safely turn off the power\n");
-       while (1)
-               /* If possible call official function to get CPU WARs */
-               if (cpu_wait)
-                       (*cpu_wait)();
-               else
-                       __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0");
-}
-
-void msp_power_off(void)
-{
-       msp_halt();
-}
-
-void __init plat_mem_setup(void)
-{
-       _machine_restart = msp_restart;
-       _machine_halt = msp_halt;
-       pm_power_off = msp_power_off;
-}
-
-extern struct plat_smp_ops msp_smtc_smp_ops;
-
-void __init prom_init(void)
-{
-       unsigned long family;
-       unsigned long revision;
-
-       prom_argc = fw_arg0;
-       prom_argv = (char **)fw_arg1;
-       prom_envp = (char **)fw_arg2;
-
-       /*
-        * Someday we can use this with PMON2000 to get a
-        * platform call prom routines for output etc. without
-        * having to use grody hacks.  For now it's unused.
-        *
-        * struct callvectors *cv = (struct callvectors *) fw_arg3;
-        */
-       family = identify_family();
-       revision = identify_revision();
-
-       switch (family) {
-       case FAMILY_FPGA:
-               if (FPGA_IS_MSP4200(revision)) {
-                       /* Old-style revision ID */
-                       mips_machtype = MACH_MSP4200_FPGA;
-               } else {
-                       mips_machtype = MACH_MSP_OTHER;
-               }
-               break;
-
-       case FAMILY_MSP4200:
-#if defined(CONFIG_PMC_MSP4200_EVAL)
-               mips_machtype  = MACH_MSP4200_EVAL;
-#elif defined(CONFIG_PMC_MSP4200_GW)
-               mips_machtype  = MACH_MSP4200_GW;
-#else
-               mips_machtype = MACH_MSP_OTHER;
-#endif
-               break;
-
-       case FAMILY_MSP4200_FPGA:
-               mips_machtype  = MACH_MSP4200_FPGA;
-               break;
-
-       case FAMILY_MSP7100:
-#if defined(CONFIG_PMC_MSP7120_EVAL)
-               mips_machtype = MACH_MSP7120_EVAL;
-#elif defined(CONFIG_PMC_MSP7120_GW)
-               mips_machtype = MACH_MSP7120_GW;
-#else
-               mips_machtype = MACH_MSP_OTHER;
-#endif
-               break;
-
-       case FAMILY_MSP7100_FPGA:
-               mips_machtype  = MACH_MSP7120_FPGA;
-               break;
-
-       default:
-               /* we don't recognize the machine */
-               mips_machtype  = MACH_UNKNOWN;
-               panic("***Bogosity factor five***, exiting");
-               break;
-       }
-
-       prom_init_cmdline();
-
-       prom_meminit();
-
-       /*
-        * Sub-system setup follows.
-        * Setup functions can  either be called here or using the
-        * subsys_initcall mechanism (i.e. see msp_pci_setup). The
-        * order in which they are called can be changed by using the
-        * link order in arch/mips/pmc-sierra/msp71xx/Makefile.
-        *
-        * NOTE: Please keep sub-system specific initialization code
-        * in separate specific files.
-        */
-       msp_serial_setup();
-
-       if (register_vsmp_smp_ops()) {
-#ifdef CONFIG_MIPS_MT_SMTC
-               register_smp_ops(&msp_smtc_smp_ops);
-#endif
-       }
-
-#ifdef CONFIG_PMCTWILED
-       /*
-        * Setup LED states before the subsys_initcall loads other
-        * dependent drivers/modules.
-        */
-       pmctwiled_setup();
-#endif
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smp.c b/arch/mips/pmc-sierra/msp71xx/msp_smp.c
deleted file mode 100644 (file)
index 1017058..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
- * Copyright (C) 2001 Ralf Baechle
- * Copyright (C) 2010 PMC-Sierra, Inc.
- *
- *  VSMP support for MSP platforms . Derived from malta vsmp support.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- */
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-
-#ifdef CONFIG_MIPS_MT_SMP
-#define MIPS_CPU_IPI_RESCHED_IRQ 0     /* SW int 0 for resched */
-#define MIPS_CPU_IPI_CALL_IRQ 1                /* SW int 1 for call */
-
-
-static void ipi_resched_dispatch(void)
-{
-       do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
-       do_IRQ(MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
-       smp_call_function_interrupt();
-
-       return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
-       .handler        = ipi_resched_interrupt,
-       .flags          = IRQF_PERCPU,
-       .name           = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
-       .handler        = ipi_call_interrupt,
-       .flags          = IRQF_PERCPU,
-       .name           = "IPI_call"
-};
-
-void __init arch_init_ipiirq(int irq, struct irqaction *action)
-{
-       setup_irq(irq, action);
-       irq_set_handler(irq, handle_percpu_irq);
-}
-
-void __init msp_vsmp_int_init(void)
-{
-       set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-       set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-       arch_init_ipiirq(MIPS_CPU_IPI_RESCHED_IRQ, &irq_resched);
-       arch_init_ipiirq(MIPS_CPU_IPI_CALL_IRQ, &irq_call);
-}
-#endif /* CONFIG_MIPS_MT_SMP */
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smtc.c b/arch/mips/pmc-sierra/msp71xx/msp_smtc.c
deleted file mode 100644 (file)
index c8dcc1c..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * MSP71xx Platform-specific hooks for SMP operation
- */
-#include <linux/irq.h>
-#include <linux/init.h>
-
-#include <asm/mipsmtregs.h>
-#include <asm/mipsregs.h>
-#include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
-
-/* VPE/SMP Prototype implements platform interfaces directly */
-
-/*
- * Cause the specified action to be performed on a targeted "CPU"
- */
-
-static void msp_smtc_send_ipi_single(int cpu, unsigned int action)
-{
-       /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
-       smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
-}
-
-static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
-                                               unsigned int action)
-{
-       unsigned int i;
-
-       for_each_cpu(i, mask)
-               msp_smtc_send_ipi_single(i, action);
-}
-
-/*
- * Post-config but pre-boot cleanup entry point
- */
-static void __cpuinit msp_smtc_init_secondary(void)
-{
-       int myvpe;
-
-       /* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
-       myvpe = read_c0_tcbind() & TCBIND_CURVPE;
-       if (myvpe > 0)
-               change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
-                               STATUSF_IP6 | STATUSF_IP7);
-       smtc_init_secondary();
-}
-
-/*
- * Platform "CPU" startup hook
- */
-static void __cpuinit msp_smtc_boot_secondary(int cpu,
-                                       struct task_struct *idle)
-{
-       smtc_boot_secondary(cpu, idle);
-}
-
-/*
- * SMP initialization finalization entry point
- */
-static void __cpuinit msp_smtc_smp_finish(void)
-{
-       smtc_smp_finish();
-}
-
-/*
- * Hook for after all CPUs are online
- */
-
-static void msp_smtc_cpus_done(void)
-{
-}
-
-/*
- * Platform SMP pre-initialization
- *
- * As noted above, we can assume a single CPU for now
- * but it may be multithreaded.
- */
-
-static void __init msp_smtc_smp_setup(void)
-{
-       /*
-        * we won't get the definitive value until
-        * we've run smtc_prepare_cpus later, but
-        */
-
-       if (read_c0_config3() & (1 << 2))
-               smp_num_siblings = smtc_build_cpu_map(0);
-}
-
-static void __init msp_smtc_prepare_cpus(unsigned int max_cpus)
-{
-       smtc_prepare_cpus(max_cpus);
-}
-
-struct plat_smp_ops msp_smtc_smp_ops = {
-       .send_ipi_single        = msp_smtc_send_ipi_single,
-       .send_ipi_mask          = msp_smtc_send_ipi_mask,
-       .init_secondary         = msp_smtc_init_secondary,
-       .smp_finish             = msp_smtc_smp_finish,
-       .cpus_done              = msp_smtc_cpus_done,
-       .boot_secondary         = msp_smtc_boot_secondary,
-       .smp_setup              = msp_smtc_smp_setup,
-       .prepare_cpus           = msp_smtc_prepare_cpus,
-};
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c
deleted file mode 100644 (file)
index 8b42f30..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Setting up the clock on MSP SOCs.  No RTC typically.
- *
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- */
-
-#include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-
-#include <asm/cevt-r4k.h>
-#include <asm/mipsregs.h>
-#include <asm/time.h>
-
-#include <msp_prom.h>
-#include <msp_int.h>
-#include <msp_regs.h>
-
-#define get_current_vpe()   \
-       ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
-
-static struct irqaction timer_vpe1;
-static int tim_installed;
-
-void __init plat_time_init(void)
-{
-       char    *endp, *s;
-       unsigned long cpu_rate = 0;
-
-       if (cpu_rate == 0) {
-               s = prom_getenv("clkfreqhz");
-               cpu_rate = simple_strtoul(s, &endp, 10);
-               if (endp != NULL && *endp != 0) {
-                       printk(KERN_ERR
-                               "Clock rate in Hz parse error: %s\n", s);
-                       cpu_rate = 0;
-               }
-       }
-
-       if (cpu_rate == 0) {
-               s = prom_getenv("clkfreq");
-               cpu_rate = 1000 * simple_strtoul(s, &endp, 10);
-               if (endp != NULL && *endp != 0) {
-                       printk(KERN_ERR
-                               "Clock rate in MHz parse error: %s\n", s);
-                       cpu_rate = 0;
-               }
-       }
-
-       if (cpu_rate == 0) {
-#if defined(CONFIG_PMC_MSP7120_EVAL) \
- || defined(CONFIG_PMC_MSP7120_GW)
-               cpu_rate = 400000000;
-#elif defined(CONFIG_PMC_MSP7120_FPGA)
-               cpu_rate = 25000000;
-#else
-               cpu_rate = 150000000;
-#endif
-               printk(KERN_ERR
-                       "Failed to determine CPU clock rate, "
-                       "assuming %ld hz ...\n", cpu_rate);
-       }
-
-       printk(KERN_WARNING "Clock rate set to %ld\n", cpu_rate);
-
-       /* timer frequency is 1/2 clock rate */
-       mips_hpt_frequency = cpu_rate/2;
-}
-
-unsigned int __cpuinit get_c0_compare_int(void)
-{
-       /* MIPS_MT modes may want timer for second VPE */
-       if ((get_current_vpe()) && !tim_installed) {
-               memcpy(&timer_vpe1, &c0_compare_irqaction, sizeof(timer_vpe1));
-               setup_irq(MSP_INT_VPE1_TIMER, &timer_vpe1);
-               tim_installed++;
-       }
-
-       return get_current_vpe() ? MSP_INT_VPE1_TIMER : MSP_INT_VPE0_TIMER;
-}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_usb.c b/arch/mips/pmc-sierra/msp71xx/msp_usb.c
deleted file mode 100644 (file)
index 9a1aef8..0000000
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * The setup file for USB related hardware on PMC-Sierra MSP processors.
- *
- * Copyright 2006 PMC-Sierra, Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_GADGET)
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-
-#include <asm/mipsregs.h>
-
-#include <msp_regs.h>
-#include <msp_int.h>
-#include <msp_prom.h>
-#include <msp_usb.h>
-
-
-#if defined(CONFIG_USB_EHCI_HCD)
-static struct resource msp_usbhost0_resources[] = {
-       [0] = { /* EHCI-HS operational and capabilities registers */
-               .start  = MSP_USB0_HS_START,
-               .end    = MSP_USB0_HS_END,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_USB,
-               .end    = MSP_INT_USB,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [2] = { /* MSBus-to-AMBA bridge register space */
-               .start  = MSP_USB0_MAB_START,
-               .end    = MSP_USB0_MAB_END,
-               .flags  = IORESOURCE_MEM,
-       },
-       [3] = { /* Identification and general hardware parameters */
-               .start  = MSP_USB0_ID_START,
-               .end    = MSP_USB0_ID_END,
-               .flags  = IORESOURCE_MEM,
-       },
-};
-
-static u64 msp_usbhost0_dma_mask = 0xffffffffUL;
-
-static struct mspusb_device msp_usbhost0_device = {
-       .dev    = {
-               .name   = "pmcmsp-ehci",
-               .id     = 0,
-               .dev    = {
-                       .dma_mask = &msp_usbhost0_dma_mask,
-                       .coherent_dma_mask = 0xffffffffUL,
-               },
-               .num_resources  = ARRAY_SIZE(msp_usbhost0_resources),
-               .resource       = msp_usbhost0_resources,
-       },
-};
-
-/* MSP7140/MSP82XX has two USB2 hosts. */
-#ifdef CONFIG_MSP_HAS_DUAL_USB
-static u64 msp_usbhost1_dma_mask = 0xffffffffUL;
-
-static struct resource msp_usbhost1_resources[] = {
-       [0] = { /* EHCI-HS operational and capabilities registers */
-               .start  = MSP_USB1_HS_START,
-               .end    = MSP_USB1_HS_END,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_USB,
-               .end    = MSP_INT_USB,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [2] = { /* MSBus-to-AMBA bridge register space */
-               .start  = MSP_USB1_MAB_START,
-               .end    = MSP_USB1_MAB_END,
-               .flags  = IORESOURCE_MEM,
-       },
-       [3] = { /* Identification and general hardware parameters */
-               .start  = MSP_USB1_ID_START,
-               .end    = MSP_USB1_ID_END,
-               .flags  = IORESOURCE_MEM,
-       },
-};
-
-static struct mspusb_device msp_usbhost1_device = {
-       .dev    = {
-               .name   = "pmcmsp-ehci",
-               .id     = 1,
-               .dev    = {
-                       .dma_mask = &msp_usbhost1_dma_mask,
-                       .coherent_dma_mask = 0xffffffffUL,
-               },
-               .num_resources  = ARRAY_SIZE(msp_usbhost1_resources),
-               .resource       = msp_usbhost1_resources,
-       },
-};
-#endif /* CONFIG_MSP_HAS_DUAL_USB */
-#endif /* CONFIG_USB_EHCI_HCD */
-
-#if defined(CONFIG_USB_GADGET)
-static struct resource msp_usbdev0_resources[] = {
-       [0] = { /* EHCI-HS operational and capabilities registers */
-               .start  = MSP_USB0_HS_START,
-               .end    = MSP_USB0_HS_END,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_USB,
-               .end    = MSP_INT_USB,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [2] = { /* MSBus-to-AMBA bridge register space */
-               .start  = MSP_USB0_MAB_START,
-               .end    = MSP_USB0_MAB_END,
-               .flags  = IORESOURCE_MEM,
-       },
-       [3] = { /* Identification and general hardware parameters */
-               .start  = MSP_USB0_ID_START,
-               .end    = MSP_USB0_ID_END,
-               .flags  = IORESOURCE_MEM,
-       },
-};
-
-static u64 msp_usbdev_dma_mask = 0xffffffffUL;
-
-/* This may need to be converted to a mspusb_device, too. */
-static struct mspusb_device msp_usbdev0_device = {
-       .dev    = {
-               .name   = "msp71xx_udc",
-               .id     = 0,
-               .dev    = {
-                       .dma_mask = &msp_usbdev_dma_mask,
-                       .coherent_dma_mask = 0xffffffffUL,
-               },
-               .num_resources  = ARRAY_SIZE(msp_usbdev0_resources),
-               .resource       = msp_usbdev0_resources,
-       },
-};
-
-#ifdef CONFIG_MSP_HAS_DUAL_USB
-static struct resource msp_usbdev1_resources[] = {
-       [0] = { /* EHCI-HS operational and capabilities registers */
-               .start  = MSP_USB1_HS_START,
-               .end    = MSP_USB1_HS_END,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = MSP_INT_USB,
-               .end    = MSP_INT_USB,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [2] = { /* MSBus-to-AMBA bridge register space */
-               .start  = MSP_USB1_MAB_START,
-               .end    = MSP_USB1_MAB_END,
-               .flags  = IORESOURCE_MEM,
-       },
-       [3] = { /* Identification and general hardware parameters */
-               .start  = MSP_USB1_ID_START,
-               .end    = MSP_USB1_ID_END,
-               .flags  = IORESOURCE_MEM,
-       },
-};
-
-/* This may need to be converted to a mspusb_device, too. */
-static struct mspusb_device msp_usbdev1_device = {
-       .dev    = {
-               .name   = "msp71xx_udc",
-               .id     = 0,
-               .dev    = {
-                       .dma_mask = &msp_usbdev_dma_mask,
-                       .coherent_dma_mask = 0xffffffffUL,
-               },
-               .num_resources  = ARRAY_SIZE(msp_usbdev1_resources),
-               .resource       = msp_usbdev1_resources,
-       },
-};
-
-#endif /* CONFIG_MSP_HAS_DUAL_USB */
-#endif /* CONFIG_USB_GADGET */
-
-static int __init msp_usb_setup(void)
-{
-       char            *strp;
-       char            envstr[32];
-       struct platform_device *msp_devs[NUM_USB_DEVS];
-       unsigned int val;
-
-       /* construct environment name usbmode */
-       /* set usbmode <host/device> as pmon environment var */
-       /*
-        * Could this perhaps be integrated into the "features" env var?
-        * Use the features key "U", and follow with "H" for host-mode,
-        * "D" for device-mode.  If it works for Ethernet, why not USB...
-        *  -- hammtrev, 2007/03/22
-        */
-       snprintf((char *)&envstr[0], sizeof(envstr), "usbmode");
-
-       /* set default host mode */
-       val = 1;
-
-       /* get environment string */
-       strp = prom_getenv((char *)&envstr[0]);
-       if (strp) {
-               /* compare string */
-               if (!strcmp(strp, "device"))
-                       val = 0;
-       }
-
-       if (val) {
-#if defined(CONFIG_USB_EHCI_HCD)
-               msp_devs[0] = &msp_usbhost0_device.dev;
-               ppfinit("platform add USB HOST done %s.\n", msp_devs[0]->name);
-#ifdef CONFIG_MSP_HAS_DUAL_USB
-               msp_devs[1] = &msp_usbhost1_device.dev;
-               ppfinit("platform add USB HOST done %s.\n", msp_devs[1]->name);
-#endif
-#else
-               ppfinit("%s: echi_hcd not supported\n", __FILE__);
-#endif  /* CONFIG_USB_EHCI_HCD */
-       } else {
-#if defined(CONFIG_USB_GADGET)
-               /* get device mode structure */
-               msp_devs[0] = &msp_usbdev0_device.dev;
-               ppfinit("platform add USB DEVICE done %s.\n"
-                                       , msp_devs[0]->name);
-#ifdef CONFIG_MSP_HAS_DUAL_USB
-               msp_devs[1] = &msp_usbdev1_device.dev;
-               ppfinit("platform add USB DEVICE done %s.\n"
-                                       , msp_devs[1]->name);
-#endif
-#else
-               ppfinit("%s: usb_gadget not supported\n", __FILE__);
-#endif  /* CONFIG_USB_GADGET */
-       }
-       /* add device */
-       platform_add_devices(msp_devs, ARRAY_SIZE(msp_devs));
-
-       return 0;
-}
-
-subsys_initcall(msp_usb_setup);
-#endif /* CONFIG_USB_EHCI_HCD || CONFIG_USB_GADGET */
diff --git a/arch/mips/pmcs-msp71xx/Kconfig b/arch/mips/pmcs-msp71xx/Kconfig
new file mode 100644 (file)
index 0000000..3482b8c
--- /dev/null
@@ -0,0 +1,48 @@
+choice
+       prompt "PMC-Sierra MSP SOC type"
+       depends on PMC_MSP
+
+config PMC_MSP4200_EVAL
+       bool "PMC-Sierra MSP4200 Eval Board"
+       select IRQ_MSP_SLP
+       select HW_HAS_PCI
+
+config PMC_MSP4200_GW
+       bool "PMC-Sierra MSP4200 VoIP Gateway"
+       select IRQ_MSP_SLP
+       select HW_HAS_PCI
+
+config PMC_MSP7120_EVAL
+       bool "PMC-Sierra MSP7120 Eval Board"
+       select SYS_SUPPORTS_MULTITHREADING
+       select IRQ_MSP_CIC
+       select HW_HAS_PCI
+
+config PMC_MSP7120_GW
+       bool "PMC-Sierra MSP7120 Residential Gateway"
+       select SYS_SUPPORTS_MULTITHREADING
+       select IRQ_MSP_CIC
+       select HW_HAS_PCI
+       select MSP_HAS_USB
+       select MSP_ETH
+
+config PMC_MSP7120_FPGA
+       bool "PMC-Sierra MSP7120 FPGA"
+       select SYS_SUPPORTS_MULTITHREADING
+       select IRQ_MSP_CIC
+       select HW_HAS_PCI
+
+endchoice
+
+config MSP_HAS_USB
+       boolean
+       depends on PMC_MSP
+
+config MSP_ETH
+       boolean
+       select MSP_HAS_MAC
+       depends on PMC_MSP
+
+config MSP_HAS_MAC
+       boolean
+       depends on PMC_MSP
diff --git a/arch/mips/pmcs-msp71xx/Makefile b/arch/mips/pmcs-msp71xx/Makefile
new file mode 100644 (file)
index 0000000..cefba77
--- /dev/null
@@ -0,0 +1,14 @@
+#
+# Makefile for the PMC-Sierra MSP SOCs
+#
+obj-y += msp_prom.o msp_setup.o msp_irq.o \
+        msp_time.o msp_serial.o msp_elb.o
+obj-$(CONFIG_HAVE_GPIO_LIB) += gpio.o gpio_extended.o
+obj-$(CONFIG_PMC_MSP7120_GW) += msp_hwbutton.o
+obj-$(CONFIG_IRQ_MSP_SLP) += msp_irq_slp.o
+obj-$(CONFIG_IRQ_MSP_CIC) += msp_irq_cic.o msp_irq_per.o
+obj-$(CONFIG_PCI) += msp_pci.o
+obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o
+obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o
+obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o
+obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o
diff --git a/arch/mips/pmcs-msp71xx/Platform b/arch/mips/pmcs-msp71xx/Platform
new file mode 100644 (file)
index 0000000..7af0734
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# PMC-Sierra MSP SOCs
+#
+platform-$(CONFIG_PMC_MSP)     += pmcs-msp71xx/
+cflags-$(CONFIG_PMC_MSP)       += -I$(srctree)/arch/mips/include/asm/mach-pmcs-msp71xx \
+                                       -mno-branch-likely
+load-$(CONFIG_PMC_MSP)         += 0xffffffff80100000
diff --git a/arch/mips/pmcs-msp71xx/gpio.c b/arch/mips/pmcs-msp71xx/gpio.c
new file mode 100644 (file)
index 0000000..aaccbe5
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * Generic PMC MSP71xx GPIO handling. These base gpio are controlled by two
+ * types of registers. The data register sets the output level when in output
+ * mode and when in input mode will contain the value at the input. The config
+ * register sets the various modes for each gpio.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * @author Patrick Glass <patrickglass@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+
+#define MSP71XX_CFG_OFFSET(gpio)       (4 * (gpio))
+#define CONF_MASK                      0x0F
+#define MSP71XX_GPIO_INPUT             0x01
+#define MSP71XX_GPIO_OUTPUT            0x08
+
+#define MSP71XX_GPIO_BASE              0x0B8400000L
+
+#define to_msp71xx_gpio_chip(c) container_of(c, struct msp71xx_gpio_chip, chip)
+
+static spinlock_t gpio_lock;
+
+/*
+ * struct msp71xx_gpio_chip - container for gpio chip and registers
+ * @chip: chip structure for the specified gpio bank
+ * @data_reg: register for reading and writing the gpio pin value
+ * @config_reg: register to set the mode for the gpio pin bank
+ * @out_drive_reg: register to set the output drive mode for the gpio pin bank
+ */
+struct msp71xx_gpio_chip {
+       struct gpio_chip chip;
+       void __iomem *data_reg;
+       void __iomem *config_reg;
+       void __iomem *out_drive_reg;
+};
+
+/*
+ * msp71xx_gpio_get() - return the chip's gpio value
+ * @chip: chip structure which controls the specified gpio
+ * @offset: gpio whose value will be returned
+ *
+ * It will return 0 if gpio value is low and other if high.
+ */
+static int msp71xx_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip);
+
+       return __raw_readl(msp_chip->data_reg) & (1 << offset);
+}
+
+/*
+ * msp71xx_gpio_set() - set the output value for the gpio
+ * @chip: chip structure who controls the specified gpio
+ * @offset: gpio whose value will be assigned
+ * @value: logic level to assign to the gpio initially
+ *
+ * This will set the gpio bit specified to the desired value. It will set the
+ * gpio pin low if value is 0 otherwise it will be high.
+ */
+static void msp71xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+       struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip);
+       unsigned long flags;
+       u32 data;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       data = __raw_readl(msp_chip->data_reg);
+       if (value)
+               data |= (1 << offset);
+       else
+               data &= ~(1 << offset);
+       __raw_writel(data, msp_chip->data_reg);
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+}
+
+/*
+ * msp71xx_set_gpio_mode() - declare the mode for a gpio
+ * @chip: chip structure which controls the specified gpio
+ * @offset: gpio whose value will be assigned
+ * @mode: desired configuration for the gpio (see datasheet)
+ *
+ * It will set the gpio pin config to the @mode value passed in.
+ */
+static int msp71xx_set_gpio_mode(struct gpio_chip *chip,
+                                unsigned offset, int mode)
+{
+       struct msp71xx_gpio_chip *msp_chip = to_msp71xx_gpio_chip(chip);
+       const unsigned bit_offset = MSP71XX_CFG_OFFSET(offset);
+       unsigned long flags;
+       u32 cfg;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       cfg = __raw_readl(msp_chip->config_reg);
+       cfg &= ~(CONF_MASK << bit_offset);
+       cfg |= (mode << bit_offset);
+       __raw_writel(cfg, msp_chip->config_reg);
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       return 0;
+}
+
+/*
+ * msp71xx_direction_output() - declare the direction mode for a gpio
+ * @chip: chip structure which controls the specified gpio
+ * @offset: gpio whose value will be assigned
+ * @value: logic level to assign to the gpio initially
+ *
+ * This call will set the mode for the @gpio to output. It will set the
+ * gpio pin low if value is 0 otherwise it will be high.
+ */
+static int msp71xx_direction_output(struct gpio_chip *chip,
+                                   unsigned offset, int value)
+{
+       msp71xx_gpio_set(chip, offset, value);
+
+       return msp71xx_set_gpio_mode(chip, offset, MSP71XX_GPIO_OUTPUT);
+}
+
+/*
+ * msp71xx_direction_input() - declare the direction mode for a gpio
+ * @chip: chip structure which controls the specified gpio
+ * @offset: gpio to which the value will be assigned
+ *
+ * This call will set the mode for the @gpio to input.
+ */
+static int msp71xx_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       return msp71xx_set_gpio_mode(chip, offset, MSP71XX_GPIO_INPUT);
+}
+
+/*
+ * msp71xx_set_output_drive() - declare the output drive for the gpio line
+ * @gpio: gpio pin whose output drive you wish to modify
+ * @value: zero for active drain 1 for open drain drive
+ *
+ * This call will set the output drive mode for the @gpio to output.
+ */
+int msp71xx_set_output_drive(unsigned gpio, int value)
+{
+       unsigned long flags;
+       u32 data;
+
+       if (gpio > 15 || gpio < 0)
+               return -EINVAL;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       data = __raw_readl((void __iomem *)(MSP71XX_GPIO_BASE + 0x190));
+       if (value)
+               data |= (1 << gpio);
+       else
+               data &= ~(1 << gpio);
+       __raw_writel(data, (void __iomem *)(MSP71XX_GPIO_BASE + 0x190));
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(msp71xx_set_output_drive);
+
+#define MSP71XX_GPIO_BANK(name, dr, cr, base_gpio, num_gpio) \
+{ \
+       .chip = { \
+               .label            = name, \
+               .direction_input  = msp71xx_direction_input, \
+               .direction_output = msp71xx_direction_output, \
+               .get              = msp71xx_gpio_get, \
+               .set              = msp71xx_gpio_set, \
+               .base             = base_gpio, \
+               .ngpio            = num_gpio \
+       }, \
+       .data_reg       = (void __iomem *)(MSP71XX_GPIO_BASE + dr), \
+       .config_reg     = (void __iomem *)(MSP71XX_GPIO_BASE + cr), \
+       .out_drive_reg  = (void __iomem *)(MSP71XX_GPIO_BASE + 0x190), \
+}
+
+/*
+ * struct msp71xx_gpio_banks[] - container array of gpio banks
+ * @chip: chip structure for the specified gpio bank
+ * @data_reg: register for reading and writing the gpio pin value
+ * @config_reg: register to set the mode for the gpio pin bank
+ *
+ * This array structure defines the gpio banks for the PMC MIPS Processor.
+ * We specify the bank name, the data register, the config register, base
+ * starting gpio number, and the number of gpios exposed by the bank.
+ */
+static struct msp71xx_gpio_chip msp71xx_gpio_banks[] = {
+
+       MSP71XX_GPIO_BANK("GPIO_1_0", 0x170, 0x180, 0, 2),
+       MSP71XX_GPIO_BANK("GPIO_5_2", 0x174, 0x184, 2, 4),
+       MSP71XX_GPIO_BANK("GPIO_9_6", 0x178, 0x188, 6, 4),
+       MSP71XX_GPIO_BANK("GPIO_15_10", 0x17C, 0x18C, 10, 6),
+};
+
+void __init msp71xx_init_gpio(void)
+{
+       int i;
+
+       spin_lock_init(&gpio_lock);
+
+       for (i = 0; i < ARRAY_SIZE(msp71xx_gpio_banks); i++)
+               gpiochip_add(&msp71xx_gpio_banks[i].chip);
+}
diff --git a/arch/mips/pmcs-msp71xx/gpio_extended.c b/arch/mips/pmcs-msp71xx/gpio_extended.c
new file mode 100644 (file)
index 0000000..2a99f36
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Generic PMC MSP71xx EXTENDED (EXD) GPIO handling. The extended gpio is
+ * a set of hardware registers that have no need for explicit locking as
+ * it is handled by unique method of writing individual set/clr bits.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * @author Patrick Glass <patrickglass@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#define MSP71XX_DATA_OFFSET(gpio)      (2 * (gpio))
+#define MSP71XX_READ_OFFSET(gpio)      (MSP71XX_DATA_OFFSET(gpio) + 1)
+#define MSP71XX_CFG_OUT_OFFSET(gpio)   (MSP71XX_DATA_OFFSET(gpio) + 16)
+#define MSP71XX_CFG_IN_OFFSET(gpio)    (MSP71XX_CFG_OUT_OFFSET(gpio) + 1)
+
+#define MSP71XX_EXD_GPIO_BASE  0x0BC000000L
+
+#define to_msp71xx_exd_gpio_chip(c) \
+                       container_of(c, struct msp71xx_exd_gpio_chip, chip)
+
+/*
+ * struct msp71xx_exd_gpio_chip - container for gpio chip and registers
+ * @chip: chip structure for the specified gpio bank
+ * @reg: register for control and data of gpio pin
+ */
+struct msp71xx_exd_gpio_chip {
+       struct gpio_chip chip;
+       void __iomem *reg;
+};
+
+/*
+ * msp71xx_exd_gpio_get() - return the chip's gpio value
+ * @chip: chip structure which controls the specified gpio
+ * @offset: gpio whose value will be returned
+ *
+ * It will return 0 if gpio value is low and other if high.
+ */
+static int msp71xx_exd_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct msp71xx_exd_gpio_chip *msp71xx_chip =
+           to_msp71xx_exd_gpio_chip(chip);
+       const unsigned bit = MSP71XX_READ_OFFSET(offset);
+
+       return __raw_readl(msp71xx_chip->reg) & (1 << bit);
+}
+
+/*
+ * msp71xx_exd_gpio_set() - set the output value for the gpio
+ * @chip: chip structure who controls the specified gpio
+ * @offset: gpio whose value will be assigned
+ * @value: logic level to assign to the gpio initially
+ *
+ * This will set the gpio bit specified to the desired value. It will set the
+ * gpio pin low if value is 0 otherwise it will be high.
+ */
+static void msp71xx_exd_gpio_set(struct gpio_chip *chip,
+                                unsigned offset, int value)
+{
+       struct msp71xx_exd_gpio_chip *msp71xx_chip =
+           to_msp71xx_exd_gpio_chip(chip);
+       const unsigned bit = MSP71XX_DATA_OFFSET(offset);
+
+       __raw_writel(1 << (bit + (value ? 1 : 0)), msp71xx_chip->reg);
+}
+
+/*
+ * msp71xx_exd_direction_output() - declare the direction mode for a gpio
+ * @chip: chip structure which controls the specified gpio
+ * @offset: gpio whose value will be assigned
+ * @value: logic level to assign to the gpio initially
+ *
+ * This call will set the mode for the @gpio to output. It will set the
+ * gpio pin low if value is 0 otherwise it will be high.
+ */
+static int msp71xx_exd_direction_output(struct gpio_chip *chip,
+                                       unsigned offset, int value)
+{
+       struct msp71xx_exd_gpio_chip *msp71xx_chip =
+           to_msp71xx_exd_gpio_chip(chip);
+
+       msp71xx_exd_gpio_set(chip, offset, value);
+       __raw_writel(1 << MSP71XX_CFG_OUT_OFFSET(offset), msp71xx_chip->reg);
+       return 0;
+}
+
+/*
+ * msp71xx_exd_direction_input() - declare the direction mode for a gpio
+ * @chip: chip structure which controls the specified gpio
+ * @offset: gpio whose to which the value will be assigned
+ *
+ * This call will set the mode for the @gpio to input.
+ */
+static int msp71xx_exd_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       struct msp71xx_exd_gpio_chip *msp71xx_chip =
+           to_msp71xx_exd_gpio_chip(chip);
+
+       __raw_writel(1 << MSP71XX_CFG_IN_OFFSET(offset), msp71xx_chip->reg);
+       return 0;
+}
+
+#define MSP71XX_EXD_GPIO_BANK(name, exd_reg, base_gpio, num_gpio) \
+{ \
+       .chip = { \
+               .label            = name, \
+               .direction_input  = msp71xx_exd_direction_input, \
+               .direction_output = msp71xx_exd_direction_output, \
+               .get              = msp71xx_exd_gpio_get, \
+               .set              = msp71xx_exd_gpio_set, \
+               .base             = base_gpio, \
+               .ngpio            = num_gpio, \
+       }, \
+       .reg    = (void __iomem *)(MSP71XX_EXD_GPIO_BASE + exd_reg), \
+}
+
+/*
+ * struct msp71xx_exd_gpio_banks[] - container array of gpio banks
+ * @chip: chip structure for the specified gpio bank
+ * @reg: register for reading and writing the gpio pin value
+ *
+ * This array structure defines the extended gpio banks for the
+ * PMC MIPS Processor. We specify the bank name, the data/config
+ * register,the base starting gpio number, and the number of
+ * gpios exposed by the bank of gpios.
+ */
+static struct msp71xx_exd_gpio_chip msp71xx_exd_gpio_banks[] = {
+
+       MSP71XX_EXD_GPIO_BANK("GPIO_23_16", 0x188, 16, 8),
+       MSP71XX_EXD_GPIO_BANK("GPIO_27_24", 0x18C, 24, 4),
+};
+
+void __init msp71xx_init_gpio_extended(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(msp71xx_exd_gpio_banks); i++)
+               gpiochip_add(&msp71xx_exd_gpio_banks[i].chip);
+}
diff --git a/arch/mips/pmcs-msp71xx/msp_elb.c b/arch/mips/pmcs-msp71xx/msp_elb.c
new file mode 100644 (file)
index 0000000..3e96410
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Sets up the proper Chip Select configuration registers.  It is assumed that
+ * PMON sets up the ADDR and MASK registers properly.
+ *
+ * Copyright 2005-2006 PMC-Sierra, Inc.
+ * Author: Marc St-Jean, Marc_St-Jean@pmc-sierra.com
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <msp_regs.h>
+
+static int __init msp_elb_setup(void)
+{
+#if defined(CONFIG_PMC_MSP7120_GW) \
+ || defined(CONFIG_PMC_MSP7120_EVAL)
+       /*
+        * Force all CNFG to be identical and equal to CS0,
+        * according to OPS doc
+        */
+       *CS1_CNFG_REG = *CS2_CNFG_REG = *CS3_CNFG_REG = *CS0_CNFG_REG;
+#endif
+       return 0;
+}
+
+subsys_initcall(msp_elb_setup);
diff --git a/arch/mips/pmcs-msp71xx/msp_eth.c b/arch/mips/pmcs-msp71xx/msp_eth.c
new file mode 100644 (file)
index 0000000..c584df3
--- /dev/null
@@ -0,0 +1,187 @@
+/*
+ * The setup file for ethernet related hardware on PMC-Sierra MSP processors.
+ *
+ * Copyright 2010 PMC-Sierra, Inc.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <msp_regs.h>
+#include <msp_int.h>
+#include <msp_gpio_macros.h>
+
+
+#define MSP_ETHERNET_GPIO0     14
+#define MSP_ETHERNET_GPIO1     15
+#define MSP_ETHERNET_GPIO2     16
+
+#ifdef CONFIG_MSP_HAS_TSMAC
+#define MSP_TSMAC_SIZE 0x10020
+#define MSP_TSMAC_ID   "pmc_tsmac"
+
+static struct resource msp_tsmac0_resources[] = {
+       [0] = {
+               .start  = MSP_MAC0_BASE,
+               .end    = MSP_MAC0_BASE + MSP_TSMAC_SIZE - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_MAC0,
+               .end    = MSP_INT_MAC0,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct resource msp_tsmac1_resources[] = {
+       [0] = {
+               .start  = MSP_MAC1_BASE,
+               .end    = MSP_MAC1_BASE + MSP_TSMAC_SIZE - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_MAC1,
+               .end    = MSP_INT_MAC1,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+static struct resource msp_tsmac2_resources[] = {
+       [0] = {
+               .start  = MSP_MAC2_BASE,
+               .end    = MSP_MAC2_BASE + MSP_TSMAC_SIZE - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_SAR,
+               .end    = MSP_INT_SAR,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+
+static struct platform_device tsmac_device[] = {
+       [0] = {
+               .name   = MSP_TSMAC_ID,
+               .id     = 0,
+               .num_resources = ARRAY_SIZE(msp_tsmac0_resources),
+               .resource = msp_tsmac0_resources,
+       },
+       [1] = {
+               .name   = MSP_TSMAC_ID,
+               .id     = 1,
+               .num_resources = ARRAY_SIZE(msp_tsmac1_resources),
+               .resource = msp_tsmac1_resources,
+       },
+       [2] = {
+               .name   = MSP_TSMAC_ID,
+               .id     = 2,
+               .num_resources = ARRAY_SIZE(msp_tsmac2_resources),
+               .resource = msp_tsmac2_resources,
+       },
+};
+#define msp_eth_devs   tsmac_device
+
+#else
+/* If it is not TSMAC assume MSP_ETH (100Mbps) */
+#define MSP_ETH_ID     "pmc_mspeth"
+#define MSP_ETH_SIZE   0xE0
+static struct resource msp_eth0_resources[] = {
+       [0] = {
+               .start  = MSP_MAC0_BASE,
+               .end    = MSP_MAC0_BASE + MSP_ETH_SIZE - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_MAC0,
+               .end    = MSP_INT_MAC0,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct resource msp_eth1_resources[] = {
+       [0] = {
+               .start  = MSP_MAC1_BASE,
+               .end    = MSP_MAC1_BASE + MSP_ETH_SIZE - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_MAC1,
+               .end    = MSP_INT_MAC1,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+
+
+static struct platform_device mspeth_device[] = {
+       [0] = {
+               .name   = MSP_ETH_ID,
+               .id     = 0,
+               .num_resources = ARRAY_SIZE(msp_eth0_resources),
+               .resource = msp_eth0_resources,
+       },
+       [1] = {
+               .name   = MSP_ETH_ID,
+               .id     = 1,
+               .num_resources = ARRAY_SIZE(msp_eth1_resources),
+               .resource = msp_eth1_resources,
+       },
+
+};
+#define msp_eth_devs   mspeth_device
+
+#endif
+int __init msp_eth_setup(void)
+{
+       int i, ret = 0;
+
+       /* Configure the GPIO and take the ethernet PHY out of reset */
+       msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO0);
+       msp_gpio_pin_hi(MSP_ETHERNET_GPIO0);
+
+#ifdef CONFIG_MSP_HAS_TSMAC
+       /* 3 phys on boards with TSMAC */
+       msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO1);
+       msp_gpio_pin_hi(MSP_ETHERNET_GPIO1);
+
+       msp_gpio_pin_mode(MSP_GPIO_OUTPUT, MSP_ETHERNET_GPIO2);
+       msp_gpio_pin_hi(MSP_ETHERNET_GPIO2);
+#endif
+       for (i = 0; i < ARRAY_SIZE(msp_eth_devs); i++) {
+               ret = platform_device_register(&msp_eth_devs[i]);
+               printk(KERN_INFO "device: %d, return value = %d\n", i, ret);
+               if (ret) {
+                       platform_device_unregister(&msp_eth_devs[i]);
+                       break;
+               }
+       }
+
+       if (ret)
+               printk(KERN_WARNING "Could not initialize "
+                                               "MSPETH device structures.\n");
+
+       return ret;
+}
+subsys_initcall(msp_eth_setup);
diff --git a/arch/mips/pmcs-msp71xx/msp_hwbutton.c b/arch/mips/pmcs-msp71xx/msp_hwbutton.c
new file mode 100644 (file)
index 0000000..bb57ed9
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Sets up interrupt handlers for various hardware switches which are
+ * connected to interrupt lines.
+ *
+ * Copyright 2005-2007 PMC-Sierra, Inc.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <msp_int.h>
+#include <msp_regs.h>
+#include <msp_regops.h>
+
+/* For hwbutton_interrupt->initial_state */
+#define HWBUTTON_HI    0x1
+#define HWBUTTON_LO    0x2
+
+/*
+ * This struct describes a hardware button
+ */
+struct hwbutton_interrupt {
+       char *name;                     /* Name of button */
+       int irq;                        /* Actual LINUX IRQ */
+       int eirq;                       /* Extended IRQ number (0-7) */
+       int initial_state;              /* The "normal" state of the switch */
+       void (*handle_hi)(void *);      /* Handler: switch input has gone HI */
+       void (*handle_lo)(void *);      /* Handler: switch input has gone LO */
+       void *data;                     /* Optional data to pass to handler */
+};
+
+#ifdef CONFIG_PMC_MSP7120_GW
+extern void msp_restart(char *);
+
+static void softreset_push(void *data)
+{
+       printk(KERN_WARNING "SOFTRESET switch was pushed\n");
+
+       /*
+        * In the future you could move this to the release handler,
+        * timing the difference between the 'push' and 'release', and only
+        * doing this ungraceful restart if the button has been down for
+        * a certain amount of time; otherwise doing a graceful restart.
+        */
+
+       msp_restart(NULL);
+}
+
+static void softreset_release(void *data)
+{
+       printk(KERN_WARNING "SOFTRESET switch was released\n");
+
+       /* Do nothing */
+}
+
+static void standby_on(void *data)
+{
+       printk(KERN_WARNING "STANDBY switch was set to ON (not implemented)\n");
+
+       /* TODO: Put board in standby mode */
+}
+
+static void standby_off(void *data)
+{
+       printk(KERN_WARNING
+               "STANDBY switch was set to OFF (not implemented)\n");
+
+       /* TODO: Take out of standby mode */
+}
+
+static struct hwbutton_interrupt softreset_sw = {
+       .name = "Softreset button",
+       .irq = MSP_INT_EXT0,
+       .eirq = 0,
+       .initial_state = HWBUTTON_HI,
+       .handle_hi = softreset_release,
+       .handle_lo = softreset_push,
+       .data = NULL,
+};
+
+static struct hwbutton_interrupt standby_sw = {
+       .name = "Standby switch",
+       .irq = MSP_INT_EXT1,
+       .eirq = 1,
+       .initial_state = HWBUTTON_HI,
+       .handle_hi = standby_off,
+       .handle_lo = standby_on,
+       .data = NULL,
+};
+#endif /* CONFIG_PMC_MSP7120_GW */
+
+static irqreturn_t hwbutton_handler(int irq, void *data)
+{
+       struct hwbutton_interrupt *hirq = data;
+       unsigned long cic_ext = *CIC_EXT_CFG_REG;
+
+       if (CIC_EXT_IS_ACTIVE_HI(cic_ext, hirq->eirq)) {
+               /* Interrupt: pin is now HI */
+               CIC_EXT_SET_ACTIVE_LO(cic_ext, hirq->eirq);
+               hirq->handle_hi(hirq->data);
+       } else {
+               /* Interrupt: pin is now LO */
+               CIC_EXT_SET_ACTIVE_HI(cic_ext, hirq->eirq);
+               hirq->handle_lo(hirq->data);
+       }
+
+       /*
+        * Invert the POLARITY of this level interrupt to ack the interrupt
+        * Thus next state change will invoke the opposite message
+        */
+       *CIC_EXT_CFG_REG = cic_ext;
+
+       return IRQ_HANDLED;
+}
+
+static int msp_hwbutton_register(struct hwbutton_interrupt *hirq)
+{
+       unsigned long cic_ext;
+
+       if (hirq->handle_hi == NULL || hirq->handle_lo == NULL)
+               return -EINVAL;
+
+       cic_ext = *CIC_EXT_CFG_REG;
+       CIC_EXT_SET_TRIGGER_LEVEL(cic_ext, hirq->eirq);
+       if (hirq->initial_state == HWBUTTON_HI)
+               CIC_EXT_SET_ACTIVE_LO(cic_ext, hirq->eirq);
+       else
+               CIC_EXT_SET_ACTIVE_HI(cic_ext, hirq->eirq);
+       *CIC_EXT_CFG_REG = cic_ext;
+
+       return request_irq(hirq->irq, hwbutton_handler, 0,
+                          hirq->name, hirq);
+}
+
+static int __init msp_hwbutton_setup(void)
+{
+#ifdef CONFIG_PMC_MSP7120_GW
+       msp_hwbutton_register(&softreset_sw);
+       msp_hwbutton_register(&standby_sw);
+#endif
+       return 0;
+}
+
+subsys_initcall(msp_hwbutton_setup);
diff --git a/arch/mips/pmcs-msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c
new file mode 100644 (file)
index 0000000..9da5619
--- /dev/null
@@ -0,0 +1,164 @@
+/*
+ * IRQ vector handles
+ *
+ * Copyright (C) 1995, 1996, 1997, 2003 by Ralf Baechle
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/time.h>
+
+#include <asm/irq_cpu.h>
+
+#include <msp_int.h>
+
+/* SLP bases systems */
+extern void msp_slp_irq_init(void);
+extern void msp_slp_irq_dispatch(void);
+
+/* CIC based systems */
+extern void msp_cic_irq_init(void);
+extern void msp_cic_irq_dispatch(void);
+
+/* VSMP support init */
+extern void msp_vsmp_int_init(void);
+
+/* vectored interrupt implementation */
+
/* SW0/1 interrupts are used for SMP/SMTC */

/*
 * Per-source trampolines installed as vectored-interrupt handlers:
 * each hands its fixed IRQ number to the generic do_IRQ() dispatcher.
 * Note that mac2_int_dispatch actually routes the SAR interrupt.
 */
static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); }
static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); }
static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); }
static inline void usb_int_dispatch(void)  { do_IRQ(MSP_INT_USB);  }
static inline void sec_int_dispatch(void)  { do_IRQ(MSP_INT_SEC);  }
+
+/*
+ * The PMC-Sierra MSP interrupts are arranged in a 3 level cascaded
+ * hierarchical system.         The first level are the direct MIPS interrupts
+ * and are assigned the interrupt range 0-7.  The second level is the SLM
+ * interrupt controller and is assigned the range 8-39.         The third level
+ * comprises the Peripherial block, the PCI block, the PCI MSI block and
+ * the SLP.  The PCI interrupts and the SLP errors are handled by the
+ * relevant subsystems so the core interrupt code needs only concern
+ * itself with the Peripheral block.  These are assigned interrupts in
+ * the range 40-71.
+ */
+
/*
 * First-level interrupt dispatch: read the pending CPU interrupt lines
 * (cause masked by status) and hand off to the highest-priority source.
 * Note the if/else chain deliberately continues across the #endif so the
 * software interrupts are only checked after every hardware source, in
 * both the CIC and SLP configurations.
 */
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
{
	u32 pending;

	pending = read_c0_status() & read_c0_cause();

	/*
	 * jump to the correct interrupt routine
	 * These are arranged in priority order and the timer
	 * comes first!
	 */

#ifdef CONFIG_IRQ_MSP_CIC	/* break out the CIC stuff for now */
	if (pending & C_IRQ4)	/* do the peripherals first, that's the timer */
		msp_cic_irq_dispatch();

	else if (pending & C_IRQ0)
		do_IRQ(MSP_INT_MAC0);

	else if (pending & C_IRQ1)
		do_IRQ(MSP_INT_MAC1);

	else if (pending & C_IRQ2)
		do_IRQ(MSP_INT_USB);

	else if (pending & C_IRQ3)
		do_IRQ(MSP_INT_SAR);

	else if (pending & C_IRQ5)
		do_IRQ(MSP_INT_SEC);

#else
	if (pending & C_IRQ5)
		do_IRQ(MSP_INT_TIMER);

	else if (pending & C_IRQ0)
		do_IRQ(MSP_INT_MAC0);

	else if (pending & C_IRQ1)
		do_IRQ(MSP_INT_MAC1);

	else if (pending & C_IRQ3)
		do_IRQ(MSP_INT_VE);

	else if (pending & C_IRQ4)
		msp_slp_irq_dispatch();
#endif

	else if (pending & C_SW0)	/* do software after hardware */
		do_IRQ(MSP_INT_SW0);

	else if (pending & C_SW1)
		do_IRQ(MSP_INT_SW1);
}
+
/*
 * Cascade placeholders: these irqactions claim the cascade input lines
 * (so they appear in /proc/interrupts) while the actual demultiplexing
 * is done in plat_irq_dispatch() / msp_cic_irq_dispatch().  no_action
 * means the handler itself does nothing.
 */
static struct irqaction cic_cascade_msp = {
	.handler = no_action,
	.name	 = "MSP CIC cascade",
	.flags	 = IRQF_NO_THREAD,
};

static struct irqaction per_cascade_msp = {
	.handler = no_action,
	.name	 = "MSP PER cascade",
	.flags	 = IRQF_NO_THREAD,
};
+
/*
 * Wire up the first-level MIPS CPU interrupt controller, the
 * second-level CIC or SLP controller, and claim the cascade lines.
 */
void __init arch_init_irq(void)
{
	/* assume we'll be using vectored interrupt mode except in UP mode*/
#ifdef CONFIG_MIPS_MT
	BUG_ON(!cpu_has_vint);
#endif
	/* initialize the 1st-level CPU based interrupt controller */
	mips_cpu_irq_init();

#ifdef CONFIG_IRQ_MSP_CIC
	msp_cic_irq_init();
#ifdef CONFIG_MIPS_MT
	/* install the per-source vectored handlers (dispatch stubs above) */
	set_vi_handler(MSP_INT_CIC, msp_cic_irq_dispatch);
	set_vi_handler(MSP_INT_MAC0, mac0_int_dispatch);
	set_vi_handler(MSP_INT_MAC1, mac1_int_dispatch);
	set_vi_handler(MSP_INT_SAR, mac2_int_dispatch);
	set_vi_handler(MSP_INT_USB, usb_int_dispatch);
	set_vi_handler(MSP_INT_SEC, sec_int_dispatch);
#ifdef CONFIG_MIPS_MT_SMP
	msp_vsmp_int_init();
#elif defined CONFIG_MIPS_MT_SMTC
	/* Set hwmask for all platform devices */
	irq_hwmask[MSP_INT_MAC0] = C_IRQ0;
	irq_hwmask[MSP_INT_MAC1] = C_IRQ1;
	irq_hwmask[MSP_INT_USB] = C_IRQ2;
	irq_hwmask[MSP_INT_SAR] = C_IRQ3;
	irq_hwmask[MSP_INT_SEC] = C_IRQ5;

#endif /* CONFIG_MIPS_MT_SMP */
#endif /* CONFIG_MIPS_MT */
	/* setup the cascaded interrupts */
	setup_irq(MSP_INT_CIC, &cic_cascade_msp);
	setup_irq(MSP_INT_PER, &per_cascade_msp);

#else
	/* setup the 2nd-level SLP register based interrupt controller */
	/* VSMP/SMTC support is not enabled for SLP */
	msp_slp_irq_init();

	/* setup the cascaded SLP/PER interrupts */
	/* (cic_cascade_msp is reused here for the SLP cascade line) */
	setup_irq(MSP_INT_SLP, &cic_cascade_msp);
	setup_irq(MSP_INT_PER, &per_cascade_msp);
#endif
}
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
new file mode 100644 (file)
index 0000000..e49b499
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
+ *
+ * This file define the irq handler for MSP CIC subsystem interrupts.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+
+#include <asm/mipsregs.h>
+
+#include <msp_cic_int.h>
+#include <msp_regs.h>
+
+/*
+ * External API
+ */
+extern void msp_per_irq_init(void);
+extern void msp_per_irq_dispatch(void);
+
+
/*
 * Convenience Macro.  Should be somewhere generic.
 */
#define get_current_vpe()   \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

#ifdef CONFIG_SMP

/*
 * LOCK_VPE/UNLOCK_VPE: disable local interrupts and halt the other TCs
 * of this VPE (dmt) around a mask-register read-modify-write.
 * LOCK_CORE/UNLOCK_CORE: same, but halt every VPE on the core (dvpe).
 */
#define LOCK_VPE(flags, mtflags) \
do {				\
	local_irq_save(flags);	\
	mtflags = dmt();	\
} while (0)

#define UNLOCK_VPE(flags, mtflags) \
do {				\
	emt(mtflags);		\
	local_irq_restore(flags);\
} while (0)

#define LOCK_CORE(flags, mtflags) \
do {				\
	local_irq_save(flags);	\
	mtflags = dvpe();	\
} while (0)

#define UNLOCK_CORE(flags, mtflags)		\
do {				\
	evpe(mtflags);		\
	local_irq_restore(flags);\
} while (0)

#else

/*
 * UP build: nothing to exclude, the lock macros compile away.
 * NOTE(review): LOCK_CORE/UNLOCK_CORE have no UP stubs -- they are only
 * used from CONFIG_MIPS_MT_SMP code, which implies CONFIG_SMP.
 */
#define LOCK_VPE(flags, mtflags)
#define UNLOCK_VPE(flags, mtflags)
#endif
+
+/* ensure writes to cic are completed */
+static inline void cic_wmb(void)
+{
+       const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
+       volatile u32 dummy_read;
+
+       wmb();
+       dummy_read = __raw_readl(cic_mem);
+       dummy_read++;
+}
+
/*
 * Re-enable a CIC interrupt by setting its bit in the mask register of
 * the VPE this code is currently running on.
 */
static void unmask_cic_irq(struct irq_data *d)
{
	/* indexed as an array: assumes the VPE0/VPE1 mask registers are
	 * adjacent -- TODO confirm register layout */
	volatile u32   *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long  flags;

	/*
	 * Make sure we have IRQ affinity.  It may have changed while
	 * we were processing the IRQ.
	 */
	if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
		return;
#endif

	vpe = get_current_vpe();
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();
}
+
/* Disable a CIC interrupt in the current VPE's mask register. */
static void mask_cic_irq(struct irq_data *d)
{
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int	vpe = get_current_vpe();
#ifdef CONFIG_SMP
	/* only needed by the LOCK_VPE/UNLOCK_VPE expansions in SMP builds */
	unsigned long flags, mtflags;
#endif
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();
}
/* Ack = mask, then clear the latched status bit (write-one-to-clear). */
static void msp_cic_irq_ack(struct irq_data *d)
{
	mask_cic_irq(d);
	/*
	 * Only really necessary for 18, 16-14 and sometimes 3:0
	 * (since these can be edge sensitive) but it doesn't
	 * hurt for the others
	 */
	*CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
	/* presumably a no-op outside SMTC builds -- TODO confirm */
	smtc_im_ack_irq(d->irq);
}
+
+/*Note: Limiting to VSMP . Not tested in SMTC */
+
+#ifdef CONFIG_MIPS_MT_SMP
+static int msp_cic_irq_set_affinity(struct irq_data *d,
+                                   const struct cpumask *cpumask, bool force)
+{
+       int cpu;
+       unsigned long flags;
+       unsigned int  mtflags;
+       unsigned long imask = (1 << (irq - MSP_CIC_INTBASE));
+       volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;
+
+       /* timer balancing should be disabled in kernel code */
+       BUG_ON(irq == MSP_INT_VPE0_TIMER || irq == MSP_INT_VPE1_TIMER);
+
+       LOCK_CORE(flags, mtflags);
+       /* enable if any of each VPE's TCs require this IRQ */
+       for_each_online_cpu(cpu) {
+               if (cpumask_test_cpu(cpu, cpumask))
+                       cic_mask[cpu] |= imask;
+               else
+                       cic_mask[cpu] &= ~imask;
+
+       }
+
+       UNLOCK_CORE(flags, mtflags);
+       return 0;
+
+}
+#endif
+
/*
 * irq_chip for the 32 CIC sources.  Both ack and mask_ack mask the
 * source and clear its latched status bit.
 */
static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.irq_mask = mask_cic_irq,
	.irq_mask_ack = msp_cic_irq_ack,
	.irq_unmask = unmask_cic_irq,
	.irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
	.irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};
+
+void __init msp_cic_irq_init(void)
+{
+       int i;
+       /* Mask/clear interrupts. */
+       *CIC_VPE0_MSK_REG = 0x00000000;
+       *CIC_VPE1_MSK_REG = 0x00000000;
+       *CIC_STS_REG      = 0xFFFFFFFF;
+       /*
+       * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
+       * These inputs map to EXT_INT_POL[6:4] inside the CIC.
+       * They are to be active low, level sensitive.
+       */
+       *CIC_EXT_CFG_REG &= 0xFFFF8F8F;
+
+       /* initialize all the IRQ descriptors */
+       for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
+               irq_set_chip_and_handler(i, &msp_cic_irq_controller,
+                                        handle_level_irq);
+#ifdef CONFIG_MIPS_MT_SMTC
+               /* Mask of CIC interrupt */
+               irq_hwmask[i] = C_IRQ4;
+#endif
+       }
+
+       /* Initialize the PER interrupt sub-system */
+        msp_per_irq_init();
+}
+
+/* CIC masked by CIC vector processing before dispatch called */
+void msp_cic_irq_dispatch(void)
+{
+       volatile u32    *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
+       u32     cic_mask;
+       u32      pending;
+       int     cic_status = *CIC_STS_REG;
+       cic_mask = cic_msk_reg[get_current_vpe()];
+       pending = cic_status & cic_mask;
+       if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
+               do_IRQ(MSP_INT_VPE0_TIMER);
+       } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
+               do_IRQ(MSP_INT_VPE1_TIMER);
+       } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
+               msp_per_irq_dispatch();
+       } else if (pending) {
+               do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
+       } else{
+               spurious_interrupt();
+       }
+}
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_per.c b/arch/mips/pmcs-msp71xx/msp_irq_per.c
new file mode 100644 (file)
index 0000000..d1fd530
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
+ *
+ * This file define the irq handler for MSP PER subsystem interrupts.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <asm/mipsregs.h>
+
+#include <msp_cic_int.h>
+#include <msp_regs.h>
+
+
+/*
+ * Convenience Macro.  Should be somewhere generic.
+ */
+#define get_current_vpe()      \
+       ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
+
+#ifdef CONFIG_SMP
+/*
+ * The PER registers must be protected from concurrent access.
+ */
+
+static DEFINE_SPINLOCK(per_lock);
+#endif
+
+/* ensure writes to per are completed */
+
+static inline void per_wmb(void)
+{
+       const volatile void __iomem *per_mem = PER_INT_MSK_REG;
+       volatile u32 dummy_read;
+
+       wmb();
+       dummy_read = __raw_readl(per_mem);
+       dummy_read++;
+}
+
+static inline void unmask_per_irq(struct irq_data *d)
+{
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       spin_lock_irqsave(&per_lock, flags);
+       *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
+       spin_unlock_irqrestore(&per_lock, flags);
+#else
+       *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
+#endif
+       per_wmb();
+}
+
+static inline void mask_per_irq(struct irq_data *d)
+{
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       spin_lock_irqsave(&per_lock, flags);
+       *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
+       spin_unlock_irqrestore(&per_lock, flags);
+#else
+       *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
+#endif
+       per_wmb();
+}
+
/* Ack = mask, then write the status bit (write-one-to-clear where supported). */
static inline void msp_per_irq_ack(struct irq_data *d)
{
	mask_per_irq(d);
	/*
	 * In the PER interrupt controller, only bits 11 and 10
	 * are write-to-clear, (SPI TX complete, SPI RX complete).
	 * It does nothing for any others.
	 */
	*PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
}
+
+#ifdef CONFIG_SMP
static int msp_per_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *affinity, bool force)
{
	/*
	 * NOTE(review): the requested affinity mask is ignored and the
	 * interrupt is simply re-enabled.  Presumably PER interrupts
	 * cannot be steered per-CPU -- confirm before relying on
	 * affinity for this chip.
	 */
	unmask_per_irq(d);
	return 0;
}
+#endif
+
/*
 * irq_chip for the PER sub-block.  NOTE(review): only irq_enable/
 * irq_disable are provided (no irq_mask/irq_unmask) -- verify this is
 * intentional for how genirq drives this chip.
 */
static struct irq_chip msp_per_irq_controller = {
	.name = "MSP_PER",
	.irq_enable = unmask_per_irq,
	.irq_disable = mask_per_irq,
	.irq_ack = msp_per_irq_ack,
#ifdef CONFIG_SMP
	.irq_set_affinity = msp_per_irq_set_affinity,
#endif
};
+
+void __init msp_per_irq_init(void)
+{
+       int i;
+       /* Mask/clear interrupts. */
+       *PER_INT_MSK_REG  = 0x00000000;
+       *PER_INT_STS_REG  = 0xFFFFFFFF;
+       /* initialize all the IRQ descriptors */
+       for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
+               irq_set_chip(i, &msp_per_irq_controller);
+#ifdef CONFIG_MIPS_MT_SMTC
+               irq_hwmask[i] = C_IRQ4;
+#endif
+       }
+}
+
+void msp_per_irq_dispatch(void)
+{
+       u32     per_mask = *PER_INT_MSK_REG;
+       u32     per_status = *PER_INT_STS_REG;
+       u32     pending;
+
+       pending = per_status & per_mask;
+       if (pending) {
+               do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
+       } else {
+               spurious_interrupt();
+       }
+}
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_slp.c b/arch/mips/pmcs-msp71xx/msp_irq_slp.c
new file mode 100644 (file)
index 0000000..5f66a76
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * This file define the irq handler for MSP SLM subsystem interrupts.
+ *
+ * Copyright 2005-2006 PMC-Sierra, Inc, derived from irq_cpu.c
+ * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include <asm/mipsregs.h>
+
+#include <msp_slp_int.h>
+#include <msp_regs.h>
+
+static inline void unmask_msp_slp_irq(struct irq_data *d)
+{
+       unsigned int irq = d->irq;
+
+       /* check for PER interrupt range */
+       if (irq < MSP_PER_INTBASE)
+               *SLP_INT_MSK_REG |= (1 << (irq - MSP_SLP_INTBASE));
+       else
+               *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
+}
+
+static inline void mask_msp_slp_irq(struct irq_data *d)
+{
+       unsigned int irq = d->irq;
+
+       /* check for PER interrupt range */
+       if (irq < MSP_PER_INTBASE)
+               *SLP_INT_MSK_REG &= ~(1 << (irq - MSP_SLP_INTBASE));
+       else
+               *PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE));
+}
+
+/*
+ * While we ack the interrupt interrupts are disabled and thus we don't need
+ * to deal with concurrency issues.  Same for msp_slp_irq_end.
+ */
+static inline void ack_msp_slp_irq(struct irq_data *d)
+{
+       unsigned int irq = d->irq;
+
+       /* check for PER interrupt range */
+       if (irq < MSP_PER_INTBASE)
+               *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE));
+       else
+               *PER_INT_STS_REG = (1 << (irq - MSP_PER_INTBASE));
+}
+
/* irq_chip covering both the SLP range and the cascaded PER range. */
static struct irq_chip msp_slp_irq_controller = {
	.name = "MSP_SLP",
	.irq_ack = ack_msp_slp_irq,
	.irq_mask = mask_msp_slp_irq,
	.irq_unmask = unmask_msp_slp_irq,
};
+
/* Bring up the SLP controller: mask/clear everything, attach the chip. */
void __init msp_slp_irq_init(void)
{
	int i;

	/* Mask/clear interrupts. */
	*SLP_INT_MSK_REG = 0x00000000;
	*PER_INT_MSK_REG = 0x00000000;
	*SLP_INT_STS_REG = 0xFFFFFFFF;
	*PER_INT_STS_REG = 0xFFFFFFFF;

	/*
	 * One loop covers both the SLP and PER ranges: assumes the PER
	 * numbers directly follow the SLP ones -- TODO confirm
	 * MSP_PER_INTBASE == MSP_SLP_INTBASE + 32.
	 */
	for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++)
		irq_set_chip_and_handler(i, &msp_slp_irq_controller,
					 handle_level_irq);
}
+
+void msp_slp_irq_dispatch(void)
+{
+       u32 pending;
+       int intbase;
+
+       intbase = MSP_SLP_INTBASE;
+       pending = *SLP_INT_STS_REG & *SLP_INT_MSK_REG;
+
+       /* check for PER interrupt */
+       if (pending == (1 << (MSP_INT_PER - MSP_SLP_INTBASE))) {
+               intbase = MSP_PER_INTBASE;
+               pending = *PER_INT_STS_REG & *PER_INT_MSK_REG;
+       }
+
+       /* check for spurious interrupt */
+       if (pending == 0x00000000) {
+               printk(KERN_ERR "Spurious %s interrupt?\n",
+                       (intbase == MSP_SLP_INTBASE) ? "SLP" : "PER");
+               return;
+       }
+
+       /* dispatch the irq */
+       do_IRQ(ffs(pending) + intbase - 1);
+}
diff --git a/arch/mips/pmcs-msp71xx/msp_pci.c b/arch/mips/pmcs-msp71xx/msp_pci.c
new file mode 100644 (file)
index 0000000..428dea2
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * The setup file for PCI related hardware on PMC-Sierra MSP processors.
+ *
+ * Copyright 2005-2006 PMC-Sierra, Inc.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+
+#include <msp_prom.h>
+#include <msp_regs.h>
+
+extern void msp_pci_init(void);
+
/*
 * Subsystem init hook that defers to the platform PCI bring-up.
 * NOTE(review): the #if 0 block is an unfinished single-card-mode
 * setup carried over from 2.4 -- left in place as a TODO marker.
 */
static int __init msp_pci_setup(void)
{
#if 0 /* Linux 2.6 initialization code to be completed */
	if (getdeviceid() & DEV_ID_SINGLE_PC) {
		/* If single card mode */
		slmRegs *sreg = (slmRegs *) SREG_BASE;

		sreg->single_pc_enable = SINGLE_PCCARD;
	}
#endif

	msp_pci_init();

	return 0;
}
diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c
new file mode 100644 (file)
index 0000000..0edb89a
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ *    PROM library initialisation code, assuming a version of
+ *    pmon is the boot code.
+ *
+ * Copyright 2000,2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ *             ppopov@mvista.com or source@mvista.com
+ *
+ * This file was derived from Carsten Langgaard's
+ * arch/mips/mips-boards/xx files.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm-generic/sections.h>
+#include <asm/page.h>
+
+#include <msp_prom.h>
+#include <msp_regs.h>
+
+/* global PROM environment variables and pointers */
+int prom_argc;
+char **prom_argv, **prom_envp;
+int *prom_vec;
+
+/* debug flag */
+int init_debug = 1;
+
+/* memory blocks */
+struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
+
+/* default feature sets */
+static char msp_default_features[] =
+#if defined(CONFIG_PMC_MSP4200_EVAL) \
+ || defined(CONFIG_PMC_MSP4200_GW)
+       "ERER";
+#elif defined(CONFIG_PMC_MSP7120_EVAL) \
+ || defined(CONFIG_PMC_MSP7120_GW)
+       "EMEMSP";
+#elif defined(CONFIG_PMC_MSP7120_FPGA)
+       "EMEM";
+#endif
+
+/* conversion functions */
/*
 * Convert one hex digit character to its value (0-15).
 *
 * Generalized to accept uppercase 'A'-'F' as well: the original only
 * handled '0'-'9' and lowercase, so an uppercase MAC/hex string from
 * the PROM silently decoded those digits as 0.  Non-hex characters
 * still yield 0 (historical behavior kept for callers).
 */
static inline unsigned char str2hexnum(unsigned char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return 0; /* foo */
}
+
/*
 * Parse a MAC address string ("xx:xx:xx:xx:xx:xx" or '.'-separated)
 * into the 6-byte buffer @ea.  Returns 0 on success, -1 on a
 * malformed address.
 *
 * Fix: the original wrote ea[index++] without bounding index, so an
 * input with more than seven groups overflowed the 6-byte buffer.
 * Inputs with too many separators now fail early with -1 (they could
 * never parse successfully anyway).
 */
static inline int str2eaddr(unsigned char *ea, unsigned char *str)
{
	int index = 0;
	unsigned char num = 0;

	while (*str != '\0') {
		if ((*str == '.') || (*str == ':')) {
			/* a valid MAC has exactly 6 octets: bound the write */
			if (index >= 5)
				return -1;
			ea[index++] = num;
			num = 0;
			str++;
		} else {
			num = num << 4;
			num |= str2hexnum(*str++);
		}
	}

	if (index == 5) {
		ea[index++] = num;
		return 0;
	} else
		return -1;
}
+EXPORT_SYMBOL(str2eaddr);
+
/*
 * Convert a hex string (no "0x" prefix) to its numeric value.
 *
 * Fix: the accumulator was declared `int` although the function
 * returns `unsigned long`; hex strings longer than 8 digits invoked
 * signed-overflow behavior and sign-extended on 64-bit.  Accumulate
 * in the return type instead.
 */
static inline unsigned long str2hex(unsigned char *str)
{
	unsigned long value = 0;

	while (*str) {
		value = value << 4;
		value |= str2hexnum(*str++);
	}

	return value;
}
+
/* function to query the system information */
/* Board name string, selected at compile time from the PMC_MSP* config. */
const char *get_system_type(void)
{
#if defined(CONFIG_PMC_MSP4200_EVAL)
	return "PMC-Sierra MSP4200 Eval Board";
#elif defined(CONFIG_PMC_MSP4200_GW)
	return "PMC-Sierra MSP4200 VoIP Gateway";
#elif defined(CONFIG_PMC_MSP7120_EVAL)
	return "PMC-Sierra MSP7120 Eval Board";
#elif defined(CONFIG_PMC_MSP7120_GW)
	return "PMC-Sierra MSP7120 Residential Gateway";
#elif defined(CONFIG_PMC_MSP7120_FPGA)
	return "PMC-Sierra MSP7120 FPGA";
#else
	#error "What is the type of *your* MSP?"
#endif
}
+
/*
 * Look up @ethaddr_name in the PROM environment and parse it into the
 * 6-byte buffer @ethernet_addr.  Returns 0 on success, -1 if the
 * variable is missing or malformed.
 */
int get_ethernet_addr(char *ethaddr_name, char *ethernet_addr)
{
	char *ethaddr_str;

	ethaddr_str = prom_getenv(ethaddr_name);
	if (!ethaddr_str) {
		printk(KERN_WARNING "%s not set in boot prom\n", ethaddr_name);
		return -1;
	}

	if (str2eaddr(ethernet_addr, ethaddr_str) == -1) {
		printk(KERN_WARNING "%s badly formatted-<%s>\n",
			ethaddr_name, ethaddr_str);
		return -1;
	}

	if (init_debug > 1) {
		int i;
		/*
		 * NOTE(review): each printk() starts a new log record, so
		 * the octets may not appear on one line; pr_cont()-style
		 * continuation would be needed for that.
		 */
		printk(KERN_DEBUG "get_ethernet_addr: for %s ", ethaddr_name);
		for (i = 0; i < 5; i++)
			printk(KERN_DEBUG "%02x:",
				(unsigned char)*(ethernet_addr+i));
		printk(KERN_DEBUG "%02x\n", *(ethernet_addr+i));
	}

	return 0;
}
+EXPORT_SYMBOL(get_ethernet_addr);
+
+static char *get_features(void)
+{
+       char *feature = prom_getenv(FEATURES);
+
+       if (feature == NULL) {
+               /* default features based on MACHINE_TYPE */
+               feature = msp_default_features;
+       }
+
+       return feature;
+}
+
+static char test_feature(char c)
+{
+       char *feature = get_features();
+
+       while (*feature) {
+               if (*feature++ == c)
+                       return *feature;
+               feature++;
+       }
+
+       return FEATURE_NOEXIST;
+}
+
+unsigned long get_deviceid(void)
+{
+       char *deviceid = prom_getenv(DEVICEID);
+
+       if (deviceid == NULL)
+               return *DEV_ID_REG;
+       else
+               return str2hex(deviceid);
+}
+
/*
 * Feature-presence queries: each returns the value character stored
 * after its key in the FEATURES string, or FEATURE_NOEXIST when the
 * key is absent.
 */
char identify_pci(void)
{
	return test_feature(PCI_KEY);
}
EXPORT_SYMBOL(identify_pci);

char identify_pcimux(void)
{
	return test_feature(PCIMUX_KEY);
}

char identify_sec(void)
{
	return test_feature(SEC_KEY);
}
EXPORT_SYMBOL(identify_sec);

char identify_spad(void)
{
	return test_feature(SPAD_KEY);
}
EXPORT_SYMBOL(identify_spad);

char identify_tdm(void)
{
	return test_feature(TDM_KEY);
}
EXPORT_SYMBOL(identify_tdm);

char identify_zsp(void)
{
	return test_feature(ZSP_KEY);
}
EXPORT_SYMBOL(identify_zsp);

/*
 * Like test_feature() but for keys that repeat (one per interface):
 * returns the value character of the @interface_num-th occurrence
 * (0-based) of @key; the counter only decrements on key matches.
 */
static char identify_enetfeature(char key, unsigned long interface_num)
{
	char *feature = get_features();

	while (*feature) {
		if (*feature++ == key && interface_num-- == 0)
			return *feature;
		feature++;
	}

	return FEATURE_NOEXIST;
}

char identify_enet(unsigned long interface_num)
{
	return identify_enetfeature(ENET_KEY, interface_num);
}
EXPORT_SYMBOL(identify_enet);

char identify_enetTxD(unsigned long interface_num)
{
	return identify_enetfeature(ENETTXD_KEY, interface_num);
}
EXPORT_SYMBOL(identify_enetTxD);
+
/* Device-ID family bits (masked with CPU_DEVID_FAMILY). */
unsigned long identify_family(void)
{
	unsigned long deviceid;

	deviceid = get_deviceid();

	return deviceid & CPU_DEVID_FAMILY;
}
EXPORT_SYMBOL(identify_family);

/* Device-ID revision bits (masked with CPU_DEVID_REVISION). */
unsigned long identify_revision(void)
{
	unsigned long deviceid;

	deviceid = get_deviceid();

	return deviceid & CPU_DEVID_REVISION;
}
EXPORT_SYMBOL(identify_revision);
+
+/* PROM environment functions */
+char *prom_getenv(char *env_name)
+{
+       /*
+        * Return a pointer to the given environment variable.  prom_envp
+        * points to a null terminated array of pointers to variables.
+        * Environment variables are stored in the form of "memsize=64"
+        */
+
+       char **var = prom_envp;
+       int i = strlen(env_name);
+
+       while (*var) {
+               if (strncmp(env_name, *var, i) == 0) {
+                       return (*var + strlen(env_name) + 1);
+               }
+               var++;
+       }
+
+       return NULL;
+}
+
+/* PROM commandline functions */
+void  __init prom_init_cmdline(void)
+{
+       char *cp;
+       int actr;
+
+       actr = 1; /* Always ignore argv[0] */
+
+       cp = &(arcs_cmdline[0]);
+       while (actr < prom_argc) {
+               strcpy(cp, prom_argv[actr]);
+               cp += strlen(prom_argv[actr]);
+               *cp++ = ' ';
+               actr++;
+       }
+       if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */
+               --cp;
+       *cp = '\0';
+}
+
+/* memory allocation functions */
+static int __init prom_memtype_classify(unsigned int type)
+{
+       switch (type) {
+       case yamon_free:
+               return BOOT_MEM_RAM;
+       case yamon_prom:
+               return BOOT_MEM_ROM_DATA;
+       default:
+               return BOOT_MEM_RESERVED;
+       }
+}
+
+void __init prom_meminit(void)
+{
+       struct prom_pmemblock *p;
+
+       p = prom_getmdesc();
+
+       while (p->size) {
+               long type;
+               unsigned long base, size;
+
+               type = prom_memtype_classify(p->type);
+               base = p->base;
+               size = p->size;
+
+               add_memory_region(base, size, type);
+               p++;
+       }
+}
+
+void __init prom_free_prom_memory(void)
+{
+       int     argc;
+       char    **argv;
+       char    **envp;
+       char    *ptr;
+       int     len = 0;
+       int     i;
+       unsigned long addr;
+
+       /*
+        * preserve environment variables and command line from pmon/bbload
+        * first preserve the command line
+        */
+       for (argc = 0; argc < prom_argc; argc++) {
+               len += sizeof(char *);                  /* length of pointer */
+               len += strlen(prom_argv[argc]) + 1;     /* length of string */
+       }
+       len += sizeof(char *);          /* plus length of null pointer */
+
+       argv = kmalloc(len, GFP_KERNEL);
+       ptr = (char *) &argv[prom_argc + 1];    /* strings follow array */
+
+       for (argc = 0; argc < prom_argc; argc++) {
+               argv[argc] = ptr;
+               strcpy(ptr, prom_argv[argc]);
+               ptr += strlen(prom_argv[argc]) + 1;
+       }
+       argv[prom_argc] = NULL;         /* end array with null pointer */
+       prom_argv = argv;
+
+       /* next preserve the environment variables */
+       len = 0;
+       i = 0;
+       for (envp = prom_envp; *envp != NULL; envp++) {
+               i++;            /* count number of environment variables */
+               len += sizeof(char *);          /* length of pointer */
+               len += strlen(*envp) + 1;       /* length of string */
+       }
+       len += sizeof(char *);          /* plus length of null pointer */
+
+       envp = kmalloc(len, GFP_KERNEL);
+       ptr = (char *) &envp[i+1];
+
+       for (argc = 0; argc < i; argc++) {
+               envp[argc] = ptr;
+               strcpy(ptr, prom_envp[argc]);
+               ptr += strlen(prom_envp[argc]) + 1;
+       }
+       envp[i] = NULL;                 /* end array with null pointer */
+       prom_envp = envp;
+
+       for (i = 0; i < boot_mem_map.nr_map; i++) {
+               if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
+                       continue;
+
+               addr = boot_mem_map.map[i].addr;
+               free_init_pages("prom memory",
+                               addr, addr + boot_mem_map.map[i].size);
+       }
+}
+
/*
 * Build the static memory-descriptor table from the PROM environment:
 * reserved low vectors, PMON data, any free gap before the kernel, the
 * kernel image itself, and the remaining RAM up to "memsize".  The
 * caller iterates entries until size == 0.
 */
struct prom_pmemblock *__init prom_getmdesc(void)
{
	static char	memsz_env[] __initdata = "memsize";
	static char	heaptop_env[] __initdata = "heaptop";
	char		*str;
	unsigned int	memsize;
	unsigned int	heaptop;
	int i;

	/* "memsize" gives total RAM; fall back to 32MB if unset or bad */
	str = prom_getenv(memsz_env);
	if (!str) {
		ppfinit("memsize not set in boot prom, "
			"set to default (32Mb)\n");
		memsize = 0x02000000;
	} else {
		memsize = simple_strtol(str, NULL, 0);

		if (memsize == 0) {
			/* if memsize is a bad size, use reasonable default */
			memsize = 0x02000000;
		}

		/* convert to physical address (removing caching bits, etc) */
		memsize = CPHYSADDR(memsize);
	}

	/* "heaptop" is the end of PMON's data; default to the kernel start */
	str = prom_getenv(heaptop_env);
	if (!str) {
		heaptop = CPHYSADDR((u32)&_text);
		ppfinit("heaptop not set in boot prom, "
			"set to default 0x%08x\n", heaptop);
	} else {
		heaptop = simple_strtol(str, NULL, 16);
		if (heaptop == 0) {
			/* heaptop conversion bad, might have 0xValue */
			heaptop = simple_strtol(str, NULL, 0);

			if (heaptop == 0) {
				/* heaptop still bad, use reasonable default */
				heaptop = CPHYSADDR((u32)&_text);
			}
		}

		/* convert to physical address (removing caching bits, etc) */
		heaptop = CPHYSADDR((u32)heaptop);
	}

	/* the base region */
	i = 0;
	mdesc[i].type = BOOT_MEM_RESERVED;
	mdesc[i].base = 0x00000000;
	mdesc[i].size = PAGE_ALIGN(0x300 + 0x80);
		/* jtag interrupt vector + sizeof vector */

	/* PMON data */
	if (heaptop > mdesc[i].base + mdesc[i].size) {
		i++;			/* 1 */
		mdesc[i].type = BOOT_MEM_ROM_DATA;
		mdesc[i].base = mdesc[i-1].base + mdesc[i-1].size;
		mdesc[i].size = heaptop - mdesc[i].base;
	}

	/* end of PMON data to start of kernel -- probably zero .. */
	if (heaptop != CPHYSADDR((u32)_text)) {
		i++;	/* 2 */
		mdesc[i].type = BOOT_MEM_RAM;
		mdesc[i].base = heaptop;
		mdesc[i].size = CPHYSADDR((u32)_text) - mdesc[i].base;
	}

	/*  kernel proper */
	i++;			/* 3 */
	mdesc[i].type = BOOT_MEM_RESERVED;
	mdesc[i].base = CPHYSADDR((u32)_text);
	mdesc[i].size = CPHYSADDR(PAGE_ALIGN((u32)_end)) - mdesc[i].base;

	/* Remainder of RAM -- under memsize */
	i++;			/* 4 at most; earlier entries may be skipped */
	mdesc[i].type = yamon_free;
	mdesc[i].base = mdesc[i-1].base + mdesc[i-1].size;
	mdesc[i].size = memsize - mdesc[i].base;

	return &mdesc[0];
}
diff --git a/arch/mips/pmcs-msp71xx/msp_serial.c b/arch/mips/pmcs-msp71xx/msp_serial.c
new file mode 100644 (file)
index 0000000..d304be2
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * The setup file for serial related hardware on PMC-Sierra MSP processors.
+ *
+ * Copyright 2005 PMC-Sierra, Inc.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+
+#include <asm/bootinfo.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/serial.h>
+#include <linux/serial_8250.h>
+
+#include <msp_prom.h>
+#include <msp_int.h>
+#include <msp_regs.h>
+
+/*
+ * Per-port driver state: shadow of the last value written to the LCR,
+ * kept so the busy-detect IRQ handler can replay the write (see
+ * msp_serial_handle_irq() below).
+ */
+struct msp_uart_data {
+       int     last_lcr;
+};
+
+/*
+ * 8250 register-write hook: record LCR writes in the per-port shadow
+ * (so they can be replayed after a busy-detect interrupt), then perform
+ * the byte-wide MMIO write at the port's register stride.
+ */
+static void msp_serial_out(struct uart_port *p, int offset, int value)
+{
+       struct msp_uart_data *d = p->private_data;
+
+       /* Shadow the LCR for msp_serial_handle_irq() */
+       if (offset == UART_LCR)
+               d->last_lcr = value;
+
+       offset <<= p->regshift;
+       writeb(value, p->membase + offset);
+}
+
+/*
+ * 8250 register-read hook: byte-wide MMIO read at the port's
+ * register stride.
+ */
+static unsigned int msp_serial_in(struct uart_port *p, int offset)
+{
+       offset <<= p->regshift;
+
+       return readb(p->membase + offset);
+}
+
+/*
+ * Port IRQ handler: let the generic 8250 core handle the interrupt
+ * first; if it declines, check for the DesignWare-specific busy-detect
+ * condition and clear it.  Returns non-zero when the IRQ was handled.
+ */
+static int msp_serial_handle_irq(struct uart_port *p)
+{
+       struct msp_uart_data *d = p->private_data;
+       unsigned int iir = readb(p->membase + (UART_IIR << p->regshift));
+
+       if (serial8250_handle_irq(p, iir)) {
+               return 1;
+       } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+               /*
+                * The DesignWare APB UART has an Busy Detect (0x07) interrupt
+                * meaning an LCR write attempt occurred while the UART was
+                * busy. The interrupt must be cleared by reading the UART
+                * status register (USR) and the LCR re-written.
+                *
+                * Note: MSP reserves 0x20 bytes of address space for the UART
+                * and the USR is mapped in a separate block at an offset of
+                * 0xc0 from the start of the UART.
+                */
+               (void)readb(p->membase + 0xc0);
+               writeb(d->last_lcr, p->membase + (UART_LCR << p->regshift));
+
+               return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Register the MSP on-chip 8250-style UARTs at early boot.
+ *
+ * UART0 is always present.  UART1 exists only on MSP4200/MSP7120
+ * boards and must be pin-muxed in via GPIO_CFG2 first.  The UART
+ * clock can be overridden with the PMON "uartfreqhz" variable.
+ * Each port gets its own msp_uart_data so the busy-detect LCR
+ * shadow is tracked per port.
+ */
+void __init msp_serial_setup(void)
+{
+       char    *s;
+       char    *endp;
+       struct uart_port up;
+       unsigned int uartclk;
+
+       memset(&up, 0, sizeof(up));
+
+       /* Check if clock was specified in environment */
+       s = prom_getenv("uartfreqhz");
+       if (!(s && *s && (uartclk = simple_strtoul(s, &endp, 10)) && *endp == 0))
+               uartclk = MSP_BASE_BAUD;
+       ppfinit("UART clock set to %d\n", uartclk);
+
+       /* Initialize first serial port */
+       up.mapbase      = MSP_UART0_BASE;
+       up.membase      = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
+       up.irq          = MSP_INT_UART0;
+       up.uartclk      = uartclk;
+       up.regshift     = 2;
+       up.iotype       = UPIO_MEM;
+       up.flags        = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
+       up.type         = PORT_16550A;
+       up.line         = 0;
+       up.serial_out   = msp_serial_out;
+       up.serial_in    = msp_serial_in;
+       up.handle_irq   = msp_serial_handle_irq;
+       up.private_data = kzalloc(sizeof(struct msp_uart_data), GFP_KERNEL);
+       if (!up.private_data) {
+               pr_err("failed to allocate uart private data\n");
+               return;
+       }
+       if (early_serial_setup(&up)) {
+               kfree(up.private_data);
+               pr_err("Early serial init of port 0 failed\n");
+       }
+
+       /* Initialize the second serial port, if one exists */
+       switch (mips_machtype) {
+       case MACH_MSP4200_EVAL:
+       case MACH_MSP4200_GW:
+       case MACH_MSP4200_FPGA:
+       case MACH_MSP7120_EVAL:
+       case MACH_MSP7120_GW:
+       case MACH_MSP7120_FPGA:
+               /* Enable UART1 on MSP4200 and MSP7120 */
+               *GPIO_CFG2_REG = 0x00002299;
+               break;
+
+       default:
+               return; /* No second serial port, good-bye. */
+       }
+
+       up.mapbase      = MSP_UART1_BASE;
+       up.membase      = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
+       up.irq          = MSP_INT_UART1;
+       up.line         = 1;
+       /*
+        * BUG FIX: the original code pointed private_data at the UART1
+        * status register, so the LCR shadow write in msp_serial_out()
+        * would scribble on hardware registers and a setup failure
+        * would kfree() a register address.  Give port 1 its own
+        * driver state, exactly like port 0.
+        */
+       up.private_data = kzalloc(sizeof(struct msp_uart_data), GFP_KERNEL);
+       if (!up.private_data) {
+               pr_err("failed to allocate uart private data\n");
+               return;
+       }
+       if (early_serial_setup(&up)) {
+               kfree(up.private_data);
+               pr_err("Early serial init of port 1 failed\n");
+       }
+}
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
new file mode 100644 (file)
index 0000000..1651cfd
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * The generic setup file for PMC-Sierra MSP processors
+ *
+ * Copyright 2005-2007 PMC-Sierra, Inc,
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/r4kcache.h>
+#include <asm/reboot.h>
+#include <asm/smp-ops.h>
+#include <asm/time.h>
+
+#include <msp_prom.h>
+#include <msp_regs.h>
+
+#if defined(CONFIG_PMC_MSP7120_GW)
+#include <msp_regops.h>
+#define MSP_BOARD_RESET_GPIO   9
+#endif
+
+extern void msp_serial_setup(void);
+extern void pmctwiled_setup(void);
+
+#if defined(CONFIG_PMC_MSP7120_EVAL) || \
+    defined(CONFIG_PMC_MSP7120_GW) || \
+    defined(CONFIG_PMC_MSP7120_FPGA)
+/*
+ * Performs the reset for MSP7120-based boards
+ */
+/*
+ * Hard reset for MSP7120-based boards.  The reset sequence below must
+ * run entirely from the I-cache: once the DDR controller is put into
+ * self-refresh, RAM can no longer be fetched from.
+ */
+void msp7120_reset(void)
+{
+       void *start, *end, *iptr;
+       register int i;
+
+       /* Disable all interrupts */
+       local_irq_disable();
+#ifdef CONFIG_SYS_SUPPORTS_MULTITHREADING
+       dvpe();
+#endif
+
+       /* Cache the reset code of this function */
+       __asm__ __volatile__ (
+               "       .set    push                            \n"
+               "       .set    mips3                           \n"
+               "       la      %0,startpoint                   \n"
+               "       la      %1,endpoint                     \n"
+               "       .set    pop                             \n"
+               : "=r" (start), "=r" (end)
+               :
+       );
+
+       /* Prefetch every cache line between startpoint and endpoint */
+       for (iptr = (void *)((unsigned int)start & ~(L1_CACHE_BYTES - 1));
+            iptr < end; iptr += L1_CACHE_BYTES)
+               cache_op(Fill, iptr);
+
+       __asm__ __volatile__ (
+               "startpoint:                                    \n"
+       );
+
+       /* Put the DDRC into self-refresh mode */
+       DDRC_INDIRECT_WRITE(DDRC_CTL(10), 0xb, 1 << 16);
+
+       /*
+        * IMPORTANT!
+        * DO NOT do anything from here on out that might even
+        * think about fetching from RAM - i.e., don't call any
+        * non-inlined functions, and be VERY sure that any inline
+        * functions you do call do NOT access any sort of RAM
+        * anywhere!
+        */
+
+       /* Wait a bit for the DDRC to settle */
+       for (i = 0; i < 100000000; i++);
+
+#if defined(CONFIG_PMC_MSP7120_GW)
+       /*
+        * Set GPIO 9 HI, (tied to board reset logic)
+        * GPIO 9 is the 4th GPIO of register 3
+        *
+        * NOTE: We cannot use the higher-level msp_gpio_mode()/out()
+        * as GPIO char driver may not be enabled and it would look up
+        * data in RAM!
+        */
+       set_value_reg32(GPIO_CFG3_REG, 0xf000, 0x8000);
+       set_reg32(GPIO_DATA3_REG, 8);
+
+       /*
+        * In case GPIO9 doesn't reset the board (jumper configurable!)
+        * fallback to device reset below.
+        */
+#endif
+       /* Set bit 1 of the MSP7120 reset register */
+       *RST_SET_REG = 0x00000001;
+
+       __asm__ __volatile__ (
+               "endpoint:                                      \n"
+       );
+}
+#endif
+
+/*
+ * Machine restart hook (installed as _machine_restart).  Uses the
+ * chip-specific reset on MSP7120 boards; otherwise falls back to an
+ * uncached jump to the ROM reset vector.
+ */
+void msp_restart(char *command)
+{
+       printk(KERN_WARNING "Now rebooting .......\n");
+
+#if defined(CONFIG_PMC_MSP7120_EVAL) || \
+    defined(CONFIG_PMC_MSP7120_GW) || \
+    defined(CONFIG_PMC_MSP7120_FPGA)
+       msp7120_reset();
+#else
+       /* No chip-specific reset code, just jump to the ROM reset vector */
+       set_c0_status(ST0_BEV | ST0_ERL);
+       change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
+       flush_cache_all();
+       write_c0_wired(0);
+
+       __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
+#endif
+}
+
+/*
+ * Machine halt hook: spin forever in the lowest-power wait available,
+ * preferring the board's cpu_wait hook (which carries any CPU
+ * workarounds) over a raw "wait" instruction.
+ */
+void msp_halt(void)
+{
+       printk(KERN_WARNING "\n** You can safely turn off the power\n");
+       while (1)
+               /* If possible call official function to get CPU WARs */
+               if (cpu_wait)
+                       (*cpu_wait)();
+               else
+                       __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0");
+}
+
+/* Power-off hook: no software power control on MSP boards, so halt. */
+void msp_power_off(void)
+{
+       msp_halt();
+}
+
+/* Install the platform restart/halt/power-off hooks. */
+void __init plat_mem_setup(void)
+{
+       _machine_restart = msp_restart;
+       _machine_halt = msp_halt;
+       pm_power_off = msp_power_off;
+}
+
+extern struct plat_smp_ops msp_smtc_smp_ops;
+
+/*
+ * Early PROM/board initialization: capture the PMON argument/environment
+ * pointers from fw_arg*, identify the exact board from the chip family
+ * and revision registers, then bring up the command line, memory map,
+ * serial console, SMP ops and (optionally) the LED subsystem.
+ */
+void __init prom_init(void)
+{
+       unsigned long family;
+       unsigned long revision;
+
+       prom_argc = fw_arg0;
+       prom_argv = (char **)fw_arg1;
+       prom_envp = (char **)fw_arg2;
+
+       /*
+        * Someday we can use this with PMON2000 to get a
+        * platform call prom routines for output etc. without
+        * having to use grody hacks.  For now it's unused.
+        *
+        * struct callvectors *cv = (struct callvectors *) fw_arg3;
+        */
+       family = identify_family();
+       revision = identify_revision();
+
+       /* Map (family, revision, kernel config) onto a machine type */
+       switch (family) {
+       case FAMILY_FPGA:
+               if (FPGA_IS_MSP4200(revision)) {
+                       /* Old-style revision ID */
+                       mips_machtype = MACH_MSP4200_FPGA;
+               } else {
+                       mips_machtype = MACH_MSP_OTHER;
+               }
+               break;
+
+       case FAMILY_MSP4200:
+#if defined(CONFIG_PMC_MSP4200_EVAL)
+               mips_machtype  = MACH_MSP4200_EVAL;
+#elif defined(CONFIG_PMC_MSP4200_GW)
+               mips_machtype  = MACH_MSP4200_GW;
+#else
+               mips_machtype = MACH_MSP_OTHER;
+#endif
+               break;
+
+       case FAMILY_MSP4200_FPGA:
+               mips_machtype  = MACH_MSP4200_FPGA;
+               break;
+
+       case FAMILY_MSP7100:
+#if defined(CONFIG_PMC_MSP7120_EVAL)
+               mips_machtype = MACH_MSP7120_EVAL;
+#elif defined(CONFIG_PMC_MSP7120_GW)
+               mips_machtype = MACH_MSP7120_GW;
+#else
+               mips_machtype = MACH_MSP_OTHER;
+#endif
+               break;
+
+       case FAMILY_MSP7100_FPGA:
+               mips_machtype  = MACH_MSP7120_FPGA;
+               break;
+
+       default:
+               /* we don't recognize the machine */
+               mips_machtype  = MACH_UNKNOWN;
+               panic("***Bogosity factor five***, exiting");
+               break;
+       }
+
+       prom_init_cmdline();
+
+       prom_meminit();
+
+       /*
+        * Sub-system setup follows.
+        * Setup functions can  either be called here or using the
+        * subsys_initcall mechanism (i.e. see msp_pci_setup). The
+        * order in which they are called can be changed by using the
+        * link order in arch/mips/pmc-sierra/msp71xx/Makefile.
+        *
+        * NOTE: Please keep sub-system specific initialization code
+        * in separate specific files.
+        */
+       msp_serial_setup();
+
+       /* Prefer VSMP; fall back to SMTC ops if VSMP is unavailable */
+       if (register_vsmp_smp_ops()) {
+#ifdef CONFIG_MIPS_MT_SMTC
+               register_smp_ops(&msp_smtc_smp_ops);
+#endif
+       }
+
+#ifdef CONFIG_PMCTWILED
+       /*
+        * Setup LED states before the subsys_initcall loads other
+        * dependent drivers/modules.
+        */
+       pmctwiled_setup();
+#endif
+}
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
new file mode 100644 (file)
index 0000000..1017058
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
+ * Copyright (C) 2001 Ralf Baechle
+ * Copyright (C) 2010 PMC-Sierra, Inc.
+ *
+ *  VSMP support for MSP platforms . Derived from malta vsmp support.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ */
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define MIPS_CPU_IPI_RESCHED_IRQ 0     /* SW int 0 for resched */
+#define MIPS_CPU_IPI_CALL_IRQ 1                /* SW int 1 for call */
+
+
+/* Vectored-interrupt dispatcher for the resched IPI (SW int 0). */
+static void ipi_resched_dispatch(void)
+{
+       do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+/* Vectored-interrupt dispatcher for the function-call IPI (SW int 1). */
+static void ipi_call_dispatch(void)
+{
+       do_IRQ(MIPS_CPU_IPI_CALL_IRQ);
+}
+
+/*
+ * Handler for the reschedule IPI.  The actual reschedule happens on
+ * the interrupt return path, but the scheduler must still be told the
+ * IPI arrived (scheduler_ipi() handles nohz/idle bookkeeping) — the
+ * original empty handler omitted that call, unlike the Malta code
+ * this file is derived from.
+ */
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+       scheduler_ipi();
+
+       return IRQ_HANDLED;
+}
+
+/* Handler for the function-call IPI: run the queued cross-CPU calls. */
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+       smp_call_function_interrupt();
+
+       return IRQ_HANDLED;
+}
+
+/* irqactions for the two IPIs; IRQF_PERCPU as each CPU takes its own. */
+static struct irqaction irq_resched = {
+       .handler        = ipi_resched_interrupt,
+       .flags          = IRQF_PERCPU,
+       .name           = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+       .handler        = ipi_call_interrupt,
+       .flags          = IRQF_PERCPU,
+       .name           = "IPI_call"
+};
+
+/* Wire up one IPI irq and mark it as a per-CPU interrupt. */
+void __init arch_init_ipiirq(int irq, struct irqaction *action)
+{
+       setup_irq(irq, action);
+       irq_set_handler(irq, handle_percpu_irq);
+}
+
+/* Install vectored-interrupt handlers and irqactions for both IPIs. */
+void __init msp_vsmp_int_init(void)
+{
+       set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+       set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+       arch_init_ipiirq(MIPS_CPU_IPI_RESCHED_IRQ, &irq_resched);
+       arch_init_ipiirq(MIPS_CPU_IPI_CALL_IRQ, &irq_call);
+}
+#endif /* CONFIG_MIPS_MT_SMP */
diff --git a/arch/mips/pmcs-msp71xx/msp_smtc.c b/arch/mips/pmcs-msp71xx/msp_smtc.c
new file mode 100644 (file)
index 0000000..c8dcc1c
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * MSP71xx Platform-specific hooks for SMP operation
+ */
+#include <linux/irq.h>
+#include <linux/init.h>
+
+#include <asm/mipsmtregs.h>
+#include <asm/mipsregs.h>
+#include <asm/smtc.h>
+#include <asm/smtc_ipi.h>
+
+/* VPE/SMP Prototype implements platform interfaces directly */
+
+/*
+ * Cause the specified action to be performed on a targeted "CPU"
+ */
+
+/* Send one IPI via the SMTC core. */
+static void msp_smtc_send_ipi_single(int cpu, unsigned int action)
+{
+       /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
+       smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
+}
+
+/* Fan an IPI out to every CPU set in @mask. */
+static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
+                                               unsigned int action)
+{
+       unsigned int i;
+
+       for_each_cpu(i, mask)
+               msp_smtc_send_ipi_single(i, action);
+}
+
+/*
+ * Post-config but pre-boot cleanup entry point
+ */
+/*
+ * Post-config but pre-boot cleanup entry point
+ */
+static void __cpuinit msp_smtc_init_secondary(void)
+{
+       int myvpe;
+
+       /*
+        * Don't enable external I/O interrupts (IP2) for secondary VPEs
+        * (comment inherited from the Malta code this is derived from).
+        */
+       myvpe = read_c0_tcbind() & TCBIND_CURVPE;
+       if (myvpe > 0)
+               change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+                               STATUSF_IP6 | STATUSF_IP7);
+       smtc_init_secondary();
+}
+
+/*
+ * Platform "CPU" startup hook
+ */
+/*
+ * Platform "CPU" startup hook: delegate entirely to the SMTC core.
+ */
+static void __cpuinit msp_smtc_boot_secondary(int cpu,
+                                       struct task_struct *idle)
+{
+       smtc_boot_secondary(cpu, idle);
+}
+
+/*
+ * SMP initialization finalization entry point
+ */
+/*
+ * SMP initialization finalization entry point: delegate to SMTC core.
+ */
+static void __cpuinit msp_smtc_smp_finish(void)
+{
+       smtc_smp_finish();
+}
+
+/*
+ * Hook for after all CPUs are online
+ */
+
+/* Hook for after all CPUs are online: nothing to do on MSP. */
+static void msp_smtc_cpus_done(void)
+{
+}
+
+/*
+ * Platform SMP pre-initialization
+ *
+ * As noted above, we can assume a single CPU for now
+ * but it may be multithreaded.
+ */
+
+static void __init msp_smtc_smp_setup(void)
+{
+       /*
+        * We won't get the definitive sibling count until
+        * smtc_prepare_cpus() runs later; this is an early estimate.
+        * Config3 bit 2 (MT) indicates multithreading support.
+        */
+
+       if (read_c0_config3() & (1 << 2))
+               smp_num_siblings = smtc_build_cpu_map(0);
+}
+
+/* Prepare up to @max_cpus TCs/VPEs via the SMTC core. */
+static void __init msp_smtc_prepare_cpus(unsigned int max_cpus)
+{
+       smtc_prepare_cpus(max_cpus);
+}
+
+/* SMTC plat_smp_ops, registered by prom_init() when VSMP is absent. */
+struct plat_smp_ops msp_smtc_smp_ops = {
+       .send_ipi_single        = msp_smtc_send_ipi_single,
+       .send_ipi_mask          = msp_smtc_send_ipi_mask,
+       .init_secondary         = msp_smtc_init_secondary,
+       .smp_finish             = msp_smtc_smp_finish,
+       .cpus_done              = msp_smtc_cpus_done,
+       .boot_secondary         = msp_smtc_boot_secondary,
+       .smp_setup              = msp_smtc_smp_setup,
+       .prepare_cpus           = msp_smtc_prepare_cpus,
+};
diff --git a/arch/mips/pmcs-msp71xx/msp_time.c b/arch/mips/pmcs-msp71xx/msp_time.c
new file mode 100644 (file)
index 0000000..8f12ecc
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Setting up the clock on MSP SOCs.  No RTC typically.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
+ *
+ * ########################################################################
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+
+#include <asm/cevt-r4k.h>
+#include <asm/mipsregs.h>
+#include <asm/time.h>
+
+#include <msp_prom.h>
+#include <msp_int.h>
+#include <msp_regs.h>
+
+#define get_current_vpe()   \
+       ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
+
+static struct irqaction timer_vpe1;
+static int tim_installed;
+
+void __init plat_time_init(void)
+{
+       char    *endp, *s;
+       unsigned long cpu_rate = 0;
+
+       if (cpu_rate == 0) {
+               s = prom_getenv("clkfreqhz");
+               cpu_rate = simple_strtoul(s, &endp, 10);
+               if (endp != NULL && *endp != 0) {
+                       printk(KERN_ERR
+                               "Clock rate in Hz parse error: %s\n", s);
+                       cpu_rate = 0;
+               }
+       }
+
+       if (cpu_rate == 0) {
+               s = prom_getenv("clkfreq");
+               cpu_rate = 1000 * simple_strtoul(s, &endp, 10);
+               if (endp != NULL && *endp != 0) {
+                       printk(KERN_ERR
+                               "Clock rate in MHz parse error: %s\n", s);
+                       cpu_rate = 0;
+               }
+       }
+
+       if (cpu_rate == 0) {
+#if defined(CONFIG_PMC_MSP7120_EVAL) \
+ || defined(CONFIG_PMC_MSP7120_GW)
+               cpu_rate = 400000000;
+#elif defined(CONFIG_PMC_MSP7120_FPGA)
+               cpu_rate = 25000000;
+#else
+               cpu_rate = 150000000;
+#endif
+               printk(KERN_ERR
+                       "Failed to determine CPU clock rate, "
+                       "assuming %ld hz ...\n", cpu_rate);
+       }
+
+       printk(KERN_WARNING "Clock rate set to %ld\n", cpu_rate);
+
+       /* timer frequency is 1/2 clock rate */
+       mips_hpt_frequency = cpu_rate/2;
+}
+
+/*
+ * Return the CP0 compare (timer) interrupt number for the calling VPE.
+ * On first use from VPE1, also install the timer irqaction for it
+ * (copied from the generic c0_compare_irqaction).
+ */
+unsigned int __cpuinit get_c0_compare_int(void)
+{
+       /* MIPS_MT modes may want timer for second VPE */
+       if ((get_current_vpe()) && !tim_installed) {
+               memcpy(&timer_vpe1, &c0_compare_irqaction, sizeof(timer_vpe1));
+               setup_irq(MSP_INT_VPE1_TIMER, &timer_vpe1);
+               tim_installed++;
+       }
+
+       return get_current_vpe() ? MSP_INT_VPE1_TIMER : MSP_INT_VPE0_TIMER;
+}
diff --git a/arch/mips/pmcs-msp71xx/msp_usb.c b/arch/mips/pmcs-msp71xx/msp_usb.c
new file mode 100644 (file)
index 0000000..4dab915
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * The setup file for USB related hardware on PMC-Sierra MSP processors.
+ *
+ * Copyright 2006 PMC-Sierra, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_GADGET)
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <asm/mipsregs.h>
+
+#include <msp_regs.h>
+#include <msp_int.h>
+#include <msp_prom.h>
+#include <msp_usb.h>
+
+
+#if defined(CONFIG_USB_EHCI_HCD)
+static struct resource msp_usbhost0_resources[] = {
+       [0] = { /* EHCI-HS operational and capabilities registers */
+               .start  = MSP_USB0_HS_START,
+               .end    = MSP_USB0_HS_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_USB,
+               .end    = MSP_INT_USB,
+               .flags  = IORESOURCE_IRQ,
+       },
+       [2] = { /* MSBus-to-AMBA bridge register space */
+               .start  = MSP_USB0_MAB_START,
+               .end    = MSP_USB0_MAB_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [3] = { /* Identification and general hardware parameters */
+               .start  = MSP_USB0_ID_START,
+               .end    = MSP_USB0_ID_END,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static u64 msp_usbhost0_dma_mask = 0xffffffffUL;
+
+static struct mspusb_device msp_usbhost0_device = {
+       .dev    = {
+               .name   = "pmcmsp-ehci",
+               .id     = 0,
+               .dev    = {
+                       .dma_mask = &msp_usbhost0_dma_mask,
+                       .coherent_dma_mask = 0xffffffffUL,
+               },
+               .num_resources  = ARRAY_SIZE(msp_usbhost0_resources),
+               .resource       = msp_usbhost0_resources,
+       },
+};
+
+/* MSP7140/MSP82XX has two USB2 hosts. */
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+static u64 msp_usbhost1_dma_mask = 0xffffffffUL;
+
+static struct resource msp_usbhost1_resources[] = {
+       [0] = { /* EHCI-HS operational and capabilities registers */
+               .start  = MSP_USB1_HS_START,
+               .end    = MSP_USB1_HS_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_USB,
+               .end    = MSP_INT_USB,
+               .flags  = IORESOURCE_IRQ,
+       },
+       [2] = { /* MSBus-to-AMBA bridge register space */
+               .start  = MSP_USB1_MAB_START,
+               .end    = MSP_USB1_MAB_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [3] = { /* Identification and general hardware parameters */
+               .start  = MSP_USB1_ID_START,
+               .end    = MSP_USB1_ID_END,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct mspusb_device msp_usbhost1_device = {
+       .dev    = {
+               .name   = "pmcmsp-ehci",
+               .id     = 1,
+               .dev    = {
+                       .dma_mask = &msp_usbhost1_dma_mask,
+                       .coherent_dma_mask = 0xffffffffUL,
+               },
+               .num_resources  = ARRAY_SIZE(msp_usbhost1_resources),
+               .resource       = msp_usbhost1_resources,
+       },
+};
+#endif /* CONFIG_MSP_HAS_DUAL_USB */
+#endif /* CONFIG_USB_EHCI_HCD */
+
+#if defined(CONFIG_USB_GADGET)
+static struct resource msp_usbdev0_resources[] = {
+       [0] = { /* EHCI-HS operational and capabilities registers */
+               .start  = MSP_USB0_HS_START,
+               .end    = MSP_USB0_HS_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_USB,
+               .end    = MSP_INT_USB,
+               .flags  = IORESOURCE_IRQ,
+       },
+       [2] = { /* MSBus-to-AMBA bridge register space */
+               .start  = MSP_USB0_MAB_START,
+               .end    = MSP_USB0_MAB_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [3] = { /* Identification and general hardware parameters */
+               .start  = MSP_USB0_ID_START,
+               .end    = MSP_USB0_ID_END,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static u64 msp_usbdev_dma_mask = 0xffffffffUL;
+
+/* This may need to be converted to a mspusb_device, too. */
+static struct mspusb_device msp_usbdev0_device = {
+       .dev    = {
+               .name   = "msp71xx_udc",
+               .id     = 0,
+               .dev    = {
+                       .dma_mask = &msp_usbdev_dma_mask,
+                       .coherent_dma_mask = 0xffffffffUL,
+               },
+               .num_resources  = ARRAY_SIZE(msp_usbdev0_resources),
+               .resource       = msp_usbdev0_resources,
+       },
+};
+
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+static struct resource msp_usbdev1_resources[] = {
+       [0] = { /* EHCI-HS operational and capabilities registers */
+               .start  = MSP_USB1_HS_START,
+               .end    = MSP_USB1_HS_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = MSP_INT_USB,
+               .end    = MSP_INT_USB,
+               .flags  = IORESOURCE_IRQ,
+       },
+       [2] = { /* MSBus-to-AMBA bridge register space */
+               .start  = MSP_USB1_MAB_START,
+               .end    = MSP_USB1_MAB_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [3] = { /* Identification and general hardware parameters */
+               .start  = MSP_USB1_ID_START,
+               .end    = MSP_USB1_ID_END,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+/* This may need to be converted to a mspusb_device, too. */
+/* This may need to be converted to a mspusb_device, too. */
+static struct mspusb_device msp_usbdev1_device = {
+       .dev    = {
+               .name   = "msp71xx_udc",
+               /*
+                * BUG FIX: was 0, which duplicates msp_usbdev0_device's
+                * name+id pair and would collide when both UDC devices
+                * are registered on dual-USB parts.
+                */
+               .id     = 1,
+               .dev    = {
+                       .dma_mask = &msp_usbdev_dma_mask,
+                       .coherent_dma_mask = 0xffffffffUL,
+               },
+               .num_resources  = ARRAY_SIZE(msp_usbdev1_resources),
+               .resource       = msp_usbdev1_resources,
+       },
+};
+
+#endif /* CONFIG_MSP_HAS_DUAL_USB */
+#endif /* CONFIG_USB_GADGET */
+
+static int __init msp_usb_setup(void)
+{
+       char            *strp;
+       char            envstr[32];
+       struct platform_device *msp_devs[NUM_USB_DEVS];
+       unsigned int val;
+
+       /* construct environment name usbmode */
+       /* set usbmode <host/device> as pmon environment var */
+       /*
+        * Could this perhaps be integrated into the "features" env var?
+        * Use the features key "U", and follow with "H" for host-mode,
+        * "D" for device-mode.  If it works for Ethernet, why not USB...
+        *  -- hammtrev, 2007/03/22
+        */
+       snprintf((char *)&envstr[0], sizeof(envstr), "usbmode");
+
+       /* set default host mode */
+       val = 1;
+
+       /* get environment string */
+       strp = prom_getenv((char *)&envstr[0]);
+       if (strp) {
+               /* compare string */
+               if (!strcmp(strp, "device"))
+                       val = 0;
+       }
+
+       if (val) {
+#if defined(CONFIG_USB_EHCI_HCD)
+               msp_devs[0] = &msp_usbhost0_device.dev;
+               ppfinit("platform add USB HOST done %s.\n", msp_devs[0]->name);
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+               msp_devs[1] = &msp_usbhost1_device.dev;
+               ppfinit("platform add USB HOST done %s.\n", msp_devs[1]->name);
+#endif
+#else
+               ppfinit("%s: echi_hcd not supported\n", __FILE__);
+#endif /* CONFIG_USB_EHCI_HCD */
+       } else {
+#if defined(CONFIG_USB_GADGET)
+               /* get device mode structure */
+               msp_devs[0] = &msp_usbdev0_device.dev;
+               ppfinit("platform add USB DEVICE done %s.\n"
+                                       , msp_devs[0]->name);
+#ifdef CONFIG_MSP_HAS_DUAL_USB
+               msp_devs[1] = &msp_usbdev1_device.dev;
+               ppfinit("platform add USB DEVICE done %s.\n"
+                                       , msp_devs[1]->name);
+#endif
+#else
+               ppfinit("%s: usb_gadget not supported\n", __FILE__);
+#endif /* CONFIG_USB_GADGET */
+       }
+       /* add device */
+       platform_add_devices(msp_devs, ARRAY_SIZE(msp_devs));
+
+       return 0;
+}
+
+subsys_initcall(msp_usb_setup);
+#endif /* CONFIG_USB_EHCI_HCD || CONFIG_USB_GADGET */
index 7e6ec4d..794526c 100644 (file)
@@ -1,5 +1,5 @@
 # NXP STB225
 platform-$(CONFIG_SOC_PNX833X) += pnx833x/
-cflags-$(CONFIG_SOC_PNX833X)    += -Iarch/mips/include/asm/mach-pnx833x
+cflags-$(CONFIG_SOC_PNX833X)   += -Iarch/mips/include/asm/mach-pnx833x
 load-$(CONFIG_NXP_STB220)      += 0xffffffff80001000
 load-$(CONFIG_NXP_STB225)      += 0xffffffff80001000
index a86d5d5..a4a9059 100644 (file)
@@ -35,64 +35,64 @@ static int mips_cpu_timer_irq;
 static const unsigned int irq_prio[PNX833X_PIC_NUM_IRQ] =
 {
     0, /* unused */
-    4, /* PNX833X_PIC_I2C0_INT                 1 */
-    4, /* PNX833X_PIC_I2C1_INT                 2 */
-    1, /* PNX833X_PIC_UART0_INT                3 */
-    1, /* PNX833X_PIC_UART1_INT                4 */
-    6, /* PNX833X_PIC_TS_IN0_DV_INT            5 */
-    6, /* PNX833X_PIC_TS_IN0_DMA_INT           6 */
-    7, /* PNX833X_PIC_GPIO_INT                 7 */
-    4, /* PNX833X_PIC_AUDIO_DEC_INT            8 */
-    5, /* PNX833X_PIC_VIDEO_DEC_INT            9 */
-    4, /* PNX833X_PIC_CONFIG_INT              10 */
-    4, /* PNX833X_PIC_AOI_INT                 11 */
-    9, /* PNX833X_PIC_SYNC_INT                12 */
-    9, /* PNX8335_PIC_SATA_INT                13 */
-    4, /* PNX833X_PIC_OSD_INT                 14 */
-    9, /* PNX833X_PIC_DISP1_INT               15 */
-    4, /* PNX833X_PIC_DEINTERLACER_INT        16 */
-    9, /* PNX833X_PIC_DISPLAY2_INT            17 */
-    4, /* PNX833X_PIC_VC_INT                  18 */
-    4, /* PNX833X_PIC_SC_INT                  19 */
-    9, /* PNX833X_PIC_IDE_INT                 20 */
-    9, /* PNX833X_PIC_IDE_DMA_INT             21 */
-    6, /* PNX833X_PIC_TS_IN1_DV_INT           22 */
-    6, /* PNX833X_PIC_TS_IN1_DMA_INT          23 */
-    4, /* PNX833X_PIC_SGDX_DMA_INT            24 */
-    4, /* PNX833X_PIC_TS_OUT_INT              25 */
-    4, /* PNX833X_PIC_IR_INT                  26 */
-    3, /* PNX833X_PIC_VMSP1_INT               27 */
-    3, /* PNX833X_PIC_VMSP2_INT               28 */
-    4, /* PNX833X_PIC_PIBC_INT                29 */
-    4, /* PNX833X_PIC_TS_IN0_TRD_INT          30 */
-    4, /* PNX833X_PIC_SGDX_TPD_INT            31 */
-    5, /* PNX833X_PIC_USB_INT                 32 */
-    4, /* PNX833X_PIC_TS_IN1_TRD_INT          33 */
-    4, /* PNX833X_PIC_CLOCK_INT               34 */
-    4, /* PNX833X_PIC_SGDX_PARSER_INT         35 */
-    4, /* PNX833X_PIC_VMSP_DMA_INT            36 */
+    4, /* PNX833X_PIC_I2C0_INT                1 */
+    4, /* PNX833X_PIC_I2C1_INT                2 */
+    1, /* PNX833X_PIC_UART0_INT                       3 */
+    1, /* PNX833X_PIC_UART1_INT                       4 */
+    6, /* PNX833X_PIC_TS_IN0_DV_INT           5 */
+    6, /* PNX833X_PIC_TS_IN0_DMA_INT          6 */
+    7, /* PNX833X_PIC_GPIO_INT                7 */
+    4, /* PNX833X_PIC_AUDIO_DEC_INT           8 */
+    5, /* PNX833X_PIC_VIDEO_DEC_INT           9 */
+    4, /* PNX833X_PIC_CONFIG_INT             10 */
+    4, /* PNX833X_PIC_AOI_INT                11 */
+    9, /* PNX833X_PIC_SYNC_INT               12 */
+    9, /* PNX8335_PIC_SATA_INT               13 */
+    4, /* PNX833X_PIC_OSD_INT                14 */
+    9, /* PNX833X_PIC_DISP1_INT                      15 */
+    4, /* PNX833X_PIC_DEINTERLACER_INT       16 */
+    9, /* PNX833X_PIC_DISPLAY2_INT           17 */
+    4, /* PNX833X_PIC_VC_INT                 18 */
+    4, /* PNX833X_PIC_SC_INT                 19 */
+    9, /* PNX833X_PIC_IDE_INT                20 */
+    9, /* PNX833X_PIC_IDE_DMA_INT            21 */
+    6, /* PNX833X_PIC_TS_IN1_DV_INT          22 */
+    6, /* PNX833X_PIC_TS_IN1_DMA_INT         23 */
+    4, /* PNX833X_PIC_SGDX_DMA_INT           24 */
+    4, /* PNX833X_PIC_TS_OUT_INT             25 */
+    4, /* PNX833X_PIC_IR_INT                 26 */
+    3, /* PNX833X_PIC_VMSP1_INT                      27 */
+    3, /* PNX833X_PIC_VMSP2_INT                      28 */
+    4, /* PNX833X_PIC_PIBC_INT               29 */
+    4, /* PNX833X_PIC_TS_IN0_TRD_INT         30 */
+    4, /* PNX833X_PIC_SGDX_TPD_INT           31 */
+    5, /* PNX833X_PIC_USB_INT                32 */
+    4, /* PNX833X_PIC_TS_IN1_TRD_INT         33 */
+    4, /* PNX833X_PIC_CLOCK_INT                      34 */
+    4, /* PNX833X_PIC_SGDX_PARSER_INT        35 */
+    4, /* PNX833X_PIC_VMSP_DMA_INT           36 */
 #if defined(CONFIG_SOC_PNX8335)
-    4, /* PNX8335_PIC_MIU_INT                 37 */
-    4, /* PNX8335_PIC_AVCHIP_IRQ_INT          38 */
-    9, /* PNX8335_PIC_SYNC_HD_INT             39 */
-    9, /* PNX8335_PIC_DISP_HD_INT             40 */
-    9, /* PNX8335_PIC_DISP_SCALER_INT         41 */
-    4, /* PNX8335_PIC_OSD_HD1_INT             42 */
-    4, /* PNX8335_PIC_DTL_WRITER_Y_INT        43 */
-    4, /* PNX8335_PIC_DTL_WRITER_C_INT        44 */
+    4, /* PNX8335_PIC_MIU_INT                37 */
+    4, /* PNX8335_PIC_AVCHIP_IRQ_INT         38 */
+    9, /* PNX8335_PIC_SYNC_HD_INT            39 */
+    9, /* PNX8335_PIC_DISP_HD_INT            40 */
+    9, /* PNX8335_PIC_DISP_SCALER_INT        41 */
+    4, /* PNX8335_PIC_OSD_HD1_INT            42 */
+    4, /* PNX8335_PIC_DTL_WRITER_Y_INT       43 */
+    4, /* PNX8335_PIC_DTL_WRITER_C_INT       44 */
     4, /* PNX8335_PIC_DTL_EMULATOR_Y_IR_INT   45 */
     4, /* PNX8335_PIC_DTL_EMULATOR_C_IR_INT   46 */
-    4, /* PNX8335_PIC_DENC_TTX_INT            47 */
-    4, /* PNX8335_PIC_MMI_SIF0_INT            48 */
-    4, /* PNX8335_PIC_MMI_SIF1_INT            49 */
-    4, /* PNX8335_PIC_MMI_CDMMU_INT           50 */
-    4, /* PNX8335_PIC_PIBCS_INT               51 */
-   12, /* PNX8335_PIC_ETHERNET_INT            52 */
-    3, /* PNX8335_PIC_VMSP1_0_INT             53 */
-    3, /* PNX8335_PIC_VMSP1_1_INT             54 */
-    4, /* PNX8335_PIC_VMSP1_DMA_INT           55 */
-    4, /* PNX8335_PIC_TDGR_DE_INT             56 */
-    4, /* PNX8335_PIC_IR1_IRQ_INT             57 */
+    4, /* PNX8335_PIC_DENC_TTX_INT           47 */
+    4, /* PNX8335_PIC_MMI_SIF0_INT           48 */
+    4, /* PNX8335_PIC_MMI_SIF1_INT           49 */
+    4, /* PNX8335_PIC_MMI_CDMMU_INT          50 */
+    4, /* PNX8335_PIC_PIBCS_INT                      51 */
+   12, /* PNX8335_PIC_ETHERNET_INT           52 */
+    3, /* PNX8335_PIC_VMSP1_0_INT            53 */
+    3, /* PNX8335_PIC_VMSP1_1_INT            54 */
+    4, /* PNX8335_PIC_VMSP1_DMA_INT          55 */
+    4, /* PNX8335_PIC_TDGR_DE_INT            56 */
+    4, /* PNX8335_PIC_IR1_IRQ_INT            57 */
 #endif
 };
 
index 05a1d92..d22dc0d 100644 (file)
@@ -6,7 +6,7 @@
  *    Daniel Laird <daniel.j.laird@nxp.com>
  *
  *  Based on software written by:
- *      Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ *     Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@
 #include <irq-mapping.h>
 #include <pnx833x.h>
 
-static u64 uart_dmamask     = DMA_BIT_MASK(32);
+static u64 uart_dmamask            = DMA_BIT_MASK(32);
 
 static struct resource pnx833x_uart_resources[] = {
        [0] = {
@@ -69,7 +69,7 @@ static struct resource pnx833x_uart_resources[] = {
 
 struct pnx8xxx_port pnx8xxx_ports[] = {
        [0] = {
-               .port   = {
+               .port   = {
                        .type           = PORT_PNX8XXX,
                        .iotype         = UPIO_MEM,
                        .membase        = (void __iomem *)PNX833X_UART0_PORTS_START,
@@ -82,7 +82,7 @@ struct pnx8xxx_port pnx8xxx_ports[] = {
                },
        },
        [1] = {
-               .port   = {
+               .port   = {
                        .type           = PORT_PNX8XXX,
                        .iotype         = UPIO_MEM,
                        .membase        = (void __iomem *)PNX833X_UART1_PORTS_START,
@@ -108,7 +108,7 @@ static struct platform_device pnx833x_uart_device = {
        .resource       = pnx833x_uart_resources,
 };
 
-static u64 ehci_dmamask     = DMA_BIT_MASK(32);
+static u64 ehci_dmamask            = DMA_BIT_MASK(32);
 
 static struct resource pnx833x_usb_ehci_resources[] = {
        [0] = {
@@ -183,7 +183,7 @@ static struct platform_device pnx833x_i2c0_device = {
        .dev = {
                .platform_data = &pnx833x_i2c_dev[0],
        },
-       .num_resources  = ARRAY_SIZE(pnx833x_i2c0_resources),
+       .num_resources  = ARRAY_SIZE(pnx833x_i2c0_resources),
        .resource       = pnx833x_i2c0_resources,
 };
 
@@ -193,7 +193,7 @@ static struct platform_device pnx833x_i2c1_device = {
        .dev = {
                .platform_data = &pnx833x_i2c_dev[1],
        },
-       .num_resources  = ARRAY_SIZE(pnx833x_i2c1_resources),
+       .num_resources  = ARRAY_SIZE(pnx833x_i2c1_resources),
        .resource       = pnx833x_i2c1_resources,
 };
 #endif
@@ -217,7 +217,7 @@ static struct platform_device pnx833x_ethernet_device = {
        .name = "ip3902-eth",
        .id   = -1,
        .dev  = {
-               .dma_mask          = &ethernet_dmamask,
+               .dma_mask          = &ethernet_dmamask,
                .coherent_dma_mask = DMA_BIT_MASK(32),
        },
        .num_resources = ARRAY_SIZE(pnx833x_ethernet_resources),
@@ -238,8 +238,8 @@ static struct resource pnx833x_sata_resources[] = {
 };
 
 static struct platform_device pnx833x_sata_device = {
-       .name          = "pnx833x-sata",
-       .id            = -1,
+       .name          = "pnx833x-sata",
+       .id            = -1,
        .num_resources = ARRAY_SIZE(pnx833x_sata_resources),
        .resource      = pnx833x_sata_resources,
 };
@@ -265,7 +265,7 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
                .chip_delay             = 25,
        },
        .ctrl = {
-               .cmd_ctrl               = pnx833x_flash_nand_cmd_ctrl
+               .cmd_ctrl               = pnx833x_flash_nand_cmd_ctrl
        }
 };
 
@@ -274,17 +274,17 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
  * 12 bytes more seems to be the standard that allows for NAND access.
  */
 static struct resource pnx833x_flash_nand_resource = {
-       .start  = PNX8335_NAND_BASE,
-       .end    = PNX8335_NAND_BASE + 12,
-       .flags  = IORESOURCE_MEM,
+       .start  = PNX8335_NAND_BASE,
+       .end    = PNX8335_NAND_BASE + 12,
+       .flags  = IORESOURCE_MEM,
 };
 
 static struct platform_device pnx833x_flash_nand = {
-       .name           = "gen_nand",
-       .id                     = -1,
+       .name           = "gen_nand",
+       .id                     = -1,
        .num_resources  = 1,
        .resource           = &pnx833x_flash_nand_resource,
-       .dev            = {
+       .dev            = {
                .platform_data = &pnx833x_flash_nand_data,
        },
 };
index 29969f9..dfafdd7 100644 (file)
@@ -6,7 +6,7 @@
  *    Daniel Laird <daniel.j.laird@nxp.com>
  *
  *  Based on software written by:
- *      Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ *     Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index e0ea96d..5cc9a9b 100644 (file)
@@ -6,7 +6,7 @@
  *    Daniel Laird <daniel.j.laird@nxp.com>
  *
  *  Based on software written by:
- *      Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ *     Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index e51fbc4..99b4d94 100644 (file)
@@ -6,7 +6,7 @@
  *    Daniel Laird <daniel.j.laird@nxp.com>
  *
  *  Based on software written by:
- *      Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ *     Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index 4b328ac..2ac5203 100644 (file)
@@ -6,7 +6,7 @@
  *    Daniel Laird <daniel.j.laird@nxp.com>
  *
  *  Based on software written by:
- *      Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ *     Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx8550/Makefile b/arch/mips/pnx8550/Makefile
deleted file mode 100644 (file)
index 3f7e856..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_SOC_PNX8550)      += common/
-obj-$(CONFIG_PNX8550_JBS)      += jbs/
-obj-$(CONFIG_PNX8550_STB810)   += stb810/
diff --git a/arch/mips/pnx8550/Platform b/arch/mips/pnx8550/Platform
deleted file mode 100644 (file)
index 0e7fbde..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-platform-$(CONFIG_SOC_PNX8550) += pnx8550/
-
-cflags-$(CONFIG_SOC_PNX8550)   +=                                      \
-               -I$(srctree)/arch/mips/include/asm/mach-pnx8550
-
-load-$(CONFIG_PNX8550_JBS)     += 0xffffffff80060000
-load-$(CONFIG_PNX8550_STB810)  += 0xffffffff80060000
diff --git a/arch/mips/pnx8550/common/Makefile b/arch/mips/pnx8550/common/Makefile
deleted file mode 100644 (file)
index f8ce695..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Per Hallsmark, per.hallsmark@mvista.com
-#
-# ########################################################################
-#
-# This program is free software; you can distribute it and/or modify it
-# under the terms of the GNU General Public License (Version 2) as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-# #######################################################################
-#
-# Makefile for the PNX8550 specific kernel interface routines
-# under Linux.
-#
-
-obj-y := setup.o prom.o int.o reset.o time.o proc.o platform.o
-obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c
deleted file mode 100644 (file)
index ec684b8..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- *
- * Copyright (C) 2005 Embedded Alley Solutions, Inc
- * Ported to 2.6.
- *
- * Per Hallsmark, per.hallsmark@mvista.com
- * Copyright (C) 2000, 2001 MIPS Technologies, Inc.
- * Copyright (C) 2001 Ralf Baechle
- *
- * Cleaned up and bug fixing: Pete Popov, ppopov@embeddedalley.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- */
-#include <linux/compiler.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/random.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <int.h>
-#include <uart.h>
-
-/* default prio for interrupts */
-/* first one is a no-no so therefore always prio 0 (disabled) */
-static char gic_prio[PNX8550_INT_GIC_TOTINT] = {
-       0, 1, 1, 1, 1, 15, 1, 1, 1, 1,  //   0 -  9
-       1, 1, 1, 1, 1, 1, 1, 1, 1, 1,   //  10 - 19
-       1, 1, 1, 1, 1, 1, 1, 1, 1, 1,   //  20 - 29
-       1, 1, 1, 1, 1, 1, 1, 1, 1, 1,   //  30 - 39
-       1, 1, 1, 1, 1, 1, 1, 1, 1, 1,   //  40 - 49
-       1, 1, 1, 1, 1, 1, 1, 1, 2, 1,   //  50 - 59
-       1, 1, 1, 1, 1, 1, 1, 1, 1, 1,   //  60 - 69
-       1                       //  70
-};
-
-static void hw0_irqdispatch(int irq)
-{
-       /* find out which interrupt */
-       irq = PNX8550_GIC_VECTOR_0 >> 3;
-
-       if (irq == 0) {
-               printk("hw0_irqdispatch: irq 0, spurious interrupt?\n");
-               return;
-       }
-       do_IRQ(PNX8550_INT_GIC_MIN + irq);
-}
-
-
-static void timer_irqdispatch(int irq)
-{
-       irq = (0x01c0 & read_c0_config7()) >> 6;
-
-       if (unlikely(irq == 0)) {
-               printk("timer_irqdispatch: irq 0, spurious interrupt?\n");
-               return;
-       }
-
-       if (irq & 0x1)
-               do_IRQ(PNX8550_INT_TIMER1);
-       if (irq & 0x2)
-               do_IRQ(PNX8550_INT_TIMER2);
-       if (irq & 0x4)
-               do_IRQ(PNX8550_INT_TIMER3);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
-       unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-
-       if (pending & STATUSF_IP2)
-               hw0_irqdispatch(2);
-       else if (pending & STATUSF_IP7) {
-               if (read_c0_config7() & 0x01c0)
-                       timer_irqdispatch(7);
-       } else
-               spurious_interrupt();
-}
-
-static inline void modify_cp0_intmask(unsigned clr_mask, unsigned set_mask)
-{
-       unsigned long status = read_c0_status();
-
-       status &= ~((clr_mask & 0xFF) << 8);
-       status |= (set_mask & 0xFF) << 8;
-
-       write_c0_status(status);
-}
-
-static inline void mask_gic_int(unsigned int irq_nr)
-{
-       /* interrupt disabled, bit 26(WE_ENABLE)=1 and bit 16(enable)=0 */
-       PNX8550_GIC_REQ(irq_nr) = 1<<28; /* set priority to 0 */
-}
-
-static inline void unmask_gic_int(unsigned int irq_nr)
-{
-       /* set prio mask to lower four bits and enable interrupt */
-       PNX8550_GIC_REQ(irq_nr) = (1<<26 | 1<<16) | (1<<28) | gic_prio[irq_nr];
-}
-
-static inline void mask_irq(struct irq_data *d)
-{
-       unsigned int irq_nr = d->irq;
-
-       if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
-               modify_cp0_intmask(1 << irq_nr, 0);
-       } else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
-               (irq_nr <= PNX8550_INT_GIC_MAX)) {
-               mask_gic_int(irq_nr - PNX8550_INT_GIC_MIN);
-       } else if ((PNX8550_INT_TIMER_MIN <= irq_nr) &&
-               (irq_nr <= PNX8550_INT_TIMER_MAX)) {
-               modify_cp0_intmask(1 << 7, 0);
-       } else {
-               printk("mask_irq: irq %d doesn't exist!\n", irq_nr);
-       }
-}
-
-static inline void unmask_irq(struct irq_data *d)
-{
-       unsigned int irq_nr = d->irq;
-
-       if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
-               modify_cp0_intmask(0, 1 << irq_nr);
-       } else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
-               (irq_nr <= PNX8550_INT_GIC_MAX)) {
-               unmask_gic_int(irq_nr - PNX8550_INT_GIC_MIN);
-       } else if ((PNX8550_INT_TIMER_MIN <= irq_nr) &&
-               (irq_nr <= PNX8550_INT_TIMER_MAX)) {
-               modify_cp0_intmask(0, 1 << 7);
-       } else {
-               printk("mask_irq: irq %d doesn't exist!\n", irq_nr);
-       }
-}
-
-int pnx8550_set_gic_priority(int irq, int priority)
-{
-       int gic_irq = irq-PNX8550_INT_GIC_MIN;
-       int prev_priority = PNX8550_GIC_REQ(gic_irq) & 0xf;
-
-        gic_prio[gic_irq] = priority;
-       PNX8550_GIC_REQ(gic_irq) |= (0x10000000 | gic_prio[gic_irq]);
-
-       return prev_priority;
-}
-
-static struct irq_chip level_irq_type = {
-       .name =         "PNX Level IRQ",
-       .irq_mask =     mask_irq,
-       .irq_unmask =   unmask_irq,
-};
-
-static struct irqaction gic_action = {
-       .handler =      no_action,
-       .flags =        IRQF_NO_THREAD,
-       .name =         "GIC",
-};
-
-static struct irqaction timer_action = {
-       .handler =      no_action,
-       .flags =        IRQF_TIMER,
-       .name =         "Timer",
-};
-
-void __init arch_init_irq(void)
-{
-       int i;
-       int configPR;
-
-       for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++)
-               irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
-
-       /* init of GIC/IPC interrupts */
-       /* should be done before cp0 since cp0 init enables the GIC int */
-       for (i = PNX8550_INT_GIC_MIN; i <= PNX8550_INT_GIC_MAX; i++) {
-               int gic_int_line = i - PNX8550_INT_GIC_MIN;
-               if (gic_int_line == 0 )
-                       continue;       // don't fiddle with int 0
-               /*
-                * enable change of TARGET, ENABLE and ACTIVE_LOW bits
-                * set TARGET        0 to route through hw0 interrupt
-                * set ACTIVE_LOW    0 active high  (correct?)
-                *
-                * We really should setup an interrupt description table
-                * to do this nicely.
-                * Note, PCI INTA is active low on the bus, but inverted
-                * in the GIC, so to us it's active high.
-                */
-               PNX8550_GIC_REQ(i - PNX8550_INT_GIC_MIN) = 0x1E000000;
-
-               /* mask/priority is still 0 so we will not get any
-                * interrupts until it is unmasked */
-
-               irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
-       }
-
-       /* Priority level 0 */
-       PNX8550_GIC_PRIMASK_0 = PNX8550_GIC_PRIMASK_1 = 0;
-
-       /* Set int vector table address */
-       PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
-
-       irq_set_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
-                                handle_level_irq);
-       setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
-
-       /* init of Timer interrupts */
-       for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
-               irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
-
-       /* Stop Timer 1-3 */
-       configPR = read_c0_config7();
-       configPR |= 0x00000038;
-       write_c0_config7(configPR);
-
-       irq_set_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
-                                handle_level_irq);
-       setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
-}
-
-EXPORT_SYMBOL(pnx8550_set_gic_priority);
diff --git a/arch/mips/pnx8550/common/pci.c b/arch/mips/pnx8550/common/pci.c
deleted file mode 100644 (file)
index 98e86dd..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *
- * Author: source@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <pci.h>
-#include <glb.h>
-#include <nand.h>
-
-static struct resource pci_io_resource = {
-       .start  = PNX8550_PCIIO + 0x1000,       /* reserve regacy I/O space */
-       .end    = PNX8550_PCIIO + PNX8550_PCIIO_SIZE,
-       .name   = "pci IO space",
-       .flags  = IORESOURCE_IO
-};
-
-static struct resource pci_mem_resource = {
-       .start  = PNX8550_PCIMEM,
-       .end    = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1,
-       .name   = "pci memory space",
-       .flags  = IORESOURCE_MEM
-};
-
-extern struct pci_ops pnx8550_pci_ops;
-
-static struct pci_controller pnx8550_controller = {
-       .pci_ops        = &pnx8550_pci_ops,
-       .io_map_base    = PNX8550_PORT_BASE,
-       .io_resource    = &pci_io_resource,
-       .mem_resource   = &pci_mem_resource,
-};
-
-/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
-static inline unsigned long get_system_mem_size(void)
-{
-       /* Read IP2031_RANK0_ADDR_LO */
-       unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
-       /* Read IP2031_RANK1_ADDR_HI */
-       unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
-
-       return dram_r1_hi - dram_r0_lo + 1;
-}
-
-static int __init pnx8550_pci_setup(void)
-{
-       int pci_mem_code;
-       int mem_size = get_system_mem_size() >> 20;
-
-       /* Clear the Global 2 Register, PCI Inta Output Enable Registers
-          Bit 1:Enable DAC Powerdown
-         -> 0:DACs are enabled and are working normally
-            1:DACs are powerdown
-          Bit 0:Enable of PCI inta output
-         -> 0 = Disable PCI inta output
-            1 = Enable PCI inta output
-       */
-       PNX8550_GLB2_ENAB_INTA_O = 0;
-
-       /* Calc the PCI mem size code */
-       if (mem_size >= 128)
-               pci_mem_code = SIZE_128M;
-       else if (mem_size >= 64)
-               pci_mem_code = SIZE_64M;
-       else if (mem_size >= 32)
-               pci_mem_code = SIZE_32M;
-       else
-               pci_mem_code = SIZE_16M;
-
-       /* Set PCI_XIO registers */
-       outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO);
-       outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI);
-       outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO);
-       outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI);
-
-       /* Send memory transaction via PCI_BASE2 */
-       outl(0x00000001, PCI_BASE | PCI_IO);
-
-       /* Unlock the setup register */
-       outl(0xca, PCI_BASE | PCI_UNLOCKREG);
-
-       /*
-        * BAR0 of PNX8550 (pci base 10) must be zero in order for ide
-        * to work, and in order for bus_to_baddr to work without any
-        * hacks.
-        */
-       outl(0x00000000, PCI_BASE | PCI_BASE10);
-
-       /*
-        *These two bars are set by default or the boot code.
-        * However, it's safer to set them here so we're not boot
-        * code dependent.
-        */
-       outl(0x1be00000, PCI_BASE | PCI_BASE14);  /* PNX MMIO */
-       outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18);  /* XIO      */
-
-       outl(PCI_EN_TA |
-            PCI_EN_PCI2MMI |
-            PCI_EN_XIO |
-            PCI_SETUP_BASE18_SIZE(SIZE_32M) |
-            PCI_SETUP_BASE18_EN |
-            PCI_SETUP_BASE14_EN |
-            PCI_SETUP_BASE10_PREF |
-            PCI_SETUP_BASE10_SIZE(pci_mem_code) |
-            PCI_SETUP_CFGMANAGE_EN |
-            PCI_SETUP_PCIARB_EN,
-            PCI_BASE |
-            PCI_SETUP);        /* PCI_SETUP */
-       outl(0x00000000, PCI_BASE | PCI_CTRL);  /* PCI_CONTROL */
-
-       register_pci_controller(&pnx8550_controller);
-
-       return 0;
-}
-
-arch_initcall(pnx8550_pci_setup);
diff --git a/arch/mips/pnx8550/common/platform.c b/arch/mips/pnx8550/common/platform.c
deleted file mode 100644 (file)
index 0a8faea..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Platform device support for NXP PNX8550 SoCs
- *
- * Copyright 2005, Embedded Alley Solutions, Inc
- *
- * Based on arch/mips/au1000/common/platform.c
- * Platform device support for Au1x00 SoCs.
- *
- * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/resource.h>
-#include <linux/serial.h>
-#include <linux/serial_pnx8xxx.h>
-#include <linux/platform_device.h>
-#include <linux/usb/ohci_pdriver.h>
-
-#include <int.h>
-#include <usb.h>
-#include <uart.h>
-
-static struct resource pnx8550_usb_ohci_resources[] = {
-       [0] = {
-               .start          = PNX8550_USB_OHCI_OP_BASE,
-               .end            = PNX8550_USB_OHCI_OP_BASE +
-                                 PNX8550_USB_OHCI_OP_LEN,
-               .flags          = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start          = PNX8550_INT_USB,
-               .end            = PNX8550_INT_USB,
-               .flags          = IORESOURCE_IRQ,
-       },
-};
-
-static struct resource pnx8550_uart_resources[] = {
-       [0] = {
-               .start          = PNX8550_UART_PORT0,
-               .end            = PNX8550_UART_PORT0 + 0xfff,
-               .flags          = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start          = PNX8550_UART_INT(0),
-               .end            = PNX8550_UART_INT(0),
-               .flags          = IORESOURCE_IRQ,
-       },
-       [2] = {
-               .start          = PNX8550_UART_PORT1,
-               .end            = PNX8550_UART_PORT1 + 0xfff,
-               .flags          = IORESOURCE_MEM,
-       },
-       [3] = {
-               .start          = PNX8550_UART_INT(1),
-               .end            = PNX8550_UART_INT(1),
-               .flags          = IORESOURCE_IRQ,
-       },
-};
-
-struct pnx8xxx_port pnx8xxx_ports[] = {
-       [0] = {
-               .port   = {
-                       .type           = PORT_PNX8XXX,
-                       .iotype         = UPIO_MEM,
-                       .membase        = (void __iomem *)PNX8550_UART_PORT0,
-                       .mapbase        = PNX8550_UART_PORT0,
-                       .irq            = PNX8550_UART_INT(0),
-                       .uartclk        = 3692300,
-                       .fifosize       = 16,
-                       .flags          = UPF_BOOT_AUTOCONF,
-                       .line           = 0,
-               },
-       },
-       [1] = {
-               .port   = {
-                       .type           = PORT_PNX8XXX,
-                       .iotype         = UPIO_MEM,
-                       .membase        = (void __iomem *)PNX8550_UART_PORT1,
-                       .mapbase        = PNX8550_UART_PORT1,
-                       .irq            = PNX8550_UART_INT(1),
-                       .uartclk        = 3692300,
-                       .fifosize       = 16,
-                       .flags          = UPF_BOOT_AUTOCONF,
-                       .line           = 1,
-               },
-       },
-};
-
-/* The dmamask must be set for OHCI to work */
-static u64 ohci_dmamask = DMA_BIT_MASK(32);
-
-static u64 uart_dmamask = DMA_BIT_MASK(32);
-
-static int pnx8550_usb_ohci_power_on(struct platform_device *pdev)
-{
-       /*
-        * Set register CLK48CTL to enable and 48MHz
-        */
-       outl(0x00000003, PCI_BASE | 0x0004770c);
-
-       /*
-        * Set register CLK12CTL to enable and 48MHz
-        */
-       outl(0x00000003, PCI_BASE | 0x00047710);
-
-       udelay(100);
-
-       return 0;
-}
-
-static void pnx8550_usb_ohci_power_off(struct platform_device *pdev)
-{
-       udelay(10);
-}
-
-static struct usb_ohci_pdata pnx8550_usb_ohci_pdata = {
-       .power_on       = pnx8550_usb_ohci_power_on,
-       .power_off      = pnx8550_usb_ohci_power_off,
-};
-
-static struct platform_device pnx8550_usb_ohci_device = {
-       .name           = "ohci-platform",
-       .id             = -1,
-       .dev = {
-               .dma_mask               = &ohci_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(32),
-               .platform_data          = &pnx8550_usb_ohci_pdata,
-       },
-       .num_resources  = ARRAY_SIZE(pnx8550_usb_ohci_resources),
-       .resource       = pnx8550_usb_ohci_resources,
-};
-
-static struct platform_device pnx8550_uart_device = {
-       .name           = "pnx8xxx-uart",
-       .id             = -1,
-       .dev = {
-               .dma_mask               = &uart_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(32),
-               .platform_data = pnx8xxx_ports,
-       },
-       .num_resources  = ARRAY_SIZE(pnx8550_uart_resources),
-       .resource       = pnx8550_uart_resources,
-};
-
-static struct platform_device *pnx8550_platform_devices[] __initdata = {
-       &pnx8550_usb_ohci_device,
-       &pnx8550_uart_device,
-};
-
-static int __init pnx8550_platform_init(void)
-{
-       return platform_add_devices(pnx8550_platform_devices,
-                                   ARRAY_SIZE(pnx8550_platform_devices));
-}
-
-arch_initcall(pnx8550_platform_init);
diff --git a/arch/mips/pnx8550/common/proc.c b/arch/mips/pnx8550/common/proc.c
deleted file mode 100644 (file)
index 3bba5ec..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/random.h>
-
-#include <asm/io.h>
-#include <int.h>
-#include <uart.h>
-
-
-static int pnx8550_timers_read(char* page, char** start, off_t offset, int count, int* eof, void* data)
-{
-        int len = 0;
-       int configPR = read_c0_config7();
-
-        if (offset==0) {
-               len += sprintf(&page[len], "Timer:       count,  compare, tc, status\n");
-                len += sprintf(&page[len], "    1: %11i, %8i,  %1i, %s\n",
-                              read_c0_count(), read_c0_compare(),
-                             (configPR>>6)&0x1, ((configPR>>3)&0x1)? "off":"on");
-                len += sprintf(&page[len], "    2: %11i, %8i,  %1i, %s\n",
-                              read_c0_count2(), read_c0_compare2(),
-                             (configPR>>7)&0x1, ((configPR>>4)&0x1)? "off":"on");
-                len += sprintf(&page[len], "    3: %11i, %8i,  %1i, %s\n",
-                              read_c0_count3(), read_c0_compare3(),
-                             (configPR>>8)&0x1, ((configPR>>5)&0x1)? "off":"on");
-        }
-
-        return len;
-}
-
-static int pnx8550_registers_read(char* page, char** start, off_t offset, int count, int* eof, void* data)
-{
-        int len = 0;
-
-        if (offset==0) {
-                len += sprintf(&page[len], "config1:   %#10.8x\n", read_c0_config1());
-                len += sprintf(&page[len], "config2:   %#10.8x\n", read_c0_config2());
-                len += sprintf(&page[len], "config3:   %#10.8x\n", read_c0_config3());
-                len += sprintf(&page[len], "configPR:  %#10.8x\n", read_c0_config7());
-                len += sprintf(&page[len], "status:    %#10.8x\n", read_c0_status());
-                len += sprintf(&page[len], "cause:     %#10.8x\n", read_c0_cause());
-                len += sprintf(&page[len], "count:     %#10.8x\n", read_c0_count());
-                len += sprintf(&page[len], "count_2:   %#10.8x\n", read_c0_count2());
-                len += sprintf(&page[len], "count_3:   %#10.8x\n", read_c0_count3());
-                len += sprintf(&page[len], "compare:   %#10.8x\n", read_c0_compare());
-                len += sprintf(&page[len], "compare_2: %#10.8x\n", read_c0_compare2());
-                len += sprintf(&page[len], "compare_3: %#10.8x\n", read_c0_compare3());
-        }
-
-        return len;
-}
-
-static struct proc_dir_entry* pnx8550_dir;
-static struct proc_dir_entry* pnx8550_timers;
-static struct proc_dir_entry* pnx8550_registers;
-
-static int pnx8550_proc_init( void )
-{
-
-       // Create /proc/pnx8550
-        pnx8550_dir = proc_mkdir("pnx8550", NULL);
-        if (!pnx8550_dir) {
-                printk(KERN_ERR "Can't create pnx8550 proc dir\n");
-                return -1;
-        }
-
-       // Create /proc/pnx8550/timers
-        pnx8550_timers = create_proc_read_entry(
-               "timers",
-               0,
-               pnx8550_dir,
-               pnx8550_timers_read,
-               NULL);
-
-        if (!pnx8550_timers)
-                printk(KERN_ERR "Can't create pnx8550 timers proc file\n");
-
-       // Create /proc/pnx8550/registers
-        pnx8550_registers = create_proc_read_entry(
-               "registers",
-               0,
-               pnx8550_dir,
-               pnx8550_registers_read,
-               NULL);
-
-        if (!pnx8550_registers)
-                printk(KERN_ERR "Can't create pnx8550 registers proc file\n");
-
-       return 0;
-}
-
-__initcall(pnx8550_proc_init);
diff --git a/arch/mips/pnx8550/common/prom.c b/arch/mips/pnx8550/common/prom.c
deleted file mode 100644 (file)
index 49639e8..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- *
- * Per Hallsmark, per.hallsmark@mvista.com
- *
- * Based on jmr3927/common/prom.c
- *
- * 2004 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/serial_pnx8xxx.h>
-
-#include <asm/bootinfo.h>
-#include <uart.h>
-
-/* #define DEBUG_CMDLINE */
-
-extern int prom_argc;
-extern char **prom_argv, **prom_envp;
-
-typedef struct
-{
-    char *name;
-/*    char *val; */
-}t_env_var;
-
-
-char * __init prom_getcmdline(void)
-{
-       return &(arcs_cmdline[0]);
-}
-
-void __init prom_init_cmdline(void)
-{
-       int i;
-
-       arcs_cmdline[0] = '\0';
-       for (i = 0; i < prom_argc; i++) {
-               strcat(arcs_cmdline, prom_argv[i]);
-               strcat(arcs_cmdline, " ");
-       }
-}
-
-char *prom_getenv(char *envname)
-{
-       /*
-        * Return a pointer to the given environment variable.
-        * Environment variables are stored in the form of "memsize=64".
-        */
-
-       t_env_var *env = (t_env_var *)prom_envp;
-       int i;
-
-       i = strlen(envname);
-
-       while(env->name) {
-               if(strncmp(envname, env->name, i) == 0) {
-                       return(env->name + strlen(envname) + 1);
-               }
-               env++;
-       }
-       return(NULL);
-}
-
-inline unsigned char str2hexnum(unsigned char c)
-{
-       if(c >= '0' && c <= '9')
-               return c - '0';
-       if(c >= 'a' && c <= 'f')
-               return c - 'a' + 10;
-       if(c >= 'A' && c <= 'F')
-               return c - 'A' + 10;
-       return 0; /* foo */
-}
-
-inline void str2eaddr(unsigned char *ea, unsigned char *str)
-{
-       int i;
-
-       for(i = 0; i < 6; i++) {
-               unsigned char num;
-
-               if((*str == '.') || (*str == ':'))
-                       str++;
-               num = str2hexnum(*str++) << 4;
-               num |= (str2hexnum(*str++));
-               ea[i] = num;
-       }
-}
-
-int get_ethernet_addr(char *ethernet_addr)
-{
-        char *ethaddr_str;
-
-        ethaddr_str = prom_getenv("ethaddr");
-       if (!ethaddr_str) {
-               printk("ethaddr not set in boot prom\n");
-               return -1;
-       }
-       str2eaddr(ethernet_addr, ethaddr_str);
-       return 0;
-}
-
-void __init prom_free_prom_memory(void)
-{
-}
-
-extern int pnx8550_console_port;
-
-/* used by early printk */
-void prom_putchar(char c)
-{
-       if (pnx8550_console_port != -1) {
-               /* Wait until FIFO not full */
-               while( ((ip3106_fifo(UART_BASE, pnx8550_console_port) & PNX8XXX_UART_FIFO_TXFIFO) >> 16) >= 16)
-                       ;
-               /* Send one char */
-               ip3106_fifo(UART_BASE, pnx8550_console_port) = c;
-       }
-}
-
-EXPORT_SYMBOL(get_ethernet_addr);
-EXPORT_SYMBOL(str2eaddr);
diff --git a/arch/mips/pnx8550/common/reset.c b/arch/mips/pnx8550/common/reset.c
deleted file mode 100644 (file)
index e7a12ff..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*.
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * Reset the PNX8550 board.
- *
- */
-#include <linux/kernel.h>
-
-#include <asm/processor.h>
-#include <asm/reboot.h>
-#include <glb.h>
-
-void pnx8550_machine_restart(char *command)
-{
-       PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
-}
-
-void pnx8550_machine_halt(void)
-{
-       while (1) {
-               if (cpu_wait)
-                       cpu_wait();
-       }
-}
diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c
deleted file mode 100644 (file)
index fccd6b0..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- *
- * 2.6 port, Embedded Alley Solutions, Inc
- *
- *  Based on Per Hallsmark, per.hallsmark@mvista.com
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/serial_pnx8xxx.h>
-#include <linux/pm.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-#include <asm/time.h>
-
-#include <glb.h>
-#include <int.h>
-#include <pci.h>
-#include <uart.h>
-#include <nand.h>
-
-extern void __init board_setup(void);
-extern void pnx8550_machine_restart(char *);
-extern void pnx8550_machine_halt(void);
-extern struct resource ioport_resource;
-extern struct resource iomem_resource;
-extern char *prom_getcmdline(void);
-
-struct resource standard_io_resources[] = {
-       {
-               .start  = 0x00,
-               .end    = 0x1f,
-               .name   = "dma1",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x40,
-               .end    = 0x5f,
-               .name   = "timer",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0x80,
-               .end    = 0x8f,
-               .name   = "dma page reg",
-               .flags  = IORESOURCE_BUSY
-       }, {
-               .start  = 0xc0,
-               .end    = 0xdf,
-               .name   = "dma2",
-               .flags  = IORESOURCE_BUSY
-       },
-};
-
-#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources)
-
-extern struct resource pci_io_resource;
-extern struct resource pci_mem_resource;
-
-/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
-unsigned long get_system_mem_size(void)
-{
-       /* Read IP2031_RANK0_ADDR_LO */
-       unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
-       /* Read IP2031_RANK1_ADDR_HI */
-       unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
-
-       return dram_r1_hi - dram_r0_lo + 1;
-}
-
-int pnx8550_console_port = -1;
-
-void __init plat_mem_setup(void)
-{
-       int i;
-       char* argptr;
-
-       board_setup();  /* board specific setup */
-
-        _machine_restart = pnx8550_machine_restart;
-        _machine_halt = pnx8550_machine_halt;
-        pm_power_off = pnx8550_machine_halt;
-
-       /* Clear the Global 2 Register, PCI Inta Output Enable Registers
-          Bit 1:Enable DAC Powerdown
-         -> 0:DACs are enabled and are working normally
-            1:DACs are powerdown
-          Bit 0:Enable of PCI inta output
-         -> 0 = Disable PCI inta output
-            1 = Enable PCI inta output
-       */
-       PNX8550_GLB2_ENAB_INTA_O = 0;
-
-       /* IO/MEM resources. */
-       set_io_port_base(PNX8550_PORT_BASE);
-       ioport_resource.start = 0;
-       ioport_resource.end = ~0;
-       iomem_resource.start = 0;
-       iomem_resource.end = ~0;
-
-       /* Request I/O space for devices on this board */
-       for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-               request_resource(&ioport_resource, standard_io_resources + i);
-
-       /* Place the Mode Control bit for GPIO pin 16 in primary function */
-       /* Pin 16 is used by UART1, UA1_TX                                */
-       outl((PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_16_BIT) |
-                       (PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_17_BIT),
-                       PNX8550_GPIO_MC1);
-
-       argptr = prom_getcmdline();
-       if ((argptr = strstr(argptr, "console=ttyS")) != NULL) {
-               argptr += strlen("console=ttyS");
-               pnx8550_console_port = *argptr == '0' ? 0 : 1;
-
-               /* We must initialize the UART (console) before early printk */
-               /* Set LCR to 8-bit and BAUD to 38400 (no 5)                */
-               ip3106_lcr(UART_BASE, pnx8550_console_port) =
-                       PNX8XXX_UART_LCR_8BIT;
-               ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
-       }
-}
diff --git a/arch/mips/pnx8550/common/time.c b/arch/mips/pnx8550/common/time.c
deleted file mode 100644 (file)
index 831d6b3..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright 2001, 2002, 2003 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
- *
- * Common time service routines for MIPS machines. See
- * Documents/MIPS/README.txt.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/param.h>
-#include <linux/time.h>
-#include <linux/timer.h>
-#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-#include <asm/time.h>
-#include <asm/hardirq.h>
-#include <asm/div64.h>
-#include <asm/debug.h>
-
-#include <int.h>
-#include <cm.h>
-
-static unsigned long cpj;
-
-static cycle_t hpt_read(struct clocksource *cs)
-{
-       return read_c0_count2();
-}
-
-static struct clocksource pnx_clocksource = {
-       .name           = "pnx8xxx",
-       .rating         = 200,
-       .read           = hpt_read,
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static irqreturn_t pnx8xxx_timer_interrupt(int irq, void *dev_id)
-{
-       struct clock_event_device *c = dev_id;
-
-       /* clear MATCH, signal the event */
-       c->event_handler(c);
-
-       return IRQ_HANDLED;
-}
-
-static struct irqaction pnx8xxx_timer_irq = {
-       .handler        = pnx8xxx_timer_interrupt,
-       .flags          = IRQF_PERCPU | IRQF_TIMER,
-       .name           = "pnx8xxx_timer",
-};
-
-static irqreturn_t monotonic_interrupt(int irq, void *dev_id)
-{
-       /* Timer 2 clear interrupt */
-       write_c0_compare2(-1);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction monotonic_irqaction = {
-       .handler = monotonic_interrupt,
-       .flags = IRQF_TIMER,
-       .name = "Monotonic timer",
-};
-
-static int pnx8xxx_set_next_event(unsigned long delta,
-                               struct clock_event_device *evt)
-{
-       write_c0_compare(delta);
-       return 0;
-}
-
-static struct clock_event_device pnx8xxx_clockevent = {
-       .name           = "pnx8xxx_clockevent",
-       .features       = CLOCK_EVT_FEAT_ONESHOT,
-       .set_next_event = pnx8xxx_set_next_event,
-};
-
-static inline void timer_ack(void)
-{
-       write_c0_compare(cpj);
-}
-
-__init void plat_time_init(void)
-{
-       unsigned int configPR;
-       unsigned int n;
-       unsigned int m;
-       unsigned int p;
-       unsigned int pow2p;
-
-       pnx8xxx_clockevent.cpumask = cpu_none_mask;
-       clockevents_register_device(&pnx8xxx_clockevent);
-       clocksource_register(&pnx_clocksource);
-
-       /* Timer 1 start */
-       configPR = read_c0_config7();
-       configPR &= ~0x00000008;
-       write_c0_config7(configPR);
-
-       /* Timer 2 start */
-       configPR = read_c0_config7();
-       configPR &= ~0x00000010;
-       write_c0_config7(configPR);
-
-       /* Timer 3 stop */
-       configPR = read_c0_config7();
-       configPR |= 0x00000020;
-       write_c0_config7(configPR);
-
-
-        /* PLL0 sets MIPS clock (PLL1 <=> TM1, PLL6 <=> TM2, PLL5 <=> mem) */
-        /* (but only if CLK_MIPS_CTL select value [bits 3:1] is 1:  FIXME) */
-
-        n = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_N_MASK) >> 16;
-        m = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_M_MASK) >> 8;
-        p = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_P_MASK) >> 2;
-       pow2p = (1 << p);
-
-       db_assert(m != 0 && pow2p != 0);
-
-        /*
-        * Compute the frequency as in the PNX8550 User Manual 1.0, p.186
-        * (a.k.a. 8-10).  Divide by HZ for a timer offset that results in
-        * HZ timer interrupts per second.
-        */
-       mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p));
-       cpj = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
-       write_c0_count(0);
-       timer_ack();
-
-       /* Setup Timer 2 */
-       write_c0_count2(0);
-       write_c0_compare2(0xffffffff);
-
-       setup_irq(PNX8550_INT_TIMER1, &pnx8xxx_timer_irq);
-       setup_irq(PNX8550_INT_TIMER2, &monotonic_irqaction);
-}
diff --git a/arch/mips/pnx8550/jbs/Makefile b/arch/mips/pnx8550/jbs/Makefile
deleted file mode 100644 (file)
index c4dc3d5..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Makefile for the NXP JBS Board.
-
-obj-y := init.o board_setup.o irqmap.o
diff --git a/arch/mips/pnx8550/jbs/board_setup.c b/arch/mips/pnx8550/jbs/board_setup.c
deleted file mode 100644 (file)
index 57dd903..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  JBS Specific board startup routines.
- *
- *  Copyright 2005, Embedded Alley Solutions, Inc
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-
-#include <glb.h>
-
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
-                                    "nop; nop; nop; nop; nop; nop;\n\t" \
-                                    ".set reorder\n\t")
-
-void __init board_setup(void)
-{
-       unsigned long configpr;
-
-       configpr = read_c0_config7();
-       configpr |= (1<<19); /* enable tlb */
-       write_c0_config7(configpr);
-       BARRIER;
-}
diff --git a/arch/mips/pnx8550/jbs/init.c b/arch/mips/pnx8550/jbs/init.c
deleted file mode 100644 (file)
index d59b4a4..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- *
- *  Copyright 2005 Embedded Alley Solutions, Inc
- *  source@embeddedalley.com
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <asm/addrspace.h>
-#include <asm/bootinfo.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-int prom_argc;
-char **prom_argv, **prom_envp;
-extern void  __init prom_init_cmdline(void);
-extern char *prom_getenv(char *envname);
-
-const char *get_system_type(void)
-{
-       return "NXP PNX8550/JBS";
-}
-
-void __init prom_init(void)
-{
-       unsigned long memsize;
-
-       //memsize = 0x02800000; /* Trimedia uses memory above */
-       memsize = 0x08000000; /* Trimedia uses memory above */
-       add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/pnx8550/jbs/irqmap.c b/arch/mips/pnx8550/jbs/irqmap.c
deleted file mode 100644 (file)
index 7fc8984..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- *  NXP JBS board irqmap.
- *
- *  Copyright 2005 Embedded Alley Solutions, Inc
- *  source@embeddealley.com
- *
- *  This program is free software; you can redistribute         it and/or modify it
- *  under  the terms of         the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED          ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,          INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED          TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA, OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN         CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <int.h>
-
-char pnx8550_irq_tab[][5] __initdata = {
-       [8]     = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-       [9]     = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-       [17]    = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-};
diff --git a/arch/mips/pnx8550/stb810/Makefile b/arch/mips/pnx8550/stb810/Makefile
deleted file mode 100644 (file)
index cb4ff02..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Makefile for the NXP STB810 Board.
-
-obj-y := prom_init.o board_setup.o irqmap.o
diff --git a/arch/mips/pnx8550/stb810/board_setup.c b/arch/mips/pnx8550/stb810/board_setup.c
deleted file mode 100644 (file)
index af2a55e..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- *  STB810 specific board startup routines.
- *
- *  Based on the arch/mips/nxp/pnx8550/jbs/board_setup.c
- *
- *  Author: MontaVista Software, Inc.
- *          source@mvista.com
- *
- *  Copyright 2005 MontaVista Software Inc.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the
- *  Free Software Foundation; either version 2 of the License, or (at your
- *  option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-
-#include <glb.h>
-
-void __init board_setup(void)
-{
-       unsigned long configpr;
-
-       configpr = read_c0_config7();
-       configpr |= (1<<19); /* enable tlb */
-       write_c0_config7(configpr);
-}
diff --git a/arch/mips/pnx8550/stb810/irqmap.c b/arch/mips/pnx8550/stb810/irqmap.c
deleted file mode 100644 (file)
index 8c03496..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *  NXP STB810 board irqmap.
- *
- *  Author: MontaVista Software, Inc.
- *          source@mvista.com
- *
- *  Copyright 2005 MontaVista Software Inc.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the
- *  Free Software Foundation; either version 2 of the License, or (at your
- *  option) any later version.
- */
-
-#include <linux/init.h>
-#include <int.h>
-
-char pnx8550_irq_tab[][5] __initdata = {
-       [8]     = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-       [9]     = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-       [10]    = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-};
diff --git a/arch/mips/pnx8550/stb810/prom_init.c b/arch/mips/pnx8550/stb810/prom_init.c
deleted file mode 100644 (file)
index ca7f4ad..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *  STB810 specific prom routines
- *
- *  Author: MontaVista Software, Inc.
- *          source@mvista.com
- *
- *  Copyright 2005 MontaVista Software Inc.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the
- *  Free Software Foundation; either version 2 of the License, or (at your
- *  option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <asm/addrspace.h>
-#include <asm/bootinfo.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-int prom_argc;
-char **prom_argv, **prom_envp;
-extern void  __init prom_init_cmdline(void);
-extern char *prom_getenv(char *envname);
-
-const char *get_system_type(void)
-{
-       return "NXP PNX8950/STB810";
-}
-
-void __init prom_init(void)
-{
-       unsigned long memsize;
-
-       prom_argc = (int) fw_arg0;
-       prom_argv = (char **) fw_arg1;
-       prom_envp = (char **) fw_arg2;
-
-       prom_init_cmdline();
-
-       memsize = 0x08000000; /* Trimedia uses memory above */
-       add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
index 26a6ef1..521e596 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2009 Lemote Inc.
  * Author: Hu Hongbing <huhb@lemote.com>
- *         Wu Zhangjin <wuzhangjin@gmail.com>
+ *        Wu Zhangjin <wuzhangjin@gmail.com>
  */
 #include <asm/suspend.h>
 #include <asm/fpu.h>
index 61e2558..7e0277a 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2009 Lemote Inc.
  * Author: Hu Hongbing <huhb@lemote.com>
- *         Wu Zhangjin <wuzhangjin@gmail.com>
+ *        Wu Zhangjin <wuzhangjin@gmail.com>
  */
 #include <asm/asm-offsets.h>
 #include <asm/regdef.h>
index 7773f3d..2f539b4 100644 (file)
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       Ken Eppinett
- *               David Schleef <ds@schleef.org>
+ * Author:      Ken Eppinett
+ *              David Schleef <ds@schleef.org>
  *
- * Description:  Defines the platform resources for the SA settop.
+ * Description:         Defines the platform resources for the SA settop.
  */
 
 #include <linux/init.h>
@@ -90,12 +90,12 @@ const struct register_map calliope_register_map __initconst = {
        .usb2_stbus_mess_size = {.phys = CALLIOPE_ADDR(0x9BFF04)},
        .usb2_stbus_chunk_size = {.phys = CALLIOPE_ADDR(0x9BFF08)},
 
-       .pcie_regs = {.phys = 0x000000},        /* -doesn't exist- */
+       .pcie_regs = {.phys = 0x000000},        /* -doesn't exist- */
        .tim_ch = {.phys = CALLIOPE_ADDR(0xA02C10)},
        .tim_cl = {.phys = CALLIOPE_ADDR(0xA02C14)},
        .gpio_dout = {.phys = CALLIOPE_ADDR(0xA02c20)},
        .gpio_din = {.phys = CALLIOPE_ADDR(0xA02c24)},
        .gpio_dir = {.phys = CALLIOPE_ADDR(0xA02c2C)},
        .watchdog = {.phys = CALLIOPE_ADDR(0xA02c30)},
-       .front_panel = {.phys = 0x000000},      /* -not used- */
+       .front_panel = {.phys = 0x000000},      /* -not used- */
 };
index da076db..7f8f342 100644 (file)
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       Ken Eppinett
- *               David Schleef <ds@schleef.org>
+ * Author:      Ken Eppinett
+ *              David Schleef <ds@schleef.org>
  *
- * Description:  Defines the platform resources for the SA settop.
+ * Description:         Defines the platform resources for the SA settop.
  */
 
 #include <linux/init.h>
index 47683b3..1265b49 100644 (file)
@@ -17,7 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       David VomLehn
+ * Author:      David VomLehn
  */
 
 #include <linux/init.h>
index 6ff4b10..14e7de1 100644 (file)
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       Ken Eppinett
- *               David Schleef <ds@schleef.org>
+ * Author:      Ken Eppinett
+ *              David Schleef <ds@schleef.org>
  *
- * Description:  Defines the platform resources for the SA settop.
+ * Description:         Defines the platform resources for the SA settop.
  */
 
 #include <linux/init.h>
index bce1872..d38b095 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *
- * Description:  Defines the platform resources for Gaia-based settops.
+ * Description:         Defines the platform resources for Gaia-based settops.
  *
  * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
  *
@@ -90,12 +90,12 @@ struct resource asic_resource = {
 
 /*
  * Allow override of bootloader-specified model
- * Returns zero on success, a negative errno value on failure.  This parameter
+ * Returns zero on success, a negative errno value on failure. This parameter
  * allows overriding of the bootloader-specified model.
  */
 static char __initdata cmdline[COMMAND_LINE_SIZE];
 
-#define        FORCEFAMILY_PARAM       "forcefamily"
+#define FORCEFAMILY_PARAM      "forcefamily"
 
 /*
  * check_forcefamily - check for, and parse, forcefamily command line parameter
@@ -486,7 +486,7 @@ static void __init pmem_setup_resource(void)
                resource->start = phys_to_dma(pmemaddr - 0x80000000);
                resource->end = resource->start + pmemlen - 1;
 
-               pr_info("persistent memory: start=0x%x  end=0x%x\n",
+               pr_info("persistent memory: start=0x%x  end=0x%x\n",
                        resource->start, resource->end);
        }
 }
index 99d82e1..f44cd92 100644 (file)
@@ -2,7 +2,7 @@
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
  * Copyright (C) 2001 Ralf Baechle
- * Portions copyright (C) 2009  Cisco Systems, Inc.
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
  *
  *  This program is free software; you can distribute it and/or modify it
  *  under the terms of the GNU General Public License (Version 2) as
@@ -64,7 +64,7 @@ static void asic_irqdispatch(void)
 
        irq = get_int();
        if (irq < 0)
-               return;  /* interrupt has already been cleared */
+               return;  /* interrupt has already been cleared */
 
        do_IRQ(irq);
 }
index fa9ae95..9344902 100644 (file)
@@ -5,8 +5,8 @@
  * Modified from arch/mips/kernel/irq-rm7000.c:
  * Copyright (C) 2003 Ralf Baechle
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 3fc5d46..98dc516 100644 (file)
@@ -17,8 +17,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       Ken Eppinett
- *               David Schleef <ds@schleef.org>
+ * Author:      Ken Eppinett
+ *              David Schleef <ds@schleef.org>
  */
 
 #include <linux/init.h>
@@ -153,7 +153,7 @@ struct resource non_dvr_calliope_resources[] __initdata =
         * End of Resource marker
         */
        {
-               .flags  = 0,
+               .flags  = 0,
        },
 };
 
@@ -260,7 +260,7 @@ struct resource non_dvr_vze_calliope_resources[] __initdata =
         * End of Resource marker
         */
        {
-               .flags  = 0,
+               .flags  = 0,
        },
 };
 
@@ -380,6 +380,6 @@ struct resource non_dvr_vzf_calliope_resources[] __initdata =
         * End of Resource marker
         */
        {
-               .flags  = 0,
+               .flags  = 0,
        },
 };
index c532b50..7c6ce75 100644 (file)
@@ -17,8 +17,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       Ken Eppinett
- *               David Schleef <ds@schleef.org>
+ * Author:      Ken Eppinett
+ *              David Schleef <ds@schleef.org>
  */
 
 #include <linux/init.h>
@@ -78,7 +78,7 @@ struct resource dvr_cronus_resources[] __initdata =
         *
         *  This memory area is used for allocating buffers for Video decoding
         *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
+        *  by the STAVMEM driver of the STAPI.  They could be Decimated
         *  Picture Buffers, Intermediate Buffers, as deemed necessary for
         *  video decoding purposes, for any video decoders on Zeus.
         */
@@ -185,7 +185,7 @@ struct resource dvr_cronus_resources[] __initdata =
         * End of Resource marker
         */
        {
-               .flags  = 0,
+               .flags  = 0,
        },
 };
 
@@ -241,7 +241,7 @@ struct resource non_dvr_cronus_resources[] __initdata =
         *
         *  This memory area is used for allocating buffers for Video decoding
         *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
+        *  by the STAVMEM driver of the STAPI.  They could be Decimated
         *  Picture Buffers, Intermediate Buffers, as deemed necessary for
         *  video decoding purposes, for any video decoders on Zeus.
         */
@@ -335,6 +335,6 @@ struct resource non_dvr_cronus_resources[] __initdata =
         * End of Resource marker
         */
        {
-               .flags  = 0,
+               .flags  = 0,
        },
 };
index b5537e4..a7937ba 100644 (file)
@@ -17,8 +17,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       Ken Eppinett
- *               David Schleef <ds@schleef.org>
+ * Author:      Ken Eppinett
+ *              David Schleef <ds@schleef.org>
  */
 
 #include <linux/init.h>
@@ -65,7 +65,7 @@ struct resource non_dvr_cronuslite_resources[] __initdata =
         *
         *  This memory area is used for allocating buffers for Video decoding
         *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
+        *  by the STAVMEM driver of the STAPI.  They could be Decimated
         *  Picture Buffers, Intermediate Buffers, as deemed necessary for
         *  video decoding purposes, for any video decoders on Zeus.
         */
@@ -169,6 +169,6 @@ struct resource non_dvr_cronuslite_resources[] __initdata =
         * End of Resource marker
         */
        {
-               .flags  = 0,
+               .flags  = 0,
        },
 };
index 8ac8c7a..2303bbf 100644 (file)
@@ -17,7 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       David VomLehn
+ * Author:      David VomLehn
  */
 
 #include <linux/init.h>
@@ -33,22 +33,22 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "ST231aImage",        /* Delta-Mu 1 image and ram */
-               .start  = 0x24000000,
-               .end    = 0x241FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_MEM,
+               .name   = "ST231aImage",        /* Delta-Mu 1 image and ram */
+               .start  = 0x24000000,
+               .end    = 0x241FFFFF,           /* 2MiB */
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "ST231aMonitor",      /* 8KiB block ST231a monitor */
-               .start  = 0x24200000,
-               .end    = 0x24201FFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "ST231aMonitor",      /* 8KiB block ST231a monitor */
+               .start  = 0x24200000,
+               .end    = 0x24201FFF,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "MediaMemory1",
-               .start  = 0x24202000,
-               .end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_MEM,
+               .name   = "MediaMemory1",
+               .start  = 0x24202000,
+               .end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -56,22 +56,22 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "ST231bImage",        /* Delta-Mu 2 image and ram */
-               .start  = 0x60000000,
-               .end    = 0x601FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_IO,
+               .name   = "ST231bImage",        /* Delta-Mu 2 image and ram */
+               .start  = 0x60000000,
+               .end    = 0x601FFFFF,           /* 2MiB */
+               .flags  = IORESOURCE_IO,
        },
        {
-               .name   = "ST231bMonitor",      /* 8KiB block ST231b monitor */
-               .start  = 0x60200000,
-               .end    = 0x60201FFF,
-               .flags  = IORESOURCE_IO,
+               .name   = "ST231bMonitor",      /* 8KiB block ST231b monitor */
+               .start  = 0x60200000,
+               .end    = 0x60201FFF,
+               .flags  = IORESOURCE_IO,
        },
        {
-               .name   = "MediaMemory2",
-               .start  = 0x60202000,
-               .end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_IO,
+               .name   = "MediaMemory2",
+               .start  = 0x60202000,
+               .end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -87,28 +87,28 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "DSP_Image_Buff",
-               .start  = 0x00000000,
-               .end    = 0x000FFFFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "DSP_Image_Buff",
+               .start  = 0x00000000,
+               .end    = 0x000FFFFF,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "ADSC_CPU_PCM_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00009FFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "ADSC_CPU_PCM_Buff",
+               .start  = 0x00000000,
+               .end    = 0x00009FFF,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "ADSC_AUX_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "ADSC_AUX_Buff",
+               .start  = 0x00000000,
+               .end    = 0x00003FFF,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "ADSC_Main_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "ADSC_Main_Buff",
+               .start  = 0x00000000,
+               .end    = 0x00003FFF,
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -119,16 +119,16 @@ struct resource dvr_gaia_resources[] __initdata = {
         * Arbitrary Based Buffers:
         *  This memory area is used for allocating buffers for Video decoding
         *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
+        *  by the STAVMEM driver of the STAPI.  They could be Decimated
         *  Picture Buffers, Intermediate Buffers, as deemed necessary for
         *  video decoding purposes, for any video decoders on Zeus.
         *
         */
        {
-               .name   = "AVMEMPartition0",
-               .start  = 0x63580000,
-               .end    = 0x64180000 - 1,  /* 12 MB total */
-               .flags  = IORESOURCE_IO,
+               .name   = "AVMEMPartition0",
+               .start  = 0x63580000,
+               .end    = 0x64180000 - 1,  /* 12 MB total */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -141,10 +141,10 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "Docsis",
-               .start  = 0x62000000,
-               .end    = 0x62700000 - 1,       /* 7 MB total */
-               .flags  = IORESOURCE_IO,
+               .name   = "Docsis",
+               .start  = 0x62000000,
+               .end    = 0x62700000 - 1,       /* 7 MB total */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -157,10 +157,10 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "GraphicsHeap",
-               .start  = 0x62700000,
-               .end    = 0x63500000 - 1,       /* 14 MB total */
-               .flags  = IORESOURCE_IO,
+               .name   = "GraphicsHeap",
+               .start  = 0x62700000,
+               .end    = 0x63500000 - 1,       /* 14 MB total */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -173,10 +173,10 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "MulticomSHM",
-               .start  = 0x26000000,
-               .end    = 0x26020000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "MulticomSHM",
+               .start  = 0x26000000,
+               .end    = 0x26020000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -189,10 +189,10 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "BMM_Buffer",
-               .start  = 0x00000000,
-               .end    = 0x00280000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "BMM_Buffer",
+               .start  = 0x00000000,
+               .end    = 0x00280000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -205,10 +205,10 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "DisplayBins0",
-               .start  = 0x00000000,
-               .end    = 0x00000FFF,           /* 4 KB total */
-               .flags  = IORESOURCE_MEM,
+               .name   = "DisplayBins0",
+               .start  = 0x00000000,
+               .end    = 0x00000FFF,           /* 4 KB total */
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -221,10 +221,10 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "DisplayBins1",
-               .start  = 0x64AD4000,
-               .end    = 0x64AD5000 - 1,  /* 4 KB total */
-               .flags  = IORESOURCE_IO,
+               .name   = "DisplayBins1",
+               .start  = 0x64AD4000,
+               .end    = 0x64AD5000 - 1,  /* 4 KB total */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -237,11 +237,11 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "ITFS",
-               .start  = 0x64180000,
+               .name   = "ITFS",
+               .start  = 0x64180000,
                /* 815,104 bytes each for 2 ITFS partitions. */
-               .end    = 0x6430DFFF,
-               .flags  = IORESOURCE_IO,
+               .end    = 0x6430DFFF,
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -254,17 +254,17 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "AvfsDmaMem",
-               .start  = 0x6430E000,
+               .name   = "AvfsDmaMem",
+               .start  = 0x6430E000,
                /* (945K * 8) = (128K *3) 5 playbacks / 3 server */
-               .end    = 0x64AD0000 - 1,
-               .flags  = IORESOURCE_IO,
+               .end    = 0x64AD0000 - 1,
+               .flags  = IORESOURCE_IO,
        },
        {
-               .name   = "AvfsFileSys",
-               .start  = 0x64AD0000,
-               .end    = 0x64AD1000 - 1,  /* 4K */
-               .flags  = IORESOURCE_IO,
+               .name   = "AvfsFileSys",
+               .start  = 0x64AD0000,
+               .end    = 0x64AD1000 - 1,  /* 4K */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -277,10 +277,10 @@ struct resource dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "SmartCardInfo",
-               .start  = 0x64AD1000,
-               .end    = 0x64AD3800 - 1,
-               .flags  = IORESOURCE_IO,
+               .name   = "SmartCardInfo",
+               .start  = 0x64AD1000,
+               .end    = 0x64AD3800 - 1,
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -290,22 +290,22 @@ struct resource dvr_gaia_resources[] __initdata = {
         *         NP IPC - must be video bank 2
         */
        {
-               .name   = "NP_Reset_Vector",
-               .start  = 0x27c00000,
-               .end    = 0x27c01000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "NP_Reset_Vector",
+               .start  = 0x27c00000,
+               .end    = 0x27c01000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "NP_Image",
-               .start  = 0x27020000,
-               .end    = 0x27060000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "NP_Image",
+               .start  = 0x27020000,
+               .end    = 0x27060000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "NP_IPC",
-               .start  = 0x63500000,
-               .end    = 0x63580000 - 1,
-               .flags  = IORESOURCE_IO,
+               .name   = "NP_IPC",
+               .start  = 0x63500000,
+               .end    = 0x63580000 - 1,
+               .flags  = IORESOURCE_IO,
        },
        /*
         * Add other resources here
@@ -323,22 +323,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "ST231aImage",        /* Delta-Mu 1 image and ram */
-               .start  = 0x24000000,
-               .end    = 0x241FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_MEM,
+               .name   = "ST231aImage",        /* Delta-Mu 1 image and ram */
+               .start  = 0x24000000,
+               .end    = 0x241FFFFF,           /* 2MiB */
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "ST231aMonitor",      /* 8KiB block ST231a monitor */
-               .start  = 0x24200000,
-               .end    = 0x24201FFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "ST231aMonitor",      /* 8KiB block ST231a monitor */
+               .start  = 0x24200000,
+               .end    = 0x24201FFF,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "MediaMemory1",
-               .start  = 0x24202000,
-               .end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_MEM,
+               .name   = "MediaMemory1",
+               .start  = 0x24202000,
+               .end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -346,22 +346,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "ST231bImage",        /* Delta-Mu 2 image and ram */
-               .start  = 0x60000000,
-               .end    = 0x601FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_IO,
+               .name   = "ST231bImage",        /* Delta-Mu 2 image and ram */
+               .start  = 0x60000000,
+               .end    = 0x601FFFFF,           /* 2MiB */
+               .flags  = IORESOURCE_IO,
        },
        {
-               .name   = "ST231bMonitor",      /* 8KiB block ST231b monitor */
-               .start  = 0x60200000,
-               .end    = 0x60201FFF,
-               .flags  = IORESOURCE_IO,
+               .name   = "ST231bMonitor",      /* 8KiB block ST231b monitor */
+               .start  = 0x60200000,
+               .end    = 0x60201FFF,
+               .flags  = IORESOURCE_IO,
        },
        {
-               .name   = "MediaMemory2",
-               .start  = 0x60202000,
-               .end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_IO,
+               .name   = "MediaMemory2",
+               .start  = 0x60202000,
+               .end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -377,28 +377,28 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "DSP_Image_Buff",
-               .start  = 0x00000000,
-               .end    = 0x000FFFFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "DSP_Image_Buff",
+               .start  = 0x00000000,
+               .end    = 0x000FFFFF,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "ADSC_CPU_PCM_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00009FFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "ADSC_CPU_PCM_Buff",
+               .start  = 0x00000000,
+               .end    = 0x00009FFF,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "ADSC_AUX_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "ADSC_AUX_Buff",
+               .start  = 0x00000000,
+               .end    = 0x00003FFF,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "ADSC_Main_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
+               .name   = "ADSC_Main_Buff",
+               .start  = 0x00000000,
+               .end    = 0x00003FFF,
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -409,16 +409,16 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         * Arbitrary Based Buffers:
         *  This memory area is used for allocating buffers for Video decoding
         *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
+        *  by the STAVMEM driver of the STAPI.  They could be Decimated
         *  Picture Buffers, Intermediate Buffers, as deemed necessary for
         *  video decoding purposes, for any video decoders on Zeus.
         *
         */
        {
-               .name   = "AVMEMPartition0",
-               .start  = 0x63580000,
-               .end    = 0x64180000 - 1,  /* 12 MB total */
-               .flags  = IORESOURCE_IO,
+               .name   = "AVMEMPartition0",
+               .start  = 0x63580000,
+               .end    = 0x64180000 - 1,  /* 12 MB total */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -431,10 +431,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "Docsis",
-               .start  = 0x62000000,
-               .end    = 0x62700000 - 1,       /* 7 MB total */
-               .flags  = IORESOURCE_IO,
+               .name   = "Docsis",
+               .start  = 0x62000000,
+               .end    = 0x62700000 - 1,       /* 7 MB total */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -447,10 +447,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "GraphicsHeap",
-               .start  = 0x62700000,
-               .end    = 0x63500000 - 1,       /* 14 MB total */
-               .flags  = IORESOURCE_IO,
+               .name   = "GraphicsHeap",
+               .start  = 0x62700000,
+               .end    = 0x63500000 - 1,       /* 14 MB total */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -463,10 +463,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "MulticomSHM",
-               .start  = 0x26000000,
-               .end    = 0x26020000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "MulticomSHM",
+               .start  = 0x26000000,
+               .end    = 0x26020000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -479,10 +479,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "BMM_Buffer",
-               .start  = 0x00000000,
-               .end    = 0x000AA000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "BMM_Buffer",
+               .start  = 0x00000000,
+               .end    = 0x000AA000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -495,10 +495,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "DisplayBins0",
-               .start  = 0x00000000,
-               .end    = 0x00000FFF,           /* 4 KB total */
-               .flags  = IORESOURCE_MEM,
+               .name   = "DisplayBins0",
+               .start  = 0x00000000,
+               .end    = 0x00000FFF,           /* 4 KB total */
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -511,10 +511,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "DisplayBins1",
-               .start  = 0x64AD4000,
-               .end    = 0x64AD5000 - 1,  /* 4 KB total */
-               .flags  = IORESOURCE_IO,
+               .name   = "DisplayBins1",
+               .start  = 0x64AD4000,
+               .end    = 0x64AD5000 - 1,  /* 4 KB total */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -523,10 +523,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "AvfsDmaMem",
-               .start  = 0x6430E000,
-               .end    = 0x645D2C00 - 1,  /* 945K * 3 for playback */
-               .flags  = IORESOURCE_IO,
+               .name   = "AvfsDmaMem",
+               .start  = 0x6430E000,
+               .end    = 0x645D2C00 - 1,  /* 945K * 3 for playback */
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -539,10 +539,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "DiagPersistentMemory",
-               .start  = 0x00000000,
-               .end    = 0x10000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "DiagPersistentMemory",
+               .start  = 0x00000000,
+               .end    = 0x10000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        /*
         *
@@ -555,10 +555,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *
         */
        {
-               .name   = "SmartCardInfo",
-               .start  = 0x64AD1000,
-               .end    = 0x64AD3800 - 1,
-               .flags  = IORESOURCE_IO,
+               .name   = "SmartCardInfo",
+               .start  = 0x64AD1000,
+               .end    = 0x64AD3800 - 1,
+               .flags  = IORESOURCE_IO,
        },
        /*
         *
@@ -568,22 +568,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
         *         NP IPC - must be video bank 2
         */
        {
-               .name   = "NP_Reset_Vector",
-               .start  = 0x27c00000,
-               .end    = 0x27c01000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "NP_Reset_Vector",
+               .start  = 0x27c00000,
+               .end    = 0x27c01000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "NP_Image",
-               .start  = 0x27020000,
-               .end    = 0x27060000 - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "NP_Image",
+               .start  = 0x27020000,
+               .end    = 0x27060000 - 1,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .name   = "NP_IPC",
-               .start  = 0x63500000,
-               .end    = 0x63580000 - 1,
-               .flags  = IORESOURCE_IO,
+               .name   = "NP_IPC",
+               .start  = 0x63500000,
+               .end    = 0x63580000 - 1,
+               .flags  = IORESOURCE_IO,
        },
        { },
 };
index 96480a2..6e76f09 100644 (file)
@@ -17,8 +17,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       Ken Eppinett
- *               David Schleef <ds@schleef.org>
+ * Author:      Ken Eppinett
+ *              David Schleef <ds@schleef.org>
  */
 
 #include <linux/init.h>
@@ -78,7 +78,7 @@ struct resource dvr_zeus_resources[] __initdata =
         *
         *  This memory area is used for allocating buffers for Video decoding
         *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
+        *  by the STAVMEM driver of the STAPI.  They could be Decimated
         *  Picture Buffers, Intermediate Buffers, as deemed necessary for
         *  video decoding purposes, for any video decoders on Zeus.
         */
@@ -175,7 +175,7 @@ struct resource dvr_zeus_resources[] __initdata =
         * End of Resource marker
         */
        {
-               .flags  = 0,
+               .flags  = 0,
        },
 };
 
@@ -299,6 +299,6 @@ struct resource non_dvr_zeus_resources[] __initdata =
         * End of Resource marker
         */
        {
-               .flags  = 0,
+               .flags  = 0,
        },
 };
index c697935..5bd9d8f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1999, 2000, 2004, 2005  MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
  *     All rights reserved.
  *     Authors: Carsten Langgaard <carstenl@mips.com>
  *              Maciej W. Rozycki <macro@mips.com>
index a77c6f6..d060478 100644 (file)
@@ -19,9 +19,9 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       David VomLehn <dvomlehn@cisco.com>
+ * Author:      David VomLehn <dvomlehn@cisco.com>
  *
- * Description:  Defines the platform resources for the SA settop.
+ * Description:         Defines the platform resources for the SA settop.
  *
  * NOTE: The bootloader allocates persistent memory at an address which is
  * 16 MiB below the end of the highest address in KSEG0. All fixed
index fb3d296..6e5f1bd 100644 (file)
@@ -60,7 +60,7 @@ unsigned long ptv_memsize;
  * struct low_mem_reserved - Items in low memory that are reserved
  * @start:     Physical address of item
  * @size:      Size, in bytes, of this item
- * @is_aliased:        True if this is RAM aliased from another location. If false,
+ * @is_aliased: True if this is RAM aliased from another location. If false,
  *             it is something other than aliased RAM and the RAM in the
  *             unaliased address is still visible outside of low memory.
  */
index b0e2afa..d845eac 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *                             powertv-usb.c
  *
- * Description:  ASIC-specific USB device setup and shutdown
+ * Description:         ASIC-specific USB device setup and shutdown
  *
  * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
  * Copyright (C) 2009 Cisco Systems, Inc.
@@ -20,8 +20,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
- * Author:       Ken Eppinett
- *               David Schleef <ds@schleef.org>
+ * Author:      Ken Eppinett
+ *              David Schleef <ds@schleef.org>
  *
  * NOTE: The bootloader allocates persistent memory at an address which is
  * 16 MiB below the end of the highest address in KSEG0. All fixed
 #define MCC2_GMII_RX2_CLOCK_SELECT     (1 << 16)
 
 #define ETHER_CLK_CONFIG       (MCC2_GMII_GCLK_TO_PAD |        \
-                                MCC2_ETHER125_0_CLOCK_SELECT | \
+                                MCC2_ETHER125_0_CLOCK_SELECT | \
                                 MCC2_RMII_0_CLOCK_SELECT |     \
                                 MCC2_GMII_TX0_CLOCK_SELECT |   \
                                 MCC2_GMII_RX0_CLOCK_SELECT |   \
-                                MCC2_ETHER125_1_CLOCK_SELECT | \
+                                MCC2_ETHER125_1_CLOCK_SELECT | \
                                 MCC2_RMII_1_CLOCK_SELECT |     \
                                 MCC2_GMII_TX1_CLOCK_SELECT |   \
                                 MCC2_GMII_RX1_CLOCK_SELECT |   \
-                                MCC2_ETHER125_2_CLOCK_SELECT | \
+                                MCC2_ETHER125_2_CLOCK_SELECT | \
                                 MCC2_RMII_2_CLOCK_SELECT |     \
                                 MCC2_GMII_TX2_CLOCK_SELECT |   \
                                 MCC2_GMII_RX2_CLOCK_SELECT)
@@ -98,9 +98,9 @@
 
 #define QAM_FS_DISABLE_DIVIDE_BY_3             (1 << 5)
 #define QAM_FS_ENABLE_PROGRAM                  (1 << 4)
-#define        QAM_FS_ENABLE_OUTPUT                    (1 << 3)
-#define        QAM_FS_SELECT_TEST_BYPASS               (1 << 2)
-#define        QAM_FS_DISABLE_DIGITAL_STANDBY          (1 << 1)
+#define QAM_FS_ENABLE_OUTPUT                   (1 << 3)
+#define QAM_FS_SELECT_TEST_BYPASS              (1 << 2)
+#define QAM_FS_DISABLE_DIGITAL_STANDBY         (1 << 1)
 #define QAM_FS_CHOOSE_FS                       (1 << 0)
 
 /* Definitions for fs432x4a_ctl register */
 static struct resource ehci_resources[] = {
        {
                .parent = &asic_resource,
-               .start  = 0,
-               .end    = 0xff,
-               .flags  = IORESOURCE_MEM,
+               .start  = 0,
+               .end    = 0xff,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .start  = irq_usbehci,
-               .end    = irq_usbehci,
-               .flags  = IORESOURCE_IRQ,
+               .start  = irq_usbehci,
+               .end    = irq_usbehci,
+               .flags  = IORESOURCE_IRQ,
        },
 };
 
@@ -169,14 +169,14 @@ static struct platform_device ehci_device = {
 static struct resource ohci_resources[] = {
        {
                .parent = &asic_resource,
-               .start  = 0,
-               .end    = 0xff,
-               .flags  = IORESOURCE_MEM,
+               .start  = 0,
+               .end    = 0xff,
+               .flags  = IORESOURCE_MEM,
        },
        {
-               .start  = irq_usbohci,
-               .end    = irq_usbohci,
-               .flags  = IORESOURCE_IRQ,
+               .start  = irq_usbohci,
+               .end    = irq_usbohci,
+               .flags  = IORESOURCE_IRQ,
        },
 };
 
@@ -207,9 +207,9 @@ static DEFINE_SPINLOCK(usb_regs_lock);
  *
  * QAM frequency selection code, which affects the frequency at which USB
  * runs. The frequency is calculated as:
- *                             2^15 * ndiv * Fin
+ *                            2^15 * ndiv * Fin
  * Fout = ------------------------------------------------------------
- *        (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
+ *       (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
  * where:
  * Fin         54 MHz
  * ndiv                QAM_FS_NSDIV_54MHZ ? 8 : 16
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
new file mode 100644 (file)
index 0000000..a0b0197
--- /dev/null
@@ -0,0 +1,32 @@
+if RALINK
+
+choice
+       prompt "Ralink SoC selection"
+       default SOC_RT305X
+       help
+         Select Ralink MIPS SoC type.
+
+       config SOC_RT305X
+               bool "RT305x"
+               select USB_ARCH_HAS_HCD
+               select USB_ARCH_HAS_OHCI
+               select USB_ARCH_HAS_EHCI
+
+endchoice
+
+choice
+       prompt "Devicetree selection"
+       default DTB_RT_NONE
+       help
+         Select the devicetree.
+
+       config DTB_RT_NONE
+               bool "None"
+
+       config DTB_RT305X_EVAL
+               bool "RT305x eval kit"
+               depends on SOC_RT305X
+
+endchoice
+
+endif
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
new file mode 100644 (file)
index 0000000..939757f
--- /dev/null
@@ -0,0 +1,15 @@
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published
+# by the Free Software Foundation.#
+# Makefile for the Ralink common stuff
+#
+# Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+# Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+
+obj-y := prom.o of.o reset.o clk.o irq.o
+
+obj-$(CONFIG_SOC_RT305X) += rt305x.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-y += dts/
diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform
new file mode 100644 (file)
index 0000000..6babd65
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Ralink SoC common stuff
+#
+core-$(CONFIG_RALINK)          += arch/mips/ralink/
+cflags-$(CONFIG_RALINK)                += -I$(srctree)/arch/mips/include/asm/mach-ralink
+
+#
+# Ralink RT305x
+#
+load-$(CONFIG_SOC_RT305X)      += 0xffffffff80000000
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
new file mode 100644 (file)
index 0000000..8dfa22f
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+
+#include "common.h"
+
+struct clk {
+       struct clk_lookup cl;
+       unsigned long rate;
+};
+
+void ralink_clk_add(const char *dev, unsigned long rate)
+{
+       struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+       if (!clk)
+               panic("failed to add clock\n");
+
+       clk->cl.dev_id = dev;
+       clk->cl.clk = clk;
+
+       clk->rate = rate;
+
+       clkdev_add(&clk->cl);
+}
+
+/*
+ * Linux clock API
+ */
+int clk_enable(struct clk *clk)
+{
+       return 0;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL_GPL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+       return clk->rate;
+}
+EXPORT_SYMBOL_GPL(clk_get_rate);
+
+void __init plat_time_init(void)
+{
+       struct clk *clk;
+
+       ralink_of_remap();
+
+       ralink_clk_init();
+       clk = clk_get_sys("cpu", NULL);
+       if (IS_ERR(clk))
+               panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
+       pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
+       mips_hpt_frequency = clk_get_rate(clk) / 2;
+       clk_put(clk);
+}
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
new file mode 100644 (file)
index 0000000..3009903
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RALINK_COMMON_H__
+#define _RALINK_COMMON_H__
+
+#define RAMIPS_SYS_TYPE_LEN    32
+
+struct ralink_pinmux_grp {
+       const char *name;
+       u32 mask;
+       int gpio_first;
+       int gpio_last;
+};
+
+struct ralink_pinmux {
+       struct ralink_pinmux_grp *mode;
+       struct ralink_pinmux_grp *uart;
+       int uart_shift;
+       void (*wdt_reset)(void);
+};
+extern struct ralink_pinmux gpio_pinmux;
+
+struct ralink_soc_info {
+       unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
+       unsigned char *compatible;
+};
+extern struct ralink_soc_info soc_info;
+
+extern void ralink_of_remap(void);
+
+extern void ralink_clk_init(void);
+extern void ralink_clk_add(const char *dev, unsigned long rate);
+
+extern void prom_soc_init(struct ralink_soc_info *soc_info);
+
+__iomem void *plat_of_remap_node(const char *node);
+
+#endif /* _RALINK_COMMON_H__ */
diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile
new file mode 100644 (file)
index 0000000..1a69fb3
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
new file mode 100644 (file)
index 0000000..069d066
--- /dev/null
@@ -0,0 +1,106 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips24KEc";
+               };
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600 init=/init";
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@10000000 {
+               compatible = "palmbus";
+               reg = <0x10000000 0x200000>;
+                ranges = <0x0 0x10000000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,rt3052-sysc", "ralink,rt3050-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               timer@100 {
+                       compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
+                       reg = <0x100 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,rt3052-memc", "ralink,rt3050-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               gpio0: gpio@600 {
+                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+                       reg = <0x600 0x34>;
+
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       ralink,ngpio = <24>;
+                       ralink,regs = [ 00 04 08 0c
+                                       20 24 28 2c
+                                       30 34 ];
+               };
+
+               gpio1: gpio@638 {
+                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+                       reg = <0x638 0x24>;
+
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       ralink,ngpio = <16>;
+                       ralink,regs = [ 00 04 08 0c
+                                       10 14 18 1c
+                                       20 24 ];
+               };
+
+               gpio2: gpio@660 {
+                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+                       reg = <0x660 0x24>;
+
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       ralink,ngpio = <12>;
+                       ralink,regs = [ 00 04 08 0c
+                                       10 14 18 1c
+                                       20 24 ];
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <12>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
new file mode 100644 (file)
index 0000000..148a590
--- /dev/null
@@ -0,0 +1,52 @@
+/dts-v1/;
+
+/include/ "rt3050.dtsi"
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
+       model = "Ralink RT3052 evaluation board";
+
+       memory@0 {
+               reg = <0x0 0x2000000>;
+       };
+
+       palmbus@10000000 {
+               sysc@0 {
+                       ralink,pinmmux = "uartlite", "spi";
+                       ralink,uartmux = "gpio";
+                       ralink,wdtmux = <0>;
+               };
+       };
+
+       cfi@1f000000 {
+               compatible = "cfi-flash";
+               reg = <0x1f000000 0x800000>;
+
+               bank-width = <2>;
+               device-width = <2>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               partition@0 {
+                       label = "uboot";
+                       reg = <0x0 0x30000>;
+                       read-only;
+               };
+               partition@30000 {
+                       label = "uboot-env";
+                       reg = <0x30000 0x10000>;
+                       read-only;
+               };
+               partition@40000 {
+                       label = "calibration";
+                       reg = <0x40000 0x10000>;
+                       read-only;
+               };
+               partition@50000 {
+                       label = "linux";
+                       reg = <0x50000 0x7b0000>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
new file mode 100644 (file)
index 0000000..c4ae47e
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/serial_reg.h>
+
+#include <asm/addrspace.h>
+
+#define EARLY_UART_BASE         0x10000c00
+
+#define UART_REG_RX             0x00
+#define UART_REG_TX             0x04
+#define UART_REG_IER            0x08
+#define UART_REG_IIR            0x0c
+#define UART_REG_FCR            0x10
+#define UART_REG_LCR            0x14
+#define UART_REG_MCR            0x18
+#define UART_REG_LSR            0x1c
+
+static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE);
+
+static inline void uart_w32(u32 val, unsigned reg)
+{
+       __raw_writel(val, uart_membase + reg);
+}
+
+static inline u32 uart_r32(unsigned reg)
+{
+       return __raw_readl(uart_membase + reg);
+}
+
+void prom_putchar(unsigned char ch)
+{
+       while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
+               ;
+       uart_w32(ch, UART_REG_TX);
+       while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
+               ;
+}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
new file mode 100644 (file)
index 0000000..6d054c5
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+
+#include "common.h"
+
+/* INTC register offsets */
+#define INTC_REG_STATUS0       0x00
+#define INTC_REG_STATUS1       0x04
+#define INTC_REG_TYPE          0x20
+#define INTC_REG_RAW_STATUS    0x30
+#define INTC_REG_ENABLE                0x34
+#define INTC_REG_DISABLE       0x38
+
+#define INTC_INT_GLOBAL                BIT(31)
+
+#define RALINK_CPU_IRQ_INTC    (MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_FE      (MIPS_CPU_IRQ_BASE + 5)
+#define RALINK_CPU_IRQ_WIFI    (MIPS_CPU_IRQ_BASE + 6)
+#define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
+
+/* we have a cascade of 8 irqs */
+#define RALINK_INTC_IRQ_BASE   8
+
+/* we have 32 SoC irqs */
+#define RALINK_INTC_IRQ_COUNT  32
+
+#define RALINK_INTC_IRQ_PERFC   (RALINK_INTC_IRQ_BASE + 9)
+
+static void __iomem *rt_intc_membase;
+
+static inline void rt_intc_w32(u32 val, unsigned reg)
+{
+       __raw_writel(val, rt_intc_membase + reg);
+}
+
+static inline u32 rt_intc_r32(unsigned reg)
+{
+       return __raw_readl(rt_intc_membase + reg);
+}
+
+static void ralink_intc_irq_unmask(struct irq_data *d)
+{
+       rt_intc_w32(BIT(d->hwirq), INTC_REG_ENABLE);
+}
+
+static void ralink_intc_irq_mask(struct irq_data *d)
+{
+       rt_intc_w32(BIT(d->hwirq), INTC_REG_DISABLE);
+}
+
+static struct irq_chip ralink_intc_irq_chip = {
+       .name           = "INTC",
+       .irq_unmask     = ralink_intc_irq_unmask,
+       .irq_mask       = ralink_intc_irq_mask,
+       .irq_mask_ack   = ralink_intc_irq_mask,
+};
+
+unsigned int __cpuinit get_c0_compare_int(void)
+{
+       return CP0_LEGACY_COMPARE_IRQ;
+}
+
+static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+       u32 pending = rt_intc_r32(INTC_REG_STATUS0);
+
+       if (pending) {
+               struct irq_domain *domain = irq_get_handler_data(irq);
+               generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
+       } else {
+               spurious_interrupt();
+       }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+       unsigned long pending;
+
+       pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+       if (pending & STATUSF_IP7)
+               do_IRQ(RALINK_CPU_IRQ_COUNTER);
+
+       else if (pending & STATUSF_IP5)
+               do_IRQ(RALINK_CPU_IRQ_FE);
+
+       else if (pending & STATUSF_IP6)
+               do_IRQ(RALINK_CPU_IRQ_WIFI);
+
+       else if (pending & STATUSF_IP2)
+               do_IRQ(RALINK_CPU_IRQ_INTC);
+
+       else
+               spurious_interrupt();
+}
+
+static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+       irq_set_chip_and_handler(irq, &ralink_intc_irq_chip, handle_level_irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+       .xlate = irq_domain_xlate_onecell,
+       .map = intc_map,
+};
+
+static int __init intc_of_init(struct device_node *node,
+                              struct device_node *parent)
+{
+       struct resource res;
+       struct irq_domain *domain;
+       int irq;
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (!irq)
+               panic("Failed to get INTC IRQ");
+
+       if (of_address_to_resource(node, 0, &res))
+               panic("Failed to get intc memory range");
+
+       if (request_mem_region(res.start, resource_size(&res),
+                               res.name) < 0)
+               pr_err("Failed to request intc memory");
+
+       rt_intc_membase = ioremap_nocache(res.start,
+                                       resource_size(&res));
+       if (!rt_intc_membase)
+               panic("Failed to remap intc memory");
+
+       /* disable all interrupts */
+       rt_intc_w32(~0, INTC_REG_DISABLE);
+
+       /* route all INTC interrupts to MIPS HW0 interrupt */
+       rt_intc_w32(0, INTC_REG_TYPE);
+
+       domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
+                       RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL);
+       if (!domain)
+               panic("Failed to add irqdomain");
+
+       rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);
+
+       irq_set_chained_handler(irq, ralink_intc_irq_handler);
+       irq_set_handler_data(irq, domain);
+
+       cp0_perfcount_irq = irq_create_mapping(domain, 9);
+
+       return 0;
+}
+
+static struct of_device_id __initdata of_irq_ids[] = {
+       { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+       { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
+       {},
+};
+
+void __init arch_init_irq(void)
+{
+       of_irq_init(of_irq_ids);
+}
+
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
new file mode 100644 (file)
index 0000000..4165e70
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <asm/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+
+#include "common.h"
+
+__iomem void *rt_sysc_membase;
+__iomem void *rt_memc_membase;
+
+extern struct boot_param_header __dtb_start;
+
+__iomem void *plat_of_remap_node(const char *node)
+{
+       struct resource res;
+       struct device_node *np;
+
+       np = of_find_compatible_node(NULL, NULL, node);
+       if (!np)
+               panic("Failed to find %s node", node);
+
+       if (of_address_to_resource(np, 0, &res))
+               panic("Failed to get resource for %s", node);
+
+       if ((request_mem_region(res.start,
+                               resource_size(&res),
+                               res.name) < 0))
+               panic("Failed to request resources for %s", node);
+
+       return ioremap_nocache(res.start, resource_size(&res));
+}
+
+void __init device_tree_init(void)
+{
+       unsigned long base, size;
+       void *fdt_copy;
+
+       if (!initial_boot_params)
+               return;
+
+       base = virt_to_phys((void *)initial_boot_params);
+       size = be32_to_cpu(initial_boot_params->totalsize);
+
+       /* Before we do anything, lets reserve the dt blob */
+       reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+
+       /* The strings in the flattened tree are referenced directly by the
+        * device tree, so copy the flattened device tree from init memory
+        * to regular memory.
+        */
+       fdt_copy = alloc_bootmem(size);
+       memcpy(fdt_copy, initial_boot_params, size);
+       initial_boot_params = fdt_copy;
+
+       unflatten_device_tree();
+
+       /* free the space reserved for the dt blob */
+       free_bootmem(base, size);
+}
+
+void __init plat_mem_setup(void)
+{
+       set_io_port_base(KSEG1);
+
+       /*
+        * Load the builtin devicetree. This causes the chosen node to be
+        * parsed resulting in our memory appearing
+        */
+       __dt_setup_arch(&__dtb_start);
+}
+
+static int __init plat_of_setup(void)
+{
+       static struct of_device_id of_ids[3];
+       int len = sizeof(of_ids[0].compatible);
+
+       if (!of_have_populated_dt())
+               panic("device tree not present");
+
+       strncpy(of_ids[0].compatible, soc_info.compatible, len);
+       strncpy(of_ids[1].compatible, "palmbus", len);
+
+       if (of_platform_populate(NULL, of_ids, NULL, NULL))
+               panic("failed to populate DT\n");
+
+       return 0;
+}
+
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
new file mode 100644 (file)
index 0000000..9c64f02
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com>
+ *  Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/string.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+
+#include "common.h"
+
+struct ralink_soc_info soc_info;
+
+const char *get_system_type(void)
+{
+       return soc_info.sys_type;
+}
+
+static __init void prom_init_cmdline(int argc, char **argv)
+{
+       int i;
+
+       pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
+              (unsigned int)fw_arg0, (unsigned int)fw_arg1,
+              (unsigned int)fw_arg2, (unsigned int)fw_arg3);
+
+       argc = fw_arg0;
+       argv = (char **) KSEG1ADDR(fw_arg1);
+
+       if (!argv) {
+               pr_debug("argv=%p is invalid, skipping\n",
+                      argv);
+               return;
+       }
+
+       for (i = 0; i < argc; i++) {
+               char *p = (char *) KSEG1ADDR(argv[i]);
+
+               if (CPHYSADDR(p) && *p) {
+                       pr_debug("argv[%d]: %s\n", i, p);
+                       strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
+                       strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
+               }
+       }
+}
+
+void __init prom_init(void)
+{
+       int argc;
+       char **argv;
+
+       prom_soc_init(&soc_info);
+
+       pr_info("SoC Type: %s\n", get_system_type());
+
+       prom_init_cmdline(argc, argv);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
new file mode 100644 (file)
index 0000000..22120e5
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/pm.h>
+#include <linux/io.h>
+
+#include <asm/reboot.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+/* Reset Control */
+#define SYSC_REG_RESET_CTRL     0x034
+#define RSTCTL_RESET_SYSTEM     BIT(0)
+
+static void ralink_restart(char *command)
+{
+       local_irq_disable();
+       rt_sysc_w32(RSTCTL_RESET_SYSTEM, SYSC_REG_RESET_CTRL);
+       unreachable();
+}
+
+static void ralink_halt(void)
+{
+       local_irq_disable();
+       unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+       _machine_restart = ralink_restart;
+       _machine_halt = ralink_halt;
+       pm_power_off = ralink_halt;
+
+       return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
new file mode 100644 (file)
index 0000000..0a4bbdc
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt305x.h>
+
+#include "common.h"
+
+enum rt305x_soc_type rt305x_soc;
+
+struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = RT305X_GPIO_MODE_I2C,
+               .gpio_first = RT305X_GPIO_I2C_SD,
+               .gpio_last = RT305X_GPIO_I2C_SCLK,
+       }, {
+               .name = "spi",
+               .mask = RT305X_GPIO_MODE_SPI,
+               .gpio_first = RT305X_GPIO_SPI_EN,
+               .gpio_last = RT305X_GPIO_SPI_CLK,
+       }, {
+               .name = "uartlite",
+               .mask = RT305X_GPIO_MODE_UART1,
+               .gpio_first = RT305X_GPIO_UART1_TXD,
+               .gpio_last = RT305X_GPIO_UART1_RXD,
+       }, {
+               .name = "jtag",
+               .mask = RT305X_GPIO_MODE_JTAG,
+               .gpio_first = RT305X_GPIO_JTAG_TDO,
+               .gpio_last = RT305X_GPIO_JTAG_TDI,
+       }, {
+               .name = "mdio",
+               .mask = RT305X_GPIO_MODE_MDIO,
+               .gpio_first = RT305X_GPIO_MDIO_MDC,
+               .gpio_last = RT305X_GPIO_MDIO_MDIO,
+       }, {
+               .name = "sdram",
+               .mask = RT305X_GPIO_MODE_SDRAM,
+               .gpio_first = RT305X_GPIO_SDRAM_MD16,
+               .gpio_last = RT305X_GPIO_SDRAM_MD31,
+       }, {
+               .name = "rgmii",
+               .mask = RT305X_GPIO_MODE_RGMII,
+               .gpio_first = RT305X_GPIO_GE0_TXD0,
+               .gpio_last = RT305X_GPIO_GE0_RXCLK,
+       }, {0}
+};
+
+struct ralink_pinmux_grp uart_mux[] = {
+       {
+               .name = "uartf",
+               .mask = RT305X_GPIO_MODE_UARTF,
+               .gpio_first = RT305X_GPIO_7,
+               .gpio_last = RT305X_GPIO_14,
+       }, {
+               .name = "pcm uartf",
+               .mask = RT305X_GPIO_MODE_PCM_UARTF,
+               .gpio_first = RT305X_GPIO_7,
+               .gpio_last = RT305X_GPIO_14,
+       }, {
+               .name = "pcm i2s",
+               .mask = RT305X_GPIO_MODE_PCM_I2S,
+               .gpio_first = RT305X_GPIO_7,
+               .gpio_last = RT305X_GPIO_14,
+       }, {
+               .name = "i2s uartf",
+               .mask = RT305X_GPIO_MODE_I2S_UARTF,
+               .gpio_first = RT305X_GPIO_7,
+               .gpio_last = RT305X_GPIO_14,
+       }, {
+               .name = "pcm gpio",
+               .mask = RT305X_GPIO_MODE_PCM_GPIO,
+               .gpio_first = RT305X_GPIO_10,
+               .gpio_last = RT305X_GPIO_14,
+       }, {
+               .name = "gpio uartf",
+               .mask = RT305X_GPIO_MODE_GPIO_UARTF,
+               .gpio_first = RT305X_GPIO_7,
+               .gpio_last = RT305X_GPIO_14,
+       }, {
+               .name = "gpio i2s",
+               .mask = RT305X_GPIO_MODE_GPIO_I2S,
+               .gpio_first = RT305X_GPIO_7,
+               .gpio_last = RT305X_GPIO_14,
+       }, {
+               .name = "gpio",
+               .mask = RT305X_GPIO_MODE_GPIO,
+       }, {0}
+};
+
+void rt305x_wdt_reset(void)
+{
+       u32 t;
+
+       /* enable WDT reset output on pin SRAM_CS_N */
+       t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+       t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
+               RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
+       rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
+}
+
+struct ralink_pinmux gpio_pinmux = {
+       .mode = mode_mux,
+       .uart = uart_mux,
+       .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+       .wdt_reset = rt305x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+       u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+
+       if (soc_is_rt305x() || soc_is_rt3350()) {
+               t = (t >> RT305X_SYSCFG_CPUCLK_SHIFT) &
+                    RT305X_SYSCFG_CPUCLK_MASK;
+               switch (t) {
+               case RT305X_SYSCFG_CPUCLK_LOW:
+                       cpu_rate = 320000000;
+                       break;
+               case RT305X_SYSCFG_CPUCLK_HIGH:
+                       cpu_rate = 384000000;
+                       break;
+               }
+               sys_rate = uart_rate = wdt_rate = cpu_rate / 3;
+       } else if (soc_is_rt3352()) {
+               t = (t >> RT3352_SYSCFG0_CPUCLK_SHIFT) &
+                    RT3352_SYSCFG0_CPUCLK_MASK;
+               switch (t) {
+               case RT3352_SYSCFG0_CPUCLK_LOW:
+                       cpu_rate = 384000000;
+                       break;
+               case RT3352_SYSCFG0_CPUCLK_HIGH:
+                       cpu_rate = 400000000;
+                       break;
+               }
+               sys_rate = wdt_rate = cpu_rate / 3;
+               uart_rate = 40000000;
+       } else if (soc_is_rt5350()) {
+               t = (t >> RT5350_SYSCFG0_CPUCLK_SHIFT) &
+                    RT5350_SYSCFG0_CPUCLK_MASK;
+               switch (t) {
+               case RT5350_SYSCFG0_CPUCLK_360:
+                       cpu_rate = 360000000;
+                       sys_rate = cpu_rate / 3;
+                       break;
+               case RT5350_SYSCFG0_CPUCLK_320:
+                       cpu_rate = 320000000;
+                       sys_rate = cpu_rate / 4;
+                       break;
+               case RT5350_SYSCFG0_CPUCLK_300:
+                       cpu_rate = 300000000;
+                       sys_rate = cpu_rate / 3;
+                       break;
+               default:
+                       BUG();
+               }
+               uart_rate = 40000000;
+               wdt_rate = sys_rate;
+       } else {
+               BUG();
+       }
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("10000b00.spi", sys_rate);
+       ralink_clk_add("10000100.timer", wdt_rate);
+       ralink_clk_add("10000500.uart", uart_rate);
+       ralink_clk_add("10000c00.uartlite", uart_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,rt3050-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,rt3050-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+       unsigned char *name;
+       u32 n0;
+       u32 n1;
+       u32 id;
+
+       n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+       n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+       if (n0 == RT3052_CHIP_NAME0 && n1 == RT3052_CHIP_NAME1) {
+               unsigned long icache_sets;
+
+               icache_sets = (read_c0_config1() >> 22) & 7;
+               if (icache_sets == 1) {
+                       rt305x_soc = RT305X_SOC_RT3050;
+                       name = "RT3050";
+                       soc_info->compatible = "ralink,rt3050-soc";
+               } else {
+                       rt305x_soc = RT305X_SOC_RT3052;
+                       name = "RT3052";
+                       soc_info->compatible = "ralink,rt3052-soc";
+               }
+       } else if (n0 == RT3350_CHIP_NAME0 && n1 == RT3350_CHIP_NAME1) {
+               rt305x_soc = RT305X_SOC_RT3350;
+               name = "RT3350";
+               soc_info->compatible = "ralink,rt3350-soc";
+       } else if (n0 == RT3352_CHIP_NAME0 && n1 == RT3352_CHIP_NAME1) {
+               rt305x_soc = RT305X_SOC_RT3352;
+               name = "RT3352";
+               soc_info->compatible = "ralink,rt3352-soc";
+       } else if (n0 == RT5350_CHIP_NAME0 && n1 == RT5350_CHIP_NAME1) {
+               rt305x_soc = RT305X_SOC_RT5350;
+               name = "RT5350";
+               soc_info->compatible = "ralink,rt5350-soc";
+       } else {
+               panic("rt305x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+       }
+
+       id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s id:%u rev:%u",
+               name,
+               (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+               (id & CHIP_ID_REV_MASK));
+}
index 716e9a1..3af00b2 100644 (file)
@@ -215,9 +215,9 @@ static struct resource rb532_wdt_res[] = {
 };
 
 static struct platform_device rb532_wdt = {
-       .name           = "rc32434_wdt",
-       .id             = -1,
-       .resource       = rb532_wdt_res,
+       .name           = "rc32434_wdt",
+       .id             = -1,
+       .resource       = rb532_wdt_res,
        .num_resources  = ARRAY_SIZE(rb532_wdt_res),
 };
 
@@ -235,8 +235,8 @@ static struct plat_serial8250_port rb532_uart_res[] = {
 };
 
 static struct platform_device rb532_uart = {
-       .name              = "serial8250",
-       .id                = PLAT8250_DEV_PLATFORM,
+       .name              = "serial8250",
+       .id                = PLAT8250_DEV_PLATFORM,
        .dev.platform_data = &rb532_uart_res,
 };
 
@@ -273,7 +273,7 @@ static void __init parse_mac_addr(char *macstr)
 
 
 /* NAND definitions */
-#define NAND_CHIP_DELAY        25
+#define NAND_CHIP_DELAY 25
 
 static void __init rb532_nand_setup(void)
 {
index 6ec41df..a180076 100644 (file)
@@ -44,10 +44,10 @@ struct rb532_gpio_chip {
 
 static struct resource rb532_gpio_reg0_res[] = {
        {
-               .name   = "gpio_reg0",
-               .start  = REGBASE + GPIOBASE,
-               .end    = REGBASE + GPIOBASE + sizeof(struct rb532_gpio_reg) - 1,
-               .flags  = IORESOURCE_MEM,
+               .name   = "gpio_reg0",
+               .start  = REGBASE + GPIOBASE,
+               .end    = REGBASE + GPIOBASE + sizeof(struct rb532_gpio_reg) - 1,
+               .flags  = IORESOURCE_MEM,
        }
 };
 
index f298430..3a431e8 100644 (file)
@@ -21,7 +21,7 @@
  *
  * Copyright 2002 MontaVista Software Inc.
  * Author: MontaVista Software, Inc.
- *              stevel@mvista.com or source@mvista.com
+ *             stevel@mvista.com or source@mvista.com
  */
 
 #include <linux/bitops.h>
@@ -51,7 +51,7 @@ struct intr_group {
        volatile u32 *base_addr;
 };
 
-#define RC32434_NR_IRQS  (GROUP4_IRQ_BASE + 32)
+#define RC32434_NR_IRQS         (GROUP4_IRQ_BASE + 32)
 
 #if (NR_IRQS < RC32434_NR_IRQS)
 #error Too little irqs defined. Did you override <asm/irq.h> ?
index 4a6057b..a0a7922 100644 (file)
@@ -2,7 +2,7 @@
  * Basic EISA bus support for the SGI Indigo-2.
  *
  * (C) 2002 Pascal Dameme <netinet@freesurf.fr>
- *      and Marc Zyngier <mzyngier@freesurf.fr>
+ *     and Marc Zyngier <mzyngier@freesurf.fr>
  *
  * This code is released under both the GPL version 2 and BSD
  * licenses.  Either license may be used.
 
 /* I2 has four EISA slots. */
 #define IP22_EISA_MAX_SLOTS      4
-#define EISA_MAX_IRQ             16
+#define EISA_MAX_IRQ            16
 
-#define EIU_MODE_REG     0x0001ffc0
-#define EIU_STAT_REG     0x0001ffc4
-#define EIU_PREMPT_REG   0x0001ffc8
-#define EIU_QUIET_REG    0x0001ffcc
-#define EIU_INTRPT_ACK   0x00010004
+#define EIU_MODE_REG    0x0001ffc0
+#define EIU_STAT_REG    0x0001ffc4
+#define EIU_PREMPT_REG  0x0001ffc8
+#define EIU_QUIET_REG   0x0001ffcc
+#define EIU_INTRPT_ACK  0x00010004
 
 static char __init *decode_eisa_sig(unsigned long addr)
 {
index f5ebc09..ab0e379 100644 (file)
@@ -15,7 +15,7 @@ static struct bus_type gio_bus_type;
 
 static struct {
        const char *name;
-       __u8       id;
+       __u8       id;
 } gio_name_table[] = {
        { .name = "SGI Impact", .id = 0x10 },
        { .name = "Phobos G160", .id = 0x35 },
@@ -376,15 +376,15 @@ static void ip22_check_gio(int slotno, unsigned long addr)
 }
 
 static struct bus_type gio_bus_type = {
-       .name      = "gio",
+       .name      = "gio",
        .dev_attrs = gio_dev_attrs,
-       .match     = gio_bus_match,
-       .probe     = gio_device_probe,
-       .remove    = gio_device_remove,
+       .match     = gio_bus_match,
+       .probe     = gio_device_probe,
+       .remove    = gio_device_remove,
        .suspend   = gio_device_suspend,
-       .resume    = gio_device_resume,
+       .resume    = gio_device_resume,
        .shutdown  = gio_device_shutdown,
-       .uevent    = gio_device_uevent,
+       .uevent    = gio_device_uevent,
 };
 
 static struct resource gio_bus_resource = {
index 3f2b763..3db64d5 100644 (file)
@@ -1,12 +1,12 @@
 /*
  * ip22-int.c: Routines for generic manipulation of the INT[23] ASIC
- *             found on INDY and Indigo2 workstations.
+ *            found on INDY and Indigo2 workstations.
  *
  * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu)
- *                    - Indigo2 changes
- *                    - Interrupt handling fixes
+ *                   - Indigo2 changes
+ *                   - Interrupt handling fixes
  * Copyright (C) 2001, 2003 Ladislav Michl (ladis@linux-mips.org)
  */
 #include <linux/types.h>
@@ -195,24 +195,24 @@ extern void indy_8254timer_irq(void);
  * at all) like:
  *
  *     MIPS IRQ        Source
- *      --------        ------
- *             0       Software (ignored)
- *             1        Software (ignored)
- *             2        Local IRQ level zero
- *             3        Local IRQ level one
- *             4        8254 Timer zero
- *             5        8254 Timer one
- *             6        Bus Error
- *             7        R4k timer (what we use)
+ *     --------        ------
+ *            0        Software (ignored)
+ *            1        Software (ignored)
+ *            2        Local IRQ level zero
+ *            3        Local IRQ level one
+ *            4        8254 Timer zero
+ *            5        8254 Timer one
+ *            6        Bus Error
+ *            7        R4k timer (what we use)
  *
  * We handle the IRQ according to _our_ priority which is:
  *
- * Highest ----     R4k Timer
- *                  Local IRQ zero
- *                  Local IRQ one
- *                  Bus Error
- *                  8254 Timer zero
- * Lowest  ----     8254 Timer one
+ * Highest ----            R4k Timer
+ *                 Local IRQ zero
+ *                 Local IRQ one
+ *                 Bus Error
+ *                 8254 Timer zero
+ * Lowest  ----            8254 Timer one
  *
  * then we just return, if multiple IRQs are pending then we will just take
  * another exception, big deal.
index 75ada8a..7cec0a4 100644 (file)
@@ -121,22 +121,22 @@ void __init sgimc_init(void)
         */
 
        /* Step 0: Make sure we turn off the watchdog in case it's
-        *         still running (which might be the case after a
-        *         soft reboot).
+        *         still running (which might be the case after a
+        *         soft reboot).
         */
        tmp = sgimc->cpuctrl0;
        tmp &= ~SGIMC_CCTRL0_WDOG;
        sgimc->cpuctrl0 = tmp;
 
        /* Step 1: The CPU/GIO error status registers will not latch
-        *         up a new error status until the register has been
-        *         cleared by the cpu.  These status registers are
-        *         cleared by writing any value to them.
+        *         up a new error status until the register has been
+        *         cleared by the cpu.  These status registers are
+        *         cleared by writing any value to them.
         */
        sgimc->cstat = sgimc->gstat = 0;
 
        /* Step 2: Enable all parity checking in cpu control register
-        *         zero.
+        *         zero.
         */
        /* don't touch parity settings for IP28 */
        tmp = sgimc->cpuctrl0;
@@ -147,7 +147,7 @@ void __init sgimc_init(void)
        sgimc->cpuctrl0 = tmp;
 
        /* Step 3: Setup the MC write buffer depth, this is controlled
-        *         in cpu control register 1 in the lower 4 bits.
+        *         in cpu control register 1 in the lower 4 bits.
         */
        tmp = sgimc->cpuctrl1;
        tmp &= ~0xf;
@@ -155,26 +155,26 @@ void __init sgimc_init(void)
        sgimc->cpuctrl1 = tmp;
 
        /* Step 4: Initialize the RPSS divider register to run as fast
-        *         as it can correctly operate.  The register is laid
-        *         out as follows:
+        *         as it can correctly operate.  The register is laid
+        *         out as follows:
         *
-        *         ----------------------------------------
-        *         |  RESERVED  |   INCREMENT   | DIVIDER |
-        *         ----------------------------------------
-        *          31        16 15            8 7       0
+        *         ----------------------------------------
+        *         |  RESERVED  |   INCREMENT   | DIVIDER |
+        *         ----------------------------------------
+        *          31        16 15            8 7       0
         *
-        *         DIVIDER determines how often a 'tick' happens,
-        *         INCREMENT determines by how the RPSS increment
-        *         registers value increases at each 'tick'. Thus,
-        *         for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
+        *         DIVIDER determines how often a 'tick' happens,
+        *         INCREMENT determines by how the RPSS increment
+        *         registers value increases at each 'tick'. Thus,
+        *         for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
         */
        sgimc->divider = 0x101;
 
        /* Step 5: Initialize GIO64 arbitrator configuration register.
         *
         * NOTE: HPC init code in sgihpc_init() must run before us because
-        *       we need to know Guiness vs. FullHouse and the board
-        *       revision on this machine. You have been warned.
+        *       we need to know Guiness vs. FullHouse and the board
+        *       revision on this machine. You have been warned.
         */
 
        /* First the basic invariants across all GIO64 implementations. */
@@ -187,18 +187,18 @@ void __init sgimc_init(void)
                if (SGIOC_SYSID_BOARDREV(sgioc->sysid) < 2) {
                        tmp |= SGIMC_GIOPAR_HPC264;     /* 2nd HPC at 64bits */
                        tmp |= SGIMC_GIOPAR_PLINEEXP0;  /* exp0 pipelines */
-                       tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
+                       tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
                        tmp |= SGIMC_GIOPAR_RTIMEEXP0;  /* exp0 is realtime */
                } else {
                        tmp |= SGIMC_GIOPAR_HPC264;     /* 2nd HPC 64bits */
                        tmp |= SGIMC_GIOPAR_PLINEEXP0;  /* exp[01] pipelined */
                        tmp |= SGIMC_GIOPAR_PLINEEXP1;
-                       tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
+                       tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
                }
        } else {
                /* Guiness specific settings. */
                tmp |= SGIMC_GIOPAR_EISA64;     /* MC talks to EISA at 64bits */
-               tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
+               tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
        }
        sgimc->giopar = tmp;    /* poof */
 
index 0177566..e077036 100644 (file)
 #define EEPROM_WRITE   0xa000  /* serial memory write */
 #define EEPROM_WRALL   0x8800  /* write all registers */
 #define EEPROM_WDS     0x8000  /* disable all programming */
-#define        EEPROM_PRREAD   0xc000  /* read protect register */
-#define        EEPROM_PREN     0x9800  /* enable protect register mode */
-#define        EEPROM_PRCLEAR  0xffff  /* clear protect register */
-#define        EEPROM_PRWRITE  0xa000  /* write protect register */
-#define        EEPROM_PRDS     0x8000  /* disable protect register, forever */
+#define EEPROM_PRREAD  0xc000  /* read protect register */
+#define EEPROM_PREN    0x9800  /* enable protect register mode */
+#define EEPROM_PRCLEAR 0xffff  /* clear protect register */
+#define EEPROM_PRWRITE 0xa000  /* write protect register */
+#define EEPROM_PRDS    0x8000  /* disable protect register, forever */
 
 #define EEPROM_EPROT   0x01    /* Protect register enable */
 #define EEPROM_CSEL    0x02    /* Chip select */
@@ -27,7 +27,7 @@
 #define EEPROM_DATI    0x10    /* Data in */
 
 /* We need to use these functions early... */
-#define delay()        ({                                              \
+#define delay() ({                                             \
        int x;                                                  \
        for (x=0; x<100000; x++) __asm__ __volatile__(""); })
 
@@ -35,7 +35,7 @@
        __raw_writel(__raw_readl(ptr) & ~EEPROM_DATO, ptr);     \
        __raw_writel(__raw_readl(ptr) & ~EEPROM_ECLK, ptr);     \
        __raw_writel(__raw_readl(ptr) & ~EEPROM_EPROT, ptr);    \
-       delay();                                                \
+       delay();                                                \
        __raw_writel(__raw_readl(ptr) | EEPROM_CSEL, ptr);      \
        __raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); })
 
@@ -46,7 +46,7 @@
        __raw_writel(__raw_readl(ptr) | EEPROM_EPROT, ptr);     \
        __raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); })
 
-#define        BITS_IN_COMMAND 11
+#define BITS_IN_COMMAND 11
 /*
  * clock in the nvram command and the register number. For the
  * national semiconductor nv ram chip the op code is 3 bits and
index 698904d..a14fd32 100644 (file)
@@ -137,7 +137,7 @@ static int __init sgiseeq_devinit(void)
 
        eth0_pd.hpc = hpc3c0;
        eth0_pd.irq = SGI_ENET_IRQ;
-#define EADDR_NVOFS     250
+#define EADDR_NVOFS    250
        for (i = 0; i < 3; i++) {
                unsigned short tmp = ip22_nvram_read(EADDR_NVOFS / 2 + i);
 
@@ -155,17 +155,17 @@ static int __init sgiseeq_devinit(void)
                return 0;
 
        sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 |
-                        SGIMC_GIOPAR_HPC264;
+                        SGIMC_GIOPAR_HPC264;
        hpc3c1->pbus_piocfg[0][0] = 0x3ffff;
        /* interrupt/config register on Challenge S Mezz board */
        hpc3c1->pbus_extregs[0][0] = 0x30;
 
        eth1_pd.hpc = hpc3c1;
        eth1_pd.irq = SGI_GIO_0_IRQ;
-#define EADDR_NVOFS     250
+#define EADDR_NVOFS    250
        for (i = 0; i < 3; i++) {
                unsigned short tmp = ip22_eeprom_read(&hpc3c1->eeprom,
-                                                     EADDR_NVOFS / 2 + i);
+                                                     EADDR_NVOFS / 2 + i);
 
                eth1_pd.mac[2 * i]     = tmp >> 8;
                eth1_pd.mac[2 * i + 1] = tmp & 0xff;
index 20363d2..063c2dd 100644 (file)
@@ -101,7 +101,7 @@ static void debounce(unsigned long data)
        del_timer(&debounce_timer);
        if (sgint->istat1 & SGINT_ISTAT1_PWR) {
                /* Interrupt still being sent. */
-               debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s  */
+               debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s  */
                add_timer(&debounce_timer);
 
                sgioc->panel = SGIOC_PANEL_POWERON | SGIOC_PANEL_POWERINTR |
@@ -166,7 +166,7 @@ static irqreturn_t panel_int(int irq, void *dev_id)
 }
 
 static int panic_event(struct notifier_block *this, unsigned long event,
-                      void *ptr)
+                     void *ptr)
 {
        if (machine_state & MACHINE_PANICED)
                return NOTIFY_DONE;
index 0626555..3f47346 100644 (file)
@@ -136,14 +136,14 @@ static void save_and_clear_buserr(void)
        hpc3.scsi[1].cbp   = hpc3c0->scsi_chan1.cbptr;
        hpc3.scsi[1].ndptr = hpc3c0->scsi_chan1.ndptr;
 
-       hpc3.ethrx.addr  = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
-       hpc3.ethrx.ctrl  = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
-       hpc3.ethrx.cbp   = hpc3c0->ethregs.rx_cbptr;
+       hpc3.ethrx.addr  = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
+       hpc3.ethrx.ctrl  = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
+       hpc3.ethrx.cbp   = hpc3c0->ethregs.rx_cbptr;
        hpc3.ethrx.ndptr = hpc3c0->ethregs.rx_ndptr;
 
-       hpc3.ethtx.addr  = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
-       hpc3.ethtx.ctrl  = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
-       hpc3.ethtx.cbp   = hpc3c0->ethregs.tx_cbptr;
+       hpc3.ethtx.addr  = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
+       hpc3.ethtx.ctrl  = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
+       hpc3.ethtx.cbp   = hpc3c0->ethregs.tx_cbptr;
        hpc3.ethtx.ndptr = hpc3c0->ethregs.tx_ndptr;
 
        for (i = 0; i < 8; ++i) {
@@ -196,11 +196,11 @@ static void print_cache_tags(void)
                        scb | (1 << 12)*i);
        }
        i = read_c0_config();
-       scb = i & (1 << 13) ? 7:6;      /* scblksize = 2^[7..6] */
+       scb = i & (1 << 13) ? 7:6;      /* scblksize = 2^[7..6] */
        scw = ((i >> 16) & 7) + 19 - 1; /* scwaysize = 2^[24..19] / 2 */
 
        i = ((1 << scw) - 1) & ~((1 << scb) - 1);
-       printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x  (PA[%u:%u] %05x)\n",
+       printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x  (PA[%u:%u] %05x)\n",
                cache_tags.tags[0][0].hi, cache_tags.tags[0][0].lo,
                cache_tags.tags[0][1].hi, cache_tags.tags[0][1].lo,
                scw-1, scb, i & (unsigned)cache_tags.err_addr);
index 04cebad..692778d 100644 (file)
@@ -39,7 +39,7 @@ static void dump_hub_information(unsigned long errst0, unsigned long errst1)
 
        printk("Hub has valid error information:\n");
        if (errst0 & PI_ERR_ST0_OVERRUN_MASK)
-               printk("Overrun is set.  Error stack may contain additional "
+               printk("Overrun is set.  Error stack may contain additional "
                       "information.\n");
        printk("Hub error address is %08lx\n",
               (errst0 & PI_ERR_ST0_ADDR_MASK) >> (PI_ERR_ST0_ADDR_SHFT - 3));
@@ -85,7 +85,7 @@ void __init ip27_be_init(void)
        board_be_handler = ip27_be_handler;
 
        LOCAL_HUB_S(PI_ERR_INT_PEND,
-                   cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
+                   cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
        LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
        LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
        LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0);      /* Disable error stack */
index 984e561..b952d5b 100644 (file)
@@ -31,7 +31,7 @@ static inline struct ioc3_uartregs *console_uart(void)
        return &ioc3->sregs.uarta;
 }
 
-void __init prom_putchar(char c)
+void prom_putchar(char c)
 {
        struct ioc3_uartregs *uart = console_uart();
 
index cd0d5b0..328ceb3 100644 (file)
 static int force_fire_and_forget = 1;
 
 /**
- * hub_pio_map  -  establish a HUB PIO mapping
+ * hub_pio_map -  establish a HUB PIO mapping
  *
  * @hub:       hub to perform PIO mapping on
  * @widget:    widget ID to perform PIO mapping for
- * @xtalk_addr:        xtalk_address that needs to be mapped
+ * @xtalk_addr: xtalk_address that needs to be mapped
  * @size:      size of the PIO mapping
  *
  **/
@@ -78,8 +78,8 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
 /*
  * hub_setup_prb(nasid, prbnum, credits, conveyor)
  *
- *     Put a PRB into fire-and-forget mode if conveyor isn't set.  Otherwise,
- *     put it into conveyor belt mode with the specified number of credits.
+ *     Put a PRB into fire-and-forget mode if conveyor isn't set.  Otherwise,
+ *     put it into conveyor belt mode with the specified number of credits.
  */
 static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
 {
@@ -125,12 +125,12 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
  * so we turn off access to all widgets for the duration of the function.
  *
  * XXX - This code should really check what kind of widget we're talking
- * to.  Bridges can only handle three requests, but XG will do more.
+ * to. Bridges can only handle three requests, but XG will do more.
  * How many can crossbow handle to widget 0?  We're assuming 1.
  *
  * XXX - There is a bug in the crossbow that link reset PIOs do not
  * return write responses.  The easiest solution to this problem is to
- * leave widget 0 (xbow) in fire-and-forget mode at all times.  This
+ * leave widget 0 (xbow) in fire-and-forget mode at all times. This
  * only affects pio's to xbow registers, which should be rare.
  **/
 static void hub_set_piomode(nasid_t nasid)
@@ -167,7 +167,7 @@ static void hub_set_piomode(nasid_t nasid)
 }
 
 /*
- * hub_pio_init  -  PIO-related hub initialization
+ * hub_pio_init         -  PIO-related hub initialization
  *
  * @hub:       hubinfo structure for our hub
  */
index 923c080..d41b1c6 100644 (file)
@@ -151,7 +151,7 @@ nasid_t
 get_nasid(void)
 {
        return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
-                        >> NSRI_NODEID_SHFT);
+                        >> NSRI_NODEID_SHFT);
 }
 
 /*
index 69a939a..2315cfe 100644 (file)
@@ -62,7 +62,7 @@ extern int irq_to_slot[];
  * from the irq value
  */
 #define IRQ_TO_BRIDGE(i)               irq_to_bridge[(i)]
-#define        SLOT_FROM_PCI_IRQ(i)            irq_to_slot[i]
+#define SLOT_FROM_PCI_IRQ(i)           irq_to_slot[i]
 
 static inline int alloc_level(int cpu, int irq)
 {
@@ -281,11 +281,11 @@ static unsigned int startup_bridge_irq(struct irq_data *d)
        device |= (pin << (pin*3));
        bridge->b_int_device = device;
 
-        bridge->b_wid_tflush;
+       bridge->b_wid_tflush;
 
        intr_connect_level(cpu, swlevel);
 
-        return 0;       /* Never anything pending.  */
+       return 0;       /* Never anything pending.  */
 }
 
 /* Shutdown one of the (PCI ...) IRQs routes over a bridge.  */
index cd8fcab..3505d08 100644 (file)
@@ -31,8 +31,8 @@
 #include <asm/sn/sn_private.h>
 
 
-#define SLOT_PFNSHIFT           (SLOT_SHIFT - PAGE_SHIFT)
-#define PFN_NASIDSHFT           (NASID_SHFT - PAGE_SHIFT)
+#define SLOT_PFNSHIFT          (SLOT_SHIFT - PAGE_SHIFT)
+#define PFN_NASIDSHFT          (NASID_SHFT - PAGE_SHIFT)
 
 struct node_data *__node_data[MAX_COMPACT_NODES];
 
@@ -43,7 +43,7 @@ static int fine_mode;
 static int is_fine_dirmode(void)
 {
        return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
-               >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
+               >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
 }
 
 static hubreg_t get_region(cnodeid_t cnode)
@@ -66,7 +66,7 @@ static void gen_region_mask(hubreg_t *region_mask)
        }
 }
 
-#define        rou_rflag       rou_flags
+#define rou_rflag      rou_flags
 
 static int router_distance;
 
@@ -412,7 +412,7 @@ static void __init node_mem_init(cnodeid_t node)
        slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
                               sizeof(struct hub_data));
 
-       bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
+       bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
                                        start_pfn, end_pfn);
        free_bootmem_with_active_regions(node, end_pfn);
        reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
@@ -422,7 +422,7 @@ static void __init node_mem_init(cnodeid_t node)
 }
 
 /*
- * A node with nothing.  We use it to avoid any special casing in
+ * A node with nothing.         We use it to avoid any special casing in
  * cpumask_of_node
  */
 static struct node_data null_node = {
index 005c29e..a2358b4 100644 (file)
@@ -54,7 +54,7 @@ void install_cpu_nmi_handler(int slice)
 void nmi_cpu_eframe_save(nasid_t nasid, int slice)
 {
        struct reg_struct *nr;
-       int             i;
+       int             i;
 
        /* Get the pointer to the current cpu's register set. */
        nr = (struct reg_struct *)
@@ -86,12 +86,12 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice)
        printk("%s\n", print_tainted());
        printk("ErrEPC: %016lx %pS\n", nr->error_epc, (void *) nr->error_epc);
        printk("ra    : %016lx %pS\n", nr->gpr[31], (void *) nr->gpr[31]);
-       printk("Status: %08lx         ", nr->sr);
+       printk("Status: %08lx         ", nr->sr);
 
        if (nr->sr & ST0_KX)
                printk("KX ");
        if (nr->sr & ST0_SX)
-               printk("SX      ");
+               printk("SX      ");
        if (nr->sr & ST0_UX)
                printk("UX ");
 
index f347bc6..ac37e54 100644 (file)
@@ -29,7 +29,7 @@ void machine_restart(char *command) __attribute__((noreturn));
 void machine_halt(void) __attribute__((noreturn));
 void machine_power_off(void) __attribute__((noreturn));
 
-#define noreturn while(1);                             /* Silence gcc.  */
+#define noreturn while(1);                             /* Silence gcc.  */
 
 /* XXX How to pass the reboot command to the firmware??? */
 static void ip27_machine_restart(char *command)
index 735b43b..f946381 100644 (file)
@@ -191,7 +191,7 @@ static void __init ip27_cpus_done(void)
 }
 
 /*
- * Launch a slave into smp_bootstrap().  It doesn't take an argument, and we
+ * Launch a slave into smp_bootstrap().         It doesn't take an argument, and we
  * set sp to the kernel stack of the newly created idle process, gp to the proc
  * struct so that current_thread_info() will work.
  */
@@ -219,7 +219,7 @@ static void __init ip27_smp_setup(void)
 
        /*
         * Assumption to be fixed: we're always booted on logical / physical
-        * processor 0.  While we're always running on logical processor 0
+        * processor 0.  While we're always running on logical processor 0
         * this still means this is physical processor zero; it might for
         * example be disabled in the firmware.
         */
index 13cfeab..fff58ac 100644 (file)
@@ -117,8 +117,8 @@ void __cpuinit hub_rt_clock_event_init(void)
        cd->name                = name;
        cd->features            = CLOCK_EVT_FEAT_ONESHOT;
        clockevent_set_clock(cd, CYCLES_PER_SEC);
-       cd->max_delta_ns        = clockevent_delta2ns(0xfffffffffffff, cd);
-       cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
+       cd->max_delta_ns        = clockevent_delta2ns(0xfffffffffffff, cd);
+       cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
        cd->rating              = 200;
        cd->irq                 = irq;
        cd->cpumask             = cpumask_of(cpu);
@@ -153,7 +153,7 @@ static cycle_t hub_rt_read(struct clocksource *cs)
 
 struct clocksource hub_rt_clocksource = {
        .name   = "HUB-RT",
-       .rating = 200,
+       .rating = 200,
        .read   = hub_rt_read,
        .mask   = CLOCKSOURCE_MASK(52),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
index 5e871e7..a4df7d0 100644 (file)
 #include <asm/xtalk/xtalk.h>
 
 
-#define XBOW_WIDGET_PART_NUM    0x0
-#define XXBOW_WIDGET_PART_NUM   0xd000  /* Xbow in Xbridge */
-#define BASE_XBOW_PORT         8     /* Lowest external port */
+#define XBOW_WIDGET_PART_NUM   0x0
+#define XXBOW_WIDGET_PART_NUM  0xd000  /* Xbow in Xbridge */
+#define BASE_XBOW_PORT         8     /* Lowest external port */
 
 extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
 
 static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
 {
-       widgetreg_t             widget_id;
+       widgetreg_t             widget_id;
        xwidget_part_num_t      partnum;
 
        widget_id = *(volatile widgetreg_t *)
@@ -102,10 +102,10 @@ static int __cpuinit xbow_probe(nasid_t nasid)
 
 void __cpuinit xtalk_probe_node(cnodeid_t nid)
 {
-       volatile u64            hubreg;
-       nasid_t                 nasid;
+       volatile u64            hubreg;
+       nasid_t                 nasid;
        xwidget_part_num_t      partnum;
-       widgetreg_t             widget_id;
+       widgetreg_t             widget_id;
 
        nasid = COMPACT_TO_NASID_NODEID(nid);
        hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
@@ -115,7 +115,7 @@ void __cpuinit xtalk_probe_node(cnodeid_t nid)
                return;
 
        widget_id = *(volatile widgetreg_t *)
-                       (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
+                      (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
        partnum = XWIDGET_PART_NUM(widget_id);
 
        printk(KERN_INFO "Cpu %d, Nasid 0x%x: partnum 0x%x is ",
index e7d5054..e0c7d9e 100644 (file)
@@ -173,7 +173,7 @@ static struct irq_chip crime_edge_interrupt = {
 
 /*
  * This is for MACE PCI interrupts.  We can decrease bus traffic by masking
- * as close to the source as possible.  This also means we can take the
+ * as close to the source as possible. This also means we can take the
  * next chunk of the CRIME register in one piece.
  */
 
@@ -271,11 +271,11 @@ static void disable_maceisa_irq(struct irq_data *d)
        unsigned int crime_int = 0;
 
        maceisa_mask &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
-        if (!(maceisa_mask & MACEISA_AUDIO_INT))
+       if (!(maceisa_mask & MACEISA_AUDIO_INT))
                crime_int |= MACE_AUDIO_INT;
-        if (!(maceisa_mask & MACEISA_MISC_INT))
+       if (!(maceisa_mask & MACEISA_MISC_INT))
                crime_int |= MACE_MISC_INT;
-        if (!(maceisa_mask & MACEISA_SUPERIO_INT))
+       if (!(maceisa_mask & MACEISA_SUPERIO_INT))
                crime_int |= MACE_SUPERIO_INT;
        crime_mask &= ~crime_int;
        crime->imask = crime_mask;
index 911dfe3..d03a075 100644 (file)
@@ -9,7 +9,7 @@ platform-$(CONFIG_SIBYTE_BCM1x80)       += sibyte/
 #
 # Sibyte SB1250 / BCM1480 family of SOCs
 #
-cflags-$(CONFIG_SIBYTE_BCM112X)        +=                                      \
+cflags-$(CONFIG_SIBYTE_BCM112X) +=                                     \
                -I$(srctree)/arch/mips/include/asm/mach-sibyte          \
                -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
 
@@ -18,11 +18,11 @@ cflags-$(CONFIG_SIBYTE_SB1250)      +=                                      \
                -I$(srctree)/arch/mips/include/asm/mach-sibyte          \
                -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
 
-cflags-$(CONFIG_SIBYTE_BCM1x55)        +=                                      \
+cflags-$(CONFIG_SIBYTE_BCM1x55) +=                                     \
                -I$(srctree)/arch/mips/include/asm/mach-sibyte          \
                -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
 
-cflags-$(CONFIG_SIBYTE_BCM1x80)        +=                                      \
+cflags-$(CONFIG_SIBYTE_BCM1x80) +=                                     \
                -I$(srctree)/arch/mips/include/asm/mach-sibyte          \
                -DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
 
index 215713e..09d6e16 100644 (file)
@@ -283,10 +283,10 @@ void __init arch_init_irq(void)
        for (cpu = 0; cpu < 4; cpu++) {
                __raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
                                                 (K_BCM1480_INT_MBOX_0_0 << 3)));
-        }
+       }
 
 
-       /* Clear the mailboxes.  The firmware may leave them dirty */
+       /* Clear the mailboxes.  The firmware may leave them dirty */
        for (cpu = 0; cpu < 4; cpu++) {
                __raw_writeq(0xffffffffffffffffULL,
                             IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
@@ -307,7 +307,7 @@ void __init arch_init_irq(void)
 
        /*
         * Note that the timer interrupts are also mapped, but this is
-        * done in bcm1480_time_init().  Also, the profiling driver
+        * done in bcm1480_time_init().  Also, the profiling driver
         * does its own management of IP7.
         */
 
@@ -325,7 +325,7 @@ static inline void dispatch_ip2(void)
 
        /*
         * Default...we've hit an IP[2] interrupt, which means we've got to
-        * check the 1480 interrupt registers to figure out what to do.  Need
+        * check the 1480 interrupt registers to figure out what to do.  Need
         * to detect which CPU we're on, now that smp_affinity is supported.
         */
        base = A_BCM1480_IMR_MAPPER(cpu);
index 6343011..588e180 100644 (file)
@@ -127,8 +127,8 @@ static __init void prom_meminit(void)
                                if ((initrd_pstart > addr) &&
                                    (initrd_pstart < (addr + size))) {
                                        add_memory_region(addr,
-                                                         initrd_pstart - addr,
-                                                         BOOT_MEM_RAM);
+                                                         initrd_pstart - addr,
+                                                         BOOT_MEM_RAM);
                                        rd_flag = 1;
                                }
                                if ((initrd_pend > addr) &&
@@ -195,7 +195,7 @@ static int __init initrd_setup(char *str)
 
        /*
         *Initrd location comes in the form "<hex size of ramdisk in bytes>@<location in memory>"
-        *  e.g. initrd=3abfd@80010000.  This is set up by the loader.
+        *  e.g. initrd=3abfd@80010000.  This is set up by the loader.
         */
        for (tmp = str; *tmp != '@'; tmp++) {
                if (!*tmp) {
@@ -244,7 +244,7 @@ void __init prom_init(void)
        int *prom_vec = (int *) fw_arg3;
 
        _machine_restart   = cfe_linux_restart;
-       _machine_halt      = cfe_linux_halt;
+       _machine_halt      = cfe_linux_halt;
        pm_power_off = cfe_linux_halt;
 
        /*
@@ -299,7 +299,7 @@ void __init prom_init(void)
 #ifdef CONFIG_BLK_DEV_INITRD
        {
                char *ptr;
-               /* Need to find out early whether we've got an initrd.  So scan
+               /* Need to find out early whether we've got an initrd.  So scan
                   the list looking now */
                for (ptr = arcs_cmdline; *ptr; ptr++) {
                        while (*ptr == ' ') {
index e8c4538..2188b39 100644 (file)
@@ -152,7 +152,7 @@ static u64 tb_period;
 
 static void arm_tb(void)
 {
-        u64 scdperfcnt;
+       u64 scdperfcnt;
        u64 next = (1ULL << 40) - tb_period;
        u64 tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;
 
@@ -257,8 +257,8 @@ static irqreturn_t sbprof_pc_intr(int irq, void *dev_id)
 
 /*
  * Requires: Already called zclk_timer_init with a value that won't
- *           saturate 40 bits.  No subsequent use of SCD performance counters
- *           or trace buffer.
+ *          saturate 40 bits.  No subsequent use of SCD performance counters
+ *          or trace buffer.
  */
 
 static int sbprof_zbprof_start(struct file *filp)
@@ -288,8 +288,8 @@ static int sbprof_zbprof_start(struct file *filp)
 
        /*
         * We grab this interrupt to prevent others from trying to use
-         * it, even though we don't want to service the interrupts
-         * (they only feed into the trace-on-interrupt mechanism)
+        * it, even though we don't want to service the interrupts
+        * (they only feed into the trace-on-interrupt mechanism)
         */
        if (request_irq(K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
                free_irq(K_INT_TRACE_FREEZE, &sbp);
@@ -298,7 +298,7 @@ static int sbprof_zbprof_start(struct file *filp)
 
        /*
         * I need the core to mask these, but the interrupt mapper to
-        *  pass them through.  I am exploiting my knowledge that
+        *  pass them through.  I am exploiting my knowledge that
         *  cp0_status masks out IP[5]. krw
         */
 #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
@@ -328,7 +328,7 @@ static int sbprof_zbprof_start(struct file *filp)
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));
 
        /* Initialize Trace Event 0-7 */
-       /*                              when interrupt  */
+       /*                              when interrupt  */
        __raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
@@ -479,7 +479,7 @@ static ssize_t sbprof_tb_read(struct file *filp, char *buf,
                        return err;
                }
                pr_debug(DEVNAME ": read from sample %d, %d bytes\n",
-                        cur_sample, cur_count);
+                        cur_sample, cur_count);
                size -= cur_count;
                sample_left -= cur_count;
                if (!sample_left) {
@@ -540,7 +540,7 @@ static const struct file_operations sbprof_tb_fops = {
        .open           = sbprof_tb_open,
        .release        = sbprof_tb_release,
        .read           = sbprof_tb_read,
-       .unlocked_ioctl = sbprof_tb_ioctl,
+       .unlocked_ioctl = sbprof_tb_ioctl,
        .compat_ioctl   = sbprof_tb_ioctl,
        .mmap           = NULL,
        .llseek         = default_llseek,
index 86e6e54..e651105 100644 (file)
@@ -71,7 +71,7 @@ static void print_summary(uint32_t status, uint32_t l2_err,
  * already been destructively read out of the registers.
  *
  * notes: this is currently used by the cache error handler
- *        should provide locking against the interrupt handler
+ *       should provide locking against the interrupt handler
  */
 void check_bus_watcher(void)
 {
@@ -119,7 +119,7 @@ static int bw_print_buffer(char *page, struct bw_stats_struct *stats)
                       (int)G_SCD_BERR_RID(stats->status),
                       (int)G_SCD_BERR_DCODE(stats->status));
        /* XXXKW indicate multiple errors between printings, or stats
-           collection (or both)? */
+          collection (or both)? */
        if (stats->status & M_SCD_BERR_MULTERRS)
                len += sprintf(page+len, "Multiple errors observed since last check.\n");
        if (stats->status_printed) {
@@ -168,7 +168,7 @@ static void create_proc_decoder(struct bw_stats_struct *stats)
  * sibyte_bw_int - handle bus watcher interrupts and accumulate counts
  *
  * notes: possible re-entry due to multiple sources
- *        should check/indicate saturation
+ *       should check/indicate saturation
  */
 static irqreturn_t sibyte_bw_int(int irq, void *data)
 {
index 340aaf6..fca0cdb 100644 (file)
@@ -264,7 +264,7 @@ void __init arch_init_irq(void)
                     IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
                            (K_INT_MBOX_0 << 3)));
 
-       /* Clear the mailboxes.  The firmware may leave them dirty */
+       /* Clear the mailboxes.  The firmware may leave them dirty */
        __raw_writeq(0xffffffffffffffffULL,
                     IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
        __raw_writeq(0xffffffffffffffffULL,
@@ -277,7 +277,7 @@ void __init arch_init_irq(void)
 
        /*
         * Note that the timer interrupts are also mapped, but this is
-        * done in sb1250_time_init().  Also, the profiling driver
+        * done in sb1250_time_init().  Also, the profiling driver
         * does its own management of IP7.
         */
 
@@ -294,7 +294,7 @@ static inline void dispatch_ip2(void)
 
        /*
         * Default...we've hit an IP[2] interrupt, which means we've got to
-        * check the 1250 interrupt registers to figure out what to do.  Need
+        * check the 1250 interrupt registers to figure out what to do.  Need
         * to detect which CPU we're on, now that smp_affinity is supported.
         */
        mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
@@ -323,7 +323,7 @@ asmlinkage void plat_irq_dispatch(void)
        if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
                do_IRQ(MIPS_CPU_IRQ_BASE + 7);
        else if (pending & CAUSEF_IP4)
-               do_IRQ(K_INT_TIMER_0 + cpu);    /* sb1250_timer_interrupt() */
+               do_IRQ(K_INT_TIMER_0 + cpu);    /* sb1250_timer_interrupt() */
 
 #ifdef CONFIG_SMP
        else if (pending & CAUSEF_IP3)
index 92da315..a14bd4c 100644 (file)
@@ -203,8 +203,8 @@ void __init sb1250_setup(void)
        case K_SYS_REVISION_BCM1250_PASS1:
 #ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
                printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
-                           "and the kernel doesn't have the proper "
-                           "workarounds compiled in. @@@@\n");
+                           "and the kernel doesn't have the proper "
+                           "workarounds compiled in. @@@@\n");
                bad_config = 1;
 #endif
                break;
@@ -213,28 +213,28 @@ void __init sb1250_setup(void)
 #if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \
     !defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS)
                printk("@@@@ This is a BCM1250 A3-A10 board, and the "
-                           "kernel doesn't have the proper workarounds "
-                           "compiled in. @@@@\n");
+                           "kernel doesn't have the proper workarounds "
+                           "compiled in. @@@@\n");
                bad_config = 1;
 #endif
 #ifdef CONFIG_CPU_HAS_PREFETCH
                printk("@@@@ Prefetches may be enabled in this kernel, "
-                           "but are buggy on this board.  @@@@\n");
+                           "but are buggy on this board.  @@@@\n");
                bad_config = 1;
 #endif
                break;
        case K_SYS_REVISION_BCM1250_PASS2_2:
 #ifndef CONFIG_SB1_PASS_2_WORKAROUNDS
                printk("@@@@ This is a BCM1250 B1/B2. board, and the "
-                           "kernel doesn't have the proper workarounds "
-                           "compiled in. @@@@\n");
+                           "kernel doesn't have the proper workarounds "
+                           "compiled in. @@@@\n");
                bad_config = 1;
 #endif
 #if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \
     !defined(CONFIG_CPU_HAS_PREFETCH)
                printk("@@@@ This is a BCM1250 B1/B2, but the kernel is "
-                           "conservatively configured for an 'A' stepping. "
-                           "@@@@\n");
+                           "conservatively configured for an 'A' stepping. "
+                           "@@@@\n");
 #endif
                break;
        default:
index 0973352..9480c14 100644 (file)
@@ -13,7 +13,7 @@
 
 #define DRV_NAME       "pata-swarm"
 
-#define SWARM_IDE_SHIFT        5
+#define SWARM_IDE_SHIFT 5
 #define SWARM_IDE_BASE 0x1f0
 #define SWARM_IDE_CTRL 0x3f6
 
@@ -123,7 +123,7 @@ static int __init sb1250_device_init(void)
        case K_SYS_SOC_TYPE_BCM1120:
        case K_SYS_SOC_TYPE_BCM1125:
        case K_SYS_SOC_TYPE_BCM1125H:
-       case K_SYS_SOC_TYPE_BCM1250_ALT2:       /* Hybrid */
+       case K_SYS_SOC_TYPE_BCM1250_ALT2:       /* Hybrid */
                ret = platform_add_devices(sb1250_devs, 2);
                break;
        case K_SYS_SOC_TYPE_BCM1x55:
index 4438b21..178a824 100644 (file)
@@ -4,8 +4,8 @@
  * Copyright (C) 2002 MontaVista Software Inc.
  * Author: jsun@mvista.com or jsun@junsun.net
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
  * Register bits
  */
 
-#define X1241REG_SR_BAT        0x80            /* currently on battery power */
+#define X1241REG_SR_BAT 0x80           /* currently on battery power */
 #define X1241REG_SR_RWEL 0x04          /* r/w latch is enabled, can write RTC */
 #define X1241REG_SR_WEL 0x02           /* r/w latch is unlocked, can enable r/w now */
 #define X1241REG_SR_RTCF 0x01          /* clock failed */
 #define X1241REG_BL_BP2 0x80           /* block protect 2 */
 #define X1241REG_BL_BP1 0x40           /* block protect 1 */
 #define X1241REG_BL_BP0 0x20           /* block protect 0 */
-#define X1241REG_BL_WD1        0x10
-#define X1241REG_BL_WD0        0x08
+#define X1241REG_BL_WD1 0x10
+#define X1241REG_BL_WD0 0x08
 #define X1241REG_HR_MIL 0x80           /* military time format */
 
 /*
 
 static int xicor_read(uint8_t addr)
 {
-        while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
-                ;
+       while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+               ;
 
        __raw_writeq((addr >> 8) & 0x7, SMB_CSR(R_SMB_CMD));
        __raw_writeq(addr & 0xff, SMB_CSR(R_SMB_DATA));
        __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR2BYTE,
                     SMB_CSR(R_SMB_START));
 
-        while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
-                ;
+       while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+               ;
 
        __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_RD1BYTE,
                     SMB_CSR(R_SMB_START));
 
-        while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
-                ;
+       while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+               ;
 
-        if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
-                /* Clear error bit by writing a 1 */
-                __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
-                return -1;
-        }
+       if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+               /* Clear error bit by writing a 1 */
+               __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+               return -1;
+       }
 
        return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff);
 }
 
 static int xicor_write(uint8_t addr, int b)
 {
-        while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
-                ;
+       while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+               ;
 
        __raw_writeq(addr, SMB_CSR(R_SMB_CMD));
        __raw_writeq((addr & 0xff) | ((b & 0xff) << 8), SMB_CSR(R_SMB_DATA));
        __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR3BYTE,
                     SMB_CSR(R_SMB_START));
 
-        while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
-                ;
+       while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+               ;
 
-        if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
-                /* Clear error bit by writing a 1 */
-                __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
-                return -1;
-        } else {
+       if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+               /* Clear error bit by writing a 1 */
+               __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+               return -1;
+       } else {
                return 0;
        }
 }
index e05ad4d..dd0ab98 100644 (file)
@@ -41,17 +41,17 @@ static struct platform_device a20r_serial8250_device = {
 };
 
 static struct resource a20r_ds1216_rsrc[] = {
-        {
-                .start = 0x1c081ffc,
-                .end   = 0x1c081fff,
-                .flags = IORESOURCE_MEM
-        }
+       {
+               .start = 0x1c081ffc,
+               .end   = 0x1c081fff,
+               .flags = IORESOURCE_MEM
+       }
 };
 
 static struct platform_device a20r_ds1216_device = {
-        .name           = "rtc-ds1216",
-        .num_resources  = ARRAY_SIZE(a20r_ds1216_rsrc),
-        .resource       = a20r_ds1216_rsrc
+       .name           = "rtc-ds1216",
+       .num_resources  = ARRAY_SIZE(a20r_ds1216_rsrc),
+       .resource       = a20r_ds1216_rsrc
 };
 
 static struct resource snirm_82596_rsrc[] = {
@@ -76,14 +76,14 @@ static struct resource snirm_82596_rsrc[] = {
                .flags = IORESOURCE_IRQ
        },
        {
-               .flags = 0x01                /* 16bit mpu port access */
+               .flags = 0x01                /* 16bit mpu port access */
        }
 };
 
 static struct platform_device snirm_82596_pdev = {
-       .name           = "snirm_82596",
-       .num_resources  = ARRAY_SIZE(snirm_82596_rsrc),
-       .resource       = snirm_82596_rsrc
+       .name           = "snirm_82596",
+       .num_resources  = ARRAY_SIZE(snirm_82596_rsrc),
+       .resource       = snirm_82596_rsrc
 };
 
 static struct resource snirm_53c710_rsrc[] = {
@@ -100,9 +100,9 @@ static struct resource snirm_53c710_rsrc[] = {
 };
 
 static struct platform_device snirm_53c710_pdev = {
-       .name           = "snirm_53c710",
-       .num_resources  = ARRAY_SIZE(snirm_53c710_rsrc),
-       .resource       = snirm_53c710_rsrc
+       .name           = "snirm_53c710",
+       .num_resources  = ARRAY_SIZE(snirm_53c710_rsrc),
+       .resource       = snirm_53c710_rsrc
 };
 
 static struct resource sc26xx_rsrc[] = {
@@ -171,7 +171,7 @@ static u32 a20r_ack_hwint(void)
        "       addiu   %1, -1                  \n"
        "       sw      $1, 0(%0)               \n"
        "       sync                            \n"
-               ".set   pop                     \n"
+               ".set   pop                     \n"
        :
        : "Jr" (PCIMT_UCONF), "Jr" (0xbc000000));
        write_c0_status(status);
@@ -236,13 +236,13 @@ static int __init snirm_a20r_setup_devinit(void)
        switch (sni_brd_type) {
        case SNI_BRD_TOWER_OASIC:
        case SNI_BRD_MINITOWER:
-               platform_device_register(&snirm_82596_pdev);
-               platform_device_register(&snirm_53c710_pdev);
-               platform_device_register(&sc26xx_pdev);
-               platform_device_register(&a20r_serial8250_device);
-               platform_device_register(&a20r_ds1216_device);
+               platform_device_register(&snirm_82596_pdev);
+               platform_device_register(&snirm_53c710_pdev);
+               platform_device_register(&sc26xx_pdev);
+               platform_device_register(&a20r_serial8250_device);
+               platform_device_register(&a20r_ds1216_device);
                sni_eisa_root_init();
-               break;
+               break;
        }
        return 0;
 }
index 6827feb..179b5d5 100644 (file)
@@ -22,7 +22,7 @@ static struct platform_device eisa_root_dev = {
 };
 
 static struct eisa_root_device eisa_bus_root = {
-       .dev           = &eisa_root_dev.dev,
+       .dev           = &eisa_root_dev.dev,
        .bus_base_addr = 0,
        .res           = &ioport_resource,
        .slots         = EISA_MAX_SLOTS,
index 5a4ec75..ac61b90 100644 (file)
@@ -58,25 +58,25 @@ void __init arch_init_irq(void)
        case SNI_BRD_10NEW:
        case SNI_BRD_TOWER_OASIC:
        case SNI_BRD_MINITOWER:
-               sni_a20r_irq_init();
-               break;
+               sni_a20r_irq_init();
+               break;
 
        case SNI_BRD_PCI_TOWER:
-               sni_pcit_irq_init();
-               break;
+               sni_pcit_irq_init();
+               break;
 
        case SNI_BRD_PCI_TOWER_CPLUS:
-               sni_pcit_cplus_irq_init();
-               break;
+               sni_pcit_cplus_irq_init();
+               break;
 
        case SNI_BRD_RM200:
-               sni_rm200_irq_init();
-               break;
+               sni_rm200_irq_init();
+               break;
 
        case SNI_BRD_PCI_MTOWER:
        case SNI_BRD_PCI_DESKTOP:
        case SNI_BRD_PCI_MTOWER_CPLUS:
-               sni_pcimt_irq_init();
-               break;
+               sni_pcimt_irq_init();
+               break;
        }
 }
index cdb1417..cec4b8c 100644 (file)
@@ -60,7 +60,7 @@ static inline void sni_pcimt_detect(void)
        p += sprintf(p, "%s PCI", (csmsr & 0x80) ? "RM200" : "RM300");
        if ((csmsr & 0x80) == 0)
                p += sprintf(p, ", board revision %s",
-                            (csmsr & 0x20) ? "D" : "C");
+                            (csmsr & 0x20) ? "D" : "C");
        asic = csmsr & 0x80;
        asic = (csmsr & 0x08) ? asic : !asic;
        p += sprintf(p, ", ASIC PCI Rev %s", asic ? "1.0" : "1.1");
@@ -91,22 +91,22 @@ static struct platform_device pcimt_serial8250_device = {
 };
 
 static struct resource pcimt_cmos_rsrc[] = {
-        {
-                .start = 0x70,
-                .end   = 0x71,
-                .flags = IORESOURCE_IO
-        },
-        {
-                .start = 8,
-                .end   = 8,
-                .flags = IORESOURCE_IRQ
-        }
+       {
+               .start = 0x70,
+               .end   = 0x71,
+               .flags = IORESOURCE_IO
+       },
+       {
+               .start = 8,
+               .end   = 8,
+               .flags = IORESOURCE_IRQ
+       }
 };
 
 static struct platform_device pcimt_cmos_device = {
-        .name           = "rtc_cmos",
-        .num_resources  = ARRAY_SIZE(pcimt_cmos_rsrc),
-        .resource       = pcimt_cmos_rsrc
+       .name           = "rtc_cmos",
+       .num_resources  = ARRAY_SIZE(pcimt_cmos_rsrc),
+       .resource       = pcimt_cmos_rsrc
 };
 
 
@@ -191,7 +191,7 @@ static struct pci_controller sni_controller = {
        .mem_offset     = 0x00000000UL,
        .io_resource    = &sni_io_resource,
        .io_offset      = 0x00000000UL,
-       .io_map_base    = SNI_PORT_BASE
+       .io_map_base    = SNI_PORT_BASE
 };
 
 static void enable_pcimt_irq(struct irq_data *d)
@@ -319,9 +319,9 @@ static int __init snirm_pcimt_setup_devinit(void)
        case SNI_BRD_PCI_MTOWER:
        case SNI_BRD_PCI_DESKTOP:
        case SNI_BRD_PCI_MTOWER_CPLUS:
-               platform_device_register(&pcimt_serial8250_device);
-               platform_device_register(&pcimt_cmos_device);
-               break;
+               platform_device_register(&pcimt_serial8250_device);
+               platform_device_register(&pcimt_cmos_device);
+               break;
        }
 
        return 0;
index b524637..7cddd03 100644 (file)
@@ -59,22 +59,22 @@ static struct platform_device pcit_cplus_serial8250_device = {
 };
 
 static struct resource pcit_cmos_rsrc[] = {
-        {
-                .start = 0x70,
-                .end   = 0x71,
-                .flags = IORESOURCE_IO
-        },
-        {
-                .start = 8,
-                .end   = 8,
-                .flags = IORESOURCE_IRQ
-        }
+       {
+               .start = 0x70,
+               .end   = 0x71,
+               .flags = IORESOURCE_IO
+       },
+       {
+               .start = 8,
+               .end   = 8,
+               .flags = IORESOURCE_IRQ
+       }
 };
 
 static struct platform_device pcit_cmos_device = {
-        .name           = "rtc_cmos",
-        .num_resources  = ARRAY_SIZE(pcit_cmos_rsrc),
-        .resource       = pcit_cmos_rsrc
+       .name           = "rtc_cmos",
+       .num_resources  = ARRAY_SIZE(pcit_cmos_rsrc),
+       .resource       = pcit_cmos_rsrc
 };
 
 static struct platform_device pcit_pcspeaker_pdev = {
@@ -153,7 +153,7 @@ static struct pci_controller sni_pcit_controller = {
        .mem_offset     = 0x00000000UL,
        .io_resource    = &sni_io_resource,
        .io_offset      = 0x00000000UL,
-       .io_map_base    = SNI_PORT_BASE
+       .io_map_base    = SNI_PORT_BASE
 };
 
 static void enable_pcit_irq(struct irq_data *d)
@@ -272,16 +272,16 @@ static int __init snirm_pcit_setup_devinit(void)
 {
        switch (sni_brd_type) {
        case SNI_BRD_PCI_TOWER:
-               platform_device_register(&pcit_serial8250_device);
-               platform_device_register(&pcit_cmos_device);
+               platform_device_register(&pcit_serial8250_device);
+               platform_device_register(&pcit_cmos_device);
                platform_device_register(&pcit_pcspeaker_pdev);
-               break;
+               break;
 
        case SNI_BRD_PCI_TOWER_CPLUS:
-               platform_device_register(&pcit_cplus_serial8250_device);
-               platform_device_register(&pcit_cmos_device);
+               platform_device_register(&pcit_cplus_serial8250_device);
+               platform_device_register(&pcit_cmos_device);
                platform_device_register(&pcit_pcspeaker_pdev);
-               break;
+               break;
        }
        return 0;
 }
index 3ab5b5d..a046b30 100644 (file)
@@ -48,17 +48,17 @@ static struct platform_device rm200_serial8250_device = {
 };
 
 static struct resource rm200_ds1216_rsrc[] = {
-        {
-                .start = 0x1cd41ffc,
-                .end   = 0x1cd41fff,
-                .flags = IORESOURCE_MEM
-        }
+       {
+               .start = 0x1cd41ffc,
+               .end   = 0x1cd41fff,
+               .flags = IORESOURCE_MEM
+       }
 };
 
 static struct platform_device rm200_ds1216_device = {
-        .name           = "rtc-ds1216",
-        .num_resources  = ARRAY_SIZE(rm200_ds1216_rsrc),
-        .resource       = rm200_ds1216_rsrc
+       .name           = "rtc-ds1216",
+       .num_resources  = ARRAY_SIZE(rm200_ds1216_rsrc),
+       .resource       = rm200_ds1216_rsrc
 };
 
 static struct resource snirm_82596_rm200_rsrc[] = {
@@ -88,9 +88,9 @@ static struct resource snirm_82596_rm200_rsrc[] = {
 };
 
 static struct platform_device snirm_82596_rm200_pdev = {
-       .name           = "snirm_82596",
-       .num_resources  = ARRAY_SIZE(snirm_82596_rm200_rsrc),
-       .resource       = snirm_82596_rm200_rsrc
+       .name           = "snirm_82596",
+       .num_resources  = ARRAY_SIZE(snirm_82596_rm200_rsrc),
+       .resource       = snirm_82596_rm200_rsrc
 };
 
 static struct resource snirm_53c710_rm200_rsrc[] = {
@@ -107,9 +107,9 @@ static struct resource snirm_53c710_rm200_rsrc[] = {
 };
 
 static struct platform_device snirm_53c710_rm200_pdev = {
-       .name           = "snirm_53c710",
-       .num_resources  = ARRAY_SIZE(snirm_53c710_rm200_rsrc),
-       .resource       = snirm_53c710_rm200_rsrc
+       .name           = "snirm_53c710",
+       .num_resources  = ARRAY_SIZE(snirm_53c710_rm200_rsrc),
+       .resource       = snirm_53c710_rm200_rsrc
 };
 
 static int __init snirm_setup_devinit(void)
@@ -134,9 +134,9 @@ device_initcall(snirm_setup_devinit);
  */
 
 static DEFINE_RAW_SPINLOCK(sni_rm200_i8259A_lock);
-#define PIC_CMD    0x00
-#define PIC_IMR    0x01
-#define PIC_ISR    PIC_CMD
+#define PIC_CMD           0x00
+#define PIC_IMR           0x01
+#define PIC_ISR           PIC_CMD
 #define PIC_POLL   PIC_ISR
 #define PIC_OCW3   PIC_ISR
 
@@ -421,8 +421,8 @@ void __init sni_rm200_i8259_irqs(void)
 }
 
 
-#define SNI_RM200_INT_STAT_REG  CKSEG1ADDR(0xbc000000)
-#define SNI_RM200_INT_ENA_REG   CKSEG1ADDR(0xbc080000)
+#define SNI_RM200_INT_STAT_REG CKSEG1ADDR(0xbc000000)
+#define SNI_RM200_INT_ENA_REG  CKSEG1ADDR(0xbc080000)
 
 #define SNI_RM200_INT_START  24
 #define SNI_RM200_INT_END    28
index 2e9c283..5b09b35 100644 (file)
@@ -204,23 +204,23 @@ void __init plat_mem_setup(void)
        case SNI_BRD_10NEW:
        case SNI_BRD_TOWER_OASIC:
        case SNI_BRD_MINITOWER:
-               sni_a20r_init();
-               break;
+               sni_a20r_init();
+               break;
 
        case SNI_BRD_PCI_TOWER:
        case SNI_BRD_PCI_TOWER_CPLUS:
-               sni_pcit_init();
+               sni_pcit_init();
                break;
 
        case SNI_BRD_RM200:
-               sni_rm200_init();
-               break;
+               sni_rm200_init();
+               break;
 
        case SNI_BRD_PCI_MTOWER:
        case SNI_BRD_PCI_DESKTOP:
        case SNI_BRD_PCI_MTOWER_CPLUS:
-               sni_pcimt_init();
-               break;
+               sni_pcimt_init();
+               break;
        }
 
        _machine_restart = sni_machine_restart;
@@ -247,16 +247,16 @@ static void quirk_cirrus_ram_size(struct pci_dev *dev)
         */
        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if ((cmd & (PCI_COMMAND_IO|PCI_COMMAND_MEMORY))
-               == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
-               vga_wseq(NULL, CL_SEQR6, 0x12); /* unlock all extension registers */
+               == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
+               vga_wseq(NULL, CL_SEQR6, 0x12); /* unlock all extension registers */
                vga_wseq(NULL, CL_SEQRF, 0x18);
        }
 }
 
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5434_8,
-                        quirk_cirrus_ram_size);
+                       quirk_cirrus_ram_size);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5436,
-                        quirk_cirrus_ram_size);
+                       quirk_cirrus_ram_size);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
-                        quirk_cirrus_ram_size);
+                       quirk_cirrus_ram_size);
 #endif
index 494c9e7..cf8ec56 100644 (file)
 #include <asm/time.h>
 #include <asm-generic/rtc.h>
 
-#define SNI_CLOCK_TICK_RATE     3686400
-#define SNI_COUNTER2_DIV        64
-#define SNI_COUNTER0_DIV        ((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
+#define SNI_CLOCK_TICK_RATE    3686400
+#define SNI_COUNTER2_DIV       64
+#define SNI_COUNTER0_DIV       ((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
 
 static void a20r_set_mode(enum clock_event_mode mode,
-                          struct clock_event_device *evt)
+                         struct clock_event_device *evt)
 {
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
@@ -33,14 +33,14 @@ static void a20r_set_mode(enum clock_event_mode mode,
                *(volatile u8 *)(A20R_PT_CLOCK_BASE +  8) = SNI_COUNTER2_DIV >> 8;
                wmb();
 
-                break;
-        case CLOCK_EVT_MODE_ONESHOT:
-        case CLOCK_EVT_MODE_UNUSED:
-        case CLOCK_EVT_MODE_SHUTDOWN:
-                break;
-        case CLOCK_EVT_MODE_RESUME:
-                break;
-        }
+               break;
+       case CLOCK_EVT_MODE_ONESHOT:
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               break;
+       case CLOCK_EVT_MODE_RESUME:
+               break;
+       }
 }
 
 static struct clock_event_device a20r_clockevent_device = {
@@ -82,15 +82,15 @@ static void __init sni_a20r_timer_setup(void)
        struct irqaction *action = &a20r_irqaction;
        unsigned int cpu = smp_processor_id();
 
-       cd->cpumask             = cpumask_of(cpu);
+       cd->cpumask             = cpumask_of(cpu);
        clockevents_register_device(cd);
        action->dev_id = cd;
        setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction);
 }
 
-#define SNI_8254_TICK_RATE        1193182UL
+#define SNI_8254_TICK_RATE       1193182UL
 
-#define SNI_8254_TCSAMP_COUNTER   ((SNI_8254_TICK_RATE / HZ) + 255)
+#define SNI_8254_TCSAMP_COUNTER          ((SNI_8254_TICK_RATE / HZ) + 255)
 
 static __init unsigned long dosample(void)
 {
index a801abb..a176d1f 100644 (file)
@@ -6,5 +6,5 @@ cflags-$(CONFIG_MACH_TX39XX)    +=                                      \
 cflags-$(CONFIG_MACH_TX49XX)   +=                                      \
                 -I$(srctree)/arch/mips/include/asm/mach-tx49xx
 
-load-$(CONFIG_MACH_TX39XX)      += 0xffffffff80050000
-load-$(CONFIG_MACH_TX49XX)      += 0xffffffff80100000
+load-$(CONFIG_MACH_TX39XX)     += 0xffffffff80050000
+load-$(CONFIG_MACH_TX49XX)     += 0xffffffff80100000
index 7e3ac57..ed8e702 100644 (file)
@@ -2,7 +2,7 @@
  * Common tx4927 irq handler
  *
  * Author: MontaVista Software, Inc.
- *         source@mvista.com
+ *        source@mvista.com
  *
  *  under the terms of the GNU General Public License as published by the
  *  Free Software Foundation; either version 2 of the License, or (at your
index 6b067db..0d7267e 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright 2001, 2003-2005 MontaVista Software Inc.
  * Author: MontaVista Software, Inc.
- *         ahennessy@mvista.com
- *         source@mvista.com
+ *        ahennessy@mvista.com
+ *        source@mvista.com
  * Copyright (C) 2000-2001,2005-2007 Toshiba Corporation
  *
  * This file is subject to the terms and conditions of the GNU General Public
index 70f9626..deea2ce 100644 (file)
@@ -2,7 +2,7 @@
  * common tx4927 memory interface
  *
  * Author: MontaVista Software, Inc.
- *         source@mvista.com
+ *        source@mvista.com
  *
  * Copyright 2001-2002 MontaVista Software Inc.
  *
index ce8f8b9..2871327 100644 (file)
@@ -2,7 +2,7 @@
  * linux/arch/mips/txx9/pci.c
  *
  * Based on linux/arch/mips/txx9/rbtx4927/setup.c,
- *          linux/arch/mips/txx9/rbtx4938/setup.c,
+ *         linux/arch/mips/txx9/rbtx4938/setup.c,
  *         and RBTX49xx patch from CELF patch archive.
  *
  * Copyright 2001-2005 MontaVista Software Inc.
@@ -107,7 +107,7 @@ int txx9_pci_mem_high __initdata;
 
 /*
  * allocate pci_controller and resources.
- * mem_base, io_base: physical address.  0 for auto assignment.
+ * mem_base, io_base: physical address.         0 for auto assignment.
  * mem_size and io_size means max size on auto assignment.
  * pcic must be &txx9_primary_pcic or NULL.
  */
index 560fe89..5524f2c 100644 (file)
@@ -513,19 +513,19 @@ void __init txx9_sio_init(unsigned long baseaddr, int irq,
 }
 
 #ifdef CONFIG_EARLY_PRINTK
-static void __init null_prom_putchar(char c)
+static void null_prom_putchar(char c)
 {
 }
-void (*txx9_prom_putchar)(char c) __initdata = null_prom_putchar;
+void (*txx9_prom_putchar)(char c) = null_prom_putchar;
 
-void __init prom_putchar(char c)
+void prom_putchar(char c)
 {
        txx9_prom_putchar(c);
 }
 
 static void __iomem *early_txx9_sio_port;
 
-static void __init early_txx9_sio_putchar(char c)
+static void early_txx9_sio_putchar(char c)
 {
 #define TXX9_SICISR    0x0c
 #define TXX9_SITFIFO   0x1c
index 9505d58..110e05c 100644 (file)
@@ -132,6 +132,6 @@ void __init tx3927_mtd_init(int ch)
        unsigned long size = txx9_ce_res[ch].end - start + 1;
 
        if (!(tx3927_romcptr->cr[ch] & 0x8))
-               return; /* disabled */
+               return; /* disabled */
        txx9_physmap_flash_init(ch, start, size, &pdata);
 }
index 3418b2a..e714d6c 100644 (file)
@@ -250,7 +250,7 @@ void __init tx4927_mtd_init(int ch)
        unsigned long size = txx9_ce_res[ch].end - start + 1;
 
        if (!(TX4927_EBUSC_CR(ch) & 0x8))
-               return; /* disabled */
+               return; /* disabled */
        txx9_physmap_flash_init(ch, start, size, &pdata);
 }
 
index eb20801..0a3bf2d 100644 (file)
@@ -329,7 +329,7 @@ void __init tx4938_mtd_init(int ch)
        unsigned long size = txx9_ce_res[ch].end - start + 1;
 
        if (!(TX4938_EBUSC_CR(ch) & 0x8))
-               return; /* disabled */
+               return; /* disabled */
        txx9_physmap_flash_init(ch, start, size, &pdata);
 }
 
index 5ff7a95..729a509 100644 (file)
@@ -301,7 +301,7 @@ void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask)
        unsigned int ch_mask = 0;
        __u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
 
-       cts_mask |= ~1; /* only SIO0 have RTS/CTS */
+       cts_mask |= ~1; /* only SIO0 have RTS/CTS */
        if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO0)
                cts_mask |= 1 << 0; /* disable SIO0 RTS/CTS by PCFG setting */
        if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2)
@@ -378,7 +378,7 @@ void __init tx4939_mtd_init(int ch)
        unsigned long size = txx9_ce_res[ch].end - start + 1;
 
        if (!(TX4939_EBUSC_CR(ch) & 0x8))
-               return; /* disabled */
+               return; /* disabled */
        txx9_physmap_flash_init(ch, start, size, &pdata);
 }
 
index 8ebc384..f98baa6 100644 (file)
 /* Common Registers */
 #define SMSC_FDC37M81X_CONFIG_INDEX  0x00
 #define SMSC_FDC37M81X_CONFIG_DATA   0x01
-#define SMSC_FDC37M81X_CONF          0x02
-#define SMSC_FDC37M81X_INDEX         0x03
-#define SMSC_FDC37M81X_DNUM          0x07
-#define SMSC_FDC37M81X_DID           0x20
-#define SMSC_FDC37M81X_DREV          0x21
-#define SMSC_FDC37M81X_PCNT          0x22
-#define SMSC_FDC37M81X_PMGT          0x23
-#define SMSC_FDC37M81X_OSC           0x24
-#define SMSC_FDC37M81X_CONFPA0       0x26
-#define SMSC_FDC37M81X_CONFPA1       0x27
-#define SMSC_FDC37M81X_TEST4         0x2B
-#define SMSC_FDC37M81X_TEST5         0x2C
-#define SMSC_FDC37M81X_TEST1         0x2D
-#define SMSC_FDC37M81X_TEST2         0x2E
-#define SMSC_FDC37M81X_TEST3         0x2F
+#define SMSC_FDC37M81X_CONF         0x02
+#define SMSC_FDC37M81X_INDEX        0x03
+#define SMSC_FDC37M81X_DNUM         0x07
+#define SMSC_FDC37M81X_DID          0x20
+#define SMSC_FDC37M81X_DREV         0x21
+#define SMSC_FDC37M81X_PCNT         0x22
+#define SMSC_FDC37M81X_PMGT         0x23
+#define SMSC_FDC37M81X_OSC          0x24
+#define SMSC_FDC37M81X_CONFPA0      0x26
+#define SMSC_FDC37M81X_CONFPA1      0x27
+#define SMSC_FDC37M81X_TEST4        0x2B
+#define SMSC_FDC37M81X_TEST5        0x2C
+#define SMSC_FDC37M81X_TEST1        0x2D
+#define SMSC_FDC37M81X_TEST2        0x2E
+#define SMSC_FDC37M81X_TEST3        0x2F
 
 /* Logical device numbers */
-#define SMSC_FDC37M81X_FDD           0x00
-#define SMSC_FDC37M81X_SERIAL1       0x04
-#define SMSC_FDC37M81X_SERIAL2       0x05
-#define SMSC_FDC37M81X_KBD           0x07
+#define SMSC_FDC37M81X_FDD          0x00
+#define SMSC_FDC37M81X_SERIAL1      0x04
+#define SMSC_FDC37M81X_SERIAL2      0x05
+#define SMSC_FDC37M81X_KBD          0x07
 
 /* Logical device Config Registers */
-#define SMSC_FDC37M81X_ACTIVE        0x30
+#define SMSC_FDC37M81X_ACTIVE       0x30
 #define SMSC_FDC37M81X_BASEADDR0     0x60
 #define SMSC_FDC37M81X_BASEADDR1     0x61
-#define SMSC_FDC37M81X_INT           0x70
-#define SMSC_FDC37M81X_INT2          0x72
-#define SMSC_FDC37M81X_MODE          0xF0
+#define SMSC_FDC37M81X_INT          0x70
+#define SMSC_FDC37M81X_INT2         0x72
+#define SMSC_FDC37M81X_MODE         0xF0
 
 /* Chip Config Values */
 #define SMSC_FDC37M81X_CONFIG_ENTER  0x55
 #define SMSC_FDC37M81X_CONFIG_EXIT   0xaa
-#define SMSC_FDC37M81X_CHIP_ID       0x4d
+#define SMSC_FDC37M81X_CHIP_ID      0x4d
 
 static unsigned long g_smsc_fdc37m81x_base;
 
index 6c22c49..3f48292 100644 (file)
@@ -2,7 +2,7 @@
  * Toshiba RBTX4927 specific interrupt handlers
  *
  * Author: MontaVista Software, Inc.
- *         source@mvista.com
+ *        source@mvista.com
  *
  * Copyright 2001-2002 MontaVista Software Inc.
  *
index cc97c6a..fe6d0b5 100644 (file)
@@ -2,7 +2,7 @@
  * rbtx4927 specific prom routines
  *
  * Author: MontaVista Software, Inc.
- *         source@mvista.com
+ *        source@mvista.com
  *
  * Copyright 2001-2002 MontaVista Software Inc.
  *
index b15adfc..3c516ef 100644 (file)
@@ -2,7 +2,7 @@
  * Toshiba rbtx4927 specific setup
  *
  * Author: MontaVista Software, Inc.
- *         source@mvista.com
+ *        source@mvista.com
  *
  * Copyright 2001-2002 MontaVista Software Inc.
  *
index d6e70da..c9afd05 100644 (file)
@@ -107,10 +107,10 @@ static void __init rbtx4938_pci_setup(void)
 /* SPI support */
 
 /* chip select for SPI devices */
-#define        SEEPROM1_CS     7       /* PIO7 */
-#define        SEEPROM2_CS     0       /* IOC */
-#define        SEEPROM3_CS     1       /* IOC */
-#define        SRTC_CS 2       /* IOC */
+#define SEEPROM1_CS    7       /* PIO7 */
+#define SEEPROM2_CS    0       /* IOC */
+#define SEEPROM3_CS    1       /* IOC */
+#define SRTC_CS 2      /* IOC */
 #define SPI_BUSNO      0
 
 static int __init rbtx4938_ethaddr_init(void)
index e15641d..2da5f25 100644 (file)
@@ -243,7 +243,7 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev)
 }
 
 static struct platform_driver rbtx4939_led_driver = {
-       .driver  = {
+       .driver  = {
                .name = "rbtx4939-led",
                .owner = THIS_MODULE,
        },
@@ -337,7 +337,7 @@ static void rbtx4939_flash_copy_from(struct map_info *map, void *to,
                shift = bdipsw & 3;
                while (len) {
                        curlen = min_t(unsigned long, len,
-                                    0x400000 - (from & (0x400000 - 1)));
+                                    0x400000 - (from & (0x400000 - 1)));
                        memcpy(to,
                               (void *)((from & ~0xc00000) |
                                        ((((from >> 22) + shift) & 3) << 22)),
index 6346c59..ff7d1c6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  bcu.c, Bus Control Unit routines for the NEC VR4100 series.
  *
- *  Copyright (C) 2002  MontaVista Software Inc.
+ *  Copyright (C) 2002 MontaVista Software Inc.
  *    Author: Yoichi Yuasa <source@mvista.com>
  *  Copyright (C) 2003-2005  Yoichi Yuasa <yuasa@linux-mips.org>
  *
@@ -176,7 +176,7 @@ static inline unsigned long calculate_vtclock(uint16_t clkspeed, unsigned long p
 }
 
 static inline unsigned long calculate_tclock(uint16_t clkspeed, unsigned long pclock,
-                                             unsigned long vtclock)
+                                            unsigned long vtclock)
 {
        unsigned long tclock = 0;
 
index 8ba7d04..05302bf 100644 (file)
@@ -217,24 +217,24 @@ static int __init vr41xx_cmu_init(void)
        unsigned long start, size;
 
        switch (current_cpu_type()) {
-        case CPU_VR4111:
-        case CPU_VR4121:
+       case CPU_VR4111:
+       case CPU_VR4121:
                start = CMU_TYPE1_BASE;
                size = CMU_TYPE1_SIZE;
-                break;
-        case CPU_VR4122:
-        case CPU_VR4131:
+               break;
+       case CPU_VR4122:
+       case CPU_VR4131:
                start = CMU_TYPE2_BASE;
                size = CMU_TYPE2_SIZE;
                break;
-        case CPU_VR4133:
+       case CPU_VR4133:
                start = CMU_TYPE3_BASE;
                size = CMU_TYPE3_SIZE;
-                break;
+               break;
        default:
                panic("Unexpected CPU of NEC VR4100 series");
                break;
-        }
+       }
 
        if (request_mem_region(start, size, "CMU") == NULL)
                return -EBUSY;
index b32b3bc..32cc8d6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  NEC VR4100 series GIU platform device.
  *
- *  Copyright (C) 2007  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index a39ef32..41e873b 100644 (file)
@@ -49,11 +49,11 @@ static unsigned char sysint1_assign[16] = {
 static unsigned char sysint2_assign[16] = {
        2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
 
-#define ICU1_TYPE1_BASE        0x0b000080UL
-#define ICU2_TYPE1_BASE        0x0b000200UL
+#define ICU1_TYPE1_BASE 0x0b000080UL
+#define ICU2_TYPE1_BASE 0x0b000200UL
 
-#define ICU1_TYPE2_BASE        0x0f000080UL
-#define ICU2_TYPE2_BASE        0x0f0000a0UL
+#define ICU1_TYPE2_BASE 0x0f000080UL
+#define ICU2_TYPE2_BASE 0x0f0000a0UL
 
 #define ICU1_SIZE      0x20
 #define ICU2_SIZE      0x1c
index 9fbf5f0..70a3f90 100644 (file)
@@ -74,7 +74,7 @@ static inline void software_reset(void)
                change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
                flush_cache_all();
                write_c0_wired(0);
-               __asm__("jr     %0"::"r"(0xbfc00000));
+               __asm__("jr     %0"::"r"(0xbfc00000));
                break;
        }
 }
index 76e3e8a..c1e3d20 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  NEC VR4100 series RTC platform device.
  *
- *  Copyright (C) 2007  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index ff84142..45836a9 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  type.c, System type for NEC VR4100 series.
  *
- *  Copyright (C) 2005  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index e758645..dc78b25 100644 (file)
@@ -2,6 +2,6 @@
 # Wind River PPMC Board (4KC + GT64120)
 #
 platform-$(CONFIG_WR_PPMC)     += wrppmc/
-cflags-$(CONFIG_WR_PPMC)       +=                                      \
+cflags-$(CONFIG_WR_PPMC)       +=                                      \
                -I$(srctree)/arch/mips/include/asm/mach-wrppmc
 load-$(CONFIG_WR_PPMC)         += 0xffffffff80100000
index c6e7062..f237bf4 100644 (file)
@@ -4,8 +4,8 @@
  * Copyright (C) 2006, Wind River System Inc.
  * Author: Rongkai.Zhan, <rongkai.zhan@windriver.com>
  *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
index 6f9d085..83f0f7d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  Registration of WRPPMC UART platform device.
  *
- *  Copyright (C) 2007  Yoichi Yuasa <yuasa@linux-mips.org>
+ *  Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index ad0caea..b06c736 100644 (file)
@@ -8,6 +8,7 @@ config MN10300
        select HAVE_ARCH_KGDB
        select GENERIC_ATOMIC64
        select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
+       select HAVE_VIRT_TO_BUS
        select GENERIC_CLOCKEVENTS
        select MODULES_USE_ELF_RELA
        select OLD_SIGSUSPEND3
index 4ebd6b3..f592d7a 100644 (file)
@@ -150,9 +150,4 @@ do {                                                \
  */
 #define ELF_PLATFORM  (NULL)
 
-#ifdef __KERNEL__
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-#endif
-
 #endif /* _ASM_ELF_H */
index 0ac66f6..014a648 100644 (file)
@@ -12,6 +12,7 @@ config OPENRISC
        select ARCH_WANT_OPTIONAL_GPIOLIB
         select HAVE_ARCH_TRACEHOOK
        select HAVE_GENERIC_HARDIRQS
+       select HAVE_VIRT_TO_BUS
        select GENERIC_IRQ_CHIP
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
@@ -26,10 +27,6 @@ config OPENRISC
 config MMU
        def_bool y
 
-config SYMBOL_PREFIX
-        string
-        default ""
-
 config HAVE_DMA_ATTRS
        def_bool y
 
index a9e11ef..2c64f22 100644 (file)
@@ -54,6 +54,7 @@
 
 #include <asm-generic/bitops/atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 
 #endif /* __ASM_GENERIC_BITOPS_H */
index f4aa8a5..d334e20 100644 (file)
@@ -62,7 +62,4 @@ extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt);
 
 #define ELF_PLATFORM   (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
index 3369138..cab746f 100644 (file)
@@ -70,7 +70,6 @@ struct thread_struct {
  */
 
 #define task_pt_regs(task) user_regs(task_thread_info(task))
-#define current_regs() user_regs(current_thread_info())
 
 #define INIT_SP         (sizeof(init_stack) + (unsigned long) &init_stack)
 
index 54afd0a..d8a455e 100644 (file)
@@ -201,12 +201,17 @@ EXCEPTION_ENTRY(_bus_fault_handler)
         l.nop
 
 /* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+EXCEPTION_ENTRY(_dtlb_miss_page_fault_handler)
+       l.and   r5,r5,r0
+       l.j     1f
+        l.nop
 
 EXCEPTION_ENTRY(_data_page_fault_handler)
        /* set up parameters for do_page_fault */
+       l.ori   r5,r0,0x300                // exception vector
+1:
        l.addi  r3,r1,0                    // pt_regs
        /* r4 set be EXCEPTION_HANDLE */   // effective address of fault
-       l.ori   r5,r0,0x300                // exception vector
 
        /*
         * __PHX__: TODO
@@ -276,12 +281,17 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
        l.nop
 
 /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+EXCEPTION_ENTRY(_itlb_miss_page_fault_handler)
+       l.and   r5,r5,r0
+       l.j     1f
+        l.nop
 
 EXCEPTION_ENTRY(_insn_page_fault_handler)
        /* set up parameters for do_page_fault */
+       l.ori   r5,r0,0x400                // exception vector
+1:
        l.addi  r3,r1,0                    // pt_regs
        /* r4 set be EXCEPTION_HANDLE */   // effective address of fault
-       l.ori   r5,r0,0x400                // exception vector
        l.ori   r6,r0,0x0                  // !write access
 
        /* call fault.c handler in or32/mm/fault.c */
@@ -1040,7 +1050,7 @@ ENTRY(_switch)
         * we are expected to have set up the arg to schedule_tail already,
         * hence we do so here unconditionally:
         */
-       l.lwz   r3,TI_STACK(r3)         /* Load 'prev' as schedule_tail arg */
+       l.lwz   r3,TI_TASK(r3)          /* Load 'prev' as schedule_tail arg */
        l.jr    r9
         l.nop
 
index 1088b5f..1d3c9c2 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/threads.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/serial_reg.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
        /* Jump to .init code at _start which lives in the .head section
         * and will be discarded after boot.
         */
-       LOAD_SYMBOL_2_GPR(r4, _start)
-       tophys  (r3,r4)                 /* MMU disabled */
-       l.jr    r3
+       LOAD_SYMBOL_2_GPR(r15, _start)
+       tophys  (r13,r15)                       /* MMU disabled */
+       l.jr    r13
         l.nop
 
 /* ---[ 0x200: BUS exception ]------------------------------------------- */
@@ -1069,8 +1070,7 @@ d_pte_not_present:
        EXCEPTION_LOAD_GPR4
        EXCEPTION_LOAD_GPR5
        EXCEPTION_LOAD_GPR6
-       l.j     _dispatch_do_dpage_fault
-       l.nop
+       EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
 
 /* ==============================================[ ITLB miss handler ]=== */
 ENTRY(itlb_miss_handler)
@@ -1192,8 +1192,7 @@ i_pte_not_present:
        EXCEPTION_LOAD_GPR4
        EXCEPTION_LOAD_GPR5
        EXCEPTION_LOAD_GPR6
-       l.j     _dispatch_do_ipage_fault
-       l.nop
+       EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
 
 /* ==============================================[ boot tlb handlers ]=== */
 
index 79dea97..e7fdc50 100644 (file)
@@ -167,15 +167,26 @@ void __init paging_init(void)
                unsigned long *dtlb_vector = __va(0x900);
                unsigned long *itlb_vector = __va(0xa00);
 
+               printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
+               *itlb_vector = ((unsigned long)&itlb_miss_handler -
+                               (unsigned long)itlb_vector) >> 2;
+
+               /* Soft ordering constraint to ensure that dtlb_vector is
+                * the last thing updated
+                */
+               barrier();
+
                printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
                *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
                                (unsigned long)dtlb_vector) >> 2;
 
-               printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
-               *itlb_vector = ((unsigned long)&itlb_miss_handler -
-                               (unsigned long)itlb_vector) >> 2;
        }
 
+       /* Soft ordering constraint to ensure that cache invalidation and
+        * TLB flush really happen _after_ code has been modified.
+        */
+       barrier();
+
        /* Invalidate instruction caches after code modification */
        mtspr(SPR_ICBIR, 0x900);
        mtspr(SPR_ICBIR, 0xa00);
index 7f9b3c5..a9ff712 100644 (file)
@@ -5,6 +5,7 @@ config PARISC
        select HAVE_FUNCTION_TRACER if 64BIT
        select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST if 64BIT
+       select ARCH_WANT_FRAME_POINTERS
        select RTC_CLASS
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
@@ -18,7 +19,9 @@ config PARISC
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER
+       select SYSCTL_ARCH_UNALIGN_ALLOW
        select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_VIRT_TO_BUS
        select MODULES_USE_ELF_RELA
        select CLONE_BACKWARDS
        select TTY # Needed for pdc_cons.c
index ed9a14c..01d95e2 100644 (file)
@@ -113,12 +113,10 @@ palo: vmlinux
 # Shorthands for known targets not supported by parisc, use vmlinux as default
 Image zImage bzImage: vmlinux
 
-kernel_install: vmlinux
+install: vmlinux
        sh $(src)/arch/parisc/install.sh \
                        $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
 
-install: kernel_install modules_install
-
 CLEAN_FILES    += lifimage
 MRPROPER_FILES += palo.conf
 
index af9cf30..f38e198 100644 (file)
@@ -115,8 +115,8 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 
-#define atomic_add(i,v)        ((void)(__atomic_add_return( (i),(v))))
-#define atomic_sub(i,v)        ((void)(__atomic_add_return(-(i),(v))))
+#define atomic_add(i,v)        ((void)(__atomic_add_return(        (i),(v))))
+#define atomic_sub(i,v)        ((void)(__atomic_add_return(-((int) (i)),(v))))
 #define atomic_inc(v)  ((void)(__atomic_add_return(   1,(v))))
 #define atomic_dec(v)  ((void)(__atomic_add_return(  -1,(v))))
 
index f61692d..00dc66f 100644 (file)
@@ -85,6 +85,7 @@ struct elf_prpsinfo32
  * could set a processor dependent flag in the thread_struct.
  */
 
+#undef SET_PERSONALITY
 #define SET_PERSONALITY(ex) \
        set_thread_flag(TIF_32BIT); \
        current->thread.map_base = DEFAULT_MAP_BASE32; \
index 98e9e71..940188d 100644 (file)
@@ -242,7 +242,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        unsigned long haddr, sigframe_size;
        int err = 0;
 #ifdef CONFIG_64BIT
-       compat_int_t compat_val;
        struct compat_rt_sigframe __user * compat_frame;
        compat_sigset_t compat_set;
 #endif
index 54d619d..5dfd248 100644 (file)
 
 static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
 {
-       struct vm_area_struct *vma;
+       struct vm_unmapped_area_info info;
 
-       addr = PAGE_ALIGN(addr);
-
-       for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr)
-                       return -ENOMEM;
-               if (!vma || addr + len <= vma->vm_start)
-                       return addr;
-               addr = vma->vm_end;
-       }
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = PAGE_ALIGN(addr);
+       info.high_limit = TASK_SIZE;
+       info.align_mask = 0;
+       info.align_offset = 0;
+       return vm_unmapped_area(&info);
 }
 
-#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
-
 /*
  * We need to know the offset to use.  Old scheme was to look for
  * existing mapping and use the same offset.  New scheme is to use the
@@ -63,30 +58,21 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
  */
 static int get_offset(struct address_space *mapping)
 {
-       int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
-       return offset & 0x3FF000;
+       return (unsigned long) mapping >> 8;
 }
 
 static unsigned long get_shared_area(struct address_space *mapping,
                unsigned long addr, unsigned long len, unsigned long pgoff)
 {
-       struct vm_area_struct *vma;
-       int offset = mapping ? get_offset(mapping) : 0;
+       struct vm_unmapped_area_info info;
 
-       offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
-
-       addr = DCACHE_ALIGN(addr - offset) + offset;
-
-       for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr)
-                       return -ENOMEM;
-               if (!vma || addr + len <= vma->vm_start)
-                       return addr;
-               addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
-               if (addr < vma->vm_end) /* handle wraparound */
-                       return -ENOMEM;
-       }
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = PAGE_ALIGN(addr);
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & (SHMLBA - 1);
+       info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+       return vm_unmapped_area(&info);
 }
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
index eca69bb..051c8b9 100644 (file)
@@ -79,16 +79,6 @@ asmlinkage long sys32_sendfile64(u32 out_fd, u32 in_fd,
                                (loff_t __user *)offset, count);
 }
 
-
-/* lseek() needs a wrapper because 'offset' can be negative, but the top
- * half of the argument has been zeroed by syscall.S.
- */
-
-asmlinkage int sys32_lseek(unsigned int fd, int offset, unsigned int origin)
-{
-       return sys_lseek(fd, offset, origin);
-}
-
 asmlinkage long sys32_semctl(int semid, int semnum, int cmd, union semun arg)
 {
         union semun u;
index fc9cab1..f57dc13 100644 (file)
@@ -76,7 +76,7 @@
        ENTRY_SAME(socket)
        /* struct stat is MAYBE identical wide and narrow ?? */
        ENTRY_COMP(newstat)
-       ENTRY_DIFF(lseek)
+       ENTRY_COMP(lseek)
        ENTRY_SAME(getpid)              /* 20 */
        /* the 'void * data' parameter may need re-packing in wide */
        ENTRY_COMP(mount)
        ENTRY_SAME(mmap2)
        ENTRY_SAME(mmap)                /* 90 */
        ENTRY_SAME(munmap)
-       ENTRY_SAME(truncate)
-       ENTRY_SAME(ftruncate)
+       ENTRY_COMP(truncate)
+       ENTRY_COMP(ftruncate)
        ENTRY_SAME(fchmod)
        ENTRY_SAME(fchown)              /* 95 */
        ENTRY_SAME(getpriority)
        ENTRY_COMP(sched_getaffinity)
        ENTRY_SAME(ni_syscall)  /* set_thread_area */
        ENTRY_SAME(ni_syscall)  /* get_thread_area */
-       ENTRY_SAME(io_setup)            /* 215 */
+       ENTRY_COMP(io_setup)            /* 215 */
        ENTRY_SAME(io_destroy)
-       ENTRY_SAME(io_getevents)
-       ENTRY_SAME(io_submit)
+       ENTRY_COMP(io_getevents)
+       ENTRY_COMP(io_submit)
        ENTRY_SAME(io_cancel)
        ENTRY_SAME(alloc_hugepages)     /* 220 */
        ENTRY_SAME(free_hugepages)
index 1dbca5c..a49cc81 100644 (file)
@@ -68,7 +68,7 @@
 DECLARE_PER_CPU(struct exception_data, exception_data);
 
 #define preserve_branch(label) do {                                    \
-       volatile int dummy;                                             \
+       volatile int dummy = 0;                                         \
        /* The following branch is never taken, it's just here to  */   \
        /* prevent gcc from optimizing away our exception code. */      \
        if (unlikely(dummy != dummy))                                   \
index 5c74706..b89d7eb 100644 (file)
@@ -87,9 +87,6 @@ config GENERIC_GPIO
        help
          Generic GPIO API support
 
-config ARCH_NO_VIRT_TO_BUS
-       def_bool PPC64
-
 config PPC
        bool
        default y
@@ -101,6 +98,7 @@ config PPC
        select HAVE_FUNCTION_GRAPH_TRACER
        select SYSCTL_EXCEPTION_TRACE
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select HAVE_VIRT_TO_BUS if !PPC64
        select HAVE_IDE
        select HAVE_IOREMAP_PROT
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
index a5f8264..125e165 100644 (file)
        STEPUP4((t)+16, fn)
 
 _GLOBAL(powerpc_sha_transform)
-       PPC_STLU r1,-STACKFRAMESIZE(r1)
+       PPC_STLU r1,-INT_FRAME_SIZE(r1)
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
 
@@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
 
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
-       addi    r1,r1,STACKFRAMESIZE
+       addi    r1,r1,INT_FRAME_SIZE
        blr
index ef918a2..08bd299 100644 (file)
@@ -52,8 +52,6 @@
 #define smp_mb__before_clear_bit()     smp_mb()
 #define smp_mb__after_clear_bit()      smp_mb()
 
-#define BITOP_LE_SWIZZLE       ((BITS_PER_LONG-1) & ~0x7)
-
 /* Macro for generating the ***_bits() functions */
 #define DEFINE_BITOP(fn, op, prefix, postfix)  \
 static __inline__ void fn(unsigned long mask,  \
index 6abf0a1..ac9790f 100644 (file)
@@ -103,8 +103,6 @@ do {                                                                \
 # define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
                (exec_stk == EXSTACK_DEFAULT) : 0)
 #else 
-# define SET_PERSONALITY(ex) \
-  set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
 # define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)
 #endif /* __powerpc64__ */
 
index e665861..c9c67fc 100644 (file)
 #define SPRN_HSRR0     0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1     0x13B   /* Hypervisor Save/Restore 1 */
 #define SPRN_FSCR      0x099   /* Facility Status & Control Register */
-#define FSCR_TAR       (1<<8)  /* Enable Target Adress Register */
+#define   FSCR_TAR     (1 << (63-55)) /* Enable Target Address Register */
+#define   FSCR_DSCR    (1 << (63-61)) /* Enable Data Stream Control Register */
 #define SPRN_TAR       0x32f   /* Target Address Register */
 #define SPRN_LPCR      0x13E   /* LPAR Control Register */
 #define   LPCR_VPM0    (1ul << (63-0))
index d906f33..ebbec52 100644 (file)
@@ -22,7 +22,7 @@ SYSCALL_SPU(chmod)
 SYSCALL_SPU(lchown)
 SYSCALL(ni_syscall)
 OLDSYS(stat)
-SYSX_SPU(sys_lseek,ppc32_lseek,sys_lseek)
+COMPAT_SYS_SPU(lseek)
 SYSCALL_SPU(getpid)
 COMPAT_SYS(mount)
 SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
@@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
 COMPAT_SYS(process_vm_readv)
 COMPAT_SYS(process_vm_writev)
 SYSCALL(finit_module)
+SYSCALL(ni_syscall) /* sys_kcmp */
index f25b5c4..1487f0f 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          354
+#define __NR_syscalls          355
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 8c478c6..74cb4d7 100644 (file)
 #define __NR_process_vm_readv  351
 #define __NR_process_vm_writev 352
 #define __NR_finit_module      353
+#define __NR_kcmp              354
 
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index d29facb..ea847ab 100644 (file)
@@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
 
 _GLOBAL(__setup_cpu_power8)
        mflr    r11
+       bl      __init_FSCR
        bl      __init_hvmode_206
        mtlr    r11
        beqlr
@@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
        mfspr   r3,SPRN_LPCR
        oris    r3, r3, LPCR_AIL_3@h
        bl      __init_LPCR
-       bl      __init_FSCR
        bl      __init_TLB
        mtlr    r11
        blr
 
 _GLOBAL(__restore_cpu_power8)
        mflr    r11
+       bl      __init_FSCR
        mfmsr   r3
        rldicl. r0,r3,4,63
        beqlr
@@ -115,7 +116,7 @@ __init_LPCR:
 
 __init_FSCR:
        mfspr   r3,SPRN_FSCR
-       ori     r3,r3,FSCR_TAR
+       ori     r3,r3,FSCR_TAR|FSCR_DSCR
        mtspr   SPRN_FSCR,r3
        blr
 
index a8a5361..87ef8f5 100644 (file)
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                              \
        mflr    r10 ;                                           \
        ld      r12,PACAKBASE(r13) ;                            \
        LOAD_HANDLER(r12, system_call_entry_direct) ;           \
-       mtlr    r12 ;                                           \
+       mtctr   r12 ;                                           \
        mfspr   r12,SPRN_SRR1 ;                                 \
        /* Re-use of r13... No spare regs to do this */ \
        li      r13,MSR_RI ;                                    \
        mtmsrd  r13,1 ;                                         \
        GET_PACA(r13) ; /* get r13 back */                      \
-       blr ;
+       bctr ;
 #else
        /* We can branch directly */
 #define SYSCALL_PSERIES_2_DIRECT                               \
index e88c643..11f5b03 100644 (file)
@@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();
 
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
index c8ae371..f19d0bd 100644 (file)
@@ -32,7 +32,7 @@
 static loff_t page_map_seek( struct file *file, loff_t off, int whence)
 {
        loff_t new;
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
 
        switch(whence) {
        case 0:
@@ -55,13 +55,13 @@ static loff_t page_map_seek( struct file *file, loff_t off, int whence)
 static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
                              loff_t *ppos)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        return simple_read_from_buffer(buf, nbytes, ppos, dp->data, dp->size);
 }
 
 static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
 
        if ((vma->vm_end - vma->vm_start) > dp->size)
                return -EINVAL;
index 8329190..c642f01 100644 (file)
@@ -191,7 +191,7 @@ static void free_flash_list(struct flash_block_list *f)
 
 static int rtas_flash_release(struct inode *inode, struct file *file)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        struct rtas_update_flash_t *uf;
        
        uf = (struct rtas_update_flash_t *) dp->data;
@@ -253,7 +253,7 @@ static void get_flash_status_msg(int status, char *buf)
 static ssize_t rtas_flash_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        struct rtas_update_flash_t *uf;
        char msg[RTAS_MSG_MAXLEN];
 
@@ -282,7 +282,7 @@ void rtas_block_ctor(void *ptr)
 static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
                                size_t count, loff_t *off)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        struct rtas_update_flash_t *uf;
        char *p;
        int next_free;
@@ -374,7 +374,7 @@ static void manage_flash(struct rtas_manage_flash_t *args_buf)
 static ssize_t manage_flash_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        struct rtas_manage_flash_t *args_buf;
        char msg[RTAS_MSG_MAXLEN];
        int msglen;
@@ -391,7 +391,7 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf,
 static ssize_t manage_flash_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *off)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        struct rtas_manage_flash_t *args_buf;
        const char reject_str[] = "0";
        const char commit_str[] = "1";
@@ -462,7 +462,7 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
 static ssize_t validate_flash_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        struct rtas_validate_flash_t *args_buf;
        char msg[RTAS_MSG_MAXLEN];
        int msglen;
@@ -477,7 +477,7 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
 static ssize_t validate_flash_write(struct file *file, const char __user *buf,
                                    size_t count, loff_t *off)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        struct rtas_validate_flash_t *args_buf;
        int rc;
 
@@ -526,7 +526,7 @@ done:
 
 static int validate_flash_release(struct inode *inode, struct file *file)
 {
-       struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *dp = PDE(file_inode(file));
        struct rtas_validate_flash_t *args_buf;
 
        args_buf = (struct rtas_validate_flash_t *) dp->data;
index dbc44ba..d0bafc0 100644 (file)
@@ -146,24 +146,6 @@ asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd,
                            (off_t __user *)offset, count);
 }
 
-off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
-{
-       /* sign extend n */
-       return sys_lseek(fd, (int)offset, origin);
-}
-
-long compat_sys_truncate(const char __user * path, u32 length)
-{
-       /* sign extend length */
-       return sys_truncate(path, (int)length);
-}
-
-long compat_sys_ftruncate(int fd, u32 length)
-{
-       /* sign extend length */
-       return sys_ftruncate(fd, (int)length);
-}
-
 unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
                          unsigned long prot, unsigned long flags,
                          unsigned long fd, unsigned long pgoff)
index 2c86b0d..da8b13c 100644 (file)
@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;
-       struct hlist_node *node;
        int i;
 
        rcu_read_lock();
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
-               hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+               hlist_for_each_entry_rcu(pte, list, list_vpte_long)
                        invalidate_pte(vcpu, pte);
        }
 
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
-       struct hlist_node *node;
        struct hpte_cache *pte;
 
        /* Find the list of entries in the map */
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
        rcu_read_lock();
 
        /* Check the list for matching entries and invalidate */
-       hlist_for_each_entry_rcu(pte, node, list, list_pte)
+       hlist_for_each_entry_rcu(pte, list, list_pte)
                if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
                        invalidate_pte(vcpu, pte);
 
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
-       struct hlist_node *node;
        struct hpte_cache *pte;
 
        /* Find the list of entries in the map */
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
        rcu_read_lock();
 
        /* Check the list for matching entries and invalidate */
-       hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
+       hlist_for_each_entry_rcu(pte, list, list_pte_long)
                if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
                        invalidate_pte(vcpu, pte);
 
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
-       struct hlist_node *node;
        struct hpte_cache *pte;
        u64 vp_mask = 0xfffffffffULL;
 
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
        rcu_read_lock();
 
        /* Check the list for matching entries and invalidate */
-       hlist_for_each_entry_rcu(pte, node, list, list_vpte)
+       hlist_for_each_entry_rcu(pte, list, list_vpte)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);
 
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
-       struct hlist_node *node;
        struct hpte_cache *pte;
        u64 vp_mask = 0xffffff000ULL;
 
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
        rcu_read_lock();
 
        /* Check the list for matching entries and invalidate */
-       hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+       hlist_for_each_entry_rcu(pte, list, list_vpte_long)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);
 
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
-       struct hlist_node *node;
        struct hpte_cache *pte;
        int i;
 
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
-               hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+               hlist_for_each_entry_rcu(pte, list, list_vpte_long)
                        if ((pte->pte.raddr >= pa_start) &&
                            (pte->pte.raddr < pa_end))
                                invalidate_pte(vcpu, pte);
index 657e3f2..c9500ea 100644 (file)
@@ -111,7 +111,7 @@ static int match_context(const void *v, struct file *file, unsigned fd)
        struct spu_context *ctx;
        if (file->f_op != &spufs_context_fops)
                return 0;
-       ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+       ctx = SPUFS_I(file_inode(file))->i_ctx;
        if (ctx->flags & SPU_CREATE_NOSCHED)
                return 0;
        return fd + 1;
@@ -137,7 +137,7 @@ static struct spu_context *coredump_next_context(int *fd)
                return NULL;
        *fd = n - 1;
        file = fcheck(*fd);
-       return SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+       return SPUFS_I(file_inode(file))->i_ctx;
 }
 
 int spufs_coredump_extra_notes_size(void)
index 0cfece4..68c57d3 100644 (file)
@@ -1852,7 +1852,7 @@ out:
 
 static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (!err) {
                mutex_lock(&inode->i_mutex);
@@ -2501,7 +2501,7 @@ static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
                             size_t len, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        int error = 0, cnt = 0;
 
@@ -2571,7 +2571,7 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
 
 static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        unsigned int mask = 0;
        int rc;
index dba1ce2..863184b 100644 (file)
@@ -199,37 +199,18 @@ static int spufs_fill_dir(struct dentry *dir,
                const struct spufs_tree_descr *files, umode_t mode,
                struct spu_context *ctx)
 {
-       struct dentry *dentry, *tmp;
-       int ret;
-
        while (files->name && files->name[0]) {
-               ret = -ENOMEM;
-               dentry = d_alloc_name(dir, files->name);
+               int ret;
+               struct dentry *dentry = d_alloc_name(dir, files->name);
                if (!dentry)
-                       goto out;
+                       return -ENOMEM;
                ret = spufs_new_file(dir->d_sb, dentry, files->ops,
                                        files->mode & mode, files->size, ctx);
                if (ret)
-                       goto out;
+                       return ret;
                files++;
        }
        return 0;
-out:
-       /*
-        * remove all children from dir. dir->inode is not set so don't
-        * just simply use spufs_prune_dir() and panic afterwards :)
-        * dput() looks like it will do the right thing:
-        * - dec parent's ref counter
-        * - remove child from parent's child list
-        * - free child's inode if possible
-        * - free child
-        */
-       list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
-               dput(dentry);
-       }
-
-       shrink_dcache_parent(dir);
-       return ret;
 }
 
 static int spufs_dir_close(struct inode *inode, struct file *file)
@@ -269,10 +250,9 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
        struct inode *inode;
        struct spu_context *ctx;
 
-       ret = -ENOSPC;
        inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
        if (!inode)
-               goto out;
+               return -ENOSPC;
 
        if (dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
@@ -280,40 +260,38 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
        }
        ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
        SPUFS_I(inode)->i_ctx = ctx;
-       if (!ctx)
-               goto out_iput;
+       if (!ctx) {
+               iput(inode);
+               return -ENOSPC;
+       }
 
        ctx->flags = flags;
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
+
+       mutex_lock(&inode->i_mutex);
+
+       dget(dentry);
+       inc_nlink(dir);
+       inc_nlink(inode);
+
+       d_instantiate(dentry, inode);
+
        if (flags & SPU_CREATE_NOSCHED)
                ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
                                         mode, ctx);
        else
                ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
 
-       if (ret)
-               goto out_free_ctx;
-
-       if (spufs_get_sb_info(dir->i_sb)->debug)
+       if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
                ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
                                mode, ctx);
 
        if (ret)
-               goto out_free_ctx;
+               spufs_rmdir(dir, dentry);
 
-       d_instantiate(dentry, inode);
-       dget(dentry);
-       inc_nlink(dir);
-       inc_nlink(dentry->d_inode);
-       goto out;
+       mutex_unlock(&inode->i_mutex);
 
-out_free_ctx:
-       spu_forget(ctx);
-       put_spu_context(ctx);
-out_iput:
-       iput(inode);
-out:
        return ret;
 }
 
@@ -368,7 +346,7 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
                        return ERR_PTR(-EINVAL);
 
                neighbor = get_spu_context(
-                               SPUFS_I(filp->f_dentry->d_inode)->i_ctx);
+                               SPUFS_I(file_inode(filp))->i_ctx);
 
                if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
                    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
index baee994..b045fdd 100644 (file)
@@ -47,7 +47,7 @@ static long do_spu_run(struct file *filp,
        if (filp->f_op != &spufs_context_fops)
                goto out;
 
-       i = SPUFS_I(filp->f_path.dentry->d_inode);
+       i = SPUFS_I(file_inode(filp));
        ret = spufs_run_spu(i->i_ctx, &npc, &status);
 
        if (put_user(npc, unpc))
index c9311cf..cf4e773 100644 (file)
@@ -86,7 +86,7 @@ static int hcall_inst_seq_open(struct inode *inode, struct file *file)
 
        rc = seq_open(file, &hcall_inst_seq_ops);
        seq = file->private_data;
-       seq->private = file->f_path.dentry->d_inode->i_private;
+       seq->private = file_inode(file)->i_private;
 
        return rc;
 }
index fcf4b4c..4557e91 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 
 #include <asm/hvcall.h>
 #include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
                        = (unsigned int)last_p_partition_ID;
 
                /* copy the Null-term char too */
-               strncpy(&next_partner_info->location_code[0],
+               strlcpy(&next_partner_info->location_code[0],
                        (char *)&pi_buff[2],
-                       strlen((char *)&pi_buff[2]) + 1);
+                       sizeof(next_partner_info->location_code));
 
                list_add_tail(&(next_partner_info->node), head);
                next_partner_info = NULL;
index 5544572..47f3cda 100644 (file)
@@ -46,16 +46,12 @@ static struct proc_dir_entry *proc_ppc64_scan_log_dump;     /* The proc file */
 static ssize_t scanlog_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
 {
-        struct inode * inode = file->f_path.dentry->d_inode;
-       struct proc_dir_entry *dp;
-       unsigned int *data;
+       struct proc_dir_entry *dp = PDE(file_inode(file));
+       unsigned int *data = (unsigned int *)dp->data;
        int status;
        unsigned long len, off;
        unsigned int wait_time;
 
-        dp = PDE(inode);
-       data = (unsigned int *)dp->data;
-
        if (count > RTAS_DATA_BUF_SIZE)
                count = RTAS_DATA_BUF_SIZE;
 
index f09ae7b..4b50537 100644 (file)
@@ -134,6 +134,7 @@ config S390
        select HAVE_SYSCALL_WRAPPERS
        select HAVE_UID16 if 32BIT
        select HAVE_VIRT_CPU_ACCOUNTING
+       select HAVE_VIRT_TO_BUS
        select INIT_ALL_POSSIBLE
        select KTIME_SCALAR if 32BIT
        select MODULES_USE_ELF_RELA
index 13e76da..9fd4a40 100644 (file)
@@ -54,7 +54,7 @@ static ssize_t dbfs_read(struct file *file, char __user *buf,
        if (*ppos != 0)
                return 0;
 
-       df = file->f_path.dentry->d_inode->i_private;
+       df = file_inode(file)->i_private;
        mutex_lock(&df->lock);
        if (!df->data) {
                data = hypfs_dbfs_data_alloc(df);
index 06ea69b..8538015 100644 (file)
@@ -119,7 +119,7 @@ static void hypfs_evict_inode(struct inode *inode)
 
 static int hypfs_open(struct inode *inode, struct file *filp)
 {
-       char *data = filp->f_path.dentry->d_inode->i_private;
+       char *data = file_inode(filp)->i_private;
        struct hypfs_sb_info *fs_info;
 
        if (filp->f_mode & FMODE_WRITE) {
@@ -171,12 +171,10 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t offset)
 {
        int rc;
-       struct super_block *sb;
-       struct hypfs_sb_info *fs_info;
+       struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
+       struct hypfs_sb_info *fs_info = sb->s_fs_info;
        size_t count = iov_length(iov, nr_segs);
 
-       sb = iocb->ki_filp->f_path.dentry->d_inode->i_sb;
-       fs_info = sb->s_fs_info;
        /*
         * Currently we only allow one update per second for two reasons:
         * 1. diag 204 is VERY expensive
index 178ff96..1bfdf24 100644 (file)
@@ -180,10 +180,7 @@ extern unsigned long elf_hwcap;
 extern char elf_platform[];
 #define ELF_PLATFORM (elf_platform)
 
-#ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-#else /* CONFIG_64BIT */
+#ifdef CONFIG_64BIT
 #define SET_PERSONALITY(ex)                                    \
 do {                                                           \
        if (personality(current->personality) != PER_LINUX32)   \
index 96bc83e..51bcaa0 100644 (file)
@@ -16,9 +16,6 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-               return -EFAULT;
-
        pagefault_disable();
        ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
        pagefault_enable();
@@ -40,9 +37,6 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                                                u32 oldval, u32 newval)
 {
-       if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
-               return -EFAULT;
-
        return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
index 97de120..4a29308 100644 (file)
@@ -340,6 +340,8 @@ extern unsigned long MODULES_END;
 #define _REGION3_ENTRY_EMPTY   (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
 
 #define _REGION3_ENTRY_LARGE   0x400   /* RTTE-format control, large page  */
+#define _REGION3_ENTRY_RO      0x200   /* page protection bit              */
+#define _REGION3_ENTRY_CO      0x100   /* change-recording override        */
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_ORIGIN  ~0x7ffUL/* segment table origin             */
index 34268df..9c33ed4 100644 (file)
@@ -252,9 +252,7 @@ static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        might_fault();
-       if (access_ok(VERIFY_WRITE, to, n))
-               n = __copy_to_user(to, from, n);
-       return n;
+       return __copy_to_user(to, from, n);
 }
 
 /**
@@ -315,11 +313,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
                copy_from_user_overflow();
                return n;
        }
-       if (access_ok(VERIFY_READ, from, n))
-               n = __copy_from_user(to, from, n);
-       else
-               memset(to, 0, n);
-       return n;
+       return __copy_from_user(to, from, n);
 }
 
 static inline unsigned long __must_check
@@ -332,9 +326,7 @@ static inline unsigned long __must_check
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
        might_fault();
-       if (__access_ok(from,n) && __access_ok(to,n))
-               n = __copy_in_user(to, from, n);
-       return n;
+       return __copy_in_user(to, from, n);
 }
 
 /*
@@ -343,11 +335,8 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 static inline long __must_check
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
-        long res = -EFAULT;
        might_fault();
-        if (access_ok(VERIFY_READ, src, 1))
-               res = uaccess.strncpy_from_user(count, src, dst);
-        return res;
+       return uaccess.strncpy_from_user(count, src, dst);
 }
 
 static inline unsigned long
@@ -387,9 +376,7 @@ static inline unsigned long __must_check
 clear_user(void __user *to, unsigned long n)
 {
        might_fault();
-       if (access_ok(VERIFY_WRITE, to, n))
-               n = uaccess.clear_user(n, to);
-       return n;
+       return uaccess.clear_user(n, to);
 }
 
 extern int copy_to_user_real(void __user *dest, void *src, size_t count);
index 3e71194..6de049f 100644 (file)
@@ -53,9 +53,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
        int err;
 
-       if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
-               return -EFAULT;
-
        /* If you change siginfo_t structure, please be sure
           this code is fixed accordingly.
           It should never copy any pad contained in the structure
@@ -110,9 +107,6 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
        int err;
        u32 tmp;
 
-       if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
-               return -EFAULT;
-
        err = __get_user(to->si_signo, &from->si_signo);
        err |= __get_user(to->si_errno, &from->si_errno);
        err |= __get_user(to->si_code, &from->si_code);
@@ -244,8 +238,6 @@ asmlinkage long sys32_sigreturn(void)
        sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
        sigset_t set;
 
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
        if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
                goto badframe;
        set_current_blocked(&set);
@@ -265,8 +257,6 @@ asmlinkage long sys32_rt_sigreturn(void)
        rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
        sigset_t set;
 
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
        set_current_blocked(&set);
@@ -325,8 +315,6 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
                        sigset_t *set, struct pt_regs * regs)
 {
        sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32));
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
-               goto give_sigsegv;
 
        if (frame == (void __user *) -1UL)
                goto give_sigsegv;
@@ -391,8 +379,6 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 {
        int err = 0;
        rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32));
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
-               goto give_sigsegv;
 
        if (frame == (void __user *) -1UL)
                goto give_sigsegv;
index c14faf3..3c98c4d 100644 (file)
@@ -67,12 +67,6 @@ ENTRY(sys32_lchown16_wrapper)
        llgfr   %r4,%r4                 # __kernel_old_uid_emu31_t
        jg      sys32_lchown16          # branch to system call
 
-ENTRY(sys32_lseek_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       lgfr    %r3,%r3                 # off_t
-       llgfr   %r4,%r4                 # unsigned int
-       jg      sys_lseek               # branch to system call
-
 #sys32_getpid_wrapper                          # void
 
 ENTRY(sys32_mount_wrapper)
@@ -331,16 +325,6 @@ ENTRY(sys32_munmap_wrapper)
        llgfr   %r3,%r3                 # size_t
        jg      sys_munmap              # branch to system call
 
-ENTRY(sys32_truncate_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       lgfr    %r3,%r3                 # long
-       jg      sys_truncate            # branch to system call
-
-ENTRY(sys32_ftruncate_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned long
-       jg      sys_ftruncate           # branch to system call
-
 ENTRY(sys32_fchmod_wrapper)
        llgfr   %r2,%r2                 # unsigned int
        llgfr   %r3,%r3                 # mode_t
index 09a94cd..f1279dc 100644 (file)
@@ -611,7 +611,7 @@ debug_open(struct inode *inode, struct file *file)
        debug_info_t *debug_info, *debug_info_snapshot;
 
        mutex_lock(&debug_mutex);
-       debug_info = file->f_path.dentry->d_inode->i_private;
+       debug_info = file_inode(file)->i_private;
        /* find debug view */
        for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
                if (!debug_info->views[i])
index c50665f..3ad5e95 100644 (file)
@@ -1711,10 +1711,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8])
        if (!insn)
                return -ENOENT;
        if (insn->name[0] == '\0')
-               snprintf(buf, sizeof(buf), "%s",
+               snprintf(buf, 8, "%s",
                         long_insn_name[(int) insn->name[1]]);
        else
-               snprintf(buf, sizeof(buf), "%.5s", insn->name);
+               snprintf(buf, 8, "%.5s", insn->name);
        return 0;
 }
 EXPORT_SYMBOL_GPL(insn_to_mnemonic);
index d1c7214..3388b2b 100644 (file)
@@ -354,7 +354,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long flags, orig_ret_address;
        unsigned long trampoline_address;
        kprobe_opcode_t *correct_ret_addr;
@@ -379,7 +379,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        orig_ret_address = 0;
        correct_ret_addr = NULL;
        trampoline_address = (unsigned long) &kretprobe_trampoline;
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -398,7 +398,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
        correct_ret_addr = ri->ret_addr;
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -427,7 +427,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();
 
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
index f750bd7..7845e15 100644 (file)
@@ -222,7 +222,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
        struct mod_arch_syminfo *info;
        Elf_Addr loc, val;
        int r_type, r_sym;
-       int rc;
+       int rc = -ENOEXEC;
 
        /* This is where to make the change */
        loc = base + rela->r_offset;
index 9c6e747..c45becf 100644 (file)
@@ -116,8 +116,6 @@ SYSCALL_DEFINE0(sigreturn)
        sigframe __user *frame = (sigframe __user *)regs->gprs[15];
        sigset_t set;
 
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
        if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
                goto badframe;
        set_current_blocked(&set);
@@ -135,8 +133,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
        rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
        sigset_t set;
 
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
        if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;
        set_current_blocked(&set);
@@ -195,8 +191,6 @@ static int setup_frame(int sig, struct k_sigaction *ka,
        sigframe __user *frame;
 
        frame = get_sigframe(ka, regs, sizeof(sigframe));
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
-               goto give_sigsegv;
 
        if (frame == (void __user *) -1UL)
                goto give_sigsegv;
@@ -264,8 +258,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        rt_sigframe __user *frame;
 
        frame = get_sigframe(ka, regs, sizeof(rt_sigframe));
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
-               goto give_sigsegv;
 
        if (frame == (void __user *) -1UL)
                goto give_sigsegv;
index aaac708..630b935 100644 (file)
@@ -27,7 +27,7 @@ SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper)              /* 15 */
 SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper)    /* old lchown16 syscall*/
 NI_SYSCALL                                                     /* old break syscall holder */
 NI_SYSCALL                                                     /* old stat syscall holder */
-SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper)
+SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek)
 SYSCALL(sys_getpid,sys_getpid,sys_getpid)                      /* 20 */
 SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
 SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
@@ -100,8 +100,8 @@ SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
 SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper)   /* old readdir syscall */
 SYSCALL(sys_old_mmap,sys_old_mmap,old32_mmap_wrapper)          /* 90 */
 SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
-SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
-SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
+SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
+SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
 SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
 SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper)    /* 95 old fchown16 syscall*/
 SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
index 2443ae4..1829742 100644 (file)
@@ -162,19 +162,19 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
 
 static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 {
+       size_t done, len, offset, len_str;
        char buf[256];
-       int rc;
-       size_t done, len, len_str;
 
        done = 0;
        do {
-               len = min(count - done, (size_t) 256);
-               rc = uaccess.copy_from_user(len, src + done, buf);
-               if (unlikely(rc == len))
+               offset = (size_t)src & ~PAGE_MASK;
+               len = min(256UL, PAGE_SIZE - offset);
+               len = min(count - done, len);
+               if (copy_from_user_mvcos(len, src, buf))
                        return 0;
-               len -= rc;
                len_str = strnlen(buf, len);
                done += len_str;
+               src += len_str;
        } while ((len_str == len) && (done < count));
        return done + 1;
 }
@@ -182,18 +182,20 @@ static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
                                      char *dst)
 {
-       int rc;
-       size_t done, len, len_str;
+       size_t done, len, offset, len_str;
 
+       if (unlikely(!count))
+               return 0;
        done = 0;
        do {
-               len = min(count - done, (size_t) 4096);
-               rc = uaccess.copy_from_user(len, src + done, dst);
-               if (unlikely(rc == len))
+               offset = (size_t)src & ~PAGE_MASK;
+               len = min(count - done, PAGE_SIZE - offset);
+               if (copy_from_user_mvcos(len, src, dst))
                        return -EFAULT;
-               len -= rc;
                len_str = strnlen(dst, len);
                done += len_str;
+               src += len_str;
+               dst += len_str;
        } while ((len_str == len) && (done < count));
        return done;
 }
index a70ee84..dff631d 100644 (file)
 #include <asm/futex.h>
 #include "uaccess.h"
 
+#ifndef CONFIG_64BIT
+#define AHI    "ahi"
+#define SLR    "slr"
+#else
+#define AHI    "aghi"
+#define SLR    "slgr"
+#endif
+
+static size_t strnlen_kernel(size_t count, const char __user *src)
+{
+       register unsigned long reg0 asm("0") = 0UL;
+       unsigned long tmp1, tmp2;
+
+       asm volatile(
+               "   la    %2,0(%1)\n"
+               "   la    %3,0(%0,%1)\n"
+               "  "SLR"  %0,%0\n"
+               "0: srst  %3,%2\n"
+               "   jo    0b\n"
+               "   la    %0,1(%3)\n"   /* strnlen_kernel results includes \0 */
+               "  "SLR"  %0,%1\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+               : "d" (reg0) : "cc", "memory");
+       return count;
+}
+
+static size_t copy_in_kernel(size_t count, void __user *to,
+                            const void __user *from)
+{
+       unsigned long tmp1;
+
+       asm volatile(
+               "  "AHI"  %0,-1\n"
+               "   jo    5f\n"
+               "   bras  %3,3f\n"
+               "0:"AHI"  %0,257\n"
+               "1: mvc   0(1,%1),0(%2)\n"
+               "   la    %1,1(%1)\n"
+               "   la    %2,1(%2)\n"
+               "  "AHI"  %0,-1\n"
+               "   jnz   1b\n"
+               "   j     5f\n"
+               "2: mvc   0(256,%1),0(%2)\n"
+               "   la    %1,256(%1)\n"
+               "   la    %2,256(%2)\n"
+               "3:"AHI"  %0,-256\n"
+               "   jnm   2b\n"
+               "4: ex    %0,1b-0b(%3)\n"
+               "5:"SLR"  %0,%0\n"
+               "6:\n"
+               EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+               : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
+               : : "cc", "memory");
+       return count;
+}
 
 /*
  * Returns kernel address for user virtual address. If the returned address is
@@ -123,10 +180,8 @@ size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
 {
        size_t rc;
 
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memcpy(to, (void __kernel __force *) from, n);
-               return 0;
-       }
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return copy_in_kernel(n, (void __user *) to, from);
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
@@ -135,30 +190,28 @@ size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
 
 size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
 {
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memcpy((void __kernel __force *) to, from, n);
-               return 0;
-       }
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return copy_in_kernel(n, to, (void __user *) from);
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
 }
 
 static size_t clear_user_pt(size_t n, void __user *to)
 {
+       void *zpage = &empty_zero_page;
        long done, size, ret;
 
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memset((void __kernel __force *) to, 0, n);
-               return 0;
-       }
        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
-               ret = __user_copy_pt((unsigned long) to + done,
-                                     &empty_zero_page, size, 1);
+               if (segment_eq(get_fs(), KERNEL_DS))
+                       ret = copy_in_kernel(n, to, (void __user *) zpage);
+               else
+                       ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
                done += size;
+               to += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
@@ -172,8 +225,10 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
        unsigned long offset, done, len, kaddr;
        size_t len_str;
 
+       if (unlikely(!count))
+               return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
-               return strnlen((const char __kernel __force *) src, count) + 1;
+               return strnlen_kernel(count, src);
        done = 0;
 retry:
        spin_lock(&mm->page_table_lock);
@@ -200,25 +255,27 @@ fault:
 static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
 {
-       size_t n = strnlen_user_pt(count, src);
+       size_t done, len, offset, len_str;
 
-       if (!n)
-               return -EFAULT;
-       if (n > count)
-               n = count;
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memcpy(dst, (const char __kernel __force *) src, n);
-               if (dst[n-1] == '\0')
-                       return n-1;
-               else
-                       return n;
-       }
-       if (__user_copy_pt((unsigned long) src, dst, n, 0))
-               return -EFAULT;
-       if (dst[n-1] == '\0')
-               return n-1;
-       else
-               return n;
+       if (unlikely(!count))
+               return 0;
+       done = 0;
+       do {
+               offset = (size_t)src & ~PAGE_MASK;
+               len = min(count - done, PAGE_SIZE - offset);
+               if (segment_eq(get_fs(), KERNEL_DS)) {
+                       if (copy_in_kernel(len, (void __user *) dst, src))
+                               return -EFAULT;
+               } else {
+                       if (__user_copy_pt((unsigned long) src, dst, len, 0))
+                               return -EFAULT;
+               }
+               len_str = strnlen(dst, len);
+               done += len_str;
+               src += len_str;
+               dst += len_str;
+       } while ((len_str == len) && (done < count));
+       return done;
 }
 
 static size_t copy_in_user_pt(size_t n, void __user *to,
@@ -231,10 +288,8 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
        unsigned long kaddr_to, kaddr_from;
        int write_user;
 
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memcpy((void __force *) to, (void __force *) from, n);
-               return 0;
-       }
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return copy_in_kernel(n, to, from);
        done = 0;
 retry:
        spin_lock(&mm->page_table_lock);
index 6fbd063..4a75d47 100644 (file)
@@ -188,6 +188,8 @@ size_t strnlen_user_std(size_t size, const char __user *src)
        register unsigned long reg0 asm("0") = 0UL;
        unsigned long tmp1, tmp2;
 
+       if (unlikely(!size))
+               return 0;
        asm volatile(
                "   la    %2,0(%1)\n"
                "   la    %3,0(%0,%1)\n"
@@ -204,38 +206,24 @@ size_t strnlen_user_std(size_t size, const char __user *src)
        return size;
 }
 
-size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
+size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst)
 {
-       register unsigned long reg0 asm("0") = 0UL;
-       unsigned long tmp1, tmp2;
+       size_t done, len, offset, len_str;
 
-       asm volatile(
-               "   la    %3,0(%1)\n"
-               "   la    %4,0(%0,%1)\n"
-               "   sacf  256\n"
-               "0: srst  %4,%3\n"
-               "   jo    0b\n"
-               "   sacf  0\n"
-               "   la    %0,0(%4)\n"
-               "   jh    1f\n"         /* found \0 in string ? */
-               "  "AHI"  %4,1\n"       /* include \0 in copy */
-               "1:"SLR"  %0,%1\n"      /* %0 = return length (without \0) */
-               "  "SLR"  %4,%1\n"      /* %4 = copy length (including \0) */
-               "2: mvcp  0(%4,%2),0(%1),%5\n"
-               "   jz    9f\n"
-               "3:"AHI"  %4,-256\n"
-               "   la    %1,256(%1)\n"
-               "   la    %2,256(%2)\n"
-               "4: mvcp  0(%4,%2),0(%1),%5\n"
-               "   jnz   3b\n"
-               "   j     9f\n"
-               "7: sacf  0\n"
-               "8:"LHI"  %0,%6\n"
-               "9:\n"
-               EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
-               : "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
-               : "d" (reg0), "K" (-EFAULT) : "cc", "memory");
-       return size;
+       if (unlikely(!count))
+               return 0;
+       done = 0;
+       do {
+               offset = (size_t)src & ~PAGE_MASK;
+               len = min(count - done, PAGE_SIZE - offset);
+               if (copy_from_user_std(len, src, dst))
+                       return -EFAULT;
+               len_str = strnlen(dst, len);
+               done += len_str;
+               src += len_str;
+               dst += len_str;
+       } while ((len_str == len) && (done < count));
+       return done;
 }
 
 #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)     \
index 04e4892..3ad65b0 100644 (file)
@@ -49,10 +49,13 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
                { "ASCE", "PGD", "PUD", "PMD", "PTE" };
 
        seq_printf(m, "%s ", level_name[level]);
-       if (pr & _PAGE_INVALID)
+       if (pr & _PAGE_INVALID) {
                seq_printf(m, "I\n");
-       else
-               seq_printf(m, "%s\n", pr & _PAGE_RO ? "RO" : "RW");
+               return;
+       }
+       seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
+       seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : "   ");
+       seq_putc(m, '\n');
 }
 
 static void note_page(struct seq_file *m, struct pg_state *st,
@@ -125,6 +128,12 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
        }
 }
 
+#ifdef CONFIG_64BIT
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
+#else
+#define _PMD_PROT_MASK 0
+#endif
+
 static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
                           pud_t *pud, unsigned long addr)
 {
@@ -137,7 +146,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
                pmd = pmd_offset(pud, addr);
                if (!pmd_none(*pmd)) {
                        if (pmd_large(*pmd)) {
-                               prot = pmd_val(*pmd) & _SEGMENT_ENTRY_RO;
+                               prot = pmd_val(*pmd) & _PMD_PROT_MASK;
                                note_page(m, st, prot, 3);
                        } else
                                walk_pte_level(m, st, pmd, addr);
@@ -147,6 +156,12 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
        }
 }
 
+#ifdef CONFIG_64BIT
+#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO)
+#else
+#define _PUD_PROT_MASK 0
+#endif
+
 static void walk_pud_level(struct seq_file *m, struct pg_state *st,
                           pgd_t *pgd, unsigned long addr)
 {
@@ -159,7 +174,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud))
                        if (pud_large(*pud)) {
-                               prot = pud_val(*pud) & _PAGE_RO;
+                               prot = pud_val(*pud) & _PUD_PROT_MASK;
                                note_page(m, st, prot, 2);
                        } else
                                walk_pmd_level(m, st, pud, addr);
index e21aaf4..ffab84d 100644 (file)
@@ -236,7 +236,8 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                                if (!new_page)
                                        goto out;
                                pmd_val(*pm_dir) = __pa(new_page) |
-                                       _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
+                                       _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+                                       _SEGMENT_ENTRY_CO;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
index a303c95..a5d07bc 100644 (file)
@@ -99,7 +99,7 @@ static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
 static int pci_perf_seq_open(struct inode *inode, struct file *filp)
 {
        return single_open(filp, pci_perf_show,
-                          filp->f_path.dentry->d_inode->i_private);
+                          file_inode(filp)->i_private);
 }
 
 static const struct file_operations debugfs_pci_perf_fops = {
@@ -121,7 +121,7 @@ static int pci_debug_show(struct seq_file *m, void *v)
 static int pci_debug_seq_open(struct inode *inode, struct file *filp)
 {
        return single_open(filp, pci_debug_show,
-                          filp->f_path.dentry->d_inode->i_private);
+                          file_inode(filp)->i_private);
 }
 
 static const struct file_operations debugfs_pci_debug_fops = {
index 90fd348..0297931 100644 (file)
@@ -25,10 +25,9 @@ static DEFINE_SPINLOCK(msi_map_lock);
 
 struct msi_desc *__irq_get_msi_desc(unsigned int irq)
 {
-       struct hlist_node *entry;
        struct msi_map *map;
 
-       hlist_for_each_entry_rcu(map, entry,
+       hlist_for_each_entry_rcu(map,
                        &msi_hash[msi_hashfn(irq)], msi_chain)
                if (map->irq == irq)
                        return map->msi;
index 3b1482e..e569aa1 100644 (file)
@@ -12,6 +12,7 @@ config SCORE
        select GENERIC_CPU_DEVICES
        select GENERIC_CLOCKEVENTS
        select HAVE_MOD_ARCH_SPECIFIC
+       select HAVE_VIRT_TO_BUS
        select MODULES_USE_ELF_REL
        select CLONE_BACKWARDS
 
index 5d566c7..6a9421c 100644 (file)
@@ -52,11 +52,6 @@ typedef elf_fpreg_t  elf_fpregset_t;
 #define ELF_DATA       ELFDATA2LSB
 #define ELF_ARCH       EM_SCORE7
 
-#define SET_PERSONALITY(ex)                                    \
-do {                                                           \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK))); \
-} while (0)
-
 struct task_struct;
 struct pt_regs;
 
index ef6717a..5e85963 100644 (file)
@@ -148,9 +148,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
        def_bool n
 
-config ARCH_NO_VIRT_TO_BUS
-       def_bool y
-
 config ARCH_HAS_DEFAULT_IDLE
        def_bool y
 
index 1208b09..42b46e6 100644 (file)
@@ -310,7 +310,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -360,7 +360,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
        preempt_enable_no_resched();
 
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
index 620fa7f..aea1485 100644 (file)
@@ -140,7 +140,7 @@ static int alignment_proc_open(struct inode *inode, struct file *file)
 static ssize_t alignment_proc_write(struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
 {
-       int *data = PDE(file->f_path.dentry->d_inode)->data;
+       int *data = PDE(file_inode(file))->data;
        char mode;
 
        if (count > 0) {
index 58fb1e3..289127d 100644 (file)
@@ -146,9 +146,6 @@ config GENERIC_GPIO
        help
          Generic GPIO API support
 
-config ARCH_NO_VIRT_TO_BUS
-       def_bool y
-
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
        def_bool y if SPARC64
 
index ac74a2c..a24e41f 100644 (file)
@@ -128,7 +128,4 @@ typedef struct {
 
 #define ELF_PLATFORM   (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif /* !(__ASMSPARC_ELF_H) */
index a39d1ba..e722121 100644 (file)
@@ -511,7 +511,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -531,7 +531,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -559,7 +559,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();
 
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
index 9fcc6b4..54df554 100644 (file)
@@ -953,9 +953,8 @@ static HLIST_HEAD(ldc_channel_list);
 static int __ldc_channel_exists(unsigned long id)
 {
        struct ldc_channel *lp;
-       struct hlist_node *n;
 
-       hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
+       hlist_for_each_entry(lp, &ldc_channel_list, list) {
                if (lp->id == id)
                        return 1;
        }
index cd5dc4d..b524f91 100644 (file)
@@ -726,7 +726,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-void do_signal32(sigset_t *oldset, struct pt_regs * regs)
+void do_signal32(struct pt_regs * regs)
 {
        struct ksignal ksig;
        unsigned long orig_i0 = 0;
index 260ddcd..0881348 100644 (file)
@@ -21,7 +21,7 @@ sys_call_table32:
 /*0*/  .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
 /*5*/  .word compat_sys_open, sys_close, compat_sys_wait4, sys_creat, sys_link
 /*10*/  .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
+/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek
 /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
 /*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
 /*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
@@ -43,8 +43,8 @@ sys_call_table32:
 /*110*/        .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
        .word sys_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
 /*120*/        .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
-       .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
-/*130*/        .word sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
+       .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
+/*130*/        .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
        .word sys_nis_syscall, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
 /*140*/        .word sys_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
        .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
index 4ce6e4c..ff496ab 100644 (file)
@@ -17,6 +17,7 @@ config TILE
        select GENERIC_IRQ_SHOW
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_SYSCALL_WRAPPERS if TILEGX
+       select HAVE_VIRT_TO_BUS
        select SYS_HYPERVISOR
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_CLOCKEVENTS
index 60651df..dc50b15 100644 (file)
@@ -9,6 +9,7 @@ config UNICORE32
        select GENERIC_ATOMIC64
        select HAVE_KERNEL_LZO
        select HAVE_KERNEL_LZMA
+       select HAVE_VIRT_TO_BUS
        select ARCH_HAVE_CUSTOM_GPIO_H
        select GENERIC_FIND_FIRST_BIT
        select GENERIC_IRQ_PROBE
index 6a93833..a4f24f5 100644 (file)
@@ -112,6 +112,7 @@ config X86
        select GENERIC_STRNLEN_USER
        select HAVE_CONTEXT_TRACKING if X86_64
        select HAVE_IRQ_TIME_ACCOUNTING
+       select HAVE_VIRT_TO_BUS
        select MODULES_USE_ELF_REL if X86_32
        select MODULES_USE_ELF_RELA if X86_64
        select CLONE_BACKWARDS if X86_32
index f8fa411..c205035 100644 (file)
 
 static efi_system_table_t *sys_table;
 
+static void efi_char16_printk(efi_char16_t *str)
+{
+       struct efi_simple_text_output_protocol *out;
+
+       out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
+       efi_call_phys2(out->output_string, out, str);
+}
+
 static void efi_printk(char *str)
 {
        char *s8;
 
        for (s8 = str; *s8; s8++) {
-               struct efi_simple_text_output_protocol *out;
                efi_char16_t ch[2] = { 0 };
 
                ch[0] = *s8;
-               out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
-
                if (*s8 == '\n') {
                        efi_char16_t nl[2] = { '\r', 0 };
-                       efi_call_phys2(out->output_string, out, nl);
+                       efi_char16_printk(nl);
                }
 
-               efi_call_phys2(out->output_string, out, ch);
+               efi_char16_printk(ch);
        }
 }
 
@@ -709,7 +714,12 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
                        if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
                                break;
 
-                       *p++ = *str++;
+                       if (*str == '/') {
+                               *p++ = '\\';
+                               *str++;
+                       } else {
+                               *p++ = *str++;
+                       }
                }
 
                *p = '\0';
@@ -737,7 +747,9 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
                status = efi_call_phys5(fh->open, fh, &h, filename_16,
                                        EFI_FILE_MODE_READ, (u64)0);
                if (status != EFI_SUCCESS) {
-                       efi_printk("Failed to open initrd file\n");
+                       efi_printk("Failed to open initrd file: ");
+                       efi_char16_printk(filename_16);
+                       efi_printk("\n");
                        goto close_handles;
                }
 
index a703af1..03abf9b 100644 (file)
@@ -271,7 +271,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
        if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
             N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
            N_TRSIZE(ex) || N_DRSIZE(ex) ||
-           i_size_read(bprm->file->f_path.dentry->d_inode) <
+           i_size_read(file_inode(bprm->file)) <
            ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
                return -ENOEXEC;
        }
@@ -425,12 +425,10 @@ beyond_if:
 
 static int load_aout_library(struct file *file)
 {
-       struct inode *inode;
        unsigned long bss, start_addr, len, error;
        int retval;
        struct exec ex;
 
-       inode = file->f_path.dentry->d_inode;
 
        retval = -ENOEXEC;
        error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
@@ -440,7 +438,7 @@ static int load_aout_library(struct file *file)
        /* We come in here for the regular a.out style of shared libraries */
        if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
            N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
-           i_size_read(inode) <
+           i_size_read(file_inode(file)) <
            ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
                goto out;
        }
index 592f5a9..ad7a20c 100644 (file)
@@ -218,11 +218,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
  * Some system calls that need sign extended arguments. This could be
  * done by a generic wrapper.
  */
-long sys32_lseek(unsigned int fd, int offset, unsigned int whence)
-{
-       return sys_lseek(fd, offset, whence);
-}
-
 long sys32_kill(int pid, int sig)
 {
        return sys_kill(pid, sig);
index 28677c5..60c89f3 100644 (file)
@@ -102,7 +102,14 @@ extern void efi_call_phys_epilog(void);
 extern void efi_unmap_memmap(void);
 extern void efi_memory_uc(u64 addr, unsigned long size);
 
-#ifndef CONFIG_EFI
+#ifdef CONFIG_EFI
+
+static inline bool efi_is_native(void)
+{
+       return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+}
+
+#else
 /*
  * IF EFI is not configured, have the EFI calls return -ENOSYS.
  */
index 86cb51e..0525a8b 100644 (file)
@@ -72,4 +72,28 @@ int ftrace_int3_handler(struct pt_regs *regs);
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+
+#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
+#include <asm/compat.h>
+
+/*
+ * Because ia32 syscalls do not map to x86_64 syscall numbers
+ * this screws up the trace output when tracing a ia32 task.
+ * Instead of reporting bogus syscalls, just do not trace them.
+ *
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+       if (is_compat_task())
+               return true;
+       return false;
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
+#endif /* !__ASSEMBLY__  && !COMPILE_OFFSETS */
+
 #endif /* _ASM_X86_FTRACE_H */
index 0218d91..8459efc 100644 (file)
@@ -43,7 +43,6 @@ asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
 asmlinkage long sys32_personality(unsigned long);
 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
 
-long sys32_lseek(unsigned int, int, unsigned int);
 long sys32_kill(int, int);
 long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
 long sys32_vm86_warning(void);
index 2d946e6..2cd056e 100644 (file)
@@ -20,7 +20,6 @@
 struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
-#include <asm/ftrace.h>
 #include <linux/atomic.h>
 
 struct thread_info {
index a5b4dce..904611b 100644 (file)
@@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
 {
        if (config_enabled(CONFIG_X86_32) && !arg)
                force_enable_local_apic = 1;
-       else if (!strncmp(arg, "notscdeadline", 13))
+       else if (arg && !strncmp(arg, "notscdeadline", 13))
                setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
        return 0;
 }
index 4914e94..529c893 100644 (file)
@@ -107,6 +107,27 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
        EVENT_CONSTRAINT_END
 };
 
+static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
+{
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+       INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
+       INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
+       INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
+       INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
+       INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+       INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
+       INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+       INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
+       INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /*  MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+       EVENT_CONSTRAINT_END
+};
+
 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
 {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
@@ -2095,7 +2116,7 @@ __init int intel_pmu_init(void)
 
                intel_pmu_lbr_init_snb();
 
-               x86_pmu.event_constraints = intel_snb_event_constraints;
+               x86_pmu.event_constraints = intel_ivb_event_constraints;
                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
                x86_pmu.extra_regs = intel_snb_extra_regs;
index 60c7891..1e4dbcf 100644 (file)
@@ -85,7 +85,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 {
        char __user *tmp = buf;
        struct cpuid_regs cmd;
-       int cpu = iminor(file->f_path.dentry->d_inode);
+       int cpu = iminor(file_inode(file));
        u64 pos = *ppos;
        ssize_t bytes = 0;
        int err = 0;
@@ -116,7 +116,7 @@ static int cpuid_open(struct inode *inode, struct file *file)
        unsigned int cpu;
        struct cpuinfo_x86 *c;
 
-       cpu = iminor(file->f_path.dentry->d_inode);
+       cpu = iminor(file_inode(file));
        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;  /* No such CPU */
 
index 48d9d4e..992f442 100644 (file)
@@ -5,8 +5,6 @@
 #include <asm/setup.h>
 #include <asm/bios_ebda.h>
 
-#define BIOS_LOWMEM_KILOBYTES 0x413
-
 /*
  * The BIOS places the EBDA/XBDA at the top of conventional
  * memory, and usually decreases the reported amount of
  * chipset: reserve a page before VGA to prevent PCI prefetch
  * into it (errata #56). Usually the page is reserved anyways,
  * unless you have no PS/2 mouse plugged in.
+ *
+ * This function is deliberately very conservative.  Losing
+ * memory in the bottom megabyte is rarely a problem, as long
+ * as we have enough memory to install the trampoline.  Using
+ * memory that is in use by the BIOS or by some DMA device
+ * the BIOS didn't shut down *is* a big problem.
  */
+
+#define BIOS_LOWMEM_KILOBYTES  0x413
+#define LOWMEM_CAP             0x9f000U        /* Absolute maximum */
+#define INSANE_CUTOFF          0x20000U        /* Less than this = insane */
+
 void __init reserve_ebda_region(void)
 {
        unsigned int lowmem, ebda_addr;
 
-       /* To determine the position of the EBDA and the */
-       /* end of conventional memory, we need to look at */
-       /* the BIOS data area. In a paravirtual environment */
-       /* that area is absent. We'll just have to assume */
-       /* that the paravirt case can handle memory setup */
-       /* correctly, without our help. */
+       /*
+        * To determine the position of the EBDA and the
+        * end of conventional memory, we need to look at
+        * the BIOS data area. In a paravirtual environment
+        * that area is absent. We'll just have to assume
+        * that the paravirt case can handle memory setup
+        * correctly, without our help.
+        */
        if (paravirt_enabled())
                return;
 
@@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
        /* start of EBDA area */
        ebda_addr = get_bios_ebda();
 
-       /* Fixup: bios puts an EBDA in the top 64K segment */
-       /* of conventional memory, but does not adjust lowmem. */
-       if ((lowmem - ebda_addr) <= 0x10000)
-               lowmem = ebda_addr;
+       /*
+        * Note: some old Dells seem to need 4k EBDA without
+        * reporting so, so just consider the memory above 0x9f000
+        * to be off limits (bugzilla 2990).
+        */
+
+       /* If the EBDA address is below 128K, assume it is bogus */
+       if (ebda_addr < INSANE_CUTOFF)
+               ebda_addr = LOWMEM_CAP;
 
-       /* Fixup: bios does not report an EBDA at all. */
-       /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
-       if ((ebda_addr == 0) && (lowmem >= 0x9f000))
-               lowmem = 0x9f000;
+       /* If lowmem is less than 128K, assume it is bogus */
+       if (lowmem < INSANE_CUTOFF)
+               lowmem = LOWMEM_CAP;
 
-       /* Paranoia: should never happen, but... */
-       if ((lowmem == 0) || (lowmem >= 0x100000))
-               lowmem = 0x9f000;
+       /* Use the lower of the lowmem and EBDA markers as the cutoff */
+       lowmem = min(lowmem, ebda_addr);
+       lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
 
        /* reserve all memory between lowmem and the 1MB mark */
        memblock_reserve(lowmem, 0x100000 - lowmem);
index b7de3b2..6859e96 100644 (file)
@@ -48,7 +48,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
        .globl startup_64
 startup_64:
        /*
-        * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
+        * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
         * and someone has loaded an identity mapped page table
         * for us.  These identity mapped page tables map all of the
         * kernel pages and possibly all of memory.
@@ -159,7 +159,7 @@ startup_64:
        jmp 1f
 ENTRY(secondary_startup_64)
        /*
-        * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
+        * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
         * and someone has loaded a mapped page table.
         *
         * %rsi holds a physical pointer to real_mode_data.
index e124554..3f06e61 100644 (file)
@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
         *       will be the real return address, and all the rest will
         *       point to kretprobe_trampoline.
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -701,7 +701,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
        kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
        correct_ret_addr = ri->ret_addr;
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
@@ -728,7 +728,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
        kretprobe_hash_unlock(current, &flags);
 
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
index 4929502..ce13049 100644 (file)
@@ -71,7 +71,7 @@ static ssize_t msr_read(struct file *file, char __user *buf,
        u32 __user *tmp = (u32 __user *) buf;
        u32 data[2];
        u32 reg = *ppos;
-       int cpu = iminor(file->f_path.dentry->d_inode);
+       int cpu = iminor(file_inode(file));
        int err = 0;
        ssize_t bytes = 0;
 
@@ -99,7 +99,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
        const u32 __user *tmp = (const u32 __user *)buf;
        u32 data[2];
        u32 reg = *ppos;
-       int cpu = iminor(file->f_path.dentry->d_inode);
+       int cpu = iminor(file_inode(file));
        int err = 0;
        ssize_t bytes = 0;
 
@@ -125,7 +125,7 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
 {
        u32 __user *uregs = (u32 __user *)arg;
        u32 regs[8];
-       int cpu = iminor(file->f_path.dentry->d_inode);
+       int cpu = iminor(file_inode(file));
        int err;
 
        switch (ioc) {
@@ -171,13 +171,12 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
 
 static int msr_open(struct inode *inode, struct file *file)
 {
-       unsigned int cpu;
+       unsigned int cpu = iminor(file_inode(file));
        struct cpuinfo_x86 *c;
 
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
 
-       cpu = iminor(file->f_path.dentry->d_inode);
        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;  /* No such CPU */
 
index f84f5c5..6030805 100644 (file)
@@ -509,3 +509,4 @@ void local_touch_nmi(void)
 {
        __this_cpu_write(last_nmi_rip, 0);
 }
+EXPORT_SYMBOL_GPL(local_touch_nmi);
index 85c3959..2cb9470 100644 (file)
@@ -185,7 +185,7 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
 
        for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
                __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
-                            __pa_symbol(i) + (idx*PAGE_SIZE),
+                            __pa(i) + (idx*PAGE_SIZE),
                             PAGE_KERNEL_VVAR);
        }
 
index 9c857f0..84d3285 100644 (file)
@@ -1056,15 +1056,6 @@ void __init setup_arch(char **cmdline_p)
        setup_bios_corruption_check();
 #endif
 
-       /*
-        * In the memory hotplug case, the kernel needs info from SRAT to
-        * determine which memory is hotpluggable before allocating memory
-        * using memblock.
-        */
-       acpi_boot_table_init();
-       early_acpi_boot_init();
-       early_parse_srat();
-
 #ifdef CONFIG_X86_32
        printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
                        (max_pfn_mapped<<PAGE_SHIFT) - 1);
@@ -1110,6 +1101,10 @@ void __init setup_arch(char **cmdline_p)
        /*
         * Parse the ACPI tables for possible boot-time SMP configuration.
         */
+       acpi_boot_table_init();
+
+       early_acpi_boot_init();
+
        initmem_init();
        memblock_find_dma_reserve();
 
@@ -1196,8 +1191,7 @@ void __init setup_arch(char **cmdline_p)
         * mismatched firmware/kernel archtectures since there is no
         * support for runtime services.
         */
-       if (efi_enabled(EFI_BOOT) &&
-           IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
+       if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
                pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
                efi_unmap_memmap();
        }
index 4ed3edb..956ca35 100644 (file)
@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos)                             \
-  hlist_for_each_entry(sp, pos,                                                \
+#define for_each_gfn_sp(kvm, sp, gfn)                                  \
+  hlist_for_each_entry(sp,                                             \
    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)  \
        if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)              \
-  hlist_for_each_entry(sp, pos,                                                \
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn)                   \
+  hlist_for_each_entry(sp,                                             \
    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)  \
                if ((sp)->gfn != (gfn) || (sp)->role.direct ||          \
                        (sp)->role.invalid) {} else
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 {
        struct kvm_mmu_page *s;
-       struct hlist_node *node;
        LIST_HEAD(invalid_list);
        bool flush = false;
 
-       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
                if (!s->unsync)
                        continue;
 
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        union kvm_mmu_page_role role;
        unsigned quadrant;
        struct kvm_mmu_page *sp;
-       struct hlist_node *node;
        bool need_sync = false;
 
        role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
-       for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+       for_each_gfn_sp(vcpu->kvm, sp, gfn) {
                if (!need_sync && sp->unsync)
                        need_sync = true;
 
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_mmu_page *sp;
-       struct hlist_node *node;
        LIST_HEAD(invalid_list);
        int r;
 
        pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
        r = 0;
        spin_lock(&kvm->mmu_lock);
-       for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+       for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
                pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
                         sp->role.word);
                r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 {
        struct kvm_mmu_page *s;
-       struct hlist_node *node;
 
-       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
                if (s->unsync)
                        continue;
                WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
                                  bool can_unsync)
 {
        struct kvm_mmu_page *s;
-       struct hlist_node *node;
        bool need_unsync = false;
 
-       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+       for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
                if (!can_unsync)
                        return 1;
 
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        gfn_t gfn = gpa >> PAGE_SHIFT;
        union kvm_mmu_page_role mask = { .word = 0 };
        struct kvm_mmu_page *sp;
-       struct hlist_node *node;
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
        int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
        mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-       for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+       for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
                if (detect_write_misaligned(sp, gpa, bytes) ||
                      detect_write_flooding(sp)) {
                        zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
index fb674fd..2b97525 100644 (file)
@@ -939,14 +939,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
        if (pmd_large(*pmd))
                return spurious_fault_check(error_code, (pte_t *) pmd);
 
-       /*
-        * Note: don't use pte_present() here, since it returns true
-        * if the _PAGE_PROTNONE bit is set.  However, this aliases the
-        * _PAGE_GLOBAL bit, which for kernel pages give false positives
-        * when CONFIG_DEBUG_PAGEALLOC is used.
-        */
        pte = pte_offset_kernel(pmd, address);
-       if (!(pte_flags(*pte) & _PAGE_PRESENT))
+       if (!pte_present(*pte))
                return 0;
 
        ret = spurious_fault_check(error_code, pte);
index dfd3025..72fe01e 100644 (file)
@@ -97,8 +97,7 @@ void numa_set_node(int cpu, int node)
 #endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;
 
-       if (node != NUMA_NO_NODE)
-               set_cpu_numa_node(cpu, node);
+       set_cpu_numa_node(cpu, node);
 }
 
 void numa_clear_node(int cpu)
@@ -213,9 +212,10 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
         * Allocate node data.  Try node-local memory and then any node.
         * Never allocate in DMA zone.
         */
-       nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+       nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
        if (!nd_pa) {
-               pr_err("Cannot find %zu bytes in any node\n", nd_size);
+               pr_err("Cannot find %zu bytes in node %d\n",
+                      nd_size, nid);
                return;
        }
        nd = __va(nd_pa);
@@ -560,12 +560,10 @@ static int __init numa_init(int (*init_func)(void))
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                set_apicid_to_node(i, NUMA_NO_NODE);
 
-       /*
-        * Do not clear numa_nodes_parsed or zero numa_meminfo here, because
-        * SRAT was parsed earlier in early_parse_srat().
-        */
+       nodes_clear(numa_nodes_parsed);
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
+       memset(&numa_meminfo, 0, sizeof(numa_meminfo));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
        numa_reset_distance();
 
index ca1f1c2..091934e 100644 (file)
@@ -472,6 +472,19 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
+       /*
+        * Set the PSE and GLOBAL flags only if the PRESENT flag is
+        * set otherwise pmd_present/pmd_huge will return true even on
+        * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
+        * for the ancient hardware that doesn't support it.
+        */
+       if (pgprot_val(new_prot) & _PAGE_PRESENT)
+               pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+       else
+               pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+
+       new_prot = canon_pgprot(new_prot);
+
        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
@@ -517,7 +530,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
-               new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
+               new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
@@ -561,16 +574,35 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
 #ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
-               pgprot_val(ref_prot) |= _PAGE_PSE;
+               /*
+                * Set the PSE flags only if the PRESENT flag is set
+                * otherwise pmd_present/pmd_huge will return true
+                * even on a non present pmd.
+                */
+               if (pgprot_val(ref_prot) & _PAGE_PRESENT)
+                       pgprot_val(ref_prot) |= _PAGE_PSE;
+               else
+                       pgprot_val(ref_prot) &= ~_PAGE_PSE;
        }
 #endif
 
+       /*
+        * Set the GLOBAL flags only if the PRESENT flag is set
+        * otherwise pmd/pte_present will return true even on a non
+        * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
+        * for the ancient hardware that doesn't support it.
+        */
+       if (pgprot_val(ref_prot) & _PAGE_PRESENT)
+               pgprot_val(ref_prot) |= _PAGE_GLOBAL;
+       else
+               pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
+
        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
-               set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
+               set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
 
        if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
                                PFN_DOWN(__pa(address)) + 1))
@@ -684,6 +716,18 @@ repeat:
 
                new_prot = static_protections(new_prot, address, pfn);
 
+               /*
+                * Set the GLOBAL flags only if the PRESENT flag is
+                * set otherwise pte_present will return true even on
+                * a non present pte. The canon_pgprot will clear
+                * _PAGE_GLOBAL for the ancient hardware that doesn't
+                * support it.
+                */
+               if (pgprot_val(new_prot) & _PAGE_PRESENT)
+                       pgprot_val(new_prot) |= _PAGE_GLOBAL;
+               else
+                       pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
+
                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change it's attributes
index 79836d0..cdd0da9 100644 (file)
@@ -141,126 +141,11 @@ static inline int save_add_info(void) {return 1;}
 static inline int save_add_info(void) {return 0;}
 #endif
 
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-static void __init
-handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
-{
-       int overlap, i;
-       unsigned long start_pfn, end_pfn;
-
-       start_pfn = PFN_DOWN(start);
-       end_pfn = PFN_UP(end);
-
-       /*
-        * For movablemem_map=acpi:
-        *
-        * SRAT:                |_____| |_____| |_________| |_________| ......
-        * node id:                0       1         1           2
-        * hotpluggable:           n       y         y           n
-        * movablemem_map:              |_____| |_________|
-        *
-        * Using movablemem_map, we can prevent memblock from allocating memory
-        * on ZONE_MOVABLE at boot time.
-        *
-        * Before parsing SRAT, memblock has already reserve some memory ranges
-        * for other purposes, such as for kernel image. We cannot prevent
-        * kernel from using these memory, so we need to exclude these memory
-        * even if it is hotpluggable.
-        * Furthermore, to ensure the kernel has enough memory to boot, we make
-        * all the memory on the node which the kernel resides in
-        * un-hotpluggable.
-        */
-       if (hotpluggable && movablemem_map.acpi) {
-               /* Exclude ranges reserved by memblock. */
-               struct memblock_type *rgn = &memblock.reserved;
-
-               for (i = 0; i < rgn->cnt; i++) {
-                       if (end <= rgn->regions[i].base ||
-                           start >= rgn->regions[i].base +
-                           rgn->regions[i].size)
-                               continue;
-
-                       /*
-                        * If the memory range overlaps the memory reserved by
-                        * memblock, then the kernel resides in this node.
-                        */
-                       node_set(node, movablemem_map.numa_nodes_kernel);
-
-                       goto out;
-               }
-
-               /*
-                * If the kernel resides in this node, then the whole node
-                * should not be hotpluggable.
-                */
-               if (node_isset(node, movablemem_map.numa_nodes_kernel))
-                       goto out;
-
-               insert_movablemem_map(start_pfn, end_pfn);
-
-               /*
-                * numa_nodes_hotplug nodemask represents which nodes are put
-                * into movablemem_map.map[].
-                */
-               node_set(node, movablemem_map.numa_nodes_hotplug);
-               goto out;
-       }
-
-       /*
-        * For movablemem_map=nn[KMG]@ss[KMG]:
-        *
-        * SRAT:                |_____| |_____| |_________| |_________| ......
-        * node id:                0       1         1           2
-        * user specified:                |__|                 |___|
-        * movablemem_map:                |___| |_________|    |______| ......
-        *
-        * Using movablemem_map, we can prevent memblock from allocating memory
-        * on ZONE_MOVABLE at boot time.
-        *
-        * NOTE: In this case, SRAT info will be ingored.
-        */
-       overlap = movablemem_map_overlap(start_pfn, end_pfn);
-       if (overlap >= 0) {
-               /*
-                * If part of this range is in movablemem_map, we need to
-                * add the range after it to extend the range to the end
-                * of the node, because from the min address specified to
-                * the end of the node will be ZONE_MOVABLE.
-                */
-               start_pfn = max(start_pfn,
-                           movablemem_map.map[overlap].start_pfn);
-               insert_movablemem_map(start_pfn, end_pfn);
-
-               /*
-                * Set the nodemask, so that if the address range on one node
-                * is not continuse, we can add the subsequent ranges on the
-                * same node into movablemem_map.
-                */
-               node_set(node, movablemem_map.numa_nodes_hotplug);
-       } else {
-               if (node_isset(node, movablemem_map.numa_nodes_hotplug))
-                       /*
-                        * Insert the range if we already have movable ranges
-                        * on the same node.
-                        */
-                       insert_movablemem_map(start_pfn, end_pfn);
-       }
-out:
-       return;
-}
-#else          /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-static inline void
-handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
-{
-}
-#endif         /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
        u64 start, end;
-       u32 hotpluggable;
        int node, pxm;
 
        if (srat_disabled())
@@ -269,8 +154,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                goto out_err_bad_srat;
        if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                goto out_err;
-       hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
-       if (hotpluggable && !save_add_info())
+       if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
                goto out_err;
 
        start = ma->base_address;
@@ -290,12 +174,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 
        node_set(node, numa_nodes_parsed);
 
-       printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx] %s\n",
+       printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
               node, pxm,
-              (unsigned long long) start, (unsigned long long) end - 1,
-              hotpluggable ? "Hot Pluggable": "");
-
-       handle_movablemem(node, start, end, hotpluggable);
+              (unsigned long long) start, (unsigned long long) end - 1);
 
        return 0;
 out_err_bad_srat:
index 56ab749..94e7662 100644 (file)
@@ -162,6 +162,9 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
        struct msi_desc *msidesc;
        int *v;
 
+       if (type == PCI_CAP_ID_MSI && nvec > 1)
+               return 1;
+
        v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
        if (!v)
                return -ENOMEM;
@@ -220,6 +223,9 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
        struct msi_desc *msidesc;
        struct msi_msg msg;
 
+       if (type == PCI_CAP_ID_MSI && nvec > 1)
+               return 1;
+
        list_for_each_entry(msidesc, &dev->msi_list, list) {
                __read_msi_msg(msidesc, &msg);
                pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
@@ -263,6 +269,9 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
        int ret = 0;
        struct msi_desc *msidesc;
 
+       if (type == PCI_CAP_ID_MSI && nvec > 1)
+               return 1;
+
        list_for_each_entry(msidesc, &dev->msi_list, list) {
                struct physdev_map_pirq map_irq;
                domid_t domid;
index 70b2a3a..5f2ecaf 100644 (file)
@@ -69,11 +69,6 @@ struct efi_memory_map memmap;
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
-static inline bool efi_is_native(void)
-{
-       return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
-}
-
 unsigned long x86_efi_facility;
 
 /*
@@ -85,9 +80,10 @@ int efi_enabled(int facility)
 }
 EXPORT_SYMBOL(efi_enabled);
 
+static bool __initdata disable_runtime = false;
 static int __init setup_noefi(char *arg)
 {
-       clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+       disable_runtime = true;
        return 0;
 }
 early_param("noefi", setup_noefi);
@@ -734,7 +730,7 @@ void __init efi_init(void)
        if (!efi_is_native())
                pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
        else {
-               if (efi_runtime_init())
+               if (disable_runtime || efi_runtime_init())
                        return;
                set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
        }
index f2fe78f..e6d55f0 100644 (file)
@@ -25,7 +25,7 @@
 16     i386    lchown                  sys_lchown16
 17     i386    break
 18     i386    oldstat                 sys_stat
-19     i386    lseek                   sys_lseek                       sys32_lseek
+19     i386    lseek                   sys_lseek                       compat_sys_lseek
 20     i386    getpid                  sys_getpid
 21     i386    mount                   sys_mount                       compat_sys_mount
 22     i386    umount                  sys_oldumount
@@ -98,8 +98,8 @@
 89     i386    readdir                 sys_old_readdir                 compat_sys_old_readdir
 90     i386    mmap                    sys_old_mmap                    sys32_mmap
 91     i386    munmap                  sys_munmap
-92     i386    truncate                sys_truncate
-93     i386    ftruncate               sys_ftruncate
+92     i386    truncate                sys_truncate                    compat_sys_truncate
+93     i386    ftruncate               sys_ftruncate                   compat_sys_ftruncate
 94     i386    fchmod                  sys_fchmod
 95     i386    fchown                  sys_fchown16
 96     i386    getpriority             sys_getpriority
index 39928d1..c8e1c7b 100644 (file)
@@ -67,6 +67,7 @@
 #include <asm/hypervisor.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
+#include <asm/pat.h>
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
@@ -1417,7 +1418,14 @@ asmlinkage void __init xen_start_kernel(void)
         */
        acpi_numa = -1;
 #endif
-
+#ifdef CONFIG_X86_PAT
+       /*
+        * For right now disable the PAT. We should remove this once
+        * git commit 8eaffa67b43e99ae581622c5133e20b0f48bcef1
+        * (xen/pat: Disable PAT support for now) is reverted.
+        */
+       pat_enabled = 0;
+#endif
        /* Don't do the full vcpu_info placement stuff until we have a
           possible map and a non-dummy shared_info. */
        per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
index cb557be..35876ff 100644 (file)
@@ -9,13 +9,16 @@ config XTENSA
        select HAVE_IDE
        select GENERIC_ATOMIC64
        select HAVE_GENERIC_HARDIRQS
+       select HAVE_VIRT_TO_BUS
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
        select MODULES_USE_ELF_RELA
        select GENERIC_PCI_IOMAP
+       select ARCH_WANT_IPC_PARSE_VERSION
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select CLONE_BACKWARDS
        select IRQ_DOMAIN
+       select HAVE_OPROFILE
        help
          Xtensa processors are 32-bit RISC machines designed by Tensilica
          primarily for embedded systems.  These processors are both
@@ -31,7 +34,7 @@ config GENERIC_HWEIGHT
        def_bool y
 
 config GENERIC_GPIO
-       def_bool y
+       bool
 
 config ARCH_HAS_ILOG2_U32
        def_bool n
@@ -71,6 +74,12 @@ config XTENSA_VARIANT_DC232B
        help
          This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE).
 
+config XTENSA_VARIANT_DC233C
+       bool "dc233c - Diamond 233L Standard Core Rev.C (LE)"
+       select MMU
+       help
+         This variant refers to Tensilica's Diamond 233L Standard core Rev.C (LE).
+
 config XTENSA_VARIANT_S6000
        bool "s6000 - Stretch software configurable processor"
        select VARIANT_IRQ_SWITCH
@@ -197,6 +206,42 @@ config BUILTIN_DTB
        string "DTB to build into the kernel image"
        depends on OF
 
+config BLK_DEV_SIMDISK
+       tristate "Host file-based simulated block device support"
+       default n
+       depends on XTENSA_PLATFORM_ISS
+       help
+         Create block devices that map to files in the host file system.
+         Device binding to host file may be changed at runtime via proc
+         interface provided the device is not in use.
+
+config BLK_DEV_SIMDISK_COUNT
+       int "Number of host file-based simulated block devices"
+       range 1 10
+       depends on BLK_DEV_SIMDISK
+       default 2
+       help
+         This is the default minimal number of created block devices.
+         Kernel/module parameter 'simdisk_count' may be used to change this
+         value at runtime. More file names (but no more than 10) may be
+         specified as parameters, simdisk_count grows accordingly.
+
+config SIMDISK0_FILENAME
+       string "Host filename for the first simulated device"
+       depends on BLK_DEV_SIMDISK = y
+       default ""
+       help
+         Attach a first simdisk to a host file. Conventionally, this file
+         contains a root file system.
+
+config SIMDISK1_FILENAME
+       string "Host filename for the second simulated device"
+       depends on BLK_DEV_SIMDISK = y && BLK_DEV_SIMDISK_COUNT != 1
+       default ""
+       help
+         Another simulated disk in a host file for a buildroot-independent
+         storage.
+
 source "mm/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
index 0aa7270..136224b 100644 (file)
@@ -15,6 +15,7 @@
 
 variant-$(CONFIG_XTENSA_VARIANT_FSF)           := fsf
 variant-$(CONFIG_XTENSA_VARIANT_DC232B)                := dc232b
+variant-$(CONFIG_XTENSA_VARIANT_DC233C)                := dc233c
 variant-$(CONFIG_XTENSA_VARIANT_S6000)         := s6000
 variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM)  := custom
 
@@ -86,9 +87,10 @@ core-y               += arch/xtensa/kernel/ arch/xtensa/mm/
 core-y         += $(buildvar) $(buildplf)
 
 libs-y         += arch/xtensa/lib/ $(LIBGCC)
+drivers-$(CONFIG_OPROFILE)     += arch/xtensa/oprofile/
 
 ifneq ($(CONFIG_BUILTIN_DTB),"")
-core-$(CONFIG_OF) += arch/xtensa/boot/
+core-$(CONFIG_OF) += arch/xtensa/boot/dts/
 endif
 
 boot           := arch/xtensa/boot
@@ -101,7 +103,7 @@ zImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $@
 
 %.dtb:
-       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+       $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
 
 define archhelp
   @echo '* zImage      - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
index 818647e..64ffc4b 100644 (file)
@@ -25,18 +25,6 @@ bootdir-$(CONFIG_XTENSA_PLATFORM_ISS)         += boot-elf
 bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf boot-uboot
 bootdir-$(CONFIG_XTENSA_PLATFORM_XTFPGA) += boot-redboot boot-elf boot-uboot
 
-
-BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
-ifneq ($(CONFIG_BUILTIN_DTB),"")
-obj-$(CONFIG_OF) += $(BUILTIN_DTB)
-endif
-
-# Rule to build device tree blobs
-$(obj)/%.dtb: $(src)/dts/%.dts FORCE
-       $(call if_changed_dep,dtc)
-
-clean-files := *.dtb.S
-
 zImage Image: $(bootdir-y)
 
 $(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
new file mode 100644 (file)
index 0000000..5f711bb
--- /dev/null
@@ -0,0 +1,15 @@
+#
+# arch/xtensa/boot/dts/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB),"")
+obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+endif
+
+clean-files := *.dtb.S
index c3f2891..e7fb447 100644 (file)
@@ -7,7 +7,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
  */
 
 #ifndef _XTENSA_ATOMIC_H
 
 /*
  * This Xtensa implementation assumes that the right mechanism
- * for exclusion is for locking interrupts to level 1.
+ * for exclusion is for locking interrupts to level EXCM_LEVEL.
  *
  * Locking interrupts looks like this:
  *
- *    rsil a15, 1
+ *    rsil a15, LOCKLEVEL
  *    <code>
  *    wsr  a15, PS
  *    rsync
index aed7ad6..0593de6 100644 (file)
@@ -12,6 +12,7 @@
 #define _XTENSA_CHECKSUM_H
 
 #include <linux/in6.h>
+#include <asm/uaccess.h>
 #include <variant/core.h>
 
 /*
index 264d5fa..eacb25a 100644 (file)
@@ -84,7 +84,8 @@ typedef struct {
        elf_greg_t sar;
        elf_greg_t windowstart;
        elf_greg_t windowbase;
-       elf_greg_t reserved[8+48];
+       elf_greg_t threadptr;
+       elf_greg_t reserved[7+48];
        elf_greg_t a[64];
 } xtensa_gregset_t;
 
index c90ea5b..d7546c9 100644 (file)
@@ -410,6 +410,10 @@ typedef pte_t *pte_addr_t;
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTEP_MKDIRTY
 #define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #include <asm-generic/pgtable.h>
 
index e5fb6b0..7e409a5 100644 (file)
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
  */
 
 #ifndef _XTENSA_PROCESSOR_H
@@ -68,7 +68,7 @@
 /* LOCKLEVEL defines the interrupt level that masks all
  * general-purpose interrupts.
  */
-#define LOCKLEVEL 1
+#define LOCKLEVEL XCHAL_EXCM_LEVEL
 
 /* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
  * registers
index 682b1de..81f31bc 100644 (file)
@@ -38,6 +38,7 @@ struct pt_regs {
        unsigned long syscall;          /*  56 */
        unsigned long icountlevel;      /*  60 */
        unsigned long scompare1;        /*  64 */
+       unsigned long threadptr;        /*  68 */
 
        /* Additional configurable registers that are used by the compiler. */
        xtregs_opt_t xtregs_opt;
@@ -48,7 +49,7 @@ struct pt_regs {
        /* current register frame.
         * Note: The ESF for kernel exceptions ends after 16 registers!
         */
-       unsigned long areg[16];         /* 128 (64) */
+       unsigned long areg[16];
 };
 
 #include <variant/core.h>
index 76096a4..b24de67 100644 (file)
@@ -88,6 +88,7 @@
 #define PS_UM_BIT              5
 #define PS_EXCM_BIT            4
 #define PS_INTLEVEL_SHIFT      0
+#define PS_INTLEVEL_WIDTH      4
 #define PS_INTLEVEL_MASK       0x0000000F
 
 /*  DBREAKCn register fields.  */
index 405a8c4..8d5d9df 100644 (file)
@@ -74,7 +74,7 @@ static inline int strcmp(const char *__cs, const char *__ct)
                "beqz   %2, 2f\n\t"
                "beq    %2, %3, 1b\n"
                "2:\n\t"
-               "sub    %2, %3, %2"
+               "sub    %2, %2, %3"
                : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
                : "0" (__cs), "1" (__ct));
 
@@ -99,7 +99,7 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
                "beqz   %3, 2f\n\t"
                "beq    %2, %3, 1b\n"
                "2:\n\t"
-               "sub    %2, %3, %2"
+               "sub    %2, %2, %3"
                : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
                : "0" (__cs), "1" (__ct), "r" (__cs+__n));
 
index 175b3d5..9e85ce8 100644 (file)
@@ -5,7 +5,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
  */
 
 #ifndef _XTENSA_TIMEX_H
 #define _INTLEVEL(x)   XCHAL_INT ## x ## _LEVEL
 #define INTLEVEL(x)    _INTLEVEL(x)
 
-#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) == 1
+#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     0
 # define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) == 1
+#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     1
 # define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) == 1
+#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     2
 # define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
 #else
index 54f7044..b5464ef 100644 (file)
 extern void * __init trap_set_handler(int cause, void *handler);
 extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
 
+static inline void spill_registers(void)
+{
+       unsigned int a0, ps;
+
+       __asm__ __volatile__ (
+               "movi   a14, " __stringify(PS_EXCM_BIT | LOCKLEVEL) "\n\t"
+               "mov    a12, a0\n\t"
+               "rsr    a13, sar\n\t"
+               "xsr    a14, ps\n\t"
+               "movi   a0, _spill_registers\n\t"
+               "rsync\n\t"
+               "callx0 a0\n\t"
+               "mov    a0, a12\n\t"
+               "wsr    a13, sar\n\t"
+               "wsr    a14, ps\n\t"
+               : : "a" (&a0), "a" (&ps)
+#if defined(CONFIG_FRAME_POINTER)
+               : "a2", "a3", "a4",       "a11", "a12", "a13", "a14", "a15",
+#else
+               : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+#endif
+                 "memory");
+}
+
 #endif /* _XTENSA_TRAPS_H */
index dacf716..586756e 100644 (file)
@@ -102,16 +102,7 @@ typedef struct {
 
 #ifndef __ASSEMBLY__
 
-#define SIG_BLOCK          0   /* for blocking signals */
-#define SIG_UNBLOCK        1   /* for unblocking signals */
-#define SIG_SETMASK        2   /* for setting the signal mask */
-
-/* Type of a signal handler.  */
-typedef void (*__sighandler_t)(int);
-
-#define SIG_DFL        ((__sighandler_t)0)     /* default signal handling */
-#define SIG_IGN        ((__sighandler_t)1)     /* ignore signal */
-#define SIG_ERR        ((__sighandler_t)-1)    /* error return from signal */
+#include <asm-generic/signal-defs.h>
 
 #ifndef __KERNEL__
 
index 19fac3f..51940fe 100644 (file)
@@ -728,8 +728,13 @@ __SYSCALL(330, sys_prlimit64, 4)
 #define __NR_kcmp                              331
 __SYSCALL(331, sys_kcmp, 5)
 
+#define __NR_finit_module                      332
+__SYSCALL(332, sys_finit_module, 3)
 
-#define __NR_syscall_count                     332
+#define __NR_accept4                           333
+__SYSCALL(333, sys_accept4, 4)
+
+#define __NR_syscall_count                     334
 
 /*
  * sysxtensa syscall handler
index 0701fad..1915c7c 100644 (file)
@@ -42,6 +42,7 @@ int main(void)
        DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
        DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
        DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1));
+       DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr));
        DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
        DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
        DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
index 3777fec..63845f9 100644 (file)
@@ -7,7 +7,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2007 by Tensilica Inc.
+ * Copyright (C) 2004 - 2008 by Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  *
@@ -130,6 +130,11 @@ _user_exception:
        s32i    a3, a1, PT_SAR
        s32i    a2, a1, PT_ICOUNTLEVEL
 
+#if XCHAL_HAVE_THREADPTR
+       rur     a2, threadptr
+       s32i    a2, a1, PT_THREADPTR
+#endif
+
        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
 
@@ -349,15 +354,16 @@ common_exception:
         * so we can allow exceptions and interrupts (*) again.
         * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
         *
-        * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
-        *     (interrupts disabled) and if this exception is not an interrupt.
+        * (*) We only allow interrupts of higher priority than current IRQ
         */
 
        rsr     a3, ps
        addi    a0, a0, -4
        movi    a2, 1
-       extui   a3, a3, 0, 1            # a3 = PS.INTLEVEL[0]
-       moveqz  a3, a2, a0              # a3 = 1 iff interrupt exception
+       extui   a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+                                       # a3 = PS.INTLEVEL
+       movnez  a2, a3, a3              # a2 = 1: level-1, > 1: high priority
+       moveqz  a3, a2, a0              # a3 = IRQ level iff interrupt
        movi    a2, 1 << PS_WOE_BIT
        or      a3, a3, a2
        rsr     a0, exccause
@@ -398,7 +404,7 @@ common_exception:
        callx4  a4
 
        /* Jump here for exception exit */
-
+       .global common_exception_return
 common_exception_return:
 
        /* Jump if we are returning from kernel exceptions. */
@@ -509,6 +515,11 @@ user_exception_exit:
         *       (if we have restored WSBITS-1 frames).
         */
 
+#if XCHAL_HAVE_THREADPTR
+       l32i    a3, a1, PT_THREADPTR
+       wur     a3, threadptr
+#endif
+
 2:     j       common_exception_exit
 
        /* This is the kernel exception exit.
@@ -641,19 +652,51 @@ common_exception_exit:
 
        l32i    a0, a1, PT_DEPC
        l32i    a3, a1, PT_AREG3
+       _bltui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+
+       wsr     a0, depc
        l32i    a2, a1, PT_AREG2
-       _bgeui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+       l32i    a0, a1, PT_AREG0
+       l32i    a1, a1, PT_AREG1
+       rfde
 
+1:
        /* Restore a0...a3 and return */
 
+       rsr     a0, ps
+       extui   a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+       movi    a0, 2f
+       slli    a2, a2, 4
+       add     a0, a2, a0
+       l32i    a2, a1, PT_AREG2
+       jx      a0
+
+       .macro  irq_exit_level level
+       .align  16
+       .if     XCHAL_EXCM_LEVEL >= \level
+       l32i    a0, a1, PT_PC
+       wsr     a0, epc\level
        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1
-       rfe
+       rfi     \level
+       .endif
+       .endm
 
-1:     wsr     a0, depc
+       .align  16
+2:
        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1
-       rfde
+       rfe
+
+       .align  16
+       /* no rfi for level-1 irq, handled by rfe above*/
+       nop
+
+       irq_exit_level 2
+       irq_exit_level 3
+       irq_exit_level 4
+       irq_exit_level 5
+       irq_exit_level 6
 
 ENDPROC(kernel_exception)
 
@@ -753,7 +796,7 @@ ENTRY(unrecoverable_exception)
        wsr     a1, windowbase
        rsync
 
-       movi    a1, (1 << PS_WOE_BIT) | 1
+       movi    a1, (1 << PS_WOE_BIT) | LOCKLEVEL
        wsr     a1, ps
        rsync
 
@@ -1474,7 +1517,7 @@ ENTRY(_spill_registers)
        l32i    a1, a3, EXC_TABLE_KSTK
        wsr     a3, excsave1
 
-       movi    a4, (1 << PS_WOE_BIT) | 1
+       movi    a4, (1 << PS_WOE_BIT) | LOCKLEVEL
        wsr     a4, ps
        rsync
 
@@ -1922,7 +1965,7 @@ ENTRY(_switch_to)
        s32i    a6, a3, EXC_TABLE_FIXUP
        s32i    a7, a3, EXC_TABLE_KSTK
 
-       /* restore context of the task that 'next' addresses */
+       /* restore context of the task 'next' */
 
        l32i    a0, a13, THREAD_RA      # restore return address
        l32i    a1, a13, THREAD_SP      # restore stack pointer
index 91d9095..df88f98 100644 (file)
@@ -7,7 +7,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -128,14 +128,14 @@ ENTRY(_startup)
        wsr     a0, cpenable
 #endif
 
-       /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
+       /* Set PS.INTLEVEL=LOCKLEVEL, PS.WOE=0, kernel stack, PS.EXCM=0
         *
         * Note: PS.EXCM must be cleared before using any loop
         *       instructions; otherwise, they are silently disabled, and
         *       at most one iteration of the loop is executed.
         */
 
-       movi    a1, 1
+       movi    a1, LOCKLEVEL
        wsr     a1, ps
        rsync
 
@@ -211,7 +211,8 @@ ENTRY(_startup)
        movi    a1, init_thread_union
        addi    a1, a1, KERNEL_STACK_SIZE
 
-       movi    a2, 0x00040001          # WOE=1, INTLEVEL=1, UM=0
+       movi    a2, (1 << PS_WOE_BIT) | LOCKLEVEL
+                                       # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
        wsr     a2, ps                  # (enable reg-windows; progmode stack)
        rsync
 
index 0dd5784..5cd82e9 100644 (file)
@@ -259,9 +259,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
                        memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
                               &regs->areg[XCHAL_NUM_AREGS - len/4], len);
                }
-// FIXME: we need to set THREADPTR in thread_info...
+
+               /* The thread pointer is passed in the '4th argument' (= a5) */
                if (clone_flags & CLONE_SETTLS)
-                       childregs->areg[2] = childregs->areg[6];
+                       childregs->threadptr = childregs->areg[5];
        } else {
                p->thread.ra = MAKE_RA_FOR_CALL(
                                (unsigned long)ret_from_kernel_thread, 1);
index 61fb2e9..562fac6 100644 (file)
@@ -53,9 +53,8 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
 {
        struct pt_regs *regs = task_pt_regs(child);
        xtensa_gregset_t __user *gregset = uregs;
-       unsigned long wm = regs->wmask;
        unsigned long wb = regs->windowbase;
-       int live, i;
+       int i;
 
        if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
                return -EIO;
@@ -67,13 +66,11 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
        __put_user(regs->lcount, &gregset->lcount);
        __put_user(regs->windowstart, &gregset->windowstart);
        __put_user(regs->windowbase, &gregset->windowbase);
+       __put_user(regs->threadptr, &gregset->threadptr);
 
-       live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
-
-       for (i = 0; i < live; i++)
-               __put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
-       for (i = XCHAL_NUM_AREGS - (wm >> 4) * 4; i < XCHAL_NUM_AREGS; i++)
-               __put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
+       for (i = 0; i < XCHAL_NUM_AREGS; i++)
+               __put_user(regs->areg[i],
+                               gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
 
        return 0;
 }
@@ -84,7 +81,7 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
        xtensa_gregset_t *gregset = uregs;
        const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
        unsigned long ps;
-       unsigned long wb;
+       unsigned long wb, ws;
 
        if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
                return -EIO;
@@ -94,21 +91,33 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
        __get_user(regs->lbeg, &gregset->lbeg);
        __get_user(regs->lend, &gregset->lend);
        __get_user(regs->lcount, &gregset->lcount);
-       __get_user(regs->windowstart, &gregset->windowstart);
+       __get_user(ws, &gregset->windowstart);
        __get_user(wb, &gregset->windowbase);
+       __get_user(regs->threadptr, &gregset->threadptr);
 
        regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
 
        if (wb >= XCHAL_NUM_AREGS / 4)
                return -EFAULT;
 
-       regs->windowbase = wb;
+       if (wb != regs->windowbase || ws != regs->windowstart) {
+               unsigned long rotws, wmask;
+
+               rotws = (((ws | (ws << WSBITS)) >> wb) &
+                               ((1 << WSBITS) - 1)) & ~1;
+               wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
+                       (rotws & 0xF) | 1;
+               regs->windowbase = wb;
+               regs->windowstart = ws;
+               regs->wmask = wmask;
+       }
 
        if (wb != 0 &&  __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
-                                        gregset->a, wb * 16))
+                               gregset->a, wb * 16))
                return -EFAULT;
 
-       if (__copy_from_user(regs->areg, gregset->a + wb*4, (WSBITS-wb) * 16))
+       if (__copy_from_user(regs->areg, gregset->a + wb * 4,
+                               (WSBITS - wb) * 16))
                return -EFAULT;
 
        return 0;
index 24c1a57..6dd25ec 100644 (file)
@@ -328,6 +328,27 @@ extern char _UserExceptionVector_literal_start;
 extern char _UserExceptionVector_text_end;
 extern char _DoubleExceptionVector_literal_start;
 extern char _DoubleExceptionVector_text_end;
+#if XCHAL_EXCM_LEVEL >= 2
+extern char _Level2InterruptVector_text_start;
+extern char _Level2InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+extern char _Level3InterruptVector_text_start;
+extern char _Level3InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+extern char _Level4InterruptVector_text_start;
+extern char _Level4InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+extern char _Level5InterruptVector_text_start;
+extern char _Level5InterruptVector_text_end;
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+extern char _Level6InterruptVector_text_start;
+extern char _Level6InterruptVector_text_end;
+#endif
+
 
 
 #ifdef CONFIG_S32C1I_SELFTEST
@@ -482,6 +503,27 @@ void __init setup_arch(char **cmdline_p)
        mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
                    __pa(&_DoubleExceptionVector_text_end), 0);
 
+#if XCHAL_EXCM_LEVEL >= 2
+       mem_reserve(__pa(&_Level2InterruptVector_text_start),
+                   __pa(&_Level2InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+       mem_reserve(__pa(&_Level3InterruptVector_text_start),
+                   __pa(&_Level3InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+       mem_reserve(__pa(&_Level4InterruptVector_text_start),
+                   __pa(&_Level4InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+       mem_reserve(__pa(&_Level5InterruptVector_text_start),
+                   __pa(&_Level5InterruptVector_text_end), 0);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+       mem_reserve(__pa(&_Level6InterruptVector_text_start),
+                   __pa(&_Level6InterruptVector_text_end), 0);
+#endif
+
        bootmem_init();
 
 #ifdef CONFIG_OF
index d7590dd..718eca1 100644 (file)
@@ -337,7 +337,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        struct rt_sigframe *frame;
        int err = 0;
        int signal;
-       unsigned long sp, ra;
+       unsigned long sp, ra, tp;
 
        sp = regs->areg[1];
 
@@ -391,7 +391,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         * Return context not modified until this point.
         */
 
-       /* Set up registers for signal handler */
+       /* Set up registers for signal handler; preserve the threadptr */
+       tp = regs->threadptr;
        start_thread(regs, (unsigned long) ka->sa.sa_handler,
                     (unsigned long) frame);
 
@@ -402,6 +403,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->areg[6] = (unsigned long) signal;
        regs->areg[7] = (unsigned long) &frame->info;
        regs->areg[8] = (unsigned long) &frame->uc;
+       regs->threadptr = tp;
 
        /* Set access mode to USER_DS.  Nomenclature is outdated, but
         * functionality is used in uaccess.h
index 54fa842..5d3f7a1 100644 (file)
@@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
 #include <uapi/asm/unistd.h>
 };
 
+#define COLOUR_ALIGN(addr, pgoff) \
+       ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+        (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 {
        unsigned long ret;
@@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
 {
        return sys_fadvise64_64(fd, offset, len, advice);
 }
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct vm_area_struct *vmm;
+
+       if (flags & MAP_FIXED) {
+               /* We do not accept a shared mapping if it would violate
+                * cache aliasing constraints.
+                */
+               if ((flags & MAP_SHARED) &&
+                               ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+                       return -EINVAL;
+               return addr;
+       }
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+       if (!addr)
+               addr = TASK_UNMAPPED_BASE;
+
+       if (flags & MAP_SHARED)
+               addr = COLOUR_ALIGN(addr, pgoff);
+       else
+               addr = PAGE_ALIGN(addr);
+
+       for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+               /* At this point:  (!vmm || addr < vmm->vm_end). */
+               if (TASK_SIZE - len < addr)
+                       return -ENOMEM;
+               if (!vmm || addr + len <= vmm->vm_start)
+                       return addr;
+               addr = vmm->vm_end;
+               if (flags & MAP_SHARED)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+       }
+}
index ded955d..923db5c 100644 (file)
@@ -37,6 +37,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
+#include <asm/traps.h>
 
 #ifdef CONFIG_KGDB
 extern int gdb_enter;
@@ -193,28 +194,49 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
 }
 
 /*
- * Level-1 interrupt.
- * We currently have no priority encoding.
+ * IRQ handler.
+ * PS.INTLEVEL is the current IRQ priority level.
  */
 
-unsigned long ignored_level1_interrupts;
 extern void do_IRQ(int, struct pt_regs *);
 
-void do_interrupt (struct pt_regs *regs)
+void do_interrupt(struct pt_regs *regs)
 {
-       unsigned long intread = get_sr (interrupt);
-       unsigned long intenable = get_sr (intenable);
-       int i, mask;
-
-       /* Handle all interrupts (no priorities).
-        * (Clear the interrupt before processing, in case it's
-        *  edge-triggered or software-generated)
-        */
+       static const unsigned int_level_mask[] = {
+               0,
+               XCHAL_INTLEVEL1_MASK,
+               XCHAL_INTLEVEL2_MASK,
+               XCHAL_INTLEVEL3_MASK,
+               XCHAL_INTLEVEL4_MASK,
+               XCHAL_INTLEVEL5_MASK,
+               XCHAL_INTLEVEL6_MASK,
+               XCHAL_INTLEVEL7_MASK,
+       };
+       unsigned level = get_sr(ps) & PS_INTLEVEL_MASK;
+
+       if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask)))
+               return;
 
-       for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
-               if (mask & (intread & intenable)) {
-                       set_sr (mask, intclear);
-                       do_IRQ (i,regs);
+       for (;;) {
+               unsigned intread = get_sr(interrupt);
+               unsigned intenable = get_sr(intenable);
+               unsigned int_at_level = intread & intenable &
+                       int_level_mask[level];
+
+               if (!int_at_level)
+                       return;
+
+               /*
+                * Clear the interrupt before processing, in case it's
+                *  edge-triggered or software-generated
+                */
+               while (int_at_level) {
+                       unsigned i = __ffs(int_at_level);
+                       unsigned mask = 1 << i;
+
+                       int_at_level ^= mask;
+                       set_sr(mask, intclear);
+                       do_IRQ(i, regs);
                }
        }
 }
@@ -392,26 +414,6 @@ static __always_inline unsigned long *stack_pointer(struct task_struct *task)
        return sp;
 }
 
-static inline void spill_registers(void)
-{
-       unsigned int a0, ps;
-
-       __asm__ __volatile__ (
-               "movi   a14, " __stringify(PS_EXCM_BIT | 1) "\n\t"
-               "mov    a12, a0\n\t"
-               "rsr    a13, sar\n\t"
-               "xsr    a14, ps\n\t"
-               "movi   a0, _spill_registers\n\t"
-               "rsync\n\t"
-               "callx0 a0\n\t"
-               "mov    a0, a12\n\t"
-               "wsr    a13, sar\n\t"
-               "wsr    a14, ps\n\t"
-               :: "a" (&a0), "a" (&ps)
-               : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
-                 "memory");
-}
-
 void show_trace(struct task_struct *task, unsigned long *sp)
 {
        unsigned long a0, a1, pc;
index 68df35f..82109b4 100644 (file)
@@ -10,7 +10,7 @@
  * Public License.  See the file "COPYING" in the main directory of
  * this archive for more details.
  *
- * Copyright (C) 2005 Tensilica, Inc.
+ * Copyright (C) 2005 - 2008 Tensilica, Inc.
  *
  * Chris Zankel <chris@zankel.net>
  *
@@ -366,6 +366,41 @@ ENTRY(_DebugInterruptVector)
 ENDPROC(_DebugInterruptVector)
 
 
+
+/*
+ * Medium priority level interrupt vectors
+ *
+ * Each takes less than 16 (0x10) bytes, no literals, by placing
+ * the extra 8 bytes that would otherwise be required in the window
+ * vectors area where there is space.  With relocatable vectors,
+ * all vectors are within ~ 4 kB range of each other, so we can
+ * simply jump (J) to another vector without having to use JX.
+ *
+ * common_exception code gets current IRQ level in PS.INTLEVEL
+ * and preserves it for the IRQ handling time.
+ */
+
+       .macro  irq_entry_level level
+
+       .if     XCHAL_EXCM_LEVEL >= \level
+       .section .Level\level\()InterruptVector.text, "ax"
+ENTRY(_Level\level\()InterruptVector)
+       wsr     a0, epc1
+       rsr     a0, epc\level
+       xsr     a0, epc1
+                                       # branch to user or kernel vector
+       j       _SimulateUserKernelVectorException
+       .endif
+
+       .endm
+
+       irq_entry_level 2
+       irq_entry_level 3
+       irq_entry_level 4
+       irq_entry_level 5
+       irq_entry_level 6
+
+
 /* Window overflow and underflow handlers.
  * The handlers must be 64 bytes apart, first starting with the underflow
  * handlers underflow-4 to underflow-12, then the overflow handlers
@@ -396,6 +431,26 @@ ENTRY_ALIGN64(_WindowOverflow4)
 ENDPROC(_WindowOverflow4)
 
 
+#if XCHAL_EXCM_LEVEL >= 2
+       /*  Not a window vector - but a convenient location
+        *  (where we know there's space) for continuation of
+        *  medium priority interrupt dispatch code.
+        *  On entry here, a0 contains PS, and EPC2 contains saved a0:
+        */
+       .align 4
+_SimulateUserKernelVectorException:
+       wsr     a0, excsave2
+       movi    a0, 4                   # LEVEL1_INTERRUPT cause
+       wsr     a0, exccause
+       rsr     a0, ps
+       bbsi.l  a0, PS_UM_BIT, 1f       # branch if user mode
+       rsr     a0, excsave2            # restore a0
+       j       _KernelExceptionVector  # simulate kernel vector exception
+1:     rsr     a0, excsave2            # restore a0
+       j       _UserExceptionVector    # simulate user vector exception
+#endif
+
+
 /* 4-Register Window Underflow Vector (Handler) */
 
 ENTRY_ALIGN64(_WindowUnderflow4)
index 255154f..1469524 100644 (file)
@@ -7,7 +7,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2008 Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
@@ -134,6 +134,26 @@ SECTIONS
 
     RELOCATE_ENTRY(_WindowVectors_text,
                   .WindowVectors.text);
+#if XCHAL_EXCM_LEVEL >= 2
+    RELOCATE_ENTRY(_Level2InterruptVector_text,
+                  .Level2InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+    RELOCATE_ENTRY(_Level3InterruptVector_text,
+                  .Level3InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+    RELOCATE_ENTRY(_Level4InterruptVector_text,
+                  .Level4InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+    RELOCATE_ENTRY(_Level5InterruptVector_text,
+                  .Level5InterruptVector.text);
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+    RELOCATE_ENTRY(_Level6InterruptVector_text,
+                  .Level6InterruptVector.text);
+#endif
     RELOCATE_ENTRY(_KernelExceptionVector_text,
                   .KernelExceptionVector.text);
     RELOCATE_ENTRY(_UserExceptionVector_text,
@@ -177,11 +197,53 @@ SECTIONS
                  XCHAL_DEBUG_VECTOR_VADDR,
                  4,
                  .DebugInterruptVector.literal)
+#undef LAST
+#define LAST   .DebugInterruptVector.text
+#if XCHAL_EXCM_LEVEL >= 2
+  SECTION_VECTOR (_Level2InterruptVector_text,
+                 .Level2InterruptVector.text,
+                 XCHAL_INTLEVEL2_VECTOR_VADDR,
+                 SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST  .Level2InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 3
+  SECTION_VECTOR (_Level3InterruptVector_text,
+                 .Level3InterruptVector.text,
+                 XCHAL_INTLEVEL3_VECTOR_VADDR,
+                 SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST  .Level3InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 4
+  SECTION_VECTOR (_Level4InterruptVector_text,
+                 .Level4InterruptVector.text,
+                 XCHAL_INTLEVEL4_VECTOR_VADDR,
+                 SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST  .Level4InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 5
+  SECTION_VECTOR (_Level5InterruptVector_text,
+                 .Level5InterruptVector.text,
+                 XCHAL_INTLEVEL5_VECTOR_VADDR,
+                 SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST  .Level5InterruptVector.text
+#endif
+#if XCHAL_EXCM_LEVEL >= 6
+  SECTION_VECTOR (_Level6InterruptVector_text,
+                 .Level6InterruptVector.text,
+                 XCHAL_INTLEVEL6_VECTOR_VADDR,
+                 SIZEOF(LAST), LAST)
+# undef LAST
+# define LAST  .Level6InterruptVector.text
+#endif
   SECTION_VECTOR (_KernelExceptionVector_literal,
                  .KernelExceptionVector.literal,
                  XCHAL_KERNEL_VECTOR_VADDR - 4,
-                 SIZEOF(.DebugInterruptVector.text),
-                 .DebugInterruptVector.text)
+                 SIZEOF(LAST), LAST)
+#undef LAST
   SECTION_VECTOR (_KernelExceptionVector_text,
                  .KernelExceptionVector.text,
                  XCHAL_KERNEL_VECTOR_VADDR,
diff --git a/arch/xtensa/oprofile/Makefile b/arch/xtensa/oprofile/Makefile
new file mode 100644 (file)
index 0000000..69ffbe8
--- /dev/null
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+               oprof.o cpu_buffer.o buffer_sync.o \
+               event_buffer.o oprofile_files.o \
+               oprofilefs.o oprofile_stats.o \
+               timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c
new file mode 100644 (file)
index 0000000..66f32ee
--- /dev/null
@@ -0,0 +1,171 @@
+/**
+ * @file backtrace.c
+ *
+ * @remark Copyright 2008 Tensilica Inc.
+ * @remark Read the file COPYING
+ *
+ */
+
+#include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+
+/* Address of common_exception_return, used to check the
+ * transition from kernel to user space.
+ */
+extern int common_exception_return;
+
+/* A struct that maps to the part of the frame containing the a0 and
+ * a1 registers.
+ */
+struct frame_start {
+       unsigned long a0;
+       unsigned long a1;
+};
+
+static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth)
+{
+       unsigned long windowstart = regs->windowstart;
+       unsigned long windowbase = regs->windowbase;
+       unsigned long a0 = regs->areg[0];
+       unsigned long a1 = regs->areg[1];
+       unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc);
+       int index;
+
+       /* First add the current PC to the trace. */
+       if (pc != 0 && pc <= TASK_SIZE)
+               oprofile_add_trace(pc);
+       else
+               return;
+
+       /* Two steps:
+        *
+        * 1. Look through the register window for the
+        * previous PCs in the call trace.
+        *
+        * 2. Look on the stack.
+        */
+
+       /* Step 1.  */
+       /* Rotate WINDOWSTART to move the bit corresponding to
+        * the current window to the bit #0.
+        */
+       windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
+
+       /* Look for bits that are set, they correspond to
+        * valid windows.
+        */
+       for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
+               if (windowstart & (1 << index)) {
+                       /* Read a0 and a1 from the
+                        * corresponding position in AREGs.
+                        */
+                       a0 = regs->areg[index * 4];
+                       a1 = regs->areg[index * 4 + 1];
+                       /* Get the PC from a0 and a1. */
+                       pc = MAKE_PC_FROM_RA(a0, pc);
+
+                       /* Add the PC to the trace. */
+                       if (pc != 0 && pc <= TASK_SIZE)
+                               oprofile_add_trace(pc);
+                       else
+                               return;
+               }
+
+       /* Step 2. */
+       /* We are done with the register window, we need to
+        * look through the stack.
+        */
+       if (depth > 0) {
+               /* Start from the a1 register. */
+               /* a1 = regs->areg[1]; */
+               while (a0 != 0 && depth--) {
+
+                       struct frame_start frame_start;
+                       /* Get the location for a1, a0 for the
+                        * previous frame from the current a1.
+                        */
+                       unsigned long *psp = (unsigned long *)a1;
+                       psp -= 4;
+
+                       /* Check if the region is OK to access. */
+                       if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
+                               return;
+                       /* Copy a1, a0 from user space stack frame. */
+                       if (__copy_from_user_inatomic(&frame_start, psp,
+                                               sizeof(frame_start)))
+                               return;
+
+                       a0 = frame_start.a0;
+                       a1 = frame_start.a1;
+                       pc = MAKE_PC_FROM_RA(a0, pc);
+
+                       if (pc != 0 && pc <= TASK_SIZE)
+                               oprofile_add_trace(pc);
+                       else
+                               return;
+               }
+       }
+}
+
+static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth)
+{
+       unsigned long pc = regs->pc;
+       unsigned long *psp;
+       unsigned long sp_start, sp_end;
+       unsigned long a0 = regs->areg[0];
+       unsigned long a1 = regs->areg[1];
+
+       sp_start = a1 & ~(THREAD_SIZE-1);
+       sp_end = sp_start + THREAD_SIZE;
+
+       /* Spill the register window to the stack first. */
+       spill_registers();
+
+       /* Read the stack frames one by one and create the PC
+        * from the a0 and a1 registers saved there.
+        */
+       while (a1 > sp_start && a1 < sp_end && depth--) {
+               pc = MAKE_PC_FROM_RA(a0, pc);
+
+               /* Add the PC to the trace. */
+               if (kernel_text_address(pc))
+                       oprofile_add_trace(pc);
+
+               if (pc == (unsigned long) &common_exception_return) {
+                       regs = (struct pt_regs *)a1;
+                       if (user_mode(regs)) {
+                               pc = regs->pc;
+                               if (pc != 0 && pc <= TASK_SIZE)
+                                       oprofile_add_trace(pc);
+                               else
+                                       return;
+                               return xtensa_backtrace_user(regs, depth);
+                       }
+                       a0 = regs->areg[0];
+                       a1 = regs->areg[1];
+                       continue;
+               }
+
+               psp = (unsigned long *)a1;
+
+               a0 = *(psp - 4);
+               a1 = *(psp - 3);
+
+               if (a1 <= (unsigned long)psp)
+                       return;
+
+       }
+       return;
+}
+
+void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
+{
+       if (user_mode(regs))
+               xtensa_backtrace_user(regs, depth);
+       else
+               xtensa_backtrace_kernel(regs, depth);
+}
diff --git a/arch/xtensa/oprofile/init.c b/arch/xtensa/oprofile/init.c
new file mode 100644 (file)
index 0000000..a67eea3
--- /dev/null
@@ -0,0 +1,26 @@
+/**
+ * @file init.c
+ *
+ * @remark Copyright 2008 Tensilica Inc.
+ * @remark Read the file COPYING
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/oprofile.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+
+extern void xtensa_backtrace(struct pt_regs *const regs, unsigned int depth);
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+       ops->backtrace = xtensa_backtrace;
+       return -ENODEV;
+}
+
+
+void oprofile_arch_exit(void)
+{
+}
index b7d1a5c..d2369b7 100644 (file)
@@ -6,3 +6,4 @@
 
 obj-y                  = console.o setup.o
 obj-$(CONFIG_NET)      += network.o
+obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
new file mode 100644 (file)
index 0000000..f58ffc3
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+ * arch/xtensa/platforms/iss/simdisk.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2013 Tensilica Inc.
+ *   Authors   Victor Prupis
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include <platform/simcall.h>
+
+#define SIMDISK_MAJOR 240
+#define SECTOR_SHIFT 9
+#define SIMDISK_MINORS 1
+#define MAX_SIMDISK_COUNT 10
+
+struct simdisk {
+       const char *filename;
+       spinlock_t lock;
+       struct request_queue *queue;
+       struct gendisk *gd;
+       struct proc_dir_entry *procfile;
+       int users;
+       unsigned long size;
+       int fd;
+};
+
+
+static int simdisk_count = CONFIG_BLK_DEV_SIMDISK_COUNT;
+module_param(simdisk_count, int, S_IRUGO);
+MODULE_PARM_DESC(simdisk_count, "Number of simdisk units.");
+
+static int n_files;
+static const char *filename[MAX_SIMDISK_COUNT] = {
+#ifdef CONFIG_SIMDISK0_FILENAME
+       CONFIG_SIMDISK0_FILENAME,
+#ifdef CONFIG_SIMDISK1_FILENAME
+       CONFIG_SIMDISK1_FILENAME,
+#endif
+#endif
+};
+
+static int simdisk_param_set_filename(const char *val,
+               const struct kernel_param *kp)
+{
+       if (n_files < ARRAY_SIZE(filename))
+               filename[n_files++] = val;
+       else
+               return -EINVAL;
+       return 0;
+}
+
+static const struct kernel_param_ops simdisk_param_ops_filename = {
+       .set = simdisk_param_set_filename,
+};
+module_param_cb(filename, &simdisk_param_ops_filename, &n_files, 0);
+MODULE_PARM_DESC(filename, "Backing storage filename.");
+
+static int simdisk_major = SIMDISK_MAJOR;
+
+static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
+               unsigned long nsect, char *buffer, int write)
+{
+       unsigned long offset = sector << SECTOR_SHIFT;
+       unsigned long nbytes = nsect << SECTOR_SHIFT;
+
+       if (offset > dev->size || dev->size - offset < nbytes) {
+               pr_notice("Beyond-end %s (%ld %ld)\n",
+                               write ? "write" : "read", offset, nbytes);
+               return;
+       }
+
+       spin_lock(&dev->lock);
+       while (nbytes > 0) {
+               unsigned long io;
+
+               __simc(SYS_lseek, dev->fd, offset, SEEK_SET, 0, 0);
+               if (write)
+                       io = simc_write(dev->fd, buffer, nbytes);
+               else
+                       io = simc_read(dev->fd, buffer, nbytes);
+               if (io == -1) {
+                       pr_err("SIMDISK: IO error %d\n", errno);
+                       break;
+               }
+               buffer += io;
+               offset += io;
+               nbytes -= io;
+       }
+       spin_unlock(&dev->lock);
+}
+
+static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
+{
+       int i;
+       struct bio_vec *bvec;
+       sector_t sector = bio->bi_sector;
+
+       bio_for_each_segment(bvec, bio, i) {
+               char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
+               unsigned len = bvec->bv_len >> SECTOR_SHIFT;
+
+               simdisk_transfer(dev, sector, len, buffer,
+                               bio_data_dir(bio) == WRITE);
+               sector += len;
+               __bio_kunmap_atomic(bio, KM_USER0);
+       }
+       return 0;
+}
+
+static void simdisk_make_request(struct request_queue *q, struct bio *bio)
+{
+       struct simdisk *dev = q->queuedata;
+       int status = simdisk_xfer_bio(dev, bio);
+       bio_endio(bio, status);
+}
+
+
+static int simdisk_open(struct block_device *bdev, fmode_t mode)
+{
+       struct simdisk *dev = bdev->bd_disk->private_data;
+
+       spin_lock(&dev->lock);
+       if (!dev->users)
+               check_disk_change(bdev);
+       ++dev->users;
+       spin_unlock(&dev->lock);
+       return 0;
+}
+
+static int simdisk_release(struct gendisk *disk, fmode_t mode)
+{
+       struct simdisk *dev = disk->private_data;
+       spin_lock(&dev->lock);
+       --dev->users;
+       spin_unlock(&dev->lock);
+       return 0;
+}
+
+static const struct block_device_operations simdisk_ops = {
+       .owner          = THIS_MODULE,
+       .open           = simdisk_open,
+       .release        = simdisk_release,
+};
+
+static struct simdisk *sddev;
+static struct proc_dir_entry *simdisk_procdir;
+
+static int simdisk_attach(struct simdisk *dev, const char *filename)
+{
+       int err = 0;
+
+       filename = kstrdup(filename, GFP_KERNEL);
+       if (filename == NULL)
+               return -ENOMEM;
+
+       spin_lock(&dev->lock);
+
+       if (dev->fd != -1) {
+               err = -EBUSY;
+               goto out;
+       }
+       dev->fd = simc_open(filename, O_RDWR, 0);
+       if (dev->fd == -1) {
+               pr_err("SIMDISK: Can't open %s: %d\n", filename, errno);
+               err = -ENODEV;
+               goto out;
+       }
+       dev->size = __simc(SYS_lseek, dev->fd, 0, SEEK_END, 0, 0);
+       set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
+       dev->filename = filename;
+       pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);
+out:
+       if (err)
+               kfree(filename);
+       spin_unlock(&dev->lock);
+
+       return err;
+}
+
+static int simdisk_detach(struct simdisk *dev)
+{
+       int err = 0;
+
+       spin_lock(&dev->lock);
+
+       if (dev->users != 0) {
+               err = -EBUSY;
+       } else if (dev->fd != -1) {
+               if (simc_close(dev->fd)) {
+                       pr_err("SIMDISK: error closing %s: %d\n",
+                                       dev->filename, errno);
+                       err = -EIO;
+               } else {
+                       pr_info("SIMDISK: %s detached from %s\n",
+                                       dev->gd->disk_name, dev->filename);
+                       dev->fd = -1;
+                       kfree(dev->filename);
+                       dev->filename = NULL;
+               }
+       }
+       spin_unlock(&dev->lock);
+       return err;
+}
+
+static int proc_read_simdisk(char *page, char **start, off_t off,
+               int count, int *eof, void *data)
+{
+       int len;
+       struct simdisk *dev = (struct simdisk *) data;
+       len = sprintf(page, "%s\n", dev->filename ? dev->filename : "");
+       return len;
+}
+
+static int proc_write_simdisk(struct file *file, const char *buffer,
+               unsigned long count, void *data)
+{
+       char *tmp = kmalloc(count + 1, GFP_KERNEL);
+       struct simdisk *dev = (struct simdisk *) data;
+       int err;
+
+       if (tmp == NULL)
+               return -ENOMEM;
+       if (copy_from_user(tmp, buffer, count)) {
+               err = -EFAULT;
+               goto out_free;
+       }
+
+       err = simdisk_detach(dev);
+       if (err != 0)
+               goto out_free;
+
+       if (count > 0 && tmp[count - 1] == '\n')
+               tmp[count - 1] = 0;
+       else
+               tmp[count] = 0;
+
+       if (tmp[0])
+               err = simdisk_attach(dev, tmp);
+
+       if (err == 0)
+               err = count;
+out_free:
+       kfree(tmp);
+       return err;
+}
+
+static int __init simdisk_setup(struct simdisk *dev, int which,
+               struct proc_dir_entry *procdir)
+{
+       char tmp[2] = { '0' + which, 0 };
+
+       dev->fd = -1;
+       dev->filename = NULL;
+       spin_lock_init(&dev->lock);
+       dev->users = 0;
+
+       dev->queue = blk_alloc_queue(GFP_KERNEL);
+       if (dev->queue == NULL) {
+               pr_err("blk_alloc_queue failed\n");
+               goto out_alloc_queue;
+       }
+
+       blk_queue_make_request(dev->queue, simdisk_make_request);
+       dev->queue->queuedata = dev;
+
+       dev->gd = alloc_disk(SIMDISK_MINORS);
+       if (dev->gd == NULL) {
+               pr_err("alloc_disk failed\n");
+               goto out_alloc_disk;
+       }
+       dev->gd->major = simdisk_major;
+       dev->gd->first_minor = which;
+       dev->gd->fops = &simdisk_ops;
+       dev->gd->queue = dev->queue;
+       dev->gd->private_data = dev;
+       snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
+       set_capacity(dev->gd, 0);
+       add_disk(dev->gd);
+
+       dev->procfile = create_proc_entry(tmp, 0644, procdir);
+       dev->procfile->data = dev;
+       dev->procfile->read_proc = proc_read_simdisk;
+       dev->procfile->write_proc = proc_write_simdisk;
+       return 0;
+
+out_alloc_disk:
+       blk_cleanup_queue(dev->queue);
+       dev->queue = NULL;
+out_alloc_queue:
+       simc_close(dev->fd);
+       return -EIO;
+}
+
+static int __init simdisk_init(void)
+{
+       int i;
+
+       if (register_blkdev(simdisk_major, "simdisk") < 0) {
+               pr_err("SIMDISK: register_blkdev: %d\n", simdisk_major);
+               return -EIO;
+       }
+       pr_info("SIMDISK: major: %d\n", simdisk_major);
+
+       if (n_files > simdisk_count)
+               simdisk_count = n_files;
+       if (simdisk_count > MAX_SIMDISK_COUNT)
+               simdisk_count = MAX_SIMDISK_COUNT;
+
+       sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
+                       GFP_KERNEL);
+       if (sddev == NULL)
+               goto out_unregister;
+
+       simdisk_procdir = proc_mkdir("simdisk", 0);
+       if (simdisk_procdir == NULL)
+               goto out_free_unregister;
+
+       for (i = 0; i < simdisk_count; ++i) {
+               if (simdisk_setup(sddev + i, i, simdisk_procdir) == 0) {
+                       if (filename[i] != NULL && filename[i][0] != 0 &&
+                                       (n_files == 0 || i < n_files))
+                               simdisk_attach(sddev + i, filename[i]);
+               }
+       }
+
+       return 0;
+
+out_free_unregister:
+       kfree(sddev);
+out_unregister:
+       unregister_blkdev(simdisk_major, "simdisk");
+       return -ENOMEM;
+}
+module_init(simdisk_init);
+
+static void simdisk_teardown(struct simdisk *dev, int which,
+               struct proc_dir_entry *procdir)
+{
+       char tmp[2] = { '0' + which, 0 };
+
+       simdisk_detach(dev);
+       if (dev->gd)
+               del_gendisk(dev->gd);
+       if (dev->queue)
+               blk_cleanup_queue(dev->queue);
+       remove_proc_entry(tmp, procdir);
+}
+
+static void __exit simdisk_exit(void)
+{
+       int i;
+
+       for (i = 0; i < simdisk_count; ++i)
+               simdisk_teardown(sddev + i, i, simdisk_procdir);
+       remove_proc_entry("simdisk", 0);
+       kfree(sddev);
+       unregister_blkdev(simdisk_major, "simdisk");
+}
+module_exit(simdisk_exit);
+
+MODULE_ALIAS_BLOCKDEV_MAJOR(SIMDISK_MAJOR);
+
+MODULE_LICENSE("GPL");
index 4b9951a..9d888a2 100644 (file)
@@ -100,7 +100,7 @@ static void __init update_clock_frequency(struct device_node *node)
        }
 
        *(u32 *)newfreq->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR);
-       prom_update_property(node, newfreq);
+       of_update_property(node, newfreq);
 }
 
 #define MAC_LEN 6
@@ -128,7 +128,7 @@ static void __init update_local_mac(struct device_node *node)
 
        memcpy(newmac->value, macaddr, MAC_LEN);
        ((u8*)newmac->value)[5] = (*(u32*)DIP_SWITCHES_VADDR) & 0x3f;
-       prom_update_property(node, newmac);
+       of_update_property(node, newmac);
 }
 
 static int __init machine_setup(void)
diff --git a/arch/xtensa/variants/dc233c/include/variant/core.h b/arch/xtensa/variants/dc233c/include/variant/core.h
new file mode 100644 (file)
index 0000000..3a2e53b
--- /dev/null
@@ -0,0 +1,475 @@
+/* 
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ *                             processor CORE configuration
+ *
+ *  See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+   Copyright (c) 1999-2010 Tensilica Inc.
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be included
+   in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+   IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+   CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+   TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+   SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+           Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ *  Note:  Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ *  configured, and a value of 0 otherwise.  These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+                               ISA
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE                  0       /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED            1       /* windowed registers option */
+#define XCHAL_NUM_AREGS                        32      /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2           5       /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE     3       /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG               1       /* debug option */
+#define XCHAL_HAVE_DENSITY             1       /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS               1       /* zero-overhead loops */
+#define XCHAL_HAVE_NSA                 1       /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX              1       /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT                        1       /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS              1       /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16               1       /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32               1       /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH          0       /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32               1       /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R                        1       /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS   1       /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16             0       /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX                        1       /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES       0       /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES  0       /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12          1       /* (obsolete option) */
+#define XCHAL_HAVE_ABS                 1       /* ABS instruction */
+/*#define XCHAL_HAVE_POPC              0*/     /* POPC instruction */
+/*#define XCHAL_HAVE_CRC               0*/     /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC                1       /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I              1       /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION         0       /* speculation */
+#define XCHAL_HAVE_FULL_RESET          1       /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS             1       /* */
+#define XCHAL_NUM_MISC_REGS            2       /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER          0       /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID                        1       /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS         1       /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS       0       /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL         0       /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR           1       /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS            0       /* boolean registers */
+#define XCHAL_HAVE_CP                  1       /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG                        8       /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16               1       /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005       0       /* vector floating-point pkg */
+#define XCHAL_HAVE_FP                  0       /* floating point pkg */
+#define XCHAL_HAVE_DFP                 0       /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel           0       /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1             0       /* Vectra I  pkg */
+#define XCHAL_HAVE_VECTRALX            0       /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO             0       /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2               0       /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP     0       /* HiFi2EP */
+#define XCHAL_HAVE_CONNXD2             0       /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16               0       /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT         0       /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV                0       /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD      0       /* BBE16 & despread */
+#define XCHAL_HAVE_BSP3                        0       /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_SSP16               0       /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI       0       /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16             0       /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16               0       /* ConnX BBP16 pkg */
+
+
+/*----------------------------------------------------------------------
+                               MISC
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES  8       /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH         4       /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH               4       /* data width in bytes */
+/*  In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1       /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION        1       /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW                0       /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW       0       /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION               900001  /* sw version of this header */
+
+#define XCHAL_CORE_ID                  "dc233c"        /* alphanum core name
+                                                  (CoreID) set in the Xtensa
+                                                  Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION         "dc233c"
+#define XCHAL_BUILD_UNIQUE_ID          0x00004B21      /* 22-bit sw build ID */
+
+/*
+ *  These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0             0xC56707FE      /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1             0x14404B21      /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME          "LX4.0.1"       /* full version name */
+#define XCHAL_HW_VERSION_MAJOR         2400    /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR         1       /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION               240001  /* major*100+minor */
+#define XCHAL_HW_REL_LX4               1
+#define XCHAL_HW_REL_LX4_0             1
+#define XCHAL_HW_REL_LX4_0_1           1
+#define XCHAL_HW_CONFIGID_RELIABLE     1
+/*  If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR     2400    /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR     1       /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION           240001  /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR     2400    /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR     1       /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION           240001  /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+                               CACHE
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE          32      /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE          32      /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH         5       /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH         5       /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE              16384   /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE              16384   /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK      1       /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT       0       /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH            0       /* PREFCTL register */
+
+
+
+
+/****************************************************************************
+    Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+                               CACHE
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF                 1       /* any outbound PIF present */
+
+/*  If present, cache size in bytes == (ways * 2^(linewidth + setwidth)).  */
+
+/*  Number of cache sets in log2(lines per way):  */
+#define XCHAL_ICACHE_SETWIDTH          7
+#define XCHAL_DCACHE_SETWIDTH          7
+
+/*  Cache set associativity (number of ways):  */
+#define XCHAL_ICACHE_WAYS              4
+#define XCHAL_DCACHE_WAYS              4
+
+/*  Cache features:  */
+#define XCHAL_ICACHE_LINE_LOCKABLE     1
+#define XCHAL_DCACHE_LINE_LOCKABLE     1
+#define XCHAL_ICACHE_ECC_PARITY                0
+#define XCHAL_DCACHE_ECC_PARITY                0
+
+/*  Cache access size in bytes (affects operation of SICW instruction):  */
+#define XCHAL_ICACHE_ACCESS_SIZE       4
+#define XCHAL_DCACHE_ACCESS_SIZE       4
+
+/*  Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits):  */
+#define XCHAL_CA_BITS                  4
+
+
+/*----------------------------------------------------------------------
+                       INTERNAL I/D RAM/ROMs and XLMI
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM              0       /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM              0       /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM              0       /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM              0       /* number of core data RAMs */
+#define XCHAL_NUM_URAM                 0       /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI                 0       /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE      1       /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+                       INTERRUPTS and TIMERS
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS          1       /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS  1       /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI                 1       /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT              1       /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS               3       /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS           22      /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2      5       /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS                17      /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS            6       /* number of interrupt levels
+                                                  (not including level zero) */
+#define XCHAL_EXCM_LEVEL               3       /* level masked by PS.EXCM */
+       /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/*  Masks of interrupts at each interrupt level:  */
+#define XCHAL_INTLEVEL1_MASK           0x001F80FF
+#define XCHAL_INTLEVEL2_MASK           0x00000100
+#define XCHAL_INTLEVEL3_MASK           0x00200E00
+#define XCHAL_INTLEVEL4_MASK           0x00001000
+#define XCHAL_INTLEVEL5_MASK           0x00002000
+#define XCHAL_INTLEVEL6_MASK           0x00000000
+#define XCHAL_INTLEVEL7_MASK           0x00004000
+
+/*  Masks of interrupts at each range 1..n of interrupt levels:  */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK  0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK  0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK  0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK  0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK  0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK  0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK  0x003FFFFF
+
+/*  Level of each interrupt:  */
+#define XCHAL_INT0_LEVEL               1
+#define XCHAL_INT1_LEVEL               1
+#define XCHAL_INT2_LEVEL               1
+#define XCHAL_INT3_LEVEL               1
+#define XCHAL_INT4_LEVEL               1
+#define XCHAL_INT5_LEVEL               1
+#define XCHAL_INT6_LEVEL               1
+#define XCHAL_INT7_LEVEL               1
+#define XCHAL_INT8_LEVEL               2
+#define XCHAL_INT9_LEVEL               3
+#define XCHAL_INT10_LEVEL              3
+#define XCHAL_INT11_LEVEL              3
+#define XCHAL_INT12_LEVEL              4
+#define XCHAL_INT13_LEVEL              5
+#define XCHAL_INT14_LEVEL              7
+#define XCHAL_INT15_LEVEL              1
+#define XCHAL_INT16_LEVEL              1
+#define XCHAL_INT17_LEVEL              1
+#define XCHAL_INT18_LEVEL              1
+#define XCHAL_INT19_LEVEL              1
+#define XCHAL_INT20_LEVEL              1
+#define XCHAL_INT21_LEVEL              3
+#define XCHAL_DEBUGLEVEL               6       /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT    1       /* OCD external db interrupt */
+#define XCHAL_NMILEVEL                 7       /* NMI "level" (for use with
+                                                  EXCSAVE/EPS/EPC_n, RFI n) */
+
+/*  Type of each interrupt:  */
+#define XCHAL_INT0_TYPE        XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE        XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE        XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE        XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE        XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE        XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE        XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE        XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE        XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE        XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE       XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE       XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE       XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE       XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE       XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE       XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE       XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE       XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE       XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE       XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE       XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE       XTHAL_INTTYPE_EXTERN_EDGE
+
+/*  Masks of interrupts for each type of interrupt:  */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED        0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE    0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL        0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER       0x00002440
+#define XCHAL_INTTYPE_MASK_NMI         0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/*  Interrupt numbers assigned to specific interrupt sources:  */
+#define XCHAL_TIMER0_INTERRUPT         6       /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT         10      /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT         13      /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT         XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT            14      /* non-maskable interrupt */
+
+/*  Interrupt numbers for levels at which only one interrupt is configured:  */
+#define XCHAL_INTLEVEL2_NUM            8
+#define XCHAL_INTLEVEL4_NUM            12
+#define XCHAL_INTLEVEL5_NUM            13
+#define XCHAL_INTLEVEL7_NUM            14
+/*  (There are many interrupts each at level(s) 1, 3.)  */
+
+
+/*
+ *  External interrupt vectors/levels.
+ *  These macros describe how Xtensa processor interrupt numbers
+ *  (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ *  map to external BInterrupt<n> pins, for those interrupts
+ *  configured as external (level-triggered, edge-triggered, or NMI).
+ *  See the Xtensa processor databook for more details.
+ */
+
+/*  Core interrupt numbers mapped to each EXTERNAL interrupt number:  */
+#define XCHAL_EXTINT0_NUM              0       /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM              1       /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM              2       /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM              3       /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM              4       /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM              5       /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM              8       /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM              9       /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM              12      /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM              14      /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM             15      /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM             16      /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM             17      /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM             18      /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM             19      /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM             20      /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM             21      /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+                       EXCEPTIONS and VECTORS
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION              2       /* Xtensa Exception Architecture
+                                                  number: 1 == XEA1 (old)
+                                                          2 == XEA2 (new)
+                                                          0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1                        0       /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2                        1       /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX                        0       /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS          1       /* exception option */
+#define XCHAL_HAVE_HALT                        0       /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER          0       /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY      0       /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT       1       /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE             1       /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR      0x00002000  /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR      0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP    0
+
+#define XCHAL_RESET_VECTOR0_VADDR      0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR      0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR      0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR      0x00001000
+#define XCHAL_RESET_VECTOR_VADDR       0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR       0xFE000000
+#define XCHAL_USER_VECOFS              0x00000340
+#define XCHAL_USER_VECTOR_VADDR                0x00002340
+#define XCHAL_USER_VECTOR_PADDR                0x00002340
+#define XCHAL_KERNEL_VECOFS            0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR      0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR      0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS         0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR   0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR   0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS                0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS                0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS                0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS                0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS       0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS       0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR     0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR     0x00002000
+#define XCHAL_INTLEVEL2_VECOFS         0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR   0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR   0x00002180
+#define XCHAL_INTLEVEL3_VECOFS         0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR   0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR   0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS         0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR   0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR   0x00002200
+#define XCHAL_INTLEVEL5_VECOFS         0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR   0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR   0x00002240
+#define XCHAL_INTLEVEL6_VECOFS         0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR   0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR   0x00002280
+#define XCHAL_DEBUG_VECOFS             XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR       XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR       XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS               0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR         0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR         0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS         XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR   XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR   XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+                               DEBUG
+  ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD                 1       /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK               2       /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK               2       /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY       1       /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+                               MMU
+  ----------------------------------------------------------------------*/
+
+/*  See core-matmap.h header file for more details.  */
+
+#define XCHAL_HAVE_TLBS                        1       /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY                1       /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY             6       /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP                0       /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR           0       /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR     0       /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR       0       /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU             1       /* full MMU (with page table
+                                                  [autorefill] and protection)
+                                                  usable for an MMU-based OS */
+/*  If none of the above last 4 are set, it's a custom TLB configuration.  */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2    2       /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2    2       /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS            8       /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS                        4       /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS            2       /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/tie-asm.h b/arch/xtensa/variants/dc233c/include/variant/tie-asm.h
new file mode 100644 (file)
index 0000000..5dbd981
--- /dev/null
@@ -0,0 +1,193 @@
+/* 
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ *  NOTE:  This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+   macros, etc.) for this specific Xtensa processor's TIE extensions
+   and options.  It is customized to this Xtensa processor configuration.
+
+   Copyright (c) 1999-2010 Tensilica Inc.
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be included
+   in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+   IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+   CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+   TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+   SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/*  Selection parameter values for save-area save/restore macros:  */
+/*  Option vs. TIE:  */
+#define XTHAL_SAS_TIE  0x0001  /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT  0x0002  /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT        0x0003  /* both of the above */
+/*  Whether used automatically by compiler:  */
+#define XTHAL_SAS_NOCC 0x0004  /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC   0x0008  /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC        0x000C  /* both of the above */
+/*  ABI handling across function calls:  */
+#define XTHAL_SAS_CALR 0x0010  /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020  /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040  /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070        /* all of the above three */
+/*  Misc  */
+#define XTHAL_SAS_ALL  0xFFFF  /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi)    ( ((optie) & XTHAL_SAS_ANYOT)  \
+                                       | ((ccuse) & XTHAL_SAS_ANYCC)  \
+                                       | ((abi)   & XTHAL_SAS_ANYABI) )
+
+
+
+    /*
+     *  Macro to save all non-coprocessor (extra) custom TIE and optional state
+     *  (not including zero-overhead loop registers).
+     *  Required parameters:
+     *      ptr                Save area pointer address register (clobbered)
+     *                 (register must contain a 4 byte aligned address).
+     *      at1..at4   Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+     *                 registers are clobbered, the remaining are unused).
+     *  Optional parameters:
+     *      continue   If macro invoked as part of a larger store sequence, set to 1
+     *                 if this is not the first in the sequence.  Defaults to 0.
+     *      ofs                Offset from start of larger sequence (from value of first ptr
+     *                 in sequence) at which to store.  Defaults to next available space
+     *                 (or 0 if <continue> is 0).
+     *      select     Select what category(ies) of registers to store, as a bitmask
+     *                 (see XTHAL_SAS_xxx constants).  Defaults to all registers.
+     *      alloc      Select what category(ies) of registers to allocate; if any
+     *                 category is selected here that is not in <select>, space for
+     *                 the corresponding registers is skipped without doing any store.
+     */
+    .macro xchal_ncp_store  ptr at1 at2 at3 at4  continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+       xchal_sa_start  \continue, \ofs
+       // Optional global register used by default by the compiler:
+       .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+       xchal_sa_align  \ptr, 0, 1020, 4, 4
+       rur.THREADPTR   \at1            // threadptr option
+       s32i    \at1, \ptr, .Lxchal_ofs_+0
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 4
+       .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+       xchal_sa_align  \ptr, 0, 1020, 4, 4
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 4
+       .endif
+       // Optional caller-saved registers used by default by the compiler:
+       .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+       xchal_sa_align  \ptr, 0, 1016, 4, 4
+       rsr     \at1, ACCLO                     // MAC16 option
+       s32i    \at1, \ptr, .Lxchal_ofs_+0
+       rsr     \at1, ACCHI                     // MAC16 option
+       s32i    \at1, \ptr, .Lxchal_ofs_+4
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 8
+       .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+       xchal_sa_align  \ptr, 0, 1016, 4, 4
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 8
+       .endif
+       // Optional caller-saved registers not used by default by the compiler:
+       .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+       xchal_sa_align  \ptr, 0, 1004, 4, 4
+       rsr     \at1, M0                        // MAC16 option
+       s32i    \at1, \ptr, .Lxchal_ofs_+0
+       rsr     \at1, M1                        // MAC16 option
+       s32i    \at1, \ptr, .Lxchal_ofs_+4
+       rsr     \at1, M2                        // MAC16 option
+       s32i    \at1, \ptr, .Lxchal_ofs_+8
+       rsr     \at1, M3                        // MAC16 option
+       s32i    \at1, \ptr, .Lxchal_ofs_+12
+       rsr     \at1, SCOMPARE1                 // conditional store option
+       s32i    \at1, \ptr, .Lxchal_ofs_+16
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 20
+       .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+       xchal_sa_align  \ptr, 0, 1004, 4, 4
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 20
+       .endif
+    .endm      // xchal_ncp_store
+
+    /*
+     *  Macro to restore all non-coprocessor (extra) custom TIE and optional state
+     *  (not including zero-overhead loop registers).
+     *  Required parameters:
+     *      ptr                Save area pointer address register (clobbered)
+     *                 (register must contain a 4 byte aligned address).
+     *      at1..at4   Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+     *                 registers are clobbered, the remaining are unused).
+     *  Optional parameters:
+     *      continue   If macro invoked as part of a larger load sequence, set to 1
+     *                 if this is not the first in the sequence.  Defaults to 0.
+     *      ofs                Offset from start of larger sequence (from value of first ptr
+     *                 in sequence) at which to load.  Defaults to next available space
+     *                 (or 0 if <continue> is 0).
+     *      select     Select what category(ies) of registers to load, as a bitmask
+     *                 (see XTHAL_SAS_xxx constants).  Defaults to all registers.
+     *      alloc      Select what category(ies) of registers to allocate; if any
+     *                 category is selected here that is not in <select>, space for
+     *                 the corresponding registers is skipped without doing any load.
+     */
+    .macro xchal_ncp_load  ptr at1 at2 at3 at4  continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+       xchal_sa_start  \continue, \ofs
+       // Optional global register used by default by the compiler:
+       .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
+       xchal_sa_align  \ptr, 0, 1020, 4, 4
+       l32i    \at1, \ptr, .Lxchal_ofs_+0
+       wur.THREADPTR   \at1            // threadptr option
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 4
+       .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
+       xchal_sa_align  \ptr, 0, 1020, 4, 4
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 4
+       .endif
+       // Optional caller-saved registers used by default by the compiler:
+       .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+       xchal_sa_align  \ptr, 0, 1016, 4, 4
+       l32i    \at1, \ptr, .Lxchal_ofs_+0
+       wsr     \at1, ACCLO                     // MAC16 option
+       l32i    \at1, \ptr, .Lxchal_ofs_+4
+       wsr     \at1, ACCHI                     // MAC16 option
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 8
+       .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+       xchal_sa_align  \ptr, 0, 1016, 4, 4
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 8
+       .endif
+       // Optional caller-saved registers not used by default by the compiler:
+       .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+       xchal_sa_align  \ptr, 0, 1004, 4, 4
+       l32i    \at1, \ptr, .Lxchal_ofs_+0
+       wsr     \at1, M0                        // MAC16 option
+       l32i    \at1, \ptr, .Lxchal_ofs_+4
+       wsr     \at1, M1                        // MAC16 option
+       l32i    \at1, \ptr, .Lxchal_ofs_+8
+       wsr     \at1, M2                        // MAC16 option
+       l32i    \at1, \ptr, .Lxchal_ofs_+12
+       wsr     \at1, M3                        // MAC16 option
+       l32i    \at1, \ptr, .Lxchal_ofs_+16
+       wsr     \at1, SCOMPARE1                 // conditional store option
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 20
+       .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+       xchal_sa_align  \ptr, 0, 1004, 4, 4
+       .set    .Lxchal_ofs_, .Lxchal_ofs_ + 20
+       .endif
+    .endm      // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS    1
+
+
+
+#define XCHAL_SA_NUM_ATMPS     1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/arch/xtensa/variants/dc233c/include/variant/tie.h b/arch/xtensa/variants/dc233c/include/variant/tie.h
new file mode 100644 (file)
index 0000000..815e52b
--- /dev/null
@@ -0,0 +1,150 @@
+/* 
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ *  NOTE:  This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+   that extend basic Xtensa core functionality.  It is customized to this
+   Xtensa processor configuration.
+
+   Copyright (c) 1999-2010 Tensilica Inc.
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be included
+   in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+   IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+   CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+   TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+   SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM                   1       /* number of coprocessors */
+#define XCHAL_CP_MAX                   8       /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK                  0x80    /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK             0x80    /* bitmask of only port CPs */
+
+/*  Basic parameters of each coprocessor:  */
+#define XCHAL_CP7_NAME                 "XTIOP"
+#define XCHAL_CP7_IDENT                        XTIOP
+#define XCHAL_CP7_SA_SIZE              0       /* size of state save area */
+#define XCHAL_CP7_SA_ALIGN             1       /* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP              7       /* coprocessor ID (0..7) */
+
+/*  Filler info for unassigned coprocessors, to simplify arrays etc:  */
+#define XCHAL_CP0_SA_SIZE              0
+#define XCHAL_CP0_SA_ALIGN             1
+#define XCHAL_CP1_SA_SIZE              0
+#define XCHAL_CP1_SA_ALIGN             1
+#define XCHAL_CP2_SA_SIZE              0
+#define XCHAL_CP2_SA_ALIGN             1
+#define XCHAL_CP3_SA_SIZE              0
+#define XCHAL_CP3_SA_ALIGN             1
+#define XCHAL_CP4_SA_SIZE              0
+#define XCHAL_CP4_SA_ALIGN             1
+#define XCHAL_CP5_SA_SIZE              0
+#define XCHAL_CP5_SA_ALIGN             1
+#define XCHAL_CP6_SA_SIZE              0
+#define XCHAL_CP6_SA_ALIGN             1
+
+/*  Save area for non-coprocessor optional and custom (TIE) state:  */
+#define XCHAL_NCP_SA_SIZE              32
+#define XCHAL_NCP_SA_ALIGN             4
+
+/*  Total save area for optional and custom state (NCP + CPn):  */
+#define XCHAL_TOTAL_SA_SIZE            32      /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN           4       /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE:  caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ *             dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ *     s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ *     ccused = set if used by compiler without special options or code
+ *     abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ *     kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ *     opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ *     name = lowercase reg name (no quotes)
+ *     galign = group byte alignment (power of 2) (galign >= align)
+ *     align = register byte alignment (power of 2)
+ *     asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ *       (not including any pad bytes required to galign this or next reg)
+ *     dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ *     base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ *     regnum = reg index in regfile, or special/TIE-user reg number
+ *     bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ *     gapsz = intervening bits, if bitsz bits not stored contiguously
+ *     (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ *     reset = register reset value (or 0 if undefined at reset)
+ *     x = reserved for future use (0 until then)
+ *
+ *  To filter out certain registers, e.g. to expand only the non-global
+ *  registers used by the compiler, you can do something like this:
+ *
+ *  #define XCHAL_SA_REG(s,ccused,p...)        SELCC##ccused(p)
+ *  #define SELCC0(p...)
+ *  #define SELCC1(abikind,p...)       SELAK##abikind(p)
+ *  #define SELAK0(p...)               REG(p)
+ *  #define SELAK1(p...)               REG(p)
+ *  #define SELAK2(p...)
+ *  #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ *             ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM       8
+#define XCHAL_NCP_SA_LIST(s)   \
+ XCHAL_SA_REG(s,1,2,1,1,      threadptr, 4, 4, 4,0x03E7,  ur,231, 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1,          acclo, 4, 4, 4,0x0210,  sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1,          acchi, 4, 4, 4,0x0211,  sr,17 ,  8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,             m0, 4, 4, 4,0x0220,  sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,             m1, 4, 4, 4,0x0221,  sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,             m2, 4, 4, 4,0x0222,  sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,             m3, 4, 4, 4,0x0223,  sr,35 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1,      scompare1, 4, 4, 4,0x020C,  sr,12 , 32,0,0,0)
+
+#define XCHAL_CP0_SA_NUM       0
+#define XCHAL_CP0_SA_LIST(s)   /* empty */
+
+#define XCHAL_CP1_SA_NUM       0
+#define XCHAL_CP1_SA_LIST(s)   /* empty */
+
+#define XCHAL_CP2_SA_NUM       0
+#define XCHAL_CP2_SA_LIST(s)   /* empty */
+
+#define XCHAL_CP3_SA_NUM       0
+#define XCHAL_CP3_SA_LIST(s)   /* empty */
+
+#define XCHAL_CP4_SA_NUM       0
+#define XCHAL_CP4_SA_LIST(s)   /* empty */
+
+#define XCHAL_CP5_SA_NUM       0
+#define XCHAL_CP5_SA_LIST(s)   /* empty */
+
+#define XCHAL_CP6_SA_NUM       0
+#define XCHAL_CP6_SA_LIST(s)   /* empty */
+
+#define XCHAL_CP7_SA_NUM       0
+#define XCHAL_CP7_SA_LIST(s)   /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX.  */
+#define XCHAL_OP0_FORMAT_LENGTHS       3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+
+#endif /*_XTENSA_CORE_TIE_H*/
+
index 4a85ccf..a7e40a7 100644 (file)
@@ -4,7 +4,6 @@
 menuconfig BLOCK
        bool "Enable the block layer" if EXPERT
        default y
-       select PERCPU_RWSEM
        help
         Provide block layer support for the kernel.
 
index b8858fb..b2b9837 100644 (file)
 
 static DEFINE_MUTEX(blkcg_pol_mutex);
 
-struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
+                           .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
 EXPORT_SYMBOL_GPL(blkcg_root);
 
 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+                                     struct request_queue *q, bool update_hint);
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_cgrp: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @c_blkg through the descendants of @p_blkg.  Must be used with RCU
+ * read locked.  If called under either blkcg or queue lock, the iteration
+ * is guaranteed to include all and only online blkgs.  The caller may
+ * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
+ * subtree.
+ */
+#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg)         \
+       cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
+               if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+                                             (p_blkg)->q, false)))
+
 static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
 {
@@ -112,9 +133,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 
                blkg->pd[i] = pd;
                pd->blkg = blkg;
+               pd->plid = i;
 
                /* invoke per-policy init */
-               if (blkcg_policy_enabled(blkg->q, pol))
+               if (pol->pd_init_fn)
                        pol->pd_init_fn(blkg);
        }
 
@@ -125,8 +147,19 @@ err_free:
        return NULL;
 }
 
+/**
+ * __blkg_lookup - internal version of blkg_lookup()
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ * @update_hint: whether to update lookup hint with the result or not
+ *
+ * This is internal version and shouldn't be used by policy
+ * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
+ * @q's bypass state.  If @update_hint is %true, the caller should be
+ * holding @q->queue_lock and lookup hint is updated on success.
+ */
 static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
-                                     struct request_queue *q)
+                                     struct request_queue *q, bool update_hint)
 {
        struct blkcg_gq *blkg;
 
@@ -135,14 +168,19 @@ static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                return blkg;
 
        /*
-        * Hint didn't match.  Look up from the radix tree.  Note that we
-        * may not be holding queue_lock and thus are not sure whether
-        * @blkg from blkg_tree has already been removed or not, so we
-        * can't update hint to the lookup result.  Leave it to the caller.
+        * Hint didn't match.  Look up from the radix tree.  Note that the
+        * hint can only be updated under queue_lock as otherwise @blkg
+        * could have already been removed from blkg_tree.  The caller is
+        * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
-       if (blkg && blkg->q == q)
+       if (blkg && blkg->q == q) {
+               if (update_hint) {
+                       lockdep_assert_held(q->queue_lock);
+                       rcu_assign_pointer(blkcg->blkg_hint, blkg);
+               }
                return blkg;
+       }
 
        return NULL;
 }
@@ -162,7 +200,7 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 
        if (unlikely(blk_queue_bypass(q)))
                return NULL;
-       return __blkg_lookup(blkcg, q);
+       return __blkg_lookup(blkcg, q, false);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
@@ -170,75 +208,129 @@ EXPORT_SYMBOL_GPL(blkg_lookup);
  * If @new_blkg is %NULL, this function tries to allocate a new one as
  * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
  */
-static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-                                            struct request_queue *q,
-                                            struct blkcg_gq *new_blkg)
+static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
+                                   struct request_queue *q,
+                                   struct blkcg_gq *new_blkg)
 {
        struct blkcg_gq *blkg;
-       int ret;
+       int i, ret;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);
 
-       /* lookup and update hint on success, see __blkg_lookup() for details */
-       blkg = __blkg_lookup(blkcg, q);
-       if (blkg) {
-               rcu_assign_pointer(blkcg->blkg_hint, blkg);
-               goto out_free;
-       }
-
        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css)) {
-               blkg = ERR_PTR(-EINVAL);
-               goto out_free;
+               ret = -EINVAL;
+               goto err_free_blkg;
        }
 
        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
                if (unlikely(!new_blkg)) {
-                       blkg = ERR_PTR(-ENOMEM);
-                       goto out_put;
+                       ret = -ENOMEM;
+                       goto err_put_css;
                }
        }
        blkg = new_blkg;
 
-       /* insert */
+       /* link parent and insert */
+       if (blkcg_parent(blkcg)) {
+               blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
+               if (WARN_ON_ONCE(!blkg->parent)) {
+                       blkg = ERR_PTR(-EINVAL);
+                       goto err_put_css;
+               }
+               blkg_get(blkg->parent);
+       }
+
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);
+
+               for (i = 0; i < BLKCG_MAX_POLS; i++) {
+                       struct blkcg_policy *pol = blkcg_policy[i];
+
+                       if (blkg->pd[i] && pol->pd_online_fn)
+                               pol->pd_online_fn(blkg);
+               }
        }
+       blkg->online = true;
        spin_unlock(&blkcg->lock);
 
        if (!ret)
                return blkg;
 
-       blkg = ERR_PTR(ret);
-out_put:
+       /* @blkg failed fully initialized, use the usual release path */
+       blkg_put(blkg);
+       return ERR_PTR(ret);
+
+err_put_css:
        css_put(&blkcg->css);
-out_free:
+err_free_blkg:
        blkg_free(new_blkg);
-       return blkg;
+       return ERR_PTR(ret);
 }
 
+/**
+ * blkg_lookup_create - lookup blkg, try to create one if not there
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
+ * create one.  blkg creation is performed recursively from blkcg_root such
+ * that all non-root blkg's have access to the parent blkg.  This function
+ * should be called under RCU read lock and @q->queue_lock.
+ *
+ * Returns pointer to the looked up or created blkg on success, ERR_PTR()
+ * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
+ * dead and bypassing, returns ERR_PTR(-EBUSY).
+ */
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
 {
+       struct blkcg_gq *blkg;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       lockdep_assert_held(q->queue_lock);
+
        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
-       return __blkg_lookup_create(blkcg, q, NULL);
+
+       blkg = __blkg_lookup(blkcg, q, true);
+       if (blkg)
+               return blkg;
+
+       /*
+        * Create blkgs walking down from blkcg_root to @blkcg, so that all
+        * non-root blkgs have access to their parents.
+        */
+       while (true) {
+               struct blkcg *pos = blkcg;
+               struct blkcg *parent = blkcg_parent(blkcg);
+
+               while (parent && !__blkg_lookup(parent, q, false)) {
+                       pos = parent;
+                       parent = blkcg_parent(parent);
+               }
+
+               blkg = blkg_create(pos, q, NULL);
+               if (pos == blkcg || IS_ERR(blkg))
+                       return blkg;
+       }
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
        struct blkcg *blkcg = blkg->blkcg;
+       int i;
 
        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
@@ -247,6 +339,14 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
+
+               if (blkg->pd[i] && pol->pd_offline_fn)
+                       pol->pd_offline_fn(blkg);
+       }
+       blkg->online = false;
+
        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);
@@ -301,8 +401,10 @@ static void blkg_rcu_free(struct rcu_head *rcu_head)
 
 void __blkg_release(struct blkcg_gq *blkg)
 {
-       /* release the extra blkcg reference this blkg has been holding */
+       /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
+       if (blkg->parent)
+               blkg_put(blkg->parent);
 
        /*
         * A group is freed in rcu manner. But having an rcu lock does not
@@ -357,7 +459,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 {
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
        struct blkcg_gq *blkg;
-       struct hlist_node *n;
        int i;
 
        mutex_lock(&blkcg_pol_mutex);
@@ -368,7 +469,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
-       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];
 
@@ -402,8 +503,9 @@ static const char *blkg_dev_name(struct blkcg_gq *blkg)
  *
  * This function invokes @prfill on each blkg of @blkcg if pd for the
  * policy specified by @pol exists.  @prfill is invoked with @sf, the
- * policy data and @data.  If @show_total is %true, the sum of the return
- * values from @prfill is printed with "Total" label at the end.
+ * policy data and @data and the matching queue lock held.  If @show_total
+ * is %true, the sum of the return values from @prfill is printed with
+ * "Total" label at the end.
  *
  * This is to be used to construct print functions for
  * cftype->read_seq_string method.
@@ -415,14 +517,16 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       bool show_total)
 {
        struct blkcg_gq *blkg;
-       struct hlist_node *n;
        u64 total = 0;
 
-       spin_lock_irq(&blkcg->lock);
-       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+               spin_lock_irq(blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
-       spin_unlock_irq(&blkcg->lock);
+               spin_unlock_irq(blkg->q->queue_lock);
+       }
+       rcu_read_unlock();
 
        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
@@ -481,6 +585,7 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
 }
+EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
 
 /**
  * blkg_prfill_stat - prfill callback for blkg_stat
@@ -513,6 +618,82 @@ u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 }
 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
 
+/**
+ * blkg_stat_recursive_sum - collect hierarchical blkg_stat
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * Collect the blkg_stat specified by @off from @pd and all its online
+ * descendants and return the sum.  The caller must be holding the queue
+ * lock for online tests.
+ */
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
+{
+       struct blkcg_policy *pol = blkcg_policy[pd->plid];
+       struct blkcg_gq *pos_blkg;
+       struct cgroup *pos_cgrp;
+       u64 sum;
+
+       lockdep_assert_held(pd->blkg->q->queue_lock);
+
+       sum = blkg_stat_read((void *)pd + off);
+
+       rcu_read_lock();
+       blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+               struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
+               struct blkg_stat *stat = (void *)pos_pd + off;
+
+               if (pos_blkg->online)
+                       sum += blkg_stat_read(stat);
+       }
+       rcu_read_unlock();
+
+       return sum;
+}
+EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
+
+/**
+ * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * Collect the blkg_rwstat specified by @off from @pd and all its online
+ * descendants and return the sum.  The caller must be holding the queue
+ * lock for online tests.
+ */
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+                                            int off)
+{
+       struct blkcg_policy *pol = blkcg_policy[pd->plid];
+       struct blkcg_gq *pos_blkg;
+       struct cgroup *pos_cgrp;
+       struct blkg_rwstat sum;
+       int i;
+
+       lockdep_assert_held(pd->blkg->q->queue_lock);
+
+       sum = blkg_rwstat_read((void *)pd + off);
+
+       rcu_read_lock();
+       blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+               struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
+               struct blkg_rwstat *rwstat = (void *)pos_pd + off;
+               struct blkg_rwstat tmp;
+
+               if (!pos_blkg->online)
+                       continue;
+
+               tmp = blkg_rwstat_read(rwstat);
+
+               for (i = 0; i < BLKG_RWSTAT_NR; i++)
+                       sum.cnt[i] += tmp.cnt[i];
+       }
+       rcu_read_unlock();
+
+       return sum;
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
+
 /**
  * blkg_conf_prep - parse and prepare for per-blkg config update
  * @blkcg: target block cgroup
@@ -658,6 +839,7 @@ static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
                return ERR_PTR(-ENOMEM);
 
        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+       blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
 done:
        spin_lock_init(&blkcg->lock);
@@ -777,7 +959,7 @@ int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
 {
        LIST_HEAD(pds);
-       struct blkcg_gq *blkg;
+       struct blkcg_gq *blkg, *new_blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;
        bool preloaded;
@@ -786,19 +968,27 @@ int blkcg_activate_policy(struct request_queue *q,
                return 0;
 
        /* preallocations for root blkg */
-       blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
-       if (!blkg)
+       new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+       if (!new_blkg)
                return -ENOMEM;
 
        preloaded = !radix_tree_preload(GFP_KERNEL);
 
        blk_queue_bypass_start(q);
 
-       /* make sure the root blkg exists and count the existing blkgs */
+       /*
+        * Make sure the root blkg exists and count the existing blkgs.  As
+        * @q is bypassing at this point, blkg_lookup_create() can't be
+        * used.  Open code it.
+        */
        spin_lock_irq(q->queue_lock);
 
        rcu_read_lock();
-       blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
+       blkg = __blkg_lookup(&blkcg_root, q, false);
+       if (blkg)
+               blkg_free(new_blkg);
+       else
+               blkg = blkg_create(&blkcg_root, q, new_blkg);
        rcu_read_unlock();
 
        if (preloaded)
@@ -846,6 +1036,7 @@ int blkcg_activate_policy(struct request_queue *q,
 
                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
+               pd->plid = pol->plid;
                pol->pd_init_fn(blkg);
 
                spin_unlock(&blkg->blkcg->lock);
@@ -892,6 +1083,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);
 
+               if (pol->pd_offline_fn)
+                       pol->pd_offline_fn(blkg);
                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);
 
index 2459730..f2b2929 100644 (file)
@@ -54,6 +54,7 @@ struct blkcg {
 
        /* TODO: per-policy storage in blkcg */
        unsigned int                    cfq_weight;     /* belongs to cfq */
+       unsigned int                    cfq_leaf_weight;
 };
 
 struct blkg_stat {
@@ -80,8 +81,9 @@ struct blkg_rwstat {
  * beginning and pd_size can't be smaller than pd.
  */
 struct blkg_policy_data {
-       /* the blkg this per-policy data belongs to */
+       /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
+       int                             plid;
 
        /* used during policy activation */
        struct list_head                alloc_node;
@@ -94,17 +96,27 @@ struct blkcg_gq {
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;
+
+       /* all non-root blkcg_gq's are guaranteed to have access to parent */
+       struct blkcg_gq                 *parent;
+
        /* request allocation list for this blkcg-q pair */
        struct request_list             rl;
+
        /* reference count */
        int                             refcnt;
 
+       /* is this blkg online? protected by both blkcg and q locks */
+       bool                            online;
+
        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];
 
        struct rcu_head                 rcu_head;
 };
 
 typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
 typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
 
@@ -117,6 +129,8 @@ struct blkcg_policy {
 
        /* operations */
        blkcg_pol_init_pd_fn            *pd_init_fn;
+       blkcg_pol_online_pd_fn          *pd_online_fn;
+       blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_exit_pd_fn            *pd_exit_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
 };
@@ -150,6 +164,10 @@ u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);
 
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+                                            int off);
+
 struct blkg_conf_ctx {
        struct gendisk                  *disk;
        struct blkcg_gq                 *blkg;
@@ -180,6 +198,19 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
        return task_blkcg(current);
 }
 
+/**
+ * blkcg_parent - get the parent of a blkcg
+ * @blkcg: blkcg of interest
+ *
+ * Return the parent blkcg of @blkcg.  Can be called anytime.
+ */
+static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
+{
+       struct cgroup *pcg = blkcg->css.cgroup->parent;
+
+       return pcg ? cgroup_to_blkcg(pcg) : NULL;
+}
+
 /**
  * blkg_to_pdata - get policy private data
  * @blkg: blkg of interest
@@ -386,6 +417,18 @@ static inline void blkg_stat_reset(struct blkg_stat *stat)
        stat->cnt = 0;
 }
 
+/**
+ * blkg_stat_merge - merge a blkg_stat into another
+ * @to: the destination blkg_stat
+ * @from: the source
+ *
+ * Add @from's count to @to.
+ */
+static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
+{
+       blkg_stat_add(to, blkg_stat_read(from));
+}
+
 /**
  * blkg_rwstat_add - add a value to a blkg_rwstat
  * @rwstat: target blkg_rwstat
@@ -434,14 +477,14 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
 }
 
 /**
- * blkg_rwstat_sum - read the total count of a blkg_rwstat
+ * blkg_rwstat_total - read the total count of a blkg_rwstat
  * @rwstat: blkg_rwstat to read
  *
  * Return the total count of @rwstat regardless of the IO direction.  This
  * function can be called without synchronization and takes care of u64
  * atomicity.
  */
-static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
+static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
 {
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
 
@@ -457,6 +500,25 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
        memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
 }
 
+/**
+ * blkg_rwstat_merge - merge a blkg_rwstat into another
+ * @to: the destination blkg_rwstat
+ * @from: the source
+ *
+ * Add @from's counts to @to.
+ */
+static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
+                                    struct blkg_rwstat *from)
+{
+       struct blkg_rwstat v = blkg_rwstat_read(from);
+       int i;
+
+       u64_stats_update_begin(&to->syncp);
+       for (i = 0; i < BLKG_RWSTAT_NR; i++)
+               to->cnt[i] += v.cnt[i];
+       u64_stats_update_end(&to->syncp);
+}
+
 #else  /* CONFIG_BLK_CGROUP */
 
 struct cgroup;
index 277134c..074b758 100644 (file)
@@ -39,7 +39,6 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
-EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
@@ -1348,7 +1347,7 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
        if (!ll_back_merge_fn(q, req, bio))
                return false;
 
-       trace_block_bio_backmerge(q, bio);
+       trace_block_bio_backmerge(q, req, bio);
 
        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
@@ -1370,7 +1369,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
        if (!ll_front_merge_fn(q, req, bio))
                return false;
 
-       trace_block_bio_frontmerge(q, bio);
+       trace_block_bio_frontmerge(q, req, bio);
 
        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
@@ -1553,13 +1552,6 @@ get_rq:
                if (list_empty(&plug->list))
                        trace_block_plug(q);
                else {
-                       if (!plug->should_sort) {
-                               struct request *__rq;
-
-                               __rq = list_entry_rq(plug->list.prev);
-                               if (__rq->q != q)
-                                       plug->should_sort = 1;
-                       }
                        if (request_count >= BLK_MAX_REQUEST_COUNT) {
                                blk_flush_plug_list(plug, false);
                                trace_block_plug(q);
@@ -2890,7 +2882,6 @@ void blk_start_plug(struct blk_plug *plug)
        plug->magic = PLUG_MAGIC;
        INIT_LIST_HEAD(&plug->list);
        INIT_LIST_HEAD(&plug->cb_list);
-       plug->should_sort = 0;
 
        /*
         * If this is a nested plug, don't actually assign it. It will be
@@ -2992,10 +2983,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
        list_splice_init(&plug->list, &list);
 
-       if (plug->should_sort) {
-               list_sort(NULL, &list, plug_rq_cmp);
-               plug->should_sort = 0;
-       }
+       list_sort(NULL, &list, plug_rq_cmp);
 
        q = NULL;
        depth = 0;
index c88202f..e706213 100644 (file)
@@ -121,9 +121,9 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
        /* Prevent hang_check timer from firing at us during very long I/O */
        hang_check = sysctl_hung_task_timeout_secs;
        if (hang_check)
-               while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
+               while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
        else
-               wait_for_completion(&wait);
+               wait_for_completion_io(&wait);
 
        if (rq->errors)
                err = -EIO;
index 720ad60..db8f1b5 100644 (file)
@@ -436,7 +436,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 
        bio_get(bio);
        submit_bio(WRITE_FLUSH, bio);
-       wait_for_completion(&wait);
+       wait_for_completion_io(&wait);
 
        /*
         * The driver must store the error location in ->bi_sector, if
index fab4cdd..9c4bb82 100644 (file)
@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
  */
 void put_io_context_active(struct io_context *ioc)
 {
-       struct hlist_node *n;
        unsigned long flags;
        struct io_cq *icq;
 
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
         */
 retry:
        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-       hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+       hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
                if (icq->flags & ICQ_EXITED)
                        continue;
                if (spin_trylock(icq->q->queue_lock)) {
index b3a1f2b..d6f50d5 100644 (file)
@@ -126,7 +126,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
-               wait_for_completion(&wait);
+               wait_for_completion_io(&wait);
 
        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -EIO;
@@ -200,7 +200,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 
        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
-               wait_for_completion(&wait);
+               wait_for_completion_io(&wait);
 
        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -ENOTSUPP;
@@ -262,7 +262,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 
        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
-               wait_for_completion(&wait);
+               wait_for_completion_io(&wait);
 
        if (!test_bit(BIO_UPTODATE, &bb.flags))
                /* One of bios in the batch was completed with error.*/
index 7881477..6206a93 100644 (file)
@@ -497,6 +497,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
        return res;
 }
 
+/* RCU callback: actually free the request_queue once all RCU readers
+ * that may still dereference it have finished their grace period. */
+static void blk_free_queue_rcu(struct rcu_head *rcu_head)
+{
+       struct request_queue *q = container_of(rcu_head, struct request_queue,
+                                              rcu_head);
+       kmem_cache_free(blk_requestq_cachep, q);
+}
+
 /**
  * blk_release_queue: - release a &struct request_queue when it is no longer needed
  * @kobj:    the kobj belonging to the request queue to be released
@@ -538,7 +545,7 @@ static void blk_release_queue(struct kobject *kobj)
        bdi_destroy(&q->backing_dev_info);
 
        ida_simple_remove(&blk_queue_ida, q->id);
-       kmem_cache_free(blk_requestq_cachep, q);
+       call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
 
 static const struct sysfs_ops queue_sysfs_ops = {
index 47fdfdd..e837b8f 100644 (file)
@@ -61,7 +61,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
 /*
  * Internal elevator interface
  */
-#define ELV_ON_HASH(rq)                (!hlist_unhashed(&(rq)->hash))
+#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
 
 void blk_insert_flush(struct request *rq);
 void blk_abort_flushes(struct request_queue *q);
index ff64ae3..420a5a9 100644 (file)
@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
        struct bsg_device *bd;
-       struct hlist_node *entry;
 
        mutex_lock(&bsg_mutex);
 
-       hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+       hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
                if (bd->queue == q) {
                        atomic_inc(&bd->ref_count);
                        goto found;
@@ -997,7 +996,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
 {
        struct bsg_class_device *bcd;
        dev_t dev;
-       int ret, minor;
+       int ret;
        struct device *class_dev = NULL;
        const char *devname;
 
@@ -1017,23 +1016,16 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
 
        mutex_lock(&bsg_mutex);
 
-       ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
-       if (!ret) {
-               ret = -ENOMEM;
-               goto unlock;
-       }
-
-       ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
-       if (ret < 0)
+       ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+       if (ret < 0) {
+               if (ret == -ENOSPC) {
+                       printk(KERN_ERR "bsg: too many bsg devices\n");
+                       ret = -EINVAL;
+               }
                goto unlock;
-
-       if (minor >= BSG_MAX_DEVS) {
-               printk(KERN_ERR "bsg: too many bsg devices\n");
-               ret = -EINVAL;
-               goto remove_idr;
        }
 
-       bcd->minor = minor;
+       bcd->minor = ret;
        bcd->queue = q;
        bcd->parent = get_device(parent);
        bcd->release = release;
@@ -1059,8 +1051,7 @@ unregister_class_dev:
        device_unregister(class_dev);
 put_dev:
        put_device(parent);
-remove_idr:
-       idr_remove(&bsg_minor_idr, minor);
+       idr_remove(&bsg_minor_idr, bcd->minor);
 unlock:
        mutex_unlock(&bsg_mutex);
        return ret;
index e62e920..4f0ade7 100644 (file)
@@ -85,7 +85,6 @@ struct cfq_rb_root {
        struct rb_root rb;
        struct rb_node *left;
        unsigned count;
-       unsigned total_weight;
        u64 min_vdisktime;
        struct cfq_ttime ttime;
 };
@@ -155,7 +154,7 @@ struct cfq_queue {
  * First index in the service_trees.
  * IDLE is handled separately, so it has negative index
  */
-enum wl_prio_t {
+enum wl_class_t {
        BE_WORKLOAD = 0,
        RT_WORKLOAD = 1,
        IDLE_WORKLOAD = 2,
@@ -223,10 +222,45 @@ struct cfq_group {
 
        /* group service_tree key */
        u64 vdisktime;
+
+       /*
+        * The number of active cfqgs and sum of their weights under this
+        * cfqg.  This covers this cfqg's leaf_weight and all children's
+        * weights, but does not cover weights of further descendants.
+        *
+        * If a cfqg is on the service tree, it's active.  An active cfqg
+        * also activates its parent and contributes to the children_weight
+        * of the parent.
+        */
+       int nr_active;
+       unsigned int children_weight;
+
+       /*
+        * vfraction is the fraction of vdisktime that the tasks in this
+        * cfqg are entitled to.  This is determined by compounding the
+        * ratios walking up from this cfqg to the root.
+        *
+        * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
+        * vfractions on a service tree is approximately 1.  The sum may
+        * deviate a bit due to rounding errors and fluctuations caused by
+        * cfqgs entering and leaving the service tree.
+        */
+       unsigned int vfraction;
+
+       /*
+        * There are two weights - (internal) weight is the weight of this
+        * cfqg against the sibling cfqgs.  leaf_weight is the weight of
+        * this cfqg against the child cfqgs.  For the root cfqg, both
+        * weights are kept in sync for backward compatibility.
+        */
        unsigned int weight;
        unsigned int new_weight;
        unsigned int dev_weight;
 
+       unsigned int leaf_weight;
+       unsigned int new_leaf_weight;
+       unsigned int dev_leaf_weight;
+
        /* number of cfqq currently on this group */
        int nr_cfqq;
 
@@ -248,14 +282,15 @@ struct cfq_group {
        struct cfq_rb_root service_trees[2][3];
        struct cfq_rb_root service_tree_idle;
 
-       unsigned long saved_workload_slice;
-       enum wl_type_t saved_workload;
-       enum wl_prio_t saved_serving_prio;
+       unsigned long saved_wl_slice;
+       enum wl_type_t saved_wl_type;
+       enum wl_class_t saved_wl_class;
 
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
        struct cfq_ttime ttime;
-       struct cfqg_stats stats;
+       struct cfqg_stats stats;        /* stats for this cfqg */
+       struct cfqg_stats dead_stats;   /* stats pushed from dead children */
 };
 
 struct cfq_io_cq {
@@ -280,8 +315,8 @@ struct cfq_data {
        /*
         * The priority currently being served
         */
-       enum wl_prio_t serving_prio;
-       enum wl_type_t serving_type;
+       enum wl_class_t serving_wl_class;
+       enum wl_type_t serving_wl_type;
        unsigned long workload_expires;
        struct cfq_group *serving_group;
 
@@ -353,17 +388,17 @@ struct cfq_data {
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
-static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
-                                           enum wl_prio_t prio,
+static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
+                                           enum wl_class_t class,
                                            enum wl_type_t type)
 {
        if (!cfqg)
                return NULL;
 
-       if (prio == IDLE_WORKLOAD)
+       if (class == IDLE_WORKLOAD)
                return &cfqg->service_tree_idle;
 
-       return &cfqg->service_trees[prio][type];
+       return &cfqg->service_trees[class][type];
 }
 
 enum cfqq_state_flags {
@@ -502,7 +537,7 @@ static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 {
        struct cfqg_stats *stats = &cfqg->stats;
 
-       if (blkg_rwstat_sum(&stats->queued))
+       if (blkg_rwstat_total(&stats->queued))
                return;
 
        /*
@@ -546,7 +581,7 @@ static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
        struct cfqg_stats *stats = &cfqg->stats;
 
        blkg_stat_add(&stats->avg_queue_size_sum,
-                     blkg_rwstat_sum(&stats->queued));
+                     blkg_rwstat_total(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        cfqg_stats_update_group_wait_time(stats);
 }
@@ -572,6 +607,13 @@ static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
        return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
 }
 
+static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
+{
+       struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
+
+       return pblkg ? blkg_to_cfqg(pblkg) : NULL;
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
        return blkg_get(cfqg_to_blkg(cfqg));
@@ -586,8 +628,9 @@ static inline void cfqg_put(struct cfq_group *cfqg)
        char __pbuf[128];                                               \
                                                                        \
        blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
-       blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
-                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A',            \
+       blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
+                       cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
+                       cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
                          __pbuf, ##args);                              \
 } while (0)
 
@@ -646,11 +689,9 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
                                io_start_time - start_time);
 }
 
-static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+/* @stats = 0 */
+static void cfqg_stats_reset(struct cfqg_stats *stats)
 {
-       struct cfq_group *cfqg = blkg_to_cfqg(blkg);
-       struct cfqg_stats *stats = &cfqg->stats;
-
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->service_bytes);
        blkg_rwstat_reset(&stats->serviced);
@@ -669,13 +710,58 @@ static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
 #endif
 }
 
+/* @to += @from */
+static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
+{
+       /* queued stats shouldn't be cleared */
+       blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
+       blkg_rwstat_merge(&to->serviced, &from->serviced);
+       blkg_rwstat_merge(&to->merged, &from->merged);
+       blkg_rwstat_merge(&to->service_time, &from->service_time);
+       blkg_rwstat_merge(&to->wait_time, &from->wait_time);
+       blkg_stat_merge(&to->time, &from->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+       blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
+       blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+       blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
+       blkg_stat_merge(&to->dequeue, &from->dequeue);
+       blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
+       blkg_stat_merge(&to->idle_time, &from->idle_time);
+       blkg_stat_merge(&to->empty_time, &from->empty_time);
+#endif
+}
+
+/*
+ * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
+ * recursive stats can still account for the amount used by this cfqg after
+ * it's gone.
+ */
+static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
+{
+       struct cfq_group *parent = cfqg_parent(cfqg);
+
+       lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
+
+       if (unlikely(!parent))
+               return;
+
+       cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
+       cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
+       cfqg_stats_reset(&cfqg->stats);
+       cfqg_stats_reset(&cfqg->dead_stats);
+}
+
 #else  /* CONFIG_CFQ_GROUP_IOSCHED */
 
+static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
 static inline void cfqg_get(struct cfq_group *cfqg) { }
 static inline void cfqg_put(struct cfq_group *cfqg) { }
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
-       blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+       blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
+                       cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
+                       cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
+                               ##args)
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)         do {} while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
@@ -732,7 +818,7 @@ static inline bool iops_mode(struct cfq_data *cfqd)
                return false;
 }
 
-static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
 {
        if (cfq_class_idle(cfqq))
                return IDLE_WORKLOAD;
@@ -751,23 +837,23 @@ static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
        return SYNC_WORKLOAD;
 }
 
-static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
+static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
                                        struct cfq_data *cfqd,
                                        struct cfq_group *cfqg)
 {
-       if (wl == IDLE_WORKLOAD)
+       if (wl_class == IDLE_WORKLOAD)
                return cfqg->service_tree_idle.count;
 
-       return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
-               + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
-               + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
+       return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
+               cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
+               cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 }
 
 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
                                        struct cfq_group *cfqg)
 {
-       return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
-               cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
+       return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
+               cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 }
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
@@ -847,13 +933,27 @@ cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 }
 
-static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
+/**
+ * cfqg_scale_charge - scale disk time charge according to cfqg weight
+ * @charge: disk time being charged
+ * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
+ *
+ * Scale @charge according to @vfraction, which is in range (0, 1].  The
+ * scaling is inversely proportional.
+ *
+ * scaled = charge / vfraction
+ *
+ * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
+ */
+static inline u64 cfqg_scale_charge(unsigned long charge,
+                                   unsigned int vfraction)
 {
-       u64 d = delta << CFQ_SERVICE_SHIFT;
+       u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */
 
-       d = d * CFQ_WEIGHT_DEFAULT;
-       do_div(d, cfqg->weight);
-       return d;
+       /* charge / vfraction */
+       c <<= CFQ_SERVICE_SHIFT;
+       do_div(c, vfraction);
+       return c;
 }
 
 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
@@ -909,9 +1009,7 @@ static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
 static inline unsigned
 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-       struct cfq_rb_root *st = &cfqd->grp_service_tree;
-
-       return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
+       return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
 }
 
 static inline unsigned
@@ -1178,20 +1276,61 @@ static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+
        if (cfqg->new_weight) {
                cfqg->weight = cfqg->new_weight;
                cfqg->new_weight = 0;
        }
+
+       if (cfqg->new_leaf_weight) {
+               cfqg->leaf_weight = cfqg->new_leaf_weight;
+               cfqg->new_leaf_weight = 0;
+       }
 }
 
 static void
 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 {
+       unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
+       struct cfq_group *pos = cfqg;
+       struct cfq_group *parent;
+       bool propagate;
+
+       /* add to the service tree */
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
        cfq_update_group_weight(cfqg);
        __cfq_group_service_tree_add(st, cfqg);
-       st->total_weight += cfqg->weight;
+
+       /*
+        * Activate @cfqg and calculate the portion of vfraction @cfqg is
+        * entitled to.  vfraction is calculated by walking the tree
+        * towards the root calculating the fraction it has at each level.
+        * The compounded ratio is how much vfraction @cfqg owns.
+        *
+        * Start with the proportion tasks in this cfqg has against active
+        * children cfqgs - its leaf_weight against children_weight.
+        */
+       propagate = !pos->nr_active++;
+       pos->children_weight += pos->leaf_weight;
+       vfr = vfr * pos->leaf_weight / pos->children_weight;
+
+       /*
+        * Compound ->weight walking up the tree.  Both activation and
+        * vfraction calculation are done in the same loop.  Propagation
+        * stops once an already activated node is met.  vfraction
+        * calculation should always continue to the root.
+        */
+       while ((parent = cfqg_parent(pos))) {
+               if (propagate) {
+                       propagate = !parent->nr_active++;
+                       parent->children_weight += pos->weight;
+               }
+               vfr = vfr * pos->weight / parent->children_weight;
+               pos = parent;
+       }
+
+       cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
 static void
@@ -1222,7 +1361,32 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 static void
 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
 {
-       st->total_weight -= cfqg->weight;
+       struct cfq_group *pos = cfqg;
+       bool propagate;
+
+       /*
+        * Undo activation from cfq_group_service_tree_add().  Deactivate
+        * @cfqg and propagate deactivation upwards.
+        */
+       propagate = !--pos->nr_active;
+       pos->children_weight -= pos->leaf_weight;
+
+       while (propagate) {
+               struct cfq_group *parent = cfqg_parent(pos);
+
+               /* @pos has 0 nr_active at this point */
+               WARN_ON_ONCE(pos->children_weight);
+               pos->vfraction = 0;
+
+               if (!parent)
+                       break;
+
+               propagate = !--parent->nr_active;
+               parent->children_weight -= pos->weight;
+               pos = parent;
+       }
+
+       /* remove from the service tree */
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
 }
@@ -1241,7 +1405,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfq_group_service_tree_del(st, cfqg);
-       cfqg->saved_workload_slice = 0;
+       cfqg->saved_wl_slice = 0;
        cfqg_stats_update_dequeue(cfqg);
 }
 
@@ -1284,6 +1448,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
        unsigned int used_sl, charge, unaccounted_sl = 0;
        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                        - cfqg->service_tree_idle.count;
+       unsigned int vfr;
 
        BUG_ON(nr_sync < 0);
        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
@@ -1293,20 +1458,25 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
        else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
                charge = cfqq->allocated_slice;
 
-       /* Can't update vdisktime while group is on service tree */
+       /*
+        * Can't update vdisktime while on service tree and cfqg->vfraction
+        * is valid only while on it.  Cache vfr, leave the service tree,
+        * update vdisktime and go back on.  The re-addition to the tree
+        * will also update the weights as necessary.
+        */
+       vfr = cfqg->vfraction;
        cfq_group_service_tree_del(st, cfqg);
-       cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
-       /* If a new weight was requested, update now, off tree */
+       cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
        cfq_group_service_tree_add(st, cfqg);
 
        /* This group is being expired. Save the context */
        if (time_after(cfqd->workload_expires, jiffies)) {
-               cfqg->saved_workload_slice = cfqd->workload_expires
+               cfqg->saved_wl_slice = cfqd->workload_expires
                                                - jiffies;
-               cfqg->saved_workload = cfqd->serving_type;
-               cfqg->saved_serving_prio = cfqd->serving_prio;
+               cfqg->saved_wl_type = cfqd->serving_wl_type;
+               cfqg->saved_wl_class = cfqd->serving_wl_class;
        } else
-               cfqg->saved_workload_slice = 0;
+               cfqg->saved_wl_slice = 0;
 
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
@@ -1344,6 +1514,52 @@ static void cfq_pd_init(struct blkcg_gq *blkg)
 
        cfq_init_cfqg_base(cfqg);
        cfqg->weight = blkg->blkcg->cfq_weight;
+       cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
+}
+
+static void cfq_pd_offline(struct blkcg_gq *blkg)
+{
+       /*
+        * @blkg is going offline and will be ignored by
+        * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
+        * that they don't get lost.  If IOs complete after this point, the
+        * stats for them will be lost.  Oh well...
+        */
+       cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
+}
+
+/* offset delta from cfqg->stats to cfqg->dead_stats */
+static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
+                                       offsetof(struct cfq_group, stats);
+
+/* to be used by recursive prfill, sums live and dead stats recursively */
+static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
+{
+       u64 sum = 0;
+
+       sum += blkg_stat_recursive_sum(pd, off);
+       sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
+       return sum;
+}
+
+/* to be used by recursive prfill, sums live and dead rwstats recursively */
+static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
+                                                      int off)
+{
+       struct blkg_rwstat a, b;
+
+       a = blkg_rwstat_recursive_sum(pd, off);
+       b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
+       blkg_rwstat_merge(&a, &b);
+       return a;
+}
+
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+       struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+       cfqg_stats_reset(&cfqg->stats);
+       cfqg_stats_reset(&cfqg->dead_stats);
 }
 
 /*
@@ -1400,6 +1616,26 @@ static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
        return 0;
 }
 
+static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
+                                         struct blkg_policy_data *pd, int off)
+{
+       struct cfq_group *cfqg = pd_to_cfqg(pd);
+
+       if (!cfqg->dev_leaf_weight)
+               return 0;
+       return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
+}
+
+static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
+                                        struct cftype *cft,
+                                        struct seq_file *sf)
+{
+       blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+                         cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
+                         false);
+       return 0;
+}
+
 static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
                            struct seq_file *sf)
 {
@@ -1407,8 +1643,16 @@ static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
        return 0;
 }
 
-static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-                                 const char *buf)
+static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
+                                struct seq_file *sf)
+{
+       seq_printf(sf, "%u\n",
+                  cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
+       return 0;
+}
+
+static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                   const char *buf, bool is_leaf_weight)
 {
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
        struct blkg_conf_ctx ctx;
@@ -1422,8 +1666,13 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
        ret = -EINVAL;
        cfqg = blkg_to_cfqg(ctx.blkg);
        if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
-               cfqg->dev_weight = ctx.v;
-               cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+               if (!is_leaf_weight) {
+                       cfqg->dev_weight = ctx.v;
+                       cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
+               } else {
+                       cfqg->dev_leaf_weight = ctx.v;
+                       cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
+               }
                ret = 0;
        }
 
@@ -1431,29 +1680,63 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
        return ret;
 }
 
-static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                 const char *buf)
+{
+       return __cfqg_set_weight_device(cgrp, cft, buf, false);
+}
+
+static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
+                                      const char *buf)
+{
+       return __cfqg_set_weight_device(cgrp, cft, buf, true);
+}
+
+static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
+                           bool is_leaf_weight)
 {
        struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
        struct blkcg_gq *blkg;
-       struct hlist_node *n;
 
        if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
                return -EINVAL;
 
        spin_lock_irq(&blkcg->lock);
-       blkcg->cfq_weight = (unsigned int)val;
 
-       hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+       if (!is_leaf_weight)
+               blkcg->cfq_weight = val;
+       else
+               blkcg->cfq_leaf_weight = val;
+
+       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
-               if (cfqg && !cfqg->dev_weight)
-                       cfqg->new_weight = blkcg->cfq_weight;
+               if (!cfqg)
+                       continue;
+
+               if (!is_leaf_weight) {
+                       if (!cfqg->dev_weight)
+                               cfqg->new_weight = blkcg->cfq_weight;
+               } else {
+                       if (!cfqg->dev_leaf_weight)
+                               cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
+               }
        }
 
        spin_unlock_irq(&blkcg->lock);
        return 0;
 }
 
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+       return __cfq_set_weight(cgrp, cft, val, false);
+}
+
+static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+       return __cfq_set_weight(cgrp, cft, val, true);
+}
+
 static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
                           struct seq_file *sf)
 {
@@ -1474,6 +1757,42 @@ static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
        return 0;
 }
 
+static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
+                                     struct blkg_policy_data *pd, int off)
+{
+       u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
+
+       return __blkg_prfill_u64(sf, pd, sum);
+}
+
+static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
+                                       struct blkg_policy_data *pd, int off)
+{
+       struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
+
+       return __blkg_prfill_rwstat(sf, pd, &sum);
+}
+
+static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
+                                    struct seq_file *sf)
+{
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+       blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
+                         &blkcg_policy_cfq, cft->private, false);
+       return 0;
+}
+
+static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
+                                      struct seq_file *sf)
+{
+       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+       blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
+                         &blkcg_policy_cfq, cft->private, true);
+       return 0;
+}
+
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
@@ -1503,17 +1822,49 @@ static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
 #endif /* CONFIG_DEBUG_BLK_CGROUP */
 
 static struct cftype cfq_blkcg_files[] = {
+       /* on root, weight is mapped to leaf_weight */
+       {
+               .name = "weight_device",
+               .flags = CFTYPE_ONLY_ON_ROOT,
+               .read_seq_string = cfqg_print_leaf_weight_device,
+               .write_string = cfqg_set_leaf_weight_device,
+               .max_write_len = 256,
+       },
+       {
+               .name = "weight",
+               .flags = CFTYPE_ONLY_ON_ROOT,
+               .read_seq_string = cfq_print_leaf_weight,
+               .write_u64 = cfq_set_leaf_weight,
+       },
+
+       /* no such mapping necessary for !roots */
        {
                .name = "weight_device",
+               .flags = CFTYPE_NOT_ON_ROOT,
                .read_seq_string = cfqg_print_weight_device,
                .write_string = cfqg_set_weight_device,
                .max_write_len = 256,
        },
        {
                .name = "weight",
+               .flags = CFTYPE_NOT_ON_ROOT,
                .read_seq_string = cfq_print_weight,
                .write_u64 = cfq_set_weight,
        },
+
+       {
+               .name = "leaf_weight_device",
+               .read_seq_string = cfqg_print_leaf_weight_device,
+               .write_string = cfqg_set_leaf_weight_device,
+               .max_write_len = 256,
+       },
+       {
+               .name = "leaf_weight",
+               .read_seq_string = cfq_print_leaf_weight,
+               .write_u64 = cfq_set_leaf_weight,
+       },
+
+       /* statistics, covers only the tasks in the cfqg */
        {
                .name = "time",
                .private = offsetof(struct cfq_group, stats.time),
@@ -1554,6 +1905,48 @@ static struct cftype cfq_blkcg_files[] = {
                .private = offsetof(struct cfq_group, stats.queued),
                .read_seq_string = cfqg_print_rwstat,
        },
+
+       /* the same statistics which cover the cfqg and its descendants */
+       {
+               .name = "time_recursive",
+               .private = offsetof(struct cfq_group, stats.time),
+               .read_seq_string = cfqg_print_stat_recursive,
+       },
+       {
+               .name = "sectors_recursive",
+               .private = offsetof(struct cfq_group, stats.sectors),
+               .read_seq_string = cfqg_print_stat_recursive,
+       },
+       {
+               .name = "io_service_bytes_recursive",
+               .private = offsetof(struct cfq_group, stats.service_bytes),
+               .read_seq_string = cfqg_print_rwstat_recursive,
+       },
+       {
+               .name = "io_serviced_recursive",
+               .private = offsetof(struct cfq_group, stats.serviced),
+               .read_seq_string = cfqg_print_rwstat_recursive,
+       },
+       {
+               .name = "io_service_time_recursive",
+               .private = offsetof(struct cfq_group, stats.service_time),
+               .read_seq_string = cfqg_print_rwstat_recursive,
+       },
+       {
+               .name = "io_wait_time_recursive",
+               .private = offsetof(struct cfq_group, stats.wait_time),
+               .read_seq_string = cfqg_print_rwstat_recursive,
+       },
+       {
+               .name = "io_merged_recursive",
+               .private = offsetof(struct cfq_group, stats.merged),
+               .read_seq_string = cfqg_print_rwstat_recursive,
+       },
+       {
+               .name = "io_queued_recursive",
+               .private = offsetof(struct cfq_group, stats.queued),
+               .read_seq_string = cfqg_print_rwstat_recursive,
+       },
 #ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "avg_queue_size",
@@ -1612,15 +2005,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct rb_node **p, *parent;
        struct cfq_queue *__cfqq;
        unsigned long rb_key;
-       struct cfq_rb_root *service_tree;
+       struct cfq_rb_root *st;
        int left;
        int new_cfqq = 1;
 
-       service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
-                                               cfqq_type(cfqq));
+       st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
        if (cfq_class_idle(cfqq)) {
                rb_key = CFQ_IDLE_DELAY;
-               parent = rb_last(&service_tree->rb);
+               parent = rb_last(&st->rb);
                if (parent && parent != &cfqq->rb_node) {
                        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
                        rb_key += __cfqq->rb_key;
@@ -1638,7 +2030,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                cfqq->slice_resid = 0;
        } else {
                rb_key = -HZ;
-               __cfqq = cfq_rb_first(service_tree);
+               __cfqq = cfq_rb_first(st);
                rb_key += __cfqq ? __cfqq->rb_key : jiffies;
        }
 
@@ -1647,8 +2039,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                /*
                 * same position, nothing more to do
                 */
-               if (rb_key == cfqq->rb_key &&
-                   cfqq->service_tree == service_tree)
+               if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
                        return;
 
                cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
@@ -1657,11 +2048,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
        left = 1;
        parent = NULL;
-       cfqq->service_tree = service_tree;
-       p = &service_tree->rb.rb_node;
+       cfqq->service_tree = st;
+       p = &st->rb.rb_node;
        while (*p) {
-               struct rb_node **n;
-
                parent = *p;
                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
@@ -1669,22 +2058,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * sort by key, that represents service time.
                 */
                if (time_before(rb_key, __cfqq->rb_key))
-                       n = &(*p)->rb_left;
+                       p = &parent->rb_left;
                else {
-                       n = &(*p)->rb_right;
+                       p = &parent->rb_right;
                        left = 0;
                }
-
-               p = n;
        }
 
        if (left)
-               service_tree->left = &cfqq->rb_node;
+               st->left = &cfqq->rb_node;
 
        cfqq->rb_key = rb_key;
        rb_link_node(&cfqq->rb_node, parent, p);
-       rb_insert_color(&cfqq->rb_node, &service_tree->rb);
-       service_tree->count++;
+       rb_insert_color(&cfqq->rb_node, &st->rb);
+       st->count++;
        if (add_front || !new_cfqq)
                return;
        cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
@@ -2030,8 +2417,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                                   struct cfq_queue *cfqq)
 {
        if (cfqq) {
-               cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
-                               cfqd->serving_prio, cfqd->serving_type);
+               cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
+                               cfqd->serving_wl_class, cfqd->serving_wl_type);
                cfqg_stats_update_avg_queue_size(cfqq->cfqg);
                cfqq->slice_start = 0;
                cfqq->dispatch_start = jiffies;
@@ -2117,19 +2504,18 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
  */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
-       struct cfq_rb_root *service_tree =
-               service_tree_for(cfqd->serving_group, cfqd->serving_prio,
-                                       cfqd->serving_type);
+       struct cfq_rb_root *st = st_for(cfqd->serving_group,
+                       cfqd->serving_wl_class, cfqd->serving_wl_type);
 
        if (!cfqd->rq_queued)
                return NULL;
 
        /* There is nothing to dispatch */
-       if (!service_tree)
+       if (!st)
                return NULL;
-       if (RB_EMPTY_ROOT(&service_tree->rb))
+       if (RB_EMPTY_ROOT(&st->rb))
                return NULL;
-       return cfq_rb_first(service_tree);
+       return cfq_rb_first(st);
 }
 
 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
@@ -2285,17 +2671,17 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 
 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       enum wl_prio_t prio = cfqq_prio(cfqq);
-       struct cfq_rb_root *service_tree = cfqq->service_tree;
+       enum wl_class_t wl_class = cfqq_class(cfqq);
+       struct cfq_rb_root *st = cfqq->service_tree;
 
-       BUG_ON(!service_tree);
-       BUG_ON(!service_tree->count);
+       BUG_ON(!st);
+       BUG_ON(!st->count);
 
        if (!cfqd->cfq_slice_idle)
                return false;
 
        /* We never do for idle class queues. */
-       if (prio == IDLE_WORKLOAD)
+       if (wl_class == IDLE_WORKLOAD)
                return false;
 
        /* We do for queues that were marked with idle window flag. */
@@ -2307,11 +2693,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * Otherwise, we do only if they are the last ones
         * in their service tree.
         */
-       if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
-          !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
+       if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
+          !cfq_io_thinktime_big(cfqd, &st->ttime, false))
                return true;
-       cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
-                       service_tree->count);
+       cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
        return false;
 }
 
@@ -2494,8 +2879,8 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
        }
 }
 
-static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
-                               struct cfq_group *cfqg, enum wl_prio_t prio)
+static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
+                       struct cfq_group *cfqg, enum wl_class_t wl_class)
 {
        struct cfq_queue *queue;
        int i;
@@ -2505,7 +2890,7 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
        for (i = 0; i <= SYNC_WORKLOAD; ++i) {
                /* select the one with lowest rb_key */
-               queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
+               queue = cfq_rb_first(st_for(cfqg, wl_class, i));
                if (queue &&
                    (!key_valid || time_before(queue->rb_key, lowest_key))) {
                        lowest_key = queue->rb_key;
@@ -2517,26 +2902,27 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
        return cur_best;
 }
 
-static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static void
+choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
        unsigned slice;
        unsigned count;
        struct cfq_rb_root *st;
        unsigned group_slice;
-       enum wl_prio_t original_prio = cfqd->serving_prio;
+       enum wl_class_t original_class = cfqd->serving_wl_class;
 
        /* Choose next priority. RT > BE > IDLE */
        if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
-               cfqd->serving_prio = RT_WORKLOAD;
+               cfqd->serving_wl_class = RT_WORKLOAD;
        else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
-               cfqd->serving_prio = BE_WORKLOAD;
+               cfqd->serving_wl_class = BE_WORKLOAD;
        else {
-               cfqd->serving_prio = IDLE_WORKLOAD;
+               cfqd->serving_wl_class = IDLE_WORKLOAD;
                cfqd->workload_expires = jiffies + 1;
                return;
        }
 
-       if (original_prio != cfqd->serving_prio)
+       if (original_class != cfqd->serving_wl_class)
                goto new_workload;
 
        /*
@@ -2544,7 +2930,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
         * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
         * expiration time
         */
-       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+       st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
        count = st->count;
 
        /*
@@ -2555,9 +2941,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
 new_workload:
        /* otherwise select new workload type */
-       cfqd->serving_type =
-               cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
-       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
+       cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
+                                       cfqd->serving_wl_class);
+       st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
        count = st->count;
 
        /*
@@ -2568,10 +2954,11 @@ new_workload:
        group_slice = cfq_group_slice(cfqd, cfqg);
 
        slice = group_slice * count /
-               max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
-                     cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
+               max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
+                     cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
+                                       cfqg));
 
-       if (cfqd->serving_type == ASYNC_WORKLOAD) {
+       if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
                unsigned int tmp;
 
                /*
@@ -2617,14 +3004,14 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
        cfqd->serving_group = cfqg;
 
        /* Restore the workload type data */
-       if (cfqg->saved_workload_slice) {
-               cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
-               cfqd->serving_type = cfqg->saved_workload;
-               cfqd->serving_prio = cfqg->saved_serving_prio;
+       if (cfqg->saved_wl_slice) {
+               cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
+               cfqd->serving_wl_type = cfqg->saved_wl_type;
+               cfqd->serving_wl_class = cfqg->saved_wl_class;
        } else
                cfqd->workload_expires = jiffies - 1;
 
-       choose_service_tree(cfqd, cfqg);
+       choose_wl_class_and_type(cfqd, cfqg);
 }
 
 /*
@@ -3206,6 +3593,8 @@ retry:
                        spin_lock_irq(cfqd->queue->queue_lock);
                        if (new_cfqq)
                                goto retry;
+                       else
+                               return &cfqd->oom_cfqq;
                } else {
                        cfqq = kmem_cache_alloc_node(cfq_pool,
                                        gfp_mask | __GFP_ZERO,
@@ -3403,7 +3792,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                return true;
 
        /* Allow preemption only if we are idling on sync-noidle tree */
-       if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
+       if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
            cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
            new_cfqq->service_tree->count == 2 &&
            RB_EMPTY_ROOT(&cfqq->sort_list))
@@ -3455,7 +3844,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * doesn't happen
         */
        if (old_type != cfqq_type(cfqq))
-               cfqq->cfqg->saved_workload_slice = 0;
+               cfqq->cfqg->saved_wl_slice = 0;
 
        /*
         * Put the new queue at the front of the of the current list,
@@ -3637,16 +4026,17 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
        if (sync) {
-               struct cfq_rb_root *service_tree;
+               struct cfq_rb_root *st;
 
                RQ_CIC(rq)->ttime.last_end_request = now;
 
                if (cfq_cfqq_on_rr(cfqq))
-                       service_tree = cfqq->service_tree;
+                       st = cfqq->service_tree;
                else
-                       service_tree = service_tree_for(cfqq->cfqg,
-                               cfqq_prio(cfqq), cfqq_type(cfqq));
-               service_tree->ttime.last_end_request = now;
+                       st = st_for(cfqq->cfqg, cfqq_class(cfqq),
+                                       cfqq_type(cfqq));
+
+               st->ttime.last_end_request = now;
                if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
                        cfqd->last_delayed_sync = now;
        }
@@ -3993,6 +4383,7 @@ static int cfq_init_queue(struct request_queue *q)
        cfq_init_cfqg_base(cfqd->root_group);
 #endif
        cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+       cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
 
        /*
         * Not strictly needed (since RB_ROOT just clears the node and we
@@ -4177,6 +4568,7 @@ static struct blkcg_policy blkcg_policy_cfq = {
        .cftypes                = cfq_blkcg_files,
 
        .pd_init_fn             = cfq_pd_init,
+       .pd_offline_fn          = cfq_pd_offline,
        .pd_reset_stats_fn      = cfq_pd_reset_stats,
 };
 #endif
index 603b2c1..a0ffdd9 100644 (file)
@@ -46,11 +46,6 @@ static LIST_HEAD(elv_list);
 /*
  * Merge hash stuff.
  */
-static const int elv_hash_shift = 6;
-#define ELV_HASH_BLOCK(sec)    ((sec) >> 3)
-#define ELV_HASH_FN(sec)       \
-               (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
-#define ELV_HASH_ENTRIES       (1 << elv_hash_shift)
 #define rq_hash_key(rq)                (blk_rq_pos(rq) + blk_rq_sectors(rq))
 
 /*
@@ -158,7 +153,6 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
 {
        struct elevator_queue *eq;
-       int i;
 
        eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
        if (unlikely(!eq))
@@ -167,14 +161,7 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q,
        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
-
-       eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
-                                       GFP_KERNEL, q->node);
-       if (!eq->hash)
-               goto err;
-
-       for (i = 0; i < ELV_HASH_ENTRIES; i++)
-               INIT_HLIST_HEAD(&eq->hash[i]);
+       hash_init(eq->hash);
 
        return eq;
 err:
@@ -189,7 +176,6 @@ static void elevator_release(struct kobject *kobj)
 
        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
-       kfree(e->hash);
        kfree(e);
 }
 
@@ -261,7 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
 
 static inline void __elv_rqhash_del(struct request *rq)
 {
-       hlist_del_init(&rq->hash);
+       hash_del(&rq->hash);
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -275,7 +261,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
        struct elevator_queue *e = q->elevator;
 
        BUG_ON(ELV_ON_HASH(rq));
-       hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+       hash_add(e->hash, &rq->hash, rq_hash_key(rq));
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
@@ -287,11 +273,10 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
        struct elevator_queue *e = q->elevator;
-       struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
-       struct hlist_node *entry, *next;
+       struct hlist_node *next;
        struct request *rq;
 
-       hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+       hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));
 
                if (unlikely(!rq_mergeable(rq))) {
index 5f73c24..3c001fb 100644 (file)
@@ -26,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
 struct kobject *block_depr;
 
 /* for extended dynamic devt allocation, currently only one major is used */
-#define MAX_EXT_DEVT           (1 << MINORBITS)
+#define NR_EXT_DEVT            (1 << MINORBITS)
 
 /* For extended devt allocation.  ext_devt_mutex prevents look up
  * results from going away underneath its user.
@@ -411,7 +411,7 @@ static int blk_mangle_minor(int minor)
 int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 {
        struct gendisk *disk = part_to_disk(part);
-       int idx, rc;
+       int idx;
 
        /* in consecutive minor range? */
        if (part->partno < disk->minors) {
@@ -420,19 +420,11 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
        }
 
        /* allocate ext devt */
-       do {
-               if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
-                       return -ENOMEM;
-               rc = idr_get_new(&ext_devt_idr, part, &idx);
-       } while (rc == -EAGAIN);
-
-       if (rc)
-               return rc;
-
-       if (idx > MAX_EXT_DEVT) {
-               idr_remove(&ext_devt_idr, idx);
-               return -EBUSY;
-       }
+       mutex_lock(&ext_devt_mutex);
+       idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
+       mutex_unlock(&ext_devt_mutex);
+       if (idx < 0)
+               return idx == -ENOSPC ? -EBUSY : idx;
 
        *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
        return 0;
@@ -655,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
        disk_part_iter_exit(&piter);
 
        invalidate_partition(disk, 0);
-       blk_free_devt(disk_to_dev(disk)->devt);
        set_capacity(disk, 0);
        disk->flags &= ~GENHD_FL_UP;
 
@@ -674,6 +665,7 @@ void del_gendisk(struct gendisk *disk)
                sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
        device_del(disk_to_dev(disk));
+       blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
index f1d1451..789cdea 100644 (file)
@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
        if (!part)
                return;
 
-       blk_free_devt(part_devt(part));
        rcu_assign_pointer(ptbl->part[partno], NULL);
        rcu_assign_pointer(ptbl->last_lookup, NULL);
        kobject_put(part->holder_dir);
        device_del(part_to_dev(part));
+       blk_free_devt(part_devt(part));
 
        hd_struct_put(part);
 }
@@ -418,7 +418,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
        int p, highest, res;
 rescan:
        if (state && !IS_ERR(state)) {
-               kfree(state);
+               free_partitions(state);
                state = NULL;
        }
 
@@ -525,7 +525,7 @@ rescan:
                        md_autodetect_dev(part_to_dev(part)->devt);
 #endif
        }
-       kfree(state);
+       free_partitions(state);
        return 0;
 }
 
index bc90867..19ba207 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
 
@@ -106,18 +107,45 @@ static int (*check_part[])(struct parsed_partitions *) = {
        NULL
 };
 
+static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
+{
+       struct parsed_partitions *state;
+       int nr;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
+
+       nr = disk_max_parts(hd);
+       state->parts = vzalloc(nr * sizeof(state->parts[0]));
+       if (!state->parts) {
+               kfree(state);
+               return NULL;
+       }
+
+       state->limit = nr;
+
+       return state;
+}
+
+void free_partitions(struct parsed_partitions *state)
+{
+       vfree(state->parts);
+       kfree(state);
+}
+
 struct parsed_partitions *
 check_partition(struct gendisk *hd, struct block_device *bdev)
 {
        struct parsed_partitions *state;
        int i, res, err;
 
-       state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+       state = allocate_partitions(hd);
        if (!state)
                return NULL;
        state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
        if (!state->pp_buf) {
-               kfree(state);
+               free_partitions(state);
                return NULL;
        }
        state->pp_buf[0] = '\0';
@@ -128,10 +156,9 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
        if (isdigit(state->name[strlen(state->name)-1]))
                sprintf(state->name, "p");
 
-       state->limit = disk_max_parts(hd);
        i = res = err = 0;
        while (!res && check_part[i]) {
-               memset(&state->parts, 0, sizeof(state->parts));
+               memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
                res = check_part[i++](state);
                if (res < 0) {
                        /* We have hit an I/O error which we don't report now.
@@ -161,6 +188,6 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
        printk(KERN_INFO "%s", state->pp_buf);
 
        free_page((unsigned long)state->pp_buf);
-       kfree(state);
+       free_partitions(state);
        return ERR_PTR(res);
 }
index 52b1003..eade17e 100644 (file)
@@ -15,13 +15,15 @@ struct parsed_partitions {
                int flags;
                bool has_info;
                struct partition_meta_info info;
-       } parts[DISK_MAX_PARTS];
+       } *parts;
        int next;
        int limit;
        bool access_beyond_eod;
        char *pp_buf;
 };
 
+void free_partitions(struct parsed_partitions *state);
+
 struct parsed_partitions *
 check_partition(struct gendisk *, struct block_device *);
 
index b62fb88..ff5804e 100644 (file)
@@ -310,15 +310,23 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
                goto fail;
        }
 
-       /* Check the GUID Partition Table header size */
+       /* Check the GUID Partition Table header size is too big */
        if (le32_to_cpu((*gpt)->header_size) >
                        bdev_logical_block_size(state->bdev)) {
-               pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+               pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
                        le32_to_cpu((*gpt)->header_size),
                        bdev_logical_block_size(state->bdev));
                goto fail;
        }
 
+       /* Check the GUID Partition Table header size is too small */
+       if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) {
+               pr_debug("GUID Partition Table Header size is too small: %u < %zu\n",
+                       le32_to_cpu((*gpt)->header_size),
+                       sizeof(gpt_header));
+               goto fail;
+       }
+
        /* Check the GUID Partition Table CRC */
        origcrc = le32_to_cpu((*gpt)->header_crc32);
        (*gpt)->header_crc32 = 0;
index 11f688b..76d8ba6 100644 (file)
@@ -63,6 +63,10 @@ int mac_partition(struct parsed_partitions *state)
                put_dev_sector(sect);
                return 0;
        }
+
+       if (blocks_in_map >= state->limit)
+               blocks_in_map = state->limit - 1;
+
        strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
        for (slot = 1; slot <= blocks_in_map; ++slot) {
                int pos = slot * secsize;
index 8752a5d..7681cd2 100644 (file)
@@ -455,14 +455,19 @@ int msdos_partition(struct parsed_partitions *state)
        data = read_part_sector(state, 0, &sect);
        if (!data)
                return -1;
-       if (!msdos_magic_present(data + 510)) {
+
+       /*
+        * Note order! (some AIX disks, e.g. unbootable kind,
+        * have no MSDOS 55aa)
+        */
+       if (aix_magic_present(state, data)) {
                put_dev_sector(sect);
+               strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
                return 0;
        }
 
-       if (aix_magic_present(state, data)) {
+       if (!msdos_magic_present(data + 510)) {
                put_dev_sector(sect);
-               strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
                return 0;
        }
 
index 08c57c8..6149a6e 100644 (file)
@@ -447,7 +447,7 @@ EXPORT_SYMBOL_GPL(crypto_register_template);
 void crypto_unregister_template(struct crypto_template *tmpl)
 {
        struct crypto_instance *inst;
-       struct hlist_node *p, *n;
+       struct hlist_node *n;
        struct hlist_head *list;
        LIST_HEAD(users);
 
@@ -457,7 +457,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
        list_del_init(&tmpl->list);
 
        list = &tmpl->instances;
-       hlist_for_each_entry(inst, p, list, list) {
+       hlist_for_each_entry(inst, list, list) {
                int err = crypto_remove_alg(&inst->alg, &users);
                BUG_ON(err);
        }
@@ -466,7 +466,7 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 
        up_write(&crypto_alg_sem);
 
-       hlist_for_each_entry_safe(inst, p, n, list, list) {
+       hlist_for_each_entry_safe(inst, n, list, list) {
                BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
                tmpl->free(inst);
        }
index b5721e0..3db1b75 100644 (file)
@@ -25084,38 +25084,40 @@ static struct pcomp_testvec zlib_decomp_tv_template[] = {
 static struct comp_testvec lzo_comp_tv_template[] = {
        {
                .inlen  = 70,
-               .outlen = 46,
+               .outlen = 57,
                .input  = "Join us now and share the software "
                        "Join us now and share the software ",
                .output = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
-                       "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
-                       "\x64\x20\x73\x68\x61\x72\x65\x20"
-                       "\x74\x68\x65\x20\x73\x6f\x66\x74"
-                       "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
-                       "\x3d\x88\x00\x11\x00\x00",
+                         "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
+                         "\x64\x20\x73\x68\x61\x72\x65\x20"
+                         "\x74\x68\x65\x20\x73\x6f\x66\x74"
+                         "\x77\x70\x01\x32\x88\x00\x0c\x65"
+                         "\x20\x74\x68\x65\x20\x73\x6f\x66"
+                         "\x74\x77\x61\x72\x65\x20\x11\x00"
+                         "\x00",
        }, {
                .inlen  = 159,
-               .outlen = 133,
+               .outlen = 131,
                .input  = "This document describes a compression method based on the LZO "
                        "compression algorithm.  This document defines the application of "
                        "the LZO algorithm used in UBIFS.",
-               .output = "\x00\x2b\x54\x68\x69\x73\x20\x64"
+               .output = "\x00\x2c\x54\x68\x69\x73\x20\x64"
                          "\x6f\x63\x75\x6d\x65\x6e\x74\x20"
                          "\x64\x65\x73\x63\x72\x69\x62\x65"
                          "\x73\x20\x61\x20\x63\x6f\x6d\x70"
                          "\x72\x65\x73\x73\x69\x6f\x6e\x20"
                          "\x6d\x65\x74\x68\x6f\x64\x20\x62"
                          "\x61\x73\x65\x64\x20\x6f\x6e\x20"
-                         "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
-                         "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
-                         "\x69\x74\x68\x6d\x2e\x20\x20\x54"
-                         "\x68\x69\x73\x2a\x54\x01\x02\x66"
-                         "\x69\x6e\x65\x73\x94\x06\x05\x61"
-                         "\x70\x70\x6c\x69\x63\x61\x74\x76"
-                         "\x0a\x6f\x66\x88\x02\x60\x09\x27"
-                         "\xf0\x00\x0c\x20\x75\x73\x65\x64"
-                         "\x20\x69\x6e\x20\x55\x42\x49\x46"
-                         "\x53\x2e\x11\x00\x00",
+                         "\x74\x68\x65\x20\x4c\x5a\x4f\x20"
+                         "\x2a\x8c\x00\x09\x61\x6c\x67\x6f"
+                         "\x72\x69\x74\x68\x6d\x2e\x20\x20"
+                         "\x2e\x54\x01\x03\x66\x69\x6e\x65"
+                         "\x73\x20\x74\x06\x05\x61\x70\x70"
+                         "\x6c\x69\x63\x61\x74\x76\x0a\x6f"
+                         "\x66\x88\x02\x60\x09\x27\xf0\x00"
+                         "\x0c\x20\x75\x73\x65\x64\x20\x69"
+                         "\x6e\x20\x55\x42\x49\x46\x53\x2e"
+                         "\x11\x00\x00",
        },
 };
 
index 3200060..92ed969 100644 (file)
@@ -266,7 +266,8 @@ config ACPI_CUSTOM_DSDT
        default ACPI_CUSTOM_DSDT_FILE != ""
 
 config ACPI_INITRD_TABLE_OVERRIDE
-       bool "ACPI tables can be passed via uncompressed cpio in initrd"
+       bool "ACPI tables override via initrd"
+       depends on BLK_DEV_INITRD && X86
        default n
        help
          This option provides functionality to override arbitrary ACPI tables
index 7ae2750..d668a8a 100644 (file)
@@ -48,8 +48,8 @@
 #include <linux/genalloc.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
-#include <acpi/apei.h>
-#include <acpi/hed.h>
+
+#include <acpi/ghes.h>
 #include <asm/mce.h>
 #include <asm/tlbflush.h>
 #include <asm/nmi.h>
        ((struct acpi_hest_generic_status *)                            \
         ((struct ghes_estatus_node *)(estatus_node) + 1))
 
-/*
- * One struct ghes is created for each generic hardware error source.
- * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
- * handler.
- *
- * estatus: memory buffer for error status block, allocated during
- * HEST parsing.
- */
-#define GHES_TO_CLEAR          0x0001
-#define GHES_EXITING           0x0002
-
-struct ghes {
-       struct acpi_hest_generic *generic;
-       struct acpi_hest_generic_status *estatus;
-       u64 buffer_paddr;
-       unsigned long flags;
-       union {
-               struct list_head list;
-               struct timer_list timer;
-               unsigned int irq;
-       };
-};
-
-struct ghes_estatus_node {
-       struct llist_node llnode;
-       struct acpi_hest_generic *generic;
-};
-
-struct ghes_estatus_cache {
-       u32 estatus_len;
-       atomic_t count;
-       struct acpi_hest_generic *generic;
-       unsigned long long time_in;
-       struct rcu_head rcu;
-};
-
 bool ghes_disable;
 module_param_named(disable, ghes_disable, bool, 0);
 
@@ -333,13 +297,6 @@ static void ghes_fini(struct ghes *ghes)
        apei_unmap_generic_address(&ghes->generic->error_status_address);
 }
 
-enum {
-       GHES_SEV_NO = 0x0,
-       GHES_SEV_CORRECTED = 0x1,
-       GHES_SEV_RECOVERABLE = 0x2,
-       GHES_SEV_PANIC = 0x3,
-};
-
 static inline int ghes_severity(int severity)
 {
        switch (severity) {
@@ -452,7 +409,8 @@ static void ghes_clear_estatus(struct ghes *ghes)
        ghes->flags &= ~GHES_TO_CLEAR;
 }
 
-static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
+static void ghes_do_proc(struct ghes *ghes,
+                        const struct acpi_hest_generic_status *estatus)
 {
        int sev, sec_sev;
        struct acpi_hest_generic_data *gdata;
@@ -464,6 +422,8 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
                                 CPER_SEC_PLATFORM_MEM)) {
                        struct cper_sec_mem_err *mem_err;
                        mem_err = (struct cper_sec_mem_err *)(gdata+1);
+                       ghes_edac_report_mem_error(ghes, sev, mem_err);
+
 #ifdef CONFIG_X86_MCE
                        apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
                                                  mem_err);
@@ -682,7 +642,7 @@ static int ghes_proc(struct ghes *ghes)
                if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
                        ghes_estatus_cache_add(ghes->generic, ghes->estatus);
        }
-       ghes_do_proc(ghes->estatus);
+       ghes_do_proc(ghes, ghes->estatus);
 out:
        ghes_clear_estatus(ghes);
        return 0;
@@ -775,7 +735,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
                estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
                len = apei_estatus_len(estatus);
                node_len = GHES_ESTATUS_NODE_LEN(len);
-               ghes_do_proc(estatus);
+               ghes_do_proc(estatus_node->ghes, estatus);
                if (!ghes_estatus_cached(estatus)) {
                        generic = estatus_node->generic;
                        if (ghes_print_estatus(NULL, generic, estatus))
@@ -864,6 +824,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
                estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
                                                      node_len);
                if (estatus_node) {
+                       estatus_node->ghes = ghes;
                        estatus_node->generic = ghes->generic;
                        estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
                        memcpy(estatus, ghes->estatus, len);
@@ -942,6 +903,11 @@ static int ghes_probe(struct platform_device *ghes_dev)
                ghes = NULL;
                goto err;
        }
+
+       rc = ghes_edac_register(ghes, &ghes_dev->dev);
+       if (rc < 0)
+               goto err;
+
        switch (generic->notify.type) {
        case ACPI_HEST_NOTIFY_POLLED:
                ghes->timer.function = ghes_poll_func;
@@ -954,13 +920,13 @@ static int ghes_probe(struct platform_device *ghes_dev)
                if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
                        pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
                               generic->header.source_id);
-                       goto err;
+                       goto err_edac_unreg;
                }
                if (request_irq(ghes->irq, ghes_irq_func,
                                0, "GHES IRQ", ghes)) {
                        pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
                               generic->header.source_id);
-                       goto err;
+                       goto err_edac_unreg;
                }
                break;
        case ACPI_HEST_NOTIFY_SCI:
@@ -986,6 +952,8 @@ static int ghes_probe(struct platform_device *ghes_dev)
        platform_set_drvdata(ghes_dev, ghes);
 
        return 0;
+err_edac_unreg:
+       ghes_edac_unregister(ghes);
 err:
        if (ghes) {
                ghes_fini(ghes);
@@ -1038,6 +1006,9 @@ static int ghes_remove(struct platform_device *ghes_dev)
        }
 
        ghes_fini(ghes);
+
+       ghes_edac_unregister(ghes);
+
        kfree(ghes);
 
        platform_set_drvdata(ghes_dev, NULL);
index 59844ee..33e609f 100644 (file)
@@ -282,10 +282,10 @@ acpi_table_parse_srat(enum acpi_srat_type id,
                                            handler, max_entries);
 }
 
-static int srat_mem_cnt;
-
-void __init early_parse_srat(void)
+int __init acpi_numa_init(void)
 {
+       int cnt = 0;
+
        /*
         * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
         * SRAT cpu entries could have different order with that in MADT.
@@ -295,24 +295,21 @@ void __init early_parse_srat(void)
        /* SRAT: Static Resource Affinity Table */
        if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
                acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-                                     acpi_parse_x2apic_affinity, 0);
+                                    acpi_parse_x2apic_affinity, 0);
                acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-                                     acpi_parse_processor_affinity, 0);
-               srat_mem_cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
-                                                    acpi_parse_memory_affinity,
-                                                    NR_NODE_MEMBLKS);
+                                    acpi_parse_processor_affinity, 0);
+               cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+                                           acpi_parse_memory_affinity,
+                                           NR_NODE_MEMBLKS);
        }
-}
 
-int __init acpi_numa_init(void)
-{
        /* SLIT: System Locality Information Table */
        acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
 
        acpi_numa_arch_fixup();
 
-       if (srat_mem_cnt < 0)
-               return srat_mem_cnt;
+       if (cnt < 0)
+               return cnt;
        else if (!parsed_numa_memblks)
                return -ENOENT;
        return 0;
index ab92785..093c435 100644 (file)
@@ -130,7 +130,7 @@ static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
        writel(value, ahb->regs + offset);
 }
 
-#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+#ifdef CONFIG_TEGRA_IOMMU_SMMU
 static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
 {
        struct tegra_ahb *ahb = dev_get_drvdata(dev);
index b22d71c..0e3f8f9 100644 (file)
@@ -157,7 +157,6 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 {
        struct atm_cirange ci;
        struct atm_vcc *vcc;
-       struct hlist_node *node;
        struct sock *s;
        int i;
 
@@ -171,7 +170,7 @@ static int atmtcp_v_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
        for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
                struct hlist_head *head = &vcc_hash[i];
 
-               sk_for_each(s, node, head) {
+               sk_for_each(s, head) {
                        vcc = atm_sk(s);
                        if (vcc->dev != dev)
                                continue;
@@ -264,12 +263,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
 {
         struct hlist_head *head;
         struct atm_vcc *vcc;
-        struct hlist_node *node;
         struct sock *s;
 
         head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 
-        sk_for_each(s, node, head) {
+       sk_for_each(s, head) {
                 vcc = atm_sk(s);
                 if (vcc->dev == dev &&
                     vcc->vci == vci && vcc->vpi == vpi &&
index c1eb6fa..b1955ba 100644 (file)
@@ -2093,7 +2093,6 @@ static unsigned char eni_phy_get(struct atm_dev *dev,unsigned long addr)
 
 static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
 {
-       struct hlist_node *node;
        struct sock *s;
        static const char *signal[] = { "LOST","unknown","okay" };
        struct eni_dev *eni_dev = ENI_DEV(dev);
@@ -2171,7 +2170,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
        for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
                struct hlist_head *head = &vcc_hash[i];
 
-               sk_for_each(s, node, head) {
+               sk_for_each(s, head) {
                        struct eni_vcc *eni_vcc;
                        int length;
 
index 72b6960..d689126 100644 (file)
@@ -329,7 +329,6 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
 {
        struct hlist_head *head;
        struct atm_vcc *vcc;
-       struct hlist_node *node;
        struct sock *s;
        short vpi;
        int vci;
@@ -338,7 +337,7 @@ __find_vcc(struct he_dev *he_dev, unsigned cid)
        vci = cid & ((1 << he_dev->vcibits) - 1);
        head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
 
-       sk_for_each(s, node, head) {
+       sk_for_each(s, head) {
                vcc = atm_sk(s);
                if (vcc->dev == he_dev->atm_dev &&
                    vcc->vci == vci && vcc->vpi == vpi &&
index ed1d2b7..6587dc2 100644 (file)
@@ -251,7 +251,6 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
                if (card->scd2vc[j] != NULL)
                        free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
        }
-       idr_remove_all(&card->idr);
        idr_destroy(&card->idr);
        pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
                            card->rsq.org, card->rsq.dma);
@@ -950,11 +949,10 @@ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
 static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
 {
        struct sk_buff *handle1, *handle2;
-       u32 id1 = 0, id2 = 0;
+       int id1, id2;
        u32 addr1, addr2;
        u32 stat;
        unsigned long flags;
-       int err;
 
        /* *BARF* */
        handle2 = NULL;
@@ -1027,23 +1025,12 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
                                card->lbfqc += 2;
                }
 
-               do {
-                       if (!idr_pre_get(&card->idr, GFP_ATOMIC)) {
-                               printk(KERN_ERR
-                                      "nicstar%d: no free memory for idr\n",
-                                      card->index);
-                               goto out;
-                       }
-
-                       if (!id1)
-                               err = idr_get_new_above(&card->idr, handle1, 0, &id1);
-
-                       if (!id2 && err == 0)
-                               err = idr_get_new_above(&card->idr, handle2, 0, &id2);
-
-               } while (err == -EAGAIN);
+               id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
+               if (id1 < 0)
+                       goto out;
 
-               if (err)
+               id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
+               if (id2 < 0)
                        goto out;
 
                spin_lock_irqsave(&card->res_lock, flags);
index 0474a89..32784d1 100644 (file)
@@ -896,12 +896,11 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
 {
        struct hlist_head *head;
        struct atm_vcc *vcc = NULL;
-       struct hlist_node *node;
        struct sock *s;
 
        read_lock(&vcc_sklist_lock);
        head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
-       sk_for_each(s, node, head) {
+       sk_for_each(s, head) {
                vcc = atm_sk(s);
                if (vcc->dev == dev && vcc->vci == vci &&
                    vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE &&
index 17cf7ca..01fc5b0 100644 (file)
@@ -302,7 +302,8 @@ static int handle_remove(const char *nodename, struct device *dev)
 
        if (dentry->d_inode) {
                struct kstat stat;
-               err = vfs_getattr(parent.mnt, dentry, &stat);
+               struct path p = {.mnt = parent.mnt, .dentry = dentry};
+               err = vfs_getattr(&p, &stat);
                if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
                        struct iattr newattrs;
                        /*
index ff5b745..2a7cb0d 100644 (file)
@@ -39,6 +39,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 
        dmabuf = file->private_data;
 
+       BUG_ON(dmabuf->vmapping_counter);
+
        dmabuf->ops->release(dmabuf);
        kfree(dmabuf);
        return 0;
@@ -445,6 +447,9 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
 {
+       struct file *oldfile;
+       int ret;
+
        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;
 
@@ -458,14 +463,22 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                return -EINVAL;
 
        /* readjust the vma */
-       if (vma->vm_file)
-               fput(vma->vm_file);
-
-       vma->vm_file = get_file(dmabuf->file);
-
+       get_file(dmabuf->file);
+       oldfile = vma->vm_file;
+       vma->vm_file = dmabuf->file;
        vma->vm_pgoff = pgoff;
 
-       return dmabuf->ops->mmap(dmabuf, vma);
+       ret = dmabuf->ops->mmap(dmabuf, vma);
+       if (ret) {
+               /* restore old parameters on failure */
+               vma->vm_file = oldfile;
+               fput(dmabuf->file);
+       } else {
+               if (oldfile)
+                       fput(oldfile);
+       }
+       return ret;
+
 }
 EXPORT_SYMBOL_GPL(dma_buf_mmap);
 
@@ -481,12 +494,34 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
  */
 void *dma_buf_vmap(struct dma_buf *dmabuf)
 {
+       void *ptr;
+
        if (WARN_ON(!dmabuf))
                return NULL;
 
-       if (dmabuf->ops->vmap)
-               return dmabuf->ops->vmap(dmabuf);
-       return NULL;
+       if (!dmabuf->ops->vmap)
+               return NULL;
+
+       mutex_lock(&dmabuf->lock);
+       if (dmabuf->vmapping_counter) {
+               dmabuf->vmapping_counter++;
+               BUG_ON(!dmabuf->vmap_ptr);
+               ptr = dmabuf->vmap_ptr;
+               goto out_unlock;
+       }
+
+       BUG_ON(dmabuf->vmap_ptr);
+
+       ptr = dmabuf->ops->vmap(dmabuf);
+       if (IS_ERR_OR_NULL(ptr))
+               goto out_unlock;
+
+       dmabuf->vmap_ptr = ptr;
+       dmabuf->vmapping_counter = 1;
+
+out_unlock:
+       mutex_unlock(&dmabuf->lock);
+       return ptr;
 }
 EXPORT_SYMBOL_GPL(dma_buf_vmap);
 
@@ -500,7 +535,16 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
        if (WARN_ON(!dmabuf))
                return;
 
-       if (dmabuf->ops->vunmap)
-               dmabuf->ops->vunmap(dmabuf, vaddr);
+       BUG_ON(!dmabuf->vmap_ptr);
+       BUG_ON(dmabuf->vmapping_counter == 0);
+       BUG_ON(dmabuf->vmap_ptr != vaddr);
+
+       mutex_lock(&dmabuf->lock);
+       if (--dmabuf->vmapping_counter == 0) {
+               if (dmabuf->ops->vunmap)
+                       dmabuf->ops->vunmap(dmabuf, vaddr);
+               dmabuf->vmap_ptr = NULL;
+       }
+       mutex_unlock(&dmabuf->lock);
 }
 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
index 4a223fe..4b1f926 100644 (file)
@@ -279,7 +279,7 @@ MODULE_PARM_DESC(path, "customized firmware image search path with a higher prio
 static noinline_for_stack long fw_file_size(struct file *file)
 {
        struct kstat st;
-       if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+       if (vfs_getattr(&file->f_path, &st))
                return -1;
        if (!S_ISREG(st.mode))
                return -1;
index d3bde6c..30629a3 100644 (file)
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
                return;
        }
 
+       spin_lock_init(&pc_host->cfgspace_lock);
+
        pc->host_controller = pc_host;
        pc_host->pci_controller.io_resource = &pc_host->io_resource;
        pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
index 9a13e88..5b5ee79 100644 (file)
@@ -6547,7 +6547,7 @@ static ssize_t dac960_user_command_proc_write(struct file *file,
                                       const char __user *Buffer,
                                       size_t Count, loff_t *pos)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data;
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file_inode(file))->data;
   unsigned char CommandBuffer[80];
   int Length;
   if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
@@ -7054,6 +7054,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
        else
                ErrorCode =  0;
       }
+      break;
       default:
        ErrorCode = -ENOTTY;
     }
index 824e09c..5dc0dae 100644 (file)
@@ -63,19 +63,6 @@ config AMIGA_Z2RAM
          To compile this driver as a module, choose M here: the
          module will be called z2ram.
 
-config BLK_DEV_XD
-       tristate "XT hard disk support"
-       depends on ISA && ISA_DMA_API
-       select CHECK_SIGNATURE
-       help
-         Very old 8 bit hard disk controllers used in the IBM XT computer
-         will be supported if you say Y here.
-
-         To compile this driver as a module, choose M here: the
-         module will be called xd.
-
-         It's pretty unlikely that you have one of these: say N.
-
 config GDROM
        tristate "SEGA Dreamcast GD-ROM drive"
        depends on SH_DREAMCAST
@@ -544,4 +531,14 @@ config BLK_DEV_RBD
 
          If unsure, say N.
 
+config BLK_DEV_RSXX
+       tristate "RamSam PCIe Flash SSD Device Driver"
+       depends on PCI
+       help
+         Device driver for IBM's high speed PCIe SSD
+         storage devices: RamSan-70 and RamSan-80.
+
+         To compile this driver as a module, choose M here: the
+         module will be called rsxx.
+
 endif # BLK_DEV
index 17e82df..a3b4023 100644 (file)
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY)    += ataflop.o
 obj-$(CONFIG_AMIGA_Z2RAM)      += z2ram.o
 obj-$(CONFIG_BLK_DEV_RAM)      += brd.o
 obj-$(CONFIG_BLK_DEV_LOOP)     += loop.o
-obj-$(CONFIG_BLK_DEV_XD)       += xd.o
 obj-$(CONFIG_BLK_CPQ_DA)       += cpqarray.o
 obj-$(CONFIG_BLK_CPQ_CISS_DA)  += cciss.o
 obj-$(CONFIG_BLK_DEV_DAC960)   += DAC960.o
@@ -41,4 +40,6 @@ obj-$(CONFIG_BLK_DEV_DRBD)     += drbd/
 obj-$(CONFIG_BLK_DEV_RBD)     += rbd.o
 obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
 
+obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+
 swim_mod-y     := swim.o swim_asm.o
index 8c13eeb..e98da67 100644 (file)
@@ -2660,25 +2660,24 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
        mdev->read_requests = RB_ROOT;
        mdev->write_requests = RB_ROOT;
 
-       if (!idr_pre_get(&minors, GFP_KERNEL))
-               goto out_no_minor_idr;
-       if (idr_get_new_above(&minors, mdev, minor, &minor_got))
+       minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
+       if (minor_got < 0) {
+               if (minor_got == -ENOSPC) {
+                       err = ERR_MINOR_EXISTS;
+                       drbd_msg_put_info("requested minor exists already");
+               }
                goto out_no_minor_idr;
-       if (minor_got != minor) {
-               err = ERR_MINOR_EXISTS;
-               drbd_msg_put_info("requested minor exists already");
-               goto out_idr_remove_minor;
        }
 
-       if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
-               goto out_idr_remove_minor;
-       if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
+       vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
+       if (vnr_got < 0) {
+               if (vnr_got == -ENOSPC) {
+                       err = ERR_INVALID_REQUEST;
+                       drbd_msg_put_info("requested volume exists already");
+               }
                goto out_idr_remove_minor;
-       if (vnr_got != vnr) {
-               err = ERR_INVALID_REQUEST;
-               drbd_msg_put_info("requested volume exists already");
-               goto out_idr_remove_vol;
        }
+
        add_disk(disk);
        kref_init(&mdev->kref); /* one ref for both idrs and the the add_disk */
 
@@ -2689,8 +2688,6 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 
        return NO_ERROR;
 
-out_idr_remove_vol:
-       idr_remove(&tconn->volumes, vnr_got);
 out_idr_remove_minor:
        idr_remove(&minors, minor_got);
        synchronize_rcu();
index ae12512..747bb2a 100644 (file)
@@ -162,12 +162,13 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
 
 static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 {
-       loff_t size, loopsize;
+       loff_t loopsize;
 
        /* Compute loopsize in bytes */
-       size = i_size_read(file->f_mapping->host);
-       loopsize = size - offset;
-       /* offset is beyond i_size, wierd but possible */
+       loopsize = i_size_read(file->f_mapping->host);
+       if (offset > 0)
+               loopsize -= offset;
+       /* offset is beyond i_size, weird but possible */
        if (loopsize < 0)
                return 0;
 
@@ -190,6 +191,7 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
        loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
        sector_t x = (sector_t)size;
+       struct block_device *bdev = lo->lo_device;
 
        if (unlikely((loff_t)x != size))
                return -EFBIG;
@@ -198,6 +200,9 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
        if (lo->lo_sizelimit != sizelimit)
                lo->lo_sizelimit = sizelimit;
        set_capacity(lo->lo_disk, x);
+       bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
+       /* let user-space know about the new size */
+       kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
        return 0;
 }
 
@@ -1091,10 +1096,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
                return err;
 
        if (lo->lo_offset != info->lo_offset ||
-           lo->lo_sizelimit != info->lo_sizelimit) {
+           lo->lo_sizelimit != info->lo_sizelimit)
                if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
                        return -EFBIG;
-       }
+
        loop_config_discard(lo);
 
        memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
@@ -1139,7 +1144,7 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
 
        if (lo->lo_state != Lo_bound)
                return -ENXIO;
-       error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
+       error = vfs_getattr(&file->f_path, &stat);
        if (error)
                return error;
        memset(info, 0, sizeof(*info));
@@ -1271,28 +1276,10 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
 
 static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
 {
-       int err;
-       sector_t sec;
-       loff_t sz;
-
-       err = -ENXIO;
        if (unlikely(lo->lo_state != Lo_bound))
-               goto out;
-       err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
-       if (unlikely(err))
-               goto out;
-       sec = get_capacity(lo->lo_disk);
-       /* the width of sector_t may be narrow for bit-shift */
-       sz = sec;
-       sz <<= 9;
-       mutex_lock(&bdev->bd_mutex);
-       bd_set_size(bdev, sz);
-       /* let user-space know about the new size */
-       kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
-       mutex_unlock(&bdev->bd_mutex);
+               return -ENXIO;
 
- out:
-       return err;
+       return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 }
 
 static int lo_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1624,30 +1611,17 @@ static int loop_add(struct loop_device **l, int i)
        if (!lo)
                goto out;
 
-       if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
-               goto out_free_dev;
-
+       /* allocate id, if @id >= 0, we're requesting that specific id */
        if (i >= 0) {
-               int m;
-
-               /* create specific i in the index */
-               err = idr_get_new_above(&loop_index_idr, lo, i, &m);
-               if (err >= 0 && i != m) {
-                       idr_remove(&loop_index_idr, m);
+               err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
+               if (err == -ENOSPC)
                        err = -EEXIST;
-               }
-       } else if (i == -1) {
-               int m;
-
-               /* get next free nr */
-               err = idr_get_new(&loop_index_idr, lo, &m);
-               if (err >= 0)
-                       i = m;
        } else {
-               err = -EINVAL;
+               err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
        }
        if (err < 0)
                goto out_free_dev;
+       i = err;
 
        lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
        if (!lo->lo_queue)
@@ -1858,11 +1832,15 @@ static int __init loop_init(void)
                max_part = (1UL << part_shift) - 1;
        }
 
-       if ((1UL << part_shift) > DISK_MAX_PARTS)
-               return -EINVAL;
+       if ((1UL << part_shift) > DISK_MAX_PARTS) {
+               err = -EINVAL;
+               goto misc_out;
+       }
 
-       if (max_loop > 1UL << (MINORBITS - part_shift))
-               return -EINVAL;
+       if (max_loop > 1UL << (MINORBITS - part_shift)) {
+               err = -EINVAL;
+               goto misc_out;
+       }
 
        /*
         * If max_loop is specified, create that many devices upfront.
@@ -1880,8 +1858,10 @@ static int __init loop_init(void)
                range = 1UL << MINORBITS;
        }
 
-       if (register_blkdev(LOOP_MAJOR, "loop"))
-               return -EIO;
+       if (register_blkdev(LOOP_MAJOR, "loop")) {
+               err = -EIO;
+               goto misc_out;
+       }
 
        blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
                                  THIS_MODULE, loop_probe, NULL, NULL);
@@ -1894,6 +1874,10 @@ static int __init loop_init(void)
 
        printk(KERN_INFO "loop: module loaded\n");
        return 0;
+
+misc_out:
+       misc_deregister(&loop_misc);
+       return err;
 }
 
 static int loop_exit_cb(int id, void *ptr, void *data)
@@ -1911,7 +1895,6 @@ static void __exit loop_exit(void)
        range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
 
        idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
-       idr_remove_all(&loop_index_idr);
        idr_destroy(&loop_index_idr);
 
        blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
index 0ba837f..1fca1f9 100644 (file)
@@ -4,6 +4,6 @@
 
 config BLK_DEV_PCIESSD_MTIP32XX
        tristate "Block Device Driver for Micron PCIe SSDs"
-       depends on PCI
+       depends on PCI && GENERIC_HARDIRQS
        help
           This enables the block driver for Micron PCIe SSDs.
index 3fd1009..11cc952 100644 (file)
@@ -88,6 +88,8 @@ static int instance;
 static int mtip_major;
 static struct dentry *dfs_parent;
 
+static u32 cpu_use[NR_CPUS];
+
 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
 
@@ -296,16 +298,17 @@ static int hba_reset_nosleep(struct driver_data *dd)
  */
 static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
 {
-       atomic_set(&port->commands[tag].active, 1);
+       int group = tag >> 5;
 
-       spin_lock(&port->cmd_issue_lock);
+       atomic_set(&port->commands[tag].active, 1);
 
+       /* guard SACT and CI registers */
+       spin_lock(&port->cmd_issue_lock[group]);
        writel((1 << MTIP_TAG_BIT(tag)),
                        port->s_active[MTIP_TAG_INDEX(tag)]);
        writel((1 << MTIP_TAG_BIT(tag)),
                        port->cmd_issue[MTIP_TAG_INDEX(tag)]);
-
-       spin_unlock(&port->cmd_issue_lock);
+       spin_unlock(&port->cmd_issue_lock[group]);
 
        /* Set the command's timeout value.*/
        port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -964,56 +967,56 @@ handle_tfe_exit:
 /*
  * Handle a set device bits interrupt
  */
-static inline void mtip_process_sdbf(struct driver_data *dd)
+static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
+                                                       u32 completed)
 {
-       struct mtip_port  *port = dd->port;
-       int group, tag, bit;
-       u32 completed;
+       struct driver_data *dd = port->dd;
+       int tag, bit;
        struct mtip_cmd *command;
 
-       /* walk all bits in all slot groups */
-       for (group = 0; group < dd->slot_groups; group++) {
-               completed = readl(port->completed[group]);
-               if (!completed)
-                       continue;
+       if (!completed) {
+               WARN_ON_ONCE(!completed);
+               return;
+       }
+       /* clear completed status register in the hardware.*/
+       writel(completed, port->completed[group]);
 
-               /* clear completed status register in the hardware.*/
-               writel(completed, port->completed[group]);
+       /* Process completed commands. */
+       for (bit = 0; (bit < 32) && completed; bit++) {
+               if (completed & 0x01) {
+                       tag = (group << 5) | bit;
 
-               /* Process completed commands. */
-               for (bit = 0;
-                    (bit < 32) && completed;
-                    bit++, completed >>= 1) {
-                       if (completed & 0x01) {
-                               tag = (group << 5) | bit;
+                       /* skip internal command slot. */
+                       if (unlikely(tag == MTIP_TAG_INTERNAL))
+                               continue;
 
-                               /* skip internal command slot. */
-                               if (unlikely(tag == MTIP_TAG_INTERNAL))
-                                       continue;
+                       command = &port->commands[tag];
+                       /* make internal callback */
+                       if (likely(command->comp_func)) {
+                               command->comp_func(
+                                       port,
+                                       tag,
+                                       command->comp_data,
+                                       0);
+                       } else {
+                               dev_warn(&dd->pdev->dev,
+                                       "Null completion "
+                                       "for tag %d",
+                                       tag);
 
-                               command = &port->commands[tag];
-                               /* make internal callback */
-                               if (likely(command->comp_func)) {
-                                       command->comp_func(
-                                               port,
-                                               tag,
-                                               command->comp_data,
-                                               0);
-                               } else {
-                                       dev_warn(&dd->pdev->dev,
-                                               "Null completion "
-                                               "for tag %d",
-                                               tag);
-
-                                       if (mtip_check_surprise_removal(
-                                               dd->pdev)) {
-                                               mtip_command_cleanup(dd);
-                                               return;
-                                       }
+                               if (mtip_check_surprise_removal(
+                                       dd->pdev)) {
+                                       mtip_command_cleanup(dd);
+                                       return;
                                }
                        }
                }
+               completed >>= 1;
        }
+
+       /* If last, re-enable interrupts */
+       if (atomic_dec_return(&dd->irq_workers_active) == 0)
+               writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
 }
 
 /*
@@ -1072,6 +1075,8 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
        struct mtip_port *port = dd->port;
        u32 hba_stat, port_stat;
        int rv = IRQ_NONE;
+       int do_irq_enable = 1, i, workers;
+       struct mtip_work *twork;
 
        hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
        if (hba_stat) {
@@ -1082,8 +1087,42 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
                writel(port_stat, port->mmio + PORT_IRQ_STAT);
 
                /* Demux port status */
-               if (likely(port_stat & PORT_IRQ_SDB_FIS))
-                       mtip_process_sdbf(dd);
+               if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
+                       do_irq_enable = 0;
+                       WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
+
+                       /* Start at 1: group zero is always local? */
+                       for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
+                                                                       i++) {
+                               twork = &dd->work[i];
+                               twork->completed = readl(port->completed[i]);
+                               if (twork->completed)
+                                       workers++;
+                       }
+
+                       atomic_set(&dd->irq_workers_active, workers);
+                       if (workers) {
+                               for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
+                                       twork = &dd->work[i];
+                                       if (twork->completed)
+                                               queue_work_on(
+                                                       twork->cpu_binding,
+                                                       dd->isr_workq,
+                                                       &twork->work);
+                               }
+
+                               if (likely(dd->work[0].completed))
+                                       mtip_workq_sdbfx(port, 0,
+                                                       dd->work[0].completed);
+
+                       } else {
+                               /*
+                                * Chip quirk: SDB interrupt but nothing
+                                * to complete
+                                */
+                               do_irq_enable = 1;
+                       }
+               }
 
                if (unlikely(port_stat & PORT_IRQ_ERR)) {
                        if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
@@ -1103,20 +1142,12 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
        }
 
        /* acknowledge interrupt */
-       writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
+       if (unlikely(do_irq_enable))
+               writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
 
        return rv;
 }
 
-/*
- * Wrapper for mtip_handle_irq
- * (ignores return code)
- */
-static void mtip_tasklet(unsigned long data)
-{
-       mtip_handle_irq((struct driver_data *) data);
-}
-
 /*
  * HBA interrupt subroutine.
  *
@@ -1130,8 +1161,8 @@ static void mtip_tasklet(unsigned long data)
 static irqreturn_t mtip_irq_handler(int irq, void *instance)
 {
        struct driver_data *dd = instance;
-       tasklet_schedule(&dd->tasklet);
-       return IRQ_HANDLED;
+
+       return mtip_handle_irq(dd);
 }
 
 static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
@@ -1489,6 +1520,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
        }
 #endif
 
+       /* Demux ID.DRAT & ID.RZAT to determine trim support */
+       if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
+               port->dd->trim_supp = true;
+       else
+               port->dd->trim_supp = false;
+
        /* Set the identify buffer as valid. */
        port->identify_valid = 1;
 
@@ -1675,6 +1712,81 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
        return rv;
 }
 
+/*
+ * Trim unused sectors
+ *
+ * @dd         pointer to driver_data structure
+ * @lba                starting lba
+ * @len                # of 512b sectors to trim
+ *
+ * return value
+ *      -ENOMEM                Out of dma memory
+ *      -EINVAL                Invalid parameters passed in, trim not supported
+ *      -EIO           Error submitting trim request to hw
+ */
+static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len)
+{
+       int i, rv = 0;
+       u64 tlba, tlen, sect_left;
+       struct mtip_trim_entry *buf;
+       dma_addr_t dma_addr;
+       struct host_to_dev_fis fis;
+
+       if (!len || dd->trim_supp == false)
+               return -EINVAL;
+
+       /* Trim request too big */
+       WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
+
+       /* Trim request not aligned on 4k boundary */
+       WARN_ON(len % 8 != 0);
+
+       /* Warn if vu_trim structure is too big */
+       WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
+
+       /* Allocate a DMA buffer for the trim structure */
+       buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
+                                                               GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       memset(buf, 0, ATA_SECT_SIZE);
+
+       for (i = 0, sect_left = len, tlba = lba;
+                       i < MTIP_MAX_TRIM_ENTRIES && sect_left;
+                       i++) {
+               tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
+                                       MTIP_MAX_TRIM_ENTRY_LEN :
+                                       sect_left);
+               buf[i].lba = __force_bit2int cpu_to_le32(tlba);
+               buf[i].range = __force_bit2int cpu_to_le16(tlen);
+               tlba += tlen;
+               sect_left -= tlen;
+       }
+       WARN_ON(sect_left != 0);
+
+       /* Build the fis */
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
+       fis.type       = 0x27;
+       fis.opts       = 1 << 7;
+       fis.command    = 0xfb;
+       fis.features   = 0x60;
+       fis.sect_count = 1;
+       fis.device     = ATA_DEVICE_OBS;
+
+       if (mtip_exec_internal_command(dd->port,
+                                       &fis,
+                                       5,
+                                       dma_addr,
+                                       ATA_SECT_SIZE,
+                                       0,
+                                       GFP_KERNEL,
+                                       MTIP_TRIM_TIMEOUT_MS) < 0)
+               rv = -EIO;
+
+       dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
+       return rv;
+}
+
 /*
  * Get the drive capacity.
  *
@@ -3005,20 +3117,24 @@ static int mtip_hw_init(struct driver_data *dd)
 
        hba_setup(dd);
 
-       tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
-
-       dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
+       dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
+                               dd->numa_node);
        if (!dd->port) {
                dev_err(&dd->pdev->dev,
                        "Memory allocation: port structure\n");
                return -ENOMEM;
        }
 
+       /* Continue workqueue setup */
+       for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
+               dd->work[i].port = dd->port;
+
        /* Counting semaphore to track command slot usage */
        sema_init(&dd->port->cmd_slot, num_command_slots - 1);
 
        /* Spinlock to prevent concurrent issue */
-       spin_lock_init(&dd->port->cmd_issue_lock);
+       for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
+               spin_lock_init(&dd->port->cmd_issue_lock[i]);
 
        /* Set the port mmio base address. */
        dd->port->mmio  = dd->mmio + PORT_OFFSET;
@@ -3165,6 +3281,7 @@ static int mtip_hw_init(struct driver_data *dd)
                        "Unable to allocate IRQ %d\n", dd->pdev->irq);
                goto out2;
        }
+       irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
 
        /* Enable interrupts on the HBA. */
        writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
@@ -3241,7 +3358,8 @@ out3:
        writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
                        dd->mmio + HOST_CTL);
 
-       /*Release the IRQ. */
+       /* Release the IRQ. */
+       irq_set_affinity_hint(dd->pdev->irq, NULL);
        devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
 
 out2:
@@ -3291,11 +3409,9 @@ static int mtip_hw_exit(struct driver_data *dd)
        del_timer_sync(&dd->port->cmd_timer);
 
        /* Release the IRQ. */
+       irq_set_affinity_hint(dd->pdev->irq, NULL);
        devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
 
-       /* Stop the bottom half tasklet. */
-       tasklet_kill(&dd->tasklet);
-
        /* Free the command/command header memory. */
        dmam_free_coherent(&dd->pdev->dev,
                        HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
@@ -3641,6 +3757,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
                }
        }
 
+       if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+               bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+                                               bio_sectors(bio)));
+               return;
+       }
+
        if (unlikely(!bio_has_data(bio))) {
                blk_queue_flush(queue, 0);
                bio_endio(bio, 0);
@@ -3711,7 +3833,7 @@ static int mtip_block_initialize(struct driver_data *dd)
                goto protocol_init_error;
        }
 
-       dd->disk = alloc_disk(MTIP_MAX_MINORS);
+       dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
        if (dd->disk  == NULL) {
                dev_err(&dd->pdev->dev,
                        "Unable to allocate gendisk structure\n");
@@ -3755,7 +3877,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 
 skip_create_disk:
        /* Allocate the request queue. */
-       dd->queue = blk_alloc_queue(GFP_KERNEL);
+       dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node);
        if (dd->queue == NULL) {
                dev_err(&dd->pdev->dev,
                        "Unable to allocate request queue\n");
@@ -3783,6 +3905,15 @@ skip_create_disk:
         */
        blk_queue_flush(dd->queue, 0);
 
+       /* Signal trim support */
+       if (dd->trim_supp == true) {
+               set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
+               dd->queue->limits.discard_granularity = 4096;
+               blk_queue_max_discard_sectors(dd->queue,
+                       MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
+               dd->queue->limits.discard_zeroes_data = 0;
+       }
+
        /* Set the capacity of the device in 512 byte sectors. */
        if (!(mtip_hw_get_capacity(dd, &capacity))) {
                dev_warn(&dd->pdev->dev,
@@ -3813,9 +3944,8 @@ skip_create_disk:
 
 start_service_thread:
        sprintf(thd_name, "mtip_svc_thd_%02d", index);
-
-       dd->mtip_svc_handler = kthread_run(mtip_service_thread,
-                                               dd, thd_name);
+       dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
+                                               dd, dd->numa_node, thd_name);
 
        if (IS_ERR(dd->mtip_svc_handler)) {
                dev_err(&dd->pdev->dev, "service thread failed to start\n");
@@ -3823,7 +3953,7 @@ start_service_thread:
                rv = -EFAULT;
                goto kthread_run_error;
        }
-
+       wake_up_process(dd->mtip_svc_handler);
        if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
                rv = wait_for_rebuild;
 
@@ -3963,6 +4093,56 @@ static int mtip_block_resume(struct driver_data *dd)
        return 0;
 }
 
+static void drop_cpu(int cpu)
+{
+       cpu_use[cpu]--;
+}
+
+static int get_least_used_cpu_on_node(int node)
+{
+       int cpu, least_used_cpu, least_cnt;
+       const struct cpumask *node_mask;
+
+       node_mask = cpumask_of_node(node);
+       least_used_cpu = cpumask_first(node_mask);
+       least_cnt = cpu_use[least_used_cpu];
+       cpu = least_used_cpu;
+
+       for_each_cpu(cpu, node_mask) {
+               if (cpu_use[cpu] < least_cnt) {
+                       least_used_cpu = cpu;
+                       least_cnt = cpu_use[cpu];
+               }
+       }
+       cpu_use[least_used_cpu]++;
+       return least_used_cpu;
+}
+
+/* Helper for selecting a node in round robin mode */
+static inline int mtip_get_next_rr_node(void)
+{
+       static int next_node = -1;
+
+       if (next_node == -1) {
+               next_node = first_online_node;
+               return next_node;
+       }
+
+       next_node = next_online_node(next_node);
+       if (next_node == MAX_NUMNODES)
+               next_node = first_online_node;
+       return next_node;
+}
+
+static DEFINE_HANDLER(0);
+static DEFINE_HANDLER(1);
+static DEFINE_HANDLER(2);
+static DEFINE_HANDLER(3);
+static DEFINE_HANDLER(4);
+static DEFINE_HANDLER(5);
+static DEFINE_HANDLER(6);
+static DEFINE_HANDLER(7);
+
 /*
  * Called for each supported PCI device detected.
  *
@@ -3977,9 +4157,25 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 {
        int rv = 0;
        struct driver_data *dd = NULL;
+       char cpu_list[256];
+       const struct cpumask *node_mask;
+       int cpu, i = 0, j = 0;
+       int my_node = NUMA_NO_NODE;
 
        /* Allocate memory for this devices private data. */
-       dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
+       my_node = pcibus_to_node(pdev->bus);
+       if (my_node != NUMA_NO_NODE) {
+               if (!node_online(my_node))
+                       my_node = mtip_get_next_rr_node();
+       } else {
+               dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
+               my_node = mtip_get_next_rr_node();
+       }
+       dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
+               my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
+               cpu_to_node(smp_processor_id()), smp_processor_id());
+
+       dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
        if (dd == NULL) {
                dev_err(&pdev->dev,
                        "Unable to allocate memory for driver data\n");
@@ -4016,19 +4212,82 @@ static int mtip_pci_probe(struct pci_dev *pdev,
                }
        }
 
-       pci_set_master(pdev);
+       /* Copy the info we may need later into the private data structure. */
+       dd->major       = mtip_major;
+       dd->instance    = instance;
+       dd->pdev        = pdev;
+       dd->numa_node   = my_node;
 
+       memset(dd->workq_name, 0, 32);
+       snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
+
+       dd->isr_workq = create_workqueue(dd->workq_name);
+       if (!dd->isr_workq) {
+               dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
+               goto block_initialize_err;
+       }
+
+       memset(cpu_list, 0, sizeof(cpu_list));
+
+       node_mask = cpumask_of_node(dd->numa_node);
+       if (!cpumask_empty(node_mask)) {
+               for_each_cpu(cpu, node_mask)
+               {
+                       snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
+                       j = strlen(cpu_list);
+               }
+
+               dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
+                       dd->numa_node,
+                       topology_physical_package_id(cpumask_first(node_mask)),
+                       nr_cpus_node(dd->numa_node),
+                       cpu_list);
+       } else
+               dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");
+
+       dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
+       dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
+               cpu_to_node(dd->isr_binding), dd->isr_binding);
+
+       /* first worker context always runs in ISR */
+       dd->work[0].cpu_binding = dd->isr_binding;
+       dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
+       dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
+       dd->work[3].cpu_binding = dd->work[0].cpu_binding;
+       dd->work[4].cpu_binding = dd->work[1].cpu_binding;
+       dd->work[5].cpu_binding = dd->work[2].cpu_binding;
+       dd->work[6].cpu_binding = dd->work[2].cpu_binding;
+       dd->work[7].cpu_binding = dd->work[1].cpu_binding;
+
+       /* Log the bindings */
+       for_each_present_cpu(cpu) {
+               memset(cpu_list, 0, sizeof(cpu_list));
+               for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
+                       if (dd->work[i].cpu_binding == cpu) {
+                               snprintf(&cpu_list[j], 256 - j, "%d ", i);
+                               j = strlen(cpu_list);
+                       }
+               }
+               if (j)
+                       dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
+       }
+
+       INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
+       INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
+       INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
+       INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
+       INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
+       INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
+       INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
+       INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
+
+       pci_set_master(pdev);
        if (pci_enable_msi(pdev)) {
                dev_warn(&pdev->dev,
                        "Unable to enable MSI interrupt.\n");
                goto block_initialize_err;
        }
 
-       /* Copy the info we may need later into the private data structure. */
-       dd->major       = mtip_major;
-       dd->instance    = instance;
-       dd->pdev        = pdev;
-
        /* Initialize the block layer. */
        rv = mtip_block_initialize(dd);
        if (rv < 0) {
@@ -4048,7 +4307,13 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 
 block_initialize_err:
        pci_disable_msi(pdev);
-
+       if (dd->isr_workq) {
+               flush_workqueue(dd->isr_workq);
+               destroy_workqueue(dd->isr_workq);
+               drop_cpu(dd->work[0].cpu_binding);
+               drop_cpu(dd->work[1].cpu_binding);
+               drop_cpu(dd->work[2].cpu_binding);
+       }
 setmask_err:
        pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
 
@@ -4089,6 +4354,14 @@ static void mtip_pci_remove(struct pci_dev *pdev)
        /* Clean up the block layer. */
        mtip_block_remove(dd);
 
+       if (dd->isr_workq) {
+               flush_workqueue(dd->isr_workq);
+               destroy_workqueue(dd->isr_workq);
+               drop_cpu(dd->work[0].cpu_binding);
+               drop_cpu(dd->work[1].cpu_binding);
+               drop_cpu(dd->work[2].cpu_binding);
+       }
+
        pci_disable_msi(pdev);
 
        kfree(dd);
index b174264..3bffff5 100644 (file)
@@ -164,6 +164,35 @@ struct smart_attr {
        u8 res[3];
 } __packed;
 
+struct mtip_work {
+       struct work_struct work;
+       void *port;
+       int cpu_binding;
+       u32 completed;
+} ____cacheline_aligned_in_smp;
+
+#define DEFINE_HANDLER(group)                                  \
+       void mtip_workq_sdbf##group(struct work_struct *work)       \
+       {                                                      \
+               struct mtip_work *w = (struct mtip_work *) work;         \
+               mtip_workq_sdbfx(w->port, group, w->completed);     \
+       }
+
+#define MTIP_TRIM_TIMEOUT_MS           240000
+#define MTIP_MAX_TRIM_ENTRIES          8
+#define MTIP_MAX_TRIM_ENTRY_LEN        0xfff8
+
+struct mtip_trim_entry {
+       u32 lba;   /* starting lba of region */
+       u16 rsvd;  /* unused */
+       u16 range; /* # of 512b blocks to trim */
+} __packed;
+
+struct mtip_trim {
+       /* Array of regions to trim */
+       struct mtip_trim_entry entry[MTIP_MAX_TRIM_ENTRIES];
+} __packed;
+
 /* Register Frame Information Structure (FIS), host to device. */
 struct host_to_dev_fis {
        /*
@@ -424,7 +453,7 @@ struct mtip_port {
         */
        struct semaphore cmd_slot;
        /* Spinlock for working around command-issue bug. */
-       spinlock_t cmd_issue_lock;
+       spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
 };
 
 /*
@@ -447,9 +476,6 @@ struct driver_data {
 
        struct mtip_port *port; /* Pointer to the port data structure. */
 
-       /* Tasklet used to process the bottom half of the ISR. */
-       struct tasklet_struct tasklet;
-
        unsigned product_type; /* magic value declaring the product type */
 
        unsigned slot_groups; /* number of slot groups the product supports */
@@ -461,6 +487,20 @@ struct driver_data {
        struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
 
        struct dentry *dfs_node;
+
+       bool trim_supp; /* flag indicating trim support */
+
+       int numa_node; /* NUMA support */
+
+       char workq_name[32];
+
+       struct workqueue_struct *isr_workq;
+
+       struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
+
+       atomic_t irq_workers_active;
+
+       int isr_binding;
 };
 
 #endif
index 043ddcc..7fecc78 100644 (file)
@@ -98,6 +98,7 @@ static const char *nbdcmd_to_ascii(int cmd)
        case  NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case  NBD_CMD_DISC: return "disconnect";
+       case NBD_CMD_FLUSH: return "flush";
        case  NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
@@ -244,8 +245,15 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(nbd_cmd(req));
-       request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
-       request.len = htonl(size);
+
+       if (nbd_cmd(req) == NBD_CMD_FLUSH) {
+               /* Other values are reserved for FLUSH requests.  */
+               request.from = 0;
+               request.len = 0;
+       } else {
+               request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
+               request.len = htonl(size);
+       }
        memcpy(request.handle, &req, sizeof(req));
 
        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
@@ -482,6 +490,11 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
                }
        }
 
+       if (req->cmd_flags & REQ_FLUSH) {
+               BUG_ON(unlikely(blk_rq_sectors(req)));
+               nbd_cmd(req) = NBD_CMD_FLUSH;
+       }
+
        req->errors = 0;
 
        mutex_lock(&nbd->tx_lock);
@@ -551,6 +564,7 @@ static int nbd_thread(void *data)
  */
 
 static void do_nbd_request(struct request_queue *q)
+               __releases(q->queue_lock) __acquires(q->queue_lock)
 {
        struct request *req;
        
@@ -595,12 +609,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                struct request sreq;
 
                dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
+               if (!nbd->sock)
+                       return -EINVAL;
 
+               mutex_unlock(&nbd->tx_lock);
+               fsync_bdev(bdev);
+               mutex_lock(&nbd->tx_lock);
                blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;
+
+               /* Check again after getting mutex back.  */
                if (!nbd->sock)
                        return -EINVAL;
+
                nbd_send_req(nbd, &sreq);
                 return 0;
        }
@@ -614,6 +636,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                nbd_clear_que(nbd);
                BUG_ON(!list_empty(&nbd->queue_head));
                BUG_ON(!list_empty(&nbd->waiting_queue));
+               kill_bdev(bdev);
                if (file)
                        fput(file);
                return 0;
@@ -625,7 +648,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                        return -EBUSY;
                file = fget(arg);
                if (file) {
-                       struct inode *inode = file->f_path.dentry->d_inode;
+                       struct inode *inode = file_inode(file);
                        if (S_ISSOCK(inode->i_mode)) {
                                nbd->file = file;
                                nbd->sock = SOCKET_I(inode);
@@ -681,9 +704,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 
                mutex_unlock(&nbd->tx_lock);
 
+               if (nbd->flags & NBD_FLAG_READ_ONLY)
+                       set_device_ro(bdev, true);
                if (nbd->flags & NBD_FLAG_SEND_TRIM)
                        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
                                nbd->disk->queue);
+               if (nbd->flags & NBD_FLAG_SEND_FLUSH)
+                       blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+               else
+                       blk_queue_flush(nbd->disk->queue, 0);
 
                thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
                if (IS_ERR(thread)) {
@@ -702,9 +731,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                nbd->file = NULL;
                nbd_clear_que(nbd);
                dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
+               kill_bdev(bdev);
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+               set_device_ro(bdev, false);
                if (file)
                        fput(file);
+               nbd->flags = 0;
                nbd->bytesize = 0;
                bdev->bd_inode->i_size = 0;
                set_capacity(nbd->disk, 0);
index 89576a0..6c81a4c 100644 (file)
 #define        SECTOR_SHIFT    9
 #define        SECTOR_SIZE     (1ULL << SECTOR_SHIFT)
 
-/* It might be useful to have this defined elsewhere too */
+/* It might be useful to have these defined elsewhere */
 
-#define        U64_MAX ((u64) (~0ULL))
+#define        U8_MAX  ((u8)   (~0U))
+#define        U16_MAX ((u16)  (~0U))
+#define        U32_MAX ((u32)  (~0U))
+#define        U64_MAX ((u64)  (~0ULL))
 
 #define RBD_DRV_NAME "rbd"
 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
@@ -66,7 +69,6 @@
                        (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
 
 #define RBD_MAX_SNAP_COUNT     510     /* allows max snapc to fit in 4KB */
-#define RBD_MAX_OPT_LEN                1024
 
 #define RBD_SNAP_HEAD_NAME     "-"
 
@@ -93,8 +95,6 @@
 #define DEV_NAME_LEN           32
 #define MAX_INT_FORMAT_WIDTH   ((5 * sizeof (int)) / 2 + 1)
 
-#define RBD_READ_ONLY_DEFAULT          false
-
 /*
  * block device image metadata (in-memory version)
  */
@@ -119,16 +119,33 @@ struct rbd_image_header {
  * An rbd image specification.
  *
  * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
- * identify an image.
+ * identify an image.  Each rbd_dev structure includes a pointer to
+ * an rbd_spec structure that encapsulates this identity.
+ *
+ * Each of the id's in an rbd_spec has an associated name.  For a
+ * user-mapped image, the names are supplied and the id's associated
+ * with them are looked up.  For a layered image, a parent image is
+ * defined by the tuple, and the names are looked up.
+ *
+ * An rbd_dev structure contains a parent_spec pointer which is
+ * non-null if the image it represents is a child in a layered
+ * image.  This pointer will refer to the rbd_spec structure used
+ * by the parent rbd_dev for its own identity (i.e., the structure
+ * is shared between the parent and child).
+ *
+ * Since these structures are populated once, during the discovery
+ * phase of image construction, they are effectively immutable so
+ * we make no effort to synchronize access to them.
+ *
+ * Note that code herein does not assume the image name is known (it
+ * could be a null pointer).
  */
 struct rbd_spec {
        u64             pool_id;
        char            *pool_name;
 
        char            *image_id;
-       size_t          image_id_len;
        char            *image_name;
-       size_t          image_name_len;
 
        u64             snap_id;
        char            *snap_name;
@@ -136,10 +153,6 @@ struct rbd_spec {
        struct kref     kref;
 };
 
-struct rbd_options {
-       bool    read_only;
-};
-
 /*
  * an instance of the client.  multiple devices may share an rbd client.
  */
@@ -149,37 +162,76 @@ struct rbd_client {
        struct list_head        node;
 };
 
-/*
- * a request completion status
- */
-struct rbd_req_status {
-       int done;
-       int rc;
-       u64 bytes;
+struct rbd_img_request;
+typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
+
+#define        BAD_WHICH       U32_MAX         /* Good which or bad which, which? */
+
+struct rbd_obj_request;
+typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
+
+enum obj_request_type {
+       OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
 };
 
-/*
- * a collection of requests
- */
-struct rbd_req_coll {
-       int                     total;
-       int                     num_done;
+struct rbd_obj_request {
+       const char              *object_name;
+       u64                     offset;         /* object start byte */
+       u64                     length;         /* bytes from offset */
+
+       struct rbd_img_request  *img_request;
+       struct list_head        links;          /* img_request->obj_requests */
+       u32                     which;          /* posn image request list */
+
+       enum obj_request_type   type;
+       union {
+               struct bio      *bio_list;
+               struct {
+                       struct page     **pages;
+                       u32             page_count;
+               };
+       };
+
+       struct ceph_osd_request *osd_req;
+
+       u64                     xferred;        /* bytes transferred */
+       u64                     version;
+       int                     result;
+       atomic_t                done;
+
+       rbd_obj_callback_t      callback;
+       struct completion       completion;
+
        struct kref             kref;
-       struct rbd_req_status   status[0];
 };
 
-/*
- * a single io request
- */
-struct rbd_request {
-       struct request          *rq;            /* blk layer request */
-       struct bio              *bio;           /* cloned bio */
-       struct page             **pages;        /* list of used pages */
-       u64                     len;
-       int                     coll_index;
-       struct rbd_req_coll     *coll;
+struct rbd_img_request {
+       struct request          *rq;
+       struct rbd_device       *rbd_dev;
+       u64                     offset; /* starting image byte offset */
+       u64                     length; /* byte count from offset */
+       bool                    write_request;  /* false for read */
+       union {
+               struct ceph_snap_context *snapc;        /* for writes */
+               u64             snap_id;                /* for reads */
+       };
+       spinlock_t              completion_lock;/* protects next_completion */
+       u32                     next_completion;
+       rbd_img_callback_t      callback;
+
+       u32                     obj_request_count;
+       struct list_head        obj_requests;   /* rbd_obj_request structs */
+
+       struct kref             kref;
 };
 
+#define for_each_obj_request(ireq, oreq) \
+       list_for_each_entry(oreq, &(ireq)->obj_requests, links)
+#define for_each_obj_request_from(ireq, oreq) \
+       list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
+#define for_each_obj_request_safe(ireq, oreq, n) \
+       list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
+
 struct rbd_snap {
        struct  device          dev;
        const char              *name;
@@ -209,16 +261,18 @@ struct rbd_device {
 
        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
 
-       spinlock_t              lock;           /* queue lock */
+       spinlock_t              lock;           /* queue, flags, open_count */
 
        struct rbd_image_header header;
-       bool                    exists;
+       unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;
 
        char                    *header_name;
 
+       struct ceph_file_layout layout;
+
        struct ceph_osd_event   *watch_event;
-       struct ceph_osd_request *watch_request;
+       struct rbd_obj_request  *watch_request;
 
        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
@@ -235,7 +289,19 @@ struct rbd_device {
 
        /* sysfs related */
        struct device           dev;
-       unsigned long           open_count;
+       unsigned long           open_count;     /* protected by lock */
+};
+
+/*
+ * Flag bits for rbd_dev->flags.  If atomicity is required,
+ * rbd_dev->lock is used to protect access.
+ *
+ * Currently, only the "removing" flag (which is coupled with the
+ * "open_count" field) requires atomic access.
+ */
+enum rbd_dev_flags {
+       RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
+       RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
 };
 
 static DEFINE_MUTEX(ctl_mutex);          /* Serialize open/close/setup/teardown */
@@ -277,6 +343,33 @@ static struct device rbd_root_dev = {
        .release =      rbd_root_dev_release,
 };
 
+static __printf(2, 3)
+void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       if (!rbd_dev)
+               printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
+       else if (rbd_dev->disk)
+               printk(KERN_WARNING "%s: %s: %pV\n",
+                       RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
+       else if (rbd_dev->spec && rbd_dev->spec->image_name)
+               printk(KERN_WARNING "%s: image %s: %pV\n",
+                       RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
+       else if (rbd_dev->spec && rbd_dev->spec->image_id)
+               printk(KERN_WARNING "%s: id %s: %pV\n",
+                       RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
+       else    /* punt */
+               printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
+                       RBD_DRV_NAME, rbd_dev, &vaf);
+       va_end(args);
+}
+
 #ifdef RBD_DEBUG
 #define rbd_assert(expr)                                               \
                if (unlikely(!(expr))) {                                \
@@ -296,14 +389,23 @@ static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
 static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
+       bool removing = false;
 
        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;
 
+       spin_lock_irq(&rbd_dev->lock);
+       if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
+               removing = true;
+       else
+               rbd_dev->open_count++;
+       spin_unlock_irq(&rbd_dev->lock);
+       if (removing)
+               return -ENOENT;
+
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);
-       rbd_dev->open_count++;
        mutex_unlock(&ctl_mutex);
 
        return 0;
@@ -312,10 +414,14 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
 static int rbd_release(struct gendisk *disk, fmode_t mode)
 {
        struct rbd_device *rbd_dev = disk->private_data;
+       unsigned long open_count_before;
+
+       spin_lock_irq(&rbd_dev->lock);
+       open_count_before = rbd_dev->open_count--;
+       spin_unlock_irq(&rbd_dev->lock);
+       rbd_assert(open_count_before > 0);
 
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-       rbd_assert(rbd_dev->open_count > 0);
-       rbd_dev->open_count--;
        put_device(&rbd_dev->dev);
        mutex_unlock(&ctl_mutex);
 
@@ -337,7 +443,7 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
        struct rbd_client *rbdc;
        int ret = -ENOMEM;
 
-       dout("rbd_client_create\n");
+       dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;
@@ -361,8 +467,8 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
        spin_unlock(&rbd_client_list_lock);
 
        mutex_unlock(&ctl_mutex);
+       dout("%s: rbdc %p\n", __func__, rbdc);
 
-       dout("rbd_client_create created %p\n", rbdc);
        return rbdc;
 
 out_err:
@@ -373,6 +479,8 @@ out_mutex:
 out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
+       dout("%s: error %d\n", __func__, ret);
+
        return ERR_PTR(ret);
 }
 
@@ -426,6 +534,12 @@ static match_table_t rbd_opts_tokens = {
        {-1, NULL}
 };
 
+struct rbd_options {
+       bool    read_only;
+};
+
+#define RBD_READ_ONLY_DEFAULT  false
+
 static int parse_rbd_opts_token(char *c, void *private)
 {
        struct rbd_options *rbd_opts = private;
@@ -493,7 +607,7 @@ static void rbd_client_release(struct kref *kref)
 {
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
 
-       dout("rbd_release_client %p\n", rbdc);
+       dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);
@@ -512,18 +626,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
 }
 
-/*
- * Destroy requests collection
- */
-static void rbd_coll_release(struct kref *kref)
-{
-       struct rbd_req_coll *coll =
-               container_of(kref, struct rbd_req_coll, kref);
-
-       dout("rbd_coll_release %p\n", coll);
-       kfree(coll);
-}
-
 static bool rbd_image_format_valid(u32 image_format)
 {
        return image_format == 1 || image_format == 2;
@@ -707,7 +809,8 @@ static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
                        goto done;
                rbd_dev->mapping.read_only = true;
        }
-       rbd_dev->exists = true;
+       set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+
 done:
        return ret;
 }
@@ -724,7 +827,7 @@ static void rbd_header_free(struct rbd_image_header *header)
        header->snapc = NULL;
 }
 
-static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
+static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 {
        char *name;
        u64 segment;
@@ -767,23 +870,6 @@ static u64 rbd_segment_length(struct rbd_device *rbd_dev,
        return length;
 }
 
-static int rbd_get_num_segments(struct rbd_image_header *header,
-                               u64 ofs, u64 len)
-{
-       u64 start_seg;
-       u64 end_seg;
-
-       if (!len)
-               return 0;
-       if (len - 1 > U64_MAX - ofs)
-               return -ERANGE;
-
-       start_seg = ofs >> header->obj_order;
-       end_seg = (ofs + len - 1) >> header->obj_order;
-
-       return end_seg - start_seg + 1;
-}
-
 /*
  * returns the size of an object in the image
  */
@@ -949,8 +1035,10 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
                unsigned int bi_size;
                struct bio *bio;
 
-               if (!bi)
+               if (!bi) {
+                       rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
+               }
                bi_size = min_t(unsigned int, bi->bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
@@ -976,399 +1064,721 @@ out_err:
        return NULL;
 }
 
-/*
- * helpers for osd request op vectors.
- */
-static struct ceph_osd_req_op *rbd_create_rw_ops(int num_ops,
-                                       int opcode, u32 payload_len)
+static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
 {
-       struct ceph_osd_req_op *ops;
+       dout("%s: obj %p (was %d)\n", __func__, obj_request,
+               atomic_read(&obj_request->kref.refcount));
+       kref_get(&obj_request->kref);
+}
+
+static void rbd_obj_request_destroy(struct kref *kref);
+static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
+{
+       rbd_assert(obj_request != NULL);
+       dout("%s: obj %p (was %d)\n", __func__, obj_request,
+               atomic_read(&obj_request->kref.refcount));
+       kref_put(&obj_request->kref, rbd_obj_request_destroy);
+}
+
+static void rbd_img_request_get(struct rbd_img_request *img_request)
+{
+       dout("%s: img %p (was %d)\n", __func__, img_request,
+               atomic_read(&img_request->kref.refcount));
+       kref_get(&img_request->kref);
+}
+
+static void rbd_img_request_destroy(struct kref *kref);
+static void rbd_img_request_put(struct rbd_img_request *img_request)
+{
+       rbd_assert(img_request != NULL);
+       dout("%s: img %p (was %d)\n", __func__, img_request,
+               atomic_read(&img_request->kref.refcount));
+       kref_put(&img_request->kref, rbd_img_request_destroy);
+}
+
+static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
+                                       struct rbd_obj_request *obj_request)
+{
+       rbd_assert(obj_request->img_request == NULL);
+
+       rbd_obj_request_get(obj_request);
+       obj_request->img_request = img_request;
+       obj_request->which = img_request->obj_request_count;
+       rbd_assert(obj_request->which != BAD_WHICH);
+       img_request->obj_request_count++;
+       list_add_tail(&obj_request->links, &img_request->obj_requests);
+       dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
+               obj_request->which);
+}
 
-       ops = kzalloc(sizeof (*ops) * (num_ops + 1), GFP_NOIO);
-       if (!ops)
+static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
+                                       struct rbd_obj_request *obj_request)
+{
+       rbd_assert(obj_request->which != BAD_WHICH);
+
+       dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
+               obj_request->which);
+       list_del(&obj_request->links);
+       rbd_assert(img_request->obj_request_count > 0);
+       img_request->obj_request_count--;
+       rbd_assert(obj_request->which == img_request->obj_request_count);
+       obj_request->which = BAD_WHICH;
+       rbd_assert(obj_request->img_request == img_request);
+       obj_request->img_request = NULL;
+       obj_request->callback = NULL;
+       rbd_obj_request_put(obj_request);
+}
+
+static bool obj_request_type_valid(enum obj_request_type type)
+{
+       switch (type) {
+       case OBJ_REQUEST_NODATA:
+       case OBJ_REQUEST_BIO:
+       case OBJ_REQUEST_PAGES:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
+{
+       struct ceph_osd_req_op *op;
+       va_list args;
+       size_t size;
+
+       op = kzalloc(sizeof (*op), GFP_NOIO);
+       if (!op)
                return NULL;
+       op->op = opcode;
+       va_start(args, opcode);
+       switch (opcode) {
+       case CEPH_OSD_OP_READ:
+       case CEPH_OSD_OP_WRITE:
+               /* rbd_osd_req_op_create(READ, offset, length) */
+               /* rbd_osd_req_op_create(WRITE, offset, length) */
+               op->extent.offset = va_arg(args, u64);
+               op->extent.length = va_arg(args, u64);
+               if (opcode == CEPH_OSD_OP_WRITE)
+                       op->payload_len = op->extent.length;
+               break;
+       case CEPH_OSD_OP_STAT:
+               break;
+       case CEPH_OSD_OP_CALL:
+               /* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
+               op->cls.class_name = va_arg(args, char *);
+               size = strlen(op->cls.class_name);
+               rbd_assert(size <= (size_t) U8_MAX);
+               op->cls.class_len = size;
+               op->payload_len = size;
+
+               op->cls.method_name = va_arg(args, char *);
+               size = strlen(op->cls.method_name);
+               rbd_assert(size <= (size_t) U8_MAX);
+               op->cls.method_len = size;
+               op->payload_len += size;
+
+               op->cls.argc = 0;
+               op->cls.indata = va_arg(args, void *);
+               size = va_arg(args, size_t);
+               rbd_assert(size <= (size_t) U32_MAX);
+               op->cls.indata_len = (u32) size;
+               op->payload_len += size;
+               break;
+       case CEPH_OSD_OP_NOTIFY_ACK:
+       case CEPH_OSD_OP_WATCH:
+               /* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
+               /* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
+               op->watch.cookie = va_arg(args, u64);
+               op->watch.ver = va_arg(args, u64);
+               op->watch.ver = cpu_to_le64(op->watch.ver);
+               if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
+                       op->watch.flag = (u8) 1;
+               break;
+       default:
+               rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
+               kfree(op);
+               op = NULL;
+               break;
+       }
+       va_end(args);
 
-       ops[0].op = opcode;
+       return op;
+}
 
-       /*
-        * op extent offset and length will be set later on
-        * in calc_raw_layout()
-        */
-       ops[0].payload_len = payload_len;
+static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
+{
+       kfree(op);
+}
+
+static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
+                               struct rbd_obj_request *obj_request)
+{
+       dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
 
-       return ops;
+       return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
 }
 
-static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
+static void rbd_img_request_complete(struct rbd_img_request *img_request)
 {
-       kfree(ops);
+       dout("%s: img %p\n", __func__, img_request);
+       if (img_request->callback)
+               img_request->callback(img_request);
+       else
+               rbd_img_request_put(img_request);
 }
 
-static void rbd_coll_end_req_index(struct request *rq,
-                                  struct rbd_req_coll *coll,
-                                  int index,
-                                  int ret, u64 len)
+/* Caller is responsible for rbd_obj_request_destroy(obj_request) */
+
+static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
 {
-       struct request_queue *q;
-       int min, max, i;
+       dout("%s: obj %p\n", __func__, obj_request);
 
-       dout("rbd_coll_end_req_index %p index %d ret %d len %llu\n",
-            coll, index, ret, (unsigned long long) len);
+       return wait_for_completion_interruptible(&obj_request->completion);
+}
 
-       if (!rq)
-               return;
+static void obj_request_done_init(struct rbd_obj_request *obj_request)
+{
+       atomic_set(&obj_request->done, 0);
+       smp_wmb();
+}
 
-       if (!coll) {
-               blk_end_request(rq, ret, len);
-               return;
+static void obj_request_done_set(struct rbd_obj_request *obj_request)
+{
+       int done;
+
+       done = atomic_inc_return(&obj_request->done);
+       if (done > 1) {
+               struct rbd_img_request *img_request = obj_request->img_request;
+               struct rbd_device *rbd_dev;
+
+               rbd_dev = img_request ? img_request->rbd_dev : NULL;
+               rbd_warn(rbd_dev, "obj_request %p was already done\n",
+                       obj_request);
        }
+}
 
-       q = rq->q;
-
-       spin_lock_irq(q->queue_lock);
-       coll->status[index].done = 1;
-       coll->status[index].rc = ret;
-       coll->status[index].bytes = len;
-       max = min = coll->num_done;
-       while (max < coll->total && coll->status[max].done)
-               max++;
-
-       for (i = min; i<max; i++) {
-               __blk_end_request(rq, coll->status[i].rc,
-                                 coll->status[i].bytes);
-               coll->num_done++;
-               kref_put(&coll->kref, rbd_coll_release);
+static bool obj_request_done_test(struct rbd_obj_request *obj_request)
+{
+       smp_mb();
+       return atomic_read(&obj_request->done) != 0;
+}
+
+static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p cb %p\n", __func__, obj_request,
+               obj_request->callback);
+       if (obj_request->callback)
+               obj_request->callback(obj_request);
+       else
+               complete_all(&obj_request->completion);
+}
+
+static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p\n", __func__, obj_request);
+       obj_request_done_set(obj_request);
+}
+
+static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
+               obj_request->result, obj_request->xferred, obj_request->length);
+       /*
+        * ENOENT means a hole in the object.  We zero-fill the
+        * entire length of the request.  A short read also implies
+        * zero-fill to the end of the request.  Either way we
+        * update the xferred count to indicate the whole request
+        * was satisfied.
+        */
+       if (obj_request->result == -ENOENT) {
+               zero_bio_chain(obj_request->bio_list, 0);
+               obj_request->result = 0;
+               obj_request->xferred = obj_request->length;
+       } else if (obj_request->xferred < obj_request->length &&
+                       !obj_request->result) {
+               zero_bio_chain(obj_request->bio_list, obj_request->xferred);
+               obj_request->xferred = obj_request->length;
        }
-       spin_unlock_irq(q->queue_lock);
+       obj_request_done_set(obj_request);
 }
 
-static void rbd_coll_end_req(struct rbd_request *req,
-                            int ret, u64 len)
+static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
 {
-       rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
+       dout("%s: obj %p result %d %llu\n", __func__, obj_request,
+               obj_request->result, obj_request->length);
+       /*
+        * There is no such thing as a successful short write.
+        * Our xferred value is the number of bytes transferred
+        * back.  Set it to our originally-requested length.
+        */
+       obj_request->xferred = obj_request->length;
+       obj_request_done_set(obj_request);
 }
 
 /*
- * Send ceph osd request
+ * For a simple stat call there's nothing to do.  We'll do more if
+ * this is part of a write sequence for a layered image.
  */
-static int rbd_do_request(struct request *rq,
-                         struct rbd_device *rbd_dev,
-                         struct ceph_snap_context *snapc,
-                         u64 snapid,
-                         const char *object_name, u64 ofs, u64 len,
-                         struct bio *bio,
-                         struct page **pages,
-                         int num_pages,
-                         int flags,
-                         struct ceph_osd_req_op *ops,
-                         struct rbd_req_coll *coll,
-                         int coll_index,
-                         void (*rbd_cb)(struct ceph_osd_request *req,
-                                        struct ceph_msg *msg),
-                         struct ceph_osd_request **linger_req,
-                         u64 *ver)
-{
-       struct ceph_osd_request *req;
-       struct ceph_file_layout *layout;
-       int ret;
-       u64 bno;
-       struct timespec mtime = CURRENT_TIME;
-       struct rbd_request *req_data;
-       struct ceph_osd_request_head *reqhead;
-       struct ceph_osd_client *osdc;
+static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p\n", __func__, obj_request);
+       obj_request_done_set(obj_request);
+}
 
-       req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
-       if (!req_data) {
-               if (coll)
-                       rbd_coll_end_req_index(rq, coll, coll_index,
-                                              -ENOMEM, len);
-               return -ENOMEM;
+static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+                               struct ceph_msg *msg)
+{
+       struct rbd_obj_request *obj_request = osd_req->r_priv;
+       u16 opcode;
+
+       dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
+       rbd_assert(osd_req == obj_request->osd_req);
+       rbd_assert(!!obj_request->img_request ^
+                               (obj_request->which == BAD_WHICH));
+
+       if (osd_req->r_result < 0)
+               obj_request->result = osd_req->r_result;
+       obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
+
+       WARN_ON(osd_req->r_num_ops != 1);       /* For now */
+
+       /*
+        * We support a 64-bit length, but ultimately it has to be
+        * passed to blk_end_request(), which takes an unsigned int.
+        */
+       obj_request->xferred = osd_req->r_reply_op_len[0];
+       rbd_assert(obj_request->xferred < (u64) UINT_MAX);
+       opcode = osd_req->r_request_ops[0].op;
+       switch (opcode) {
+       case CEPH_OSD_OP_READ:
+               rbd_osd_read_callback(obj_request);
+               break;
+       case CEPH_OSD_OP_WRITE:
+               rbd_osd_write_callback(obj_request);
+               break;
+       case CEPH_OSD_OP_STAT:
+               rbd_osd_stat_callback(obj_request);
+               break;
+       case CEPH_OSD_OP_CALL:
+       case CEPH_OSD_OP_NOTIFY_ACK:
+       case CEPH_OSD_OP_WATCH:
+               rbd_osd_trivial_callback(obj_request);
+               break;
+       default:
+               rbd_warn(NULL, "%s: unsupported op %hu\n",
+                       obj_request->object_name, (unsigned short) opcode);
+               break;
        }
 
-       if (coll) {
-               req_data->coll = coll;
-               req_data->coll_index = coll_index;
+       if (obj_request_done_test(obj_request))
+               rbd_obj_request_complete(obj_request);
+}
+
+static struct ceph_osd_request *rbd_osd_req_create(
+                                       struct rbd_device *rbd_dev,
+                                       bool write_request,
+                                       struct rbd_obj_request *obj_request,
+                                       struct ceph_osd_req_op *op)
+{
+       struct rbd_img_request *img_request = obj_request->img_request;
+       struct ceph_snap_context *snapc = NULL;
+       struct ceph_osd_client *osdc;
+       struct ceph_osd_request *osd_req;
+       struct timespec now;
+       struct timespec *mtime;
+       u64 snap_id = CEPH_NOSNAP;
+       u64 offset = obj_request->offset;
+       u64 length = obj_request->length;
+
+       if (img_request) {
+               rbd_assert(img_request->write_request == write_request);
+               if (img_request->write_request)
+                       snapc = img_request->snapc;
+               else
+                       snap_id = img_request->snap_id;
        }
 
-       dout("rbd_do_request object_name=%s ofs=%llu len=%llu coll=%p[%d]\n",
-               object_name, (unsigned long long) ofs,
-               (unsigned long long) len, coll, coll_index);
+       /* Allocate and initialize the request, for the single op */
 
        osdc = &rbd_dev->rbd_client->client->osdc;
-       req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
-                                       false, GFP_NOIO, pages, bio);
-       if (!req) {
-               ret = -ENOMEM;
-               goto done_pages;
+       osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
+       if (!osd_req)
+               return NULL;    /* ENOMEM */
+
+       rbd_assert(obj_request_type_valid(obj_request->type));
+       switch (obj_request->type) {
+       case OBJ_REQUEST_NODATA:
+               break;          /* Nothing to do */
+       case OBJ_REQUEST_BIO:
+               rbd_assert(obj_request->bio_list != NULL);
+               osd_req->r_bio = obj_request->bio_list;
+               break;
+       case OBJ_REQUEST_PAGES:
+               osd_req->r_pages = obj_request->pages;
+               osd_req->r_num_pages = obj_request->page_count;
+               osd_req->r_page_alignment = offset & ~PAGE_MASK;
+               break;
        }
 
-       req->r_callback = rbd_cb;
+       if (write_request) {
+               osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+               now = CURRENT_TIME;
+               mtime = &now;
+       } else {
+               osd_req->r_flags = CEPH_OSD_FLAG_READ;
+               mtime = NULL;   /* not needed for reads */
+               offset = 0;     /* These are not used... */
+               length = 0;     /* ...for osd read requests */
+       }
 
-       req_data->rq = rq;
-       req_data->bio = bio;
-       req_data->pages = pages;
-       req_data->len = len;
+       osd_req->r_callback = rbd_osd_req_callback;
+       osd_req->r_priv = obj_request;
 
-       req->r_priv = req_data;
+       osd_req->r_oid_len = strlen(obj_request->object_name);
+       rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
+       memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
 
-       reqhead = req->r_request->front.iov_base;
-       reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
+       osd_req->r_file_layout = rbd_dev->layout;       /* struct */
 
-       strncpy(req->r_oid, object_name, sizeof(req->r_oid));
-       req->r_oid_len = strlen(req->r_oid);
+       /* osd_req will get its own reference to snapc (if non-null) */
 
-       layout = &req->r_file_layout;
-       memset(layout, 0, sizeof(*layout));
-       layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
-       layout->fl_stripe_count = cpu_to_le32(1);
-       layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
-       layout->fl_pg_pool = cpu_to_le32((int) rbd_dev->spec->pool_id);
-       ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
-                                  req, ops);
-       rbd_assert(ret == 0);
+       ceph_osdc_build_request(osd_req, offset, length, 1, op,
+                               snapc, snap_id, mtime);
 
-       ceph_osdc_build_request(req, ofs, &len,
-                               ops,
-                               snapc,
-                               &mtime,
-                               req->r_oid, req->r_oid_len);
+       return osd_req;
+}
 
-       if (linger_req) {
-               ceph_osdc_set_request_linger(osdc, req);
-               *linger_req = req;
-       }
+static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
+{
+       ceph_osdc_put_request(osd_req);
+}
 
-       ret = ceph_osdc_start_request(osdc, req, false);
-       if (ret < 0)
-               goto done_err;
-
-       if (!rbd_cb) {
-               ret = ceph_osdc_wait_request(osdc, req);
-               if (ver)
-                       *ver = le64_to_cpu(req->r_reassert_version.version);
-               dout("reassert_ver=%llu\n",
-                       (unsigned long long)
-                               le64_to_cpu(req->r_reassert_version.version));
-               ceph_osdc_put_request(req);
+/* object_name is assumed to be a non-null pointer and NUL-terminated */
+
+static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
+                                               u64 offset, u64 length,
+                                               enum obj_request_type type)
+{
+       struct rbd_obj_request *obj_request;
+       size_t size;
+       char *name;
+
+       rbd_assert(obj_request_type_valid(type));
+
+       size = strlen(object_name) + 1;
+       obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
+       if (!obj_request)
+               return NULL;
+
+       name = (char *)(obj_request + 1);
+       obj_request->object_name = memcpy(name, object_name, size);
+       obj_request->offset = offset;
+       obj_request->length = length;
+       obj_request->which = BAD_WHICH;
+       obj_request->type = type;
+       INIT_LIST_HEAD(&obj_request->links);
+       obj_request_done_init(obj_request);
+       init_completion(&obj_request->completion);
+       kref_init(&obj_request->kref);
+
+       dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
+               offset, length, (int)type, obj_request);
+
+       return obj_request;
+}
+
+static void rbd_obj_request_destroy(struct kref *kref)
+{
+       struct rbd_obj_request *obj_request;
+
+       obj_request = container_of(kref, struct rbd_obj_request, kref);
+
+       dout("%s: obj %p\n", __func__, obj_request);
+
+       rbd_assert(obj_request->img_request == NULL);
+       rbd_assert(obj_request->which == BAD_WHICH);
+
+       if (obj_request->osd_req)
+               rbd_osd_req_destroy(obj_request->osd_req);
+
+       rbd_assert(obj_request_type_valid(obj_request->type));
+       switch (obj_request->type) {
+       case OBJ_REQUEST_NODATA:
+               break;          /* Nothing to do */
+       case OBJ_REQUEST_BIO:
+               if (obj_request->bio_list)
+                       bio_chain_put(obj_request->bio_list);
+               break;
+       case OBJ_REQUEST_PAGES:
+               if (obj_request->pages)
+                       ceph_release_page_vector(obj_request->pages,
+                                               obj_request->page_count);
+               break;
        }
-       return ret;
 
-done_err:
-       bio_chain_put(req_data->bio);
-       ceph_osdc_put_request(req);
-done_pages:
-       rbd_coll_end_req(req_data, ret, len);
-       kfree(req_data);
-       return ret;
+       kfree(obj_request);
 }
 
 /*
- * Ceph osd op callback
+ * Caller is responsible for filling in the list of object requests
+ * that comprises the image request, and the Linux request pointer
+ * (if there is one).
  */
-static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
-{
-       struct rbd_request *req_data = req->r_priv;
-       struct ceph_osd_reply_head *replyhead;
-       struct ceph_osd_op *op;
-       __s32 rc;
-       u64 bytes;
-       int read_op;
-
-       /* parse reply */
-       replyhead = msg->front.iov_base;
-       WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
-       op = (void *)(replyhead + 1);
-       rc = le32_to_cpu(replyhead->result);
-       bytes = le64_to_cpu(op->extent.length);
-       read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
-
-       dout("rbd_req_cb bytes=%llu readop=%d rc=%d\n",
-               (unsigned long long) bytes, read_op, (int) rc);
-
-       if (rc == -ENOENT && read_op) {
-               zero_bio_chain(req_data->bio, 0);
-               rc = 0;
-       } else if (rc == 0 && read_op && bytes < req_data->len) {
-               zero_bio_chain(req_data->bio, bytes);
-               bytes = req_data->len;
-       }
+static struct rbd_img_request *rbd_img_request_create(
+                                       struct rbd_device *rbd_dev,
+                                       u64 offset, u64 length,
+                                       bool write_request)
+{
+       struct rbd_img_request *img_request;
+       struct ceph_snap_context *snapc = NULL;
 
-       rbd_coll_end_req(req_data, rc, bytes);
+       img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
+       if (!img_request)
+               return NULL;
 
-       if (req_data->bio)
-               bio_chain_put(req_data->bio);
+       if (write_request) {
+               down_read(&rbd_dev->header_rwsem);
+               snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+               up_read(&rbd_dev->header_rwsem);
+               if (WARN_ON(!snapc)) {
+                       kfree(img_request);
+                       return NULL;    /* Shouldn't happen */
+               }
+       }
 
-       ceph_osdc_put_request(req);
-       kfree(req_data);
+       img_request->rq = NULL;
+       img_request->rbd_dev = rbd_dev;
+       img_request->offset = offset;
+       img_request->length = length;
+       img_request->write_request = write_request;
+       if (write_request)
+               img_request->snapc = snapc;
+       else
+               img_request->snap_id = rbd_dev->spec->snap_id;
+       spin_lock_init(&img_request->completion_lock);
+       img_request->next_completion = 0;
+       img_request->callback = NULL;
+       img_request->obj_request_count = 0;
+       INIT_LIST_HEAD(&img_request->obj_requests);
+       kref_init(&img_request->kref);
+
+       rbd_img_request_get(img_request);       /* Avoid a warning */
+       rbd_img_request_put(img_request);       /* TEMPORARY */
+
+       dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
+               write_request ? "write" : "read", offset, length,
+               img_request);
+
+       return img_request;
 }
 
-static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
+static void rbd_img_request_destroy(struct kref *kref)
 {
-       ceph_osdc_put_request(req);
+       struct rbd_img_request *img_request;
+       struct rbd_obj_request *obj_request;
+       struct rbd_obj_request *next_obj_request;
+
+       img_request = container_of(kref, struct rbd_img_request, kref);
+
+       dout("%s: img %p\n", __func__, img_request);
+
+       for_each_obj_request_safe(img_request, obj_request, next_obj_request)
+               rbd_img_obj_request_del(img_request, obj_request);
+       rbd_assert(img_request->obj_request_count == 0);
+
+       if (img_request->write_request)
+               ceph_put_snap_context(img_request->snapc);
+
+       kfree(img_request);
 }
 
-/*
- * Do a synchronous ceph osd operation
- */
-static int rbd_req_sync_op(struct rbd_device *rbd_dev,
-                          struct ceph_snap_context *snapc,
-                          u64 snapid,
-                          int flags,
-                          struct ceph_osd_req_op *ops,
-                          const char *object_name,
-                          u64 ofs, u64 inbound_size,
-                          char *inbound,
-                          struct ceph_osd_request **linger_req,
-                          u64 *ver)
+static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
+                                       struct bio *bio_list)
 {
-       int ret;
-       struct page **pages;
-       int num_pages;
-
-       rbd_assert(ops != NULL);
+       struct rbd_device *rbd_dev = img_request->rbd_dev;
+       struct rbd_obj_request *obj_request = NULL;
+       struct rbd_obj_request *next_obj_request;
+       unsigned int bio_offset;
+       u64 image_offset;
+       u64 resid;
+       u16 opcode;
+
+       dout("%s: img %p bio %p\n", __func__, img_request, bio_list);
+
+       opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
+                                             : CEPH_OSD_OP_READ;
+       bio_offset = 0;
+       image_offset = img_request->offset;
+       rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
+       resid = img_request->length;
+       rbd_assert(resid > 0);
+       while (resid) {
+               const char *object_name;
+               unsigned int clone_size;
+               struct ceph_osd_req_op *op;
+               u64 offset;
+               u64 length;
+
+               object_name = rbd_segment_name(rbd_dev, image_offset);
+               if (!object_name)
+                       goto out_unwind;
+               offset = rbd_segment_offset(rbd_dev, image_offset);
+               length = rbd_segment_length(rbd_dev, image_offset, resid);
+               obj_request = rbd_obj_request_create(object_name,
+                                               offset, length,
+                                               OBJ_REQUEST_BIO);
+               kfree(object_name);     /* object request has its own copy */
+               if (!obj_request)
+                       goto out_unwind;
+
+               rbd_assert(length <= (u64) UINT_MAX);
+               clone_size = (unsigned int) length;
+               obj_request->bio_list = bio_chain_clone_range(&bio_list,
+                                               &bio_offset, clone_size,
+                                               GFP_ATOMIC);
+               if (!obj_request->bio_list)
+                       goto out_partial;
 
-       num_pages = calc_pages_for(ofs, inbound_size);
-       pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
-       if (IS_ERR(pages))
-               return PTR_ERR(pages);
+               /*
+                * Build up the op to use in building the osd
+                * request.  Note that the contents of the op are
+                * copied by rbd_osd_req_create().
+                */
+               op = rbd_osd_req_op_create(opcode, offset, length);
+               if (!op)
+                       goto out_partial;
+               obj_request->osd_req = rbd_osd_req_create(rbd_dev,
+                                               img_request->write_request,
+                                               obj_request, op);
+               rbd_osd_req_op_destroy(op);
+               if (!obj_request->osd_req)
+                       goto out_partial;
+               /* status and version are initially zero-filled */
+
+               rbd_img_obj_request_add(img_request, obj_request);
+
+               image_offset += length;
+               resid -= length;
+       }
 
-       ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
-                         object_name, ofs, inbound_size, NULL,
-                         pages, num_pages,
-                         flags,
-                         ops,
-                         NULL, 0,
-                         NULL,
-                         linger_req, ver);
-       if (ret < 0)
-               goto done;
+       return 0;
 
-       if ((flags & CEPH_OSD_FLAG_READ) && inbound)
-               ret = ceph_copy_from_page_vector(pages, inbound, ofs, ret);
+out_partial:
+       rbd_obj_request_put(obj_request);
+out_unwind:
+       for_each_obj_request_safe(img_request, obj_request, next_obj_request)
+               rbd_obj_request_put(obj_request);
 
-done:
-       ceph_release_page_vector(pages, num_pages);
-       return ret;
+       return -ENOMEM;
 }
 
-/*
- * Do an asynchronous ceph osd operation
- */
-static int rbd_do_op(struct request *rq,
-                    struct rbd_device *rbd_dev,
-                    struct ceph_snap_context *snapc,
-                    u64 ofs, u64 len,
-                    struct bio *bio,
-                    struct rbd_req_coll *coll,
-                    int coll_index)
-{
-       char *seg_name;
-       u64 seg_ofs;
-       u64 seg_len;
-       int ret;
-       struct ceph_osd_req_op *ops;
-       u32 payload_len;
-       int opcode;
-       int flags;
-       u64 snapid;
-
-       seg_name = rbd_segment_name(rbd_dev, ofs);
-       if (!seg_name)
-               return -ENOMEM;
-       seg_len = rbd_segment_length(rbd_dev, ofs, len);
-       seg_ofs = rbd_segment_offset(rbd_dev, ofs);
-
-       if (rq_data_dir(rq) == WRITE) {
-               opcode = CEPH_OSD_OP_WRITE;
-               flags = CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK;
-               snapid = CEPH_NOSNAP;
-               payload_len = seg_len;
-       } else {
-               opcode = CEPH_OSD_OP_READ;
-               flags = CEPH_OSD_FLAG_READ;
-               snapc = NULL;
-               snapid = rbd_dev->spec->snap_id;
-               payload_len = 0;
+static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+{
+       struct rbd_img_request *img_request;
+       u32 which = obj_request->which;
+       bool more = true;
+
+       img_request = obj_request->img_request;
+
+       dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
+       rbd_assert(img_request != NULL);
+       rbd_assert(img_request->rq != NULL);
+       rbd_assert(img_request->obj_request_count > 0);
+       rbd_assert(which != BAD_WHICH);
+       rbd_assert(which < img_request->obj_request_count);
+       rbd_assert(which >= img_request->next_completion);
+
+       spin_lock_irq(&img_request->completion_lock);
+       if (which != img_request->next_completion)
+               goto out;
+
+       for_each_obj_request_from(img_request, obj_request) {
+               unsigned int xferred;
+               int result;
+
+               rbd_assert(more);
+               rbd_assert(which < img_request->obj_request_count);
+
+               if (!obj_request_done_test(obj_request))
+                       break;
+
+               rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
+               xferred = (unsigned int) obj_request->xferred;
+               result = (int) obj_request->result;
+               if (result)
+                       rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
+                               img_request->write_request ? "write" : "read",
+                               result, xferred);
+
+               more = blk_end_request(img_request->rq, result, xferred);
+               which++;
        }
 
-       ret = -ENOMEM;
-       ops = rbd_create_rw_ops(1, opcode, payload_len);
-       if (!ops)
-               goto done;
+       rbd_assert(more ^ (which == img_request->obj_request_count));
+       img_request->next_completion = which;
+out:
+       spin_unlock_irq(&img_request->completion_lock);
 
-       /* we've taken care of segment sizes earlier when we
-          cloned the bios. We should never have a segment
-          truncated at this point */
-       rbd_assert(seg_len == len);
-
-       ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
-                            seg_name, seg_ofs, seg_len,
-                            bio,
-                            NULL, 0,
-                            flags,
-                            ops,
-                            coll, coll_index,
-                            rbd_req_cb, 0, NULL);
-
-       rbd_destroy_ops(ops);
-done:
-       kfree(seg_name);
-       return ret;
+       if (!more)
+               rbd_img_request_complete(img_request);
 }
 
-/*
- * Request sync osd read
- */
-static int rbd_req_sync_read(struct rbd_device *rbd_dev,
-                         u64 snapid,
-                         const char *object_name,
-                         u64 ofs, u64 len,
-                         char *buf,
-                         u64 *ver)
-{
-       struct ceph_osd_req_op *ops;
-       int ret;
+static int rbd_img_request_submit(struct rbd_img_request *img_request)
+{
+       struct rbd_device *rbd_dev = img_request->rbd_dev;
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       struct rbd_obj_request *obj_request;
 
-       ops = rbd_create_rw_ops(1, CEPH_OSD_OP_READ, 0);
-       if (!ops)
-               return -ENOMEM;
+       dout("%s: img %p\n", __func__, img_request);
+       for_each_obj_request(img_request, obj_request) {
+               int ret;
 
-       ret = rbd_req_sync_op(rbd_dev, NULL,
-                              snapid,
-                              CEPH_OSD_FLAG_READ,
-                              ops, object_name, ofs, len, buf, NULL, ver);
-       rbd_destroy_ops(ops);
+               obj_request->callback = rbd_img_obj_callback;
+               ret = rbd_obj_request_submit(osdc, obj_request);
+               if (ret)
+                       return ret;
+               /*
+                * The image request has its own reference to each
+                * of its object requests, so we can safely drop the
+                * initial one here.
+                */
+               rbd_obj_request_put(obj_request);
+       }
 
-       return ret;
+       return 0;
 }
 
-/*
- * Request sync osd watch
- */
-static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
-                                  u64 ver,
-                                  u64 notify_id)
+static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
+                                  u64 ver, u64 notify_id)
 {
-       struct ceph_osd_req_op *ops;
+       struct rbd_obj_request *obj_request;
+       struct ceph_osd_req_op *op;
+       struct ceph_osd_client *osdc;
        int ret;
 
-       ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY_ACK, 0);
-       if (!ops)
+       obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
+                                                       OBJ_REQUEST_NODATA);
+       if (!obj_request)
                return -ENOMEM;
 
-       ops[0].watch.ver = cpu_to_le64(ver);
-       ops[0].watch.cookie = notify_id;
-       ops[0].watch.flag = 0;
+       ret = -ENOMEM;
+       op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
+       if (!op)
+               goto out;
+       obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+                                               obj_request, op);
+       rbd_osd_req_op_destroy(op);
+       if (!obj_request->osd_req)
+               goto out;
 
-       ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
-                         rbd_dev->header_name, 0, 0, NULL,
-                         NULL, 0,
-                         CEPH_OSD_FLAG_READ,
-                         ops,
-                         NULL, 0,
-                         rbd_simple_req_cb, 0, NULL);
+       osdc = &rbd_dev->rbd_client->client->osdc;
+       obj_request->callback = rbd_obj_request_put;
+       ret = rbd_obj_request_submit(osdc, obj_request);
+out:
+       if (ret)
+               rbd_obj_request_put(obj_request);
 
-       rbd_destroy_ops(ops);
        return ret;
 }
 
@@ -1381,95 +1791,103 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
        if (!rbd_dev)
                return;
 
-       dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
+       dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
                rbd_dev->header_name, (unsigned long long) notify_id,
                (unsigned int) opcode);
        rc = rbd_dev_refresh(rbd_dev, &hver);
        if (rc)
-               pr_warning(RBD_DRV_NAME "%d got notification but failed to "
-                          " update snaps: %d\n", rbd_dev->major, rc);
+               rbd_warn(rbd_dev, "got notification but failed to "
+                          "update snaps: %d\n", rc);
 
-       rbd_req_sync_notify_ack(rbd_dev, hver, notify_id);
+       rbd_obj_notify_ack(rbd_dev, hver, notify_id);
 }
 
 /*
- * Request sync osd watch
+ * Request sync osd watch/unwatch.  The value of "start" determines
+ * whether a watch request is being initiated or torn down.
  */
-static int rbd_req_sync_watch(struct rbd_device *rbd_dev)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
 {
-       struct ceph_osd_req_op *ops;
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       struct rbd_obj_request *obj_request;
+       struct ceph_osd_req_op *op;
        int ret;
 
-       ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
-       if (!ops)
-               return -ENOMEM;
+       rbd_assert(start ^ !!rbd_dev->watch_event);
+       rbd_assert(start ^ !!rbd_dev->watch_request);
 
-       ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
-                                    (void *)rbd_dev, &rbd_dev->watch_event);
-       if (ret < 0)
-               goto fail;
+       if (start) {
+               ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
+                                               &rbd_dev->watch_event);
+               if (ret < 0)
+                       return ret;
+               rbd_assert(rbd_dev->watch_event != NULL);
+       }
 
-       ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
-       ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
-       ops[0].watch.flag = 1;
+       ret = -ENOMEM;
+       obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
+                                                       OBJ_REQUEST_NODATA);
+       if (!obj_request)
+               goto out_cancel;
+
+       op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
+                               rbd_dev->watch_event->cookie,
+                               rbd_dev->header.obj_version, start);
+       if (!op)
+               goto out_cancel;
+       obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
+                                                       obj_request, op);
+       rbd_osd_req_op_destroy(op);
+       if (!obj_request->osd_req)
+               goto out_cancel;
+
+       if (start)
+               ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
+       else
+               ceph_osdc_unregister_linger_request(osdc,
+                                       rbd_dev->watch_request->osd_req);
+       ret = rbd_obj_request_submit(osdc, obj_request);
+       if (ret)
+               goto out_cancel;
+       ret = rbd_obj_request_wait(obj_request);
+       if (ret)
+               goto out_cancel;
+       ret = obj_request->result;
+       if (ret)
+               goto out_cancel;
 
-       ret = rbd_req_sync_op(rbd_dev, NULL,
-                             CEPH_NOSNAP,
-                             CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-                             ops,
-                             rbd_dev->header_name,
-                             0, 0, NULL,
-                             &rbd_dev->watch_request, NULL);
+       /*
+        * A watch request is set to linger, so the underlying osd
+        * request won't go away until we unregister it.  We retain
+        * a pointer to the object request during that time (in
+        * rbd_dev->watch_request), so we'll keep a reference to
+        * it.  We'll drop that reference (below) after we've
+        * unregistered it.
+        */
+       if (start) {
+               rbd_dev->watch_request = obj_request;
 
-       if (ret < 0)
-               goto fail_event;
+               return 0;
+       }
 
-       rbd_destroy_ops(ops);
-       return 0;
+       /* We have successfully torn down the watch request */
 
-fail_event:
+       rbd_obj_request_put(rbd_dev->watch_request);
+       rbd_dev->watch_request = NULL;
+out_cancel:
+       /* Cancel the event if we're tearing down, or on error */
        ceph_osdc_cancel_event(rbd_dev->watch_event);
        rbd_dev->watch_event = NULL;
-fail:
-       rbd_destroy_ops(ops);
-       return ret;
-}
+       if (obj_request)
+               rbd_obj_request_put(obj_request);
 
-/*
- * Request sync osd unwatch
- */
-static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
-{
-       struct ceph_osd_req_op *ops;
-       int ret;
-
-       ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
-       if (!ops)
-               return -ENOMEM;
-
-       ops[0].watch.ver = 0;
-       ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
-       ops[0].watch.flag = 0;
-
-       ret = rbd_req_sync_op(rbd_dev, NULL,
-                             CEPH_NOSNAP,
-                             CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-                             ops,
-                             rbd_dev->header_name,
-                             0, 0, NULL, NULL, NULL);
-
-
-       rbd_destroy_ops(ops);
-       ceph_osdc_cancel_event(rbd_dev->watch_event);
-       rbd_dev->watch_event = NULL;
        return ret;
 }
 
 /*
  * Synchronous osd object method call
  */
-static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
+static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
                             const char *object_name,
                             const char *class_name,
                             const char *method_name,
@@ -1477,169 +1895,154 @@ static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
                             size_t outbound_size,
                             char *inbound,
                             size_t inbound_size,
-                            int flags,
-                            u64 *ver)
+                            u64 *version)
 {
-       struct ceph_osd_req_op *ops;
-       int class_name_len = strlen(class_name);
-       int method_name_len = strlen(method_name);
-       int payload_size;
+       struct rbd_obj_request *obj_request;
+       struct ceph_osd_client *osdc;
+       struct ceph_osd_req_op *op;
+       struct page **pages;
+       u32 page_count;
        int ret;
 
        /*
-        * Any input parameters required by the method we're calling
-        * will be sent along with the class and method names as
-        * part of the message payload.  That data and its size are
-        * supplied via the indata and indata_len fields (named from
-        * the perspective of the server side) in the OSD request
-        * operation.
+        * Method calls are ultimately read operations but they
+        * don't involve object data (so no offset or length).
+        * The result should placed into the inbound buffer
+        * provided.  They also supply outbound data--parameters for
+        * the object method.  Currently if this is present it will
+        * be a snapshot id.
         */
-       payload_size = class_name_len + method_name_len + outbound_size;
-       ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size);
-       if (!ops)
-               return -ENOMEM;
+       page_count = (u32) calc_pages_for(0, inbound_size);
+       pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
 
-       ops[0].cls.class_name = class_name;
-       ops[0].cls.class_len = (__u8) class_name_len;
-       ops[0].cls.method_name = method_name;
-       ops[0].cls.method_len = (__u8) method_name_len;
-       ops[0].cls.argc = 0;
-       ops[0].cls.indata = outbound;
-       ops[0].cls.indata_len = outbound_size;
+       ret = -ENOMEM;
+       obj_request = rbd_obj_request_create(object_name, 0, 0,
+                                                       OBJ_REQUEST_PAGES);
+       if (!obj_request)
+               goto out;
 
-       ret = rbd_req_sync_op(rbd_dev, NULL,
-                              CEPH_NOSNAP,
-                              flags, ops,
-                              object_name, 0, inbound_size, inbound,
-                              NULL, ver);
+       obj_request->pages = pages;
+       obj_request->page_count = page_count;
 
-       rbd_destroy_ops(ops);
+       op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
+                                       method_name, outbound, outbound_size);
+       if (!op)
+               goto out;
+       obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+                                               obj_request, op);
+       rbd_osd_req_op_destroy(op);
+       if (!obj_request->osd_req)
+               goto out;
 
-       dout("cls_exec returned %d\n", ret);
-       return ret;
-}
+       osdc = &rbd_dev->rbd_client->client->osdc;
+       ret = rbd_obj_request_submit(osdc, obj_request);
+       if (ret)
+               goto out;
+       ret = rbd_obj_request_wait(obj_request);
+       if (ret)
+               goto out;
 
-static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
-{
-       struct rbd_req_coll *coll =
-                       kzalloc(sizeof(struct rbd_req_coll) +
-                               sizeof(struct rbd_req_status) * num_reqs,
-                               GFP_ATOMIC);
+       ret = obj_request->result;
+       if (ret < 0)
+               goto out;
+       ret = 0;
+       ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
+       if (version)
+               *version = obj_request->version;
+out:
+       if (obj_request)
+               rbd_obj_request_put(obj_request);
+       else
+               ceph_release_page_vector(pages, page_count);
 
-       if (!coll)
-               return NULL;
-       coll->total = num_reqs;
-       kref_init(&coll->kref);
-       return coll;
+       return ret;
 }
 
-/*
- * block device queue callback
- */
-static void rbd_rq_fn(struct request_queue *q)
+static void rbd_request_fn(struct request_queue *q)
+               __releases(q->queue_lock) __acquires(q->queue_lock)
 {
        struct rbd_device *rbd_dev = q->queuedata;
+       bool read_only = rbd_dev->mapping.read_only;
        struct request *rq;
+       int result;
 
        while ((rq = blk_fetch_request(q))) {
-               struct bio *bio;
-               bool do_write;
-               unsigned int size;
-               u64 ofs;
-               int num_segs, cur_seg = 0;
-               struct rbd_req_coll *coll;
-               struct ceph_snap_context *snapc;
-               unsigned int bio_offset;
-
-               dout("fetched request\n");
-
-               /* filter out block requests we don't understand */
-               if ((rq->cmd_type != REQ_TYPE_FS)) {
-                       __blk_end_request_all(rq, 0);
-                       continue;
-               }
+               bool write_request = rq_data_dir(rq) == WRITE;
+               struct rbd_img_request *img_request;
+               u64 offset;
+               u64 length;
+
+               /* Ignore any non-FS requests that filter through. */
 
-               /* deduce our operation (read, write) */
-               do_write = (rq_data_dir(rq) == WRITE);
-               if (do_write && rbd_dev->mapping.read_only) {
-                       __blk_end_request_all(rq, -EROFS);
+               if (rq->cmd_type != REQ_TYPE_FS) {
+                       dout("%s: non-fs request type %d\n", __func__,
+                               (int) rq->cmd_type);
+                       __blk_end_request_all(rq, 0);
                        continue;
                }
 
-               spin_unlock_irq(q->queue_lock);
+               /* Ignore/skip any zero-length requests */
 
-               down_read(&rbd_dev->header_rwsem);
+               offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
+               length = (u64) blk_rq_bytes(rq);
 
-               if (!rbd_dev->exists) {
-                       rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
-                       up_read(&rbd_dev->header_rwsem);
-                       dout("request for non-existent snapshot");
-                       spin_lock_irq(q->queue_lock);
-                       __blk_end_request_all(rq, -ENXIO);
+               if (!length) {
+                       dout("%s: zero-length request\n", __func__);
+                       __blk_end_request_all(rq, 0);
                        continue;
                }
 
-               snapc = ceph_get_snap_context(rbd_dev->header.snapc);
-
-               up_read(&rbd_dev->header_rwsem);
-
-               size = blk_rq_bytes(rq);
-               ofs = blk_rq_pos(rq) * SECTOR_SIZE;
-               bio = rq->bio;
+               spin_unlock_irq(q->queue_lock);
 
-               dout("%s 0x%x bytes at 0x%llx\n",
-                    do_write ? "write" : "read",
-                    size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
+               /* Disallow writes to a read-only device */
 
-               num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
-               if (num_segs <= 0) {
-                       spin_lock_irq(q->queue_lock);
-                       __blk_end_request_all(rq, num_segs);
-                       ceph_put_snap_context(snapc);
-                       continue;
+               if (write_request) {
+                       result = -EROFS;
+                       if (read_only)
+                               goto end_request;
+                       rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
                }
-               coll = rbd_alloc_coll(num_segs);
-               if (!coll) {
-                       spin_lock_irq(q->queue_lock);
-                       __blk_end_request_all(rq, -ENOMEM);
-                       ceph_put_snap_context(snapc);
-                       continue;
-               }
-
-               bio_offset = 0;
-               do {
-                       u64 limit = rbd_segment_length(rbd_dev, ofs, size);
-                       unsigned int chain_size;
-                       struct bio *bio_chain;
-
-                       BUG_ON(limit > (u64) UINT_MAX);
-                       chain_size = (unsigned int) limit;
-                       dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
 
-                       kref_get(&coll->kref);
+               /*
+                * Quit early if the mapped snapshot no longer
+                * exists.  It's still possible the snapshot will
+                * have disappeared by the time our request arrives
+                * at the osd, but there's no sense in sending it if
+                * we already know.
+                */
+               if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
+                       dout("request for non-existent snapshot");
+                       rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
+                       result = -ENXIO;
+                       goto end_request;
+               }
 
-                       /* Pass a cloned bio chain via an osd request */
+               result = -EINVAL;
+               if (WARN_ON(offset && length > U64_MAX - offset + 1))
+                       goto end_request;       /* Shouldn't happen */
 
-                       bio_chain = bio_chain_clone_range(&bio,
-                                               &bio_offset, chain_size,
-                                               GFP_ATOMIC);
-                       if (bio_chain)
-                               (void) rbd_do_op(rq, rbd_dev, snapc,
-                                               ofs, chain_size,
-                                               bio_chain, coll, cur_seg);
-                       else
-                               rbd_coll_end_req_index(rq, coll, cur_seg,
-                                                      -ENOMEM, chain_size);
-                       size -= chain_size;
-                       ofs += chain_size;
+               result = -ENOMEM;
+               img_request = rbd_img_request_create(rbd_dev, offset, length,
+                                                       write_request);
+               if (!img_request)
+                       goto end_request;
 
-                       cur_seg++;
-               } while (size > 0);
-               kref_put(&coll->kref, rbd_coll_release);
+               img_request->rq = rq;
 
+               result = rbd_img_request_fill_bio(img_request, rq->bio);
+               if (!result)
+                       result = rbd_img_request_submit(img_request);
+               if (result)
+                       rbd_img_request_put(img_request);
+end_request:
                spin_lock_irq(q->queue_lock);
-
-               ceph_put_snap_context(snapc);
+               if (result < 0) {
+                       rbd_warn(rbd_dev, "obj_request %s result %d\n",
+                               write_request ? "write" : "read", result);
+                       __blk_end_request_all(rq, result);
+               }
        }
 }
 
@@ -1703,6 +2106,71 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
        put_disk(disk);
 }
 
+static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
+                               const char *object_name,
+                               u64 offset, u64 length,
+                               char *buf, u64 *version)
+
+{
+       struct ceph_osd_req_op *op;
+       struct rbd_obj_request *obj_request;
+       struct ceph_osd_client *osdc;
+       struct page **pages = NULL;
+       u32 page_count;
+       size_t size;
+       int ret;
+
+       page_count = (u32) calc_pages_for(offset, length);
+       pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+       if (IS_ERR(pages))
+               ret = PTR_ERR(pages);
+
+       ret = -ENOMEM;
+       obj_request = rbd_obj_request_create(object_name, offset, length,
+                                                       OBJ_REQUEST_PAGES);
+       if (!obj_request)
+               goto out;
+
+       obj_request->pages = pages;
+       obj_request->page_count = page_count;
+
+       op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
+       if (!op)
+               goto out;
+       obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
+                                               obj_request, op);
+       rbd_osd_req_op_destroy(op);
+       if (!obj_request->osd_req)
+               goto out;
+
+       osdc = &rbd_dev->rbd_client->client->osdc;
+       ret = rbd_obj_request_submit(osdc, obj_request);
+       if (ret)
+               goto out;
+       ret = rbd_obj_request_wait(obj_request);
+       if (ret)
+               goto out;
+
+       ret = obj_request->result;
+       if (ret < 0)
+               goto out;
+
+       rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
+       size = (size_t) obj_request->xferred;
+       ceph_copy_from_page_vector(pages, buf, 0, size);
+       rbd_assert(size <= (size_t) INT_MAX);
+       ret = (int) size;
+       if (version)
+               *version = obj_request->version;
+out:
+       if (obj_request)
+               rbd_obj_request_put(obj_request);
+       else
+               ceph_release_page_vector(pages, page_count);
+
+       return ret;
+}
+
 /*
  * Read the complete header for the given rbd device.
  *
@@ -1741,24 +2209,20 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
                if (!ondisk)
                        return ERR_PTR(-ENOMEM);
 
-               ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP,
-                                      rbd_dev->header_name,
+               ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
                                       0, size,
                                       (char *) ondisk, version);
-
                if (ret < 0)
                        goto out_err;
                if (WARN_ON((size_t) ret < size)) {
                        ret = -ENXIO;
-                       pr_warning("short header read for image %s"
-                                       " (want %zd got %d)\n",
-                               rbd_dev->spec->image_name, size, ret);
+                       rbd_warn(rbd_dev, "short header read (want %zd got %d)",
+                               size, ret);
                        goto out_err;
                }
                if (!rbd_dev_ondisk_valid(ondisk)) {
                        ret = -ENXIO;
-                       pr_warning("invalid header for image %s\n",
-                               rbd_dev->spec->image_name);
+                       rbd_warn(rbd_dev, "invalid header");
                        goto out_err;
                }
 
@@ -1895,8 +2359,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        disk->fops = &rbd_bd_ops;
        disk->private_data = rbd_dev;
 
-       /* init rq */
-       q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
+       q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
        if (!q)
                goto out_disk;
 
@@ -2233,7 +2696,7 @@ static void rbd_spec_free(struct kref *kref)
        kfree(spec);
 }
 
-struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                                struct rbd_spec *spec)
 {
        struct rbd_device *rbd_dev;
@@ -2243,6 +2706,7 @@ struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                return NULL;
 
        spin_lock_init(&rbd_dev->lock);
+       rbd_dev->flags = 0;
        INIT_LIST_HEAD(&rbd_dev->node);
        INIT_LIST_HEAD(&rbd_dev->snaps);
        init_rwsem(&rbd_dev->header_rwsem);
@@ -2250,6 +2714,13 @@ struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
        rbd_dev->spec = spec;
        rbd_dev->rbd_client = rbdc;
 
+       /* Initialize the layout used for all rbd requests */
+
+       rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+       rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
+       rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
+       rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
+
        return rbd_dev;
 }
 
@@ -2360,12 +2831,11 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                __le64 size;
        } __attribute__ ((packed)) size_buf = { 0 };
 
-       ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+       ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_size",
                                (char *) &snapid, sizeof (snapid),
-                               (char *) &size_buf, sizeof (size_buf),
-                               CEPH_OSD_FLAG_READ, NULL);
-       dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+                               (char *) &size_buf, sizeof (size_buf), NULL);
+       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                return ret;
 
@@ -2396,15 +2866,13 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
        if (!reply_buf)
                return -ENOMEM;
 
-       ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+       ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_object_prefix",
                                NULL, 0,
-                               reply_buf, RBD_OBJ_PREFIX_LEN_MAX,
-                               CEPH_OSD_FLAG_READ, NULL);
-       dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+                               reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
+       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out;
-       ret = 0;    /* rbd_req_sync_exec() can return positive */
 
        p = reply_buf;
        rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
@@ -2435,12 +2903,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
        u64 incompat;
        int ret;
 
-       ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+       ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_features",
                                (char *) &snapid, sizeof (snapid),
                                (char *) &features_buf, sizeof (features_buf),
-                               CEPH_OSD_FLAG_READ, NULL);
-       dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+                               NULL);
+       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                return ret;
 
@@ -2474,7 +2942,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        void *end;
        char *image_id;
        u64 overlap;
-       size_t len = 0;
        int ret;
 
        parent_spec = rbd_spec_alloc();
@@ -2492,12 +2959,11 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        }
 
        snapid = cpu_to_le64(CEPH_NOSNAP);
-       ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+       ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_parent",
                                (char *) &snapid, sizeof (snapid),
-                               (char *) reply_buf, size,
-                               CEPH_OSD_FLAG_READ, NULL);
-       dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+                               (char *) reply_buf, size, NULL);
+       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out_err;
 
@@ -2508,13 +2974,18 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        if (parent_spec->pool_id == CEPH_NOPOOL)
                goto out;       /* No parent?  No problem. */
 
-       image_id = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+       /* The ceph file layout needs to fit pool id in 32 bits */
+
+       ret = -EIO;
+       if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
+               goto out;
+
+       image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
        if (IS_ERR(image_id)) {
                ret = PTR_ERR(image_id);
                goto out_err;
        }
        parent_spec->image_id = image_id;
-       parent_spec->image_id_len = len;
        ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
        ceph_decode_64_safe(&p, end, overlap, out_err);
 
@@ -2544,26 +3015,25 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
 
        rbd_assert(!rbd_dev->spec->image_name);
 
-       image_id_size = sizeof (__le32) + rbd_dev->spec->image_id_len;
+       len = strlen(rbd_dev->spec->image_id);
+       image_id_size = sizeof (__le32) + len;
        image_id = kmalloc(image_id_size, GFP_KERNEL);
        if (!image_id)
                return NULL;
 
        p = image_id;
        end = (char *) image_id + image_id_size;
-       ceph_encode_string(&p, end, rbd_dev->spec->image_id,
-                               (u32) rbd_dev->spec->image_id_len);
+       ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
 
        size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
        reply_buf = kmalloc(size, GFP_KERNEL);
        if (!reply_buf)
                goto out;
 
-       ret = rbd_req_sync_exec(rbd_dev, RBD_DIRECTORY,
+       ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
                                "rbd", "dir_get_name",
                                image_id, image_id_size,
-                               (char *) reply_buf, size,
-                               CEPH_OSD_FLAG_READ, NULL);
+                               (char *) reply_buf, size, NULL);
        if (ret < 0)
                goto out;
        p = reply_buf;
@@ -2602,8 +3072,11 @@ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
 
        osdc = &rbd_dev->rbd_client->client->osdc;
        name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
-       if (!name)
-               return -EIO;    /* pool id too large (>= 2^31) */
+       if (!name) {
+               rbd_warn(rbd_dev, "there is no pool with id %llu",
+                       rbd_dev->spec->pool_id);        /* Really a BUG() */
+               return -EIO;
+       }
 
        rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
        if (!rbd_dev->spec->pool_name)
@@ -2612,19 +3085,17 @@ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
        /* Fetch the image name; tolerate failure here */
 
        name = rbd_dev_image_name(rbd_dev);
-       if (name) {
-               rbd_dev->spec->image_name_len = strlen(name);
+       if (name)
                rbd_dev->spec->image_name = (char *) name;
-       } else {
-               pr_warning(RBD_DRV_NAME "%d "
-                       "unable to get image name for image id %s\n",
-                       rbd_dev->major, rbd_dev->spec->image_id);
-       }
+       else
+               rbd_warn(rbd_dev, "unable to get image name");
 
        /* Look up the snapshot name. */
 
        name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
        if (!name) {
+               rbd_warn(rbd_dev, "no snapshot with id %llu",
+                       rbd_dev->spec->snap_id);        /* Really a BUG() */
                ret = -EIO;
                goto out_err;
        }
@@ -2665,12 +3136,11 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
        if (!reply_buf)
                return -ENOMEM;
 
-       ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+       ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_snapcontext",
                                NULL, 0,
-                               reply_buf, size,
-                               CEPH_OSD_FLAG_READ, ver);
-       dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+                               reply_buf, size, ver);
+       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out;
 
@@ -2735,12 +3205,11 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
                return ERR_PTR(-ENOMEM);
 
        snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
-       ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+       ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_snapshot_name",
                                (char *) &snap_id, sizeof (snap_id),
-                               reply_buf, size,
-                               CEPH_OSD_FLAG_READ, NULL);
-       dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+                               reply_buf, size, NULL);
+       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out;
 
@@ -2766,7 +3235,7 @@ out:
 static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
                u64 *snap_size, u64 *snap_features)
 {
-       __le64 snap_id;
+       u64 snap_id;
        u8 order;
        int ret;
 
@@ -2865,10 +3334,17 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
                if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
                        struct list_head *next = links->next;
 
-                       /* Existing snapshot not in the new snap context */
-
+                       /*
+                        * A previously-existing snapshot is not in
+                        * the new snap context.
+                        *
+                        * If the now missing snapshot is the one the
+                        * image is mapped to, clear its exists flag
+                        * so we can avoid sending any more requests
+                        * to it.
+                        */
                        if (rbd_dev->spec->snap_id == snap->id)
-                               rbd_dev->exists = false;
+                               clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
                        rbd_remove_snap_dev(snap);
                        dout("%ssnap id %llu has been removed\n",
                                rbd_dev->spec->snap_id == snap->id ?
@@ -2942,7 +3418,7 @@ static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
        struct rbd_snap *snap;
        int ret = 0;
 
-       dout("%s called\n", __func__);
+       dout("%s:\n", __func__);
        if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
                return -EIO;
 
@@ -2983,22 +3459,6 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
        device_unregister(&rbd_dev->dev);
 }
 
-static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
-{
-       int ret, rc;
-
-       do {
-               ret = rbd_req_sync_watch(rbd_dev);
-               if (ret == -ERANGE) {
-                       rc = rbd_dev_refresh(rbd_dev, NULL);
-                       if (rc < 0)
-                               return rc;
-               }
-       } while (ret == -ERANGE);
-
-       return ret;
-}
-
 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
 
 /*
@@ -3138,11 +3598,9 @@ static inline char *dup_token(const char **buf, size_t *lenp)
        size_t len;
 
        len = next_token(buf);
-       dup = kmalloc(len + 1, GFP_KERNEL);
+       dup = kmemdup(*buf, len + 1, GFP_KERNEL);
        if (!dup)
                return NULL;
-
-       memcpy(dup, *buf, len);
        *(dup + len) = '\0';
        *buf += len;
 
@@ -3210,8 +3668,10 @@ static int rbd_add_parse_args(const char *buf,
        /* The first four tokens are required */
 
        len = next_token(&buf);
-       if (!len)
-               return -EINVAL; /* Missing monitor address(es) */
+       if (!len) {
+               rbd_warn(NULL, "no monitor address(es) provided");
+               return -EINVAL;
+       }
        mon_addrs = buf;
        mon_addrs_size = len + 1;
        buf += len;
@@ -3220,8 +3680,10 @@ static int rbd_add_parse_args(const char *buf,
        options = dup_token(&buf, NULL);
        if (!options)
                return -ENOMEM;
-       if (!*options)
-               goto out_err;   /* Missing options */
+       if (!*options) {
+               rbd_warn(NULL, "no options provided");
+               goto out_err;
+       }
 
        spec = rbd_spec_alloc();
        if (!spec)
@@ -3230,14 +3692,18 @@ static int rbd_add_parse_args(const char *buf,
        spec->pool_name = dup_token(&buf, NULL);
        if (!spec->pool_name)
                goto out_mem;
-       if (!*spec->pool_name)
-               goto out_err;   /* Missing pool name */
+       if (!*spec->pool_name) {
+               rbd_warn(NULL, "no pool name provided");
+               goto out_err;
+       }
 
-       spec->image_name = dup_token(&buf, &spec->image_name_len);
+       spec->image_name = dup_token(&buf, NULL);
        if (!spec->image_name)
                goto out_mem;
-       if (!*spec->image_name)
-               goto out_err;   /* Missing image name */
+       if (!*spec->image_name) {
+               rbd_warn(NULL, "no image name provided");
+               goto out_err;
+       }
 
        /*
         * Snapshot name is optional; default is to use "-"
@@ -3251,10 +3717,9 @@ static int rbd_add_parse_args(const char *buf,
                ret = -ENAMETOOLONG;
                goto out_err;
        }
-       spec->snap_name = kmalloc(len + 1, GFP_KERNEL);
+       spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
        if (!spec->snap_name)
                goto out_mem;
-       memcpy(spec->snap_name, buf, len);
        *(spec->snap_name + len) = '\0';
 
        /* Initialize all rbd options to the defaults */
@@ -3323,7 +3788,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
         * First, see if the format 2 image id file exists, and if
         * so, get the image's persistent id from it.
         */
-       size = sizeof (RBD_ID_PREFIX) + rbd_dev->spec->image_name_len;
+       size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
        object_name = kmalloc(size, GFP_NOIO);
        if (!object_name)
                return -ENOMEM;
@@ -3339,21 +3804,18 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
                goto out;
        }
 
-       ret = rbd_req_sync_exec(rbd_dev, object_name,
+       ret = rbd_obj_method_sync(rbd_dev, object_name,
                                "rbd", "get_id",
                                NULL, 0,
-                               response, RBD_IMAGE_ID_LEN_MAX,
-                               CEPH_OSD_FLAG_READ, NULL);
-       dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+                               response, RBD_IMAGE_ID_LEN_MAX, NULL);
+       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out;
-       ret = 0;    /* rbd_req_sync_exec() can return positive */
 
        p = response;
        rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
                                                p + RBD_IMAGE_ID_LEN_MAX,
-                                               &rbd_dev->spec->image_id_len,
-                                               GFP_NOIO);
+                                               NULL, GFP_NOIO);
        if (IS_ERR(rbd_dev->spec->image_id)) {
                ret = PTR_ERR(rbd_dev->spec->image_id);
                rbd_dev->spec->image_id = NULL;
@@ -3377,11 +3839,10 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
        rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
        if (!rbd_dev->spec->image_id)
                return -ENOMEM;
-       rbd_dev->spec->image_id_len = 0;
 
        /* Record the header object name for this rbd image. */
 
-       size = rbd_dev->spec->image_name_len + sizeof (RBD_SUFFIX);
+       size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
        rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
        if (!rbd_dev->header_name) {
                ret = -ENOMEM;
@@ -3427,7 +3888,7 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
         * Image id was filled in by the caller.  Record the header
         * object name for this rbd image.
         */
-       size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->spec->image_id_len;
+       size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
        rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
        if (!rbd_dev->header_name)
                return -ENOMEM;
@@ -3542,7 +4003,7 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
        if (ret)
                goto err_out_bus;
 
-       ret = rbd_init_watch_dev(rbd_dev);
+       ret = rbd_dev_header_watch_sync(rbd_dev, 1);
        if (ret)
                goto err_out_bus;
 
@@ -3638,6 +4099,13 @@ static ssize_t rbd_add(struct bus_type *bus,
                goto err_out_client;
        spec->pool_id = (u64) rc;
 
+       /* The ceph file layout needs to fit pool id in 32 bits */
+
+       if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
+               rc = -EIO;
+               goto err_out_client;
+       }
+
        rbd_dev = rbd_dev_create(rbdc, spec);
        if (!rbd_dev)
                goto err_out_client;
@@ -3691,15 +4159,8 @@ static void rbd_dev_release(struct device *dev)
 {
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 
-       if (rbd_dev->watch_request) {
-               struct ceph_client *client = rbd_dev->rbd_client->client;
-
-               ceph_osdc_unregister_linger_request(&client->osdc,
-                                                   rbd_dev->watch_request);
-       }
        if (rbd_dev->watch_event)
-               rbd_req_sync_unwatch(rbd_dev);
-
+               rbd_dev_header_watch_sync(rbd_dev, 0);
 
        /* clean up and free blkdev */
        rbd_free_disk(rbd_dev);
@@ -3743,10 +4204,14 @@ static ssize_t rbd_remove(struct bus_type *bus,
                goto done;
        }
 
-       if (rbd_dev->open_count) {
+       spin_lock_irq(&rbd_dev->lock);
+       if (rbd_dev->open_count)
                ret = -EBUSY;
+       else
+               set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
+       spin_unlock_irq(&rbd_dev->lock);
+       if (ret < 0)
                goto done;
-       }
 
        rbd_remove_all_snaps(rbd_dev);
        rbd_bus_del_dev(rbd_dev);
@@ -3782,10 +4247,15 @@ static void rbd_sysfs_cleanup(void)
        device_unregister(&rbd_root_dev);
 }
 
-int __init rbd_init(void)
+static int __init rbd_init(void)
 {
        int rc;
 
+       if (!libceph_compatible(NULL)) {
+               rbd_warn(NULL, "libceph incompatibility (quitting)");
+
+               return -EINVAL;
+       }
        rc = rbd_sysfs_init();
        if (rc)
                return rc;
@@ -3793,7 +4263,7 @@ int __init rbd_init(void)
        return 0;
 }
 
-void __exit rbd_exit(void)
+static void __exit rbd_exit(void)
 {
        rbd_sysfs_cleanup();
 }
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
new file mode 100644 (file)
index 0000000..f35cd0b
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
+rsxx-y := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
new file mode 100644 (file)
index 0000000..a295e7e
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+* Filename: config.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*      Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include <linux/swab.h>
+
+#include "rsxx_priv.h"
+#include "rsxx_cfg.h"
+
+static void initialize_config(void *config)
+{
+       struct rsxx_card_cfg *cfg = config;
+
+       cfg->hdr.version = RSXX_CFG_VERSION;
+
+       cfg->data.block_size        = RSXX_HW_BLK_SIZE;
+       cfg->data.stripe_size       = RSXX_HW_BLK_SIZE;
+       cfg->data.vendor_id         = RSXX_VENDOR_ID_TMS_IBM;
+       cfg->data.cache_order       = (-1);
+       cfg->data.intr_coal.mode    = RSXX_INTR_COAL_DISABLED;
+       cfg->data.intr_coal.count   = 0;
+       cfg->data.intr_coal.latency = 0;
+}
+
+static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
+{
+       /*
+        * Return the complement of the CRC to ensure compatibility
+        * (i.e. this is how early rsxx drivers did it.)
+        */
+
+       return ~crc32(~0, &cfg->data, sizeof(cfg->data));
+}
+
+
+/*----------------- Config Byte Swap Functions -------------------*/
+static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
+{
+       hdr->version = be32_to_cpu((__force __be32) hdr->version);
+       hdr->crc     = be32_to_cpu((__force __be32) hdr->crc);
+}
+
+static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
+{
+       hdr->version = (__force u32) cpu_to_be32(hdr->version);
+       hdr->crc     = (__force u32) cpu_to_be32(hdr->crc);
+}
+
+static void config_data_swab(struct rsxx_card_cfg *cfg)
+{
+       u32 *data = (u32 *) &cfg->data;
+       int i;
+
+       for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+               data[i] = swab32(data[i]);
+}
+
+static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
+{
+       u32 *data = (u32 *) &cfg->data;
+       int i;
+
+       for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+               data[i] = le32_to_cpu((__force __le32) data[i]);
+}
+
+static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
+{
+       u32 *data = (u32 *) &cfg->data;
+       int i;
+
+       for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+               data[i] = (__force u32) cpu_to_le32(data[i]);
+}
+
+
+/*----------------- Config Operations ------------------*/
+static int rsxx_save_config(struct rsxx_cardinfo *card)
+{
+       struct rsxx_card_cfg cfg;
+       int st;
+
+       memcpy(&cfg, &card->config, sizeof(cfg));
+
+       if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
+               dev_err(CARD_TO_DEV(card),
+                       "Cannot save config with invalid version %d\n",
+                       cfg.hdr.version);
+               return -EINVAL;
+       }
+
+       /* Convert data to little endian for the CRC calculation. */
+       config_data_cpu_to_le(&cfg);
+
+       cfg.hdr.crc = config_data_crc32(&cfg);
+
+       /*
+        * Swap the data from little endian to big endian so it can be
+        * stored.
+        */
+       config_data_swab(&cfg);
+       config_hdr_cpu_to_be(&cfg.hdr);
+
+       st = rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
+       if (st)
+               return st;
+
+       return 0;
+}
+
+int rsxx_load_config(struct rsxx_cardinfo *card)
+{
+       int st;
+       u32 crc;
+
+       st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
+                               &card->config, 1);
+       if (st) {
+               dev_err(CARD_TO_DEV(card),
+                       "Failed reading card config.\n");
+               return st;
+       }
+
+       config_hdr_be_to_cpu(&card->config.hdr);
+
+       if (card->config.hdr.version == RSXX_CFG_VERSION) {
+               /*
+                * We calculate the CRC with the data in little endian, because
+                * early drivers did not take big endian CPUs into account.
+                * The data is always stored in big endian, so we need to byte
+                * swap it before calculating the CRC.
+                */
+
+               config_data_swab(&card->config);
+
+               /* Check the CRC */
+               crc = config_data_crc32(&card->config);
+               if (crc != card->config.hdr.crc) {
+                       dev_err(CARD_TO_DEV(card),
+                               "Config corruption detected!\n");
+                       dev_info(CARD_TO_DEV(card),
+                               "CRC (sb x%08x is x%08x)\n",
+                               card->config.hdr.crc, crc);
+                       return -EIO;
+               }
+
+               /* Convert the data to CPU byteorder */
+               config_data_le_to_cpu(&card->config);
+
+       } else if (card->config.hdr.version != 0) {
+               dev_err(CARD_TO_DEV(card),
+                       "Invalid config version %d.\n",
+                       card->config.hdr.version);
+               /*
+                * Config version changes require special handling from the
+                * user
+                */
+               return -EINVAL;
+       } else {
+               dev_info(CARD_TO_DEV(card),
+                       "Initializing card configuration.\n");
+               initialize_config(card);
+               st = rsxx_save_config(card);
+               if (st)
+                       return st;
+       }
+
+       card->config_valid = 1;
+
+       dev_dbg(CARD_TO_DEV(card), "version:     x%08x\n",
+               card->config.hdr.version);
+       dev_dbg(CARD_TO_DEV(card), "crc:         x%08x\n",
+               card->config.hdr.crc);
+       dev_dbg(CARD_TO_DEV(card), "block_size:  x%08x\n",
+               card->config.data.block_size);
+       dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
+               card->config.data.stripe_size);
+       dev_dbg(CARD_TO_DEV(card), "vendor_id:   x%08x\n",
+               card->config.data.vendor_id);
+       dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
+               card->config.data.cache_order);
+       dev_dbg(CARD_TO_DEV(card), "mode:        x%08x\n",
+               card->config.data.intr_coal.mode);
+       dev_dbg(CARD_TO_DEV(card), "count:       x%08x\n",
+               card->config.data.intr_coal.count);
+       dev_dbg(CARD_TO_DEV(card), "latency:     x%08x\n",
+                card->config.data.intr_coal.latency);
+
+       return 0;
+}
+
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
new file mode 100644 (file)
index 0000000..e516248
--- /dev/null
@@ -0,0 +1,649 @@
+/*
+* Filename: core.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*      Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include <linux/genhd.h>
+#include <linux/idr.h>
+
+#include "rsxx_priv.h"
+#include "rsxx_cfg.h"
+
+#define NO_LEGACY 0
+
+MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
+MODULE_AUTHOR("IBM <support@ramsan.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+static unsigned int force_legacy = NO_LEGACY;
+module_param(force_legacy, uint, 0444);
+MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
+
+static DEFINE_IDA(rsxx_disk_ida);
+static DEFINE_SPINLOCK(rsxx_ida_lock);
+
+/*----------------- Interrupt Control & Handling -------------------*/
+static void __enable_intr(unsigned int *mask, unsigned int intr)
+{
+       *mask |= intr;
+}
+
+static void __disable_intr(unsigned int *mask, unsigned int intr)
+{
+       *mask &= ~intr;
+}
+
+/*
+ * NOTE: Disabling the IER will disable the hardware interrupt.
+ * Disabling the ISR will disable the software handling of the ISR bit.
+ *
+ * Enable/Disable interrupt functions assume the card->irq_lock
+ * is held by the caller.
+ */
+void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
+{
+       if (unlikely(card->halt))
+               return;
+
+       __enable_intr(&card->ier_mask, intr);
+       iowrite32(card->ier_mask, card->regmap + IER);
+}
+
+void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
+{
+       __disable_intr(&card->ier_mask, intr);
+       iowrite32(card->ier_mask, card->regmap + IER);
+}
+
+void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
+                                unsigned int intr)
+{
+       if (unlikely(card->halt))
+               return;
+
+       __enable_intr(&card->isr_mask, intr);
+       __enable_intr(&card->ier_mask, intr);
+       iowrite32(card->ier_mask, card->regmap + IER);
+}
+void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
+                                 unsigned int intr)
+{
+       __disable_intr(&card->isr_mask, intr);
+       __disable_intr(&card->ier_mask, intr);
+       iowrite32(card->ier_mask, card->regmap + IER);
+}
+
+static irqreturn_t rsxx_isr(int irq, void *pdata)
+{
+       struct rsxx_cardinfo *card = pdata;
+       unsigned int isr;
+       int handled = 0;
+       int reread_isr;
+       int i;
+
+       spin_lock(&card->irq_lock);
+
+       do {
+               reread_isr = 0;
+
+               isr = ioread32(card->regmap + ISR);
+               if (isr == 0xffffffff) {
+                       /*
+                        * A few systems seem to have an intermittent issue
+                        * where PCI reads return all Fs, but retrying the read
+                        * a little later will return as expected.
+                        */
+                       dev_info(CARD_TO_DEV(card),
+                               "ISR = 0xFFFFFFFF, retrying later\n");
+                       break;
+               }
+
+               isr &= card->isr_mask;
+               if (!isr)
+                       break;
+
+               for (i = 0; i < card->n_targets; i++) {
+                       if (isr & CR_INTR_DMA(i)) {
+                               if (card->ier_mask & CR_INTR_DMA(i)) {
+                                       rsxx_disable_ier(card, CR_INTR_DMA(i));
+                                       reread_isr = 1;
+                               }
+                               queue_work(card->ctrl[i].done_wq,
+                                          &card->ctrl[i].dma_done_work);
+                               handled++;
+                       }
+               }
+
+               if (isr & CR_INTR_CREG) {
+                       schedule_work(&card->creg_ctrl.done_work);
+                       handled++;
+               }
+
+               if (isr & CR_INTR_EVENT) {
+                       schedule_work(&card->event_work);
+                       rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
+                       handled++;
+               }
+       } while (reread_isr);
+
+       spin_unlock(&card->irq_lock);
+
+       return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*----------------- Card Event Handler -------------------*/
+static char *rsxx_card_state_to_str(unsigned int state)
+{
+       static char *state_strings[] = {
+               "Unknown", "Shutdown", "Starting", "Formatting",
+               "Uninitialized", "Good", "Shutting Down",
+               "Fault", "Read Only Fault", "dStroying"
+       };
+
+       return state_strings[ffs(state)];
+}
+
+static void card_state_change(struct rsxx_cardinfo *card,
+                             unsigned int new_state)
+{
+       int st;
+
+       dev_info(CARD_TO_DEV(card),
+               "card state change detected.(%s -> %s)\n",
+               rsxx_card_state_to_str(card->state),
+               rsxx_card_state_to_str(new_state));
+
+       card->state = new_state;
+
+       /* Don't attach DMA interfaces if the card has an invalid config */
+       if (!card->config_valid)
+               return;
+
+       switch (new_state) {
+       case CARD_STATE_RD_ONLY_FAULT:
+               dev_crit(CARD_TO_DEV(card),
+                       "Hardware has entered read-only mode!\n");
+               /*
+                * Fall through so the DMA devices can be attached and
+                * the user can attempt to pull off their data.
+                */
+       case CARD_STATE_GOOD:
+               st = rsxx_get_card_size8(card, &card->size8);
+               if (st)
+                       dev_err(CARD_TO_DEV(card),
+                               "Failed attaching DMA devices\n");
+
+               if (card->config_valid)
+                       set_capacity(card->gendisk, card->size8 >> 9);
+               break;
+
+       case CARD_STATE_FAULT:
+               dev_crit(CARD_TO_DEV(card),
+                       "Hardware Fault reported!\n");
+               /* Fall through. */
+
+       /* Everything else, detach DMA interface if it's attached. */
+       case CARD_STATE_SHUTDOWN:
+       case CARD_STATE_STARTING:
+       case CARD_STATE_FORMATTING:
+       case CARD_STATE_UNINITIALIZED:
+       case CARD_STATE_SHUTTING_DOWN:
+       /*
+        * dStroy is a term coined by marketing to represent the low level
+        * secure erase.
+        */
+       case CARD_STATE_DSTROYING:
+               set_capacity(card->gendisk, 0);
+               break;
+       }
+}
+
+static void card_event_handler(struct work_struct *work)
+{
+       struct rsxx_cardinfo *card;
+       unsigned int state;
+       unsigned long flags;
+       int st;
+
+       card = container_of(work, struct rsxx_cardinfo, event_work);
+
+       if (unlikely(card->halt))
+               return;
+
+       /*
+        * Enable the interrupt now to avoid any weird race conditions where a
+        * state change might occur while rsxx_get_card_state() is
+        * processing a returned creg cmd.
+        */
+       spin_lock_irqsave(&card->irq_lock, flags);
+       rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
+       spin_unlock_irqrestore(&card->irq_lock, flags);
+
+       st = rsxx_get_card_state(card, &state);
+       if (st) {
+               dev_info(CARD_TO_DEV(card),
+                       "Failed reading state after event.\n");
+               return;
+       }
+
+       if (card->state != state)
+               card_state_change(card, state);
+
+       if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
+               rsxx_read_hw_log(card);
+}
+
+/*----------------- Card Operations -------------------*/
+static int card_shutdown(struct rsxx_cardinfo *card)
+{
+       unsigned int state;
+       signed long start;
+       const int timeout = msecs_to_jiffies(120000);
+       int st;
+
+       /* We can't issue a shutdown if the card is in a transition state */
+       start = jiffies;
+       do {
+               st = rsxx_get_card_state(card, &state);
+               if (st)
+                       return st;
+       } while (state == CARD_STATE_STARTING &&
+                (jiffies - start < timeout));
+
+       if (state == CARD_STATE_STARTING)
+               return -ETIMEDOUT;
+
+       /* Only issue a shutdown if we need to */
+       if ((state != CARD_STATE_SHUTTING_DOWN) &&
+           (state != CARD_STATE_SHUTDOWN)) {
+               st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
+               if (st)
+                       return st;
+       }
+
+       start = jiffies;
+       do {
+               st = rsxx_get_card_state(card, &state);
+               if (st)
+                       return st;
+       } while (state != CARD_STATE_SHUTDOWN &&
+                (jiffies - start < timeout));
+
+       if (state != CARD_STATE_SHUTDOWN)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+/*----------------- Driver Initialization & Setup -------------------*/
+/* Returns:   0 if the driver is compatible with the device
+            -1 if the driver is NOT compatible with the device */
+static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
+{
+       unsigned char pci_rev;
+
+       pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
+
+       if (pci_rev > RS70_PCI_REV_SUPPORTED)
+               return -1;
+       return 0;
+}
+
+static int rsxx_pci_probe(struct pci_dev *dev,
+                                       const struct pci_device_id *id)
+{
+       struct rsxx_cardinfo *card;
+       int st;
+
+       dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
+
+       card = kzalloc(sizeof(*card), GFP_KERNEL);
+       if (!card)
+               return -ENOMEM;
+
+       card->dev = dev;
+       pci_set_drvdata(dev, card);
+
+       do {
+               if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
+                       st = -ENOMEM;
+                       goto failed_ida_get;
+               }
+
+               spin_lock(&rsxx_ida_lock);
+               st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
+               spin_unlock(&rsxx_ida_lock);
+       } while (st == -EAGAIN);
+
+       if (st)
+               goto failed_ida_get;
+
+       st = pci_enable_device(dev);
+       if (st)
+               goto failed_enable;
+
+       pci_set_master(dev);
+       pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
+
+       st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
+       if (st) {
+               dev_err(CARD_TO_DEV(card),
+                       "No usable DMA configuration,aborting\n");
+               goto failed_dma_mask;
+       }
+
+       st = pci_request_regions(dev, DRIVER_NAME);
+       if (st) {
+               dev_err(CARD_TO_DEV(card),
+                       "Failed to request memory region\n");
+               goto failed_request_regions;
+       }
+
+       if (pci_resource_len(dev, 0) == 0) {
+               dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
+               st = -ENOMEM;
+               goto failed_iomap;
+       }
+
+       card->regmap = pci_iomap(dev, 0, 0);
+       if (!card->regmap) {
+               dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
+               st = -ENOMEM;
+               goto failed_iomap;
+       }
+
+       spin_lock_init(&card->irq_lock);
+       card->halt = 0;
+
+       spin_lock_irq(&card->irq_lock);
+       rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+       spin_unlock_irq(&card->irq_lock);
+
+       if (!force_legacy) {
+               st = pci_enable_msi(dev);
+               if (st)
+                       dev_warn(CARD_TO_DEV(card),
+                               "Failed to enable MSI\n");
+       }
+
+       st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
+                        DRIVER_NAME, card);
+       if (st) {
+               dev_err(CARD_TO_DEV(card),
+                       "Failed requesting IRQ%d\n", dev->irq);
+               goto failed_irq;
+       }
+
+       /************* Setup Processor Command Interface *************/
+       rsxx_creg_setup(card);
+
+       spin_lock_irq(&card->irq_lock);
+       rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
+       spin_unlock_irq(&card->irq_lock);
+
+       st = rsxx_compatibility_check(card);
+       if (st) {
+               dev_warn(CARD_TO_DEV(card),
+                       "Incompatible driver detected. Please update the driver.\n");
+               st = -EINVAL;
+               goto failed_compatiblity_check;
+       }
+
+       /************* Load Card Config *************/
+       st = rsxx_load_config(card);
+       if (st)
+               dev_err(CARD_TO_DEV(card),
+                       "Failed loading card config\n");
+
+       /************* Setup DMA Engine *************/
+       st = rsxx_get_num_targets(card, &card->n_targets);
+       if (st)
+               dev_info(CARD_TO_DEV(card),
+                       "Failed reading the number of DMA targets\n");
+
+       card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
+       if (!card->ctrl) {
+               st = -ENOMEM;
+               goto failed_dma_setup;
+       }
+
+       st = rsxx_dma_setup(card);
+       if (st) {
+               dev_info(CARD_TO_DEV(card),
+                       "Failed to setup DMA engine\n");
+               goto failed_dma_setup;
+       }
+
+       /************* Setup Card Event Handler *************/
+       INIT_WORK(&card->event_work, card_event_handler);
+
+       st = rsxx_setup_dev(card);
+       if (st)
+               goto failed_create_dev;
+
+       rsxx_get_card_state(card, &card->state);
+
+       dev_info(CARD_TO_DEV(card),
+               "card state: %s\n",
+               rsxx_card_state_to_str(card->state));
+
+       /*
+        * Now that the DMA Engine and devices have been setup,
+        * we can enable the event interrupt (it kicks off actions in
+        * those layers, so we couldn't enable it right away).
+        */
+       spin_lock_irq(&card->irq_lock);
+       rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
+       spin_unlock_irq(&card->irq_lock);
+
+       if (card->state == CARD_STATE_SHUTDOWN) {
+               st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
+               if (st)
+                       dev_crit(CARD_TO_DEV(card),
+                               "Failed issuing card startup\n");
+       } else if (card->state == CARD_STATE_GOOD ||
+                  card->state == CARD_STATE_RD_ONLY_FAULT) {
+               st = rsxx_get_card_size8(card, &card->size8);
+               if (st)
+                       card->size8 = 0;
+       }
+
+       rsxx_attach_dev(card);
+
+       return 0;
+
+failed_create_dev:
+       rsxx_dma_destroy(card);
+failed_dma_setup:
+failed_compatiblity_check:
+       spin_lock_irq(&card->irq_lock);
+       rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+       spin_unlock_irq(&card->irq_lock);
+       free_irq(dev->irq, card);
+       if (!force_legacy)
+               pci_disable_msi(dev);
+failed_irq:
+       pci_iounmap(dev, card->regmap);
+failed_iomap:
+       pci_release_regions(dev);
+failed_request_regions:
+failed_dma_mask:
+       pci_disable_device(dev);
+failed_enable:
+       spin_lock(&rsxx_ida_lock);
+       ida_remove(&rsxx_disk_ida, card->disk_id);
+       spin_unlock(&rsxx_ida_lock);
+failed_ida_get:
+       kfree(card);
+
+       return st;
+}
+
+static void rsxx_pci_remove(struct pci_dev *dev)
+{
+       struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+       unsigned long flags;
+       int st;
+       int i;
+
+       if (!card)
+               return;
+
+       dev_info(CARD_TO_DEV(card),
+               "Removing PCI-Flash SSD.\n");
+
+       rsxx_detach_dev(card);
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock_irqsave(&card->irq_lock, flags);
+               rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
+               spin_unlock_irqrestore(&card->irq_lock, flags);
+       }
+
+       st = card_shutdown(card);
+       if (st)
+               dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
+
+       /* Sync outstanding event handlers. */
+       spin_lock_irqsave(&card->irq_lock, flags);
+       rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
+       spin_unlock_irqrestore(&card->irq_lock, flags);
+
+       /* Prevent work_structs from re-queuing themselves. */
+       card->halt = 1;
+
+       cancel_work_sync(&card->event_work);
+
+       rsxx_destroy_dev(card);
+       rsxx_dma_destroy(card);
+
+       spin_lock_irqsave(&card->irq_lock, flags);
+       rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
+       spin_unlock_irqrestore(&card->irq_lock, flags);
+       free_irq(dev->irq, card);
+
+       if (!force_legacy)
+               pci_disable_msi(dev);
+
+       rsxx_creg_destroy(card);
+
+       pci_iounmap(dev, card->regmap);
+
+       pci_disable_device(dev);
+       pci_release_regions(dev);
+
+       kfree(card);
+}
+
+static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+       /* We don't support suspend at this time. */
+       return -ENOSYS;
+}
+
+static void rsxx_pci_shutdown(struct pci_dev *dev)
+{
+       struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+       unsigned long flags;
+       int i;
+
+       if (!card)
+               return;
+
+       dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
+
+       rsxx_detach_dev(card);
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock_irqsave(&card->irq_lock, flags);
+               rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
+               spin_unlock_irqrestore(&card->irq_lock, flags);
+       }
+
+       card_shutdown(card);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
+       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
+       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
+       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
+       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
+       {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
+
+static struct pci_driver rsxx_pci_driver = {
+       .name           = DRIVER_NAME,
+       .id_table       = rsxx_pci_ids,
+       .probe          = rsxx_pci_probe,
+       .remove         = rsxx_pci_remove,
+       .suspend        = rsxx_pci_suspend,
+       .shutdown       = rsxx_pci_shutdown,
+};
+
+static int __init rsxx_core_init(void)
+{
+       int st;
+
+       st = rsxx_dev_init();
+       if (st)
+               return st;
+
+       st = rsxx_dma_init();
+       if (st)
+               goto dma_init_failed;
+
+       st = rsxx_creg_init();
+       if (st)
+               goto creg_init_failed;
+
+       return pci_register_driver(&rsxx_pci_driver);
+
+creg_init_failed:
+       rsxx_dma_cleanup();
+dma_init_failed:
+       rsxx_dev_cleanup();
+
+       return st;
+}
+
+static void __exit rsxx_core_cleanup(void)
+{
+       pci_unregister_driver(&rsxx_pci_driver);
+       rsxx_creg_cleanup();
+       rsxx_dma_cleanup();
+       rsxx_dev_cleanup();
+}
+
+module_init(rsxx_core_init);
+module_exit(rsxx_core_cleanup);
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
new file mode 100644 (file)
index 0000000..80bbe63
--- /dev/null
@@ -0,0 +1,758 @@
+/*
+* Filename: cregs.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*      Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/completion.h>
+#include <linux/slab.h>
+
+#include "rsxx_priv.h"
+
+#define CREG_TIMEOUT_MSEC      10000
+
+/* Completion callback invoked when a creg command finishes (st < 0 on error). */
+typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
+                           struct creg_cmd *cmd,
+                           int st);
+
+/* One queued creg (card register) access request. */
+struct creg_cmd {
+       struct list_head list;          /* link in creg_ctrl.queue */
+       creg_cmd_cb cb;                 /* completion callback, may be NULL */
+       void *cb_private;               /* opaque data handed back to cb */
+       unsigned int op;                /* CREG_OP_READ or CREG_OP_WRITE */
+       unsigned int addr;              /* creg address to access */
+       int cnt8;                       /* transfer size in bytes */
+       void *buf;                      /* data buffer; may be NULL for writes */
+       unsigned int stream;            /* nonzero: byte-stream (swapped on LE) access */
+       unsigned int status;            /* raw hardware status captured on completion */
+};
+
+static struct kmem_cache *creg_cmd_pool;
+
+
+/*------------ Private Functions --------------*/
+
+/*
+ * Collapse the compiler byte-order macros into a 0/1 flag used by the
+ * creg copy helpers for byte-stream accesses.
+ * NOTE(review): the name LITTLE_ENDIAN is also defined by some system
+ * headers; a driver-specific name would avoid any collision — confirm.
+ */
+#if defined(__LITTLE_ENDIAN)
+#define LITTLE_ENDIAN 1
+#elif defined(__BIG_ENDIAN)
+#define LITTLE_ENDIAN 0
+#else
+#error Unknown endianess!!! Aborting...
+#endif
+
+/*
+ * Write a cnt8-byte buffer into the card's CREG data window, one 32-bit
+ * word at a time. A cnt8 that is not a multiple of 4 is effectively
+ * rounded up: the final partial word is written in full.
+ * NOTE(review): in that rounded case data[i] reads up to 3 bytes past the
+ * end of buf — callers appear to pass 4-byte-multiple sizes; confirm.
+ */
+static void copy_to_creg_data(struct rsxx_cardinfo *card,
+                             int cnt8,
+                             void *buf,
+                             unsigned int stream)
+{
+       int i = 0;
+       u32 *data = buf;
+
+       for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
+               /*
+                * Firmware implementation makes it necessary to byte swap on
+                * little endian processors.
+                */
+               if (LITTLE_ENDIAN && stream)
+                       iowrite32be(data[i], card->regmap + CREG_DATA(i));
+               else
+                       iowrite32(data[i], card->regmap + CREG_DATA(i));
+       }
+}
+
+
+/*
+ * Read cnt8 bytes out of the card's CREG data window into buf, one 32-bit
+ * word at a time (mirror of copy_to_creg_data()).
+ * NOTE(review): a cnt8 that is not a multiple of 4 writes up to 3 bytes
+ * past the end of buf — callers appear to pass 4-byte-multiple sizes;
+ * confirm.
+ */
+static void copy_from_creg_data(struct rsxx_cardinfo *card,
+                               int cnt8,
+                               void *buf,
+                               unsigned int stream)
+{
+       int i = 0;
+       u32 *data = buf;
+
+       for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
+               /*
+                * Firmware implementation makes it necessary to byte swap on
+                * little endian processors.
+                */
+               if (LITTLE_ENDIAN && stream)
+                       data[i] = ioread32be(card->regmap + CREG_DATA(i));
+               else
+                       data[i] = ioread32(card->regmap + CREG_DATA(i));
+       }
+}
+
+/*
+ * Atomically take ownership of the active creg command (clearing it in
+ * the card state). Returns NULL if no command was active.
+ */
+static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
+{
+       struct creg_cmd *active;
+
+       /*
+        * Bottom halves are disabled while holding the lock because this
+        * can be called in atomic/interrupt context.
+        */
+       spin_lock_bh(&card->creg_ctrl.lock);
+       active = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+       spin_unlock_bh(&card->creg_ctrl.lock);
+
+       return active;
+}
+
+/*
+ * Program address, count, and (for writes) data into the hardware, then
+ * start the command by writing the opcode to CREG_CMD. Called with the
+ * command already marked active (see creg_kick_queue()).
+ */
+static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
+{
+       iowrite32(cmd->addr, card->regmap + CREG_ADD);
+       iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
+
+       if (cmd->op == CREG_OP_WRITE) {
+               if (cmd->buf)
+                       copy_to_creg_data(card, cmd->cnt8,
+                                         cmd->buf, cmd->stream);
+       }
+
+       /*
+        * Data copy must complete before initiating the command. This is
+        * needed for weakly ordered processors (i.e. PowerPC), so that all
+        * necessary registers are written before we kick the hardware.
+        */
+       wmb();
+
+       /* Setting the valid bit will kick off the command. */
+       iowrite32(cmd->op, card->regmap + CREG_CMD);
+}
+
+/*
+ * Start the next queued creg command if the interface is idle.
+ * Must be called with creg_ctrl.lock held (every caller holds it).
+ */
+static void creg_kick_queue(struct rsxx_cardinfo *card)
+{
+       if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
+               return;
+
+       card->creg_ctrl.active = 1;
+       card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
+                                                     struct creg_cmd, list);
+       list_del(&card->creg_ctrl.active_cmd->list);
+       card->creg_ctrl.q_depth--;
+
+       /*
+        * We have to set the timer before we push the new command. Otherwise,
+        * we could create a race condition that would occur if the timer
+        * was not canceled, and expired after the new command was pushed,
+        * but before the command was issued to hardware.
+        */
+       mod_timer(&card->creg_ctrl.cmd_timer,
+                               jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
+
+       creg_issue_cmd(card, card->creg_ctrl.active_cmd);
+}
+
+/*
+ * Allocate a creg command, append it to the queue, and kick the queue.
+ * Returns 0 on success, -EINVAL if the card is halted or cnt8 is too
+ * large, -EAGAIN during a creg reset, or -ENOMEM.
+ *
+ * Fix: creg_ctrl.lock is also taken by the command timeout timer, which
+ * runs in softirq context (and pop_active_cmd() already disables BHs for
+ * that reason). Taking it here with plain spin_lock() could deadlock if
+ * the timer fired on this CPU while the lock was held — use the _bh
+ * variant.
+ *
+ * NOTE(review): the GFP_KERNEL allocation can sleep; this path looks
+ * reachable from timer context via read_hw_log_done() -> rsxx_read_hw_log()
+ * — confirm and consider GFP_ATOMIC if so.
+ */
+static int creg_queue_cmd(struct rsxx_cardinfo *card,
+                         unsigned int op,
+                         unsigned int addr,
+                         unsigned int cnt8,
+                         void *buf,
+                         int stream,
+                         creg_cmd_cb callback,
+                         void *cb_private)
+{
+       struct creg_cmd *cmd;
+
+       /* Don't queue stuff up if we're halted. */
+       if (unlikely(card->halt))
+               return -EINVAL;
+
+       if (card->creg_ctrl.reset)
+               return -EAGAIN;
+
+       if (cnt8 > MAX_CREG_DATA8)
+               return -EINVAL;
+
+       cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&cmd->list);
+
+       cmd->op         = op;
+       cmd->addr       = addr;
+       cmd->cnt8       = cnt8;
+       cmd->buf        = buf;
+       cmd->stream     = stream;
+       cmd->cb         = callback;
+       cmd->cb_private = cb_private;
+       cmd->status     = 0;
+
+       spin_lock_bh(&card->creg_ctrl.lock);
+       list_add_tail(&cmd->list, &card->creg_ctrl.queue);
+       card->creg_ctrl.q_depth++;
+       creg_kick_queue(card);
+       spin_unlock_bh(&card->creg_ctrl.lock);
+
+       return 0;
+}
+
+/*
+ * Timer callback (softirq context) fired when the active command did not
+ * complete within CREG_TIMEOUT_MSEC: fails it with -ETIMEDOUT and starts
+ * the next queued command. Plain spin_lock() is sufficient here because
+ * bottom halves are already disabled in timer context.
+ */
+static void creg_cmd_timed_out(unsigned long data)
+{
+       struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+       struct creg_cmd *cmd;
+
+       cmd = pop_active_cmd(card);
+       if (cmd == NULL) {
+               /* Completion path won the race and already took the command. */
+               card->creg_ctrl.creg_stats.creg_timeout++;
+               dev_warn(CARD_TO_DEV(card),
+                       "No active command associated with timeout!\n");
+               return;
+       }
+
+       if (cmd->cb)
+               cmd->cb(card, cmd, -ETIMEDOUT);
+
+       kmem_cache_free(creg_cmd_pool, cmd);
+
+
+       spin_lock(&card->creg_ctrl.lock);
+       card->creg_ctrl.active = 0;
+       creg_kick_queue(card);
+       spin_unlock(&card->creg_ctrl.lock);
+}
+
+
+/*
+ * Work-queue handler run after the hardware raises a creg interrupt:
+ * cancels the timeout timer, reads back the status (and data for reads),
+ * invokes the command's callback, then kicks the next queued command.
+ *
+ * Fixes: take creg_ctrl.lock with spin_lock_bh() — the lock is shared
+ * with the softirq-context timeout timer and pop_active_cmd() already
+ * disables BHs for the same reason; also drop redundant double parens in
+ * the CREG_OP_READ test.
+ */
+static void creg_cmd_done(struct work_struct *work)
+{
+       struct rsxx_cardinfo *card;
+       struct creg_cmd *cmd;
+       int st = 0;
+
+       card = container_of(work, struct rsxx_cardinfo,
+                           creg_ctrl.done_work);
+
+       /*
+        * The timer could not be cancelled for some reason,
+        * race to pop the active command.
+        */
+       if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
+               card->creg_ctrl.creg_stats.failed_cancel_timer++;
+
+       cmd = pop_active_cmd(card);
+       if (cmd == NULL) {
+               dev_err(CARD_TO_DEV(card),
+                       "Spurious creg interrupt!\n");
+               return;
+       }
+
+       card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
+       cmd->status = card->creg_ctrl.creg_stats.stat;
+       if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
+               dev_err(CARD_TO_DEV(card),
+                       "Invalid status on creg command\n");
+               /*
+                * At this point we're probably reading garbage from HW. Don't
+                * do anything else that could mess up the system and let
+                * the sync function return an error.
+                */
+               st = -EIO;
+               goto creg_done;
+       } else if (cmd->status & CREG_STAT_ERROR) {
+               st = -EIO;
+       }
+
+       if (cmd->op == CREG_OP_READ) {
+               unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
+
+               /* Paranoid Sanity Checks */
+               if (!cmd->buf) {
+                       dev_err(CARD_TO_DEV(card),
+                               "Buffer not given for read.\n");
+                       st = -EIO;
+                       goto creg_done;
+               }
+               if (cnt8 != cmd->cnt8) {
+                       dev_err(CARD_TO_DEV(card),
+                               "count mismatch\n");
+                       st = -EIO;
+                       goto creg_done;
+               }
+
+               copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
+       }
+
+creg_done:
+       if (cmd->cb)
+               cmd->cb(card, cmd, st);
+
+       kmem_cache_free(creg_cmd_pool, cmd);
+
+       spin_lock_bh(&card->creg_ctrl.lock);
+       card->creg_ctrl.active = 0;
+       creg_kick_queue(card);
+       spin_unlock_bh(&card->creg_ctrl.lock);
+}
+
+/*
+ * Reset the creg interface after a fatal error (e.g. the backstop timeout
+ * in __issue_creg_rw() fired): disable creg/event interrupts, cancel every
+ * queued and the active command with -ECANCELED, then re-enable interrupts.
+ * Concurrent resets are collapsed via mutex_trylock on reset_lock.
+ *
+ * Fix: take creg_ctrl.lock with spin_lock_bh() — the lock is shared with
+ * the softirq-context timeout timer (pop_active_cmd() already disables
+ * BHs for the same reason).
+ *
+ * NOTE(review): del_timer_sync() is called while holding creg_ctrl.lock,
+ * and the timer handler takes the same lock; timer_pending() narrows but
+ * does not close that window — confirm whether the timer can still be
+ * running here.
+ */
+static void creg_reset(struct rsxx_cardinfo *card)
+{
+       struct creg_cmd *cmd = NULL;
+       struct creg_cmd *tmp;
+       unsigned long flags;
+
+       /*
+        * mutex_trylock is used here because if reset_lock is taken then a
+        * reset is already happening. So, we can just go ahead and return.
+        */
+       if (!mutex_trylock(&card->creg_ctrl.reset_lock))
+               return;
+
+       card->creg_ctrl.reset = 1;
+       spin_lock_irqsave(&card->irq_lock, flags);
+       rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
+       spin_unlock_irqrestore(&card->irq_lock, flags);
+
+       dev_warn(CARD_TO_DEV(card),
+               "Resetting creg interface for recovery\n");
+
+       /* Cancel outstanding commands */
+       spin_lock_bh(&card->creg_ctrl.lock);
+       list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
+               list_del(&cmd->list);
+               card->creg_ctrl.q_depth--;
+               if (cmd->cb)
+                       cmd->cb(card, cmd, -ECANCELED);
+               kmem_cache_free(creg_cmd_pool, cmd);
+       }
+
+       cmd = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+       if (cmd) {
+               if (timer_pending(&card->creg_ctrl.cmd_timer))
+                       del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+               if (cmd->cb)
+                       cmd->cb(card, cmd, -ECANCELED);
+               kmem_cache_free(creg_cmd_pool, cmd);
+
+               card->creg_ctrl.active = 0;
+       }
+       spin_unlock_bh(&card->creg_ctrl.lock);
+
+       card->creg_ctrl.reset = 0;
+       spin_lock_irqsave(&card->irq_lock, flags);
+       rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
+       spin_unlock_irqrestore(&card->irq_lock, flags);
+
+       mutex_unlock(&card->creg_ctrl.reset_lock);
+}
+
+/* Used for synchronous accesses */
+struct creg_completion {
+       struct completion       *cmd_done;      /* signalled by creg_cmd_done_cb() */
+       int                     st;             /* command result code */
+       u32                     creg_status;    /* raw hardware status word */
+};
+
+/* Completion callback used by synchronous accesses: hand the result back
+ * to the waiter recorded in cb_private and wake it.
+ */
+static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
+                            struct creg_cmd *cmd,
+                            int st)
+{
+       struct creg_completion *completion = cmd->cb_private;
+
+       BUG_ON(!completion);
+
+       completion->st = st;
+       completion->creg_status = cmd->status;
+       complete(completion->cmd_done);
+}
+
+/*
+ * Issue one creg command and wait for it to finish. The normal timeout
+ * path is the command timer (creg_cmd_timed_out()); the completion wait
+ * below is only a backstop, and triggers a full creg interface reset if
+ * it ever fires. Returns 0 on success, the command's error code, or -EIO
+ * on backstop timeout. *hw_stat receives the raw hardware status.
+ */
+static int __issue_creg_rw(struct rsxx_cardinfo *card,
+                          unsigned int op,
+                          unsigned int addr,
+                          unsigned int cnt8,
+                          void *buf,
+                          int stream,
+                          unsigned int *hw_stat)
+{
+       DECLARE_COMPLETION_ONSTACK(cmd_done);
+       struct creg_completion completion;
+       unsigned long timeout;
+       int st;
+
+       completion.cmd_done = &cmd_done;
+       completion.st = 0;
+       completion.creg_status = 0;
+
+       st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
+                           &completion);
+       if (st)
+               return st;
+
+       /*
+        * This timeout is necessary for unresponsive hardware. The additional
+        * 20 seconds is used to guarantee that each cregs request has time to
+        * complete.
+        */
+       timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
+                               card->creg_ctrl.q_depth) + 20000);
+
+       /*
+        * The creg interface is guaranteed to complete. It has a timeout
+        * mechanism that will kick in if hardware does not respond.
+        */
+       st = wait_for_completion_timeout(completion.cmd_done, timeout);
+       if (st == 0) {
+               /*
+                * This is really bad, because the kernel timer did not
+                * expire and notify us of a timeout!
+                */
+               dev_crit(CARD_TO_DEV(card),
+                       "cregs timer failed\n");
+               creg_reset(card);
+               return -EIO;
+       }
+
+       *hw_stat = completion.creg_status;
+
+       if (completion.st) {
+               dev_warn(CARD_TO_DEV(card),
+                       "creg command failed(%d x%08x)\n",
+                       completion.st, addr);
+               return completion.st;
+       }
+
+       return 0;
+}
+
+/*
+ * Perform a creg read or write of arbitrary size by issuing it in chunks
+ * of at most MAX_CREG_DATA8 bytes. Stops at the first chunk that fails.
+ */
+static int issue_creg_rw(struct rsxx_cardinfo *card,
+                        u32 addr,
+                        unsigned int size8,
+                        void *data,
+                        int stream,
+                        int read)
+{
+       unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
+       unsigned int hw_stat;
+       char *pos = data;
+       int st;
+
+       do {
+               unsigned int chunk =
+                       min_t(unsigned int, size8, MAX_CREG_DATA8);
+
+               st = __issue_creg_rw(card, op, addr, chunk,
+                                    pos, stream, &hw_stat);
+               if (st)
+                       return st;
+
+               pos   += chunk;
+               addr  += chunk;
+               size8 -= chunk;
+       } while (size8);
+
+       return 0;
+}
+
+/* ---------------------------- Public API ---------------------------------- */
+/* Synchronously write size8 bytes to creg address addr. Returns 0 or -errno. */
+int rsxx_creg_write(struct rsxx_cardinfo *card,
+                       u32 addr,
+                       unsigned int size8,
+                       void *data,
+                       int byte_stream)
+{
+       return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
+}
+
+/* Synchronously read size8 bytes from creg address addr. Returns 0 or -errno. */
+int rsxx_creg_read(struct rsxx_cardinfo *card,
+                      u32 addr,
+                      unsigned int size8,
+                      void *data,
+                      int byte_stream)
+{
+       return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
+}
+
+/* Read the card's current state word from CREG_ADD_CARD_STATE. */
+int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
+{
+       return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
+                                 sizeof(*state), state, 0);
+}
+
+/*
+ * Query the card's usable size. The hardware reports a block count that
+ * is scaled by RSXX_HW_BLK_SIZE into a byte count stored in *size8.
+ */
+int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
+{
+       unsigned int hw_blocks;
+       int st;
+
+       st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
+                               sizeof(hw_blocks), &hw_blocks, 0);
+       if (st)
+               return st;
+
+       *size8 = (u64)hw_blocks * RSXX_HW_BLK_SIZE;
+       return 0;
+}
+
+/* Read the number of DMA targets exposed by the card. */
+int rsxx_get_num_targets(struct rsxx_cardinfo *card,
+                            unsigned int *n_targets)
+{
+       return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
+                                 sizeof(*n_targets), n_targets, 0);
+}
+
+/* Read the card's capability bits (e.g. CARD_CAP_SUBPAGE_WRITES). */
+int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
+                                  u32 *capabilities)
+{
+       return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
+                                 sizeof(*capabilities), capabilities, 0);
+}
+
+/* Send a one-word command to the card via CREG_ADD_CARD_CMD. */
+int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
+{
+       return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
+                                  sizeof(cmd), &cmd, 0);
+}
+
+
+/*----------------- HW Log Functions -------------------*/
+/*
+ * Forward one chunk of firmware log text to the kernel log at the level
+ * encoded in its "<#>" prefix. 'level' is deliberately static: a message
+ * larger than the log buffer arrives in several chunks, and continuation
+ * chunks (no "<#>" prefix) reuse the level of the chunk before them.
+ */
+static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
+{
+       static char level;
+
+       /*
+        * New messages start with "<#>", where # is the log level. Messages
+        * that extend past the log buffer will use the previous level
+        */
+       if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
+               level = str[1];
+               str += 3; /* Skip past the log level. */
+               len -= 3;
+       }
+
+       switch (level) {
+       case '0':
+               dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       case '1':
+               dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       case '2':
+               dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       case '3':
+               dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       case '4':
+               dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       case '5':
+               dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       case '6':
+               dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       case '7':
+               dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       default:
+               dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
+               break;
+       }
+}
+
+/*
+ * The substrncpy function copies the src string (which includes the
+ * terminating '\0' character), up to the count into the dest pointer.
+ * Returns the number of bytes copied to dest.
+ */
+/*
+ * The substrncpy function copies the src string (which includes the
+ * terminating '\0' character), up to the count into the dest pointer.
+ * Returns the number of bytes copied to dest (including the terminator
+ * when it fits).
+ */
+static int substrncpy(char *dest, const char *src, int count)
+{
+       int copied = 0;
+
+       while (copied < count) {
+               dest[copied] = src[copied];
+               copied++;
+               if (dest[copied - 1] == '\0')
+                       break;
+       }
+       return copied;
+}
+
+
+/*
+ * Completion callback for a CREG_ADD_LOG read: splits the returned text
+ * into NUL-terminated messages, accumulating partial messages in
+ * card->log.buf across calls, and flushes each complete message (or a
+ * full buffer) via hw_log_msg(). If the hardware reports more log data
+ * pending, another asynchronous log read is queued immediately.
+ */
+static void read_hw_log_done(struct rsxx_cardinfo *card,
+                            struct creg_cmd *cmd,
+                            int st)
+{
+       char *buf;
+       char *log_str;
+       int cnt;
+       int len;
+       int off;
+
+       buf = cmd->buf;
+       off = 0;
+
+       /* Failed getting the log message */
+       if (st)
+               return;
+
+       while (off < cmd->cnt8) {
+               log_str = &card->log.buf[card->log.buf_len];
+               cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
+               len = substrncpy(log_str, &buf[off], cnt);
+
+               off += len;
+               card->log.buf_len += len;
+
+               /*
+                * Flush the log if we've hit the end of a message or if we've
+                * run out of buffer space.
+                */
+               if ((log_str[len - 1] == '\0')  ||
+                   (card->log.buf_len == LOG_BUF_SIZE8)) {
+                       if (card->log.buf_len != 1) /* Don't log blank lines. */
+                               hw_log_msg(card, card->log.buf,
+                                          card->log.buf_len);
+                       card->log.buf_len = 0;
+               }
+
+       }
+
+       if (cmd->status & CREG_STAT_LOG_PENDING)
+               rsxx_read_hw_log(card);
+}
+
+/*
+ * Queue an asynchronous byte-stream read of the card's hardware log into
+ * card->log.tmp; read_hw_log_done() consumes the result.
+ */
+int rsxx_read_hw_log(struct rsxx_cardinfo *card)
+{
+       int st;
+
+       st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
+                           sizeof(card->log.tmp), card->log.tmp,
+                           1, read_hw_log_done, NULL);
+       if (st)
+               dev_err(CARD_TO_DEV(card),
+                       "Failed getting log text\n");
+
+       return st;
+}
+
+/*-------------- IOCTL REG Access ------------------*/
+/* Execute a user-requested register access synchronously; fills cmd->stat. */
+static int issue_reg_cmd(struct rsxx_cardinfo *card,
+                        struct rsxx_reg_access *cmd,
+                        int read)
+{
+       unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
+
+       return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
+                              cmd->stream, &cmd->stat);
+}
+
+/*
+ * ioctl backend for RSXX_GETREG/RSXX_SETREG: copy the request from user
+ * space, bounds-check cnt against the embedded data buffer, run the
+ * access, then copy the status (and data, for reads) back out.
+ * NOTE(review): the cnt bounds check returns -EFAULT where -EINVAL would
+ * be more conventional — confirm before changing; user tools may rely
+ * on it.
+ */
+int rsxx_reg_access(struct rsxx_cardinfo *card,
+                       struct rsxx_reg_access __user *ucmd,
+                       int read)
+{
+       struct rsxx_reg_access cmd;
+       int st;
+
+       st = copy_from_user(&cmd, ucmd, sizeof(cmd));
+       if (st)
+               return -EFAULT;
+
+       if (cmd.cnt > RSXX_MAX_REG_CNT)
+               return -EFAULT;
+
+       st = issue_reg_cmd(card, &cmd, read);
+       if (st)
+               return st;
+
+       st = put_user(cmd.stat, &ucmd->stat);
+       if (st)
+               return -EFAULT;
+
+       if (read) {
+               st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
+               if (st)
+                       return -EFAULT;
+       }
+
+       return 0;
+}
+
+/*------------ Initialization & Setup --------------*/
+int rsxx_creg_setup(struct rsxx_cardinfo *card)
+{
+       card->creg_ctrl.active_cmd = NULL;
+
+       INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
+       mutex_init(&card->creg_ctrl.reset_lock);
+       INIT_LIST_HEAD(&card->creg_ctrl.queue);
+       spin_lock_init(&card->creg_ctrl.lock);
+       setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
+                   (unsigned long) card);
+
+       return 0;
+}
+
+/*
+ * Tear down the per-card creg state: cancel all queued and active
+ * commands with -ECANCELED and flush the completion work item.
+ *
+ * Fix: take creg_ctrl.lock with spin_lock_bh() — the lock is shared with
+ * the softirq-context timeout timer, matching pop_active_cmd().
+ *
+ * NOTE(review): del_timer_sync() runs while holding creg_ctrl.lock and
+ * the timer handler takes the same lock; timer_pending() narrows but
+ * does not close that window — confirm the timer cannot be running here.
+ */
+void rsxx_creg_destroy(struct rsxx_cardinfo *card)
+{
+       struct creg_cmd *cmd;
+       struct creg_cmd *tmp;
+       int cnt = 0;
+
+       /* Cancel outstanding commands */
+       spin_lock_bh(&card->creg_ctrl.lock);
+       list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
+               list_del(&cmd->list);
+               if (cmd->cb)
+                       cmd->cb(card, cmd, -ECANCELED);
+               kmem_cache_free(creg_cmd_pool, cmd);
+               cnt++;
+       }
+
+       if (cnt)
+               dev_info(CARD_TO_DEV(card),
+                       "Canceled %d queue creg commands\n", cnt);
+
+       cmd = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+       if (cmd) {
+               if (timer_pending(&card->creg_ctrl.cmd_timer))
+                       del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+               if (cmd->cb)
+                       cmd->cb(card, cmd, -ECANCELED);
+               dev_info(CARD_TO_DEV(card),
+                       "Canceled active creg command\n");
+               kmem_cache_free(creg_cmd_pool, cmd);
+       }
+       spin_unlock_bh(&card->creg_ctrl.lock);
+
+       cancel_work_sync(&card->creg_ctrl.done_work);
+}
+
+
+/* Create the global slab cache backing creg command allocations. */
+int rsxx_creg_init(void)
+{
+       creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
+       if (!creg_cmd_pool)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void rsxx_creg_cleanup(void)
+{
+       kmem_cache_destroy(creg_cmd_pool);
+}
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
new file mode 100644 (file)
index 0000000..4346d17
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+* Filename: dev.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*      Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+
+#include <linux/fs.h>
+
+#include "rsxx_priv.h"
+
+static unsigned int blkdev_minors = 64;
+module_param(blkdev_minors, uint, 0444);
+MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");
+
+/*
+ * For now I'm making this tweakable in case any applications hit this limit.
+ * If you see a "bio too big" error in the log you will need to raise this
+ * value.
+ */
+static unsigned int blkdev_max_hw_sectors = 1024;
+module_param(blkdev_max_hw_sectors, uint, 0444);
+MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
+
+static unsigned int enable_blkdev = 1;
+module_param(enable_blkdev , uint, 0444);
+MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
+
+
+/* Per-bio bookkeeping shared by all DMAs issued for one bio. */
+struct rsxx_bio_meta {
+       struct bio      *bio;
+       atomic_t        pending_dmas;   /* DMAs still outstanding for this bio */
+       atomic_t        error;          /* set if any DMA completed with an error */
+       unsigned long   start_time;     /* jiffies when the bio was accepted */
+};
+
+static struct kmem_cache *bio_meta_pool;
+
+/*----------------- Block Device Operations -----------------*/
+/* Block-device ioctl: only the driver's register get/set commands are
+ * handled; anything else is -ENOTTY.
+ */
+static int rsxx_blkdev_ioctl(struct block_device *bdev,
+                                fmode_t mode,
+                                unsigned int cmd,
+                                unsigned long arg)
+{
+       struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
+       void __user *uarg = (void __user *)arg;
+
+       switch (cmd) {
+       case RSXX_GETREG:
+               return rsxx_reg_access(card, uarg, 1);
+       case RSXX_SETREG:
+               return rsxx_reg_access(card, uarg, 0);
+       default:
+               return -ENOTTY;
+       }
+}
+
+/* Report fabricated CHS geometry (64 heads, 16 sectors) derived from the
+ * card's byte size; all zeroes if the size is unknown.
+ */
+static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+       struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
+       u64 blocks = card->size8 >> 9;
+
+       /*
+        * get geometry: Fake it. I haven't found any drivers that set
+        * geo->start, so we won't either.
+        */
+       if (card->size8) {
+               geo->heads = 64;
+               geo->sectors = 16;
+               do_div(blocks, (geo->heads * geo->sectors));
+               geo->cylinders = blocks;
+       } else {
+               geo->heads = 0;
+               geo->sectors = 0;
+               geo->cylinders = 0;
+       }
+       return 0;
+}
+
+/* Block device operations: geometry query and the reg-access ioctls only. */
+static const struct block_device_operations rsxx_fops = {
+       .owner          = THIS_MODULE,
+       .getgeo         = rsxx_getgeo,
+       .ioctl          = rsxx_blkdev_ioctl,
+};
+
+/* Account the start of a bio (in-flight count) in the gendisk part0 stats. */
+static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
+{
+       struct hd_struct *part0 = &card->gendisk->part0;
+       int rw = bio_data_dir(bio);
+       int cpu;
+
+       cpu = part_stat_lock();
+
+       part_round_stats(cpu, part0);
+       part_inc_in_flight(part0, rw);
+
+       part_stat_unlock();
+}
+
+/* Account a completed bio (sectors, ios, ticks, in-flight) in part0 stats. */
+static void disk_stats_complete(struct rsxx_cardinfo *card,
+                               struct bio *bio,
+                               unsigned long start_time)
+{
+       struct hd_struct *part0 = &card->gendisk->part0;
+       unsigned long duration = jiffies - start_time;
+       int rw = bio_data_dir(bio);
+       int cpu;
+
+       cpu = part_stat_lock();
+
+       part_stat_add(cpu, part0, sectors[rw], bio_sectors(bio));
+       part_stat_inc(cpu, part0, ios[rw]);
+       part_stat_add(cpu, part0, ticks[rw], duration);
+
+       part_round_stats(cpu, part0);
+       part_dec_in_flight(part0, rw);
+
+       part_stat_unlock();
+}
+
+/*
+ * Per-DMA completion callback: record any error and, when the last DMA
+ * for the bio finishes, finalize the disk statistics and complete the
+ * bio. Any single DMA error fails the whole bio with -EIO.
+ */
+static void bio_dma_done_cb(struct rsxx_cardinfo *card,
+                           void *cb_data,
+                           unsigned int error)
+{
+       struct rsxx_bio_meta *meta = cb_data;
+
+       if (error)
+               atomic_set(&meta->error, 1);
+
+       if (atomic_dec_and_test(&meta->pending_dmas)) {
+               disk_stats_complete(card, meta->bio, meta->start_time);
+
+               bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
+               kmem_cache_free(bio_meta_pool, meta);
+       }
+}
+
+/*
+ * make_request entry point: translate a bio into one or more DMAs and
+ * queue them; completion is signalled through bio_dma_done_cb() once all
+ * per-bio DMAs finish. Failed bios are completed immediately with the
+ * relevant error.
+ *
+ * Fix: the dma_fault branch used `st = (-EFAULT);` — redundant parens,
+ * now consistent with the halt branch.
+ */
+static void rsxx_make_request(struct request_queue *q, struct bio *bio)
+{
+       struct rsxx_cardinfo *card = q->queuedata;
+       struct rsxx_bio_meta *bio_meta;
+       int st = -EINVAL;
+
+       might_sleep();
+
+       /* Reject I/O once the card is halted or has taken a DMA fault. */
+       if (unlikely(card->halt)) {
+               st = -EFAULT;
+               goto req_err;
+       }
+
+       if (unlikely(card->dma_fault)) {
+               st = -EFAULT;
+               goto req_err;
+       }
+
+       if (bio->bi_size == 0) {
+               dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
+               goto req_err;
+       }
+
+       bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
+       if (!bio_meta) {
+               st = -ENOMEM;
+               goto req_err;
+       }
+
+       bio_meta->bio = bio;
+       atomic_set(&bio_meta->error, 0);
+       atomic_set(&bio_meta->pending_dmas, 0);
+       bio_meta->start_time = jiffies;
+
+       disk_stats_start(card, bio);
+
+       dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
+                bio_data_dir(bio) ? 'W' : 'R', bio_meta,
+                (u64)bio->bi_sector << 9, bio->bi_size);
+
+       st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
+                                   bio_dma_done_cb, bio_meta);
+       if (st)
+               goto queue_err;
+
+       return;
+
+queue_err:
+       kmem_cache_free(bio_meta_pool, bio_meta);
+req_err:
+       bio_endio(bio, st);
+}
+
+/*----------------- Device Setup -------------------*/
+/* Discard is supported from PCI revision RSXX_DISCARD_SUPPORT onward. */
+static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
+{
+       unsigned char pci_rev;
+
+       pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
+
+       return (pci_rev >= RSXX_DISCARD_SUPPORT);
+}
+
+/*
+ * Choose the logical block size to expose: 512 bytes when the firmware
+ * supports sub-page writes, otherwise the full hardware block size.
+ * A failed capabilities read falls back to the conservative choice.
+ */
+static unsigned short rsxx_get_logical_block_size(
+                                       struct rsxx_cardinfo *card)
+{
+       u32 caps = 0;
+
+       if (rsxx_get_card_capabilities(card, &caps))
+               dev_warn(CARD_TO_DEV(card),
+                       "Failed reading card capabilities register\n");
+
+       /* Earlier firmware did not have support for 512 byte accesses */
+       return (caps & CARD_CAP_SUBPAGE_WRITES) ? 512 : RSXX_HW_BLK_SIZE;
+}
+
+/*
+ * Publish the gendisk to the system. Capacity comes from the card config
+ * when it is valid; otherwise the disk is exposed with zero capacity.
+ * No-op (beyond the lock) when the block device interface is disabled.
+ */
+int rsxx_attach_dev(struct rsxx_cardinfo *card)
+{
+       mutex_lock(&card->dev_lock);
+
+       /* The block device requires the stripe size from the config. */
+       if (enable_blkdev) {
+               if (card->config_valid)
+                       set_capacity(card->gendisk, card->size8 >> 9);
+               else
+                       set_capacity(card->gendisk, 0);
+               add_disk(card->gendisk);
+
+               card->bdev_attached = 1;
+       }
+
+       mutex_unlock(&card->dev_lock);
+
+       return 0;
+}
+
+/* Remove the gendisk from the system if it was attached; idempotent. */
+void rsxx_detach_dev(struct rsxx_cardinfo *card)
+{
+       mutex_lock(&card->dev_lock);
+
+       if (card->bdev_attached) {
+               del_gendisk(card->gendisk);
+               card->bdev_attached = 0;
+       }
+
+       mutex_unlock(&card->dev_lock);
+}
+
+/*
+ * Allocate and configure the block-device side of a card: major number,
+ * request queue, and gendisk. The disk is not published here; that is
+ * done later by rsxx_attach_dev().
+ *
+ * Improvement: the original duplicated its error unwinding in each
+ * failure branch; this uses the kernel-idiomatic goto-cleanup pattern so
+ * each resource is released exactly once, in reverse order.
+ */
+int rsxx_setup_dev(struct rsxx_cardinfo *card)
+{
+       unsigned short blk_size;
+       int st;
+
+       mutex_init(&card->dev_lock);
+
+       if (!enable_blkdev)
+               return 0;
+
+       card->major = register_blkdev(0, DRIVER_NAME);
+       if (card->major < 0) {
+               dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
+               return -ENOMEM;
+       }
+
+       card->queue = blk_alloc_queue(GFP_KERNEL);
+       if (!card->queue) {
+               dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
+               st = -ENOMEM;
+               goto failed_queue_alloc;
+       }
+
+       card->gendisk = alloc_disk(blkdev_minors);
+       if (!card->gendisk) {
+               dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
+               st = -ENOMEM;
+               goto failed_disk_alloc;
+       }
+
+       blk_size = rsxx_get_logical_block_size(card);
+
+       blk_queue_make_request(card->queue, rsxx_make_request);
+       blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
+       blk_queue_dma_alignment(card->queue, blk_size - 1);
+       blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
+       blk_queue_logical_block_size(card->queue, blk_size);
+       blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
+
+       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
+       if (rsxx_discard_supported(card)) {
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+               blk_queue_max_discard_sectors(card->queue,
+                                               RSXX_HW_BLK_SIZE >> 9);
+               card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
+               card->queue->limits.discard_alignment   = RSXX_HW_BLK_SIZE;
+               card->queue->limits.discard_zeroes_data = 1;
+       }
+
+       card->queue->queuedata = card;
+
+       snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
+                "rsxx%d", card->disk_id);
+       card->gendisk->driverfs_dev = &card->dev->dev;
+       card->gendisk->major = card->major;
+       card->gendisk->first_minor = 0;
+       card->gendisk->fops = &rsxx_fops;
+       card->gendisk->private_data = card;
+       card->gendisk->queue = card->queue;
+
+       return 0;
+
+failed_disk_alloc:
+       blk_cleanup_queue(card->queue);
+failed_queue_alloc:
+       unregister_blkdev(card->major, DRIVER_NAME);
+       return st;
+}
+
+/*
+ * Release the block-device resources created by rsxx_setup_dev().
+ * NOTE(review): does not call del_gendisk(); callers presumably run
+ * rsxx_detach_dev() first — confirm.
+ */
+void rsxx_destroy_dev(struct rsxx_cardinfo *card)
+{
+       if (!enable_blkdev)
+               return;
+
+       put_disk(card->gendisk);
+       card->gendisk = NULL;
+
+       blk_cleanup_queue(card->queue);
+       unregister_blkdev(card->major, DRIVER_NAME);
+}
+
+/* Create the global slab cache backing per-bio metadata allocations. */
+int rsxx_dev_init(void)
+{
+       bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
+       if (!bio_meta_pool)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Destroy the per-bio metadata slab cache created by rsxx_dev_init(). */
+void rsxx_dev_cleanup(void)
+{
+       kmem_cache_destroy(bio_meta_pool);
+}
+
+
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
new file mode 100644 (file)
index 0000000..63176e6
--- /dev/null
@@ -0,0 +1,998 @@
+/*
+* Filename: dma.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*      Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/slab.h>
+#include "rsxx_priv.h"
+
+/*
+ * One outstanding DMA operation.  Lives on a per-channel queue until
+ * issued, then is tracked by tag in the channel's dma_tracker_list
+ * until the hardware reports completion.
+ */
+struct rsxx_dma {
+       struct list_head         list;
+       u8                       cmd;
+       unsigned int             laddr;     /* Logical address on the ramsan */
+       struct {
+               u32              off;
+               u32              cnt;
+       } sub_page;
+       dma_addr_t               dma_addr;
+       struct page              *page;
+       unsigned int             pg_off;    /* Page Offset */
+       rsxx_dma_cb              cb;        /* completion callback + context */
+       void                     *cb_data;
+};
+
+/* This timeout is used to detect a stalled DMA channel */
+#define DMA_ACTIVITY_TIMEOUT   msecs_to_jiffies(10000)
+
+/* Hardware completion record, read from the status ring buffer. */
+struct hw_status {
+       u8      status;
+       u8      tag;
+       __le16  count;
+       __le32  _rsvd2;
+       __le64  _rsvd3;
+} __packed;
+
+/* Software-side DMA completion status bits (see rsxx_complete_dma()). */
+enum rsxx_dma_status {
+       DMA_SW_ERR    = 0x1,
+       DMA_HW_FAULT  = 0x2,
+       DMA_CANCELLED = 0x4,
+};
+
+/* Hardware command record, written into the command ring buffer. */
+struct hw_cmd {
+       u8      command;
+       u8      tag;
+       u8      _rsvd;
+       u8      sub_page; /* Bit[0:2]: 512byte offset */
+                         /* Bit[4:6]: 512byte count */
+       __le32  device_addr;
+       __le64  host_addr;
+} __packed;
+
+/* Opcodes accepted by the hardware in hw_cmd.command. */
+enum rsxx_hw_cmd {
+       HW_CMD_BLK_DISCARD      = 0x70,
+       HW_CMD_BLK_WRITE        = 0x80,
+       HW_CMD_BLK_READ         = 0xC0,
+       HW_CMD_BLK_RECON_READ   = 0xE0,
+};
+
+/* Error bits reported by the hardware in hw_status.status. */
+enum rsxx_hw_status {
+       HW_STATUS_CRC           = 0x01,
+       HW_STATUS_HARD_ERR      = 0x02,
+       HW_STATUS_SOFT_ERR      = 0x04,
+       HW_STATUS_FAULT         = 0x08,
+};
+
+/* Sizes (in bytes) of the DMA-coherent status and command ring buffers. */
+#define STATUS_BUFFER_SIZE8     4096
+#define COMMAND_BUFFER_SIZE8    4096
+
+static struct kmem_cache *rsxx_dma_pool;
+
+/* Per-tag slot: free-list link while unused, owning DMA while issued. */
+struct dma_tracker {
+       int                     next_tag;
+       struct rsxx_dma *dma;
+};
+
+#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
+               (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))
+
+/* Per-channel tag allocator: singly linked free list threaded by index. */
+struct dma_tracker_list {
+       spinlock_t              lock;
+       int                     head;
+       struct dma_tracker      list[0];
+};
+
+
+/*----------------- Misc Utility Functions -------------------*/
+/*
+ * Convert a card byte address into a per-target logical HW block
+ * address: squeeze out the stripe/target bits, then divide by the HW
+ * block size (do_div() divides in place; the quotient is returned).
+ */
+static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
+{
+       unsigned long long tgt_addr8;
+
+       tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
+                     card->_stripe.upper_mask) |
+                   ((addr8) & card->_stripe.lower_mask);
+       do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
+       return tgt_addr8;
+}
+
+/* Select the DMA channel (target) that owns byte address @addr8. */
+static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
+{
+       return (addr8 >> card->_stripe.target_shift) &
+              card->_stripe.target_mask;
+}
+
+/* Ask the hardware to reset every DMA command/status queue. */
+static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+{
+       /* Reset all DMA Command/Status Queues */
+       iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
+}
+
+/*
+ * Byte size of a DMA transfer: the sub-page count in 512-byte sectors
+ * when set, otherwise a full hardware block.
+ */
+static unsigned int get_dma_size(struct rsxx_dma *dma)
+{
+       return dma->sub_page.cnt ? (dma->sub_page.cnt << 9)
+                                : RSXX_HW_BLK_SIZE;
+}
+
+
+/*----------------- DMA Tracker -------------------*/
+/* Record @dma as the owner of @tag (tag must have been popped). */
+static void set_tracker_dma(struct dma_tracker_list *trackers,
+                           int tag,
+                           struct rsxx_dma *dma)
+{
+       trackers->list[tag].dma = dma;
+}
+
+/* Look up the DMA issued under @tag; NULL if the tag is free. */
+static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
+                                           int tag)
+{
+       return trackers->list[tag].dma;
+}
+
+/*
+ * Allocate a free tag from the tracker free list.
+ * Returns -1 when all RSXX_MAX_OUTSTANDING_CMDS tags are in flight.
+ */
+static int pop_tracker(struct dma_tracker_list *trackers)
+{
+       int tag;
+
+       spin_lock(&trackers->lock);
+       tag = trackers->head;
+       if (tag != -1) {
+               trackers->head = trackers->list[tag].next_tag;
+               trackers->list[tag].next_tag = -1;
+       }
+       spin_unlock(&trackers->lock);
+
+       return tag;
+}
+
+/* Return @tag to the free list and clear its DMA back-pointer. */
+static void push_tracker(struct dma_tracker_list *trackers, int tag)
+{
+       spin_lock(&trackers->lock);
+       trackers->list[tag].next_tag = trackers->head;
+       trackers->head = tag;
+       trackers->list[tag].dma = NULL;
+       spin_unlock(&trackers->lock);
+}
+
+
+/*----------------- Interrupt Coalescing -------------*/
+/*
+ * Interrupt Coalescing Register Format:
+ * Interrupt Timer (64ns units) [15:0]
+ * Interrupt Count [24:16]
+ * Reserved [31:25]
+*/
+#define INTR_COAL_LATENCY_MASK       (0x0000ffff)
+
+#define INTR_COAL_COUNT_SHIFT        16
+#define INTR_COAL_COUNT_BITS         9
+#define INTR_COAL_COUNT_MASK         (((1 << INTR_COAL_COUNT_BITS) - 1) << \
+                                       INTR_COAL_COUNT_SHIFT)
+#define INTR_COAL_LATENCY_UNITS_NS   64
+
+
+/*
+ * Build the INTR_COAL register value from a completion-count threshold
+ * and a latency given in nanoseconds (stored in 64ns hardware units).
+ * Returns 0 (coalescing off) when @mode is RSXX_INTR_COAL_DISABLED.
+ */
+static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
+{
+       u32 latency_units;
+
+       if (mode == RSXX_INTR_COAL_DISABLED)
+               return 0;
+
+       latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;
+
+       return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
+              (latency_units & INTR_COAL_LATENCY_MASK);
+}
+
+/*
+ * In auto-tune mode, re-target the interrupt coalescing count at half
+ * of the current total hardware queue depth across all channels.
+ */
+static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
+{
+       int i;
+       u32 q_depth = 0;
+       u32 intr_coal;
+
+       if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+               return;
+
+       for (i = 0; i < card->n_targets; i++)
+               q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
+
+       intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
+                                     q_depth / 2,
+                                     card->config.data.intr_coal.latency);
+       iowrite32(intr_coal, card->regmap + INTR_COAL);
+}
+
+/*----------------- RSXX DMA Handling -------------------*/
+/*
+ * Final disposition of a DMA: log any error status, unmap the page
+ * (discards have no mapping), invoke the completion callback with
+ * 0 on success / 1 on any error, and free the descriptor.
+ */
+static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+                                 struct rsxx_dma *dma,
+                                 unsigned int status)
+{
+       if (status & DMA_SW_ERR)
+               printk_ratelimited(KERN_ERR
+                                  "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
+                                  dma->cmd, dma->laddr);
+       if (status & DMA_HW_FAULT)
+               printk_ratelimited(KERN_ERR
+                                  "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
+                                  dma->cmd, dma->laddr);
+       if (status & DMA_CANCELLED)
+               printk_ratelimited(KERN_ERR
+                                  "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
+                                  dma->cmd, dma->laddr);
+
+       if (dma->dma_addr)
+               pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+                              dma->cmd == HW_CMD_BLK_WRITE ?
+                                          PCI_DMA_TODEVICE :
+                                          PCI_DMA_FROMDEVICE);
+
+       if (dma->cb)
+               dma->cb(card, dma->cb_data, status ? 1 : 0);
+
+       kmem_cache_free(rsxx_dma_pool, dma);
+}
+
+/* Put a retried DMA back on its channel's software queue. */
+static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
+                                struct rsxx_dma *dma)
+{
+       /*
+        * Requeued DMAs go to the front of the queue so they are issued
+        * first.
+        */
+       spin_lock(&ctrl->queue_lock);
+       list_add(&dma->list, &ctrl->queue);
+       spin_unlock(&ctrl->queue_lock);
+}
+
+/*
+ * Translate a non-zero hardware status into a software DMA status,
+ * bump the matching error counters, and either requeue the DMA (hard
+ * read errors are retried as reconstructed reads when scrub_hard is
+ * set) or complete it with the accumulated error status.
+ */
+static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
+                                     struct rsxx_dma *dma,
+                                     u8 hw_st)
+{
+       unsigned int status = 0;
+       int requeue_cmd = 0;
+
+       dev_dbg(CARD_TO_DEV(ctrl->card),
+               "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
+               dma->cmd, dma->laddr, hw_st);
+
+       if (hw_st & HW_STATUS_CRC)
+               ctrl->stats.crc_errors++;
+       if (hw_st & HW_STATUS_HARD_ERR)
+               ctrl->stats.hard_errors++;
+       if (hw_st & HW_STATUS_SOFT_ERR)
+               ctrl->stats.soft_errors++;
+
+       switch (dma->cmd) {
+       case HW_CMD_BLK_READ:
+               if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
+                       if (ctrl->card->scrub_hard) {
+                               /* Retry as a reconstructed read. */
+                               dma->cmd = HW_CMD_BLK_RECON_READ;
+                               requeue_cmd = 1;
+                               ctrl->stats.reads_retried++;
+                       } else {
+                               status |= DMA_HW_FAULT;
+                               ctrl->stats.reads_failed++;
+                       }
+               } else if (hw_st & HW_STATUS_FAULT) {
+                       status |= DMA_HW_FAULT;
+                       ctrl->stats.reads_failed++;
+               }
+
+               break;
+       case HW_CMD_BLK_RECON_READ:
+               if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
+                       /* Data could not be reconstructed. */
+                       status |= DMA_HW_FAULT;
+                       ctrl->stats.reads_failed++;
+               }
+
+               break;
+       case HW_CMD_BLK_WRITE:
+               status |= DMA_HW_FAULT;
+               ctrl->stats.writes_failed++;
+
+               break;
+       case HW_CMD_BLK_DISCARD:
+               status |= DMA_HW_FAULT;
+               ctrl->stats.discards_failed++;
+
+               break;
+       default:
+               /* Message previously lacked its closing parenthesis. */
+               dev_err(CARD_TO_DEV(ctrl->card),
+                       "Unknown command in DMA!(cmd: x%02x "
+                       "laddr x%08x st: x%02x)\n",
+                       dma->cmd, dma->laddr, hw_st);
+               status |= DMA_SW_ERR;
+
+               break;
+       }
+
+       if (requeue_cmd)
+               rsxx_requeue_dma(ctrl, dma);
+       else
+               rsxx_complete_dma(ctrl->card, dma, status);
+}
+
+/*
+ * Activity-timer handler: fires when a channel with outstanding
+ * hardware commands has made no progress for DMA_ACTIVITY_TIMEOUT.
+ * A lost SW_CMD_IDX write is retried; otherwise the card is faulted.
+ */
+static void dma_engine_stalled(unsigned long data)
+{
+       struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+
+       if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+               return;
+
+       if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
+               /*
+                * The dma engine was stalled because the SW_CMD_IDX write
+                * was lost. Issue it again to recover.
+                */
+               dev_warn(CARD_TO_DEV(ctrl->card),
+                       "SW_CMD_IDX write was lost, re-writing...\n");
+               iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+               mod_timer(&ctrl->activity_timer,
+                         jiffies + DMA_ACTIVITY_TIMEOUT);
+       } else {
+               dev_warn(CARD_TO_DEV(ctrl->card),
+                       "DMA channel %d has stalled, faulting interface.\n",
+                       ctrl->id);
+               ctrl->card->dma_fault = 1;
+       }
+}
+
+/*
+ * Issue worker: drain the channel's software queue into the hardware
+ * command ring.  Stops when the queue is empty or all tags are in
+ * flight; the trailing SW_CMD_IDX write (after a wmb) is what tells
+ * the hardware new commands are available.
+ */
+static void rsxx_issue_dmas(struct work_struct *work)
+{
+       struct rsxx_dma_ctrl *ctrl;
+       struct rsxx_dma *dma;
+       int tag;
+       int cmds_pending = 0;
+       struct hw_cmd *hw_cmd_buf;
+
+       ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
+       hw_cmd_buf = ctrl->cmd.buf;
+
+       if (unlikely(ctrl->card->halt))
+               return;
+
+       while (1) {
+               /* Peek for work before spending a tag on it. */
+               spin_lock(&ctrl->queue_lock);
+               if (list_empty(&ctrl->queue)) {
+                       spin_unlock(&ctrl->queue_lock);
+                       break;
+               }
+               spin_unlock(&ctrl->queue_lock);
+
+               tag = pop_tracker(ctrl->trackers);
+               if (tag == -1)
+                       break;
+
+               spin_lock(&ctrl->queue_lock);
+               dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
+               list_del(&dma->list);
+               ctrl->stats.sw_q_depth--;
+               spin_unlock(&ctrl->queue_lock);
+
+               /*
+                * This will catch any DMAs that slipped in right before the
+                * fault, but was queued after all the other DMAs were
+                * cancelled.
+                */
+               if (unlikely(ctrl->card->dma_fault)) {
+                       push_tracker(ctrl->trackers, tag);
+                       rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+                       continue;
+               }
+
+               /* Fill the next command-ring slot. */
+               set_tracker_dma(ctrl->trackers, tag, dma);
+               hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
+               hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
+               hw_cmd_buf[ctrl->cmd.idx]._rsvd    = 0;
+               hw_cmd_buf[ctrl->cmd.idx].sub_page =
+                                       ((dma->sub_page.cnt & 0x7) << 4) |
+                                        (dma->sub_page.off & 0x7);
+
+               hw_cmd_buf[ctrl->cmd.idx].device_addr =
+                                       cpu_to_le32(dma->laddr);
+
+               hw_cmd_buf[ctrl->cmd.idx].host_addr =
+                                       cpu_to_le64(dma->dma_addr);
+
+               dev_dbg(CARD_TO_DEV(ctrl->card),
+                       "Issue DMA%d(laddr %d tag %d) to idx %d\n",
+                       ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
+
+               ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
+               cmds_pending++;
+
+               if (dma->cmd == HW_CMD_BLK_WRITE)
+                       ctrl->stats.writes_issued++;
+               else if (dma->cmd == HW_CMD_BLK_DISCARD)
+                       ctrl->stats.discards_issued++;
+               else
+                       ctrl->stats.reads_issued++;
+       }
+
+       /* Let HW know we've queued commands. */
+       if (cmds_pending) {
+               /*
+                * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
+                * (which is in PCI-consistent system-memory) from the loop
+                * above make it into the coherency domain before the
+                * following PIO "trigger" updating the cmd.idx.  A WMB is
+                * sufficient. We need not explicitly CPU cache-flush since
+                * the memory is a PCI-consistent (ie; coherent) mapping.
+                */
+               wmb();
+
+               atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
+               mod_timer(&ctrl->activity_timer,
+                         jiffies + DMA_ACTIVITY_TIMEOUT);
+               iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+       }
+}
+
+/*
+ * Completion worker: walk the status ring while each entry's count
+ * matches our expected event counter (e_cnt), complete or error-handle
+ * each finished DMA by tag, then re-enable this channel's interrupt
+ * and kick the issue worker if software-queued DMAs remain.
+ */
+static void rsxx_dma_done(struct work_struct *work)
+{
+       struct rsxx_dma_ctrl *ctrl;
+       struct rsxx_dma *dma;
+       unsigned long flags;
+       u16 count;
+       u8 status;
+       u8 tag;
+       struct hw_status *hw_st_buf;
+
+       ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
+       hw_st_buf = ctrl->status.buf;
+
+       if (unlikely(ctrl->card->halt) ||
+           unlikely(ctrl->card->dma_fault))
+               return;
+
+       count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
+
+       while (count == ctrl->e_cnt) {
+               /*
+                * The read memory-barrier is necessary to keep aggressive
+                * processors/optimizers (such as the PPC Apple G5) from
+                * reordering the following status-buffer tag & status read
+                * *before* the count read on subsequent iterations of the
+                * loop!
+                */
+               rmb();
+
+               status = hw_st_buf[ctrl->status.idx].status;
+               tag    = hw_st_buf[ctrl->status.idx].tag;
+
+               dma = get_tracker_dma(ctrl->trackers, tag);
+               if (dma == NULL) {
+                       /* Unknown tag: disable DMA interrupts and bail. */
+                       spin_lock_irqsave(&ctrl->card->irq_lock, flags);
+                       rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
+                       spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
+
+                       dev_err(CARD_TO_DEV(ctrl->card),
+                               "No tracker for tag %d "
+                               "(idx %d id %d)\n",
+                               tag, ctrl->status.idx, ctrl->id);
+                       return;
+               }
+
+               dev_dbg(CARD_TO_DEV(ctrl->card),
+                       "Completing DMA%d"
+                       "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
+                       ctrl->id, dma->laddr, tag, status, count,
+                       ctrl->status.idx);
+
+               atomic_dec(&ctrl->stats.hw_q_depth);
+
+               mod_timer(&ctrl->activity_timer,
+                         jiffies + DMA_ACTIVITY_TIMEOUT);
+
+               if (status)
+                       rsxx_handle_dma_error(ctrl, dma, status);
+               else
+                       rsxx_complete_dma(ctrl->card, dma, 0);
+
+               push_tracker(ctrl->trackers, tag);
+
+               ctrl->status.idx = (ctrl->status.idx + 1) &
+                                  RSXX_CS_IDX_MASK;
+               ctrl->e_cnt++;
+
+               count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
+       }
+
+       dma_intr_coal_auto_tune(ctrl->card);
+
+       if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+               del_timer_sync(&ctrl->activity_timer);
+
+       spin_lock_irqsave(&ctrl->card->irq_lock, flags);
+       rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
+       spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
+
+       spin_lock(&ctrl->queue_lock);
+       if (ctrl->stats.sw_q_depth)
+               queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
+       spin_unlock(&ctrl->queue_lock);
+}
+
+/*
+ * Free every DMA on list @q without invoking completion callbacks,
+ * unmapping any pages first.  Returns the number of DMAs freed.
+ * Caller holds whatever lock protects @q (or owns it exclusively).
+ */
+static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
+                                     struct list_head *q)
+{
+       struct rsxx_dma *dma;
+       struct rsxx_dma *tmp;
+       int cnt = 0;
+
+       list_for_each_entry_safe(dma, tmp, q, list) {
+               list_del(&dma->list);
+
+               if (dma->dma_addr)
+                       pci_unmap_page(card->dev, dma->dma_addr,
+                                      get_dma_size(dma),
+                                      (dma->cmd == HW_CMD_BLK_WRITE) ?
+                                      PCI_DMA_TODEVICE :
+                                      PCI_DMA_FROMDEVICE);
+               kmem_cache_free(rsxx_dma_pool, dma);
+               cnt++;
+       }
+
+       return cnt;
+}
+
+/*
+ * Allocate and queue a discard DMA for logical address @laddr on
+ * list @q.  Discards carry no host page, so no DMA mapping is made.
+ * Returns 0 or -ENOMEM.
+ */
+static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+                                 struct list_head *q,
+                                 unsigned int laddr,
+                                 rsxx_dma_cb cb,
+                                 void *cb_data)
+{
+       struct rsxx_dma *dma;
+
+       dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
+       if (!dma)
+               return -ENOMEM;
+
+       dma->cmd          = HW_CMD_BLK_DISCARD;
+       dma->laddr        = laddr;
+       dma->dma_addr     = 0;
+       dma->sub_page.off = 0;
+       dma->sub_page.cnt = 0;
+       dma->page         = NULL;
+       dma->pg_off       = 0;
+       dma->cb           = cb;
+       dma->cb_data      = cb_data;
+
+       dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);
+
+       list_add_tail(&dma->list, q);
+
+       return 0;
+}
+
+/*
+ * Allocate, map and queue a read/write DMA on list @q.
+ * @dir is non-zero for writes; @dma_off/@dma_len locate the transfer
+ * within the hardware block (512-byte granularity).
+ * Returns 0, or -ENOMEM on allocation/mapping failure.
+ */
+static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+                             struct list_head *q,
+                             int dir,
+                             unsigned int dma_off,
+                             unsigned int dma_len,
+                             unsigned int laddr,
+                             struct page *page,
+                             unsigned int pg_off,
+                             rsxx_dma_cb cb,
+                             void *cb_data)
+{
+       struct rsxx_dma *dma;
+
+       dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
+       if (!dma)
+               return -ENOMEM;
+
+       dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
+                                    dir ? PCI_DMA_TODEVICE :
+                                    PCI_DMA_FROMDEVICE);
+       /*
+        * The DMA-API requires mapping failures to be detected with
+        * pci_dma_mapping_error(); a failed mapping is not guaranteed
+        * to be 0, and 0 can be a valid bus address on some platforms.
+        */
+       if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
+               kmem_cache_free(rsxx_dma_pool, dma);
+               return -ENOMEM;
+       }
+
+       dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
+       dma->laddr        = laddr;
+       dma->sub_page.off = (dma_off >> 9);
+       dma->sub_page.cnt = (dma_len >> 9);
+       dma->page         = page;
+       dma->pg_off       = pg_off;
+       dma->cb           = cb;
+       dma->cb_data      = cb_data;
+
+       dev_dbg(CARD_TO_DEV(card),
+               "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
+               dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
+               dma->sub_page.cnt, dma->page, dma->pg_off);
+
+       /* Queue the DMA */
+       list_add_tail(&dma->list, q);
+
+       return 0;
+}
+
+/*
+ * Split a bio into per-target DMA lists (one per channel, honoring the
+ * card's striping), splice them onto the channels' software queues and
+ * kick each channel's issue worker.  @n_dmas is set to the total DMA
+ * count so the caller's completion callback can track the bio.
+ * On any queuing failure all partially built lists are freed and the
+ * error is returned.
+ *
+ * NOTE(review): the DISCARD path steps in RSXX_HW_BLK_SIZE units and
+ * assumes bi_size is a multiple of the HW block size — presumably
+ * enforced by the discard_granularity set at queue creation; verify.
+ */
+int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+                          struct bio *bio,
+                          atomic_t *n_dmas,
+                          rsxx_dma_cb cb,
+                          void *cb_data)
+{
+       struct list_head dma_list[RSXX_MAX_TARGETS];
+       struct bio_vec *bvec;
+       unsigned long long addr8;
+       unsigned int laddr;
+       unsigned int bv_len;
+       unsigned int bv_off;
+       unsigned int dma_off;
+       unsigned int dma_len;
+       int dma_cnt[RSXX_MAX_TARGETS];
+       int tgt;
+       int st;
+       int i;
+
+       addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+       atomic_set(n_dmas, 0);
+
+       for (i = 0; i < card->n_targets; i++) {
+               INIT_LIST_HEAD(&dma_list[i]);
+               dma_cnt[i] = 0;
+       }
+
+       if (bio->bi_rw & REQ_DISCARD) {
+               bv_len = bio->bi_size;
+
+               while (bv_len > 0) {
+                       tgt   = rsxx_get_dma_tgt(card, addr8);
+                       laddr = rsxx_addr8_to_laddr(addr8, card);
+
+                       st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
+                                                   cb, cb_data);
+                       if (st)
+                               goto bvec_err;
+
+                       dma_cnt[tgt]++;
+                       atomic_inc(n_dmas);
+                       addr8  += RSXX_HW_BLK_SIZE;
+                       bv_len -= RSXX_HW_BLK_SIZE;
+               }
+       } else {
+               /* Chop each segment at hardware-block boundaries. */
+               bio_for_each_segment(bvec, bio, i) {
+                       bv_len = bvec->bv_len;
+                       bv_off = bvec->bv_offset;
+
+                       while (bv_len > 0) {
+                               tgt   = rsxx_get_dma_tgt(card, addr8);
+                               laddr = rsxx_addr8_to_laddr(addr8, card);
+                               dma_off = addr8 & RSXX_HW_BLK_MASK;
+                               dma_len = min(bv_len,
+                                             RSXX_HW_BLK_SIZE - dma_off);
+
+                               st = rsxx_queue_dma(card, &dma_list[tgt],
+                                                       bio_data_dir(bio),
+                                                       dma_off, dma_len,
+                                                       laddr, bvec->bv_page,
+                                                       bv_off, cb, cb_data);
+                               if (st)
+                                       goto bvec_err;
+
+                               dma_cnt[tgt]++;
+                               atomic_inc(n_dmas);
+                               addr8  += dma_len;
+                               bv_off += dma_len;
+                               bv_len -= dma_len;
+                       }
+               }
+       }
+
+       for (i = 0; i < card->n_targets; i++) {
+               if (!list_empty(&dma_list[i])) {
+                       spin_lock(&card->ctrl[i].queue_lock);
+                       card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
+                       list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
+                       spin_unlock(&card->ctrl[i].queue_lock);
+
+                       queue_work(card->ctrl[i].issue_wq,
+                                  &card->ctrl[i].issue_dma_work);
+               }
+       }
+
+       return 0;
+
+bvec_err:
+       for (i = 0; i < card->n_targets; i++)
+               rsxx_cleanup_dma_queue(card, &dma_list[i]);
+
+       return st;
+}
+
+
+/*----------------- DMA Engine Initialization & Setup -------------------*/
+/*
+ * Initialize one DMA channel: allocate the coherent status/command
+ * rings and the tag tracker, create the issue/done workqueues, program
+ * the ring addresses into the hardware, and sync the software indices
+ * with the hardware's current counts.  Returns 0 or a negative errno;
+ * on failure the caller (rsxx_dma_setup) frees partial allocations.
+ */
+static int rsxx_dma_ctrl_init(struct pci_dev *dev,
+                                 struct rsxx_dma_ctrl *ctrl)
+{
+       int i;
+
+       memset(&ctrl->stats, 0, sizeof(ctrl->stats));
+
+       ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+                                               &ctrl->status.dma_addr);
+       ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+                                            &ctrl->cmd.dma_addr);
+       if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+               return -ENOMEM;
+
+       ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
+       if (!ctrl->trackers)
+               return -ENOMEM;
+
+       /* Thread every tag onto the free list; last tag terminates it. */
+       ctrl->trackers->head = 0;
+       for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+               ctrl->trackers->list[i].next_tag = i + 1;
+               ctrl->trackers->list[i].dma = NULL;
+       }
+       ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
+       spin_lock_init(&ctrl->trackers->lock);
+
+       spin_lock_init(&ctrl->queue_lock);
+       INIT_LIST_HEAD(&ctrl->queue);
+
+       setup_timer(&ctrl->activity_timer, dma_engine_stalled,
+                                       (unsigned long)ctrl);
+
+       ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
+       if (!ctrl->issue_wq)
+               return -ENOMEM;
+
+       ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
+       if (!ctrl->done_wq)
+               return -ENOMEM;
+
+       INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
+       INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
+
+       memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+       iowrite32(lower_32_bits(ctrl->status.dma_addr),
+                 ctrl->regmap + SB_ADD_LO);
+       iowrite32(upper_32_bits(ctrl->status.dma_addr),
+                 ctrl->regmap + SB_ADD_HI);
+
+       memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+       iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+       iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+       ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+       if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+               dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+                        ctrl->status.idx);
+               return -EINVAL;
+       }
+       iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+       iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+       ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+       if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+               /* Was printing ctrl->status.idx here (copy-paste bug). */
+               dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+                        ctrl->cmd.idx);
+               return -EINVAL;
+       }
+       iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+       iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+       wmb();
+
+       return 0;
+}
+
+/*
+ * Derive the address-decomposition masks/shifts used by
+ * rsxx_get_dma_tgt() and rsxx_addr8_to_laddr() from the configured
+ * stripe size (bytes).  Fails with -EINVAL if the stripe size is not
+ * a power of two.
+ */
+static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
+                             unsigned int stripe_size8)
+{
+       if (!is_power_of_2(stripe_size8)) {
+               dev_err(CARD_TO_DEV(card),
+                       "stripe_size is NOT a power of 2!\n");
+               return -EINVAL;
+       }
+
+       card->_stripe.lower_mask = stripe_size8 - 1;
+
+       card->_stripe.upper_mask  = ~(card->_stripe.lower_mask);
+       card->_stripe.upper_shift = ffs(card->n_targets) - 1;
+
+       card->_stripe.target_mask = card->n_targets - 1;
+       card->_stripe.target_shift = ffs(stripe_size8) - 1;
+
+       dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask   = x%016llx\n",
+               card->_stripe.lower_mask);
+       dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift  = x%016llx\n",
+               card->_stripe.upper_shift);
+       dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask   = x%016llx\n",
+               card->_stripe.upper_mask);
+       dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask  = x%016llx\n",
+               card->_stripe.target_mask);
+       dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
+               card->_stripe.target_shift);
+
+       return 0;
+}
+
+/*
+ * Apply the card's saved configuration: program interrupt coalescing
+ * and set up the striping parameters.  Returns the stripe-setup result.
+ */
+static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+{
+       u32 intr_coal;
+
+       intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
+                                     card->config.data.intr_coal.count,
+                                     card->config.data.intr_coal.latency);
+       iowrite32(intr_coal, card->regmap + INTR_COAL);
+
+       return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
+}
+
+/*
+ * Bring up every DMA channel on the card: assign each channel its 4K
+ * register window, reset the hardware queues, initialize each channel,
+ * apply saved configuration, and finally enable per-channel interrupts.
+ * On any channel-init failure, tears down whatever every channel had
+ * allocated so far (pointer checks make this safe for channels that
+ * were never initialized) and returns the error.
+ */
+int rsxx_dma_setup(struct rsxx_cardinfo *card)
+{
+       unsigned long flags;
+       int st;
+       int i;
+
+       dev_info(CARD_TO_DEV(card),
+               "Initializing %d DMA targets\n",
+               card->n_targets);
+
+       /* Regmap is divided up into 4K chunks. One for each DMA channel */
+       for (i = 0; i < card->n_targets; i++)
+               card->ctrl[i].regmap = card->regmap + (i * 4096);
+
+       card->dma_fault = 0;
+
+       /* Reset the DMA queues */
+       rsxx_dma_queue_reset(card);
+
+       /************* Setup DMA Control *************/
+       for (i = 0; i < card->n_targets; i++) {
+               st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
+               if (st)
+                       goto failed_dma_setup;
+
+               card->ctrl[i].card = card;
+               card->ctrl[i].id = i;
+       }
+
+       card->scrub_hard = 1;
+
+       if (card->config_valid)
+               rsxx_dma_configure(card);
+
+       /* Enable the interrupts after all setup has completed. */
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock_irqsave(&card->irq_lock, flags);
+               rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
+               spin_unlock_irqrestore(&card->irq_lock, flags);
+       }
+
+       return 0;
+
+failed_dma_setup:
+       for (i = 0; i < card->n_targets; i++) {
+               struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
+
+               if (ctrl->issue_wq) {
+                       destroy_workqueue(ctrl->issue_wq);
+                       ctrl->issue_wq = NULL;
+               }
+
+               if (ctrl->done_wq) {
+                       destroy_workqueue(ctrl->done_wq);
+                       ctrl->done_wq = NULL;
+               }
+
+               if (ctrl->trackers)
+                       vfree(ctrl->trackers);
+
+               if (ctrl->status.buf)
+                       pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+                                           ctrl->status.buf,
+                                           ctrl->status.dma_addr);
+               if (ctrl->cmd.buf)
+                       pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+                                           ctrl->cmd.buf, ctrl->cmd.dma_addr);
+       }
+
+       return st;
+}
+
+
+/*
+ * Tear down every DMA channel: destroy the workqueues, stop the
+ * activity timer, free all still-queued DMAs, reclaim all issued
+ * (tracked) DMAs, then release the tracker and the coherent rings.
+ */
+void rsxx_dma_destroy(struct rsxx_cardinfo *card)
+{
+       struct rsxx_dma_ctrl *ctrl;
+       struct rsxx_dma *dma;
+       int i, j;
+       int cnt = 0;
+
+       for (i = 0; i < card->n_targets; i++) {
+               ctrl = &card->ctrl[i];
+
+               if (ctrl->issue_wq) {
+                       destroy_workqueue(ctrl->issue_wq);
+                       ctrl->issue_wq = NULL;
+               }
+
+               if (ctrl->done_wq) {
+                       destroy_workqueue(ctrl->done_wq);
+                       ctrl->done_wq = NULL;
+               }
+
+               if (timer_pending(&ctrl->activity_timer))
+                       del_timer_sync(&ctrl->activity_timer);
+
+               /* Clean up the DMA queue */
+               spin_lock(&ctrl->queue_lock);
+               cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
+               spin_unlock(&ctrl->queue_lock);
+
+               if (cnt)
+                       dev_info(CARD_TO_DEV(card),
+                               "Freed %d queued DMAs on channel %d\n",
+                               cnt, i);
+
+               /*
+                * Clean up issued DMAs.  Reset the counter so the
+                * "pending" message doesn't also include the queued
+                * DMAs counted above.
+                */
+               cnt = 0;
+               for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
+                       dma = get_tracker_dma(ctrl->trackers, j);
+                       if (dma) {
+                               pci_unmap_page(card->dev, dma->dma_addr,
+                                              get_dma_size(dma),
+                                              (dma->cmd == HW_CMD_BLK_WRITE) ?
+                                              PCI_DMA_TODEVICE :
+                                              PCI_DMA_FROMDEVICE);
+                               kmem_cache_free(rsxx_dma_pool, dma);
+                               cnt++;
+                       }
+               }
+
+               if (cnt)
+                       dev_info(CARD_TO_DEV(card),
+                               "Freed %d pending DMAs on channel %d\n",
+                               cnt, i);
+
+               vfree(ctrl->trackers);
+
+               pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+                                   ctrl->status.buf, ctrl->status.dma_addr);
+               pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+                                   ctrl->cmd.buf, ctrl->cmd.dma_addr);
+       }
+}
+
+
+/*
+ * Module-load initialization for the DMA layer: create the slab cache
+ * for struct rsxx_dma descriptors.  Returns 0 or -ENOMEM.
+ */
+int rsxx_dma_init(void)
+{
+       rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
+       if (!rsxx_dma_pool)
+               return -ENOMEM;
+
+       return 0;
+}
+
+
+/* Module-unload counterpart of rsxx_dma_init(). */
+void rsxx_dma_cleanup(void)
+{
+       kmem_cache_destroy(rsxx_dma_pool);
+}
+
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
new file mode 100644 (file)
index 0000000..2e50b65
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+* Filename: rsxx.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*      Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_H__
+#define __RSXX_H__
+
+/*----------------- IOCTL Definitions -------------------*/
+
+struct rsxx_reg_access {
+       __u32 addr;
+       __u32 cnt;
+       __u32 stat;
+       __u32 stream;
+       __u32 data[8];
+};
+
+#define RSXX_MAX_REG_CNT       (8 * (sizeof(__u32)))
+
+#define RSXX_IOC_MAGIC 'r'
+
+#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
+#define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access)
+
+#endif /* __RSXX_H__ */
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
new file mode 100644 (file)
index 0000000..c025fe5
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+* Filename: rsxx_cfg.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*      Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_CFG_H__
+#define __RSXX_CFG_H__
+
+/* NOTE: Config values will be saved in network byte order (i.e. Big endian) */
+#include <linux/types.h>
+
+/*
+ * The card config version must match the driver's expected version. If it does
+ * not, the DMA interfaces will not be attached and the user will need to
+ * initialize/upgrade the card configuration using the card config utility.
+ */
+#define RSXX_CFG_VERSION       4
+
+struct card_cfg_hdr {
+       __u32   version;
+       __u32   crc;
+};
+
+struct card_cfg_data {
+       __u32   block_size;
+       __u32   stripe_size;
+       __u32   vendor_id;
+       __u32   cache_order;
+       struct {
+               __u32   mode;   /* Disabled, manual, auto-tune... */
+               __u32   count;  /* Number of intr to coalesce     */
+               __u32   latency;/* Max wait time (in ns)          */
+       } intr_coal;
+};
+
+struct rsxx_card_cfg {
+       struct card_cfg_hdr     hdr;
+       struct card_cfg_data    data;
+};
+
+/* Vendor ID Values */
+#define RSXX_VENDOR_ID_TMS_IBM         0
+#define RSXX_VENDOR_ID_DSI             1
+#define RSXX_VENDOR_COUNT              2
+
+/* Interrupt Coalescing Values */
+#define RSXX_INTR_COAL_DISABLED           0
+#define RSXX_INTR_COAL_EXPLICIT           1
+#define RSXX_INTR_COAL_AUTO_TUNE          2
+
+
+#endif /* __RSXX_CFG_H__ */
+
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
new file mode 100644 (file)
index 0000000..a1ac907
--- /dev/null
@@ -0,0 +1,399 @@
+/*
+* Filename: rsxx_priv.h
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*      Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef __RSXX_PRIV_H__
+#define __RSXX_PRIV_H__
+
+#include <linux/version.h>
+#include <linux/semaphore.h>
+
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/bio.h>
+#include <linux/vmalloc.h>
+#include <linux/timer.h>
+#include <linux/ioctl.h>
+
+#include "rsxx.h"
+#include "rsxx_cfg.h"
+
+struct proc_cmd;
+
+#define PCI_VENDOR_ID_TMS_IBM          0x15B6
+#define PCI_DEVICE_ID_RS70_FLASH       0x0019
+#define PCI_DEVICE_ID_RS70D_FLASH      0x001A
+#define PCI_DEVICE_ID_RS80_FLASH       0x001C
+#define PCI_DEVICE_ID_RS81_FLASH       0x001E
+
+#define RS70_PCI_REV_SUPPORTED 4
+
+#define DRIVER_NAME "rsxx"
+#define DRIVER_VERSION "3.7"
+
+/* Block size is 4096 */
+#define RSXX_HW_BLK_SHIFT              12
+#define RSXX_HW_BLK_SIZE               (1 << RSXX_HW_BLK_SHIFT)
+#define RSXX_HW_BLK_MASK               (RSXX_HW_BLK_SIZE - 1)
+
+#define MAX_CREG_DATA8 32
+#define LOG_BUF_SIZE8  128
+
+#define RSXX_MAX_OUTSTANDING_CMDS      255
+#define RSXX_CS_IDX_MASK               0xff
+
+#define RSXX_MAX_TARGETS       8
+
+struct dma_tracker_list;
+
+/* DMA Command/Status Buffer structure */
+struct rsxx_cs_buffer {
+       dma_addr_t      dma_addr;
+       void            *buf;
+       u32             idx;
+};
+
+struct rsxx_dma_stats {
+       u32 crc_errors;
+       u32 hard_errors;
+       u32 soft_errors;
+       u32 writes_issued;
+       u32 writes_failed;
+       u32 reads_issued;
+       u32 reads_failed;
+       u32 reads_retried;
+       u32 discards_issued;
+       u32 discards_failed;
+       u32 done_rescheduled;
+       u32 issue_rescheduled;
+       u32 sw_q_depth;         /* Number of DMAs on the SW queue. */
+       atomic_t hw_q_depth;    /* Number of DMAs queued to HW. */
+};
+
+struct rsxx_dma_ctrl {
+       struct rsxx_cardinfo            *card;
+       int                             id;
+       void                            __iomem *regmap;
+       struct rsxx_cs_buffer           status;
+       struct rsxx_cs_buffer           cmd;
+       u16                             e_cnt;
+       spinlock_t                      queue_lock;
+       struct list_head                queue;
+       struct workqueue_struct         *issue_wq;
+       struct work_struct              issue_dma_work;
+       struct workqueue_struct         *done_wq;
+       struct work_struct              dma_done_work;
+       struct timer_list               activity_timer;
+       struct dma_tracker_list         *trackers;
+       struct rsxx_dma_stats           stats;
+};
+
+struct rsxx_cardinfo {
+       struct pci_dev          *dev;
+       unsigned int            halt;
+
+       void                    __iomem *regmap;
+       spinlock_t              irq_lock;
+       unsigned int            isr_mask;
+       unsigned int            ier_mask;
+
+       struct rsxx_card_cfg    config;
+       int                     config_valid;
+
+       /* Embedded CPU Communication */
+       struct {
+               spinlock_t              lock;
+               bool                    active;
+               struct creg_cmd         *active_cmd;
+               struct work_struct      done_work;
+               struct list_head        queue;
+               unsigned int            q_depth;
+               /* Cache the creg status to prevent ioreads */
+               struct {
+                       u32             stat;
+                       u32             failed_cancel_timer;
+                       u32             creg_timeout;
+               } creg_stats;
+               struct timer_list       cmd_timer;
+               struct mutex            reset_lock;
+               int                     reset;
+       } creg_ctrl;
+
+       struct {
+               char tmp[MAX_CREG_DATA8];
+               char buf[LOG_BUF_SIZE8]; /* terminated */
+               int buf_len;
+       } log;
+
+       struct work_struct      event_work;
+       unsigned int            state;
+       u64                     size8;
+
+       /* Lock the device attach/detach function */
+       struct mutex            dev_lock;
+
+       /* Block Device Variables */
+       bool                    bdev_attached;
+       int                     disk_id;
+       int                     major;
+       struct request_queue    *queue;
+       struct gendisk          *gendisk;
+       struct {
+               /* Used to convert a byte address to a device address. */
+               u64 lower_mask;
+               u64 upper_shift;
+               u64 upper_mask;
+               u64 target_mask;
+               u64 target_shift;
+       } _stripe;
+       unsigned int            dma_fault;
+
+       int                     scrub_hard;
+
+       int                     n_targets;
+       struct rsxx_dma_ctrl    *ctrl;
+};
+
+enum rsxx_pci_regmap {
+       HWID            = 0x00, /* Hardware Identification Register */
+       SCRATCH         = 0x04, /* Scratch/Debug Register */
+       RESET           = 0x08, /* Reset Register */
+       ISR             = 0x10, /* Interrupt Status Register */
+       IER             = 0x14, /* Interrupt Enable Register */
+       IPR             = 0x18, /* Interrupt Poll Register */
+       CB_ADD_LO       = 0x20, /* Command Host Buffer Address [31:0] */
+       CB_ADD_HI       = 0x24, /* Command Host Buffer Address [63:32]*/
+       HW_CMD_IDX      = 0x28, /* Hardware Processed Command Index */
+       SW_CMD_IDX      = 0x2C, /* Software Processed Command Index */
+       SB_ADD_LO       = 0x30, /* Status Host Buffer Address [31:0] */
+       SB_ADD_HI       = 0x34, /* Status Host Buffer Address [63:32] */
+       HW_STATUS_CNT   = 0x38, /* Hardware Status Counter */
+       SW_STATUS_CNT   = 0x3C, /* Deprecated */
+       CREG_CMD        = 0x40, /* CPU Command Register */
+       CREG_ADD        = 0x44, /* CPU Address Register */
+       CREG_CNT        = 0x48, /* CPU Count Register */
+       CREG_STAT       = 0x4C, /* CPU Status Register */
+       CREG_DATA0      = 0x50, /* CPU Data Registers */
+       CREG_DATA1      = 0x54,
+       CREG_DATA2      = 0x58,
+       CREG_DATA3      = 0x5C,
+       CREG_DATA4      = 0x60,
+       CREG_DATA5      = 0x64,
+       CREG_DATA6      = 0x68,
+       CREG_DATA7      = 0x6c,
+       INTR_COAL       = 0x70, /* Interrupt Coalescing Register */
+       HW_ERROR        = 0x74, /* Card Error Register */
+       PCI_DEBUG0      = 0x78, /* PCI Debug Registers */
+       PCI_DEBUG1      = 0x7C,
+       PCI_DEBUG2      = 0x80,
+       PCI_DEBUG3      = 0x84,
+       PCI_DEBUG4      = 0x88,
+       PCI_DEBUG5      = 0x8C,
+       PCI_DEBUG6      = 0x90,
+       PCI_DEBUG7      = 0x94,
+       PCI_POWER_THROTTLE = 0x98,
+       PERF_CTRL       = 0x9c,
+       PERF_TIMER_LO   = 0xa0,
+       PERF_TIMER_HI   = 0xa4,
+       PERF_RD512_LO   = 0xa8,
+       PERF_RD512_HI   = 0xac,
+       PERF_WR512_LO   = 0xb0,
+       PERF_WR512_HI   = 0xb4,
+};
+
+enum rsxx_intr {
+       CR_INTR_DMA0    = 0x00000001,
+       CR_INTR_CREG    = 0x00000002,
+       CR_INTR_DMA1    = 0x00000004,
+       CR_INTR_EVENT   = 0x00000008,
+       CR_INTR_DMA2    = 0x00000010,
+       CR_INTR_DMA3    = 0x00000020,
+       CR_INTR_DMA4    = 0x00000040,
+       CR_INTR_DMA5    = 0x00000080,
+       CR_INTR_DMA6    = 0x00000100,
+       CR_INTR_DMA7    = 0x00000200,
+       CR_INTR_DMA_ALL = 0x000003f5,
+       CR_INTR_ALL     = 0xffffffff,
+};
+
+static inline int CR_INTR_DMA(int N)
+{
+       static const unsigned int _CR_INTR_DMA[] = {
+               CR_INTR_DMA0, CR_INTR_DMA1, CR_INTR_DMA2, CR_INTR_DMA3,
+               CR_INTR_DMA4, CR_INTR_DMA5, CR_INTR_DMA6, CR_INTR_DMA7
+       };
+       return _CR_INTR_DMA[N];
+}
+enum rsxx_pci_reset {
+       DMA_QUEUE_RESET         = 0x00000001,
+};
+
+enum rsxx_pci_revision {
+       RSXX_DISCARD_SUPPORT = 2,
+};
+
+enum rsxx_creg_cmd {
+       CREG_CMD_TAG_MASK       = 0x0000FF00,
+       CREG_OP_WRITE           = 0x000000C0,
+       CREG_OP_READ            = 0x000000E0,
+};
+
+enum rsxx_creg_addr {
+       CREG_ADD_CARD_CMD               = 0x80001000,
+       CREG_ADD_CARD_STATE             = 0x80001004,
+       CREG_ADD_CARD_SIZE              = 0x8000100c,
+       CREG_ADD_CAPABILITIES           = 0x80001050,
+       CREG_ADD_LOG                    = 0x80002000,
+       CREG_ADD_NUM_TARGETS            = 0x80003000,
+       CREG_ADD_CONFIG                 = 0xB0000000,
+};
+
+enum rsxx_creg_card_cmd {
+       CARD_CMD_STARTUP                = 1,
+       CARD_CMD_SHUTDOWN               = 2,
+       CARD_CMD_LOW_LEVEL_FORMAT       = 3,
+       CARD_CMD_FPGA_RECONFIG_BR       = 4,
+       CARD_CMD_FPGA_RECONFIG_MAIN     = 5,
+       CARD_CMD_BACKUP                 = 6,
+       CARD_CMD_RESET                  = 7,
+       CARD_CMD_deprecated             = 8,
+       CARD_CMD_UNINITIALIZE           = 9,
+       CARD_CMD_DSTROY_EMERGENCY       = 10,
+       CARD_CMD_DSTROY_NORMAL          = 11,
+       CARD_CMD_DSTROY_EXTENDED        = 12,
+       CARD_CMD_DSTROY_ABORT           = 13,
+};
+
+enum rsxx_card_state {
+       CARD_STATE_SHUTDOWN             = 0x00000001,
+       CARD_STATE_STARTING             = 0x00000002,
+       CARD_STATE_FORMATTING           = 0x00000004,
+       CARD_STATE_UNINITIALIZED        = 0x00000008,
+       CARD_STATE_GOOD                 = 0x00000010,
+       CARD_STATE_SHUTTING_DOWN        = 0x00000020,
+       CARD_STATE_FAULT                = 0x00000040,
+       CARD_STATE_RD_ONLY_FAULT        = 0x00000080,
+       CARD_STATE_DSTROYING            = 0x00000100,
+};
+
+enum rsxx_led {
+       LED_DEFAULT     = 0x0,
+       LED_IDENTIFY    = 0x1,
+       LED_SOAK        = 0x2,
+};
+
+enum rsxx_creg_flash_lock {
+       CREG_FLASH_LOCK         = 1,
+       CREG_FLASH_UNLOCK       = 2,
+};
+
+enum rsxx_card_capabilities {
+       CARD_CAP_SUBPAGE_WRITES = 0x00000080,
+};
+
+enum rsxx_creg_stat {
+       CREG_STAT_STATUS_MASK   = 0x00000003,
+       CREG_STAT_SUCCESS       = 0x1,
+       CREG_STAT_ERROR         = 0x2,
+       CREG_STAT_CHAR_PENDING  = 0x00000004, /* Character I/O pending bit */
+       CREG_STAT_LOG_PENDING   = 0x00000008, /* HW log message pending bit */
+       CREG_STAT_TAG_MASK      = 0x0000ff00,
+};
+
+static inline unsigned int CREG_DATA(int N)
+{
+       return CREG_DATA0 + (N << 2);
+}
+
+/*----------------- Convenient Log Wrappers -------------------*/
+#define CARD_TO_DEV(__CARD)    (&(__CARD)->dev->dev)
+
+/***** config.c *****/
+int rsxx_load_config(struct rsxx_cardinfo *card);
+
+/***** core.c *****/
+void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr);
+void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr);
+void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
+                                unsigned int intr);
+void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
+                                 unsigned int intr);
+
+/***** dev.c *****/
+int rsxx_attach_dev(struct rsxx_cardinfo *card);
+void rsxx_detach_dev(struct rsxx_cardinfo *card);
+int rsxx_setup_dev(struct rsxx_cardinfo *card);
+void rsxx_destroy_dev(struct rsxx_cardinfo *card);
+int rsxx_dev_init(void);
+void rsxx_dev_cleanup(void);
+
+/***** dma.c ****/
+typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
+                               void *cb_data,
+                               unsigned int status);
+int rsxx_dma_setup(struct rsxx_cardinfo *card);
+void rsxx_dma_destroy(struct rsxx_cardinfo *card);
+int rsxx_dma_init(void);
+void rsxx_dma_cleanup(void);
+int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+                          struct bio *bio,
+                          atomic_t *n_dmas,
+                          rsxx_dma_cb cb,
+                          void *cb_data);
+
+/***** cregs.c *****/
+int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
+                       unsigned int size8,
+                       void *data,
+                       int byte_stream);
+int rsxx_creg_read(struct rsxx_cardinfo *card,
+                      u32 addr,
+                      unsigned int size8,
+                      void *data,
+                      int byte_stream);
+int rsxx_read_hw_log(struct rsxx_cardinfo *card);
+int rsxx_get_card_state(struct rsxx_cardinfo *card,
+                           unsigned int *state);
+int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8);
+int rsxx_get_num_targets(struct rsxx_cardinfo *card,
+                            unsigned int *n_targets);
+int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
+                                  u32 *capabilities);
+int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd);
+int rsxx_creg_setup(struct rsxx_cardinfo *card);
+void rsxx_creg_destroy(struct rsxx_cardinfo *card);
+int rsxx_creg_init(void);
+void rsxx_creg_cleanup(void);
+
+int rsxx_reg_access(struct rsxx_cardinfo *card,
+                       struct rsxx_reg_access __user *ucmd,
+                       int read);
+
+
+
+#endif /* __RSXX_PRIV_H__ */
index 57763c5..758f2ac 100644 (file)
@@ -1090,10 +1090,13 @@ static const struct block_device_operations floppy_fops = {
 static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
 {
        struct floppy_state *fs = macio_get_drvdata(mdev);
-       struct swim3 __iomem *sw = fs->swim3;
+       struct swim3 __iomem *sw;
 
        if (!fs)
                return;
+
+       sw = fs->swim3;
+
        if (mb_state != MB_FD)
                return;
 
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
deleted file mode 100644 (file)
index ff54052..0000000
+++ /dev/null
@@ -1,1123 +0,0 @@
-/*
- * This file contains the driver for an XT hard disk controller
- * (at least the DTC 5150X) for Linux.
- *
- * Author: Pat Mackinlay, pat@it.com.au
- * Date: 29/09/92
- * 
- * Revised: 01/01/93, ...
- *
- * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler,
- *   kevinf@agora.rain.com)
- * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and
- *   Wim Van Dorst.
- *
- * Revised: 04/04/94 by Risto Kankkunen
- *   Moved the detection code from xd_init() to xd_geninit() as it needed
- *   interrupts enabled and Linus didn't want to enable them in that first
- *   phase. xd_geninit() is the place to do these kinds of things anyway,
- *   he says.
- *
- * Modularized: 04/10/96 by Todd Fries, tfries@umr.edu
- *
- * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl
- *   Fixed some problems with disk initialization and module initiation.
- *   Added support for manual geometry setting (except Seagate controllers)
- *   in form:
- *      xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>]
- *   Recovered DMA access. Abridged messages. Added support for DTC5051CX,
- *   WD1002-27X & XEBEC controllers. Driver uses now some jumper settings.
- *   Extended ioctl() support.
- *
- * Bugfix: 15/02/01, Paul G. - inform queue layer of tiny xd_maxsect.
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
-#include <linux/blkpg.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/gfp.h>
-
-#include <asm/uaccess.h>
-#include <asm/dma.h>
-
-#include "xd.h"
-
-static DEFINE_MUTEX(xd_mutex);
-static void __init do_xd_setup (int *integers);
-#ifdef MODULE
-static int xd[5] = { -1,-1,-1,-1, };
-#endif
-
-#define XD_DONT_USE_DMA                0  /* Initial value. may be overriden using
-                                     "nodma" module option */
-#define XD_INIT_DISK_DELAY     (30)  /* 30 ms delay during disk initialization */
-
-/* Above may need to be increased if a problem with the 2nd drive detection
-   (ST11M controller) or resetting a controller (WD) appears */
-
-static XD_INFO xd_info[XD_MAXDRIVES];
-
-/* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS
-   signature and details to the following list of signatures. A BIOS signature is a string embedded into the first
-   few bytes of your controller's on-board ROM BIOS. To find out what yours is, use something like MS-DOS's DEBUG
-   command. Run DEBUG, and then you can examine your BIOS signature with:
-
-       d xxxx:0000
-
-   where xxxx is the segment of your controller (like C800 or D000 or something). On the ASCII dump at the right, you should
-   be able to see a string mentioning the manufacturer's copyright etc. Add this string into the table below. The parameters
-   in the table are, in order:
-
-       offset                  ; this is the offset (in bytes) from the start of your ROM where the signature starts
-       signature               ; this is the actual text of the signature
-       xd_?_init_controller    ; this is the controller init routine used by your controller
-       xd_?_init_drive         ; this is the drive init routine used by your controller
-
-   The controllers directly supported at the moment are: DTC 5150x, WD 1004A27X, ST11M/R and override. If your controller is
-   made by the same manufacturer as one of these, try using the same init routines as they do. If that doesn't work, your
-   best bet is to use the "override" routines. These routines use a "portable" method of getting the disk's geometry, and
-   may work with your card. If none of these seem to work, try sending me some email and I'll see what I can do <grin>.
-
-   NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver
-   should be able to detect your drive's geometry from this info. (eg: xd=0,5,0x320,3 is the "standard"). */
-
-#include <asm/page.h>
-#define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
-#define xd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-static char *xd_dma_buffer;
-
-static XD_SIGNATURE xd_sigs[] __initdata = {
-       { 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */
-       { 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
-       { 0x000B,"CRD18A   Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */
-       { 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */
-       { 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
-       { 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */
-       { 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */
-       { 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */
-       { 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */
-       { 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */
-       { 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
-       { 0x0008,"(C) Copyright 1984 Western Digital Corp", xd_wd_init_controller, xd_wd_init_drive," Western Dig. 1002s-wx2" },
-       { 0x0008,"(C) Copyright 1986 Western Digital Corporation", xd_wd_init_controller, xd_wd_init_drive," 1986 Western Digital" }, /* jfree@sovereign.org */
-};
-
-static unsigned int xd_bases[] __initdata =
-{
-       0xC8000, 0xCA000, 0xCC000,
-       0xCE000, 0xD0000, 0xD2000,
-       0xD4000, 0xD6000, 0xD8000,
-       0xDA000, 0xDC000, 0xDE000,
-       0xE0000
-};
-
-static DEFINE_SPINLOCK(xd_lock);
-
-static struct gendisk *xd_gendisk[2];
-
-static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static const struct block_device_operations xd_fops = {
-       .owner  = THIS_MODULE,
-       .ioctl  = xd_ioctl,
-       .getgeo = xd_getgeo,
-};
-static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
-static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors;
-static u_char xd_override __initdata = 0, xd_type __initdata = 0;
-static u_short xd_iobase = 0x320;
-static int xd_geo[XD_MAXDRIVES*3] __initdata = { 0, };
-
-static volatile int xdc_busy;
-static struct timer_list xd_watchdog_int;
-
-static volatile u_char xd_error;
-static bool nodma = XD_DONT_USE_DMA;
-
-static struct request_queue *xd_queue;
-
-/* xd_init: register the block device number and set up pointer tables */
-static int __init xd_init(void)
-{
-       u_char i,controller;
-       unsigned int address;
-       int err;
-
-#ifdef MODULE
-       {
-               u_char count = 0;
-               for (i = 4; i > 0; i--)
-                       if (((xd[i] = xd[i-1]) >= 0) && !count)
-                               count = i;
-               if ((xd[0] = count))
-                       do_xd_setup(xd);
-       }
-#endif
-
-       init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;
-
-       err = -EBUSY;
-       if (register_blkdev(XT_DISK_MAJOR, "xd"))
-               goto out1;
-
-       err = -ENOMEM;
-       xd_queue = blk_init_queue(do_xd_request, &xd_lock);
-       if (!xd_queue)
-               goto out1a;
-
-       if (xd_detect(&controller,&address)) {
-
-               printk("Detected a%s controller (type %d) at address %06x\n",
-                       xd_sigs[controller].name,controller,address);
-               if (!request_region(xd_iobase,4,"xd")) {
-                       printk("xd: Ports at 0x%x are not available\n",
-                               xd_iobase);
-                       goto out2;
-               }
-               if (controller)
-                       xd_sigs[controller].init_controller(address);
-               xd_drives = xd_initdrives(xd_sigs[controller].init_drive);
-               
-               printk("Detected %d hard drive%s (using IRQ%d & DMA%d)\n",
-                       xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
-       }
-
-       /*
-        * With the drive detected, xd_maxsectors should now be known.
-        * If xd_maxsectors is 0, nothing was detected and we fall through
-        * to return -ENODEV
-        */
-       if (!xd_dma_buffer && xd_maxsectors) {
-               xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
-               if (!xd_dma_buffer) {
-                       printk(KERN_ERR "xd: Out of memory.\n");
-                       goto out3;
-               }
-       }
-
-       err = -ENODEV;
-       if (!xd_drives)
-               goto out3;
-
-       for (i = 0; i < xd_drives; i++) {
-               XD_INFO *p = &xd_info[i];
-               struct gendisk *disk = alloc_disk(64);
-               if (!disk)
-                       goto Enomem;
-               p->unit = i;
-               disk->major = XT_DISK_MAJOR;
-               disk->first_minor = i<<6;
-               sprintf(disk->disk_name, "xd%c", i+'a');
-               disk->fops = &xd_fops;
-               disk->private_data = p;
-               disk->queue = xd_queue;
-               set_capacity(disk, p->heads * p->cylinders * p->sectors);
-               printk(" %s: CHS=%d/%d/%d\n", disk->disk_name,
-                       p->cylinders, p->heads, p->sectors);
-               xd_gendisk[i] = disk;
-       }
-
-       err = -EBUSY;
-       if (request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) {
-               printk("xd: unable to get IRQ%d\n",xd_irq);
-               goto out4;
-       }
-
-       if (request_dma(xd_dma,"xd")) {
-               printk("xd: unable to get DMA%d\n",xd_dma);
-               goto out5;
-       }
-
-       /* xd_maxsectors depends on controller - so set after detection */
-       blk_queue_max_hw_sectors(xd_queue, xd_maxsectors);
-
-       for (i = 0; i < xd_drives; i++)
-               add_disk(xd_gendisk[i]);
-
-       return 0;
-
-out5:
-       free_irq(xd_irq, NULL);
-out4:
-       for (i = 0; i < xd_drives; i++)
-               put_disk(xd_gendisk[i]);
-out3:
-       if (xd_maxsectors)
-               release_region(xd_iobase,4);
-
-       if (xd_dma_buffer)
-               xd_dma_mem_free((unsigned long)xd_dma_buffer,
-                               xd_maxsectors * 0x200);
-out2:
-       blk_cleanup_queue(xd_queue);
-out1a:
-       unregister_blkdev(XT_DISK_MAJOR, "xd");
-out1:
-       return err;
-Enomem:
-       err = -ENOMEM;
-       while (i--)
-               put_disk(xd_gendisk[i]);
-       goto out3;
-}
-
-/* xd_detect: scan the possible BIOS ROM locations for the signature strings */
-static u_char __init xd_detect (u_char *controller, unsigned int *address)
-{
-       int i, j;
-
-       if (xd_override)
-       {
-               *controller = xd_type;
-               *address = 0;
-               return(1);
-       }
-
-       for (i = 0; i < ARRAY_SIZE(xd_bases); i++) {
-               void __iomem *p = ioremap(xd_bases[i], 0x2000);
-               if (!p)
-                       continue;
-               for (j = 1; j < ARRAY_SIZE(xd_sigs); j++) {
-                       const char *s = xd_sigs[j].string;
-                       if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) {
-                               *controller = j;
-                               xd_type = j;
-                               *address = xd_bases[i];
-                               iounmap(p);
-                               return 1;
-                       }
-               }
-               iounmap(p);
-       }
-       return 0;
-}
-
-/* do_xd_request: handle an incoming request */
-static void do_xd_request (struct request_queue * q)
-{
-       struct request *req;
-
-       if (xdc_busy)
-               return;
-
-       req = blk_fetch_request(q);
-       while (req) {
-               unsigned block = blk_rq_pos(req);
-               unsigned count = blk_rq_cur_sectors(req);
-               XD_INFO *disk = req->rq_disk->private_data;
-               int res = -EIO;
-               int retry;
-
-               if (req->cmd_type != REQ_TYPE_FS)
-                       goto done;
-               if (block + count > get_capacity(req->rq_disk))
-                       goto done;
-               for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
-                       res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
-                                          block, count);
-       done:
-               /* wrap up, 0 = success, -errno = fail */
-               if (!__blk_end_request_cur(req, res))
-                       req = blk_fetch_request(q);
-       }
-}
-
-static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
-       XD_INFO *p = bdev->bd_disk->private_data;
-
-       geo->heads = p->heads;
-       geo->sectors = p->sectors;
-       geo->cylinders = p->cylinders;
-       return 0;
-}
-
-/* xd_ioctl: handle device ioctl's */
-static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
-{
-       switch (cmd) {
-               case HDIO_SET_DMA:
-                       if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-                       if (xdc_busy) return -EBUSY;
-                       nodma = !arg;
-                       if (nodma && xd_dma_buffer) {
-                               xd_dma_mem_free((unsigned long)xd_dma_buffer,
-                                               xd_maxsectors * 0x200);
-                               xd_dma_buffer = NULL;
-                       } else if (!nodma && !xd_dma_buffer) {
-                               xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
-                               if (!xd_dma_buffer) {
-                                       nodma = XD_DONT_USE_DMA;
-                                       return -ENOMEM;
-                               }
-                       }
-                       return 0;
-               case HDIO_GET_DMA:
-                       return put_user(!nodma, (long __user *) arg);
-               case HDIO_GET_MULTCOUNT:
-                       return put_user(xd_maxsectors, (long __user *) arg);
-               default:
-                       return -EINVAL;
-       }
-}
-
-static int xd_ioctl(struct block_device *bdev, fmode_t mode,
-                            unsigned int cmd, unsigned long param)
-{
-       int ret;
-
-       mutex_lock(&xd_mutex);
-       ret = xd_locked_ioctl(bdev, mode, cmd, param);
-       mutex_unlock(&xd_mutex);
-
-       return ret;
-}
-
-/* xd_readwrite: handle a read/write request */
-static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count)
-{
-       int drive = p->unit;
-       u_char cmdblk[6],sense[4];
-       u_short track,cylinder;
-       u_char head,sector,control,mode = PIO_MODE,temp;
-       char **real_buffer;
-       register int i;
-       
-#ifdef DEBUG_READWRITE
-       printk("xd_readwrite: operation = %s, drive = %d, buffer = 0x%X, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count);
-#endif /* DEBUG_READWRITE */
-
-       spin_unlock_irq(&xd_lock);
-
-       control = p->control;
-       if (!xd_dma_buffer)
-               xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
-       while (count) {
-               temp = count < xd_maxsectors ? count : xd_maxsectors;
-
-               track = block / p->sectors;
-               head = track % p->heads;
-               cylinder = track / p->heads;
-               sector = block % p->sectors;
-
-#ifdef DEBUG_READWRITE
-               printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp);
-#endif /* DEBUG_READWRITE */
-
-               if (xd_dma_buffer) {
-                       mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200);
-                       real_buffer = &xd_dma_buffer;
-                       for (i=0; i < (temp * 0x200); i++)
-                               xd_dma_buffer[i] = buffer[i];
-               }
-               else
-                       real_buffer = &buffer;
-
-               xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control);
-
-               switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) {
-                       case 1:
-                               printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
-                               xd_recalibrate(drive);
-                               spin_lock_irq(&xd_lock);
-                               return -EIO;
-                       case 2:
-                               if (sense[0] & 0x30) {
-                                       printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
-                                       switch ((sense[0] & 0x30) >> 4) {
-                                       case 0: printk("drive error, code = 0x%X",sense[0] & 0x0F);
-                                               break;
-                                       case 1: printk("controller error, code = 0x%X",sense[0] & 0x0F);
-                                               break;
-                                       case 2: printk("command error, code = 0x%X",sense[0] & 0x0F);
-                                               break;
-                                       case 3: printk("miscellaneous error, code = 0x%X",sense[0] & 0x0F);
-                                               break;
-                                       }
-                               }
-                               if (sense[0] & 0x80)
-                                       printk(" - CHS = %d/%d/%d\n",((sense[2] & 0xC0) << 2) | sense[3],sense[1] & 0x1F,sense[2] & 0x3F);
-                               /*      reported drive number = (sense[1] & 0xE0) >> 5 */
-                               else
-                                       printk(" - no valid disk address\n");
-                               spin_lock_irq(&xd_lock);
-                               return -EIO;
-               }
-               if (xd_dma_buffer)
-                       for (i=0; i < (temp * 0x200); i++)
-                               buffer[i] = xd_dma_buffer[i];
-
-               count -= temp, buffer += temp * 0x200, block += temp;
-       }
-       spin_lock_irq(&xd_lock);
-       return 0;
-}
-
-/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
-static void xd_recalibrate (u_char drive)
-{
-       u_char cmdblk[6];
-       
-       xd_build(cmdblk,CMD_RECALIBRATE,drive,0,0,0,0,0);
-       if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 8))
-               printk("xd%c: warning! error recalibrating, controller may be unstable\n", 'a'+drive);
-}
-
-/* xd_interrupt_handler: interrupt service routine */
-static irqreturn_t xd_interrupt_handler(int irq, void *dev_id)
-{
-       if (inb(XD_STATUS) & STAT_INTERRUPT) {                                                  /* check if it was our device */
-#ifdef DEBUG_OTHER
-               printk("xd_interrupt_handler: interrupt detected\n");
-#endif /* DEBUG_OTHER */
-               outb(0,XD_CONTROL);                                                             /* acknowledge interrupt */
-               wake_up(&xd_wait_int);  /* and wake up sleeping processes */
-               return IRQ_HANDLED;
-       }
-       else
-               printk("xd: unexpected interrupt\n");
-       return IRQ_NONE;
-}
-
-/* xd_setup_dma: set up the DMA controller for a data transfer */
-static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count)
-{
-       unsigned long f;
-       
-       if (nodma)
-               return (PIO_MODE);
-       if (((unsigned long) buffer & 0xFFFF0000) != (((unsigned long) buffer + count) & 0xFFFF0000)) {
-#ifdef DEBUG_OTHER
-               printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n");
-#endif /* DEBUG_OTHER */
-               return (PIO_MODE);
-       }
-       
-       f=claim_dma_lock();
-       disable_dma(xd_dma);
-       clear_dma_ff(xd_dma);
-       set_dma_mode(xd_dma,mode);
-       set_dma_addr(xd_dma, (unsigned long) buffer);
-       set_dma_count(xd_dma,count);
-       
-       release_dma_lock(f);
-
-       return (DMA_MODE);                      /* use DMA and INT */
-}
-
-/* xd_build: put stuff into an array in a format suitable for the controller */
-static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control)
-{
-       cmdblk[0] = command;
-       cmdblk[1] = ((drive & 0x07) << 5) | (head & 0x1F);
-       cmdblk[2] = ((cylinder & 0x300) >> 2) | (sector & 0x3F);
-       cmdblk[3] = cylinder & 0xFF;
-       cmdblk[4] = count;
-       cmdblk[5] = control;
-       
-       return (cmdblk);
-}
-
-static void xd_watchdog (unsigned long unused)
-{
-       xd_error = 1;
-       wake_up(&xd_wait_int);
-}
-
-/* xd_waitport: waits until port & mask == flags or a timeout occurs. return 1 for a timeout */
-static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout)
-{
-       u_long expiry = jiffies + timeout;
-       int success;
-
-       xdc_busy = 1;
-       while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry))
-               schedule_timeout_uninterruptible(1);
-       xdc_busy = 0;
-       return (success);
-}
-
-static inline u_int xd_wait_for_IRQ (void)
-{
-       unsigned long flags;
-       xd_watchdog_int.expires = jiffies + 8 * HZ;
-       add_timer(&xd_watchdog_int);
-       
-       flags=claim_dma_lock();
-       enable_dma(xd_dma);
-       release_dma_lock(flags);
-       
-       sleep_on(&xd_wait_int);
-       del_timer(&xd_watchdog_int);
-       xdc_busy = 0;
-       
-       flags=claim_dma_lock();
-       disable_dma(xd_dma);
-       release_dma_lock(flags);
-       
-       if (xd_error) {
-               printk("xd: missed IRQ - command aborted\n");
-               xd_error = 0;
-               return (1);
-       }
-       return (0);
-}
-
-/* xd_command: handle all data transfers necessary for a single command */
-static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout)
-{
-       u_char cmdblk[6],csb,complete = 0;
-
-#ifdef DEBUG_COMMAND
-       printk("xd_command: command = 0x%X, mode = 0x%X, indata = 0x%X, outdata = 0x%X, sense = 0x%X\n",command,mode,indata,outdata,sense);
-#endif /* DEBUG_COMMAND */
-
-       outb(0,XD_SELECT);
-       outb(mode,XD_CONTROL);
-
-       if (xd_waitport(XD_STATUS,STAT_SELECT,STAT_SELECT,timeout))
-               return (1);
-
-       while (!complete) {
-               if (xd_waitport(XD_STATUS,STAT_READY,STAT_READY,timeout))
-                       return (1);
-
-               switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) {
-                       case 0:
-                               if (mode == DMA_MODE) {
-                                       if (xd_wait_for_IRQ())
-                                               return (1);
-                               } else
-                                       outb(outdata ? *outdata++ : 0,XD_DATA);
-                               break;
-                       case STAT_INPUT:
-                               if (mode == DMA_MODE) {
-                                       if (xd_wait_for_IRQ())
-                                               return (1);
-                               } else
-                                       if (indata)
-                                               *indata++ = inb(XD_DATA);
-                                       else
-                                               inb(XD_DATA);
-                               break;
-                       case STAT_COMMAND:
-                               outb(command ? *command++ : 0,XD_DATA);
-                               break;
-                       case STAT_COMMAND | STAT_INPUT:
-                               complete = 1;
-                               break;
-               }
-       }
-       csb = inb(XD_DATA);
-
-       if (xd_waitport(XD_STATUS,0,STAT_SELECT,timeout))                                       /* wait until deselected */
-               return (1);
-
-       if (csb & CSB_ERROR) {                                                                  /* read sense data if error */
-               xd_build(cmdblk,CMD_SENSE,(csb & CSB_LUN) >> 5,0,0,0,0,0);
-               if (xd_command(cmdblk,0,sense,NULL,NULL,XD_TIMEOUT))
-                       printk("xd: warning! sense command failed!\n");
-       }
-
-#ifdef DEBUG_COMMAND
-       printk("xd_command: completed with csb = 0x%X\n",csb);
-#endif /* DEBUG_COMMAND */
-
-       return (csb & CSB_ERROR);
-}
-
-static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
-{
-       u_char cmdblk[6],i,count = 0;
-
-       for (i = 0; i < XD_MAXDRIVES; i++) {
-               xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
-               if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
-                       msleep_interruptible(XD_INIT_DISK_DELAY);
-
-                       init_drive(count);
-                       count++;
-
-                       msleep_interruptible(XD_INIT_DISK_DELAY);
-               }
-       }
-       return (count);
-}
-
-static void __init xd_manual_geo_set (u_char drive)
-{
-       xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]);
-       xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]);
-       xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]);
-}
-
-static void __init xd_dtc_init_controller (unsigned int address)
-{
-       switch (address) {
-               case 0x00000:
-               case 0xC8000:   break;                  /*initial: 0x320 */
-               case 0xCA000:   xd_iobase = 0x324; 
-               case 0xD0000:                           /*5150CX*/
-               case 0xD8000:   break;                  /*5150CX & 5150XL*/
-               default:        printk("xd_dtc_init_controller: unsupported BIOS address %06x\n",address);
-                               break;
-       }
-       xd_maxsectors = 0x01;           /* my card seems to have trouble doing multi-block transfers? */
-
-       outb(0,XD_RESET);               /* reset the controller */
-}
-
-
-static void __init xd_dtc5150cx_init_drive (u_char drive)
-{
-       /* values from controller's BIOS - BIOS chip may be removed */
-       static u_short geometry_table[][4] = {
-               {0x200,8,0x200,0x100},
-               {0x267,2,0x267,0x267},
-               {0x264,4,0x264,0x80},
-               {0x132,4,0x132,0x0},
-               {0x132,2,0x80, 0x132},
-               {0x177,8,0x177,0x0},
-               {0x132,8,0x84, 0x0},
-               {},  /* not used */
-               {0x132,6,0x80, 0x100},
-               {0x200,6,0x100,0x100},
-               {0x264,2,0x264,0x80},
-               {0x280,4,0x280,0x100},
-               {0x2B9,3,0x2B9,0x2B9},
-               {0x2B9,5,0x2B9,0x2B9},
-               {0x280,6,0x280,0x100},
-               {0x132,4,0x132,0x0}};
-       u_char n;
-
-       n = inb(XD_JUMPER);
-       n = (drive ? n : (n >> 2)) & 0x33;
-       n = (n | (n >> 2)) & 0x0F;
-       if (xd_geo[3*drive])
-               xd_manual_geo_set(drive);
-       else
-               if (n != 7) {   
-                       xd_info[drive].heads = (u_char)(geometry_table[n][1]);                  /* heads */
-                       xd_info[drive].cylinders = geometry_table[n][0];        /* cylinders */
-                       xd_info[drive].sectors = 17;                            /* sectors */
-#if 0
-                       xd_info[drive].rwrite = geometry_table[n][2];   /* reduced write */
-                       xd_info[drive].precomp = geometry_table[n][3]           /* write precomp */
-                       xd_info[drive].ecc = 0x0B;                              /* ecc length */
-#endif /* 0 */
-               }
-               else {
-                       printk("xd%c: undetermined drive geometry\n",'a'+drive);
-                       return;
-               }
-       xd_info[drive].control = 5;                             /* control byte */
-       xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
-       xd_recalibrate(drive);
-}
-
-static void __init xd_dtc_init_drive (u_char drive)
-{
-       u_char cmdblk[6],buf[64];
-
-       xd_build(cmdblk,CMD_DTCGETGEOM,drive,0,0,0,0,0);
-       if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
-               xd_info[drive].heads = buf[0x0A];                       /* heads */
-               xd_info[drive].cylinders = ((u_short *) (buf))[0x04];   /* cylinders */
-               xd_info[drive].sectors = 17;                            /* sectors */
-               if (xd_geo[3*drive])
-                       xd_manual_geo_set(drive);
-#if 0
-               xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05];  /* reduced write */
-               xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06]; /* write precomp */
-               xd_info[drive].ecc = buf[0x0F];                         /* ecc length */
-#endif /* 0 */
-               xd_info[drive].control = 0;                             /* control byte */
-
-               xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf + 1))[0x05],((u_short *) (buf + 1))[0x06],buf[0x0F]);
-               xd_build(cmdblk,CMD_DTCSETSTEP,drive,0,0,0,0,7);
-               if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
-                       printk("xd_dtc_init_drive: error setting step rate for xd%c\n", 'a'+drive);
-       }
-       else
-               printk("xd_dtc_init_drive: error reading geometry for xd%c\n", 'a'+drive);
-}
-
-static void __init xd_wd_init_controller (unsigned int address)
-{
-       switch (address) {
-               case 0x00000:
-               case 0xC8000:   break;                  /*initial: 0x320 */
-               case 0xCA000:   xd_iobase = 0x324; break;
-               case 0xCC000:   xd_iobase = 0x328; break;
-               case 0xCE000:   xd_iobase = 0x32C; break;
-               case 0xD0000:   xd_iobase = 0x328; break; /* ? */
-               case 0xD8000:   xd_iobase = 0x32C; break; /* ? */
-               default:        printk("xd_wd_init_controller: unsupported BIOS address %06x\n",address);
-                               break;
-       }
-       xd_maxsectors = 0x01;           /* this one doesn't wrap properly either... */
-
-       outb(0,XD_RESET);               /* reset the controller */
-
-       msleep(XD_INIT_DISK_DELAY);
-}
-
-static void __init xd_wd_init_drive (u_char drive)
-{
-       /* values from controller's BIOS - BIOS may be disabled */
-       static u_short geometry_table[][4] = {
-               {0x264,4,0x1C2,0x1C2},   /* common part */
-               {0x132,4,0x099,0x0},
-               {0x267,2,0x1C2,0x1C2},
-               {0x267,4,0x1C2,0x1C2},
-
-               {0x334,6,0x335,0x335},   /* 1004 series RLL */
-               {0x30E,4,0x30F,0x3DC},
-               {0x30E,2,0x30F,0x30F},
-               {0x267,4,0x268,0x268},
-
-               {0x3D5,5,0x3D6,0x3D6},   /* 1002 series RLL */
-               {0x3DB,7,0x3DC,0x3DC},
-               {0x264,4,0x265,0x265},
-               {0x267,4,0x268,0x268}};
-
-       u_char cmdblk[6],buf[0x200];
-       u_char n = 0,rll,jumper_state,use_jumper_geo;
-       u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6');
-       
-       jumper_state = ~(inb(0x322));
-       if (jumper_state & 0x40)
-               xd_irq = 9;
-       rll = (jumper_state & 0x30) ? (0x04 << wd_1002) : 0;
-       xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0);
-       if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
-               xd_info[drive].heads = buf[0x1AF];                              /* heads */
-               xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6];       /* cylinders */
-               xd_info[drive].sectors = 17;                                    /* sectors */
-               if (xd_geo[3*drive])
-                       xd_manual_geo_set(drive);
-#if 0
-               xd_info[drive].rwrite = ((u_short *) (buf))[0xD8];              /* reduced write */
-               xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA];            /* write precomp */
-               xd_info[drive].ecc = buf[0x1B4];                                /* ecc length */
-#endif /* 0 */
-               xd_info[drive].control = buf[0x1B5];                            /* control byte */
-               use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders);
-               if (xd_geo[3*drive]) {
-                       xd_manual_geo_set(drive);
-                       xd_info[drive].control = rll ? 7 : 5;
-               }
-               else if (use_jumper_geo) {
-                       n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll;
-                       xd_info[drive].cylinders = geometry_table[n][0];
-                       xd_info[drive].heads = (u_char)(geometry_table[n][1]);
-                       xd_info[drive].control = rll ? 7 : 5;
-#if 0
-                       xd_info[drive].rwrite = geometry_table[n][2];
-                       xd_info[drive].wprecomp = geometry_table[n][3];
-                       xd_info[drive].ecc = 0x0B;
-#endif /* 0 */
-               }
-               if (!wd_1002) {
-                       if (use_jumper_geo)
-                               xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
-                                       geometry_table[n][2],geometry_table[n][3],0x0B);
-                       else
-                               xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
-                                       ((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]);
-               }
-       /* 1002 based RLL controller requests converted addressing, but reports physical 
-          (physical 26 sec., logical 17 sec.) 
-          1004 based ???? */
-               if (rll & wd_1002) {
-                       if ((xd_info[drive].cylinders *= 26,
-                            xd_info[drive].cylinders /= 17) > 1023)
-                               xd_info[drive].cylinders = 1023;  /* 1024 ? */
-#if 0
-                       xd_info[drive].rwrite *= 26; 
-                       xd_info[drive].rwrite /= 17;
-                       xd_info[drive].wprecomp *= 26
-                       xd_info[drive].wprecomp /= 17;
-#endif /* 0 */
-               }
-       }
-       else
-               printk("xd_wd_init_drive: error reading geometry for xd%c\n",'a'+drive);        
-
-}
-
-static void __init xd_seagate_init_controller (unsigned int address)
-{
-       switch (address) {
-               case 0x00000:
-               case 0xC8000:   break;                  /*initial: 0x320 */
-               case 0xD0000:   xd_iobase = 0x324; break;
-               case 0xD8000:   xd_iobase = 0x328; break;
-               case 0xE0000:   xd_iobase = 0x32C; break;
-               default:        printk("xd_seagate_init_controller: unsupported BIOS address %06x\n",address);
-                               break;
-       }
-       xd_maxsectors = 0x40;
-
-       outb(0,XD_RESET);               /* reset the controller */
-}
-
-static void __init xd_seagate_init_drive (u_char drive)
-{
-       u_char cmdblk[6],buf[0x200];
-
-       xd_build(cmdblk,CMD_ST11GETGEOM,drive,0,0,0,1,0);
-       if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
-               xd_info[drive].heads = buf[0x04];                               /* heads */
-               xd_info[drive].cylinders = (buf[0x02] << 8) | buf[0x03];        /* cylinders */
-               xd_info[drive].sectors = buf[0x05];                             /* sectors */
-               xd_info[drive].control = 0;                                     /* control byte */
-       }
-       else
-               printk("xd_seagate_init_drive: error reading geometry from xd%c\n", 'a'+drive);
-}
-
-/* Omti support courtesy Dirk Melchers */
-static void __init xd_omti_init_controller (unsigned int address)
-{
-       switch (address) {
-               case 0x00000:
-               case 0xC8000:   break;                  /*initial: 0x320 */
-               case 0xD0000:   xd_iobase = 0x324; break;
-               case 0xD8000:   xd_iobase = 0x328; break;
-               case 0xE0000:   xd_iobase = 0x32C; break;
-               default:        printk("xd_omti_init_controller: unsupported BIOS address %06x\n",address);
-                               break;
-       }
-       
-       xd_maxsectors = 0x40;
-
-       outb(0,XD_RESET);               /* reset the controller */
-}
-
-static void __init xd_omti_init_drive (u_char drive)
-{
-       /* gets infos from drive */
-       xd_override_init_drive(drive);
-
-       /* set other parameters, Hardcoded, not that nice :-) */
-       xd_info[drive].control = 2;
-}
-
-/* Xebec support (AK) */
-static void __init xd_xebec_init_controller (unsigned int address)
-{
-/* iobase may be set manually in range 0x300 - 0x33C
-      irq may be set manually to 2(9),3,4,5,6,7
-      dma may be set manually to 1,2,3
-       (How to detect them ???)
-BIOS address may be set manually in range 0x0 - 0xF8000
-If you need non-standard settings use the xd=... command */
-
-       switch (address) {
-               case 0x00000:
-               case 0xC8000:   /* initially: xd_iobase==0x320 */
-               case 0xD0000:
-               case 0xD2000:
-               case 0xD4000:
-               case 0xD6000:
-               case 0xD8000:
-               case 0xDA000:
-               case 0xDC000:
-               case 0xDE000:
-               case 0xE0000:   break;
-               default:        printk("xd_xebec_init_controller: unsupported BIOS address %06x\n",address);
-                               break;
-               }
-
-       xd_maxsectors = 0x01;
-       outb(0,XD_RESET);               /* reset the controller */
-
-       msleep(XD_INIT_DISK_DELAY);
-}
-
-static void __init xd_xebec_init_drive (u_char drive)
-{
-       /* values from controller's BIOS - BIOS chip may be removed */
-       static u_short geometry_table[][5] = {
-               {0x132,4,0x080,0x080,0x7},
-               {0x132,4,0x080,0x080,0x17},
-               {0x264,2,0x100,0x100,0x7},
-               {0x264,2,0x100,0x100,0x17},
-               {0x132,8,0x080,0x080,0x7},
-               {0x132,8,0x080,0x080,0x17},
-               {0x264,4,0x100,0x100,0x6},
-               {0x264,4,0x100,0x100,0x17},
-               {0x2BC,5,0x2BC,0x12C,0x6},
-               {0x3A5,4,0x3A5,0x3A5,0x7},
-               {0x26C,6,0x26C,0x26C,0x7},
-               {0x200,8,0x200,0x100,0x17},
-               {0x400,5,0x400,0x400,0x7},
-               {0x400,6,0x400,0x400,0x7},
-               {0x264,8,0x264,0x200,0x17},
-               {0x33E,7,0x33E,0x200,0x7}};
-       u_char n;
-
-       n = inb(XD_JUMPER) & 0x0F; /* BIOS's drive number: same geometry 
-                                       is assumed for BOTH drives */
-       if (xd_geo[3*drive])
-               xd_manual_geo_set(drive);
-       else {
-               xd_info[drive].heads = (u_char)(geometry_table[n][1]);                  /* heads */
-               xd_info[drive].cylinders = geometry_table[n][0];        /* cylinders */
-               xd_info[drive].sectors = 17;                            /* sectors */
-#if 0
-               xd_info[drive].rwrite = geometry_table[n][2];   /* reduced write */
-               xd_info[drive].precomp = geometry_table[n][3]           /* write precomp */
-               xd_info[drive].ecc = 0x0B;                              /* ecc length */
-#endif /* 0 */
-       }
-       xd_info[drive].control = geometry_table[n][4];                  /* control byte */
-       xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
-       xd_recalibrate(drive);
-}
-
-/* xd_override_init_drive: this finds disk geometry in a "binary search" style, narrowing in on the "correct" number of heads
-   etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). */
-static void __init xd_override_init_drive (u_char drive)
-{
-       u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 };
-       u_char cmdblk[6],i;
-
-       if (xd_geo[3*drive])
-               xd_manual_geo_set(drive);
-       else {
-               for (i = 0; i < 3; i++) {
-                       while (min[i] != max[i] - 1) {
-                               test[i] = (min[i] + max[i]) / 2;
-                               xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0);
-                               if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
-                                       min[i] = test[i];
-                               else
-                                       max[i] = test[i];
-                       }
-                       test[i] = min[i];
-               }
-               xd_info[drive].heads = (u_char) min[0] + 1;
-               xd_info[drive].cylinders = (u_short) min[1] + 1;
-               xd_info[drive].sectors = (u_char) min[2] + 1;
-       }
-       xd_info[drive].control = 0;
-}
-
-/* xd_setup: initialise controller from command line parameters */
-static void __init do_xd_setup (int *integers)
-{
-       switch (integers[0]) {
-               case 4: if (integers[4] < 0)
-                               nodma = 1;
-                       else if (integers[4] < 8)
-                               xd_dma = integers[4];
-               case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC))
-                               xd_iobase = integers[3];
-               case 2: if ((integers[2] > 0) && (integers[2] < 16))
-                               xd_irq = integers[2];
-               case 1: xd_override = 1;
-                       if ((integers[1] >= 0) && (integers[1] < ARRAY_SIZE(xd_sigs)))
-                               xd_type = integers[1];
-               case 0: break;
-               default:printk("xd: too many parameters for xd\n");
-       }
-       xd_maxsectors = 0x01;
-}
-
-/* xd_setparam: set the drive characteristics */
-static void __init xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc)
-{
-       u_char cmdblk[14];
-
-       xd_build(cmdblk,command,drive,0,0,0,0,0);
-       cmdblk[6] = (u_char) (cylinders >> 8) & 0x03;
-       cmdblk[7] = (u_char) (cylinders & 0xFF);
-       cmdblk[8] = heads & 0x1F;
-       cmdblk[9] = (u_char) (rwrite >> 8) & 0x03;
-       cmdblk[10] = (u_char) (rwrite & 0xFF);
-       cmdblk[11] = (u_char) (wprecomp >> 8) & 0x03;
-       cmdblk[12] = (u_char) (wprecomp & 0xFF);
-       cmdblk[13] = ecc;
-
-       /* Some controllers require geometry info as data, not command */
-
-       if (xd_command(cmdblk,PIO_MODE,NULL,&cmdblk[6],NULL,XD_TIMEOUT * 2))
-               printk("xd: error setting characteristics for xd%c\n", 'a'+drive);
-}
-
-
-#ifdef MODULE
-
-module_param_array(xd, int, NULL, 0);
-module_param_array(xd_geo, int, NULL, 0);
-module_param(nodma, bool, 0);
-
-MODULE_LICENSE("GPL");
-
-void cleanup_module(void)
-{
-       int i;
-       unregister_blkdev(XT_DISK_MAJOR, "xd");
-       for (i = 0; i < xd_drives; i++) {
-               del_gendisk(xd_gendisk[i]);
-               put_disk(xd_gendisk[i]);
-       }
-       blk_cleanup_queue(xd_queue);
-       release_region(xd_iobase,4);
-       if (xd_drives) {
-               free_irq(xd_irq, NULL);
-               free_dma(xd_dma);
-               if (xd_dma_buffer)
-                       xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200);
-       }
-}
-#else
-
-static int __init xd_setup (char *str)
-{
-       int ints[5];
-       get_options (str, ARRAY_SIZE (ints), ints);
-       do_xd_setup (ints);
-       return 1;
-}
-
-/* xd_manual_geo_init: initialise drive geometry from command line parameters
-   (used only for WD drives) */
-static int __init xd_manual_geo_init (char *str)
-{
-       int i, integers[1 + 3*XD_MAXDRIVES];
-
-       get_options (str, ARRAY_SIZE (integers), integers);
-       if (integers[0]%3 != 0) {
-               printk("xd: incorrect number of parameters for xd_geo\n");
-               return 1;
-       }
-       for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++)
-               xd_geo[i] = integers[i+1];
-       return 1;
-}
-
-__setup ("xd=", xd_setup);
-__setup ("xd_geo=", xd_manual_geo_init);
-
-#endif /* MODULE */
-
-module_init(xd_init);
-MODULE_ALIAS_BLOCKDEV_MAJOR(XT_DISK_MAJOR);
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
deleted file mode 100644 (file)
index 37cacef..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef _LINUX_XD_H
-#define _LINUX_XD_H
-
-/*
- * This file contains the definitions for the IO ports and errors etc. for XT hard disk controllers (at least the DTC 5150X).
- *
- * Author: Pat Mackinlay, pat@it.com.au
- * Date: 29/09/92
- *
- * Revised: 01/01/93, ...
- *
- * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, kevinf@agora.rain.com)
- * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and Wim Van Dorst.
- */
-
-#include <linux/interrupt.h>
-
-/* XT hard disk controller registers */
-#define XD_DATA                (xd_iobase + 0x00)      /* data RW register */
-#define XD_RESET       (xd_iobase + 0x01)      /* reset WO register */
-#define XD_STATUS      (xd_iobase + 0x01)      /* status RO register */
-#define XD_SELECT      (xd_iobase + 0x02)      /* select WO register */
-#define XD_JUMPER      (xd_iobase + 0x02)      /* jumper RO register */
-#define XD_CONTROL     (xd_iobase + 0x03)      /* DMAE/INTE WO register */
-#define XD_RESERVED    (xd_iobase + 0x03)      /* reserved */
-
-/* XT hard disk controller commands (incomplete list) */
-#define CMD_TESTREADY  0x00    /* test drive ready */
-#define CMD_RECALIBRATE        0x01    /* recalibrate drive */
-#define CMD_SENSE      0x03    /* request sense */
-#define CMD_FORMATDRV  0x04    /* format drive */
-#define CMD_VERIFY     0x05    /* read verify */
-#define CMD_FORMATTRK  0x06    /* format track */
-#define CMD_FORMATBAD  0x07    /* format bad track */
-#define CMD_READ       0x08    /* read */
-#define CMD_WRITE      0x0A    /* write */
-#define CMD_SEEK       0x0B    /* seek */
-
-/* Controller specific commands */
-#define CMD_DTCSETPARAM        0x0C    /* set drive parameters (DTC 5150X & CX only?) */
-#define CMD_DTCGETECC  0x0D    /* get ecc error length (DTC 5150X only?) */
-#define CMD_DTCREADBUF 0x0E    /* read sector buffer (DTC 5150X only?) */
-#define CMD_DTCWRITEBUF 0x0F   /* write sector buffer (DTC 5150X only?) */
-#define CMD_DTCREMAPTRK        0x11    /* assign alternate track (DTC 5150X only?) */
-#define CMD_DTCGETPARAM        0xFB    /* get drive parameters (DTC 5150X only?) */
-#define CMD_DTCSETSTEP 0xFC    /* set step rate (DTC 5150X only?) */
-#define CMD_DTCSETGEOM 0xFE    /* set geometry data (DTC 5150X only?) */
-#define CMD_DTCGETGEOM 0xFF    /* get geometry data (DTC 5150X only?) */
-#define CMD_ST11GETGEOM 0xF8   /* get geometry data (Seagate ST11R/M only?) */
-#define CMD_WDSETPARAM 0x0C    /* set drive parameters (WD 1004A27X only?) */
-#define CMD_XBSETPARAM 0x0C    /* set drive parameters (XEBEC only?) */
-
-/* Bits for command status byte */
-#define CSB_ERROR      0x02    /* error */
-#define CSB_LUN                0x20    /* logical Unit Number */
-
-/* XT hard disk controller status bits */
-#define STAT_READY     0x01    /* controller is ready */
-#define STAT_INPUT     0x02    /* data flowing from controller to host */
-#define STAT_COMMAND   0x04    /* controller in command phase */
-#define STAT_SELECT    0x08    /* controller is selected */
-#define STAT_REQUEST   0x10    /* controller requesting data */
-#define STAT_INTERRUPT 0x20    /* controller requesting interrupt */
-
-/* XT hard disk controller control bits */
-#define PIO_MODE       0x00    /* control bits to set for PIO */
-#define DMA_MODE       0x03    /* control bits to set for DMA & interrupt */
-
-#define XD_MAXDRIVES   2       /* maximum 2 drives */
-#define XD_TIMEOUT     HZ      /* 1 second timeout */
-#define XD_RETRIES     4       /* maximum 4 retries */
-
-#undef DEBUG                   /* define for debugging output */
-
-#ifdef DEBUG
-       #define DEBUG_STARTUP   /* debug driver initialisation */
-       #define DEBUG_OVERRIDE  /* debug override geometry detection */
-       #define DEBUG_READWRITE /* debug each read/write command */
-       #define DEBUG_OTHER     /* debug misc. interrupt/DMA stuff */
-       #define DEBUG_COMMAND   /* debug each controller command */
-#endif /* DEBUG */
-
-/* this structure defines the XT drives and their types */
-typedef struct {
-       u_char heads;
-       u_short cylinders;
-       u_char sectors;
-       u_char control;
-       int unit;
-} XD_INFO;
-
-/* this structure defines a ROM BIOS signature */
-typedef struct {
-       unsigned int offset;
-       const char *string;
-       void (*init_controller)(unsigned int address);
-       void (*init_drive)(u_char drive);
-       const char *name;
-} XD_SIGNATURE;
-
-#ifndef MODULE
-static int xd_manual_geo_init (char *command);
-#endif /* MODULE */
-static u_char xd_detect (u_char *controller, unsigned int *address);
-static u_char xd_initdrives (void (*init_drive)(u_char drive));
-
-static void do_xd_request (struct request_queue * q);
-static int xd_ioctl (struct block_device *bdev,fmode_t mode,unsigned int cmd,unsigned long arg);
-static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
-static void xd_recalibrate (u_char drive);
-
-static irqreturn_t xd_interrupt_handler(int irq, void *dev_id);
-static u_char xd_setup_dma (u_char opcode,u_char *buffer,u_int count);
-static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control);
-static void xd_watchdog (unsigned long unused);
-static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout);
-static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout);
-
-/* card specific setup and geometry gathering code */
-static void xd_dtc_init_controller (unsigned int address);
-static void xd_dtc5150cx_init_drive (u_char drive);
-static void xd_dtc_init_drive (u_char drive);
-static void xd_wd_init_controller (unsigned int address);
-static void xd_wd_init_drive (u_char drive);
-static void xd_seagate_init_controller (unsigned int address);
-static void xd_seagate_init_drive (u_char drive);
-static void xd_omti_init_controller (unsigned int address);
-static void xd_omti_init_drive (u_char drive);
-static void xd_xebec_init_controller (unsigned int address);
-static void xd_xebec_init_drive (u_char drive);
-static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc);
-static void xd_override_init_drive (u_char drive);
-
-#endif /* _LINUX_XD_H */
index 5ac841f..de1f319 100644 (file)
@@ -46,6 +46,7 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
 #include "common.h"
 
 /*
@@ -239,6 +240,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                segs_to_unmap);
                        BUG_ON(ret);
+                       free_xenballooned_pages(segs_to_unmap, pages);
                        segs_to_unmap = 0;
                }
 
@@ -527,8 +529,8 @@ static int xen_blkbk_map(struct blkif_request *req,
                                GFP_KERNEL);
                        if (!persistent_gnt)
                                return -ENOMEM;
-                       persistent_gnt->page = alloc_page(GFP_KERNEL);
-                       if (!persistent_gnt->page) {
+                       if (alloc_xenballooned_pages(1, &persistent_gnt->page,
+                           false)) {
                                kfree(persistent_gnt);
                                return -ENOMEM;
                        }
@@ -879,7 +881,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                goto fail_response;
        }
 
-       preq.dev           = req->u.rw.handle;
        preq.sector_number = req->u.rw.sector_number;
        preq.nr_sects      = 0;
 
index 6398072..5e237f6 100644 (file)
@@ -367,6 +367,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
                be->blkif = NULL;
        }
 
+       kfree(be->mode);
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
        return 0;
@@ -502,6 +503,7 @@ static void backend_changed(struct xenbus_watch *watch,
                = container_of(watch, struct backend_info, backend_watch);
        struct xenbus_device *dev = be->dev;
        int cdrom = 0;
+       unsigned long handle;
        char *device_type;
 
        DPRINTK("");
@@ -521,10 +523,10 @@ static void backend_changed(struct xenbus_watch *watch,
                return;
        }
 
-       if ((be->major || be->minor) &&
-           ((be->major != major) || (be->minor != minor))) {
-               pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
-                       be->major, be->minor, major, minor);
+       if (be->major | be->minor) {
+               if (be->major != major || be->minor != minor)
+                       pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+                               be->major, be->minor, major, minor);
                return;
        }
 
@@ -542,36 +544,33 @@ static void backend_changed(struct xenbus_watch *watch,
                kfree(device_type);
        }
 
-       if (be->major == 0 && be->minor == 0) {
-               /* Front end dir is a number, which is used as the handle. */
-
-               char *p = strrchr(dev->otherend, '/') + 1;
-               long handle;
-               err = strict_strtoul(p, 0, &handle);
-               if (err)
-                       return;
+       /* Front end dir is a number, which is used as the handle. */
+       err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
+       if (err)
+               return;
 
-               be->major = major;
-               be->minor = minor;
+       be->major = major;
+       be->minor = minor;
 
-               err = xen_vbd_create(be->blkif, handle, major, minor,
-                                (NULL == strchr(be->mode, 'w')), cdrom);
-               if (err) {
-                       be->major = 0;
-                       be->minor = 0;
-                       xenbus_dev_fatal(dev, err, "creating vbd structure");
-                       return;
-               }
+       err = xen_vbd_create(be->blkif, handle, major, minor,
+                            !strchr(be->mode, 'w'), cdrom);
 
+       if (err)
+               xenbus_dev_fatal(dev, err, "creating vbd structure");
+       else {
                err = xenvbd_sysfs_addif(dev);
                if (err) {
                        xen_vbd_free(&be->blkif->vbd);
-                       be->major = 0;
-                       be->minor = 0;
                        xenbus_dev_fatal(dev, err, "creating sysfs entries");
-                       return;
                }
+       }
 
+       if (err) {
+               kfree(be->mode);
+               be->mode = NULL;
+               be->major = 0;
+               be->minor = 0;
+       } else {
                /* We're potentially connected now */
                xen_update_blkif_status(be->blkif);
        }
index 11043c1..c3dae2e 100644 (file)
@@ -791,7 +791,7 @@ static void blkif_restart_queue(struct work_struct *work)
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
        struct llist_node *all_gnts;
-       struct grant *persistent_gnt;
+       struct grant *persistent_gnt, *tmp;
        struct llist_node *n;
 
        /* Prevent new requests being issued until we fix things up. */
@@ -805,10 +805,17 @@ static void blkif_free(struct blkfront_info *info, int suspend)
        /* Remove all persistent grants */
        if (info->persistent_gnts_c) {
                all_gnts = llist_del_all(&info->persistent_gnts);
-               llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
+               persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
+               while (persistent_gnt) {
                        gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
                        __free_page(pfn_to_page(persistent_gnt->pfn));
-                       kfree(persistent_gnt);
+                       tmp = persistent_gnt;
+                       n = persistent_gnt->node.next;
+                       if (n)
+                               persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
+                       else
+                               persistent_gnt = NULL;
+                       kfree(tmp);
                }
                info->persistent_gnts_c = 0;
        }
index 052797b..01a5ca7 100644 (file)
@@ -181,7 +181,7 @@ static int dsp56k_upload(u_char __user *bin, int len)
 static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
                           loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int dev = iminor(inode) & 0x0f;
 
        switch(dev)
@@ -244,7 +244,7 @@ static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
 static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
                            loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int dev = iminor(inode) & 0x0f;
 
        switch(dev)
@@ -306,7 +306,7 @@ static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t co
 static long dsp56k_ioctl(struct file *file, unsigned int cmd,
                         unsigned long arg)
 {
-       int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
+       int dev = iminor(file_inode(file)) & 0x0f;
        void __user *argp = (void __user *)arg;
 
        switch(dev)
@@ -408,7 +408,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
 #if 0
 static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
 {
-       int dev = iminor(file->f_path.dentry->d_inode) & 0x0f;
+       int dev = iminor(file_inode(file)) & 0x0f;
 
        switch(dev)
        {
index 85156dd..65a8d96 100644 (file)
@@ -125,7 +125,7 @@ static char dtlk_write_tts(char);
 static ssize_t dtlk_read(struct file *file, char __user *buf,
                         size_t count, loff_t * ppos)
 {
-       unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       unsigned int minor = iminor(file_inode(file));
        char ch;
        int i = 0, retries;
 
@@ -177,7 +177,7 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
        }
 #endif
 
-       if (iminor(file->f_path.dentry->d_inode) != DTLK_MINOR)
+       if (iminor(file_inode(file)) != DTLK_MINOR)
                return -EINVAL;
 
        while (1) {
index 1bafb40..69ae597 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 
 
@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
 static LIST_HEAD(rng_list);
 static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
-static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
-       __cacheline_aligned;
+static u8 *rng_buffer;
+
+static size_t rng_buffer_size(void)
+{
+       return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
+}
 
 static inline int hwrng_init(struct hwrng *rng)
 {
@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 
                if (!data_avail) {
                        bytes_read = rng_get_data(current_rng, rng_buffer,
-                               sizeof(rng_buffer),
+                               rng_buffer_size(),
                                !(filp->f_flags & O_NONBLOCK));
                        if (bytes_read < 0) {
                                err = bytes_read;
@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
 
        mutex_lock(&rng_mutex);
 
+       /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+       err = -ENOMEM;
+       if (!rng_buffer) {
+               rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+               if (!rng_buffer)
+                       goto out_unlock;
+       }
+
        /* Must not register two RNGs with the same name. */
        err = -EEXIST;
        list_for_each_entry(tmp, &rng_list, list) {
index b65c103..10fd71c 100644 (file)
@@ -154,18 +154,7 @@ static struct virtio_driver virtio_rng_driver = {
 #endif
 };
 
-static int __init init(void)
-{
-       return register_virtio_driver(&virtio_rng_driver);
-}
-
-static void __exit fini(void)
-{
-       unregister_virtio_driver(&virtio_rng_driver);
-}
-module_init(init);
-module_exit(fini);
-
+module_virtio_driver(virtio_rng_driver);
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio random number driver");
 MODULE_LICENSE("GPL");
index 1c7fdcd..0ac9b45 100644 (file)
@@ -1208,6 +1208,16 @@ static int smi_num; /* Used to sequence the SMIs */
 #define DEFAULT_REGSPACING     1
 #define DEFAULT_REGSIZE                1
 
+#ifdef CONFIG_ACPI
+static bool          si_tryacpi = 1;
+#endif
+#ifdef CONFIG_DMI
+static bool          si_trydmi = 1;
+#endif
+static bool          si_tryplatform = 1;
+#ifdef CONFIG_PCI
+static bool          si_trypci = 1;
+#endif
 static bool          si_trydefaults = 1;
 static char          *si_type[SI_MAX_PARMS];
 #define MAX_SI_TYPE_STR 30
@@ -1238,6 +1248,25 @@ MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
                 " Documentation/IPMI.txt in the kernel sources for the"
                 " gory details.");
 
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+                " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+                " default scan of the interfaces identified via DMI");
+#endif
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+                " default scan of the interfaces identified via platform"
+                " interfaces like openfirmware");
+#ifdef CONFIG_PCI
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+                " default scan of the interfaces identified via pci");
+#endif
 module_param_named(trydefaults, si_trydefaults, bool, 0);
 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
                 " default scan of the KCS and SMIC interface at the standard"
@@ -3371,13 +3400,15 @@ static int init_ipmi_si(void)
                return 0;
        initialized = 1;
 
-       rv = platform_driver_register(&ipmi_driver);
-       if (rv) {
-               printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
-               return rv;
+       if (si_tryplatform) {
+               rv = platform_driver_register(&ipmi_driver);
+               if (rv) {
+                       printk(KERN_ERR PFX "Unable to register "
+                              "driver: %d\n", rv);
+                       return rv;
+               }
        }
 
-
        /* Parse out the si_type string into its components. */
        str = si_type_str;
        if (*str != '\0') {
@@ -3400,24 +3431,31 @@ static int init_ipmi_si(void)
                return 0;
 
 #ifdef CONFIG_PCI
-       rv = pci_register_driver(&ipmi_pci_driver);
-       if (rv)
-               printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
-       else
-               pci_registered = 1;
+       if (si_trypci) {
+               rv = pci_register_driver(&ipmi_pci_driver);
+               if (rv)
+                       printk(KERN_ERR PFX "Unable to register "
+                              "PCI driver: %d\n", rv);
+               else
+                       pci_registered = 1;
+       }
 #endif
 
 #ifdef CONFIG_ACPI
-       pnp_register_driver(&ipmi_pnp_driver);
-       pnp_registered = 1;
+       if (si_tryacpi) {
+               pnp_register_driver(&ipmi_pnp_driver);
+               pnp_registered = 1;
+       }
 #endif
 
 #ifdef CONFIG_DMI
-       dmi_find_bmc();
+       if (si_trydmi)
+               dmi_find_bmc();
 #endif
 
 #ifdef CONFIG_ACPI
-       spmi_find_bmc();
+       if (si_tryacpi)
+               spmi_find_bmc();
 #endif
 
        /* We prefer devices with interrupts, but in the case of a machine
index a741e41..dafd9ac 100644 (file)
@@ -294,7 +294,7 @@ static int lp_wait_ready(int minor, int nonblock)
 static ssize_t lp_write(struct file * file, const char __user * buf,
                        size_t count, loff_t *ppos)
 {
-       unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       unsigned int minor = iminor(file_inode(file));
        struct parport *port = lp_table[minor].dev->port;
        char *kbuf = lp_table[minor].lp_buffer;
        ssize_t retv = 0;
@@ -413,7 +413,7 @@ static ssize_t lp_read(struct file * file, char __user * buf,
                       size_t count, loff_t *ppos)
 {
        DEFINE_WAIT(wait);
-       unsigned int minor=iminor(file->f_path.dentry->d_inode);
+       unsigned int minor=iminor(file_inode(file));
        struct parport *port = lp_table[minor].dev->port;
        ssize_t retval = 0;
        char *kbuf = lp_table[minor].lp_buffer;
@@ -679,7 +679,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd,
        struct timeval par_timeout;
        int ret;
 
-       minor = iminor(file->f_path.dentry->d_inode);
+       minor = iminor(file_inode(file));
        mutex_lock(&lp_mutex);
        switch (cmd) {
        case LPSETTIMEOUT:
@@ -707,7 +707,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
        struct timeval par_timeout;
        int ret;
 
-       minor = iminor(file->f_path.dentry->d_inode);
+       minor = iminor(file_inode(file));
        mutex_lock(&lp_mutex);
        switch (cmd) {
        case LPSETTIMEOUT:
index 6f6e92a..2c644af 100644 (file)
@@ -708,7 +708,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
 {
        loff_t ret;
 
-       mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
+       mutex_lock(&file_inode(file)->i_mutex);
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
@@ -725,7 +725,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
        default:
                ret = -EINVAL;
        }
-       mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+       mutex_unlock(&file_inode(file)->i_mutex);
        return ret;
 }
 
index 522136d..190d442 100644 (file)
@@ -183,19 +183,12 @@ static const struct file_operations misc_fops = {
  
 int misc_register(struct miscdevice * misc)
 {
-       struct miscdevice *c;
        dev_t dev;
        int err = 0;
 
        INIT_LIST_HEAD(&misc->list);
 
        mutex_lock(&misc_mtx);
-       list_for_each_entry(c, &misc_list, list) {
-               if (c->minor == misc->minor) {
-                       mutex_unlock(&misc_mtx);
-                       return -EBUSY;
-               }
-       }
 
        if (misc->minor == MISC_DYNAMIC_MINOR) {
                int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
@@ -205,6 +198,15 @@ int misc_register(struct miscdevice * misc)
                }
                misc->minor = DYNAMIC_MINORS - i - 1;
                set_bit(i, misc_minors);
+       } else {
+               struct miscdevice *c;
+
+               list_for_each_entry(c, &misc_list, list) {
+                       if (c->minor == misc->minor) {
+                               mutex_unlock(&misc_mtx);
+                               return -EBUSY;
+                       }
+               }
        }
 
        dev = MKDEV(MISC_MAJOR, misc->minor);
index 808d44e..b07b119 100644 (file)
@@ -41,7 +41,7 @@ void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
 ssize_t nsc_gpio_write(struct file *file, const char __user *data,
                       size_t len, loff_t *ppos)
 {
-       unsigned m = iminor(file->f_path.dentry->d_inode);
+       unsigned m = iminor(file_inode(file));
        struct nsc_gpio_ops *amp = file->private_data;
        struct device *dev = amp->dev;
        size_t i;
@@ -104,7 +104,7 @@ ssize_t nsc_gpio_write(struct file *file, const char __user *data,
 ssize_t nsc_gpio_read(struct file *file, char __user * buf,
                      size_t len, loff_t * ppos)
 {
-       unsigned m = iminor(file->f_path.dentry->d_inode);
+       unsigned m = iminor(file_inode(file));
        int value;
        struct nsc_gpio_ops *amp = file->private_data;
 
index a758486..c115217 100644 (file)
@@ -1400,7 +1400,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        struct cm4000_dev *dev = filp->private_data;
        unsigned int iobase = dev->p_dev->resource[0]->start;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct pcmcia_device *link;
        int size;
        int rc;
index 1cd4924..ae0b42b 100644 (file)
@@ -107,7 +107,7 @@ static inline void pp_enable_irq (struct pp_struct *pp)
 static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
                        loff_t * ppos)
 {
-       unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       unsigned int minor = iminor(file_inode(file));
        struct pp_struct *pp = file->private_data;
        char * kbuffer;
        ssize_t bytes_read = 0;
@@ -189,7 +189,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
 static ssize_t pp_write (struct file * file, const char __user * buf,
                         size_t count, loff_t * ppos)
 {
-       unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       unsigned int minor = iminor(file_inode(file));
        struct pp_struct *pp = file->private_data;
        char * kbuffer;
        ssize_t bytes_written = 0;
@@ -324,7 +324,7 @@ static enum ieee1284_phase init_phase (int mode)
 
 static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       unsigned int minor = iminor(file_inode(file));
        struct pp_struct *pp = file->private_data;
        struct parport * port;
        void __user *argp = (void __user *)arg;
index 588063a..8cafa9c 100644 (file)
@@ -312,7 +312,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id)
 
 static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int err;
        mutex_lock(&inode->i_mutex);
        err = ps3flash_writeback(ps3flash_dev);
index 54a3a6d..f3223aa 100644 (file)
@@ -80,7 +80,7 @@ static int raw_open(struct inode *inode, struct file *filp)
        filp->f_flags |= O_DIRECT;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        if (++raw_devices[minor].inuse == 1)
-               filp->f_path.dentry->d_inode->i_mapping =
+               file_inode(filp)->i_mapping =
                        bdev->bd_inode->i_mapping;
        filp->private_data = bdev;
        mutex_unlock(&raw_mutex);
index 6386a98..bf2349d 100644 (file)
@@ -938,7 +938,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
        }
 
        if (ret > 0) {
-               struct inode *inode = file->f_path.dentry->d_inode;
+               struct inode *inode = file_inode(file);
                inode->i_atime = current_fs_time(inode->i_sb);
        }
 
index 34c63f8..47b9fdf 100644 (file)
@@ -164,7 +164,7 @@ static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t le
        unsigned int minor;
        char value;
 
-       minor = iminor(file->f_path.dentry->d_inode);
+       minor = iminor(file_inode(file));
        switch (minor) {
        case 0:
                value = get_led();
@@ -200,7 +200,7 @@ static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data,
        int retval = 0;
        char c;
 
-       minor = iminor(file->f_path.dentry->d_inode);
+       minor = iminor(file_inode(file));
        switch (minor) {
        case 0:
                type = TYPE_LED;
index ee4dbea..e905d5f 100644 (file)
@@ -61,9 +61,6 @@ struct ports_driver_data {
        /* List of all the devices we're handling */
        struct list_head portdevs;
 
-       /* Number of devices this driver is handling */
-       unsigned int index;
-
        /*
         * This is used to keep track of the number of hvc consoles
         * spawned by this driver.  This number is given as the first
@@ -169,9 +166,6 @@ struct ports_device {
        /* Array of per-port IO virtqueues */
        struct virtqueue **in_vqs, **out_vqs;
 
-       /* Used for numbering devices for sysfs and debugfs */
-       unsigned int drv_index;
-
        /* Major number for this device.  Ports will be created as minors. */
        int chr_major;
 };
@@ -1415,7 +1409,7 @@ static int add_port(struct ports_device *portdev, u32 id)
        }
        port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
                                  devt, port, "vport%up%u",
-                                 port->portdev->drv_index, id);
+                                 port->portdev->vdev->index, id);
        if (IS_ERR(port->dev)) {
                err = PTR_ERR(port->dev);
                dev_err(&port->portdev->vdev->dev,
@@ -1442,7 +1436,7 @@ static int add_port(struct ports_device *portdev, u32 id)
                 * rproc_serial does not want the console port, only
                 * the generic port implementation.
                 */
-               port->host_connected = true;
+               port->host_connected = port->guest_connected = true;
        else if (!use_multiport(port->portdev)) {
                /*
                 * If we're not using multiport support,
@@ -1470,7 +1464,7 @@ static int add_port(struct ports_device *portdev, u32 id)
                 * inspect a port's state at any time
                 */
                sprintf(debugfs_name, "vport%up%u",
-                       port->portdev->drv_index, id);
+                       port->portdev->vdev->index, id);
                port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
                                                         pdrvdata.debugfs_dir,
                                                         port,
@@ -1958,16 +1952,12 @@ static int virtcons_probe(struct virtio_device *vdev)
        portdev->vdev = vdev;
        vdev->priv = portdev;
 
-       spin_lock_irq(&pdrvdata_lock);
-       portdev->drv_index = pdrvdata.index++;
-       spin_unlock_irq(&pdrvdata_lock);
-
        portdev->chr_major = register_chrdev(0, "virtio-portsdev",
                                             &portdev_fops);
        if (portdev->chr_major < 0) {
                dev_err(&vdev->dev,
                        "Error %d registering chrdev for device %u\n",
-                       portdev->chr_major, portdev->drv_index);
+                       portdev->chr_major, vdev->index);
                err = portdev->chr_major;
                goto free;
        }
index fabbfe1..ed87b24 100644 (file)
@@ -52,31 +52,29 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
                                     int level)
 {
        struct clk *child;
-       struct hlist_node *tmp;
 
        if (!c)
                return;
 
        clk_summary_show_one(s, c, level);
 
-       hlist_for_each_entry(child, tmp, &c->children, child_node)
+       hlist_for_each_entry(child, &c->children, child_node)
                clk_summary_show_subtree(s, child, level + 1);
 }
 
 static int clk_summary_show(struct seq_file *s, void *data)
 {
        struct clk *c;
-       struct hlist_node *tmp;
 
        seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
        seq_printf(s, "---------------------------------------------------------------------\n");
 
        mutex_lock(&prepare_lock);
 
-       hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
+       hlist_for_each_entry(c, &clk_root_list, child_node)
                clk_summary_show_subtree(s, c, 0);
 
-       hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
+       hlist_for_each_entry(c, &clk_orphan_list, child_node)
                clk_summary_show_subtree(s, c, 0);
 
        mutex_unlock(&prepare_lock);
@@ -111,14 +109,13 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 {
        struct clk *child;
-       struct hlist_node *tmp;
 
        if (!c)
                return;
 
        clk_dump_one(s, c, level);
 
-       hlist_for_each_entry(child, tmp, &c->children, child_node) {
+       hlist_for_each_entry(child, &c->children, child_node) {
                seq_printf(s, ",");
                clk_dump_subtree(s, child, level + 1);
        }
@@ -129,21 +126,20 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 static int clk_dump(struct seq_file *s, void *data)
 {
        struct clk *c;
-       struct hlist_node *tmp;
        bool first_node = true;
 
        seq_printf(s, "{");
 
        mutex_lock(&prepare_lock);
 
-       hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
+       hlist_for_each_entry(c, &clk_root_list, child_node) {
                if (!first_node)
                        seq_printf(s, ",");
                first_node = false;
                clk_dump_subtree(s, c, 0);
        }
 
-       hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
+       hlist_for_each_entry(c, &clk_orphan_list, child_node) {
                seq_printf(s, ",");
                clk_dump_subtree(s, c, 0);
        }
@@ -222,7 +218,6 @@ out:
 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
 {
        struct clk *child;
-       struct hlist_node *tmp;
        int ret = -EINVAL;;
 
        if (!clk || !pdentry)
@@ -233,7 +228,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
        if (ret)
                goto out;
 
-       hlist_for_each_entry(child, tmp, &clk->children, child_node)
+       hlist_for_each_entry(child, &clk->children, child_node)
                clk_debug_create_subtree(child, clk->dentry);
 
        ret = 0;
@@ -299,7 +294,6 @@ out:
 static int __init clk_debug_init(void)
 {
        struct clk *clk;
-       struct hlist_node *tmp;
        struct dentry *d;
 
        rootdir = debugfs_create_dir("clk", NULL);
@@ -324,10 +318,10 @@ static int __init clk_debug_init(void)
 
        mutex_lock(&prepare_lock);
 
-       hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+       hlist_for_each_entry(clk, &clk_root_list, child_node)
                clk_debug_create_subtree(clk, rootdir);
 
-       hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+       hlist_for_each_entry(clk, &clk_orphan_list, child_node)
                clk_debug_create_subtree(clk, orphandir);
 
        inited = 1;
@@ -345,13 +339,12 @@ static inline int clk_debug_register(struct clk *clk) { return 0; }
 static void clk_disable_unused_subtree(struct clk *clk)
 {
        struct clk *child;
-       struct hlist_node *tmp;
        unsigned long flags;
 
        if (!clk)
                goto out;
 
-       hlist_for_each_entry(child, tmp, &clk->children, child_node)
+       hlist_for_each_entry(child, &clk->children, child_node)
                clk_disable_unused_subtree(child);
 
        spin_lock_irqsave(&enable_lock, flags);
@@ -384,14 +377,13 @@ out:
 static int clk_disable_unused(void)
 {
        struct clk *clk;
-       struct hlist_node *tmp;
 
        mutex_lock(&prepare_lock);
 
-       hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
+       hlist_for_each_entry(clk, &clk_root_list, child_node)
                clk_disable_unused_subtree(clk);
 
-       hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
+       hlist_for_each_entry(clk, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(clk);
 
        mutex_unlock(&prepare_lock);
@@ -484,12 +476,11 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
 {
        struct clk *child;
        struct clk *ret;
-       struct hlist_node *tmp;
 
        if (!strcmp(clk->name, name))
                return clk;
 
-       hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+       hlist_for_each_entry(child, &clk->children, child_node) {
                ret = __clk_lookup_subtree(name, child);
                if (ret)
                        return ret;
@@ -502,20 +493,19 @@ struct clk *__clk_lookup(const char *name)
 {
        struct clk *root_clk;
        struct clk *ret;
-       struct hlist_node *tmp;
 
        if (!name)
                return NULL;
 
        /* search the 'proper' clk tree first */
-       hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
+       hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }
 
        /* if not found, then search the orphan tree */
-       hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
+       hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
@@ -812,7 +802,6 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
 {
        unsigned long old_rate;
        unsigned long parent_rate = 0;
-       struct hlist_node *tmp;
        struct clk *child;
 
        old_rate = clk->rate;
@@ -832,7 +821,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
        if (clk->notifier_count && msg)
                __clk_notify(clk, msg, old_rate, clk->rate);
 
-       hlist_for_each_entry(child, tmp, &clk->children, child_node)
+       hlist_for_each_entry(child, &clk->children, child_node)
                __clk_recalc_rates(child, msg);
 }
 
@@ -878,7 +867,6 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
  */
 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
 {
-       struct hlist_node *tmp;
        struct clk *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;
@@ -895,7 +883,7 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
        if (ret == NOTIFY_BAD)
                goto out;
 
-       hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+       hlist_for_each_entry(child, &clk->children, child_node) {
                ret = __clk_speculate_rates(child, new_rate);
                if (ret == NOTIFY_BAD)
                        break;
@@ -908,11 +896,10 @@ out:
 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
 {
        struct clk *child;
-       struct hlist_node *tmp;
 
        clk->new_rate = new_rate;
 
-       hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+       hlist_for_each_entry(child, &clk->children, child_node) {
                if (child->ops->recalc_rate)
                        child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
                else
@@ -983,7 +970,6 @@ out:
  */
 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
 {
-       struct hlist_node *tmp;
        struct clk *child, *fail_clk = NULL;
        int ret = NOTIFY_DONE;
 
@@ -996,7 +982,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
                        fail_clk = clk;
        }
 
-       hlist_for_each_entry(child, tmp, &clk->children, child_node) {
+       hlist_for_each_entry(child, &clk->children, child_node) {
                clk = clk_propagate_rate_change(child, event);
                if (clk)
                        fail_clk = clk;
@@ -1014,7 +1000,6 @@ static void clk_change_rate(struct clk *clk)
        struct clk *child;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
-       struct hlist_node *tmp;
 
        old_rate = clk->rate;
 
@@ -1032,7 +1017,7 @@ static void clk_change_rate(struct clk *clk)
        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
 
-       hlist_for_each_entry(child, tmp, &clk->children, child_node)
+       hlist_for_each_entry(child, &clk->children, child_node)
                clk_change_rate(child);
 }
 
@@ -1348,7 +1333,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 {
        int i, ret = 0;
        struct clk *orphan;
-       struct hlist_node *tmp, *tmp2;
+       struct hlist_node *tmp2;
 
        if (!clk)
                return -EINVAL;
@@ -1448,7 +1433,7 @@ int __clk_init(struct device *dev, struct clk *clk)
         * walk the list of orphan clocks and reparent any that are children of
         * this clock
         */
-       hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
+       hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
                if (orphan->ops->get_parent) {
                        i = orphan->ops->get_parent(orphan->hw);
                        if (!strcmp(clk->name, orphan->parent_names[i]))
index e920cbe..e507ab7 100644 (file)
@@ -62,3 +62,8 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
 
 config ARM_ARCH_TIMER
        bool
+
+config CLKSRC_METAG_GENERIC
+       def_bool y if METAG
+       help
+         This option enables support for the Meta per-thread timers.
index 7d671b8..4d8283a 100644 (file)
@@ -21,3 +21,4 @@ obj-$(CONFIG_ARCH_TEGRA)      += tegra20_timer.o
 obj-$(CONFIG_VT8500_TIMER)     += vt8500_timer.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)           += arm_arch_timer.o
+obj-$(CONFIG_CLKSRC_METAG_GENERIC)     += metag_generic.o
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
new file mode 100644 (file)
index 0000000..ade7513
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2005-2013 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Support for Meta per-thread timers.
+ *
+ * Meta hardware threads have 2 timers. The background timer (TXTIMER) is used
+ * as a free-running time base (hz clocksource), and the interrupt timer
+ * (TXTIMERI) is used for the timer interrupt (clock event). Both counters
+ * traditionally count at approximately 1MHz.
+ */
+
+#include <clocksource/metag_generic.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+
+#include <asm/clock.h>
+#include <asm/hwthread.h>
+#include <asm/core_reg.h>
+#include <asm/metag_mem.h>
+#include <asm/tbx.h>
+
+#define HARDWARE_FREQ          1000000 /* 1MHz */
+#define HARDWARE_DIV           1       /* divide by 1 = 1MHz clock */
+#define HARDWARE_TO_NS_SHIFT   10      /* convert ticks to ns */
+
+static unsigned int hwtimer_freq = HARDWARE_FREQ;
+static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
+static DEFINE_PER_CPU(char [11], local_clockevent_name);
+
+static int metag_timer_set_next_event(unsigned long delta,
+                                     struct clock_event_device *dev)
+{
+       __core_reg_set(TXTIMERI, -delta);
+       return 0;
+}
+
+static void metag_timer_set_mode(enum clock_event_mode mode,
+                                struct clock_event_device *evt)
+{
+       switch (mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+       case CLOCK_EVT_MODE_RESUME:
+               break;
+
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               /* We should disable the IRQ here */
+               break;
+
+       case CLOCK_EVT_MODE_PERIODIC:
+       case CLOCK_EVT_MODE_UNUSED:
+               WARN_ON(1);
+               break;
+       };
+}
+
+static cycle_t metag_clocksource_read(struct clocksource *cs)
+{
+       return __core_reg_get(TXTIMER);
+}
+
+static struct clocksource clocksource_metag = {
+       .name = "META",
+       .rating = 200,
+       .mask = CLOCKSOURCE_MASK(32),
+       .read = metag_clocksource_read,
+       .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static irqreturn_t metag_timer_interrupt(int irq, void *dummy)
+{
+       struct clock_event_device *evt = &__get_cpu_var(local_clockevent);
+
+       evt->event_handler(evt);
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction metag_timer_irq = {
+       .name = "META core timer",
+       .handler = metag_timer_interrupt,
+       .flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
+};
+
+unsigned long long sched_clock(void)
+{
+       unsigned long long ticks = __core_reg_get(TXTIMER);
+       return ticks << HARDWARE_TO_NS_SHIFT;
+}
+
+static void __cpuinit arch_timer_setup(unsigned int cpu)
+{
+       unsigned int txdivtime;
+       struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
+       char *name = per_cpu(local_clockevent_name, cpu);
+
+       txdivtime = __core_reg_get(TXDIVTIME);
+
+       txdivtime &= ~TXDIVTIME_DIV_BITS;
+       txdivtime |= (HARDWARE_DIV & TXDIVTIME_DIV_BITS);
+
+       __core_reg_set(TXDIVTIME, txdivtime);
+
+       sprintf(name, "META %d", cpu);
+       clk->name = name;
+       clk->features = CLOCK_EVT_FEAT_ONESHOT,
+
+       clk->rating = 200,
+       clk->shift = 12,
+       clk->irq = tbisig_map(TBID_SIGNUM_TRT),
+       clk->set_mode = metag_timer_set_mode,
+       clk->set_next_event = metag_timer_set_next_event,
+
+       clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift);
+       clk->max_delta_ns = clockevent_delta2ns(0x7fffffff, clk);
+       clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+       clk->cpumask = cpumask_of(cpu);
+
+       clockevents_register_device(clk);
+
+       /*
+        * For all non-boot CPUs we need to synchronize our free
+        * running clock (TXTIMER) with the boot CPU's clock.
+        *
+        * While this won't be accurate, it should be close enough.
+        */
+       if (cpu) {
+               unsigned int thread0 = cpu_2_hwthread_id[0];
+               unsigned long val;
+
+               val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
+               __core_reg_set(TXTIMER, val);
+       }
+}
+
+static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+                                          unsigned long action, void *hcpu)
+{
+       int cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_STARTING:
+       case CPU_STARTING_FROZEN:
+               arch_timer_setup(cpu);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
+       .notifier_call = arch_timer_cpu_notify,
+};
+
+int __init metag_generic_timer_init(void)
+{
+       /*
+        * On Meta 2 SoCs, the actual frequency of the timer is based on the
+        * Meta core clock speed divided by an integer, so it is only
+        * approximately 1MHz. Calculating the real frequency here drastically
+        * reduces clock skew on these SoCs.
+        */
+#ifdef CONFIG_METAG_META21
+       hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
+#endif
+       clocksource_register_hz(&clocksource_metag, hwtimer_freq);
+
+       setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
+
+       /* Configure timer on boot CPU */
+       arch_timer_setup(smp_processor_id());
+
+       /* Hook cpu boot to configure other CPU's timers */
+       register_cpu_notifier(&arch_timer_cpu_nb);
+
+       return 0;
+}
index 435e54d..071f6ea 100644 (file)
@@ -240,6 +240,7 @@ void __init nmdk_timer_init(void __iomem *base, int irq)
        /* Timer 1 is used for events, register irq and clockevents */
        setup_irq(irq, &nmdk_timer_irq);
        nmdk_clkevt.cpumask = cpumask_of(0);
+       nmdk_clkevt.irq = irq;
        clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
 
        mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
index a4605fd..47a6730 100644 (file)
 #include <linux/of_address.h>
 #include <linux/irq.h>
 #include <linux/module.h>
-#include <asm/sched_clock.h>
 
+#include <asm/sched_clock.h>
+#include <asm/localtimer.h>
+#include <linux/percpu.h>
 /*
  * Timer block registers.
  */
@@ -49,6 +51,7 @@
 #define TIMER1_RELOAD_OFF      0x0018
 #define TIMER1_VAL_OFF         0x001c
 
+#define LCL_TIMER_EVENTS_STATUS        0x0028
 /* Global timers are connected to the coherency fabric clock, and the
    below divider reduces their incrementing frequency. */
 #define TIMER_DIVIDER_SHIFT     5
 /*
  * SoC-specific data.
  */
-static void __iomem *timer_base;
-static int timer_irq;
+static void __iomem *timer_base, *local_base;
+static unsigned int timer_clk;
+static bool timer25Mhz = true;
 
 /*
  * Number of timer ticks per jiffy.
  */
 static u32 ticks_per_jiffy;
 
+static struct clock_event_device __percpu **percpu_armada_370_xp_evt;
+
 static u32 notrace armada_370_xp_read_sched_clock(void)
 {
        return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -78,24 +84,23 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
                                struct clock_event_device *dev)
 {
        u32 u;
-
        /*
         * Clear clockevent timer interrupt.
         */
-       writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+       writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
 
        /*
         * Setup new clockevent timer value.
         */
-       writel(delta, timer_base + TIMER1_VAL_OFF);
+       writel(delta, local_base + TIMER0_VAL_OFF);
 
        /*
         * Enable the timer.
         */
-       u = readl(timer_base + TIMER_CTRL_OFF);
-       u = ((u & ~TIMER1_RELOAD_EN) | TIMER1_EN |
-            TIMER1_DIV(TIMER_DIVIDER_SHIFT));
-       writel(u, timer_base + TIMER_CTRL_OFF);
+       u = readl(local_base + TIMER_CTRL_OFF);
+       u = ((u & ~TIMER0_RELOAD_EN) | TIMER0_EN |
+            TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+       writel(u, local_base + TIMER_CTRL_OFF);
 
        return 0;
 }
@@ -107,37 +112,38 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
        u32 u;
 
        if (mode == CLOCK_EVT_MODE_PERIODIC) {
+
                /*
                 * Setup timer to fire at 1/HZ intervals.
                 */
-               writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF);
-               writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF);
+               writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
+               writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
 
                /*
                 * Enable timer.
                 */
-               u = readl(timer_base + TIMER_CTRL_OFF);
 
-               writel((u | TIMER1_EN | TIMER1_RELOAD_EN |
-                       TIMER1_DIV(TIMER_DIVIDER_SHIFT)),
-                      timer_base + TIMER_CTRL_OFF);
+               u = readl(local_base + TIMER_CTRL_OFF);
+
+               writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
+                       TIMER0_DIV(TIMER_DIVIDER_SHIFT)),
+                       local_base + TIMER_CTRL_OFF);
        } else {
                /*
                 * Disable timer.
                 */
-               u = readl(timer_base + TIMER_CTRL_OFF);
-               writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF);
+               u = readl(local_base + TIMER_CTRL_OFF);
+               writel(u & ~TIMER0_EN, local_base + TIMER_CTRL_OFF);
 
                /*
                 * ACK pending timer interrupt.
                 */
-               writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
-
+               writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
        }
 }
 
 static struct clock_event_device armada_370_xp_clkevt = {
-       .name           = "armada_370_xp_tick",
+       .name           = "armada_370_xp_per_cpu_tick",
        .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
        .shift          = 32,
        .rating         = 300,
@@ -150,32 +156,78 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
        /*
         * ACK timer interrupt and call event handler.
         */
+       struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
 
-       writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
-       armada_370_xp_clkevt.event_handler(&armada_370_xp_clkevt);
+       writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
+       evt->event_handler(evt);
 
        return IRQ_HANDLED;
 }
 
-static struct irqaction armada_370_xp_timer_irq = {
-       .name           = "armada_370_xp_tick",
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-       .handler        = armada_370_xp_timer_interrupt
+/*
+ * Setup the local clock events for a CPU.
+ */
+static int __cpuinit armada_370_xp_timer_setup(struct clock_event_device *evt)
+{
+       u32 u;
+       int cpu = smp_processor_id();
+
+       /* Use existing clock_event for cpu 0 */
+       if (!smp_processor_id())
+               return 0;
+
+       u = readl(local_base + TIMER_CTRL_OFF);
+       if (timer25Mhz)
+               writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+       else
+               writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+
+       evt->name               = armada_370_xp_clkevt.name;
+       evt->irq                = armada_370_xp_clkevt.irq;
+       evt->features           = armada_370_xp_clkevt.features;
+       evt->shift              = armada_370_xp_clkevt.shift;
+       evt->rating             = armada_370_xp_clkevt.rating,
+       evt->set_next_event     = armada_370_xp_clkevt_next_event,
+       evt->set_mode           = armada_370_xp_clkevt_mode,
+       evt->cpumask            = cpumask_of(cpu);
+
+       *__this_cpu_ptr(percpu_armada_370_xp_evt) = evt;
+
+       clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
+       enable_percpu_irq(evt->irq, 0);
+
+       return 0;
+}
+
+static void  armada_370_xp_timer_stop(struct clock_event_device *evt)
+{
+       evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+       disable_percpu_irq(evt->irq);
+}
+
+static struct local_timer_ops armada_370_xp_local_timer_ops __cpuinitdata = {
+       .setup  = armada_370_xp_timer_setup,
+       .stop   =  armada_370_xp_timer_stop,
 };
 
 void __init armada_370_xp_timer_init(void)
 {
        u32 u;
        struct device_node *np;
-       unsigned int timer_clk;
+       int res;
+
        np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
        timer_base = of_iomap(np, 0);
        WARN_ON(!timer_base);
+       local_base = of_iomap(np, 1);
 
        if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
                /* The fixed 25MHz timer is available so let's use it */
+               u = readl(local_base + TIMER_CTRL_OFF);
+               writel(u | TIMER0_25MHZ,
+                      local_base + TIMER_CTRL_OFF);
                u = readl(timer_base + TIMER_CTRL_OFF);
-               writel(u | TIMER0_25MHZ | TIMER1_25MHZ,
+               writel(u | TIMER0_25MHZ,
                       timer_base + TIMER_CTRL_OFF);
                timer_clk = 25000000;
        } else {
@@ -183,15 +235,23 @@ void __init armada_370_xp_timer_init(void)
                struct clk *clk = of_clk_get(np, 0);
                WARN_ON(IS_ERR(clk));
                rate =  clk_get_rate(clk);
+               u = readl(local_base + TIMER_CTRL_OFF);
+               writel(u & ~(TIMER0_25MHZ),
+                      local_base + TIMER_CTRL_OFF);
+
                u = readl(timer_base + TIMER_CTRL_OFF);
-               writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
+               writel(u & ~(TIMER0_25MHZ),
                       timer_base + TIMER_CTRL_OFF);
+
                timer_clk = rate / TIMER_DIVIDER;
+               timer25Mhz = false;
        }
 
-       /* We use timer 0 as clocksource, and timer 1 for
-          clockevents */
-       timer_irq = irq_of_parse_and_map(np, 1);
+       /*
+        * We use timer 0 as clocksource, and private(local) timer 0
+        * for clockevents
+        */
+       armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4);
 
        ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
 
@@ -216,12 +276,26 @@ void __init armada_370_xp_timer_init(void)
                              "armada_370_xp_clocksource",
                              timer_clk, 300, 32, clocksource_mmio_readl_down);
 
-       /*
-        * Setup clockevent timer (interrupt-driven).
-        */
-       setup_irq(timer_irq, &armada_370_xp_timer_irq);
+       /* Register the clockevent on the private timer of CPU 0 */
        armada_370_xp_clkevt.cpumask = cpumask_of(0);
        clockevents_config_and_register(&armada_370_xp_clkevt,
                                        timer_clk, 1, 0xfffffffe);
-}
 
+       percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *);
+
+
+       /*
+        * Setup clockevent timer (interrupt-driven).
+        */
+       *__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt;
+       res = request_percpu_irq(armada_370_xp_clkevt.irq,
+                               armada_370_xp_timer_interrupt,
+                               armada_370_xp_clkevt.name,
+                               percpu_armada_370_xp_evt);
+       if (!res) {
+               enable_percpu_irq(armada_370_xp_clkevt.irq, 0);
+#ifdef CONFIG_LOCAL_TIMERS
+               local_timer_register(&armada_370_xp_local_timer_ops);
+#endif
+       }
+}
index fce2000..1110478 100644 (file)
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
            (task_active_pid_ns(current) != &init_pid_ns))
                return;
 
+       /* Can only change if privileged. */
+       if (!capable(CAP_NET_ADMIN)) {
+               err = EPERM;
+               goto out;
+       }
+
        mc_op = (enum proc_cn_mcast_op *)msg->data;
        switch (*mc_op) {
        case PROC_CN_MCAST_LISTEN:
@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
                err = EINVAL;
                break;
        }
+
+out:
        cn_proc_ack(err, msg->seq, msg->ack);
 }
 
index 591b659..126cf29 100644 (file)
@@ -53,22 +53,19 @@ void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
 int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
 {
        struct device *cd;
-       int err = 0;
+       int ret;
 
-idr_try_again:
-       if (!idr_pre_get(&dca_idr, GFP_KERNEL))
-               return -ENOMEM;
+       idr_preload(GFP_KERNEL);
        spin_lock(&dca_idr_lock);
-       err = idr_get_new(&dca_idr, dca, &dca->id);
+
+       ret = idr_alloc(&dca_idr, dca, 0, 0, GFP_NOWAIT);
+       if (ret >= 0)
+               dca->id = ret;
+
        spin_unlock(&dca_idr_lock);
-       switch (err) {
-       case 0:
-               break;
-       case -EAGAIN:
-               goto idr_try_again;
-       default:
-               return err;
-       }
+       idr_preload_end();
+       if (ret < 0)
+               return ret;
 
        cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
        if (IS_ERR(cd)) {
index 242b8c0..b2728d6 100644 (file)
@@ -686,18 +686,14 @@ static int get_dma_id(struct dma_device *device)
 {
        int rc;
 
- idr_retry:
-       if (!idr_pre_get(&dma_idr, GFP_KERNEL))
-               return -ENOMEM;
        mutex_lock(&dma_list_mutex);
-       rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
-       mutex_unlock(&dma_list_mutex);
-       if (rc == -EAGAIN)
-               goto idr_retry;
-       else if (rc != 0)
-               return rc;
 
-       return 0;
+       rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
+       if (rc >= 0)
+               device->dev_id = rc;
+
+       mutex_unlock(&dma_list_mutex);
+       return rc < 0 ? rc : 0;
 }
 
 /**
index 51c3ea2..c599558 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_dma.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -171,7 +172,13 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
        if (dwc->initialized == true)
                return;
 
-       if (dws) {
+       if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
+               /* autoconfigure based on request line from DT */
+               if (dwc->direction == DMA_MEM_TO_DEV)
+                       cfghi = DWC_CFGH_DST_PER(dwc->request_line);
+               else if (dwc->direction == DMA_DEV_TO_MEM)
+                       cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+       } else if (dws) {
                /*
                 * We need controller-specific data to set up slave
                 * transfers.
@@ -1226,49 +1233,64 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
-bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+struct dw_dma_filter_args {
+       struct dw_dma *dw;
+       unsigned int req;
+       unsigned int src;
+       unsigned int dst;
+};
+
+static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
 {
+       struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
        struct dw_dma *dw = to_dw_dma(chan->device);
-       static struct dw_dma *last_dw;
-       static char *last_bus_id;
-       int i = -1;
+       struct dw_dma_filter_args *fargs = param;
+       struct dw_dma_slave *dws = &dwc->slave;
 
-       /*
-        * dmaengine framework calls this routine for all channels of all dma
-        * controller, until true is returned. If 'param' bus_id is not
-        * registered with a dma controller (dw), then there is no need of
-        * running below function for all channels of dw.
-        *
-        * This block of code does this by saving the parameters of last
-        * failure. If dw and param are same, i.e. trying on same dw with
-        * different channel, return false.
-        */
-       if ((last_dw == dw) && (last_bus_id == param))
-               return false;
-       /*
-        * Return true:
-        * - If dw_dma's platform data is not filled with slave info, then all
-        *   dma controllers are fine for transfer.
-        * - Or if param is NULL
-        */
-       if (!dw->sd || !param)
-               return true;
+       /* ensure the device matches our channel */
+        if (chan->device != &fargs->dw->dma)
+                return false;
 
-       while (++i < dw->sd_count) {
-               if (!strcmp(dw->sd[i].bus_id, param)) {
-                       chan->private = &dw->sd[i];
-                       last_dw = NULL;
-                       last_bus_id = NULL;
+       dws->dma_dev    = dw->dma.dev;
+       dws->cfg_hi     = ~0;
+       dws->cfg_lo     = ~0;
+       dws->src_master = fargs->src;
+       dws->dst_master = fargs->dst;
 
-                       return true;
-               }
-       }
+       dwc->request_line = fargs->req;
 
-       last_dw = dw;
-       last_bus_id = param;
-       return false;
+       chan->private = dws;
+
+       return true;
+}
+
+static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
+                                        struct of_dma *ofdma)
+{
+       struct dw_dma *dw = ofdma->of_dma_data;
+       struct dw_dma_filter_args fargs = {
+               .dw = dw,
+       };
+       dma_cap_mask_t cap;
+
+       if (dma_spec->args_count != 3)
+               return NULL;
+
+       fargs.req = be32_to_cpup(dma_spec->args+0);
+       fargs.src = be32_to_cpup(dma_spec->args+1);
+       fargs.dst = be32_to_cpup(dma_spec->args+2);
+
+       if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+                   fargs.src >= dw->nr_masters ||
+                   fargs.dst >= dw->nr_masters))
+               return NULL;
+
+       dma_cap_zero(cap);
+       dma_cap_set(DMA_SLAVE, cap);
+
+       /* TODO: there should be a simpler way to do this */
+       return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
 }
-EXPORT_SYMBOL(dw_dma_generic_filter);
 
 /* --------------------- Cyclic DMA API extensions -------------------- */
 
@@ -1554,9 +1576,8 @@ static void dw_dma_off(struct dw_dma *dw)
 static struct dw_dma_platform_data *
 dw_dma_parse_dt(struct platform_device *pdev)
 {
-       struct device_node *sn, *cn, *np = pdev->dev.of_node;
+       struct device_node *np = pdev->dev.of_node;
        struct dw_dma_platform_data *pdata;
-       struct dw_dma_slave *sd;
        u32 tmp, arr[4];
 
        if (!np) {
@@ -1568,7 +1589,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
        if (!pdata)
                return NULL;
 
-       if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
+       if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
                return NULL;
 
        if (of_property_read_bool(np, "is_private"))
@@ -1583,7 +1604,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
        if (!of_property_read_u32(np, "block_size", &tmp))
                pdata->block_size = tmp;
 
-       if (!of_property_read_u32(np, "nr_masters", &tmp)) {
+       if (!of_property_read_u32(np, "dma-masters", &tmp)) {
                if (tmp > 4)
                        return NULL;
 
@@ -1595,36 +1616,6 @@ dw_dma_parse_dt(struct platform_device *pdev)
                for (tmp = 0; tmp < pdata->nr_masters; tmp++)
                        pdata->data_width[tmp] = arr[tmp];
 
-       /* parse slave data */
-       sn = of_find_node_by_name(np, "slave_info");
-       if (!sn)
-               return pdata;
-
-       /* calculate number of slaves */
-       tmp = of_get_child_count(sn);
-       if (!tmp)
-               return NULL;
-
-       sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
-       if (!sd)
-               return NULL;
-
-       pdata->sd = sd;
-       pdata->sd_count = tmp;
-
-       for_each_child_of_node(sn, cn) {
-               sd->dma_dev = &pdev->dev;
-               of_property_read_string(cn, "bus_id", &sd->bus_id);
-               of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
-               of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
-               if (!of_property_read_u32(cn, "src_master", &tmp))
-                       sd->src_master = tmp;
-
-               if (!of_property_read_u32(cn, "dst_master", &tmp))
-                       sd->dst_master = tmp;
-               sd++;
-       }
-
        return pdata;
 }
 #else
@@ -1705,8 +1696,6 @@ static int dw_probe(struct platform_device *pdev)
        clk_prepare_enable(dw->clk);
 
        dw->regs = regs;
-       dw->sd = pdata->sd;
-       dw->sd_count = pdata->sd_count;
 
        /* get hardware configuration parameters */
        if (autocfg) {
@@ -1837,6 +1826,14 @@ static int dw_probe(struct platform_device *pdev)
 
        dma_async_device_register(&dw->dma);
 
+       if (pdev->dev.of_node) {
+               err = of_dma_controller_register(pdev->dev.of_node,
+                                                dw_dma_xlate, dw);
+               if (err && err != -ENODEV)
+                       dev_err(&pdev->dev,
+                               "could not register of_dma_controller\n");
+       }
+
        return 0;
 }
 
@@ -1845,6 +1842,8 @@ static int dw_remove(struct platform_device *pdev)
        struct dw_dma           *dw = platform_get_drvdata(pdev);
        struct dw_dma_chan      *dwc, *_dwc;
 
+       if (pdev->dev.of_node)
+               of_dma_controller_free(pdev->dev.of_node);
        dw_dma_off(dw);
        dma_async_device_unregister(&dw->dma);
 
index 88dd8eb..cf0ce5c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/dw_dmac.h>
 
 #define DW_DMA_MAX_NR_CHANNELS 8
+#define DW_DMA_MAX_NR_REQUESTS 16
 
 /* flow controller */
 enum dw_dma_fc {
@@ -211,6 +212,8 @@ struct dw_dma_chan {
        /* hardware configuration */
        unsigned int            block_size;
        bool                    nollp;
+       unsigned int            request_line;
+       struct dw_dma_slave     slave;
 
        /* configuration passed via DMA_SLAVE_CONFIG */
        struct dma_slave_config dma_sconfig;
@@ -239,10 +242,6 @@ struct dw_dma {
        struct tasklet_struct   tasklet;
        struct clk              *clk;
 
-       /* slave information */
-       struct dw_dma_slave     *sd;
-       unsigned int            sd_count;
-
        u8                      all_chan_mask;
 
        /* hardware configuration */
index acb709b..e443f2c 100644 (file)
@@ -80,6 +80,29 @@ config EDAC_MM_EDAC
          occurred so that a particular failing memory module can be
          replaced.  If unsure, select 'Y'.
 
+config EDAC_GHES
+       bool "Output ACPI APEI/GHES BIOS detected errors via EDAC"
+       depends on ACPI_APEI_GHES && (EDAC_MM_EDAC=y)
+       default y
+       help
+         Not all machines support hardware-driven error report. Some of those
+         provide a BIOS-driven error report mechanism via ACPI, using the
+         APEI/GHES driver. By enabling this option, the error reports provided
+         by GHES are sent to userspace via the EDAC API.
+
+         When this option is enabled, it will disable the hardware-driven
+         mechanisms, if a GHES BIOS is detected, entering into the
+         "Firmware First" mode.
+
+         It should be noticed that keeping both GHES and a hardware-driven
+         error mechanism won't work well, as BIOS will race with OS, while
+         reading the error registers. So, if you want to not use "Firmware
+         first" GHES error mechanism, you should disable GHES either at
+         compilation time or by passing "ghes.disable=1" Kernel parameter
+         at boot time.
+
+         In doubt, say 'Y'.
+
 config EDAC_AMD64
        tristate "AMD64 (Opteron, Athlon64) K8, F10h"
        depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE
index 5608a9b..4154ed6 100644 (file)
@@ -16,6 +16,7 @@ ifdef CONFIG_PCI
 edac_core-y    += edac_pci.o edac_pci_sysfs.o
 endif
 
+obj-$(CONFIG_EDAC_GHES)                        += ghes_edac.o
 obj-$(CONFIG_EDAC_MCE_INJ)             += mce_amd_inj.o
 
 edac_mce_amd-y                         := mce_amd.o
index 23bb99f..3c2625e 100644 (file)
@@ -453,6 +453,11 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
 extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
 extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
                                      unsigned long page);
+
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+                             struct mem_ctl_info *mci,
+                             struct edac_raw_error_desc *e);
+
 void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                          struct mem_ctl_info *mci,
                          const u16 error_count,
index d1e9eb1..cdb81aa 100644 (file)
 static DEFINE_MUTEX(mem_ctls_mutex);
 static LIST_HEAD(mc_devices);
 
+/*
+ * Used to lock EDAC MC to just one module, avoiding two drivers e. g.
+ *     apei/ghes and i7core_edac to be used at the same time.
+ */
+static void const *edac_mc_owner;
+
 unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
                                 unsigned len)
 {
@@ -441,13 +447,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
 
        mci->op_state = OP_ALLOC;
 
-       /* at this point, the root kobj is valid, and in order to
-        * 'free' the object, then the function:
-        *      edac_mc_unregister_sysfs_main_kobj() must be called
-        * which will perform kobj unregistration and the actual free
-        * will occur during the kobject callback operation
-        */
-
        return mci;
 
 error:
@@ -666,9 +665,9 @@ fail1:
        return 1;
 }
 
-static void del_mc_from_global_list(struct mem_ctl_info *mci)
+static int del_mc_from_global_list(struct mem_ctl_info *mci)
 {
-       atomic_dec(&edac_handlers);
+       int handlers = atomic_dec_return(&edac_handlers);
        list_del_rcu(&mci->link);
 
        /* these are for safe removal of devices from global list while
@@ -676,6 +675,8 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci)
         */
        synchronize_rcu();
        INIT_LIST_HEAD(&mci->link);
+
+       return handlers;
 }
 
 /**
@@ -719,6 +720,7 @@ EXPORT_SYMBOL(edac_mc_find);
 /* FIXME - should a warning be printed if no error detection? correction? */
 int edac_mc_add_mc(struct mem_ctl_info *mci)
 {
+       int ret = -EINVAL;
        edac_dbg(0, "\n");
 
 #ifdef CONFIG_EDAC_DEBUG
@@ -749,6 +751,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
 #endif
        mutex_lock(&mem_ctls_mutex);
 
+       if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
+               ret = -EPERM;
+               goto fail0;
+       }
+
        if (add_mc_to_global_list(mci))
                goto fail0;
 
@@ -775,6 +782,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
        edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
                " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
 
+       edac_mc_owner = mci->mod_name;
+
        mutex_unlock(&mem_ctls_mutex);
        return 0;
 
@@ -783,7 +792,7 @@ fail1:
 
 fail0:
        mutex_unlock(&mem_ctls_mutex);
-       return 1;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(edac_mc_add_mc);
 
@@ -809,7 +818,8 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
                return NULL;
        }
 
-       del_mc_from_global_list(mci);
+       if (!del_mc_from_global_list(mci))
+               edac_mc_owner = NULL;
        mutex_unlock(&mem_ctls_mutex);
 
        /* flush workq processes */
@@ -907,6 +917,7 @@ const char *edac_layer_name[] = {
        [EDAC_MC_LAYER_CHANNEL] = "channel",
        [EDAC_MC_LAYER_SLOT] = "slot",
        [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+       [EDAC_MC_LAYER_ALL_MEM] = "memory",
 };
 EXPORT_SYMBOL_GPL(edac_layer_name);
 
@@ -1054,7 +1065,46 @@ static void edac_ue_error(struct mem_ctl_info *mci,
        edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
 }
 
-#define OTHER_LABEL " or "
+/**
+ * edac_raw_mc_handle_error - reports a memory event to userspace without doing
+ *                           anything to discover the error location
+ *
+ * @type:              severity of the error (CE/UE/Fatal)
+ * @mci:               a struct mem_ctl_info pointer
+ * @e:                 error description
+ *
+ * This raw function is used internally by edac_mc_handle_error(). It should
+ * only be called directly when the hardware error come directly from BIOS,
+ * like in the case of APEI GHES driver.
+ */
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+                             struct mem_ctl_info *mci,
+                             struct edac_raw_error_desc *e)
+{
+       char detail[80];
+       int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
+
+       /* Memory type dependent details about the error */
+       if (type == HW_EVENT_ERR_CORRECTED) {
+               snprintf(detail, sizeof(detail),
+                       "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
+                       e->page_frame_number, e->offset_in_page,
+                       e->grain, e->syndrome);
+               edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
+                             detail, e->other_detail, e->enable_per_layer_report,
+                             e->page_frame_number, e->offset_in_page, e->grain);
+       } else {
+               snprintf(detail, sizeof(detail),
+                       "page:0x%lx offset:0x%lx grain:%ld",
+                       e->page_frame_number, e->offset_in_page, e->grain);
+
+               edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
+                             detail, e->other_detail, e->enable_per_layer_report);
+       }
+
+
+}
+EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
 
 /**
  * edac_mc_handle_error - reports a memory event to userspace
@@ -1086,19 +1136,27 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                          const char *msg,
                          const char *other_detail)
 {
-       /* FIXME: too much for stack: move it to some pre-alocated area */
-       char detail[80], location[80];
-       char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
        char *p;
        int row = -1, chan = -1;
        int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
-       int i;
-       long grain;
-       bool enable_per_layer_report = false;
+       int i, n_labels = 0;
        u8 grain_bits;
+       struct edac_raw_error_desc *e = &mci->error_desc;
 
        edac_dbg(3, "MC%d\n", mci->mc_idx);
 
+       /* Fills the error report buffer */
+       memset(e, 0, sizeof (*e));
+       e->error_count = error_count;
+       e->top_layer = top_layer;
+       e->mid_layer = mid_layer;
+       e->low_layer = low_layer;
+       e->page_frame_number = page_frame_number;
+       e->offset_in_page = offset_in_page;
+       e->syndrome = syndrome;
+       e->msg = msg;
+       e->other_detail = other_detail;
+
        /*
         * Check if the event report is consistent and if the memory
         * location is known. If it is known, enable_per_layer_report will be
@@ -1121,7 +1179,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                        pos[i] = -1;
                }
                if (pos[i] >= 0)
-                       enable_per_layer_report = true;
+                       e->enable_per_layer_report = true;
        }
 
        /*
@@ -1135,8 +1193,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
         * where each memory belongs to a separate channel within the same
         * branch.
         */
-       grain = 0;
-       p = label;
+       p = e->label;
        *p = '\0';
 
        for (i = 0; i < mci->tot_dimms; i++) {
@@ -1150,8 +1207,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                        continue;
 
                /* get the max grain, over the error match range */
-               if (dimm->grain > grain)
-                       grain = dimm->grain;
+               if (dimm->grain > e->grain)
+                       e->grain = dimm->grain;
 
                /*
                 * If the error is memory-controller wide, there's no need to
@@ -1159,8 +1216,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                 * channel/memory controller/...  may be affected.
                 * Also, don't show errors for empty DIMM slots.
                 */
-               if (enable_per_layer_report && dimm->nr_pages) {
-                       if (p != label) {
+               if (e->enable_per_layer_report && dimm->nr_pages) {
+                       if (n_labels >= EDAC_MAX_LABELS) {
+                               e->enable_per_layer_report = false;
+                               break;
+                       }
+                       n_labels++;
+                       if (p != e->label) {
                                strcpy(p, OTHER_LABEL);
                                p += strlen(OTHER_LABEL);
                        }
@@ -1187,12 +1249,12 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                }
        }
 
-       if (!enable_per_layer_report) {
-               strcpy(label, "any memory");
+       if (!e->enable_per_layer_report) {
+               strcpy(e->label, "any memory");
        } else {
                edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
-               if (p == label)
-                       strcpy(label, "unknown memory");
+               if (p == e->label)
+                       strcpy(e->label, "unknown memory");
                if (type == HW_EVENT_ERR_CORRECTED) {
                        if (row >= 0) {
                                mci->csrows[row]->ce_count += error_count;
@@ -1205,7 +1267,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
        }
 
        /* Fill the RAM location data */
-       p = location;
+       p = e->location;
 
        for (i = 0; i < mci->n_layers; i++) {
                if (pos[i] < 0)
@@ -1215,32 +1277,16 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                             edac_layer_name[mci->layers[i].type],
                             pos[i]);
        }
-       if (p > location)
+       if (p > e->location)
                *(p - 1) = '\0';
 
        /* Report the error via the trace interface */
-       grain_bits = fls_long(grain) + 1;
-       trace_mc_event(type, msg, label, error_count,
-                      mci->mc_idx, top_layer, mid_layer, low_layer,
-                      PAGES_TO_MiB(page_frame_number) | offset_in_page,
-                      grain_bits, syndrome, other_detail);
+       grain_bits = fls_long(e->grain) + 1;
+       trace_mc_event(type, e->msg, e->label, e->error_count,
+                      mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
+                      PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
+                      grain_bits, e->syndrome, e->other_detail);
 
-       /* Memory type dependent details about the error */
-       if (type == HW_EVENT_ERR_CORRECTED) {
-               snprintf(detail, sizeof(detail),
-                       "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
-                       page_frame_number, offset_in_page,
-                       grain, syndrome);
-               edac_ce_error(mci, error_count, pos, msg, location, label,
-                             detail, other_detail, enable_per_layer_report,
-                             page_frame_number, offset_in_page, grain);
-       } else {
-               snprintf(detail, sizeof(detail),
-                       "page:0x%lx offset:0x%lx grain:%ld",
-                       page_frame_number, offset_in_page, grain);
-
-               edac_ue_error(mci, error_count, pos, msg, location, label,
-                             detail, other_detail, enable_per_layer_report);
-       }
+       edac_raw_mc_handle_error(type, mci, e);
 }
 EXPORT_SYMBOL_GPL(edac_mc_handle_error);
index 0ca1ca7..4f4b613 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
  *
- * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com>
+ * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com>
  *     The entire API were re-written, and ported to use struct device
  *
  */
@@ -429,8 +429,12 @@ static int edac_create_csrow_objects(struct mem_ctl_info *mci)
                if (!nr_pages_per_csrow(csrow))
                        continue;
                err = edac_create_csrow_object(mci, mci->csrows[i], i);
-               if (err < 0)
+               if (err < 0) {
+                       edac_dbg(1,
+                                "failure: create csrow objects for csrow %d\n",
+                                i);
                        goto error;
+               }
        }
        return 0;
 
@@ -677,9 +681,6 @@ static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
        unsigned long bandwidth = 0;
        int new_bw = 0;
 
-       if (!mci->set_sdram_scrub_rate)
-               return -ENODEV;
-
        if (strict_strtoul(data, 10, &bandwidth) < 0)
                return -EINVAL;
 
@@ -703,9 +704,6 @@ static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
        struct mem_ctl_info *mci = to_mci(dev);
        int bandwidth = 0;
 
-       if (!mci->get_sdram_scrub_rate)
-               return -ENODEV;
-
        bandwidth = mci->get_sdram_scrub_rate(mci);
        if (bandwidth < 0) {
                edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
@@ -866,8 +864,7 @@ DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
 DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
 
 /* memory scrubber attribute file */
-DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
-       mci_sdram_scrub_rate_store);
+DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);
 
 static struct attribute *mci_attrs[] = {
        &dev_attr_reset_counters.attr,
@@ -878,7 +875,6 @@ static struct attribute *mci_attrs[] = {
        &dev_attr_ce_noinfo_count.attr,
        &dev_attr_ue_count.attr,
        &dev_attr_ce_count.attr,
-       &dev_attr_sdram_scrub_rate.attr,
        &dev_attr_max_location.attr,
        NULL
 };
@@ -1007,11 +1003,28 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
        edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
        err = device_add(&mci->dev);
        if (err < 0) {
+               edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
                bus_unregister(&mci->bus);
                kfree(mci->bus.name);
                return err;
        }
 
+       if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
+               if (mci->get_sdram_scrub_rate) {
+                       dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
+                       dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
+               }
+               if (mci->set_sdram_scrub_rate) {
+                       dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
+                       dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
+               }
+               err = device_create_file(&mci->dev,
+                                        &dev_attr_sdram_scrub_rate);
+               if (err) {
+                       edac_dbg(1, "failure: create sdram_scrub_rate\n");
+                       goto fail2;
+               }
+       }
        /*
         * Create the dimm/rank devices
         */
@@ -1056,6 +1069,7 @@ fail:
                        continue;
                device_unregister(&dimm->dev);
        }
+fail2:
        device_unregister(&mci->dev);
        bus_unregister(&mci->bus);
        kfree(mci->bus.name);
index 12c951a..a66941f 100644 (file)
@@ -146,7 +146,7 @@ static void __exit edac_exit(void)
 /*
  * Inform the kernel of our entry and exit points
  */
-module_init(edac_init);
+subsys_initcall(edac_init);
 module_exit(edac_exit);
 
 MODULE_LICENSE("GPL");
index 0056c4d..e8658e4 100644 (file)
@@ -429,8 +429,8 @@ static void edac_pci_main_kobj_teardown(void)
        if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
                edac_dbg(0, "called kobject_put on main kobj\n");
                kobject_put(edac_pci_top_main_kobj);
+               edac_put_sysfs_subsys();
        }
-       edac_put_sysfs_subsys();
 }
 
 /*
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
new file mode 100644 (file)
index 0000000..bb53467
--- /dev/null
@@ -0,0 +1,537 @@
+/*
+ * GHES/EDAC Linux driver
+ *
+ * This file may be distributed under the terms of the GNU General Public
+ * License version 2.
+ *
+ * Copyright (c) 2013 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <acpi/ghes.h>
+#include <linux/edac.h>
+#include <linux/dmi.h>
+#include "edac_core.h"
+#include <ras/ras_event.h>
+
+#define GHES_EDAC_REVISION " Ver: 1.0.0"
+
+struct ghes_edac_pvt {
+       struct list_head list;
+       struct ghes *ghes;
+       struct mem_ctl_info *mci;
+
+       /* Buffers for the error handling routine */
+       char detail_location[240];
+       char other_detail[160];
+       char msg[80];
+};
+
+static LIST_HEAD(ghes_reglist);
+static DEFINE_MUTEX(ghes_edac_lock);
+static int ghes_edac_mc_num;
+
+
+/* Memory Device - Type 17 of SMBIOS spec */
+struct memdev_dmi_entry {
+       u8 type;
+       u8 length;
+       u16 handle;
+       u16 phys_mem_array_handle;
+       u16 mem_err_info_handle;
+       u16 total_width;
+       u16 data_width;
+       u16 size;
+       u8 form_factor;
+       u8 device_set;
+       u8 device_locator;
+       u8 bank_locator;
+       u8 memory_type;
+       u16 type_detail;
+       u16 speed;
+       u8 manufacturer;
+       u8 serial_number;
+       u8 asset_tag;
+       u8 part_number;
+       u8 attributes;
+       u32 extended_size;
+       u16 conf_mem_clk_speed;
+} __attribute__((__packed__));
+
+struct ghes_edac_dimm_fill {
+       struct mem_ctl_info *mci;
+       unsigned count;
+};
+
+char *memory_type[] = {
+       [MEM_EMPTY] = "EMPTY",
+       [MEM_RESERVED] = "RESERVED",
+       [MEM_UNKNOWN] = "UNKNOWN",
+       [MEM_FPM] = "FPM",
+       [MEM_EDO] = "EDO",
+       [MEM_BEDO] = "BEDO",
+       [MEM_SDR] = "SDR",
+       [MEM_RDR] = "RDR",
+       [MEM_DDR] = "DDR",
+       [MEM_RDDR] = "RDDR",
+       [MEM_RMBS] = "RMBS",
+       [MEM_DDR2] = "DDR2",
+       [MEM_FB_DDR2] = "FB_DDR2",
+       [MEM_RDDR2] = "RDDR2",
+       [MEM_XDR] = "XDR",
+       [MEM_DDR3] = "DDR3",
+       [MEM_RDDR3] = "RDDR3",
+};
+
+static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
+{
+       int *num_dimm = arg;
+
+       if (dh->type == DMI_ENTRY_MEM_DEVICE)
+               (*num_dimm)++;
+}
+
+static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
+{
+       struct ghes_edac_dimm_fill *dimm_fill = arg;
+       struct mem_ctl_info *mci = dimm_fill->mci;
+
+       if (dh->type == DMI_ENTRY_MEM_DEVICE) {
+               struct memdev_dmi_entry *entry = (struct memdev_dmi_entry *)dh;
+               struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+                                                      mci->n_layers,
+                                                      dimm_fill->count, 0, 0);
+
+               if (entry->size == 0xffff) {
+                       pr_info("Can't get DIMM%i size\n",
+                               dimm_fill->count);
+                       dimm->nr_pages = MiB_TO_PAGES(32);/* Unknown */
+               } else if (entry->size == 0x7fff) {
+                       dimm->nr_pages = MiB_TO_PAGES(entry->extended_size);
+               } else {
+                       if (entry->size & 1 << 15)
+                               dimm->nr_pages = MiB_TO_PAGES((entry->size &
+                                                              0x7fff) << 10);
+                       else
+                               dimm->nr_pages = MiB_TO_PAGES(entry->size);
+               }
+
+               switch (entry->memory_type) {
+               case 0x12:
+                       if (entry->type_detail & 1 << 13)
+                               dimm->mtype = MEM_RDDR;
+                       else
+                               dimm->mtype = MEM_DDR;
+                       break;
+               case 0x13:
+                       if (entry->type_detail & 1 << 13)
+                               dimm->mtype = MEM_RDDR2;
+                       else
+                               dimm->mtype = MEM_DDR2;
+                       break;
+               case 0x14:
+                       dimm->mtype = MEM_FB_DDR2;
+                       break;
+               case 0x18:
+                       if (entry->type_detail & 1 << 13)
+                               dimm->mtype = MEM_RDDR3;
+                       else
+                               dimm->mtype = MEM_DDR3;
+                       break;
+               default:
+                       if (entry->type_detail & 1 << 6)
+                               dimm->mtype = MEM_RMBS;
+                       else if ((entry->type_detail & ((1 << 7) | (1 << 13)))
+                                == ((1 << 7) | (1 << 13)))
+                               dimm->mtype = MEM_RDR;
+                       else if (entry->type_detail & 1 << 7)
+                               dimm->mtype = MEM_SDR;
+                       else if (entry->type_detail & 1 << 9)
+                               dimm->mtype = MEM_EDO;
+                       else
+                               dimm->mtype = MEM_UNKNOWN;
+               }
+
+               /*
+                * Actually, we can only detect if the memory has bits for
+                * checksum or not
+                */
+               if (entry->total_width == entry->data_width)
+                       dimm->edac_mode = EDAC_NONE;
+               else
+                       dimm->edac_mode = EDAC_SECDED;
+
+               dimm->dtype = DEV_UNKNOWN;
+               dimm->grain = 128;              /* Likely, worse case */
+
+               /*
+                * FIXME: It shouldn't be hard to also fill the DIMM labels
+                */
+
+               if (dimm->nr_pages) {
+                       edac_dbg(1, "DIMM%i: %s size = %d MB%s\n",
+                               dimm_fill->count, memory_type[dimm->mtype],
+                               PAGES_TO_MiB(dimm->nr_pages),
+                               (dimm->edac_mode != EDAC_NONE) ? "(ECC)" : "");
+                       edac_dbg(2, "\ttype %d, detail 0x%02x, width %d(total %d)\n",
+                               entry->memory_type, entry->type_detail,
+                               entry->total_width, entry->data_width);
+               }
+
+               dimm_fill->count++;
+       }
+}
+
+void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
+                               struct cper_sec_mem_err *mem_err)
+{
+       enum hw_event_mc_err_type type;
+       struct edac_raw_error_desc *e;
+       struct mem_ctl_info *mci;
+       struct ghes_edac_pvt *pvt = NULL;
+       char *p;
+       u8 grain_bits;
+
+       list_for_each_entry(pvt, &ghes_reglist, list) {
+               if (ghes == pvt->ghes)
+                       break;
+       }
+       if (!pvt) {
+               pr_err("Internal error: Can't find EDAC structure\n");
+               return;
+       }
+       mci = pvt->mci;
+       e = &mci->error_desc;
+
+       /* Cleans the error report buffer */
+       memset(e, 0, sizeof (*e));
+       e->error_count = 1;
+       strcpy(e->label, "unknown label");
+       e->msg = pvt->msg;
+       e->other_detail = pvt->other_detail;
+       e->top_layer = -1;
+       e->mid_layer = -1;
+       e->low_layer = -1;
+       *pvt->other_detail = '\0';
+       *pvt->msg = '\0';
+
+       switch (sev) {
+       case GHES_SEV_CORRECTED:
+               type = HW_EVENT_ERR_CORRECTED;
+               break;
+       case GHES_SEV_RECOVERABLE:
+               type = HW_EVENT_ERR_UNCORRECTED;
+               break;
+       case GHES_SEV_PANIC:
+               type = HW_EVENT_ERR_FATAL;
+               break;
+       default:
+       case GHES_SEV_NO:
+               type = HW_EVENT_ERR_INFO;
+       }
+
+       edac_dbg(1, "error validation_bits: 0x%08llx\n",
+                (long long)mem_err->validation_bits);
+
+       /* Error type, mapped on e->msg */
+       if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+               p = pvt->msg;
+               switch (mem_err->error_type) {
+               case 0:
+                       p += sprintf(p, "Unknown");
+                       break;
+               case 1:
+                       p += sprintf(p, "No error");
+                       break;
+               case 2:
+                       p += sprintf(p, "Single-bit ECC");
+                       break;
+               case 3:
+                       p += sprintf(p, "Multi-bit ECC");
+                       break;
+               case 4:
+                       p += sprintf(p, "Single-symbol ChipKill ECC");
+                       break;
+               case 5:
+                       p += sprintf(p, "Multi-symbol ChipKill ECC");
+                       break;
+               case 6:
+                       p += sprintf(p, "Master abort");
+                       break;
+               case 7:
+                       p += sprintf(p, "Target abort");
+                       break;
+               case 8:
+                       p += sprintf(p, "Parity Error");
+                       break;
+               case 9:
+                       p += sprintf(p, "Watchdog timeout");
+                       break;
+               case 10:
+                       p += sprintf(p, "Invalid address");
+                       break;
+               case 11:
+                       p += sprintf(p, "Mirror Broken");
+                       break;
+               case 12:
+                       p += sprintf(p, "Memory Sparing");
+                       break;
+               case 13:
+                       p += sprintf(p, "Scrub corrected error");
+                       break;
+               case 14:
+                       p += sprintf(p, "Scrub uncorrected error");
+                       break;
+               case 15:
+                       p += sprintf(p, "Physical Memory Map-out event");
+                       break;
+               default:
+                       p += sprintf(p, "reserved error (%d)",
+                                    mem_err->error_type);
+               }
+       } else {
+               strcpy(pvt->msg, "unknown error");
+       }
+
+       /* Error address */
+       if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+               e->page_frame_number = mem_err->physical_addr >> PAGE_SHIFT;
+               e->offset_in_page = mem_err->physical_addr & ~PAGE_MASK;
+       }
+
+       /* Error grain */
+       if (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK) {
+               e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
+       }
+
+       /* Memory error location, mapped on e->location */
+       p = e->location;
+       if (mem_err->validation_bits & CPER_MEM_VALID_NODE)
+               p += sprintf(p, "node:%d ", mem_err->node);
+       if (mem_err->validation_bits & CPER_MEM_VALID_CARD)
+               p += sprintf(p, "card:%d ", mem_err->card);
+       if (mem_err->validation_bits & CPER_MEM_VALID_MODULE)
+               p += sprintf(p, "module:%d ", mem_err->module);
+       if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
+               p += sprintf(p, "bank:%d ", mem_err->bank);
+       if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
+               p += sprintf(p, "row:%d ", mem_err->row);
+       if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
+               p += sprintf(p, "col:%d ", mem_err->column);
+       if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+               p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
+       if (p > e->location)
+               *(p - 1) = '\0';
+
+       /* All other fields are mapped on e->other_detail */
+       p = pvt->other_detail;
+       if (mem_err->validation_bits & CPER_MEM_VALID_ERROR_STATUS) {
+               u64 status = mem_err->error_status;
+
+               p += sprintf(p, "status(0x%016llx): ", (long long)status);
+               switch ((status >> 8) & 0xff) {
+               case 1:
+                       p += sprintf(p, "Error detected internal to the component ");
+                       break;
+               case 16:
+                       p += sprintf(p, "Error detected in the bus ");
+                       break;
+               case 4:
+                       p += sprintf(p, "Storage error in DRAM memory ");
+                       break;
+               case 5:
+                       p += sprintf(p, "Storage error in TLB ");
+                       break;
+               case 6:
+                       p += sprintf(p, "Storage error in cache ");
+                       break;
+               case 7:
+                       p += sprintf(p, "Error in one or more functional units ");
+                       break;
+               case 8:
+                       p += sprintf(p, "component failed self test ");
+                       break;
+               case 9:
+                       p += sprintf(p, "Overflow or undervalue of internal queue ");
+                       break;
+               case 17:
+                       p += sprintf(p, "Virtual address not found on IO-TLB or IO-PDIR ");
+                       break;
+               case 18:
+                       p += sprintf(p, "Improper access error ");
+                       break;
+               case 19:
+                       p += sprintf(p, "Access to a memory address which is not mapped to any component ");
+                       break;
+               case 20:
+                       p += sprintf(p, "Loss of Lockstep ");
+                       break;
+               case 21:
+                       p += sprintf(p, "Response not associated with a request ");
+                       break;
+               case 22:
+                       p += sprintf(p, "Bus parity error - must also set the A, C, or D Bits ");
+                       break;
+               case 23:
+                       p += sprintf(p, "Detection of a PATH_ERROR ");
+                       break;
+               case 25:
+                       p += sprintf(p, "Bus operation timeout ");
+                       break;
+               case 26:
+                       p += sprintf(p, "A read was issued to data that has been poisoned ");
+                       break;
+               default:
+                       p += sprintf(p, "reserved ");
+                       break;
+               }
+       }
+       if (mem_err->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+               p += sprintf(p, "requestorID: 0x%016llx ",
+                            (long long)mem_err->requestor_id);
+       if (mem_err->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+               p += sprintf(p, "responderID: 0x%016llx ",
+                            (long long)mem_err->responder_id);
+       if (mem_err->validation_bits & CPER_MEM_VALID_TARGET_ID)
+               p += sprintf(p, "targetID: 0x%016llx ",
+                            (long long)mem_err->responder_id);
+       if (p > pvt->other_detail)
+               *(p - 1) = '\0';
+
+       /* Generate the trace event */
+       grain_bits = fls_long(e->grain);
+       sprintf(pvt->detail_location, "APEI location: %s %s",
+               e->location, e->other_detail);
+       trace_mc_event(type, e->msg, e->label, e->error_count,
+                      mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
+                      PAGES_TO_MiB(e->page_frame_number) | e->offset_in_page,
+                      grain_bits, e->syndrome, pvt->detail_location);
+
+       /* Report the error via EDAC API */
+       edac_raw_mc_handle_error(type, mci, e);
+}
+EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);
+
+int ghes_edac_register(struct ghes *ghes, struct device *dev)
+{
+       bool fake = false;
+       int rc, num_dimm = 0;
+       struct mem_ctl_info *mci;
+       struct edac_mc_layer layers[1];
+       struct ghes_edac_pvt *pvt;
+       struct ghes_edac_dimm_fill dimm_fill;
+
+       /* Get the number of DIMMs */
+       dmi_walk(ghes_edac_count_dimms, &num_dimm);
+
+       /* Check if we've got a bogus BIOS */
+       if (num_dimm == 0) {
+               fake = true;
+               num_dimm = 1;
+       }
+
+       layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+       layers[0].size = num_dimm;
+       layers[0].is_virt_csrow = true;
+
+       /*
+        * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
+        * to avoid duplicated memory controller numbers
+        */
+       mutex_lock(&ghes_edac_lock);
+       mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
+                           sizeof(*pvt));
+       if (!mci) {
+               pr_info("Can't allocate memory for EDAC data\n");
+               mutex_unlock(&ghes_edac_lock);
+               return -ENOMEM;
+       }
+
+       pvt = mci->pvt_info;
+       memset(pvt, 0, sizeof(*pvt));
+       list_add_tail(&pvt->list, &ghes_reglist);
+       pvt->ghes = ghes;
+       pvt->mci  = mci;
+       mci->pdev = dev;
+
+       mci->mtype_cap = MEM_FLAG_EMPTY;
+       mci->edac_ctl_cap = EDAC_FLAG_NONE;
+       mci->edac_cap = EDAC_FLAG_NONE;
+       mci->mod_name = "ghes_edac.c";
+       mci->mod_ver = GHES_EDAC_REVISION;
+       mci->ctl_name = "ghes_edac";
+       mci->dev_name = "ghes";
+
+       if (!ghes_edac_mc_num) {
+               if (!fake) {
+                       pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
+                       pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
+                       pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
+                       pr_info("If you find incorrect reports, please contact your hardware vendor\n");
+                       pr_info("to correct its BIOS.\n");
+                       pr_info("This system has %d DIMM sockets.\n",
+                               num_dimm);
+               } else {
+                       pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
+                       pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
+                       pr_info("work on such system. Use this driver with caution\n");
+               }
+       }
+
+       if (!fake) {
+               /*
+                * Fill DIMM info from DMI for the memory controller #0
+                *
+                * Keep it in blank for the other memory controllers, as
+                * there's no reliable way to properly credit each DIMM to
+                * the memory controller, as different BIOSes fill the
+                * DMI bank location fields in different ways
+                */
+               if (!ghes_edac_mc_num) {
+                       dimm_fill.count = 0;
+                       dimm_fill.mci = mci;
+                       dmi_walk(ghes_edac_dmidecode, &dimm_fill);
+               }
+       } else {
+               struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+                                                      mci->n_layers, 0, 0, 0);
+
+               dimm->nr_pages = 1;
+               dimm->grain = 128;
+               dimm->mtype = MEM_UNKNOWN;
+               dimm->dtype = DEV_UNKNOWN;
+               dimm->edac_mode = EDAC_SECDED;
+       }
+
+       rc = edac_mc_add_mc(mci);
+       if (rc < 0) {
+               pr_info("Can't register at EDAC core\n");
+               edac_mc_free(mci);
+               mutex_unlock(&ghes_edac_lock);
+               return -ENODEV;
+       }
+
+       ghes_edac_mc_num++;
+       mutex_unlock(&ghes_edac_lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ghes_edac_register);
+
+void ghes_edac_unregister(struct ghes *ghes)
+{
+       struct mem_ctl_info *mci;
+       struct ghes_edac_pvt *pvt, *tmp;
+
+       list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
+               if (ghes == pvt->ghes) {
+                       mci = pvt->mci;
+                       edac_mc_del_mc(mci->pdev);
+                       edac_mc_free(mci);
+                       list_del(&pvt->list);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(ghes_edac_unregister);
index 4e83376..aa44c17 100644 (file)
@@ -106,16 +106,26 @@ static int nr_channels;
 
 static int how_many_channels(struct pci_dev *pdev)
 {
+       int n_channels;
+
        unsigned char capid0_8b; /* 8th byte of CAPID0 */
 
        pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
+
        if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
                edac_dbg(0, "In single channel mode\n");
-               return 1;
+               n_channels = 1;
        } else {
                edac_dbg(0, "In dual channel mode\n");
-               return 2;
+               n_channels = 2;
        }
+
+       if (capid0_8b & 0x10) /* check if both channels are filled */
+               edac_dbg(0, "2 DIMMS per channel disabled\n");
+       else
+               edac_dbg(0, "2 DIMMS per channel enabled\n");
+
+       return n_channels;
 }
 
 static unsigned long eccerrlog_syndrome(u64 log)
@@ -290,6 +300,8 @@ static void i3200_get_drbs(void __iomem *window,
        for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
                drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
                drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;
+
+               edac_dbg(0, "drb[0][%d] = %d, drb[1][%d] = %d\n", i, drbs[0][i], i, drbs[1][i]);
        }
 }
 
@@ -311,6 +323,9 @@ static unsigned long drb_to_nr_pages(
        int n;
 
        n = drbs[channel][rank];
+       if (!n)
+               return 0;
+
        if (rank > 0)
                n -= drbs[channel][rank - 1];
        if (stacked && (channel == 1) &&
@@ -377,19 +392,19 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
         * cumulative; the last one will contain the total memory
         * contained in all ranks.
         */
-       for (i = 0; i < mci->nr_csrows; i++) {
+       for (i = 0; i < I3200_DIMMS; i++) {
                unsigned long nr_pages;
-               struct csrow_info *csrow = mci->csrows[i];
 
-               nr_pages = drb_to_nr_pages(drbs, stacked,
-                       i / I3200_RANKS_PER_CHANNEL,
-                       i % I3200_RANKS_PER_CHANNEL);
+               for (j = 0; j < nr_channels; j++) {
+                       struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+                                                              mci->n_layers, i, j, 0);
 
-               if (nr_pages == 0)
-                       continue;
+                       nr_pages = drb_to_nr_pages(drbs, stacked, j, i);
+                       if (nr_pages == 0)
+                               continue;
 
-               for (j = 0; j < nr_channels; j++) {
-                       struct dimm_info *dimm = csrow->channels[j]->dimm;
+                       edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j,
+                                stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));
 
                        dimm->nr_pages = nr_pages;
                        dimm->grain = nr_pages << PAGE_SHIFT;
index d6955b2..1b63517 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/edac.h>
 #include <linux/delay.h>
 #include <linux/mmzone.h>
+#include <linux/debugfs.h>
 
 #include "edac_core.h"
 
                        I5100_FERR_NF_MEM_M1ERR_MASK)
 #define        I5100_NERR_NF_MEM       0xa4    /* MC Next Non-Fatal Errors */
 #define I5100_EMASK_MEM                0xa8    /* MC Error Mask Register */
+#define I5100_MEM0EINJMSK0     0x200   /* Injection Mask0 Register Channel 0 */
+#define I5100_MEM1EINJMSK0     0x208   /* Injection Mask0 Register Channel 1 */
+#define                I5100_MEMXEINJMSK0_EINJEN       (1 << 27)
+#define I5100_MEM0EINJMSK1     0x204   /* Injection Mask1 Register Channel 0 */
+#define I5100_MEM1EINJMSK1     0x206   /* Injection Mask1 Register Channel 1 */
+
+/* Device 19, Function 0 */
+#define I5100_DINJ0 0x9a
 
 /* device 21 and 22, func 0 */
 #define I5100_MTR_0    0x154   /* Memory Technology Registers 0-3 */
@@ -338,13 +347,26 @@ struct i5100_priv {
        unsigned ranksperchan;  /* number of ranks per channel */
 
        struct pci_dev *mc;     /* device 16 func 1 */
+       struct pci_dev *einj;   /* device 19 func 0 */
        struct pci_dev *ch0mm;  /* device 21 func 0 */
        struct pci_dev *ch1mm;  /* device 22 func 0 */
 
        struct delayed_work i5100_scrubbing;
        int scrub_enable;
+
+       /* Error injection */
+       u8 inject_channel;
+       u8 inject_hlinesel;
+       u8 inject_deviceptr1;
+       u8 inject_deviceptr2;
+       u16 inject_eccmask1;
+       u16 inject_eccmask2;
+
+       struct dentry *debugfs;
 };
 
+static struct dentry *i5100_debugfs;
+
 /* map a rank/chan to a slot number on the mainboard */
 static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
                              int chan, int rank)
@@ -863,13 +885,126 @@ static void i5100_init_csrows(struct mem_ctl_info *mci)
        }
 }
 
+/****************************************************************************
+ *                       Error injection routines
+ ****************************************************************************/
+
+static void i5100_do_inject(struct mem_ctl_info *mci)
+{
+       struct i5100_priv *priv = mci->pvt_info;
+       u32 mask0;
+       u16 mask1;
+
+       /* MEM[1:0]EINJMSK0
+        * 31    - ADDRMATCHEN
+        * 29:28 - HLINESEL
+        *         00 Reserved
+        *         01 Lower half of cache line
+        *         10 Upper half of cache line
+        *         11 Both upper and lower parts of cache line
+        * 27    - EINJEN
+        * 25:19 - XORMASK1 for deviceptr1
+        * 9:5   - SEC2RAM for deviceptr2
+        * 4:0   - FIR2RAM for deviceptr1
+        */
+       mask0 = ((priv->inject_hlinesel & 0x3) << 28) |
+               I5100_MEMXEINJMSK0_EINJEN |
+               ((priv->inject_eccmask1 & 0xffff) << 10) |
+               ((priv->inject_deviceptr2 & 0x1f) << 5) |
+               (priv->inject_deviceptr1 & 0x1f);
+
+       /* MEM[1:0]EINJMSK1
+        * 15:0  - XORMASK2 for deviceptr2
+        */
+       mask1 = priv->inject_eccmask2;
+
+       if (priv->inject_channel == 0) {
+               pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0);
+               pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1);
+       } else {
+               pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0);
+               pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1);
+       }
+
+       /* Error Injection Response Function
+        * Intel 5100 Memory Controller Hub Chipset (318378) datasheet
+        * hints at this register but carries no data about it. All
+        * data regarding device 19 is based on experimentation and the
+        * Intel 7300 Chipset Memory Controller Hub (318082) datasheet
+        * which appears to be accurate for the i5100 in this area.
+        *
+        * The injection code doesn't work without setting this register.
+        * The register needs to be flipped off then on, else the hardware
+        * will only perform the first injection.
+        *
+        * Stop condition bits 7:4
+        * 1010 - Stop after one injection
+        * 1011 - Never stop injecting faults
+        *
+        * Start condition bits 3:0
+        * 1010 - Never start
+        * 1011 - Start immediately
+        */
+       pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa);
+       pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab);
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+static ssize_t inject_enable_write(struct file *file, const char __user *data,
+               size_t count, loff_t *ppos)
+{
+       struct device *dev = file->private_data;
+       struct mem_ctl_info *mci = to_mci(dev);
+
+       i5100_do_inject(mci);
+
+       return count;
+}
+
+static const struct file_operations i5100_inject_enable_fops = {
+       .open = simple_open,
+       .write = inject_enable_write,
+       .llseek = generic_file_llseek,
+};
+
+static int i5100_setup_debugfs(struct mem_ctl_info *mci)
+{
+       struct i5100_priv *priv = mci->pvt_info;
+
+       if (!i5100_debugfs)
+               return -ENODEV;
+
+       priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs);
+
+       if (!priv->debugfs)
+               return -ENOMEM;
+
+       debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
+                       &priv->inject_channel);
+       debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
+                       &priv->inject_hlinesel);
+       debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
+                       &priv->inject_deviceptr1);
+       debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
+                       &priv->inject_deviceptr2);
+       debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
+                       &priv->inject_eccmask1);
+       debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
+                       &priv->inject_eccmask2);
+       debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
+                       &mci->dev, &i5100_inject_enable_fops);
+
+       return 0;
+
+}
+
 static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int rc;
        struct mem_ctl_info *mci;
        struct edac_mc_layer layers[2];
        struct i5100_priv *priv;
-       struct pci_dev *ch0mm, *ch1mm;
+       struct pci_dev *ch0mm, *ch1mm, *einj;
        int ret = 0;
        u32 dw;
        int ranksperch;
@@ -941,6 +1076,22 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto bail_disable_ch1;
        }
 
+
+       /* device 19, func 0, Error injection */
+       einj = pci_get_device_func(PCI_VENDOR_ID_INTEL,
+                                   PCI_DEVICE_ID_INTEL_5100_19, 0);
+       if (!einj) {
+               ret = -ENODEV;
+               goto bail_einj;
+       }
+
+       rc = pci_enable_device(einj);
+       if (rc < 0) {
+               ret = rc;
+               goto bail_disable_einj;
+       }
+
+
        mci->pdev = &pdev->dev;
 
        priv = mci->pvt_info;
@@ -948,6 +1099,7 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        priv->mc = pdev;
        priv->ch0mm = ch0mm;
        priv->ch1mm = ch1mm;
+       priv->einj = einj;
 
        INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
 
@@ -975,6 +1127,13 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
        mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
 
+       priv->inject_channel = 0;
+       priv->inject_hlinesel = 0;
+       priv->inject_deviceptr1 = 0;
+       priv->inject_deviceptr2 = 0;
+       priv->inject_eccmask1 = 0;
+       priv->inject_eccmask2 = 0;
+
        i5100_init_csrows(mci);
 
        /* this strange construction seems to be in every driver, dunno why */
@@ -992,6 +1151,8 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto bail_scrub;
        }
 
+       i5100_setup_debugfs(mci);
+
        return ret;
 
 bail_scrub:
@@ -999,6 +1160,12 @@ bail_scrub:
        cancel_delayed_work_sync(&(priv->i5100_scrubbing));
        edac_mc_free(mci);
 
+bail_disable_einj:
+       pci_disable_device(einj);
+
+bail_einj:
+       pci_dev_put(einj);
+
 bail_disable_ch1:
        pci_disable_device(ch1mm);
 
@@ -1030,14 +1197,18 @@ static void i5100_remove_one(struct pci_dev *pdev)
 
        priv = mci->pvt_info;
 
+       debugfs_remove_recursive(priv->debugfs);
+
        priv->scrub_enable = 0;
        cancel_delayed_work_sync(&(priv->i5100_scrubbing));
 
        pci_disable_device(pdev);
        pci_disable_device(priv->ch0mm);
        pci_disable_device(priv->ch1mm);
+       pci_disable_device(priv->einj);
        pci_dev_put(priv->ch0mm);
        pci_dev_put(priv->ch1mm);
+       pci_dev_put(priv->einj);
 
        edac_mc_free(mci);
 }
@@ -1060,13 +1231,16 @@ static int __init i5100_init(void)
 {
        int pci_rc;
 
-       pci_rc = pci_register_driver(&i5100_driver);
+       i5100_debugfs = debugfs_create_dir("i5100_edac", NULL);
 
+       pci_rc = pci_register_driver(&i5100_driver);
        return (pci_rc < 0) ? pci_rc : 0;
 }
 
 static void __exit i5100_exit(void)
 {
+       debugfs_remove(i5100_debugfs);
+
        pci_unregister_driver(&i5100_driver);
 }
 
index e213d03..0ec3e95 100644 (file)
@@ -420,21 +420,21 @@ static inline int numdimms(u32 dimms)
 
 static inline int numrank(u32 rank)
 {
-       static int ranks[4] = { 1, 2, 4, -EINVAL };
+       static const int ranks[] = { 1, 2, 4, -EINVAL };
 
        return ranks[rank & 0x3];
 }
 
 static inline int numbank(u32 bank)
 {
-       static int banks[4] = { 4, 8, 16, -EINVAL };
+       static const int banks[] = { 4, 8, 16, -EINVAL };
 
        return banks[bank & 0x3];
 }
 
 static inline int numrow(u32 row)
 {
-       static int rows[8] = {
+       static const int rows[] = {
                1 << 12, 1 << 13, 1 << 14, 1 << 15,
                1 << 16, -EINVAL, -EINVAL, -EINVAL,
        };
@@ -444,7 +444,7 @@ static inline int numrow(u32 row)
 
 static inline int numcol(u32 col)
 {
-       static int cols[8] = {
+       static const int cols[] = {
                1 << 10, 1 << 11, 1 << 12, -EINVAL,
        };
        return cols[col & 0x3];
index da7e298..57244f9 100644 (file)
@@ -639,7 +639,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
        tmp_mb = (1 + pvt->tohm) >> 20;
 
        mb = div_u64_rem(tmp_mb, 1000, &kb);
-       edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm);
+       edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
 
        /*
         * Step 2) Get SAD range and SAD Interleave list
index f8d2287..27ac423 100644 (file)
@@ -487,27 +487,28 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
 {
+       bool preload = gfp_mask & __GFP_WAIT;
        unsigned long flags;
        int ret;
 
- retry:
-       if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
-               return -ENOMEM;
-
+       if (preload)
+               idr_preload(gfp_mask);
        spin_lock_irqsave(&client->lock, flags);
+
        if (client->in_shutdown)
                ret = -ECANCELED;
        else
-               ret = idr_get_new(&client->resource_idr, resource,
-                                 &resource->handle);
+               ret = idr_alloc(&client->resource_idr, resource, 0, 0,
+                               GFP_NOWAIT);
        if (ret >= 0) {
+               resource->handle = ret;
                client_get(client);
                schedule_if_iso_resource(resource);
        }
-       spin_unlock_irqrestore(&client->lock, flags);
 
-       if (ret == -EAGAIN)
-               goto retry;
+       spin_unlock_irqrestore(&client->lock, flags);
+       if (preload)
+               idr_preload_end();
 
        return ret < 0 ? ret : 0;
 }
@@ -1779,7 +1780,6 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
        wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
 
        idr_for_each(&client->resource_idr, shutdown_resource, client);
-       idr_remove_all(&client->resource_idr);
        idr_destroy(&client->resource_idr);
 
        list_for_each_entry_safe(event, next_event, &client->event_list, link)
index 3873d53..03ce7d9 100644 (file)
@@ -1017,12 +1017,11 @@ static void fw_device_init(struct work_struct *work)
 
        fw_device_get(device);
        down_write(&fw_device_rwsem);
-       ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
-             idr_get_new(&fw_device_idr, device, &minor) :
-             -ENOMEM;
+       minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
+                       GFP_KERNEL);
        up_write(&fw_device_rwsem);
 
-       if (ret < 0)
+       if (minor < 0)
                goto error;
 
        device->device.bus = &fw_bus_type;
index fed08b6..7320bf8 100644 (file)
@@ -79,6 +79,7 @@
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/pstore.h>
+#include <linux/ctype.h>
 
 #include <linux/fs.h>
 #include <linux/ramfs.h>
@@ -908,6 +909,48 @@ static struct inode *efivarfs_get_inode(struct super_block *sb,
        return inode;
 }
 
+/*
+ * Return true if 'str' is a valid efivarfs filename of the form,
+ *
+ *     VariableName-12345678-1234-1234-1234-1234567891bc
+ */
+static bool efivarfs_valid_name(const char *str, int len)
+{
+       static const char dashes[GUID_LEN] = {
+               [8] = 1, [13] = 1, [18] = 1, [23] = 1
+       };
+       const char *s = str + len - GUID_LEN;
+       int i;
+
+       /*
+        * We need a GUID, plus at least one letter for the variable name,
+        * plus the '-' separator
+        */
+       if (len < GUID_LEN + 2)
+               return false;
+
+       /* GUID should be right after the first '-' */
+       if (s - 1 != strchr(str, '-'))
+               return false;
+
+       /*
+        * Validate that 's' is of the correct format, e.g.
+        *
+        *      12345678-1234-1234-1234-123456789abc
+        */
+       for (i = 0; i < GUID_LEN; i++) {
+               if (dashes[i]) {
+                       if (*s++ != '-')
+                               return false;
+               } else {
+                       if (!isxdigit(*s++))
+                               return false;
+               }
+       }
+
+       return true;
+}
+
 static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
 {
        guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
@@ -936,11 +979,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
        struct efivar_entry *var;
        int namelen, i = 0, err = 0;
 
-       /*
-        * We need a GUID, plus at least one letter for the variable name,
-        * plus the '-' separator
-        */
-       if (dentry->d_name.len < GUID_LEN + 2)
+       if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
                return -EINVAL;
 
        inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
@@ -1012,6 +1051,84 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
        return -EINVAL;
 };
 
+/*
+ * Compare two efivarfs file names.
+ *
+ * An efivarfs filename is composed of two parts,
+ *
+ *     1. A case-sensitive variable name
+ *     2. A case-insensitive GUID
+ *
+ * So we need to perform a case-sensitive match on part 1 and a
+ * case-insensitive match on part 2.
+ */
+static int efivarfs_d_compare(const struct dentry *parent, const struct inode *pinode,
+                             const struct dentry *dentry, const struct inode *inode,
+                             unsigned int len, const char *str,
+                             const struct qstr *name)
+{
+       int guid = len - GUID_LEN;
+
+       if (name->len != len)
+               return 1;
+
+       /* Case-sensitive compare for the variable name */
+       if (memcmp(str, name->name, guid))
+               return 1;
+
+       /* Case-insensitive compare for the GUID */
+       return strncasecmp(name->name + guid, str + guid, GUID_LEN);
+}
+
+static int efivarfs_d_hash(const struct dentry *dentry,
+                          const struct inode *inode, struct qstr *qstr)
+{
+       unsigned long hash = init_name_hash();
+       const unsigned char *s = qstr->name;
+       unsigned int len = qstr->len;
+
+       if (!efivarfs_valid_name(s, len))
+               return -EINVAL;
+
+       while (len-- > GUID_LEN)
+               hash = partial_name_hash(*s++, hash);
+
+       /* GUID is case-insensitive. */
+       while (len--)
+               hash = partial_name_hash(tolower(*s++), hash);
+
+       qstr->hash = end_name_hash(hash);
+       return 0;
+}
+
+/*
+ * Retaining negative dentries for an in-memory filesystem just wastes
+ * memory and lookup time: arrange for them to be deleted immediately.
+ */
+static int efivarfs_delete_dentry(const struct dentry *dentry)
+{
+       return 1;
+}
+
+static struct dentry_operations efivarfs_d_ops = {
+       .d_compare = efivarfs_d_compare,
+       .d_hash = efivarfs_d_hash,
+       .d_delete = efivarfs_delete_dentry,
+};
+
+static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
+{
+       struct qstr q;
+
+       q.name = name;
+       q.len = strlen(name);
+
+       if (efivarfs_d_hash(NULL, NULL, &q))
+               return NULL;
+
+       return d_alloc(parent, &q);
+}
+
 static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct inode *inode = NULL;
@@ -1027,6 +1144,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
        sb->s_magic             = EFIVARFS_MAGIC;
        sb->s_op                = &efivarfs_ops;
+       sb->s_d_op              = &efivarfs_d_ops;
        sb->s_time_gran         = 1;
 
        inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
@@ -1067,7 +1185,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
                if (!inode)
                        goto fail_name;
 
-               dentry = d_alloc_name(root, name);
+               dentry = efivarfs_alloc_dentry(root, name);
                if (!dentry)
                        goto fail_inode;
 
@@ -1084,7 +1202,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 
                mutex_lock(&inode->i_mutex);
                inode->i_private = entry;
-               i_size_write(inode, size+4);
+               i_size_write(inode, size + sizeof(entry->var.Attributes));
                mutex_unlock(&inode->i_mutex);
                d_add(dentry, inode);
        }
@@ -1117,8 +1235,20 @@ static struct file_system_type efivarfs_type = {
        .kill_sb = efivarfs_kill_sb,
 };
 
+/*
+ * Handle negative dentry.
+ */
+static struct dentry *efivarfs_lookup(struct inode *dir, struct dentry *dentry,
+                                     unsigned int flags)
+{
+       if (dentry->d_name.len > NAME_MAX)
+               return ERR_PTR(-ENAMETOOLONG);
+       d_add(dentry, NULL);
+       return NULL;
+}
+
 static const struct inode_operations efivarfs_dir_inode_operations = {
-       .lookup = simple_lookup,
+       .lookup = efivarfs_lookup,
        .unlink = efivarfs_unlink,
        .create = efivarfs_create,
 };
index 6f2306d..f9dbd50 100644 (file)
@@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
        return data & (1 << bit) ? 1 : 0;
 }
 
-static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
+static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
 {
-       return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
+       return ichx_priv.use_gpio & (1 << (nr / 32));
 }
 
 static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
index 4828fe7..c2534d6 100644 (file)
@@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
 static void gpiod_free(struct gpio_desc *desc);
 static int gpiod_direction_input(struct gpio_desc *desc);
 static int gpiod_direction_output(struct gpio_desc *desc, int value);
+static int gpiod_get_direction(const struct gpio_desc *desc);
 static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
-static int gpiod_get_value_cansleep(struct gpio_desc *desc);
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
 static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-static int gpiod_get_value(struct gpio_desc *desc);
+static int gpiod_get_value(const struct gpio_desc *desc);
 static void gpiod_set_value(struct gpio_desc *desc, int value);
-static int gpiod_cansleep(struct gpio_desc *desc);
-static int gpiod_to_irq(struct gpio_desc *desc);
+static int gpiod_cansleep(const struct gpio_desc *desc);
+static int gpiod_to_irq(const struct gpio_desc *desc);
 static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
 static int gpiod_export_link(struct device *dev, const char *name,
                             struct gpio_desc *desc);
@@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
        return 0;
 }
 
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
+static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
 {
-       return desc->chip;
+       return desc ? desc->chip : NULL;
 }
 
+/* caller holds gpio_lock *OR* gpio is marked as requested */
 struct gpio_chip *gpio_to_chip(unsigned gpio)
 {
        return gpiod_to_chip(gpio_to_desc(gpio));
@@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
 }
 
 /* caller ensures gpio is valid and requested, chip->get_direction may sleep  */
-static int gpiod_get_direction(struct gpio_desc *desc)
+static int gpiod_get_direction(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        unsigned                offset;
@@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
        if (status > 0) {
                /* GPIOF_DIR_IN, or other positive */
                status = 1;
-               clear_bit(FLAG_IS_OUT, &desc->flags);
+               /* FLAG_IS_OUT is just a cache of the result of get_direction(),
+                * so it does not affect constness per se */
+               clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
        }
        if (status == 0) {
                /* GPIOF_DIR_OUT */
-               set_bit(FLAG_IS_OUT, &desc->flags);
+               set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
        }
        return status;
 }
@@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
 static ssize_t gpio_direction_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct gpio_desc        *desc = dev_get_drvdata(dev);
+       const struct gpio_desc  *desc = dev_get_drvdata(dev);
        ssize_t                 status;
 
        mutex_lock(&sysfs_lock);
@@ -411,15 +414,10 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
                        goto err_out;
                }
 
-               do {
-                       ret = -ENOMEM;
-                       if (idr_pre_get(&dirent_idr, GFP_KERNEL))
-                               ret = idr_get_new_above(&dirent_idr, value_sd,
-                                                       1, &id);
-               } while (ret == -EAGAIN);
-
-               if (ret)
+               ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL);
+               if (ret < 0)
                        goto free_sd;
+               id = ret;
 
                desc->flags &= GPIO_FLAGS_MASK;
                desc->flags |= (unsigned long)id << ID_SHIFT;
@@ -659,6 +657,11 @@ static ssize_t export_store(struct class *class,
                goto done;
 
        desc = gpio_to_desc(gpio);
+       /* reject invalid GPIOs */
+       if (!desc) {
+               pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+               return -EINVAL;
+       }
 
        /* No extra locking here; FLAG_SYSFS just signifies that the
         * request and export were done by on behalf of userspace, so
@@ -695,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
        if (status < 0)
                goto done;
 
-       status = -EINVAL;
-
        desc = gpio_to_desc(gpio);
        /* reject bogus commands (gpio_unexport ignores them) */
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+               return -EINVAL;
+       }
+
+       status = -EINVAL;
 
        /* No extra locking here; FLAG_SYSFS just signifies that the
         * request and export were done by on behalf of userspace, so
@@ -851,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
 {
        int                     status = -EINVAL;
 
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
 
        mutex_lock(&sysfs_lock);
 
@@ -870,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
 
        mutex_unlock(&sysfs_lock);
 
-done:
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -901,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
        struct device           *dev = NULL;
        int                     status = -EINVAL;
 
-       if (!desc)
-               goto done;
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
 
        mutex_lock(&sysfs_lock);
 
@@ -919,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
 unlock:
        mutex_unlock(&sysfs_lock);
 
-done:
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -945,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
        struct device           *dev = NULL;
 
        if (!desc) {
-               status = -EINVAL;
-               goto done;
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return;
        }
 
        mutex_lock(&sysfs_lock);
@@ -967,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
                device_unregister(dev);
                put_device(dev);
        }
-done:
+
        if (status)
                pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
                         status);
@@ -1389,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        int                     status = -EPROBE_DEFER;
        unsigned long           flags;
 
-       spin_lock_irqsave(&gpio_lock, flags);
-
        if (!desc) {
-               status = -EINVAL;
-               goto done;
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
        }
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
        chip = desc->chip;
        if (chip == NULL)
                goto done;
@@ -1437,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
 done:
        if (status)
                pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
-                        desc ? desc_to_gpio(desc) : -1,
-                        label ? : "?", status);
+                        desc_to_gpio(desc), label ? : "?", status);
        spin_unlock_irqrestore(&gpio_lock, flags);
        return status;
 }
@@ -1621,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
        int                     status = -EINVAL;
        int                     offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->get || !chip->direction_input)
                goto fail;
@@ -1660,13 +1670,9 @@ lose:
        return status;
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
        return status;
 }
 
@@ -1683,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
        int                     status = -EINVAL;
        int offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        /* Open drain pin should not be driven to 1 */
        if (value && test_bit(FLAG_OPEN_DRAIN,  &desc->flags))
                return gpiod_direction_input(desc);
@@ -1693,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
 
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->set || !chip->direction_output)
                goto fail;
@@ -1730,13 +1739,9 @@ lose:
        return status;
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
        return status;
 }
 
@@ -1758,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
        int                     status = -EINVAL;
        int                     offset;
 
+       if (!desc) {
+               pr_warn("%s: invalid GPIO\n", __func__);
+               return -EINVAL;
+       }
+
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (!desc)
-               goto fail;
        chip = desc->chip;
        if (!chip || !chip->set || !chip->set_debounce)
                goto fail;
@@ -1781,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 
 fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       if (status) {
-               int gpio = -1;
-               if (desc)
-                       gpio = desc_to_gpio(desc);
-               pr_debug("%s: gpio-%d status %d\n",
-                       __func__, gpio, status);
-       }
+       if (status)
+               pr_debug("%s: gpio-%d status %d\n", __func__,
+                        desc_to_gpio(desc), status);
 
        return status;
 }
@@ -1829,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
  * It returns the zero or nonzero value provided by the associated
  * gpio_chip.get() method; or zero if no such method is provided.
  */
-static int gpiod_get_value(struct gpio_desc *desc)
+static int gpiod_get_value(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int value;
        int offset;
 
+       if (!desc)
+               return 0;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        /* Should be using gpio_get_value_cansleep() */
@@ -1917,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        struct gpio_chip        *chip;
 
+       if (!desc)
+               return;
        chip = desc->chip;
        /* Should be using gpio_set_value_cansleep() */
        WARN_ON(chip->can_sleep);
@@ -1943,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
  * This is used directly or indirectly to implement gpio_cansleep().  It
  * returns nonzero if access reading or writing the GPIO value can sleep.
  */
-static int gpiod_cansleep(struct gpio_desc *desc)
+static int gpiod_cansleep(const struct gpio_desc *desc)
 {
+       if (!desc)
+               return 0;
        /* only call this on GPIOs that are valid! */
        return desc->chip->can_sleep;
 }
@@ -1964,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
  * It returns the number of the IRQ signaled by this (input) GPIO,
  * or a negative errno.
  */
-static int gpiod_to_irq(struct gpio_desc *desc)
+static int gpiod_to_irq(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int                     offset;
 
+       if (!desc)
+               return -EINVAL;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
@@ -1985,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
  * Common examples include ones connected to I2C or SPI chips.
  */
 
-static int gpiod_get_value_cansleep(struct gpio_desc *desc)
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
 {
        struct gpio_chip        *chip;
        int value;
        int offset;
 
        might_sleep_if(extra_checks);
+       if (!desc)
+               return 0;
        chip = desc->chip;
        offset = gpio_chip_hwgpio(desc);
        value = chip->get ? chip->get(chip, offset) : 0;
@@ -2010,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
        struct gpio_chip        *chip;
 
        might_sleep_if(extra_checks);
+       if (!desc)
+               return;
        chip = desc->chip;
        trace_gpio_value(desc_to_gpio(desc), 0, value);
        if (test_bit(FLAG_OPEN_DRAIN,  &desc->flags))
index 45adf97..725968d 100644 (file)
@@ -74,24 +74,13 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
  */
 static int drm_ctxbitmap_next(struct drm_device * dev)
 {
-       int new_id;
        int ret;
 
-again:
-       if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
-               DRM_ERROR("Out of memory expanding drawable idr\n");
-               return -ENOMEM;
-       }
        mutex_lock(&dev->struct_mutex);
-       ret = idr_get_new_above(&dev->ctx_idr, NULL,
-                               DRM_RESERVED_CONTEXTS, &new_id);
+       ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
+                       GFP_KERNEL);
        mutex_unlock(&dev->struct_mutex);
-       if (ret == -EAGAIN)
-               goto again;
-       else if (ret)
-               return ret;
-
-       return new_id;
+       return ret;
 }
 
 /**
@@ -118,7 +107,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
 void drm_ctxbitmap_cleanup(struct drm_device * dev)
 {
        mutex_lock(&dev->struct_mutex);
-       idr_remove_all(&dev->ctx_idr);
+       idr_destroy(&dev->ctx_idr);
        mutex_unlock(&dev->struct_mutex);
 }
 
index 3bdf2a6..792c3e3 100644 (file)
@@ -266,32 +266,21 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
 static int drm_mode_object_get(struct drm_device *dev,
                               struct drm_mode_object *obj, uint32_t obj_type)
 {
-       int new_id = 0;
        int ret;
 
-again:
-       if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
-               DRM_ERROR("Ran out memory getting a mode number\n");
-               return -ENOMEM;
-       }
-
        mutex_lock(&dev->mode_config.idr_mutex);
-       ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
-
-       if (!ret) {
+       ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
+       if (ret >= 0) {
                /*
                 * Set up the object linking under the protection of the idr
                 * lock so that other users can't see inconsistent state.
                 */
-               obj->id = new_id;
+               obj->id = ret;
                obj->type = obj_type;
        }
        mutex_unlock(&dev->mode_config.idr_mutex);
 
-       if (ret == -EAGAIN)
-               goto again;
-
-       return ret;
+       return ret < 0 ? ret : 0;
 }
 
 /**
@@ -1272,7 +1261,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
                crtc->funcs->destroy(crtc);
        }
 
-       idr_remove_all(&dev->mode_config.crtc_idr);
        idr_destroy(&dev->mode_config.crtc_idr);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
index be174ca..25f91cd 100644 (file)
@@ -297,7 +297,6 @@ static void __exit drm_core_exit(void)
 
        unregister_chrdev(DRM_MAJOR, "drm");
 
-       idr_remove_all(&drm_minors_idr);
        idr_destroy(&drm_minors_idr);
 }
 
index 24efae4..af779ae 100644 (file)
@@ -270,21 +270,19 @@ drm_gem_handle_create(struct drm_file *file_priv,
        int ret;
 
        /*
-        * Get the user-visible handle using idr.
+        * Get the user-visible handle using idr.  Preload and perform
+        * allocation under our spinlock.
         */
-again:
-       /* ensure there is space available to allocate a handle */
-       if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
-               return -ENOMEM;
-
-       /* do the allocation under our spinlock */
+       idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);
-       ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
+
+       ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+
        spin_unlock(&file_priv->table_lock);
-       if (ret == -EAGAIN)
-               goto again;
-       else if (ret)
+       idr_preload_end();
+       if (ret < 0)
                return ret;
+       *handlep = ret;
 
        drm_gem_object_handle_reference(obj);
 
@@ -451,29 +449,25 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
-again:
-       if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
-               ret = -ENOMEM;
-               goto err;
-       }
-
+       idr_preload(GFP_KERNEL);
        spin_lock(&dev->object_name_lock);
        if (!obj->name) {
-               ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-                                       &obj->name);
+               ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+               obj->name = ret;
                args->name = (uint64_t) obj->name;
                spin_unlock(&dev->object_name_lock);
+               idr_preload_end();
 
-               if (ret == -EAGAIN)
-                       goto again;
-               else if (ret)
+               if (ret < 0)
                        goto err;
+               ret = 0;
 
                /* Allocate a reference for the name table.  */
                drm_gem_object_reference(obj);
        } else {
                args->name = (uint64_t) obj->name;
                spin_unlock(&dev->object_name_lock);
+               idr_preload_end();
                ret = 0;
        }
 
@@ -561,8 +555,6 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
-
-       idr_remove_all(&file_private->object_idr);
        idr_destroy(&file_private->object_idr);
 }
 
index 8025454..7e4bae7 100644 (file)
@@ -60,14 +60,13 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
        struct drm_hash_item *entry;
        struct hlist_head *h_list;
-       struct hlist_node *list;
        unsigned int hashed_key;
        int count = 0;
 
        hashed_key = hash_long(key, ht->order);
        DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
        h_list = &ht->table[hashed_key];
-       hlist_for_each_entry(entry, list, h_list, head)
+       hlist_for_each_entry(entry, h_list, head)
                DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
 }
 
@@ -76,14 +75,13 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
 {
        struct drm_hash_item *entry;
        struct hlist_head *h_list;
-       struct hlist_node *list;
        unsigned int hashed_key;
 
        hashed_key = hash_long(key, ht->order);
        h_list = &ht->table[hashed_key];
-       hlist_for_each_entry(entry, list, h_list, head) {
+       hlist_for_each_entry(entry, h_list, head) {
                if (entry->key == key)
-                       return list;
+                       return &entry->head;
                if (entry->key > key)
                        break;
        }
@@ -95,14 +93,13 @@ static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
 {
        struct drm_hash_item *entry;
        struct hlist_head *h_list;
-       struct hlist_node *list;
        unsigned int hashed_key;
 
        hashed_key = hash_long(key, ht->order);
        h_list = &ht->table[hashed_key];
-       hlist_for_each_entry_rcu(entry, list, h_list, head) {
+       hlist_for_each_entry_rcu(entry, h_list, head) {
                if (entry->key == key)
-                       return list;
+                       return &entry->head;
                if (entry->key > key)
                        break;
        }
@@ -113,19 +110,19 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
        struct drm_hash_item *entry;
        struct hlist_head *h_list;
-       struct hlist_node *list, *parent;
+       struct hlist_node *parent;
        unsigned int hashed_key;
        unsigned long key = item->key;
 
        hashed_key = hash_long(key, ht->order);
        h_list = &ht->table[hashed_key];
        parent = NULL;
-       hlist_for_each_entry(entry, list, h_list, head) {
+       hlist_for_each_entry(entry, h_list, head) {
                if (entry->key == key)
                        return -EINVAL;
                if (entry->key > key)
                        break;
-               parent = list;
+               parent = &entry->head;
        }
        if (parent) {
                hlist_add_after_rcu(parent, &item->head);
index 200e104..7d30802 100644 (file)
@@ -109,7 +109,6 @@ EXPORT_SYMBOL(drm_ut_debug_printk);
 
 static int drm_minor_get_id(struct drm_device *dev, int type)
 {
-       int new_id;
        int ret;
        int base = 0, limit = 63;
 
@@ -121,25 +120,11 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
                 limit = base + 255;
         }
 
-again:
-       if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
-               DRM_ERROR("Out of memory expanding drawable idr\n");
-               return -ENOMEM;
-       }
        mutex_lock(&dev->struct_mutex);
-       ret = idr_get_new_above(&drm_minors_idr, NULL,
-                               base, &new_id);
+       ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
        mutex_unlock(&dev->struct_mutex);
-       if (ret == -EAGAIN)
-               goto again;
-       else if (ret)
-               return ret;
 
-       if (new_id >= limit) {
-               idr_remove(&drm_minors_idr, new_id);
-               return -EINVAL;
-       }
-       return new_id;
+       return ret == -ENOSPC ? -EINVAL : ret;
 }
 
 struct drm_master *drm_master_create(struct drm_minor *minor)
index 1a55635..1adce07 100644 (file)
@@ -137,21 +137,15 @@ static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
 
        DRM_DEBUG_KMS("%s\n", __func__);
 
-again:
-       /* ensure there is space available to allocate a handle */
-       if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
-               DRM_ERROR("failed to get idr.\n");
-               return -ENOMEM;
-       }
-
        /* do the allocation under our mutexlock */
        mutex_lock(lock);
-       ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+       ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
        mutex_unlock(lock);
-       if (ret == -EAGAIN)
-               goto again;
+       if (ret < 0)
+               return ret;
 
-       return ret;
+       *idp = ret;
+       return 0;
 }
 
 static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
@@ -1786,8 +1780,6 @@ err_iommu:
                        drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
 err_idr:
-       idr_remove_all(&ctx->ipp_idr);
-       idr_remove_all(&ctx->prop_idr);
        idr_destroy(&ctx->ipp_idr);
        idr_destroy(&ctx->prop_idr);
        return ret;
@@ -1965,8 +1957,6 @@ static int ipp_remove(struct platform_device *pdev)
        exynos_drm_subdrv_unregister(&ctx->subdrv);
 
        /* remove,destroy ipp idr */
-       idr_remove_all(&ctx->ipp_idr);
-       idr_remove_all(&ctx->prop_idr);
        idr_destroy(&ctx->ipp_idr);
        idr_destroy(&ctx->prop_idr);
 
index 04a371a..054e26e 100644 (file)
@@ -202,7 +202,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
        WARN_ON(gt->pages);
 
        /* This is the shared memory object that backs the GEM resource */
-       inode = gt->gem.filp->f_path.dentry->d_inode;
+       inode = file_inode(gt->gem.filp);
        mapping = inode->i_mapping;
 
        gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
index 8413ffc..0e207e6 100644 (file)
@@ -1618,7 +1618,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
-       inode = obj->base.filp->f_path.dentry->d_inode;
+       inode = file_inode(obj->base.filp);
        shmem_truncate_range(inode, 0, (loff_t)-1);
 
        obj->madv = __I915_MADV_PURGED;
@@ -1783,7 +1783,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         *
         * Fail silently without starting the shrinker
         */
-       mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+       mapping = file_inode(obj->base.filp)->i_mapping;
        gfp = mapping_gfp_mask(mapping);
        gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
        gfp &= ~(__GFP_IO | __GFP_WAIT);
@@ -3747,7 +3747,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                mask |= __GFP_DMA32;
        }
 
-       mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+       mapping = file_inode(obj->base.filp)->i_mapping;
        mapping_set_gfp_mask(mapping, mask);
 
        i915_gem_object_init(obj, &i915_gem_object_ops);
@@ -4232,7 +4232,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
 void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj)
 {
-       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
        char *vaddr;
        int i;
        int page_count;
@@ -4268,7 +4268,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                            int id,
                            int align)
 {
-       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+       struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = 0;
        int page_count;
index 21177d9..94d873a 100644 (file)
@@ -139,7 +139,7 @@ create_hw_context(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_context *ctx;
-       int ret, id;
+       int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
@@ -164,22 +164,11 @@ create_hw_context(struct drm_device *dev,
 
        ctx->file_priv = file_priv;
 
-again:
-       if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
-               ret = -ENOMEM;
-               DRM_DEBUG_DRIVER("idr allocation failed\n");
-               goto err_out;
-       }
-
-       ret = idr_get_new_above(&file_priv->context_idr, ctx,
-                               DEFAULT_CONTEXT_ID + 1, &id);
-       if (ret == 0)
-               ctx->id = id;
-
-       if (ret == -EAGAIN)
-               goto again;
-       else if (ret)
+       ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+                       GFP_KERNEL);
+       if (ret < 0)
                goto err_out;
+       ctx->id = ret;
 
        return ctx;
 
index e4a66a3..f9eb679 100644 (file)
@@ -40,7 +40,7 @@ struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
        int i, npages;
 
        /* This is the shared memory object that backs the GEM resource */
-       inode = obj->filp->f_path.dentry->d_inode;
+       inode = file_inode(obj->filp);
        mapping = inode->i_mapping;
 
        npages = obj->size >> PAGE_SHIFT;
index 841065b..5a5325e 100644 (file)
@@ -58,7 +58,6 @@ static int sis_driver_unload(struct drm_device *dev)
 {
        drm_sis_private_t *dev_priv = dev->dev_private;
 
-       idr_remove_all(&dev_priv->object_idr);
        idr_destroy(&dev_priv->object_idr);
 
        kfree(dev_priv);
index 2b2f78c..9a43d98 100644 (file)
@@ -128,17 +128,10 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
        if (retval)
                goto fail_alloc;
 
-again:
-       if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
-               retval = -ENOMEM;
-               goto fail_idr;
-       }
-
-       retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
-       if (retval == -EAGAIN)
-               goto again;
-       if (retval)
+       retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+       if (retval < 0)
                goto fail_idr;
+       user_key = retval;
 
        list_add(&item->owner_list, &file_priv->obj_list);
        mutex_unlock(&dev->struct_mutex);
index 7d759a4..5e93a52 100644 (file)
@@ -296,7 +296,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);
 
-       swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+       swap_space = file_inode(swap_storage)->i_mapping;
 
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = shmem_read_mapping_page(swap_space, i);
@@ -345,7 +345,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
        } else
                swap_storage = persistent_swap_storage;
 
-       swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
+       swap_space = file_inode(swap_storage)->i_mapping;
 
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
index afd212c..3816270 100644 (file)
@@ -137,7 +137,7 @@ static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
        if (obj->pages == NULL)
                return -ENOMEM;
 
-       inode = obj->base.filp->f_path.dentry->d_inode;
+       inode = file_inode(obj->base.filp);
        mapping = inode->i_mapping;
        gfpmask |= mapping_gfp_mask(mapping);
 
index c0f1cc7..d0ab3fb 100644 (file)
@@ -120,7 +120,6 @@ int via_driver_unload(struct drm_device *dev)
 {
        drm_via_private_t *dev_priv = dev->dev_private;
 
-       idr_remove_all(&dev_priv->object_idr);
        idr_destroy(&dev_priv->object_idr);
 
        kfree(dev_priv);
index 0d55432..0ab93ff 100644 (file)
@@ -148,17 +148,10 @@ int via_mem_alloc(struct drm_device *dev, void *data,
        if (retval)
                goto fail_alloc;
 
-again:
-       if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
-               retval = -ENOMEM;
-               goto fail_idr;
-       }
-
-       retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
-       if (retval == -EAGAIN)
-               goto again;
-       if (retval)
+       retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+       if (retval < 0)
                goto fail_idr;
+       user_key = retval;
 
        list_add(&item->owner_list, &file_priv->obj_list);
        mutex_unlock(&dev->struct_mutex);
index 1655617..bc78425 100644 (file)
@@ -177,17 +177,16 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
 
        BUG_ON(res->id != -1);
 
-       do {
-               if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
-                       return -ENOMEM;
-
-               write_lock(&dev_priv->resource_lock);
-               ret = idr_get_new_above(idr, res, 1, &res->id);
-               write_unlock(&dev_priv->resource_lock);
+       idr_preload(GFP_KERNEL);
+       write_lock(&dev_priv->resource_lock);
 
-       } while (ret == -EAGAIN);
+       ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
+       if (ret >= 0)
+               res->id = ret;
 
-       return ret;
+       write_unlock(&dev_priv->resource_lock);
+       idr_preload_end();
+       return ret < 0 ? ret : 0;
 }
 
 /**
index b685b04..d7437ef 100644 (file)
@@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(roccat_disconnect);
 
 static long roccat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct roccat_device *device;
        unsigned int minor = iminor(inode);
        long retval = 0;
index f3bbbce..a745163 100644 (file)
@@ -108,7 +108,7 @@ out:
  * This function is to be called with the minors_lock mutex held */
 static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type)
 {
-       unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       unsigned int minor = iminor(file_inode(file));
        struct hid_device *dev;
        __u8 *buf;
        int ret = 0;
@@ -176,7 +176,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
  *  mutex held. */
 static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type)
 {
-       unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       unsigned int minor = iminor(file_inode(file));
        struct hid_device *dev;
        __u8 *buf;
        int ret = 0, len;
@@ -340,7 +340,7 @@ unlock:
 static long hidraw_ioctl(struct file *file, unsigned int cmd,
                                                        unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        unsigned int minor = iminor(inode);
        long ret = 0;
        struct hidraw *dev;
index 2d58f93..833dd1a 100644 (file)
@@ -420,7 +420,7 @@ static int hsi_event_notifier_call(struct notifier_block *nb,
 /**
  * hsi_register_port_event - Register a client to receive port events
  * @cl: HSI client that wants to receive port events
- * @cb: Event handler callback
+ * @handler: Event handler callback
  *
  * Clients should register a callback to be able to receive
  * events from the ports. Registration should happen after
index 53a8600..ff1be16 100644 (file)
@@ -318,7 +318,7 @@ static u32 get_vp_index(uuid_le *type_guid)
                return 0;
        }
        cur_cpu = (++next_vp % max_cpus);
-       return 0;
+       return cur_cpu;
 }
 
 /*
index 1c5481d..7311589 100644 (file)
@@ -272,7 +272,7 @@ u16 hv_signal_event(void *con_id)
  * retrieve the initialized message and event pages.  Otherwise, we create and
  * initialize the message and event pages.
  */
-void hv_synic_init(void *irqarg)
+void hv_synic_init(void *arg)
 {
        u64 version;
        union hv_synic_simp simp;
@@ -281,7 +281,6 @@ void hv_synic_init(void *irqarg)
        union hv_synic_scontrol sctrl;
        u64 vp_index;
 
-       u32 irq_vector = *((u32 *)(irqarg));
        int cpu = smp_processor_id();
 
        if (!hv_context.hypercall_page)
@@ -335,7 +334,7 @@ void hv_synic_init(void *irqarg)
        rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
        shared_sint.as_uint64 = 0;
-       shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
+       shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
        shared_sint.masked = false;
        shared_sint.auto_eoi = true;
 
index cf19dfa..bf421e0 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/kernel_stat.h>
 #include <asm/hyperv.h>
 #include <asm/hypervisor.h>
+#include <asm/mshyperv.h>
 #include "hyperv_vmbus.h"
 
 
@@ -528,7 +529,6 @@ static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
 static int vmbus_bus_init(int irq)
 {
        int ret;
-       unsigned int vector;
 
        /* Hypervisor initialization...setup hypercall page..etc */
        ret = hv_init();
@@ -558,13 +558,16 @@ static int vmbus_bus_init(int irq)
         */
        irq_set_handler(irq, vmbus_flow_handler);
 
-       vector = IRQ0_VECTOR + irq;
+       /*
+        * Register our interrupt handler.
+        */
+       hv_register_vmbus_handler(irq, vmbus_isr);
 
        /*
-        * Notify the hypervisor of our irq and
+        * Initialize the per-cpu interrupt state and
         * connect to the host.
         */
-       on_each_cpu(hv_synic_init, (void *)&vector, 1);
+       on_each_cpu(hv_synic_init, NULL, 1);
        ret = vmbus_connect();
        if (ret)
                goto err_irq;
index 66a30f7..991d38d 100644 (file)
@@ -935,25 +935,17 @@ out_list:
  */
 int i2c_add_adapter(struct i2c_adapter *adapter)
 {
-       int     id, res = 0;
-
-retry:
-       if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
-               return -ENOMEM;
+       int id;
 
        mutex_lock(&core_lock);
-       /* "above" here means "above or equal to", sigh */
-       res = idr_get_new_above(&i2c_adapter_idr, adapter,
-                               __i2c_first_dynamic_bus_num, &id);
+       id = idr_alloc(&i2c_adapter_idr, adapter,
+                      __i2c_first_dynamic_bus_num, 0, GFP_KERNEL);
        mutex_unlock(&core_lock);
-
-       if (res < 0) {
-               if (res == -EAGAIN)
-                       goto retry;
-               return res;
-       }
+       if (id < 0)
+               return id;
 
        adapter->nr = id;
+
        return i2c_register_adapter(adapter);
 }
 EXPORT_SYMBOL(i2c_add_adapter);
@@ -984,33 +976,17 @@ EXPORT_SYMBOL(i2c_add_adapter);
 int i2c_add_numbered_adapter(struct i2c_adapter *adap)
 {
        int     id;
-       int     status;
 
        if (adap->nr == -1) /* -1 means dynamically assign bus id */
                return i2c_add_adapter(adap);
-       if (adap->nr & ~MAX_IDR_MASK)
-               return -EINVAL;
-
-retry:
-       if (idr_pre_get(&i2c_adapter_idr, GFP_KERNEL) == 0)
-               return -ENOMEM;
 
        mutex_lock(&core_lock);
-       /* "above" here means "above or equal to", sigh;
-        * we need the "equal to" result to force the result
-        */
-       status = idr_get_new_above(&i2c_adapter_idr, adap, adap->nr, &id);
-       if (status == 0 && id != adap->nr) {
-               status = -EBUSY;
-               idr_remove(&i2c_adapter_idr, id);
-       }
+       id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
+                      GFP_KERNEL);
        mutex_unlock(&core_lock);
-       if (status == -EAGAIN)
-               goto retry;
-
-       if (status == 0)
-               status = i2c_register_adapter(adap);
-       return status;
+       if (id < 0)
+               return id == -ENOSPC ? -EBUSY : id;
+       return i2c_register_adapter(adap);
 }
 EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
 
index 5ec2261..c3ccdea 100644 (file)
@@ -148,7 +148,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
                return -ENOMEM;
 
        pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
-               iminor(file->f_path.dentry->d_inode), count);
+               iminor(file_inode(file)), count);
 
        ret = i2c_master_recv(client, tmp, count);
        if (ret >= 0)
@@ -172,7 +172,7 @@ static ssize_t i2cdev_write(struct file *file, const char __user *buf,
                return PTR_ERR(tmp);
 
        pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
-               iminor(file->f_path.dentry->d_inode), count);
+               iminor(file_inode(file)), count);
 
        ret = i2c_master_send(client, tmp, count);
        kfree(tmp);
index a3133d7..2abcc47 100644 (file)
@@ -333,7 +333,7 @@ static int ide_settings_proc_open(struct inode *inode, struct file *file)
 static ssize_t ide_settings_proc_write(struct file *file, const char __user *buffer,
                                       size_t count, loff_t *pos)
 {
-       ide_drive_t     *drive = (ide_drive_t *) PDE(file->f_path.dentry->d_inode)->data;
+       ide_drive_t     *drive = (ide_drive_t *) PDE(file_inode(file))->data;
        char            name[MAX_LEN + 1];
        int             for_real = 0, mul_factor, div_factor;
        unsigned long   n;
@@ -558,7 +558,7 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
 static ssize_t ide_driver_proc_write(struct file *file, const char __user *buffer,
                                     size_t count, loff_t *pos)
 {
-       ide_drive_t     *drive = (ide_drive_t *) PDE(file->f_path.dentry->d_inode)->data;
+       ide_drive_t     *drive = (ide_drive_t *) PDE(file_inode(file))->data;
        char name[32];
 
        if (!capable(CAP_SYS_ADMIN))
index 394fea2..784b97c 100644 (file)
@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
        unsigned long flags;
-       int ret, id;
+       int id;
        static int next_id;
 
-       do {
-               spin_lock_irqsave(&cm.lock, flags);
-               ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
-                                       next_id, &id);
-               if (!ret)
-                       next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
-               spin_unlock_irqrestore(&cm.lock, flags);
-       } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+       idr_preload(GFP_KERNEL);
+       spin_lock_irqsave(&cm.lock, flags);
+
+       id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
+       if (id >= 0)
+               next_id = max(id + 1, 0);
+
+       spin_unlock_irqrestore(&cm.lock, flags);
+       idr_preload_end();
 
        cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
-       return ret;
+       return id < 0 ? id : 0;
 }
 
 static void cm_free_id(__be32 local_id)
@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
        cm.remote_sidr_table = RB_ROOT;
        idr_init(&cm.local_id_table);
        get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
-       idr_pre_get(&cm.local_id_table, GFP_KERNEL);
        INIT_LIST_HEAD(&cm.timewait_list);
 
        ret = class_register(&cm_class);
index d789eea..71c2c71 100644 (file)
@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
                          unsigned short snum)
 {
        struct rdma_bind_list *bind_list;
-       int port, ret;
+       int ret;
 
        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;
 
-       do {
-               ret = idr_get_new_above(ps, bind_list, snum, &port);
-       } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
-       if (ret)
-               goto err1;
-
-       if (port != snum) {
-               ret = -EADDRNOTAVAIL;
-               goto err2;
-       }
+       ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+       if (ret < 0)
+               goto err;
 
        bind_list->ps = ps;
-       bind_list->port = (unsigned short) port;
+       bind_list->port = (unsigned short)ret;
        cma_bind_port(bind_list, id_priv);
        return 0;
-err2:
-       idr_remove(ps, port);
-err1:
+err:
        kfree(bind_list);
-       return ret;
+       return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
 }
 
 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2214,10 +2204,9 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 {
        struct rdma_id_private *cur_id;
        struct sockaddr *addr, *cur_addr;
-       struct hlist_node *node;
 
        addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
-       hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+       hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                if (id_priv == cur_id)
                        continue;
 
index 176c8f9..9f5ad7c 100644 (file)
@@ -118,14 +118,13 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
 {
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;
-       struct hlist_node *pos;
 
        if (!pool->cache_bucket)
                return NULL;
 
        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
 
-       hlist_for_each_entry(fmr, pos, bucket, cache_node)
+       hlist_for_each_entry(fmr, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
index a8905ab..934f45e 100644 (file)
@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
 
 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
 {
+       bool preload = gfp_mask & __GFP_WAIT;
        unsigned long flags;
        int ret, id;
 
-retry:
-       if (!idr_pre_get(&query_idr, gfp_mask))
-               return -ENOMEM;
+       if (preload)
+               idr_preload(gfp_mask);
        spin_lock_irqsave(&idr_lock, flags);
-       ret = idr_get_new(&query_idr, query, &id);
+
+       id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
+
        spin_unlock_irqrestore(&idr_lock, flags);
-       if (ret == -EAGAIN)
-               goto retry;
-       if (ret)
-               return ret;
+       if (preload)
+               idr_preload_end();
+       if (id < 0)
+               return id;
 
        query->mad_buf->timeout_ms  = timeout_ms;
        query->mad_buf->context[0] = query;
index 49b15ac..f2f6393 100644 (file)
@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 {
        struct ib_ucm_context *ctx;
-       int result;
 
        ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
        if (!ctx)
@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
        ctx->file = file;
        INIT_LIST_HEAD(&ctx->events);
 
-       do {
-               result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
-               if (!result)
-                       goto error;
-
-               mutex_lock(&ctx_id_mutex);
-               result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
-               mutex_unlock(&ctx_id_mutex);
-       } while (result == -EAGAIN);
-
-       if (result)
+       mutex_lock(&ctx_id_mutex);
+       ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
+       mutex_unlock(&ctx_id_mutex);
+       if (ctx->id < 0)
                goto error;
 
        list_add_tail(&ctx->file_list, &file->ctxs);
index 2709ff5..5ca44cd 100644 (file)
@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx)
 static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 {
        struct ucma_context *ctx;
-       int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
        INIT_LIST_HEAD(&ctx->mc_list);
        ctx->file = file;
 
-       do {
-               ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
-               if (!ret)
-                       goto error;
-
-               mutex_lock(&mut);
-               ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
-               mutex_unlock(&mut);
-       } while (ret == -EAGAIN);
-
-       if (ret)
+       mutex_lock(&mut);
+       ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
+       mutex_unlock(&mut);
+       if (ctx->id < 0)
                goto error;
 
        list_add_tail(&ctx->list, &file->ctx_list);
@@ -180,23 +172,15 @@ error:
 static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
 {
        struct ucma_multicast *mc;
-       int ret;
 
        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;
 
-       do {
-               ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
-               if (!ret)
-                       goto error;
-
-               mutex_lock(&mut);
-               ret = idr_get_new(&multicast_idr, mc, &mc->id);
-               mutex_unlock(&mut);
-       } while (ret == -EAGAIN);
-
-       if (ret)
+       mutex_lock(&mut);
+       mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+       mutex_unlock(&mut);
+       if (mc->id < 0)
                goto error;
 
        mc->ctx = ctx;
index 3983a05..a7d00f6 100644 (file)
@@ -125,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
 {
        int ret;
 
-retry:
-       if (!idr_pre_get(idr, GFP_KERNEL))
-               return -ENOMEM;
-
+       idr_preload(GFP_KERNEL);
        spin_lock(&ib_uverbs_idr_lock);
-       ret = idr_get_new(idr, uobj, &uobj->id);
-       spin_unlock(&ib_uverbs_idr_lock);
 
-       if (ret == -EAGAIN)
-               goto retry;
+       ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
+       if (ret >= 0)
+               uobj->id = ret;
 
-       return ret;
+       spin_unlock(&ib_uverbs_idr_lock);
+       idr_preload_end();
+
+       return ret < 0 ? ret : 0;
 }
 
 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
@@ -731,7 +730,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
                        goto err_tree_mutex_unlock;
                }
 
-               inode = f.file->f_path.dentry->d_inode;
+               inode = file_inode(f.file);
                xrcd = find_xrcd(file->device, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
index 28cd5cb..0ab826b 100644 (file)
@@ -382,14 +382,17 @@ static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
 {
        int ret;
 
-        do {
-               spin_lock_irq(&c2dev->qp_table.lock);
-               ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
-                                       c2dev->qp_table.last++, &qp->qpn);
-               spin_unlock_irq(&c2dev->qp_table.lock);
-        } while ((ret == -EAGAIN) &&
-                idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
-       return ret;
+       idr_preload(GFP_KERNEL);
+       spin_lock_irq(&c2dev->qp_table.lock);
+
+       ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
+                       GFP_NOWAIT);
+       if (ret >= 0)
+               qp->qpn = ret;
+
+       spin_unlock_irq(&c2dev->qp_table.lock);
+       idr_preload_end();
+       return ret < 0 ? ret : 0;
 }
 
 static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
index a1c4457..8378622 100644 (file)
@@ -153,19 +153,17 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
 {
        int ret;
-       int newid;
-
-       do {
-               if (!idr_pre_get(idr, GFP_KERNEL)) {
-                       return -ENOMEM;
-               }
-               spin_lock_irq(&rhp->lock);
-               ret = idr_get_new_above(idr, handle, id, &newid);
-               BUG_ON(newid != id);
-               spin_unlock_irq(&rhp->lock);
-       } while (ret == -EAGAIN);
-
-       return ret;
+
+       idr_preload(GFP_KERNEL);
+       spin_lock_irq(&rhp->lock);
+
+       ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
+
+       spin_unlock_irq(&rhp->lock);
+       idr_preload_end();
+
+       BUG_ON(ret == -ENOSPC);
+       return ret < 0 ? ret : 0;
 }
 
 static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
index 4c07fc0..7eec5e1 100644 (file)
@@ -260,20 +260,21 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                 void *handle, u32 id, int lock)
 {
        int ret;
-       int newid;
 
-       do {
-               if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
-                       return -ENOMEM;
-               if (lock)
-                       spin_lock_irq(&rhp->lock);
-               ret = idr_get_new_above(idr, handle, id, &newid);
-               BUG_ON(!ret && newid != id);
-               if (lock)
-                       spin_unlock_irq(&rhp->lock);
-       } while (ret == -EAGAIN);
-
-       return ret;
+       if (lock) {
+               idr_preload(GFP_KERNEL);
+               spin_lock_irq(&rhp->lock);
+       }
+
+       ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
+
+       if (lock) {
+               spin_unlock_irq(&rhp->lock);
+               idr_preload_end();
+       }
+
+       BUG_ON(ret == -ENOSPC);
+       return ret < 0 ? ret : 0;
 }
 
 static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
index 8f52901..212150c 100644 (file)
@@ -128,7 +128,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
        void *vpage;
        u32 counter;
        u64 rpage, cqx_fec, h_ret;
-       int ipz_rc, ret, i;
+       int ipz_rc, i;
        unsigned long flags;
 
        if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
@@ -163,32 +163,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
        adapter_handle = shca->ipz_hca_handle;
        param.eq_handle = shca->eq.ipz_eq_handle;
 
-       do {
-               if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
-                       cq = ERR_PTR(-ENOMEM);
-                       ehca_err(device, "Can't reserve idr nr. device=%p",
-                                device);
-                       goto create_cq_exit1;
-               }
-
-               write_lock_irqsave(&ehca_cq_idr_lock, flags);
-               ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
-               write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-       } while (ret == -EAGAIN);
+       idr_preload(GFP_KERNEL);
+       write_lock_irqsave(&ehca_cq_idr_lock, flags);
+       my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
+       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+       idr_preload_end();
 
-       if (ret) {
+       if (my_cq->token < 0) {
                cq = ERR_PTR(-ENOMEM);
                ehca_err(device, "Can't allocate new idr entry. device=%p",
                         device);
                goto create_cq_exit1;
        }
 
-       if (my_cq->token > 0x1FFFFFF) {
-               cq = ERR_PTR(-ENOMEM);
-               ehca_err(device, "Invalid number of cq. device=%p", device);
-               goto create_cq_exit2;
-       }
-
        /*
         * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
         * for receiving errors CQEs.
index 1493939..00d6861 100644 (file)
@@ -636,30 +636,26 @@ static struct ehca_qp *internal_create_qp(
                my_qp->send_cq =
                        container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
 
-       do {
-               if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
-                       ret = -ENOMEM;
-                       ehca_err(pd->device, "Can't reserve idr resources.");
-                       goto create_qp_exit0;
-               }
+       idr_preload(GFP_KERNEL);
+       write_lock_irqsave(&ehca_qp_idr_lock, flags);
 
-               write_lock_irqsave(&ehca_qp_idr_lock, flags);
-               ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
-               write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-       } while (ret == -EAGAIN);
+       ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
+       if (ret >= 0)
+               my_qp->token = ret;
 
-       if (ret) {
-               ret = -ENOMEM;
-               ehca_err(pd->device, "Can't allocate new idr entry.");
+       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+       idr_preload_end();
+       if (ret < 0) {
+               if (ret == -ENOSPC) {
+                       ret = -EINVAL;
+                       ehca_err(pd->device, "Invalid number of qp");
+               } else {
+                       ret = -ENOMEM;
+                       ehca_err(pd->device, "Can't allocate new idr entry.");
+               }
                goto create_qp_exit0;
        }
 
-       if (my_qp->token > 0x1FFFFFF) {
-               ret = -EINVAL;
-               ehca_err(pd->device, "Invalid number of qp");
-               goto create_qp_exit1;
-       }
-
        if (has_srq)
                parms.srq_token = my_qp->token;
 
index 7b371f5..bd0caed 100644 (file)
@@ -194,11 +194,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
        struct ipath_devdata *dd;
        int ret;
 
-       if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
-               dd = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
        dd = vzalloc(sizeof(*dd));
        if (!dd) {
                dd = ERR_PTR(-ENOMEM);
@@ -206,9 +201,10 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
        }
        dd->ipath_unit = -1;
 
+       idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&ipath_devs_lock, flags);
 
-       ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
+       ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
        if (ret < 0) {
                printk(KERN_ERR IPATH_DRV_NAME
                       ": Could not allocate unit ID: error %d\n", -ret);
@@ -216,6 +212,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
                dd = ERR_PTR(ret);
                goto bail_unlock;
        }
+       dd->ipath_unit = ret;
 
        dd->pcidev = pdev;
        pci_set_drvdata(pdev, dd);
@@ -224,7 +221,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 
 bail_unlock:
        spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
+       idr_preload_end();
 bail:
        return dd;
 }
@@ -2503,11 +2500,6 @@ static int __init infinipath_init(void)
         * the PCI subsystem.
         */
        idr_init(&unit_table);
-       if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
-               printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
-               ret = -ENOMEM;
-               goto bail;
-       }
 
        ret = pci_register_driver(&ipath_driver);
        if (ret < 0) {
index 3eb7e45..aed8afe 100644 (file)
@@ -1864,9 +1864,9 @@ static int ipath_assign_port(struct file *fp,
                goto done_chk_sdma;
        }
 
-       i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
+       i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
        ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
-                  (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);
+                  (long)file_inode(fp)->i_rdev, i_minor);
 
        if (i_minor)
                ret = find_free_port(i_minor - 1, fp, uinfo);
index a4de9d5..a479375 100644 (file)
@@ -113,7 +113,7 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
        struct infinipath_counters counters;
        struct ipath_devdata *dd;
 
-       dd = file->f_path.dentry->d_inode->i_private;
+       dd = file_inode(file)->i_private;
        dd->ipath_f_read_counters(dd, &counters);
 
        return simple_read_from_buffer(buf, count, ppos, &counters,
@@ -154,7 +154,7 @@ static ssize_t flash_read(struct file *file, char __user *buf,
                goto bail;
        }
 
-       dd = file->f_path.dentry->d_inode->i_private;
+       dd = file_inode(file)->i_private;
        if (ipath_eeprom_read(dd, pos, tmp, count)) {
                ipath_dev_err(dd, "failed to read from flash\n");
                ret = -ENXIO;
@@ -207,7 +207,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
                goto bail_tmp;
        }
 
-       dd = file->f_path.dentry->d_inode->i_private;
+       dd = file_inode(file)->i_private;
        if (ipath_eeprom_write(dd, pos, tmp, count)) {
                ret = -ENXIO;
                ipath_dev_err(dd, "failed to write to flash\n");
index dbc99d4..e0d79b2 100644 (file)
@@ -203,7 +203,7 @@ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
 static struct id_map_entry *
 id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 {
-       int ret, id;
+       int ret;
        static int next_id;
        struct id_map_entry *ent;
        struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
@@ -220,25 +220,23 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
        ent->dev = to_mdev(ibdev);
        INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
 
-       do {
-               spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-               ret = idr_get_new_above(&sriov->pv_id_table, ent,
-                                       next_id, &id);
-               if (!ret) {
-                       next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
-                       ent->pv_cm_id = (u32)id;
-                       sl_id_map_add(ibdev, ent);
-               }
+       idr_preload(GFP_KERNEL);
+       spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
 
-               spin_unlock(&sriov->id_map_lock);
-       } while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
-       /*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
-       if (!ret) {
-               spin_lock(&sriov->id_map_lock);
+       ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT);
+       if (ret >= 0) {
+               next_id = max(ret + 1, 0);
+               ent->pv_cm_id = (u32)ret;
+               sl_id_map_add(ibdev, ent);
                list_add_tail(&ent->list, &sriov->cm_list);
-               spin_unlock(&sriov->id_map_lock);
-               return ent;
        }
+
+       spin_unlock(&sriov->id_map_lock);
+       idr_preload_end();
+
+       if (ret >= 0)
+               return ent;
+
        /*error flow*/
        kfree(ent);
        mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
index c4e0131..48928c8 100644 (file)
@@ -51,18 +51,6 @@ static DEFINE_IDR(ocrdma_dev_id);
 
 static union ib_gid ocrdma_zero_sgid;
 
-static int ocrdma_get_instance(void)
-{
-       int instance = 0;
-
-       /* Assign an unused number */
-       if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
-               return -1;
-       if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
-               return -1;
-       return instance;
-}
-
 void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
 {
        u8 mac_addr[6];
@@ -416,7 +404,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
                goto idr_err;
 
        memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
-       dev->id = ocrdma_get_instance();
+       dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
        if (dev->id < 0)
                goto idr_err;
 
index 959a5c4..4f7aa30 100644 (file)
@@ -1524,7 +1524,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
                }
        }
 
-       i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
+       i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
        if (i_minor)
                ret = find_free_ctxt(i_minor - 1, fp, uinfo);
        else
index 65a2a23..644bd6f 100644 (file)
@@ -45,7 +45,7 @@
 
 static struct super_block *qib_super;
 
-#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
+#define private2dd(file) (file_inode(file)->i_private)
 
 static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
                       umode_t mode, const struct file_operations *fops,
@@ -171,7 +171,7 @@ static const struct file_operations cntr_ops[] = {
 };
 
 /*
- * Could use file->f_dentry->d_inode->i_ino to figure out which file,
+ * Could use file_inode(file)->i_ino to figure out which file,
  * instead of separate routine for each, but for now, this works...
  */
 
index ddf066d..50e33aa 100644 (file)
@@ -1060,22 +1060,23 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
        struct qib_devdata *dd;
        int ret;
 
-       if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
-               dd = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
        dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
        if (!dd) {
                dd = ERR_PTR(-ENOMEM);
                goto bail;
        }
 
+       idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&qib_devs_lock, flags);
-       ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
-       if (ret >= 0)
+
+       ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
+       if (ret >= 0) {
+               dd->unit = ret;
                list_add(&dd->list, &qib_dev_list);
+       }
+
        spin_unlock_irqrestore(&qib_devs_lock, flags);
+       idr_preload_end();
 
        if (ret < 0) {
                qib_early_err(&pdev->dev,
@@ -1180,11 +1181,6 @@ static int __init qlogic_ib_init(void)
         * the PCI subsystem.
         */
        idr_init(&qib_unit_table);
-       if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
-               pr_err("idr_pre_get() failed\n");
-               ret = -ENOMEM;
-               goto bail_cq_wq;
-       }
 
        ret = pci_register_driver(&qib_driver);
        if (ret < 0) {
@@ -1199,7 +1195,6 @@ static int __init qlogic_ib_init(void)
 
 bail_unit:
        idr_destroy(&qib_unit_table);
-bail_cq_wq:
        destroy_workqueue(qib_cq_wq);
 bail_dev:
        qib_dev_cleanup();
index 0b4f542..2e3334b 100644 (file)
@@ -109,7 +109,9 @@ static int hp_sdc_rtc_do_read_bbrtc (struct rtc_time *rtctm)
        
        if (hp_sdc_enqueue_transaction(&t)) return -1;
        
-       down_interruptible(&tsem);  /* Put ourselves to sleep for results. */
+       /* Put ourselves to sleep for results. */
+       if (WARN_ON(down_interruptible(&tsem)))
+               return -1;
        
        /* Check for nonpresence of BBRTC */
        if (!((tseq[83] | tseq[90] | tseq[69] | tseq[76] |
@@ -176,11 +178,16 @@ static int64_t hp_sdc_rtc_read_i8042timer (uint8_t loadcmd, int numreg)
        t.seq =                 tseq;
        t.act.semaphore =       &i8042tregs;
 
-       down_interruptible(&i8042tregs);  /* Sleep if output regs in use. */
+       /* Sleep if output regs in use. */
+       if (WARN_ON(down_interruptible(&i8042tregs)))
+               return -1;
 
        if (hp_sdc_enqueue_transaction(&t)) return -1;
        
-       down_interruptible(&i8042tregs);  /* Sleep until results come back. */
+       /* Sleep until results come back. */
+       if (WARN_ON(down_interruptible(&i8042tregs)))
+               return -1;
+
        up(&i8042tregs);
 
        return (tseq[5] | 
@@ -276,6 +283,7 @@ static inline int hp_sdc_rtc_read_ct(struct timeval *res) {
 }
 
 
+#if 0 /* not used yet */
 /* Set the i8042 real-time clock */
 static int hp_sdc_rtc_set_rt (struct timeval *setto)
 {
@@ -386,6 +394,7 @@ static int hp_sdc_rtc_set_i8042timer (struct timeval *setto, uint8_t setcmd)
        }
        return 0;
 }
+#endif
 
 static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos) {
index 6e9cc76..3ec5ef2 100644 (file)
@@ -22,7 +22,7 @@ config SERIO_I8042
        tristate "i8042 PC Keyboard controller" if EXPERT || !X86
        default y
        depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
-                  (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
+                  (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390
        help
          i8042 is the chip over which the standard AT keyboard and PS/2
          mouse are connected to the computer. If you use these devices,
index eb0109f..b34e5fd 100644 (file)
@@ -968,7 +968,6 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
 {
        struct smmu_debugfs_info *info;
        struct smmu_device *smmu;
-       struct dentry *dent;
        int i;
        enum {
                _OFF = 0,
@@ -996,8 +995,7 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
        if (i == ARRAY_SIZE(command))
                return -EINVAL;
 
-       dent = file->f_dentry;
-       info = dent->d_inode->i_private;
+       info = file_inode(file)->i_private;
        smmu = info->smmu;
 
        offs = SMMU_CACHE_CONFIG(info->cache);
@@ -1032,15 +1030,11 @@ static ssize_t smmu_debugfs_stats_write(struct file *file,
 
 static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
 {
-       struct smmu_debugfs_info *info;
-       struct smmu_device *smmu;
-       struct dentry *dent;
+       struct smmu_debugfs_info *info = s->private;
+       struct smmu_device *smmu = info->smmu;
        int i;
        const char * const stats[] = { "hit", "miss", };
 
-       dent = d_find_alias(s->private);
-       info = dent->d_inode->i_private;
-       smmu = info->smmu;
 
        for (i = 0; i < ARRAY_SIZE(stats); i++) {
                u32 val;
@@ -1054,14 +1048,12 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
                        stats[i], val, offs);
        }
        seq_printf(s, "\n");
-       dput(dent);
-
        return 0;
 }
 
 static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, smmu_debugfs_stats_show, inode);
+       return single_open(file, smmu_debugfs_stats_show, inode->i_private);
 }
 
 static const struct file_operations smmu_debugfs_stats_fops = {
index e65fbf2..98e3b87 100644 (file)
@@ -2,6 +2,8 @@ obj-$(CONFIG_IRQCHIP)                   += irqchip.o
 
 obj-$(CONFIG_ARCH_BCM2835)             += irq-bcm2835.o
 obj-$(CONFIG_ARCH_EXYNOS)              += exynos-combiner.o
+obj-$(CONFIG_METAG)                    += irq-metag-ext.o
+obj-$(CONFIG_METAG_PERFCOUNTER_IRQS)   += irq-metag.o
 obj-$(CONFIG_ARCH_SUNXI)               += irq-sunxi.o
 obj-$(CONFIG_ARCH_SPEAR3XX)            += spear-shirq.o
 obj-$(CONFIG_ARM_GIC)                  += irq-gic.o
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
new file mode 100644 (file)
index 0000000..92c41ab
--- /dev/null
@@ -0,0 +1,868 @@
+/*
+ * Meta External interrupt code.
+ *
+ * Copyright (C) 2005-2012 Imagination Technologies Ltd.
+ *
+ * External interrupts on Meta are configured at two-levels, in the CPU core and
+ * in the external trigger block. Interrupts from SoC peripherals are
+ * multiplexed onto a single Meta CPU "trigger" - traditionally it has always
+ * been trigger 2 (TR2). For info on how de-multiplexing happens check out
+ * meta_intc_irq_demux().
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/metag-ext.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <asm/irq.h>
+#include <asm/hwthread.h>
+
+#define HWSTAT_STRIDE 8
+#define HWVEC_BLK_STRIDE 0x1000
+
+/**
+ * struct meta_intc_priv - private meta external interrupt data
+ * @nr_banks:          Number of interrupt banks
+ * @domain:            IRQ domain for all banks of external IRQs
+ * @unmasked:          Record of unmasked IRQs
+ * @levels_altered:    Record of altered level bits
+ */
+struct meta_intc_priv {
+       unsigned int            nr_banks;
+       struct irq_domain       *domain;
+
+       unsigned long           unmasked[4];
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+       unsigned long           levels_altered[4];
+#endif
+};
+
+/* Private data for the one and only external interrupt controller */
+static struct meta_intc_priv meta_intc_priv;
+
+/**
+ * meta_intc_offset() - Get the offset into the bank of a hardware IRQ number
+ * @hw:                Hardware IRQ number (within external trigger block)
+ *
+ * Returns:    Bit offset into the IRQ's bank registers
+ */
+static unsigned int meta_intc_offset(irq_hw_number_t hw)
+{
+       return hw & 0x1f;
+}
+
+/**
+ * meta_intc_bank() - Get the bank number of a hardware IRQ number
+ * @hw:                Hardware IRQ number (within external trigger block)
+ *
+ * Returns:    Bank number indicating which register the IRQ's bits are
+ */
+static unsigned int meta_intc_bank(irq_hw_number_t hw)
+{
+       return hw >> 5;
+}
+
+/**
+ * meta_intc_stat_addr() - Get the address of a HWSTATEXT register
+ * @hw:                Hardware IRQ number (within external trigger block)
+ *
+ * Returns:    Address of a HWSTATEXT register containing the status bit for
+ *             the specified hardware IRQ number
+ */
+static void __iomem *meta_intc_stat_addr(irq_hw_number_t hw)
+{
+       return (void __iomem *)(HWSTATEXT +
+                               HWSTAT_STRIDE * meta_intc_bank(hw));
+}
+
+/**
+ * meta_intc_level_addr() - Get the address of a HWLEVELEXT register
+ * @hw:                Hardware IRQ number (within external trigger block)
+ *
+ * Returns:    Address of a HWLEVELEXT register containing the sense bit for
+ *             the specified hardware IRQ number
+ */
+static void __iomem *meta_intc_level_addr(irq_hw_number_t hw)
+{
+       return (void __iomem *)(HWLEVELEXT +
+                               HWSTAT_STRIDE * meta_intc_bank(hw));
+}
+
+/**
+ * meta_intc_mask_addr() - Get the address of a HWMASKEXT register
+ * @hw:                Hardware IRQ number (within external trigger block)
+ *
+ * Returns:    Address of a HWMASKEXT register containing the mask bit for the
+ *             specified hardware IRQ number
+ */
+static void __iomem *meta_intc_mask_addr(irq_hw_number_t hw)
+{
+       return (void __iomem *)(HWMASKEXT +
+                               HWSTAT_STRIDE * meta_intc_bank(hw));
+}
+
+/**
+ * meta_intc_vec_addr() - Get the vector address of a hardware interrupt
+ * @hw:                Hardware IRQ number (within external trigger block)
+ *
+ * Returns:    Address of a HWVECEXT register controlling the core trigger to
+ *             vector the IRQ onto
+ */
+static inline void __iomem *meta_intc_vec_addr(irq_hw_number_t hw)
+{
+       return (void __iomem *)(HWVEC0EXT +
+                               HWVEC_BLK_STRIDE * meta_intc_bank(hw) +
+                               HWVECnEXT_STRIDE * meta_intc_offset(hw));
+}
+
+/**
+ * meta_intc_startup_irq() - set up an external irq
+ * @data:      data for the external irq to start up
+ *
+ * Multiplex interrupts for irq onto TR2. Clear any pending interrupts and
+ * unmask irq, both using the appropriate callbacks.
+ */
+static unsigned int meta_intc_startup_irq(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       void __iomem *vec_addr = meta_intc_vec_addr(hw);
+       int thread = hard_processor_id();
+
+       /* Perform any necessary acking. */
+       if (data->chip->irq_ack)
+               data->chip->irq_ack(data);
+
+       /* Wire up this interrupt to the core with HWVECxEXT. */
+       metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+
+       /* Perform any necessary unmasking. */
+       data->chip->irq_unmask(data);
+
+       return 0;
+}
+
+/**
+ * meta_intc_shutdown_irq() - turn off an external irq
+ * @data:      data for the external irq to turn off
+ *
+ * Mask irq using the appropriate callback and stop muxing it onto TR2.
+ */
+static void meta_intc_shutdown_irq(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       void __iomem *vec_addr = meta_intc_vec_addr(hw);
+
+       /* Mask the IRQ */
+       data->chip->irq_mask(data);
+
+       /*
+        * Disable the IRQ at the core by removing the interrupt from
+        * the HW vector mapping.
+        */
+       metag_out32(0, vec_addr);
+}
+
+/**
+ * meta_intc_ack_irq() - acknowledge an external irq
+ * @data:      data for the external irq to ack
+ *
+ * Clear down an edge interrupt in the status register.
+ */
+static void meta_intc_ack_irq(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       unsigned int bit = 1 << meta_intc_offset(hw);
+       void __iomem *stat_addr = meta_intc_stat_addr(hw);
+
+       /* Ack the int, if it is still 'on'.
+        * NOTE - this only works for edge triggered interrupts.
+        */
+       if (metag_in32(stat_addr) & bit)
+               metag_out32(bit, stat_addr);
+}
+
+/**
+ * record_irq_is_masked() - record the IRQ masked so it doesn't get handled
+ * @data:      data for the external irq to record
+ *
+ * This should get called whenever an external IRQ is masked (by whichever
+ * callback is used). It records the IRQ masked so that it doesn't get handled
+ * if it still shows up in the status register.
+ */
+static void record_irq_is_masked(struct irq_data *data)
+{
+       struct meta_intc_priv *priv = &meta_intc_priv;
+       irq_hw_number_t hw = data->hwirq;
+
+       clear_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
+}
+
+/**
+ * record_irq_is_unmasked() - record the IRQ unmasked so it can be handled
+ * @data:      data for the external irq to record
+ *
+ * This should get called whenever an external IRQ is unmasked (by whichever
+ * callback is used). It records the IRQ unmasked so that it gets handled if it
+ * shows up in the status register.
+ */
+static void record_irq_is_unmasked(struct irq_data *data)
+{
+       struct meta_intc_priv *priv = &meta_intc_priv;
+       irq_hw_number_t hw = data->hwirq;
+
+       set_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
+}
+
+/*
+ * For use by wrapper IRQ drivers
+ */
+
+/**
+ * meta_intc_mask_irq_simple() - minimal mask used by wrapper IRQ drivers
+ * @data:      data for the external irq being masked
+ *
+ * This should be called by any wrapper IRQ driver mask functions. it doesn't do
+ * any masking but records the IRQ as masked so that the core code knows the
+ * mask has taken place. It is the callers responsibility to ensure that the IRQ
+ * won't trigger an interrupt to the core.
+ */
+void meta_intc_mask_irq_simple(struct irq_data *data)
+{
+       record_irq_is_masked(data);
+}
+
+/**
+ * meta_intc_unmask_irq_simple() - minimal unmask used by wrapper IRQ drivers
+ * @data:      data for the external irq being unmasked
+ *
+ * This should be called by any wrapper IRQ driver unmask functions. it doesn't
+ * do any unmasking but records the IRQ as unmasked so that the core code knows
+ * the unmask has taken place. It is the callers responsibility to ensure that
+ * the IRQ can now trigger an interrupt to the core.
+ */
+void meta_intc_unmask_irq_simple(struct irq_data *data)
+{
+       record_irq_is_unmasked(data);
+}
+
+
+/**
+ * meta_intc_mask_irq() - mask an external irq using HWMASKEXT
+ * @data:      data for the external irq to mask
+ *
+ * This is a default implementation of a mask function which makes use of the
+ * HWMASKEXT registers available in newer versions.
+ *
+ * Earlier versions without these registers should use SoC level IRQ masking
+ * which call the meta_intc_*_simple() functions above, or if that isn't
+ * available should use the fallback meta_intc_*_nomask() functions below.
+ */
+static void meta_intc_mask_irq(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       unsigned int bit = 1 << meta_intc_offset(hw);
+       void __iomem *mask_addr = meta_intc_mask_addr(hw);
+       unsigned long flags;
+
+       record_irq_is_masked(data);
+
+       /* update the interrupt mask */
+       __global_lock2(flags);
+       metag_out32(metag_in32(mask_addr) & ~bit, mask_addr);
+       __global_unlock2(flags);
+}
+
+/**
+ * meta_intc_unmask_irq() - unmask an external irq using HWMASKEXT
+ * @data:      data for the external irq to unmask
+ *
+ * This is a default implementation of an unmask function which makes use of the
+ * HWMASKEXT registers available on new versions. It should be paired with
+ * meta_intc_mask_irq() above.
+ */
+static void meta_intc_unmask_irq(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       unsigned int bit = 1 << meta_intc_offset(hw);
+       void __iomem *mask_addr = meta_intc_mask_addr(hw);
+       unsigned long flags;
+
+       record_irq_is_unmasked(data);
+
+       /* update the interrupt mask */
+       __global_lock2(flags);
+       metag_out32(metag_in32(mask_addr) | bit, mask_addr);
+       __global_unlock2(flags);
+}
+
+/**
+ * meta_intc_mask_irq_nomask() - mask an external irq by unvectoring
+ * @data:      data for the external irq to mask
+ *
+ * This is the version of the mask function for older versions which don't have
+ * HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the IRQ is
+ * unvectored from the core and retriggered if necessary later.
+ */
+static void meta_intc_mask_irq_nomask(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       void __iomem *vec_addr = meta_intc_vec_addr(hw);
+
+       record_irq_is_masked(data);
+
+       /* there is no interrupt mask, so unvector the interrupt */
+       metag_out32(0, vec_addr);
+}
+
+/**
+ * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring
+ * @data:      data for the external irq to unmask
+ *
+ * This is the version of the unmask function for older versions which don't
+ * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the
+ * IRQ is revectored back to the core and retriggered if necessary.
+ *
+ * The retriggering done by this function is specific to edge interrupts.
+ */
+static void meta_intc_unmask_edge_irq_nomask(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       unsigned int bit = 1 << meta_intc_offset(hw);
+       void __iomem *stat_addr = meta_intc_stat_addr(hw);
+       void __iomem *vec_addr = meta_intc_vec_addr(hw);
+       unsigned int thread = hard_processor_id();
+
+       record_irq_is_unmasked(data);
+
+       /* there is no interrupt mask, so revector the interrupt */
+       metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+
+       /*
+        * Re-trigger interrupt
+        *
+        * Writing a 1 toggles, and a 0->1 transition triggers. We only
+        * retrigger if the status bit is already set, which means we
+        * need to clear it first. Retriggering is fundamentally racy
+        * because if the interrupt fires again after we clear it we
+        * could end up clearing it again and the interrupt handler
+        * thinking it hasn't fired. Therefore we need to keep trying to
+        * retrigger until the bit is set.
+        */
+       if (metag_in32(stat_addr) & bit) {
+               metag_out32(bit, stat_addr);
+               while (!(metag_in32(stat_addr) & bit))
+                       metag_out32(bit, stat_addr);
+       }
+}
+
+/**
+ * meta_intc_unmask_level_irq_nomask() - unmask a level irq by revectoring
+ * @data:      data for the external irq to unmask
+ *
+ * This is the version of the unmask function for older versions which don't
+ * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the
+ * IRQ is revectored back to the core and retriggered if necessary.
+ *
+ * The retriggering done by this function is specific to level interrupts.
+ */
+static void meta_intc_unmask_level_irq_nomask(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       unsigned int bit = 1 << meta_intc_offset(hw);
+       void __iomem *stat_addr = meta_intc_stat_addr(hw);
+       void __iomem *vec_addr = meta_intc_vec_addr(hw);
+       unsigned int thread = hard_processor_id();
+
+       record_irq_is_unmasked(data);
+
+       /* there is no interrupt mask, so revector the interrupt */
+       metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+
+       /* Re-trigger interrupt */
+       /* Writing a 1 triggers interrupt */
+       if (metag_in32(stat_addr) & bit)
+               metag_out32(bit, stat_addr);
+}
+
+/**
+ * meta_intc_irq_set_type() - set the type of an external irq
+ * @data:      data for the external irq to set the type of
+ * @flow_type: new irq flow type
+ *
+ * Set the flow type of an external interrupt. This updates the irq chip and irq
+ * handler depending on whether the irq is edge or level sensitive (the polarity
+ * is ignored), and also sets up the bit in HWLEVELEXT so the hardware knows
+ * when to trigger.
+ */
+static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+#ifdef CONFIG_METAG_SUSPEND_MEM
+       struct meta_intc_priv *priv = &meta_intc_priv;
+#endif
+       unsigned int irq = data->irq;
+       irq_hw_number_t hw = data->hwirq;
+       unsigned int bit = 1 << meta_intc_offset(hw);
+       void __iomem *level_addr = meta_intc_level_addr(hw);
+       unsigned long flags;
+       unsigned int level;
+
+       /* update the chip/handler */
+       if (flow_type & IRQ_TYPE_LEVEL_MASK)
+               __irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip,
+                                                  handle_level_irq, NULL);
+       else
+               __irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip,
+                                                  handle_edge_irq, NULL);
+
+       /* and clear/set the bit in HWLEVELEXT */
+       __global_lock2(flags);
+       level = metag_in32(level_addr);
+       if (flow_type & IRQ_TYPE_LEVEL_MASK)
+               level |= bit;
+       else
+               level &= ~bit;
+       metag_out32(level, level_addr);
+#ifdef CONFIG_METAG_SUSPEND_MEM
+       priv->levels_altered[meta_intc_bank(hw)] |= bit;
+#endif
+       __global_unlock2(flags);
+
+       return 0;
+}
+
+/**
+ * meta_intc_irq_demux() - external irq de-multiplexer
+ * @irq:       the virtual interrupt number
+ * @desc:      the interrupt description structure for this irq
+ *
+ * The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is
+ * this function's job to demux this irq and figure out exactly which external
+ * irq needs servicing.
+ *
+ * Whilst using TR2 to detect external interrupts is a software convention it is
+ * (hopefully) unlikely to change.
+ */
+static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+       struct meta_intc_priv *priv = &meta_intc_priv;
+       irq_hw_number_t hw;
+       unsigned int bank, irq_no, status;
+       void __iomem *stat_addr = meta_intc_stat_addr(0);
+
+       /*
+        * Locate which interrupt has caused our handler to run.
+        */
+       for (bank = 0; bank < priv->nr_banks; ++bank) {
+               /* Which interrupts are currently pending in this bank? */
+recalculate:
+               status = metag_in32(stat_addr) & priv->unmasked[bank];
+
+               for (hw = bank*32; status; status >>= 1, ++hw) {
+                       if (status & 0x1) {
+                               /*
+                                * Map the hardware IRQ number to a virtual
+                                * Linux IRQ number.
+                                */
+                               irq_no = irq_linear_revmap(priv->domain, hw);
+
+                               /*
+                                * Only fire off external interrupts that are
+                                * registered to be handled by the kernel.
+                                * Other external interrupts are probably being
+                                * handled by other Meta hardware threads.
+                                */
+                               generic_handle_irq(irq_no);
+
+                               /*
+                                * The handler may have re-enabled interrupts
+                                * which could have caused a nested invocation
+                                * of this code and make the copy of the
+                                * status register we are using invalid.
+                                */
+                               goto recalculate;
+                       }
+               }
+               stat_addr += HWSTAT_STRIDE;
+       }
+}
+
+#ifdef CONFIG_SMP
+/**
+ * meta_intc_set_affinity() - set the affinity for an interrupt
+ * @data:      data for the external irq to set the affinity of
+ * @cpumask:   cpu mask representing cpus which can handle the interrupt
+ * @force:     whether to force (ignored)
+ *
+ * Revector the specified external irq onto a specific cpu's TR2 trigger, so
+ * that that cpu tends to be the one who handles it.
+ */
+static int meta_intc_set_affinity(struct irq_data *data,
+                                 const struct cpumask *cpumask, bool force)
+{
+       irq_hw_number_t hw = data->hwirq;
+       void __iomem *vec_addr = meta_intc_vec_addr(hw);
+       unsigned int cpu, thread;
+
+       /*
+        * Wire up this interrupt from HWVECxEXT to the Meta core.
+        *
+        * Note that we can't wire up HWVECxEXT to interrupt more than
+        * one cpu (the interrupt code doesn't support it), so we just
+        * pick the first cpu we find in 'cpumask'.
+        */
+       cpu = cpumask_any(cpumask);
+       thread = cpu_2_hwthread_id[cpu];
+
+       metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+
+       return 0;
+}
+#else
+#define meta_intc_set_affinity NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#define META_INTC_CHIP_FLAGS   (IRQCHIP_MASK_ON_SUSPEND \
+                               | IRQCHIP_SKIP_SET_WAKE)
+#else
+#define META_INTC_CHIP_FLAGS   0
+#endif
+
+/* public edge/level irq chips which SoCs can override */
+
+struct irq_chip meta_intc_edge_chip = {
+       .irq_startup            = meta_intc_startup_irq,
+       .irq_shutdown           = meta_intc_shutdown_irq,
+       .irq_ack                = meta_intc_ack_irq,
+       .irq_mask               = meta_intc_mask_irq,
+       .irq_unmask             = meta_intc_unmask_irq,
+       .irq_set_type           = meta_intc_irq_set_type,
+       .irq_set_affinity       = meta_intc_set_affinity,
+       .flags                  = META_INTC_CHIP_FLAGS,
+};
+
+struct irq_chip meta_intc_level_chip = {
+       .irq_startup            = meta_intc_startup_irq,
+       .irq_shutdown           = meta_intc_shutdown_irq,
+       .irq_set_type           = meta_intc_irq_set_type,
+       .irq_mask               = meta_intc_mask_irq,
+       .irq_unmask             = meta_intc_unmask_irq,
+       .irq_set_affinity       = meta_intc_set_affinity,
+       .flags                  = META_INTC_CHIP_FLAGS,
+};
+
+/**
+ * meta_intc_map() - map an external irq
+ * @d:         irq domain of external trigger block
+ * @irq:       virtual irq number
+ * @hw:                hardware irq number within external trigger block
+ *
+ * This sets up a virtual irq for a specified hardware interrupt. The irq chip
+ * and handler is configured, using the HWLEVELEXT registers to determine
+ * edge/level flow type. These registers will have been set when the irq type is
+ * set (or set to a default at init time).
+ */
+static int meta_intc_map(struct irq_domain *d, unsigned int irq,
+                        irq_hw_number_t hw)
+{
+       unsigned int bit = 1 << meta_intc_offset(hw);
+       void __iomem *level_addr = meta_intc_level_addr(hw);
+
+       /* Go by the current sense in the HWLEVELEXT register */
+       if (metag_in32(level_addr) & bit)
+               irq_set_chip_and_handler(irq, &meta_intc_level_chip,
+                                        handle_level_irq);
+       else
+               irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
+                                        handle_edge_irq);
+       return 0;
+}
+
+static const struct irq_domain_ops meta_intc_domain_ops = {
+       .map = meta_intc_map,
+       .xlate = irq_domain_xlate_twocell,
+};
+
+#ifdef CONFIG_METAG_SUSPEND_MEM
+
+/**
+ * struct meta_intc_context - suspend context
+ * @levels:    State of HWLEVELEXT registers
+ * @masks:     State of HWMASKEXT registers
+ * @vectors:   State of HWVECEXT registers
+ * @txvecint:  State of TxVECINT registers
+ *
+ * This structure stores the IRQ state across suspend.
+ */
+struct meta_intc_context {
+       u32 levels[4];
+       u32 masks[4];
+       u8 vectors[4*32];
+
+       u8 txvecint[4][4];
+};
+
+/* suspend context */
+static struct meta_intc_context *meta_intc_context;
+
+/**
+ * meta_intc_suspend() - store irq state
+ *
+ * To avoid interfering with other threads we only save the IRQ state of IRQs in
+ * use by Linux.
+ */
+static int meta_intc_suspend(void)
+{
+       struct meta_intc_priv *priv = &meta_intc_priv;
+       int i, j;
+       irq_hw_number_t hw;
+       unsigned int bank;
+       unsigned long flags;
+       struct meta_intc_context *context;
+       void __iomem *level_addr, *mask_addr, *vec_addr;
+       u32 mask, bit;
+
+       context = kzalloc(sizeof(*context), GFP_ATOMIC);
+       if (!context)
+               return -ENOMEM;
+
+       hw = 0;
+       level_addr = meta_intc_level_addr(0);
+       mask_addr = meta_intc_mask_addr(0);
+       for (bank = 0; bank < priv->nr_banks; ++bank) {
+               vec_addr = meta_intc_vec_addr(hw);
+
+               /* create mask of interrupts in use */
+               mask = 0;
+               for (bit = 1; bit; bit <<= 1) {
+                       i = irq_linear_revmap(priv->domain, hw);
+                       /* save mapped irqs which are enabled or have actions */
+                       if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
+                                 irq_has_action(i))) {
+                               mask |= bit;
+
+                               /* save trigger vector */
+                               context->vectors[hw] = metag_in32(vec_addr);
+                       }
+
+                       ++hw;
+                       vec_addr += HWVECnEXT_STRIDE;
+               }
+
+               /* save level state if any IRQ levels altered */
+               if (priv->levels_altered[bank])
+                       context->levels[bank] = metag_in32(level_addr);
+               /* save mask state if any IRQs in use */
+               if (mask)
+                       context->masks[bank] = metag_in32(mask_addr);
+
+               level_addr += HWSTAT_STRIDE;
+               mask_addr += HWSTAT_STRIDE;
+       }
+
+       /* save trigger matrixing */
+       __global_lock2(flags);
+       for (i = 0; i < 4; ++i)
+               for (j = 0; j < 4; ++j)
+                       context->txvecint[i][j] = metag_in32(T0VECINT_BHALT +
+                                                            TnVECINT_STRIDE*i +
+                                                            8*j);
+       __global_unlock2(flags);
+
+       meta_intc_context = context;
+       return 0;
+}
+
+/**
+ * meta_intc_resume() - restore saved irq state
+ *
+ * Restore the saved IRQ state and drop it.
+ */
+static void meta_intc_resume(void)
+{
+       struct meta_intc_priv *priv = &meta_intc_priv;
+       int i, j;
+       irq_hw_number_t hw;
+       unsigned int bank;
+       unsigned long flags;
+       struct meta_intc_context *context = meta_intc_context;
+       void __iomem *level_addr, *mask_addr, *vec_addr;
+       u32 mask, bit, tmp;
+
+       meta_intc_context = NULL;
+
+       hw = 0;
+       level_addr = meta_intc_level_addr(0);
+       mask_addr = meta_intc_mask_addr(0);
+       for (bank = 0; bank < priv->nr_banks; ++bank) {
+               vec_addr = meta_intc_vec_addr(hw);
+
+               /* create mask of interrupts in use */
+               mask = 0;
+               for (bit = 1; bit; bit <<= 1) {
+                       i = irq_linear_revmap(priv->domain, hw);
+                       /* restore mapped irqs, enabled or with actions */
+                       if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
+                                 irq_has_action(i))) {
+                               mask |= bit;
+
+                               /* restore trigger vector */
+                               metag_out32(context->vectors[hw], vec_addr);
+                       }
+
+                       ++hw;
+                       vec_addr += HWVECnEXT_STRIDE;
+               }
+
+               if (mask) {
+                       /* restore mask state */
+                       __global_lock2(flags);
+                       tmp = metag_in32(mask_addr);
+                       tmp = (tmp & ~mask) | (context->masks[bank] & mask);
+                       metag_out32(tmp, mask_addr);
+                       __global_unlock2(flags);
+               }
+
+               mask = priv->levels_altered[bank];
+               if (mask) {
+                       /* restore level state */
+                       __global_lock2(flags);
+                       tmp = metag_in32(level_addr);
+                       tmp = (tmp & ~mask) | (context->levels[bank] & mask);
+                       metag_out32(tmp, level_addr);
+                       __global_unlock2(flags);
+               }
+
+               level_addr += HWSTAT_STRIDE;
+               mask_addr += HWSTAT_STRIDE;
+       }
+
+       /* restore trigger matrixing */
+       __global_lock2(flags);
+       for (i = 0; i < 4; ++i) {
+               for (j = 0; j < 4; ++j) {
+                       metag_out32(context->txvecint[i][j],
+                                   T0VECINT_BHALT +
+                                   TnVECINT_STRIDE*i +
+                                   8*j);
+               }
+       }
+       __global_unlock2(flags);
+
+       kfree(context);
+}
+
+static struct syscore_ops meta_intc_syscore_ops = {
+       .suspend = meta_intc_suspend,
+       .resume = meta_intc_resume,
+};
+
+static void __init meta_intc_init_syscore_ops(struct meta_intc_priv *priv)
+{
+       register_syscore_ops(&meta_intc_syscore_ops);
+}
+#else
+#define meta_intc_init_syscore_ops(priv) do {} while (0)
+#endif
+
+/**
+ * meta_intc_init_cpu() - register with a Meta cpu
+ * @priv:      private interrupt controller data
+ * @cpu:       the CPU to register on
+ *
+ * Configure @cpu's TR2 irq so that we can demux external irqs.
+ */
+static void __init meta_intc_init_cpu(struct meta_intc_priv *priv, int cpu)
+{
+       unsigned int thread = cpu_2_hwthread_id[cpu];
+       unsigned int signum = TBID_SIGNUM_TR2(thread);
+       int irq = tbisig_map(signum);
+
+       /* Register the multiplexed IRQ handler */
+       irq_set_chained_handler(irq, meta_intc_irq_demux);
+       irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+}
+
+/**
+ * meta_intc_no_mask() - indicate lack of HWMASKEXT registers
+ *
+ * Called from SoC code (or init code below) to dynamically indicate the lack of
+ * HWMASKEXT registers (for example depending on some SoC revision register).
+ * This alters the irq mask and unmask callbacks to use the fallback
+ * unvectoring/retriggering technique instead of using HWMASKEXT registers.
+ */
+void __init meta_intc_no_mask(void)
+{
+       meta_intc_edge_chip.irq_mask    = meta_intc_mask_irq_nomask;
+       meta_intc_edge_chip.irq_unmask  = meta_intc_unmask_edge_irq_nomask;
+       meta_intc_level_chip.irq_mask   = meta_intc_mask_irq_nomask;
+       meta_intc_level_chip.irq_unmask = meta_intc_unmask_level_irq_nomask;
+}
+
+/**
+ * init_external_IRQ() - initialise the external irq controller
+ *
+ * Set up the external irq controller using device tree properties. This is
+ * called from init_IRQ().
+ */
+int __init init_external_IRQ(void)
+{
+       struct meta_intc_priv *priv = &meta_intc_priv;
+       struct device_node *node;
+       int ret, cpu;
+       u32 val;
+       bool no_masks = false;
+
+       node = of_find_compatible_node(NULL, NULL, "img,meta-intc");
+       if (!node)
+               return -ENOENT;
+
+       /* Get number of banks */
+       ret = of_property_read_u32(node, "num-banks", &val);
+       if (ret) {
+               pr_err("meta-intc: No num-banks property found\n");
+               return ret;
+       }
+       if (val < 1 || val > 4) {
+               pr_err("meta-intc: num-banks (%u) out of range\n", val);
+               return -EINVAL;
+       }
+       priv->nr_banks = val;
+
+       /* Are any mask registers present? */
+       if (of_get_property(node, "no-mask", NULL))
+               no_masks = true;
+
+       /* No HWMASKEXT registers present? */
+       if (no_masks)
+               meta_intc_no_mask();
+
+       /* Set up an IRQ domain */
+       /*
+        * This is a legacy IRQ domain for now until all the platform setup code
+        * has been converted to devicetree.
+        */
+       priv->domain = irq_domain_add_linear(node, priv->nr_banks*32,
+                                            &meta_intc_domain_ops, priv);
+       if (unlikely(!priv->domain)) {
+               pr_err("meta-intc: cannot add IRQ domain\n");
+               return -ENOMEM;
+       }
+
+       /* Setup TR2 for all cpus. */
+       for_each_possible_cpu(cpu)
+               meta_intc_init_cpu(priv, cpu);
+
+       /* Set up system suspend/resume callbacks */
+       meta_intc_init_syscore_ops(priv);
+
+       pr_info("meta-intc: External IRQ controller initialised (%u IRQs)\n",
+               priv->nr_banks*32);
+
+       return 0;
+}
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
new file mode 100644 (file)
index 0000000..8e94d7a
--- /dev/null
@@ -0,0 +1,343 @@
+/*
+ * Meta internal (HWSTATMETA) interrupt code.
+ *
+ * Copyright (C) 2011-2012 Imagination Technologies Ltd.
+ *
+ * This code is based on the code in SoC/common/irq.c and SoC/comet/irq.c
+ * The code base could be generalised/merged as a lot of the functionality is
+ * similar. Until this is done, we try to keep the code simple here.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+
+#include <asm/irq.h>
+#include <asm/hwthread.h>
+
+#define PERF0VECINT            0x04820580
+#define PERF1VECINT            0x04820588
+#define PERF0TRIG_OFFSET       16
+#define PERF1TRIG_OFFSET       17
+
+/**
+ * struct metag_internal_irq_priv - private meta internal interrupt data
+ * @domain:            IRQ domain for all internal Meta IRQs (HWSTATMETA)
+ * @unmasked:          Record of unmasked IRQs
+ */
+struct metag_internal_irq_priv {
+       struct irq_domain       *domain;
+
+       unsigned long           unmasked;
+};
+
+/* Private data for the one and only internal interrupt controller */
+static struct metag_internal_irq_priv metag_internal_irq_priv;
+
+static unsigned int metag_internal_irq_startup(struct irq_data *data);
+static void metag_internal_irq_shutdown(struct irq_data *data);
+static void metag_internal_irq_ack(struct irq_data *data);
+static void metag_internal_irq_mask(struct irq_data *data);
+static void metag_internal_irq_unmask(struct irq_data *data);
+#ifdef CONFIG_SMP
+static int metag_internal_irq_set_affinity(struct irq_data *data,
+                       const struct cpumask *cpumask, bool force);
+#endif
+
+static struct irq_chip internal_irq_edge_chip = {
+       .name = "HWSTATMETA-IRQ",
+       .irq_startup = metag_internal_irq_startup,
+       .irq_shutdown = metag_internal_irq_shutdown,
+       .irq_ack = metag_internal_irq_ack,
+       .irq_mask = metag_internal_irq_mask,
+       .irq_unmask = metag_internal_irq_unmask,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = metag_internal_irq_set_affinity,
+#endif
+};
+
+/*
+ *     metag_hwvec_addr - get the address of *VECINT regs of irq
+ *
+ *     This function is a table of supported triggers on HWSTATMETA
+ *     Could do with a structure, but better keep it simple. Changes
+ *     in this code should be rare.
+ */
+static inline void __iomem *metag_hwvec_addr(irq_hw_number_t hw)
+{
+       void __iomem *addr;
+
+       switch (hw) {
+       case PERF0TRIG_OFFSET:
+               addr = (void __iomem *)PERF0VECINT;
+               break;
+       case PERF1TRIG_OFFSET:
+               addr = (void __iomem *)PERF1VECINT;
+               break;
+       default:
+               addr = NULL;
+               break;
+       }
+       return addr;
+}
+
+/*
+ *     metag_internal_irq_startup - setup an internal irq
+ *     @irq:   the irq to startup
+ *
+ *     Multiplex interrupts for @irq onto TR1. Clear any pending
+ *     interrupts.
+ */
+static unsigned int metag_internal_irq_startup(struct irq_data *data)
+{
+       /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */
+       metag_internal_irq_ack(data);
+
+       /* Enable the interrupt by unmasking it */
+       metag_internal_irq_unmask(data);
+
+       return 0;
+}
+
+/*
+ *     metag_internal_irq_shutdown - turn off the irq
+ *     @irq:   the irq number to turn off
+ *
+ *     Mask @irq and clear any pending interrupts.
+ *     Stop muxing @irq onto TR1.
+ */
+static void metag_internal_irq_shutdown(struct irq_data *data)
+{
+       /* Disable the IRQ at the core by masking it. */
+       metag_internal_irq_mask(data);
+
+       /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */
+       metag_internal_irq_ack(data);
+}
+
+/*
+ *     metag_internal_irq_ack - acknowledge irq
+ *     @irq:   the irq to ack
+ */
+static void metag_internal_irq_ack(struct irq_data *data)
+{
+       irq_hw_number_t hw = data->hwirq;
+       unsigned int bit = 1 << hw;
+
+       if (metag_in32(HWSTATMETA) & bit)
+               metag_out32(bit, HWSTATMETA);
+}
+
+/**
+ * metag_internal_irq_mask() - mask an internal irq by unvectoring
+ * @data:      data for the internal irq to mask
+ *
+ * HWSTATMETA has no mask register. Instead the IRQ is unvectored from the core
+ * and retriggered if necessary later.
+ */
+static void metag_internal_irq_mask(struct irq_data *data)
+{
+       struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
+       irq_hw_number_t hw = data->hwirq;
+       void __iomem *vec_addr = metag_hwvec_addr(hw);
+
+       clear_bit(hw, &priv->unmasked);
+
+       /* there is no interrupt mask, so unvector the interrupt */
+       metag_out32(0, vec_addr);
+}
+
+/**
+ * metag_internal_irq_unmask() - unmask an internal irq by revectoring
+ * @data:      data for the internal irq to unmask
+ *
+ * HWSTATMETA has no mask register. Instead the IRQ is revectored back to the
+ * core and retriggered if necessary.
+ */
+static void metag_internal_irq_unmask(struct irq_data *data)
+{
+       struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
+       irq_hw_number_t hw = data->hwirq;
+       unsigned int bit = 1 << hw;
+       void __iomem *vec_addr = metag_hwvec_addr(hw);
+       unsigned int thread = hard_processor_id();
+
+       set_bit(hw, &priv->unmasked);
+
+       /* there is no interrupt mask, so revector the interrupt */
+       metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), vec_addr);
+
+       /*
+        * Re-trigger interrupt
+        *
+        * Writing a 1 toggles, and a 0->1 transition triggers. We only
+        * retrigger if the status bit is already set, which means we
+        * need to clear it first. Retriggering is fundamentally racy
+        * because if the interrupt fires again after we clear it we
+        * could end up clearing it again and the interrupt handler
+        * thinking it hasn't fired. Therefore we need to keep trying to
+        * retrigger until the bit is set.
+        */
+       if (metag_in32(HWSTATMETA) & bit) {
+               metag_out32(bit, HWSTATMETA);
+               while (!(metag_in32(HWSTATMETA) & bit))
+                       metag_out32(bit, HWSTATMETA);
+       }
+}
+
+#ifdef CONFIG_SMP
+/*
+ *     metag_internal_irq_set_affinity - set the affinity for an interrupt
+ */
+static int metag_internal_irq_set_affinity(struct irq_data *data,
+                       const struct cpumask *cpumask, bool force)
+{
+       unsigned int cpu, thread;
+       irq_hw_number_t hw = data->hwirq;
+       /*
+        * Wire up this interrupt from *VECINT to the Meta core.
+        *
+        * Note that we can't wire up *VECINT to interrupt more than
+        * one cpu (the interrupt code doesn't support it), so we just
+        * pick the first cpu we find in 'cpumask'.
+        */
+       cpu = cpumask_any(cpumask);
+       thread = cpu_2_hwthread_id[cpu];
+
+       metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
+                   metag_hwvec_addr(hw));
+
+       return 0;
+}
+#endif
+
+/*
+ *     metag_internal_irq_demux - irq de-multiplexer
+ *     @irq:   the interrupt number
+ *     @desc:  the interrupt description structure for this irq
+ *
+ *     The cpu receives an interrupt on TR1 when an interrupt has
+ *     occurred. It is this function's job to demux this irq and
+ *     figure out exactly which trigger needs servicing.
+ */
+static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+       struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc);
+       irq_hw_number_t hw;
+       unsigned int irq_no;
+       u32 status;
+
+recalculate:
+       status = metag_in32(HWSTATMETA) & priv->unmasked;
+
+       for (hw = 0; status != 0; status >>= 1, ++hw) {
+               if (status & 0x1) {
+                       /*
+                        * Map the hardware IRQ number to a virtual Linux IRQ
+                        * number.
+                        */
+                       irq_no = irq_linear_revmap(priv->domain, hw);
+
+                       /*
+                        * Only fire off interrupts that are
+                        * registered to be handled by the kernel.
+                        * Other interrupts are probably being
+                        * handled by other Meta hardware threads.
+                        */
+                       generic_handle_irq(irq_no);
+
+                       /*
+                        * The handler may have re-enabled interrupts
+                        * which could have caused a nested invocation
+                        * of this code and make the copy of the
+                        * status register we are using invalid.
+                        */
+                       goto recalculate;
+               }
+       }
+}
+
+/**
+ * internal_irq_map() - Map an internal meta IRQ to a virtual IRQ number.
+ * @hw:                Number of the internal IRQ. Must be in range.
+ *
+ * Returns:    The virtual IRQ number of the Meta internal IRQ specified by
+ *             @hw.
+ */
+int internal_irq_map(unsigned int hw)
+{
+       struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
+       if (!priv->domain)
+               return -ENODEV;
+       return irq_create_mapping(priv->domain, hw);
+}
+
+/**
+ *     metag_internal_irq_init_cpu - register with the Meta cpu
+ *     @cpu:   the CPU to register on
+ *
+ *     Configure @cpu's TR1 irq so that we can demux irqs.
+ */
+static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv,
+                                       int cpu)
+{
+       unsigned int thread = cpu_2_hwthread_id[cpu];
+       unsigned int signum = TBID_SIGNUM_TR1(thread);
+       int irq = tbisig_map(signum);
+
+       /* Register the multiplexed IRQ handler */
+       irq_set_handler_data(irq, priv);
+       irq_set_chained_handler(irq, metag_internal_irq_demux);
+       irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+}
+
+/**
+ * metag_internal_intc_map() - map an internal irq
+ * @d:         irq domain of internal trigger block
+ * @irq:       virtual irq number
+ * @hw:                hardware irq number within internal trigger block
+ *
+ * This sets up a virtual irq for a specified hardware interrupt. The irq chip
+ * and handler is configured.
+ */
+static int metag_internal_intc_map(struct irq_domain *d, unsigned int irq,
+                                  irq_hw_number_t hw)
+{
+       /* only register interrupt if it is mapped */
+       if (!metag_hwvec_addr(hw))
+               return -EINVAL;
+
+       irq_set_chip_and_handler(irq, &internal_irq_edge_chip,
+                                handle_edge_irq);
+       return 0;
+}
+
+static const struct irq_domain_ops metag_internal_intc_domain_ops = {
+       .map    = metag_internal_intc_map,
+};
+
+/**
+ *     init_internal_IRQ - register internal IRQs
+ *
+ *     Register the irq chip and handler function for all internal IRQs
+ */
+int __init init_internal_IRQ(void)
+{
+       struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
+       unsigned int cpu;
+
+       /* Set up an IRQ domain */
+       priv->domain = irq_domain_add_linear(NULL, 32,
+                                            &metag_internal_intc_domain_ops,
+                                            priv);
+       if (unlikely(!priv->domain)) {
+               pr_err("meta-internal-intc: cannot add IRQ domain\n");
+               return -ENOMEM;
+       }
+
+       /* Setup TR1 for all cpus. */
+       for_each_possible_cpu(cpu)
+               metag_internal_irq_init_cpu(priv, cpu);
+
+       return 0;
+};
index af4fd3d..3a4165c 100644 (file)
@@ -145,7 +145,7 @@ void remove_divas_proc(void)
 static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
                                  size_t count, loff_t *pos)
 {
-       diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+       diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
        PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
 
        if ((count == 1) || (count == 2)) {
@@ -172,7 +172,7 @@ static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
 static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
                                    size_t count, loff_t *pos)
 {
-       diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+       diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
        PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
 
        if ((count == 1) || (count == 2)) {
@@ -251,7 +251,7 @@ static const struct file_operations grp_opt_proc_fops = {
 static ssize_t info_proc_write(struct file *file, const char __user *buffer,
                               size_t count, loff_t *pos)
 {
-       diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
+       diva_os_xdi_adapter_t *a = PDE(file_inode(file))->data;
        PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
        char c[4];
 
index 88e4f0e..9a3ce93 100644 (file)
@@ -173,7 +173,7 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
 {
        struct log_data *inf;
        int len;
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        struct procdata *pd = NULL;
        hysdn_card *card;
 
@@ -319,7 +319,7 @@ static unsigned int
 hysdn_log_poll(struct file *file, poll_table *wait)
 {
        unsigned int mask = 0;
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        hysdn_card *card;
        struct procdata *pd = NULL;
 
index b87d9e5..9bb12ba 100644 (file)
@@ -1058,7 +1058,7 @@ isdn_info_update(void)
 static ssize_t
 isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
 {
-       uint minor = iminor(file->f_path.dentry->d_inode);
+       uint minor = iminor(file_inode(file));
        int len = 0;
        int drvidx;
        int chidx;
@@ -1165,7 +1165,7 @@ out:
 static ssize_t
 isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
 {
-       uint minor = iminor(file->f_path.dentry->d_inode);
+       uint minor = iminor(file_inode(file));
        int drvidx;
        int chidx;
        int retval;
@@ -1228,7 +1228,7 @@ static unsigned int
 isdn_poll(struct file *file, poll_table *wait)
 {
        unsigned int mask = 0;
-       unsigned int minor = iminor(file->f_path.dentry->d_inode);
+       unsigned int minor = iminor(file_inode(file));
        int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
 
        mutex_lock(&isdn_mutex);
@@ -1269,7 +1269,7 @@ out:
 static int
 isdn_ioctl(struct file *file, uint cmd, ulong arg)
 {
-       uint minor = iminor(file->f_path.dentry->d_inode);
+       uint minor = iminor(file_inode(file));
        isdn_ctrl c;
        int drvidx;
        int ret;
index 61d78fa..38ceac5 100644 (file)
@@ -668,7 +668,7 @@ isdn_ppp_poll(struct file *file, poll_table *wait)
 
        if (is->debug & 0x2)
                printk(KERN_DEBUG "isdn_ppp_poll: minor: %d\n",
-                      iminor(file->f_path.dentry->d_inode));
+                      iminor(file_inode(file)));
 
        /* just registers wait_queue hook. This doesn't really wait. */
        poll_wait(file, &is->wq, wait);
index abe2d69..8b07f83 100644 (file)
@@ -483,7 +483,6 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 {
        struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
        struct sock *sk = sock->sk;
-       struct hlist_node *node;
        struct sock *csk;
        int err = 0;
 
@@ -508,7 +507,7 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 
        if (sk->sk_protocol < ISDN_P_B_START) {
                read_lock_bh(&data_sockets.lock);
-               sk_for_each(csk, node, &data_sockets.head) {
+               sk_for_each(csk, &data_sockets.head) {
                        if (sk == csk)
                                continue;
                        if (_pms(csk)->dev != _pms(sk)->dev)
index deda591..9cb4b62 100644 (file)
@@ -64,12 +64,11 @@ unlock:
 static void
 send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
 {
-       struct hlist_node       *node;
        struct sock             *sk;
        struct sk_buff          *cskb = NULL;
 
        read_lock(&sl->lock);
-       sk_for_each(sk, node, &sl->head) {
+       sk_for_each(sk, &sl->head) {
                if (sk->sk_state != MISDN_BOUND)
                        continue;
                if (!cskb)
index fc92ccb..b3256ff 100644 (file)
@@ -396,7 +396,7 @@ static const char *lg_bus_name(struct virtio_device *vdev)
 }
 
 /* The ops structure which hooks everything together. */
-static struct virtio_config_ops lguest_config_ops = {
+static const struct virtio_config_ops lguest_config_ops = {
        .get_features = lg_get_features,
        .finalize_features = lg_finalize_features,
        .get = lg_get,
index 91a02ee..4d8d90b 100644 (file)
@@ -154,17 +154,6 @@ config MD_RAID456
 
          If unsure, say Y.
 
-config MULTICORE_RAID456
-       bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
-       depends on MD_RAID456
-       depends on SMP
-       depends on EXPERIMENTAL
-       ---help---
-         Enable the raid456 module to dispatch per-stripe raid operations to a
-         thread pool.
-
-         If unsure, say N.
-
 config MD_MULTIPATH
        tristate "Multipath I/O support"
        depends on BLK_DEV_MD
@@ -210,7 +199,7 @@ config DM_DEBUG
 
 config DM_BUFIO
        tristate
-       depends on BLK_DEV_DM && EXPERIMENTAL
+       depends on BLK_DEV_DM
        ---help---
         This interface allows you to do buffered I/O on a device and acts
         as a cache, holding recently-read blocks in memory and performing
@@ -218,7 +207,7 @@ config DM_BUFIO
 
 config DM_BIO_PRISON
        tristate
-       depends on BLK_DEV_DM && EXPERIMENTAL
+       depends on BLK_DEV_DM
        ---help---
         Some bio locking schemes used by other device-mapper targets
         including thin provisioning.
@@ -251,8 +240,8 @@ config DM_SNAPSHOT
          Allow volume managers to take writable snapshots of a device.
 
 config DM_THIN_PROVISIONING
-       tristate "Thin provisioning target (EXPERIMENTAL)"
-       depends on BLK_DEV_DM && EXPERIMENTAL
+       tristate "Thin provisioning target"
+       depends on BLK_DEV_DM
        select DM_PERSISTENT_DATA
        select DM_BIO_PRISON
        ---help---
@@ -268,6 +257,37 @@ config DM_DEBUG_BLOCK_STACK_TRACING
 
          If unsure, say N.
 
+config DM_CACHE
+       tristate "Cache target (EXPERIMENTAL)"
+       depends on BLK_DEV_DM
+       default n
+       select DM_PERSISTENT_DATA
+       select DM_BIO_PRISON
+       ---help---
+         dm-cache attempts to improve performance of a block device by
+         moving frequently used data to a smaller, higher performance
+         device.  Different 'policy' plugins can be used to change the
+         algorithms used to select which blocks are promoted, demoted,
+         cleaned etc.  It supports writeback and writethrough modes.
+
+config DM_CACHE_MQ
+       tristate "MQ Cache Policy (EXPERIMENTAL)"
+       depends on DM_CACHE
+       default y
+       ---help---
+         A cache policy that uses a multiqueue ordered by recent hit
+         count to select which blocks should be promoted and demoted.
+         This is meant to be a general purpose policy.  It prioritises
+         reads over writes.
+
+config DM_CACHE_CLEANER
+       tristate "Cleaner Cache Policy (EXPERIMENTAL)"
+       depends on DM_CACHE
+       default y
+       ---help---
+         A simple cache policy that writes back all data to the
+         origin.  Used when decommissioning a dm-cache.
+
 config DM_MIRROR
        tristate "Mirror target"
        depends on BLK_DEV_DM
@@ -302,8 +322,8 @@ config DM_RAID
         in one of the available parity distribution methods.
 
 config DM_LOG_USERSPACE
-       tristate "Mirror userspace logging (EXPERIMENTAL)"
-       depends on DM_MIRROR && EXPERIMENTAL && NET
+       tristate "Mirror userspace logging"
+       depends on DM_MIRROR && NET
        select CONNECTOR
        ---help---
          The userspace logging module provides a mechanism for
@@ -350,8 +370,8 @@ config DM_MULTIPATH_ST
          If unsure, say N.
 
 config DM_DELAY
-       tristate "I/O delaying target (EXPERIMENTAL)"
-       depends on BLK_DEV_DM && EXPERIMENTAL
+       tristate "I/O delaying target"
+       depends on BLK_DEV_DM
        ---help---
        A target that delays reads and/or writes and can send
        them to different devices.  Useful for testing.
@@ -365,14 +385,14 @@ config DM_UEVENT
        Generate udev events for DM events.
 
 config DM_FLAKEY
-       tristate "Flakey target (EXPERIMENTAL)"
-       depends on BLK_DEV_DM && EXPERIMENTAL
+       tristate "Flakey target"
+       depends on BLK_DEV_DM
        ---help---
          A target that intermittently fails I/O for debugging purposes.
 
 config DM_VERITY
-       tristate "Verity target support (EXPERIMENTAL)"
-       depends on BLK_DEV_DM && EXPERIMENTAL
+       tristate "Verity target support"
+       depends on BLK_DEV_DM
        select CRYPTO
        select CRYPTO_HASH
        select DM_BUFIO
index 94dce8b..7ceeaef 100644 (file)
@@ -11,6 +11,9 @@ dm-mirror-y   += dm-raid1.o
 dm-log-userspace-y \
                += dm-log-userspace-base.o dm-log-userspace-transfer.o
 dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
+dm-cache-y     += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
+dm-cache-mq-y   += dm-cache-policy-mq.o
+dm-cache-cleaner-y += dm-cache-policy-cleaner.o
 md-mod-y       += md.o bitmap.o
 raid456-y      += raid5.o
 
@@ -44,6 +47,9 @@ obj-$(CONFIG_DM_ZERO)         += dm-zero.o
 obj-$(CONFIG_DM_RAID)  += dm-raid.o
 obj-$(CONFIG_DM_THIN_PROVISIONING)     += dm-thin-pool.o
 obj-$(CONFIG_DM_VERITY)                += dm-verity.o
+obj-$(CONFIG_DM_CACHE)         += dm-cache.o
+obj-$(CONFIG_DM_CACHE_MQ)      += dm-cache-mq.o
+obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
 
 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs                    += dm-uevent.o
index 7155945..4fd9d6a 100644 (file)
@@ -337,7 +337,7 @@ static int read_page(struct file *file, unsigned long index,
                     struct page *page)
 {
        int ret = 0;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct buffer_head *bh;
        sector_t block;
 
@@ -755,7 +755,7 @@ static void bitmap_file_unmap(struct bitmap_storage *store)
                free_buffers(sb_page);
 
        if (file) {
-               struct inode *inode = file->f_path.dentry->d_inode;
+               struct inode *inode = file_inode(file);
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
                fput(file);
        }
index aefb78e..85f0b70 100644 (file)
 
 /*----------------------------------------------------------------*/
 
-struct dm_bio_prison_cell {
-       struct hlist_node list;
-       struct dm_bio_prison *prison;
-       struct dm_cell_key key;
-       struct bio *holder;
-       struct bio_list bios;
-};
-
 struct dm_bio_prison {
        spinlock_t lock;
        mempool_t *cell_pool;
@@ -87,6 +79,19 @@ void dm_bio_prison_destroy(struct dm_bio_prison *prison)
 }
 EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
 
+struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
+{
+       return mempool_alloc(prison->cell_pool, gfp);
+}
+EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
+
+void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
+                            struct dm_bio_prison_cell *cell)
+{
+       mempool_free(cell, prison->cell_pool);
+}
+EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
+
 static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
 {
        const unsigned long BIG_PRIME = 4294967291UL;
@@ -106,100 +111,103 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
                                                  struct dm_cell_key *key)
 {
        struct dm_bio_prison_cell *cell;
-       struct hlist_node *tmp;
 
-       hlist_for_each_entry(cell, tmp, bucket, list)
+       hlist_for_each_entry(cell, bucket, list)
                if (keys_equal(&cell->key, key))
                        return cell;
 
        return NULL;
 }
 
-/*
- * This may block if a new cell needs allocating.  You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
- *
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
- */
-int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
-                 struct bio *inmate, struct dm_bio_prison_cell **ref)
+static void __setup_new_cell(struct dm_bio_prison *prison,
+                            struct dm_cell_key *key,
+                            struct bio *holder,
+                            uint32_t hash,
+                            struct dm_bio_prison_cell *cell)
 {
-       int r = 1;
-       unsigned long flags;
-       uint32_t hash = hash_key(prison, key);
-       struct dm_bio_prison_cell *cell, *cell2;
-
-       BUG_ON(hash > prison->nr_buckets);
-
-       spin_lock_irqsave(&prison->lock, flags);
-
-       cell = __search_bucket(prison->cells + hash, key);
-       if (cell) {
-               bio_list_add(&cell->bios, inmate);
-               goto out;
-       }
+       memcpy(&cell->key, key, sizeof(cell->key));
+       cell->holder = holder;
+       bio_list_init(&cell->bios);
+       hlist_add_head(&cell->list, prison->cells + hash);
+}
 
-       /*
-        * Allocate a new cell
-        */
-       spin_unlock_irqrestore(&prison->lock, flags);
-       cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
-       spin_lock_irqsave(&prison->lock, flags);
+static int __bio_detain(struct dm_bio_prison *prison,
+                       struct dm_cell_key *key,
+                       struct bio *inmate,
+                       struct dm_bio_prison_cell *cell_prealloc,
+                       struct dm_bio_prison_cell **cell_result)
+{
+       uint32_t hash = hash_key(prison, key);
+       struct dm_bio_prison_cell *cell;
 
-       /*
-        * We've been unlocked, so we have to double check that
-        * nobody else has inserted this cell in the meantime.
-        */
        cell = __search_bucket(prison->cells + hash, key);
        if (cell) {
-               mempool_free(cell2, prison->cell_pool);
-               bio_list_add(&cell->bios, inmate);
-               goto out;
+               if (inmate)
+                       bio_list_add(&cell->bios, inmate);
+               *cell_result = cell;
+               return 1;
        }
 
-       /*
-        * Use new cell.
-        */
-       cell = cell2;
-
-       cell->prison = prison;
-       memcpy(&cell->key, key, sizeof(cell->key));
-       cell->holder = inmate;
-       bio_list_init(&cell->bios);
-       hlist_add_head(&cell->list, prison->cells + hash);
+       __setup_new_cell(prison, key, inmate, hash, cell_prealloc);
+       *cell_result = cell_prealloc;
+       return 0;
+}
 
-       r = 0;
+static int bio_detain(struct dm_bio_prison *prison,
+                     struct dm_cell_key *key,
+                     struct bio *inmate,
+                     struct dm_bio_prison_cell *cell_prealloc,
+                     struct dm_bio_prison_cell **cell_result)
+{
+       int r;
+       unsigned long flags;
 
-out:
+       spin_lock_irqsave(&prison->lock, flags);
+       r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
        spin_unlock_irqrestore(&prison->lock, flags);
 
-       *ref = cell;
-
        return r;
 }
+
+int dm_bio_detain(struct dm_bio_prison *prison,
+                 struct dm_cell_key *key,
+                 struct bio *inmate,
+                 struct dm_bio_prison_cell *cell_prealloc,
+                 struct dm_bio_prison_cell **cell_result)
+{
+       return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
+}
 EXPORT_SYMBOL_GPL(dm_bio_detain);
 
+int dm_get_cell(struct dm_bio_prison *prison,
+               struct dm_cell_key *key,
+               struct dm_bio_prison_cell *cell_prealloc,
+               struct dm_bio_prison_cell **cell_result)
+{
+       return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
+}
+EXPORT_SYMBOL_GPL(dm_get_cell);
+
 /*
  * @inmates must have been initialised prior to this call
  */
-static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell,
+                          struct bio_list *inmates)
 {
-       struct dm_bio_prison *prison = cell->prison;
-
        hlist_del(&cell->list);
 
        if (inmates) {
-               bio_list_add(inmates, cell->holder);
+               if (cell->holder)
+                       bio_list_add(inmates, cell->holder);
                bio_list_merge(inmates, &cell->bios);
        }
-
-       mempool_free(cell, prison->cell_pool);
 }
 
-void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
+void dm_cell_release(struct dm_bio_prison *prison,
+                    struct dm_bio_prison_cell *cell,
+                    struct bio_list *bios)
 {
        unsigned long flags;
-       struct dm_bio_prison *prison = cell->prison;
 
        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(cell, bios);
@@ -210,20 +218,18 @@ EXPORT_SYMBOL_GPL(dm_cell_release);
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
-static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+                                    struct bio_list *inmates)
 {
-       struct dm_bio_prison *prison = cell->prison;
-
        hlist_del(&cell->list);
        bio_list_merge(inmates, &cell->bios);
-
-       mempool_free(cell, prison->cell_pool);
 }
 
-void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+void dm_cell_release_no_holder(struct dm_bio_prison *prison,
+                              struct dm_bio_prison_cell *cell,
+                              struct bio_list *inmates)
 {
        unsigned long flags;
-       struct dm_bio_prison *prison = cell->prison;
 
        spin_lock_irqsave(&prison->lock, flags);
        __cell_release_no_holder(cell, inmates);
@@ -231,9 +237,9 @@ void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list
 }
 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
 
-void dm_cell_error(struct dm_bio_prison_cell *cell)
+void dm_cell_error(struct dm_bio_prison *prison,
+                  struct dm_bio_prison_cell *cell)
 {
-       struct dm_bio_prison *prison = cell->prison;
        struct bio_list bios;
        struct bio *bio;
        unsigned long flags;
index 53d1a7a..3f83319 100644 (file)
@@ -22,7 +22,6 @@
  * subsequently unlocked the bios become available.
  */
 struct dm_bio_prison;
-struct dm_bio_prison_cell;
 
 /* FIXME: this needs to be more abstract */
 struct dm_cell_key {
@@ -31,21 +30,62 @@ struct dm_cell_key {
        dm_block_t block;
 };
 
+/*
+ * This struct is exposed in the header only so that callers can manage
+ * cell allocation themselves; treat its contents as opaque.
+ */
+struct dm_bio_prison_cell {
+       struct hlist_node list;
+       struct dm_cell_key key;
+       struct bio *holder;
+       struct bio_list bios;
+};
+
 struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
 void dm_bio_prison_destroy(struct dm_bio_prison *prison);
 
 /*
- * This may block if a new cell needs allocating.  You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
+ * These two functions just wrap a mempool.  This is a transitory step:
+ * Eventually all bio prison clients should manage their own cell memory.
  *
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
+ * in interrupt context or passed GFP_NOWAIT.
  */
-int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
-                 struct bio *inmate, struct dm_bio_prison_cell **ref);
+struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
+                                                   gfp_t gfp);
+void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
+                            struct dm_bio_prison_cell *cell);
 
-void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
-void dm_cell_error(struct dm_bio_prison_cell *cell);
+/*
+ * Creates, or retrieves a cell for the given key.
+ *
+ * Returns 1 if pre-existing cell returned, zero if new cell created using
+ * @cell_prealloc.
+ */
+int dm_get_cell(struct dm_bio_prison *prison,
+               struct dm_cell_key *key,
+               struct dm_bio_prison_cell *cell_prealloc,
+               struct dm_bio_prison_cell **cell_result);
+
+/*
+ * An atomic op that combines retrieving a cell, and adding a bio to it.
+ *
+ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ */
+int dm_bio_detain(struct dm_bio_prison *prison,
+                 struct dm_cell_key *key,
+                 struct bio *inmate,
+                 struct dm_bio_prison_cell *cell_prealloc,
+                 struct dm_bio_prison_cell **cell_result);
+
+void dm_cell_release(struct dm_bio_prison *prison,
+                    struct dm_bio_prison_cell *cell,
+                    struct bio_list *bios);
+void dm_cell_release_no_holder(struct dm_bio_prison *prison,
+                              struct dm_bio_prison_cell *cell,
+                              struct bio_list *inmates);
+void dm_cell_error(struct dm_bio_prison *prison,
+                  struct dm_bio_prison_cell *cell);
 
 /*----------------------------------------------------------------*/
 
index 651ca79..3c955e1 100644 (file)
@@ -859,9 +859,8 @@ static void __check_watermark(struct dm_bufio_client *c)
 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
 {
        struct dm_buffer *b;
-       struct hlist_node *hn;
 
-       hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
+       hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
                             hash_list) {
                dm_bufio_cond_resched();
                if (b->block == block)
@@ -1193,7 +1192,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
 int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
        struct dm_io_request io_req = {
-               .bi_rw = REQ_FLUSH,
+               .bi_rw = WRITE_FLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
new file mode 100644 (file)
index 0000000..bed4ad4
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_BLOCK_TYPES_H
+#define DM_CACHE_BLOCK_TYPES_H
+
+#include "persistent-data/dm-block-manager.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * It's helpful to get sparse to differentiate between indexes into the
+ * origin device, indexes into the cache device, and indexes into the
+ * discard bitset.
+ */
+
+typedef dm_block_t __bitwise__ dm_oblock_t;
+typedef uint32_t __bitwise__ dm_cblock_t;
+typedef dm_block_t __bitwise__ dm_dblock_t;
+
+static inline dm_oblock_t to_oblock(dm_block_t b)
+{
+       return (__force dm_oblock_t) b;
+}
+
+static inline dm_block_t from_oblock(dm_oblock_t b)
+{
+       return (__force dm_block_t) b;
+}
+
+static inline dm_cblock_t to_cblock(uint32_t b)
+{
+       return (__force dm_cblock_t) b;
+}
+
+static inline uint32_t from_cblock(dm_cblock_t b)
+{
+       return (__force uint32_t) b;
+}
+
+static inline dm_dblock_t to_dblock(dm_block_t b)
+{
+       return (__force dm_dblock_t) b;
+}
+
+static inline dm_block_t from_dblock(dm_dblock_t b)
+{
+       return (__force dm_block_t) b;
+}
+
+#endif /* DM_CACHE_BLOCK_TYPES_H */
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
new file mode 100644 (file)
index 0000000..fbd3625
--- /dev/null
@@ -0,0 +1,1146 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-metadata.h"
+
+#include "persistent-data/dm-array.h"
+#include "persistent-data/dm-bitset.h"
+#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-disk.h"
+#include "persistent-data/dm-transaction-manager.h"
+
+#include <linux/device-mapper.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX   "cache metadata"
+
+#define CACHE_SUPERBLOCK_MAGIC 06142003
+#define CACHE_SUPERBLOCK_LOCATION 0
+#define CACHE_VERSION 1
+#define CACHE_METADATA_CACHE_SIZE 64
+
+/*
+ *  3 for btree insert +
+ *  2 for btree lookup used within space map
+ */
+#define CACHE_MAX_CONCURRENT_LOCKS 5
+#define SPACE_MAP_ROOT_SIZE 128
+
+enum superblock_flag_bits {
+       /* for spotting crashes that would invalidate the dirty bitset */
+       CLEAN_SHUTDOWN,
+};
+
+/*
+ * Each mapping from cache block -> origin block carries a set of flags.
+ */
+enum mapping_bits {
+       /*
+        * A valid mapping.  Because we're using an array we clear this
+        * flag for a nonexistent mapping.
+        */
+       M_VALID = 1,
+
+       /*
+        * The data on the cache is different from that on the origin.
+        */
+       M_DIRTY = 2
+};
+
+struct cache_disk_superblock {
+       __le32 csum;
+       __le32 flags;
+       __le64 blocknr;
+
+       __u8 uuid[16];
+       __le64 magic;
+       __le32 version;
+
+       __u8 policy_name[CACHE_POLICY_NAME_SIZE];
+       __le32 policy_hint_size;
+
+       __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+       __le64 mapping_root;
+       __le64 hint_root;
+
+       __le64 discard_root;
+       __le64 discard_block_size;
+       __le64 discard_nr_blocks;
+
+       __le32 data_block_size;
+       __le32 metadata_block_size;
+       __le32 cache_blocks;
+
+       __le32 compat_flags;
+       __le32 compat_ro_flags;
+       __le32 incompat_flags;
+
+       __le32 read_hits;
+       __le32 read_misses;
+       __le32 write_hits;
+       __le32 write_misses;
+} __packed;
+
+struct dm_cache_metadata {
+       struct block_device *bdev;
+       struct dm_block_manager *bm;
+       struct dm_space_map *metadata_sm;
+       struct dm_transaction_manager *tm;
+
+       struct dm_array_info info;
+       struct dm_array_info hint_info;
+       struct dm_disk_bitset discard_info;
+
+       struct rw_semaphore root_lock;
+       dm_block_t root;
+       dm_block_t hint_root;
+       dm_block_t discard_root;
+
+       sector_t discard_block_size;
+       dm_dblock_t discard_nr_blocks;
+
+       sector_t data_block_size;
+       dm_cblock_t cache_blocks;
+       bool changed:1;
+       bool clean_when_opened:1;
+
+       char policy_name[CACHE_POLICY_NAME_SIZE];
+       size_t policy_hint_size;
+       struct dm_cache_statistics stats;
+};
+
+/*-------------------------------------------------------------------
+ * superblock validator
+ *-----------------------------------------------------------------*/
+
+#define SUPERBLOCK_CSUM_XOR 9031977
+
+static void sb_prepare_for_write(struct dm_block_validator *v,
+                                struct dm_block *b,
+                                size_t sb_block_size)
+{
+       struct cache_disk_superblock *disk_super = dm_block_data(b);
+
+       disk_super->blocknr = cpu_to_le64(dm_block_location(b));
+       disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+                                                     sb_block_size - sizeof(__le32),
+                                                     SUPERBLOCK_CSUM_XOR));
+}
+
+static int sb_check(struct dm_block_validator *v,
+                   struct dm_block *b,
+                   size_t sb_block_size)
+{
+       struct cache_disk_superblock *disk_super = dm_block_data(b);
+       __le32 csum_le;
+
+       if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
+               DMERR("sb_check failed: blocknr %llu: wanted %llu",
+                     le64_to_cpu(disk_super->blocknr),
+                     (unsigned long long)dm_block_location(b));
+               return -ENOTBLK;
+       }
+
+       if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
+               DMERR("sb_check failed: magic %llu: wanted %llu",
+                     le64_to_cpu(disk_super->magic),
+                     (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
+               return -EILSEQ;
+       }
+
+       csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+                                            sb_block_size - sizeof(__le32),
+                                            SUPERBLOCK_CSUM_XOR));
+       if (csum_le != disk_super->csum) {
+               DMERR("sb_check failed: csum %u: wanted %u",
+                     le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+               return -EILSEQ;
+       }
+
+       return 0;
+}
+
+static struct dm_block_validator sb_validator = {
+       .name = "superblock",
+       .prepare_for_write = sb_prepare_for_write,
+       .check = sb_check
+};
+
+/*----------------------------------------------------------------*/
+
+static int superblock_read_lock(struct dm_cache_metadata *cmd,
+                               struct dm_block **sblock)
+{
+       return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+                              &sb_validator, sblock);
+}
+
+static int superblock_lock_zero(struct dm_cache_metadata *cmd,
+                               struct dm_block **sblock)
+{
+       return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+                                    &sb_validator, sblock);
+}
+
+static int superblock_lock(struct dm_cache_metadata *cmd,
+                          struct dm_block **sblock)
+{
+       return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+                               &sb_validator, sblock);
+}
+
+/*----------------------------------------------------------------*/
+
+static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+{
+       int r;
+       unsigned i;
+       struct dm_block *b;
+       __le64 *data_le, zero = cpu_to_le64(0);
+       unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+
+       /*
+        * We can't use a validator here - it may be all zeroes.
+        */
+       r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
+       if (r)
+               return r;
+
+       data_le = dm_block_data(b);
+       *result = 1;
+       for (i = 0; i < sb_block_size; i++) {
+               if (data_le[i] != zero) {
+                       *result = 0;
+                       break;
+               }
+       }
+
+       return dm_bm_unlock(b);
+}
+
+static void __setup_mapping_info(struct dm_cache_metadata *cmd)
+{
+       struct dm_btree_value_type vt;
+
+       vt.context = NULL;
+       vt.size = sizeof(__le64);
+       vt.inc = NULL;
+       vt.dec = NULL;
+       vt.equal = NULL;
+       dm_array_info_init(&cmd->info, cmd->tm, &vt);
+
+       if (cmd->policy_hint_size) {
+               vt.size = sizeof(__le32);
+               dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
+       }
+}
+
+static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+{
+       int r;
+       struct dm_block *sblock;
+       size_t metadata_len;
+       struct cache_disk_superblock *disk_super;
+       sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+
+       /* FIXME: see if we can lose the max sectors limit */
+       if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
+               bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
+
+       r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+       if (r < 0)
+               return r;
+
+       r = dm_tm_pre_commit(cmd->tm);
+       if (r < 0)
+               return r;
+
+       r = superblock_lock_zero(cmd, &sblock);
+       if (r)
+               return r;
+
+       disk_super = dm_block_data(sblock);
+       disk_super->flags = 0;
+       memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
+       disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
+       disk_super->version = cpu_to_le32(CACHE_VERSION);
+       memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE);
+       disk_super->policy_hint_size = 0;
+
+       r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+                           metadata_len);
+       if (r < 0)
+               goto bad_locked;
+
+       disk_super->mapping_root = cpu_to_le64(cmd->root);
+       disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+       disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+       disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
+       disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+       disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+       disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
+       disk_super->cache_blocks = cpu_to_le32(0);
+       memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+
+       disk_super->read_hits = cpu_to_le32(0);
+       disk_super->read_misses = cpu_to_le32(0);
+       disk_super->write_hits = cpu_to_le32(0);
+       disk_super->write_misses = cpu_to_le32(0);
+
+       return dm_tm_commit(cmd->tm, sblock);
+
+bad_locked:
+       dm_bm_unlock(sblock);
+       return r;
+}
+
+static int __format_metadata(struct dm_cache_metadata *cmd)
+{
+       int r;
+
+       r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+                                &cmd->tm, &cmd->metadata_sm);
+       if (r < 0) {
+               DMERR("tm_create_with_sm failed");
+               return r;
+       }
+
+       __setup_mapping_info(cmd);
+
+       r = dm_array_empty(&cmd->info, &cmd->root);
+       if (r < 0)
+               goto bad;
+
+       dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
+
+       r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
+       if (r < 0)
+               goto bad;
+
+       cmd->discard_block_size = 0;
+       cmd->discard_nr_blocks = 0;
+
+       r = __write_initial_superblock(cmd);
+       if (r)
+               goto bad;
+
+       cmd->clean_when_opened = true;
+       return 0;
+
+bad:
+       dm_tm_destroy(cmd->tm);
+       dm_sm_destroy(cmd->metadata_sm);
+
+       return r;
+}
+
+static int __check_incompat_features(struct cache_disk_superblock *disk_super,
+                                    struct dm_cache_metadata *cmd)
+{
+       uint32_t features;
+
+       features = le32_to_cpu(disk_super->incompat_flags) & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
+       if (features) {
+               DMERR("could not access metadata due to unsupported optional features (%lx).",
+                     (unsigned long)features);
+               return -EINVAL;
+       }
+
+       /*
+        * Check for read-only metadata to skip the following RDWR checks.
+        */
+       if (get_disk_ro(cmd->bdev->bd_disk))
+               return 0;
+
+       features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
+       if (features) {
+               DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
+                     (unsigned long)features);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int __open_metadata(struct dm_cache_metadata *cmd)
+{
+       int r;
+       struct dm_block *sblock;
+       struct cache_disk_superblock *disk_super;
+       unsigned long sb_flags;
+
+       r = superblock_read_lock(cmd, &sblock);
+       if (r < 0) {
+               DMERR("couldn't read lock superblock");
+               return r;
+       }
+
+       disk_super = dm_block_data(sblock);
+
+       r = __check_incompat_features(disk_super, cmd);
+       if (r < 0)
+               goto bad;
+
+       r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+                              disk_super->metadata_space_map_root,
+                              sizeof(disk_super->metadata_space_map_root),
+                              &cmd->tm, &cmd->metadata_sm);
+       if (r < 0) {
+               DMERR("tm_open_with_sm failed");
+               goto bad;
+       }
+
+       __setup_mapping_info(cmd);
+       dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
+       sb_flags = le32_to_cpu(disk_super->flags);
+       cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
+       return dm_bm_unlock(sblock);
+
+bad:
+       dm_bm_unlock(sblock);
+       return r;
+}
+
+static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
+                                    bool format_device)
+{
+       int r, unformatted;
+
+       r = __superblock_all_zeroes(cmd->bm, &unformatted);
+       if (r)
+               return r;
+
+       if (unformatted)
+               return format_device ? __format_metadata(cmd) : -EPERM;
+
+       return __open_metadata(cmd);
+}
+
+static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
+                                           bool may_format_device)
+{
+       int r;
+       cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE,
+                                         CACHE_METADATA_CACHE_SIZE,
+                                         CACHE_MAX_CONCURRENT_LOCKS);
+       if (IS_ERR(cmd->bm)) {
+               DMERR("could not create block manager");
+               return PTR_ERR(cmd->bm);
+       }
+
+       r = __open_or_format_metadata(cmd, may_format_device);
+       if (r)
+               dm_block_manager_destroy(cmd->bm);
+
+       return r;
+}
+
+static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
+{
+       dm_sm_destroy(cmd->metadata_sm);
+       dm_tm_destroy(cmd->tm);
+       dm_block_manager_destroy(cmd->bm);
+}
+
+typedef unsigned long (*flags_mutator)(unsigned long);
+
+static void update_flags(struct cache_disk_superblock *disk_super,
+                        flags_mutator mutator)
+{
+       uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
+       disk_super->flags = cpu_to_le32(sb_flags);
+}
+
+static unsigned long set_clean_shutdown(unsigned long flags)
+{
+       set_bit(CLEAN_SHUTDOWN, &flags);
+       return flags;
+}
+
+static unsigned long clear_clean_shutdown(unsigned long flags)
+{
+       clear_bit(CLEAN_SHUTDOWN, &flags);
+       return flags;
+}
+
+static void read_superblock_fields(struct dm_cache_metadata *cmd,
+                                  struct cache_disk_superblock *disk_super)
+{
+       cmd->root = le64_to_cpu(disk_super->mapping_root);
+       cmd->hint_root = le64_to_cpu(disk_super->hint_root);
+       cmd->discard_root = le64_to_cpu(disk_super->discard_root);
+       cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
+       cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
+       cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
+       cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
+       strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
+       cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
+
+       cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
+       cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
+       cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
+       cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
+
+       cmd->changed = false;
+}
+
+/*
+ * The mutator updates the superblock flags.
+ */
+static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
+                                    flags_mutator mutator)
+{
+       int r;
+       struct cache_disk_superblock *disk_super;
+       struct dm_block *sblock;
+
+       r = superblock_lock(cmd, &sblock);
+       if (r)
+               return r;
+
+       disk_super = dm_block_data(sblock);
+       update_flags(disk_super, mutator);
+       read_superblock_fields(cmd, disk_super);
+
+       return dm_bm_flush_and_unlock(cmd->bm, sblock);
+}
+
+static int __begin_transaction(struct dm_cache_metadata *cmd)
+{
+       int r;
+       struct cache_disk_superblock *disk_super;
+       struct dm_block *sblock;
+
+       /*
+        * We re-read the superblock every time.  Shouldn't need to do this
+        * really.
+        */
+       r = superblock_read_lock(cmd, &sblock);
+       if (r)
+               return r;
+
+       disk_super = dm_block_data(sblock);
+       read_superblock_fields(cmd, disk_super);
+       dm_bm_unlock(sblock);
+
+       return 0;
+}
+
+static int __commit_transaction(struct dm_cache_metadata *cmd,
+                               flags_mutator mutator)
+{
+       int r;
+       size_t metadata_len;
+       struct cache_disk_superblock *disk_super;
+       struct dm_block *sblock;
+
+       /*
+        * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
+        */
+       BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
+
+       r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
+                           &cmd->discard_root);
+       if (r)
+               return r;
+
+       r = dm_tm_pre_commit(cmd->tm);
+       if (r < 0)
+               return r;
+
+       r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+       if (r < 0)
+               return r;
+
+       r = superblock_lock(cmd, &sblock);
+       if (r)
+               return r;
+
+       disk_super = dm_block_data(sblock);
+
+       if (mutator)
+               update_flags(disk_super, mutator);
+
+       disk_super->mapping_root = cpu_to_le64(cmd->root);
+       disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+       disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+       disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
+       disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+       disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
+       strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
+
+       disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
+       disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
+       disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
+       disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
+
+       r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+                           metadata_len);
+       if (r < 0) {
+               dm_bm_unlock(sblock);
+               return r;
+       }
+
+       return dm_tm_commit(cmd->tm, sblock);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The mappings are held in a dm-array that has 64-bit values stored in
+ * little-endian format.  The index is the cblock, the high 48bits of the
+ * value are the oblock and the low 16 bit the flags.
+ */
+#define FLAGS_MASK ((1 << 16) - 1)
+
+static __le64 pack_value(dm_oblock_t block, unsigned flags)
+{
+       uint64_t value = from_oblock(block);
+       value <<= 16;
+       value = value | (flags & FLAGS_MASK);
+       return cpu_to_le64(value);
+}
+
+/* Split a packed little-endian value back into oblock + flags. */
+static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
+{
+       uint64_t v = le64_to_cpu(value_le);
+
+       *flags = v & FLAGS_MASK;
+       *block = to_oblock(v >> 16);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Reopens an existing metadata volume, or formats a new one if
+ * may_format_device is set.
+ *
+ * Returns an ERR_PTR() on failure — never NULL — matching the contract
+ * documented in dm-cache-metadata.h, so callers can rely on IS_ERR().
+ */
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+                                                sector_t data_block_size,
+                                                bool may_format_device,
+                                                size_t policy_hint_size)
+{
+       int r;
+       struct dm_cache_metadata *cmd;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               DMERR("could not allocate metadata struct");
+               /*
+                * Was "return NULL": a NULL return passes an IS_ERR()
+                * check and would be mistaken for success by callers.
+                */
+               return ERR_PTR(-ENOMEM);
+       }
+
+       init_rwsem(&cmd->root_lock);
+       cmd->bdev = bdev;
+       cmd->data_block_size = data_block_size;
+       cmd->cache_blocks = 0;
+       cmd->policy_hint_size = policy_hint_size;
+       cmd->changed = true;
+
+       r = __create_persistent_data_objects(cmd, may_format_device);
+       if (r) {
+               kfree(cmd);
+               return ERR_PTR(r);
+       }
+
+       /* Clear the clean-shutdown flag; dm_cache_commit() may set it again. */
+       r = __begin_transaction_flags(cmd, clear_clean_shutdown);
+       if (r < 0) {
+               dm_cache_metadata_close(cmd);
+               return ERR_PTR(r);
+       }
+
+       return cmd;
+}
+
+/* Tear down the in-core persistent-data objects and free @cmd itself. */
+void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+{
+       __destroy_persistent_data_objects(cmd);
+       kfree(cmd);
+}
+
+/*
+ * Resize the mapping array to new_cache_size entries; any new slots are
+ * initialised with an empty (invalid) mapping.
+ */
+int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+{
+       int r;
+       __le64 null_mapping = pack_value(0, 0);
+
+       down_write(&cmd->root_lock);
+       __dm_bless_for_disk(&null_mapping);
+       r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
+                           from_cblock(new_cache_size),
+                           &null_mapping, &cmd->root);
+       if (!r)
+               cmd->cache_blocks = new_cache_size;
+       /* NOTE(review): changed is set even when the resize failed. */
+       cmd->changed = true;
+       up_write(&cmd->root_lock);
+
+       return r;
+}
+
+/*
+ * Resize the on-disk discard bitset; new bits start cleared.  The cached
+ * block size / count are only updated on success.
+ */
+int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+                                  sector_t discard_block_size,
+                                  dm_dblock_t new_nr_entries)
+{
+       int r;
+
+       down_write(&cmd->root_lock);
+       r = dm_bitset_resize(&cmd->discard_info,
+                            cmd->discard_root,
+                            from_dblock(cmd->discard_nr_blocks),
+                            from_dblock(new_nr_entries),
+                            false, &cmd->discard_root);
+       if (!r) {
+               cmd->discard_block_size = discard_block_size;
+               cmd->discard_nr_blocks = new_nr_entries;
+       }
+
+       /* NOTE(review): changed is set even when the resize failed. */
+       cmd->changed = true;
+       up_write(&cmd->root_lock);
+
+       return r;
+}
+
+/* Set the discard bit for block @b.  Caller holds root_lock for write. */
+static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+{
+       return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
+                                from_dblock(b), &cmd->discard_root);
+}
+
+/* Clear the discard bit for block @b.  Caller holds root_lock for write. */
+static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+{
+       return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
+                                  from_dblock(b), &cmd->discard_root);
+}
+
+/* Read block @b's discard bit into *is_discarded.  Caller holds root_lock. */
+static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
+                         bool *is_discarded)
+{
+       return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
+                                 from_dblock(b), &cmd->discard_root,
+                                 is_discarded);
+}
+
+/*
+ * Set or clear @dblock's discard bit and mark the metadata changed.
+ * Caller holds root_lock for write.
+ */
+static int __discard(struct dm_cache_metadata *cmd,
+                    dm_dblock_t dblock, bool discard)
+{
+       int r;
+
+       if (discard)
+               r = __set_discard(cmd, dblock);
+       else
+               r = __clear_discard(cmd, dblock);
+       if (r)
+               return r;
+
+       cmd->changed = true;
+       return 0;
+}
+
+/* Locked wrapper around __discard(). */
+int dm_cache_set_discard(struct dm_cache_metadata *cmd,
+                        dm_dblock_t dblock, bool discard)
+{
+       int r;
+
+       down_write(&cmd->root_lock);
+       r = __discard(cmd, dblock, discard);
+       up_write(&cmd->root_lock);
+
+       return r;
+}
+
+/*
+ * Call @fn for every discard block.  If the device wasn't shut down
+ * cleanly the bitset is untrusted and every block is reported as not
+ * discarded.  Stops at the first error returned by @fn.
+ */
+static int __load_discards(struct dm_cache_metadata *cmd,
+                          load_discard_fn fn, void *context)
+{
+       int r = 0;
+       dm_block_t b;
+       bool discard;
+
+       for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+               dm_dblock_t dblock = to_dblock(b);
+
+               if (cmd->clean_when_opened) {
+                       r = __is_discarded(cmd, dblock, &discard);
+                       if (r)
+                               return r;
+               } else
+                       discard = false;
+
+               r = fn(context, cmd->discard_block_size, dblock, discard);
+               if (r)
+                       break;
+       }
+
+       return r;
+}
+
+/* Locked wrapper around __load_discards(). */
+int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+                          load_discard_fn fn, void *context)
+{
+       int r;
+
+       down_read(&cmd->root_lock);
+       r = __load_discards(cmd, fn, context);
+       up_read(&cmd->root_lock);
+
+       return r;
+}
+
+/* Return the current number of cache blocks. */
+dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+{
+       dm_cblock_t r;
+
+       down_read(&cmd->root_lock);
+       r = cmd->cache_blocks;
+       up_read(&cmd->root_lock);
+
+       return r;
+}
+
+/*
+ * Overwrite @cblock's slot with an empty (invalid) mapping.
+ * Caller holds root_lock for write.
+ */
+static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+{
+       int r;
+       __le64 value = pack_value(0, 0);
+
+       __dm_bless_for_disk(&value);
+       r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+                              &value, &cmd->root);
+       if (r)
+               return r;
+
+       cmd->changed = true;
+       return 0;
+}
+
+/* Locked wrapper around __remove(). */
+int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+{
+       int r;
+
+       down_write(&cmd->root_lock);
+       r = __remove(cmd, cblock);
+       up_write(&cmd->root_lock);
+
+       return r;
+}
+
+/*
+ * Record oblock -> cblock as a valid mapping (dirty bit clear).
+ * Caller holds root_lock for write.
+ */
+static int __insert(struct dm_cache_metadata *cmd,
+                   dm_cblock_t cblock, dm_oblock_t oblock)
+{
+       int r;
+       __le64 value = pack_value(oblock, M_VALID);
+       __dm_bless_for_disk(&value);
+
+       r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+                              &value, &cmd->root);
+       if (r)
+               return r;
+
+       cmd->changed = true;
+       return 0;
+}
+
+/* Locked wrapper around __insert(). */
+int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
+                           dm_cblock_t cblock, dm_oblock_t oblock)
+{
+       int r;
+
+       down_write(&cmd->root_lock);
+       r = __insert(cmd, cblock, oblock);
+       up_write(&cmd->root_lock);
+
+       return r;
+}
+
+/* Context bundle threaded through dm_array_walk() by __load_mappings(). */
+struct thunk {
+       load_mapping_fn fn;
+       void *context;
+
+       struct dm_cache_metadata *cmd;
+       bool respect_dirty_flags;
+       bool hints_valid;
+};
+
+/* True if an on-disk hints array exists and hints have a non-zero size. */
+static bool hints_array_initialized(struct dm_cache_metadata *cmd)
+{
+       return cmd->hint_root && cmd->policy_hint_size;
+}
+
+/*
+ * Hints are only usable after a clean shutdown with the same policy that
+ * wrote them.
+ */
+static bool hints_array_available(struct dm_cache_metadata *cmd,
+                                 const char *policy_name)
+{
+       bool policy_names_match = !strncmp(cmd->policy_name, policy_name,
+                                          sizeof(cmd->policy_name));
+
+       return cmd->clean_when_opened && policy_names_match &&
+               hints_array_initialized(cmd);
+}
+
+/*
+ * dm_array_walk() callback: unpack one mapping and, if valid, forward it
+ * (with its hint, when available) to the user's load_mapping_fn.
+ * -ENODATA from the hint lookup is tolerated and leaves the hint at 0.
+ */
+static int __load_mapping(void *context, uint64_t cblock, void *leaf)
+{
+       int r = 0;
+       bool dirty;
+       __le64 value;
+       __le32 hint_value = 0;
+       dm_oblock_t oblock;
+       unsigned flags;
+       struct thunk *thunk = context;
+       struct dm_cache_metadata *cmd = thunk->cmd;
+
+       memcpy(&value, leaf, sizeof(value));
+       unpack_value(value, &oblock, &flags);
+
+       if (flags & M_VALID) {
+               if (thunk->hints_valid) {
+                       r = dm_array_get_value(&cmd->hint_info, cmd->hint_root,
+                                              cblock, &hint_value);
+                       if (r && r != -ENODATA)
+                               return r;
+               }
+
+               /* after an unclean shutdown every block is assumed dirty */
+               dirty = thunk->respect_dirty_flags ? (flags & M_DIRTY) : true;
+               r = thunk->fn(thunk->context, oblock, to_cblock(cblock),
+                             dirty, le32_to_cpu(hint_value), thunk->hints_valid);
+       }
+
+       return r;
+}
+
+/* Walk all mappings, calling @fn for each valid one. */
+static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+                          load_mapping_fn fn, void *context)
+{
+       struct thunk thunk;
+
+       thunk.fn = fn;
+       thunk.context = context;
+
+       thunk.cmd = cmd;
+       thunk.respect_dirty_flags = cmd->clean_when_opened;
+       thunk.hints_valid = hints_array_available(cmd, policy_name);
+
+       return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
+}
+
+/* Locked wrapper around __load_mappings(). */
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+                          load_mapping_fn fn, void *context)
+{
+       int r;
+
+       down_read(&cmd->root_lock);
+       r = __load_mappings(cmd, policy_name, fn, context);
+       up_read(&cmd->root_lock);
+
+       return r;
+}
+
+/*
+ * Debugging hook for dm_cache_dump(): unpacks each mapping but currently
+ * emits nothing.  (Removed the unused local "r"; the function always
+ * returns 0 so dm_array_walk() keeps iterating.)
+ */
+static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
+{
+       __le64 value;
+       dm_oblock_t oblock;
+       unsigned flags;
+
+       memcpy(&value, leaf, sizeof(value));
+       unpack_value(value, &oblock, &flags);
+
+       return 0;
+}
+
+/* Walk every mapping through the (currently silent) dump callback. */
+static int __dump_mappings(struct dm_cache_metadata *cmd)
+{
+       return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
+}
+
+/* Debug aid: dump all mappings under the read lock. */
+void dm_cache_dump(struct dm_cache_metadata *cmd)
+{
+       down_read(&cmd->root_lock);
+       __dump_mappings(cmd);
+       up_read(&cmd->root_lock);
+}
+
+/* Return non-zero if metadata was modified since the last commit. */
+int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
+{
+       int r;
+
+       down_read(&cmd->root_lock);
+       r = cmd->changed;
+       up_read(&cmd->root_lock);
+
+       return r;
+}
+
+/*
+ * Update the M_DIRTY flag for @cblock's mapping.  Caller holds root_lock
+ * for write.
+ *
+ * Fix: the previous code packed "flags | (dirty ? M_DIRTY : 0)", which
+ * could set the dirty bit but never clear it — with dirty == false and
+ * M_DIRTY already set, the old flags were written back unchanged.  Mask
+ * the bit out first, then or in the requested state.
+ */
+static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
+{
+       int r;
+       unsigned flags;
+       dm_oblock_t oblock;
+       __le64 value;
+
+       r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
+       if (r)
+               return r;
+
+       unpack_value(value, &oblock, &flags);
+
+       if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
+               /* nothing to be done */
+               return 0;
+
+       value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
+       __dm_bless_for_disk(&value);
+
+       r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+                              &value, &cmd->root);
+       if (r)
+               return r;
+
+       cmd->changed = true;
+       return 0;
+}
+
+/* Locked wrapper around __dirty(). */
+int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
+                      dm_cblock_t cblock, bool dirty)
+{
+       int r;
+
+       down_write(&cmd->root_lock);
+       r = __dirty(cmd, cblock, dirty);
+       up_write(&cmd->root_lock);
+
+       return r;
+}
+
+/* Copy the hit/miss counters out to @stats. */
+void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+                                struct dm_cache_statistics *stats)
+{
+       down_read(&cmd->root_lock);
+       memcpy(stats, &cmd->stats, sizeof(*stats));
+       up_read(&cmd->root_lock);
+}
+
+/* Overwrite the hit/miss counters from @stats. */
+void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+                                struct dm_cache_statistics *stats)
+{
+       down_write(&cmd->root_lock);
+       memcpy(&cmd->stats, stats, sizeof(*stats));
+       up_write(&cmd->root_lock);
+}
+
+/*
+ * Commit the current transaction and open a new one.  clean_shutdown
+ * selects whether the clean-shutdown flag is set or cleared on disk.
+ */
+int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
+{
+       int r;
+       flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
+                                clear_clean_shutdown);
+
+       down_write(&cmd->root_lock);
+       r = __commit_transaction(cmd, mutator);
+       if (r)
+               goto out;
+
+       r = __begin_transaction(cmd);
+
+out:
+       up_write(&cmd->root_lock);
+       return r;
+}
+
+/*
+ * Report the number of free blocks in the metadata space map.
+ * (Dropped the dead "r = -EINVAL" initialisation; r is always assigned
+ * from dm_sm_get_nr_free().)
+ */
+int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+                                          dm_block_t *result)
+{
+       int r;
+
+       down_read(&cmd->root_lock);
+       r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+       up_read(&cmd->root_lock);
+
+       return r;
+}
+
+/* Report the total size of the metadata device in metadata blocks. */
+int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+                                  dm_block_t *result)
+{
+       int r;
+
+       down_read(&cmd->root_lock);
+       r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+       up_read(&cmd->root_lock);
+
+       return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Prepare the hints array for @policy.  If the policy differs from the
+ * one recorded in the metadata, any existing hints array is deleted and
+ * a fresh, zero-filled one sized to the cache is created.  Caller holds
+ * root_lock for write.
+ */
+static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+       int r;
+       __le32 value;
+       size_t hint_size;
+       const char *policy_name = dm_cache_policy_get_name(policy);
+
+       /* name must be non-empty and fit (with NUL) in the superblock field */
+       if (!policy_name[0] ||
+           (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
+               return -EINVAL;
+
+       if (strcmp(cmd->policy_name, policy_name)) {
+               /*
+                * strncpy is safe here: the length check above guarantees
+                * NUL termination within the field.
+                */
+               strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+
+               hint_size = dm_cache_policy_get_hint_size(policy);
+               if (!hint_size)
+                       return 0; /* short-circuit hints initialization */
+               cmd->policy_hint_size = hint_size;
+
+               if (cmd->hint_root) {
+                       r = dm_array_del(&cmd->hint_info, cmd->hint_root);
+                       if (r)
+                               return r;
+               }
+
+               r = dm_array_empty(&cmd->hint_info, &cmd->hint_root);
+               if (r)
+                       return r;
+
+               value = cpu_to_le32(0);
+               __dm_bless_for_disk(&value);
+               r = dm_array_resize(&cmd->hint_info, cmd->hint_root, 0,
+                                   from_cblock(cmd->cache_blocks),
+                                   &value, &cmd->hint_root);
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
+/* Locked wrapper around begin_hints(). */
+int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+       int r;
+
+       down_write(&cmd->root_lock);
+       r = begin_hints(cmd, policy);
+       up_write(&cmd->root_lock);
+
+       return r;
+}
+
+/*
+ * Store one policy hint for @cblock.  Caller holds root_lock for write.
+ * NOTE(review): changed is set even if dm_array_set_value() failed.
+ */
+static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+                    uint32_t hint)
+{
+       int r;
+       __le32 value = cpu_to_le32(hint);
+       __dm_bless_for_disk(&value);
+
+       r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
+                              from_cblock(cblock), &value, &cmd->hint_root);
+       cmd->changed = true;
+
+       return r;
+}
+
+/* Store a hint; silently succeeds if no hints array is set up. */
+int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+                      uint32_t hint)
+{
+       int r;
+
+       if (!hints_array_initialized(cmd))
+               return 0;
+
+       down_write(&cmd->root_lock);
+       r = save_hint(cmd, cblock, hint);
+       up_write(&cmd->root_lock);
+
+       return r;
+}
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
new file mode 100644 (file)
index 0000000..135864e
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_METADATA_H
+#define DM_CACHE_METADATA_H
+
+#include "dm-cache-block-types.h"
+#include "dm-cache-policy-internal.h"
+
+/*----------------------------------------------------------------*/
+
+#define DM_CACHE_METADATA_BLOCK_SIZE 4096
+
+/* FIXME: remove this restriction */
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries.  Each
+ * index entry contains allocation info about 16k metadata blocks.
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS (255 * (1 << 14) * (DM_CACHE_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+
+/*
+ * A metadata device larger than 16GB triggers a warning.
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Ext[234]-style compat feature flags.
+ *
+ * A new feature which old metadata will still be compatible with should
+ * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
+ *
+ * A new feature that is not compatible with old code should define a
+ * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
+ * that flag.
+ *
+ * A new feature that is not compatible with old code accessing the
+ * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
+ * guard the relevant code with that flag.
+ *
+ * As these various flags are defined they should be added to the
+ * following masks.
+ */
+#define DM_CACHE_FEATURE_COMPAT_SUPP     0UL
+#define DM_CACHE_FEATURE_COMPAT_RO_SUPP          0UL
+#define DM_CACHE_FEATURE_INCOMPAT_SUPP   0UL
+
+/*
+ * Reopens or creates a new, empty metadata volume.
+ * Returns an ERR_PTR on failure.
+ */
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+                                                sector_t data_block_size,
+                                                bool may_format_device,
+                                                size_t policy_hint_size);
+
+void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
+
+/*
+ * The metadata needs to know how many cache blocks there are.  We don't
+ * care about the origin, assuming the core target is giving us valid
+ * origin blocks to map to.
+ */
+int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
+dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+
+int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+                                  sector_t discard_block_size,
+                                  dm_dblock_t new_nr_entries);
+
+typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
+                              dm_dblock_t dblock, bool discarded);
+int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+                          load_discard_fn fn, void *context);
+
+int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
+
+int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
+int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
+int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
+
+typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
+                              dm_cblock_t cblock, bool dirty,
+                              uint32_t hint, bool hint_valid);
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+                          const char *policy_name,
+                          load_mapping_fn fn,
+                          void *context);
+
+int dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty);
+
+struct dm_cache_statistics {
+       uint32_t read_hits;
+       uint32_t read_misses;
+       uint32_t write_hits;
+       uint32_t write_misses;
+};
+
+void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+                                struct dm_cache_statistics *stats);
+void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+                                struct dm_cache_statistics *stats);
+
+int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
+
+int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+                                          dm_block_t *result);
+
+int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+                                  dm_block_t *result);
+
+void dm_cache_dump(struct dm_cache_metadata *cmd);
+
+/*
+ * The policy is invited to save a 32bit hint value for every cblock (eg,
+ * for a hit count).  These are stored against the policy name.  If
+ * policies are changed, then hints will be lost.  If the machine crashes,
+ * hints will be lost.
+ *
+ * The hints are indexed by the cblock, but many policies will not
+ * necessarily have a fast way of accessing them efficiently via cblock.  So
+ * rather than querying the policy for each cblock, we let it walk its data
+ * structures and fill in the hints in whatever order it wishes.
+ */
+
+int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
+
+/*
+ * requests hints for every cblock and stores in the metadata device.
+ */
+int dm_cache_save_hint(struct dm_cache_metadata *cmd,
+                      dm_cblock_t cblock, uint32_t hint);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_METADATA_H */
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
new file mode 100644 (file)
index 0000000..cc05d70
--- /dev/null
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * writeback cache policy supporting flushing out dirty cache blocks.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache cleaner"
+#define CLEANER_VERSION "1.0.0"
+
+/* Cache entry struct. */
+struct wb_cache_entry {
+       struct list_head list;          /* on one of: free/clean/clean_pending/dirty */
+       struct hlist_node hlist;        /* oblock hash bucket membership */
+
+       dm_oblock_t oblock;
+       dm_cblock_t cblock;
+       bool dirty:1;
+       bool pending:1;
+};
+
+/* Open-chained hash table keyed by oblock. */
+struct hash {
+       struct hlist_head *table;
+       dm_block_t hash_bits;
+       unsigned nr_buckets;
+};
+
+/* Per-instance state of the "cleaner" writeback policy. */
+struct policy {
+       struct dm_cache_policy policy;
+       spinlock_t lock;                /* protects the lists and the hash */
+
+       struct list_head free;
+       struct list_head clean;
+       struct list_head clean_pending;
+       struct list_head dirty;
+
+       /*
+        * We know exactly how many cblocks will be needed,
+        * so we can allocate them up front.
+        */
+       dm_cblock_t cache_size, nr_cblocks_allocated;
+       struct wb_cache_entry *cblocks;
+       struct hash chash;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Low-level functions.
+ */
+/* Smallest power of two that is >= both n and min. */
+static unsigned next_power(unsigned n, unsigned min)
+{
+       unsigned v = max(n, min);
+
+       return roundup_pow_of_two(v);
+}
+
+/* Map the generic policy interface back to our containing struct. */
+static struct policy *to_policy(struct dm_cache_policy *p)
+{
+       return container_of(p, struct policy, policy);
+}
+
+/* Detach and return the first element of @q (q must be non-empty). */
+static struct list_head *list_pop(struct list_head *q)
+{
+       struct list_head *elt = q->next;
+
+       list_del(elt);
+       return elt;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Allocate/free various resources. */
+/* Size the bucket table at roughly elts/16, minimum 16 buckets. */
+static int alloc_hash(struct hash *hash, unsigned elts)
+{
+       hash->nr_buckets = next_power(elts >> 4, 16);
+       hash->hash_bits = ffs(hash->nr_buckets) - 1;
+       /* NOTE(review): multiplication could overflow for a huge table — confirm caller bounds */
+       hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
+
+       return hash->table ? 0 : -ENOMEM;
+}
+
+static void free_hash(struct hash *hash)
+{
+       vfree(hash->table);
+}
+
+/*
+ * Allocate one wb_cache_entry per cache block, thread them all onto the
+ * free list, and build the lookup hash.  On hash allocation failure the
+ * entry array is freed again.
+ */
+static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
+{
+       int r = -ENOMEM;
+
+       p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
+       if (p->cblocks) {
+               unsigned u = from_cblock(cache_size);
+
+               while (u--)
+                       list_add(&p->cblocks[u].list, &p->free);
+
+               p->nr_cblocks_allocated = 0;
+
+               /* Cache entries hash. */
+               r = alloc_hash(&p->chash, from_cblock(cache_size));
+               if (r)
+                       vfree(p->cblocks);
+       }
+
+       return r;
+}
+
+static void free_cache_blocks_and_hash(struct policy *p)
+{
+       free_hash(&p->chash);
+       vfree(p->cblocks);
+}
+
+/*
+ * Take an entry off the preallocated free list.  BUGs if the pool is
+ * exhausted, so the return value is never NULL.
+ */
+static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
+{
+       struct wb_cache_entry *e;
+
+       BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));
+
+       e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
+       p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);
+
+       return e;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Hash functions (lookup, insert, remove). */
+/*
+ * Find the entry for @oblock, or NULL.  A hit is moved to the front of
+ * its bucket (move-to-front heuristic).  Caller holds p->lock.
+ */
+static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
+{
+       struct hash *hash = &p->chash;
+       unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
+       struct wb_cache_entry *cur;
+       struct hlist_head *bucket = &hash->table[h];
+
+       hlist_for_each_entry(cur, bucket, hlist) {
+               if (cur->oblock == oblock) {
+                       /* Move upfront bucket for faster access. */
+                       hlist_del(&cur->hlist);
+                       hlist_add_head(&cur->hlist, bucket);
+                       return cur;
+               }
+       }
+
+       return NULL;
+}
+
+/* Add @e to its hash bucket.  Caller holds p->lock. */
+static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
+{
+       unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);
+
+       hlist_add_head(&e->hlist, &p->chash.table[h]);
+}
+
+/* Remove @e from its hash bucket.  Caller holds p->lock. */
+static void remove_cache_hash_entry(struct wb_cache_entry *e)
+{
+       hlist_del(&e->hlist);
+}
+
+/* Public interface (see dm-cache-policy.h) */
+/*
+ * map: report POLICY_HIT + cblock if @oblock is cached, otherwise
+ * POLICY_MISS.  This policy never migrates.  Returns -EWOULDBLOCK when
+ * can_block is false and the lock is contended.
+ */
+static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
+                 bool can_block, bool can_migrate, bool discarded_oblock,
+                 struct bio *bio, struct policy_result *result)
+{
+       struct policy *p = to_policy(pe);
+       struct wb_cache_entry *e;
+       unsigned long flags;
+
+       result->op = POLICY_MISS;
+
+       if (can_block)
+               spin_lock_irqsave(&p->lock, flags);
+
+       else if (!spin_trylock_irqsave(&p->lock, flags))
+               return -EWOULDBLOCK;
+
+       e = lookup_cache_entry(p, oblock);
+       if (e) {
+               result->op = POLICY_HIT;
+               result->cblock = e->cblock;
+
+       }
+
+       spin_unlock_irqrestore(&p->lock, flags);
+
+       return 0;
+}
+
+/*
+ * Non-blocking lookup: 0 + *cblock on a hit, -ENOENT on a miss,
+ * -EWOULDBLOCK if the lock is contended.
+ */
+static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+       int r;
+       struct policy *p = to_policy(pe);
+       struct wb_cache_entry *e;
+       unsigned long flags;
+
+       if (!spin_trylock_irqsave(&p->lock, flags))
+               return -EWOULDBLOCK;
+
+       e = lookup_cache_entry(p, oblock);
+       if (e) {
+               *cblock = e->cblock;
+               r = 0;
+
+       } else
+               r = -ENOENT;
+
+       spin_unlock_irqrestore(&p->lock, flags);
+
+       return r;
+}
+
+/*
+ * Move @oblock's entry between the dirty and clean lists.  The entry
+ * must exist (BUG otherwise).  Caller holds p->lock.
+ */
+static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
+{
+       struct policy *p = to_policy(pe);
+       struct wb_cache_entry *e;
+
+       e = lookup_cache_entry(p, oblock);
+       BUG_ON(!e);
+
+       if (set) {
+               if (!e->dirty) {
+                       e->dirty = true;
+                       list_move(&e->list, &p->dirty);
+               }
+
+       } else {
+               if (e->dirty) {
+                       e->pending = false;
+                       e->dirty = false;
+                       list_move(&e->list, &p->clean);
+               }
+       }
+}
+
+static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+       struct policy *p = to_policy(pe);
+       unsigned long flags;
+
+       spin_lock_irqsave(&p->lock, flags);
+       __set_clear_dirty(pe, oblock, true);
+       spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+       struct policy *p = to_policy(pe);
+       unsigned long flags;
+
+       spin_lock_irqsave(&p->lock, flags);
+       __set_clear_dirty(pe, oblock, false);
+       spin_unlock_irqrestore(&p->lock, flags);
+}
+
+/* Hash @e and put it on the dirty or clean list.  Caller holds p->lock. */
+static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
+{
+       insert_cache_hash_entry(p, e);
+       if (e->dirty)
+               list_add(&e->list, &p->dirty);
+       else
+               list_add(&e->list, &p->clean);
+}
+
+/*
+ * Register a mapping restored from metadata.  Hints are ignored by this
+ * policy.  NOTE(review): alloc_cache_entry() BUGs rather than returning
+ * NULL, so the -ENOMEM branch looks unreachable — confirm.
+ */
+static int wb_load_mapping(struct dm_cache_policy *pe,
+                          dm_oblock_t oblock, dm_cblock_t cblock,
+                          uint32_t hint, bool hint_valid)
+{
+       int r;
+       struct policy *p = to_policy(pe);
+       struct wb_cache_entry *e = alloc_cache_entry(p);
+
+       if (e) {
+               e->cblock = cblock;
+               e->oblock = oblock;
+               e->dirty = false; /* blocks default to clean */
+               add_cache_entry(p, e);
+               r = 0;
+
+       } else
+               r = -ENOMEM;
+
+       return r;
+}
+
+/* Release all entries, the hash and the policy object itself. */
+static void wb_destroy(struct dm_cache_policy *pe)
+{
+       struct policy *p = to_policy(pe);
+
+       free_cache_blocks_and_hash(p);
+       kfree(p);
+}
+
+/*
+ * Unhash and unlist @oblock's entry, which must exist (BUG otherwise).
+ * Caller holds p->lock and takes ownership of the returned entry.
+ */
+static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
+{
+       struct wb_cache_entry *r = lookup_cache_entry(p, oblock);
+
+       BUG_ON(!r);
+
+       remove_cache_hash_entry(r);
+       list_del(&r->list);
+
+       return r;
+}
+
+/* Drop @oblock's mapping and return its entry to the free pool. */
+static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+       struct policy *p = to_policy(pe);
+       struct wb_cache_entry *e;
+       unsigned long flags;
+
+       spin_lock_irqsave(&p->lock, flags);
+       e = __wb_force_remove_mapping(p, oblock);
+       list_add_tail(&e->list, &p->free);
+       BUG_ON(!from_cblock(p->nr_cblocks_allocated));
+       p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
+       spin_unlock_irqrestore(&p->lock, flags);
+}
+
+/* Remap current_oblock's entry to @oblock, keeping its cblock. */
+static void wb_force_mapping(struct dm_cache_policy *pe,
+                               dm_oblock_t current_oblock, dm_oblock_t oblock)
+{
+       struct policy *p = to_policy(pe);
+       struct wb_cache_entry *e;
+       unsigned long flags;
+
+       spin_lock_irqsave(&p->lock, flags);
+       e = __wb_force_remove_mapping(p, current_oblock);
+       e->oblock = oblock;
+       add_cache_entry(p, e);
+       spin_unlock_irqrestore(&p->lock, flags);
+}
+
+/*
+ * Pop the next dirty entry and park it on clean_pending until the write
+ * back completes (wb_clear_dirty moves it to clean).  Caller holds
+ * p->lock.  Returns NULL if nothing is dirty.
+ */
+static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
+{
+       struct list_head *l;
+       struct wb_cache_entry *r;
+
+       if (list_empty(&p->dirty))
+               return NULL;
+
+       l = list_pop(&p->dirty);
+       r = container_of(l, struct wb_cache_entry, list);
+       list_add(l, &p->clean_pending);
+
+       return r;
+}
+
+/* Hand one dirty block to the core for writeback; -ENOENT if none. */
+static int wb_writeback_work(struct dm_cache_policy *pe,
+                            dm_oblock_t *oblock,
+                            dm_cblock_t *cblock)
+{
+       int r = -ENOENT;
+       struct policy *p = to_policy(pe);
+       struct wb_cache_entry *e;
+       unsigned long flags;
+
+       spin_lock_irqsave(&p->lock, flags);
+
+       e = get_next_dirty_entry(p);
+       if (e) {
+               *oblock = e->oblock;
+               *cblock = e->cblock;
+               r = 0;
+       }
+
+       spin_unlock_irqrestore(&p->lock, flags);
+
+       return r;
+}
+
+/* Number of cache blocks currently mapped. */
+static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
+{
+       return to_policy(pe)->nr_cblocks_allocated;
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct policy *p)
+{
+       p->policy.destroy = wb_destroy;
+       p->policy.map = wb_map;
+       p->policy.lookup = wb_lookup;
+       p->policy.set_dirty = wb_set_dirty;
+       p->policy.clear_dirty = wb_clear_dirty;
+       p->policy.load_mapping = wb_load_mapping;
+       p->policy.walk_mappings = NULL;
+       p->policy.remove_mapping = wb_remove_mapping;
+       p->policy.writeback_work = wb_writeback_work;
+       p->policy.force_mapping = wb_force_mapping;
+       p->policy.residency = wb_residency;
+       p->policy.tick = NULL;
+}
+
+/*
+ * Policy constructor: preallocates one entry per cache block plus the
+ * lookup hash.  origin_size and cache_block_size are unused by this
+ * policy.  Returns NULL on allocation failure.
+ */
+static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
+                                        sector_t origin_size,
+                                        sector_t cache_block_size)
+{
+       int r;
+       struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+       if (!p)
+               return NULL;
+
+       init_policy_functions(p);
+       INIT_LIST_HEAD(&p->free);
+       INIT_LIST_HEAD(&p->clean);
+       INIT_LIST_HEAD(&p->clean_pending);
+       INIT_LIST_HEAD(&p->dirty);
+
+       p->cache_size = cache_size;
+       spin_lock_init(&p->lock);
+
+       /* Allocate cache entry structs and add them to free list. */
+       r = alloc_cache_blocks_with_hash(p, cache_size);
+       if (!r)
+               return &p->policy;
+
+       kfree(p);
+
+       return NULL;
+}
+/*----------------------------------------------------------------------------*/
+
+/* Registration record for the "cleaner" policy (stores no hints). */
+static struct dm_cache_policy_type wb_policy_type = {
+       .name = "cleaner",
+       .hint_size = 0,
+       .owner = THIS_MODULE,
+       .create = wb_create
+};
+
+/* Module init: register the policy with the dm-cache core. */
+static int __init wb_init(void)
+{
+       int r = dm_cache_policy_register(&wb_policy_type);
+
+       if (r < 0)
+               DMERR("register failed %d", r);
+       else
+               DMINFO("version " CLEANER_VERSION " loaded");
+
+       return r;
+}
+
+static void __exit wb_exit(void)
+{
+       dm_cache_policy_unregister(&wb_policy_type);
+}
+
+module_init(wb_init);
+module_exit(wb_exit);
+
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("cleaner cache policy");
new file mode 100644 (file)
index 0000000..52a75be
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_POLICY_INTERNAL_H
+#define DM_CACHE_POLICY_INTERNAL_H
+
+#include "dm-cache-policy.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Little inline functions that simplify calling the policy methods.
+ */
+static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+                            bool can_block, bool can_migrate, bool discarded_oblock,
+                            struct bio *bio, struct policy_result *result)
+{
+       return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
+}
+
+static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+       BUG_ON(!p->lookup);
+       return p->lookup(p, oblock, cblock);
+}
+
+static inline void policy_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+       if (p->set_dirty)
+               p->set_dirty(p, oblock);
+}
+
+static inline void policy_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+       if (p->clear_dirty)
+               p->clear_dirty(p, oblock);
+}
+
+static inline int policy_load_mapping(struct dm_cache_policy *p,
+                                     dm_oblock_t oblock, dm_cblock_t cblock,
+                                     uint32_t hint, bool hint_valid)
+{
+       return p->load_mapping(p, oblock, cblock, hint, hint_valid);
+}
+
+static inline int policy_walk_mappings(struct dm_cache_policy *p,
+                                     policy_walk_fn fn, void *context)
+{
+       return p->walk_mappings ? p->walk_mappings(p, fn, context) : 0;
+}
+
+static inline int policy_writeback_work(struct dm_cache_policy *p,
+                                       dm_oblock_t *oblock,
+                                       dm_cblock_t *cblock)
+{
+       return p->writeback_work ? p->writeback_work(p, oblock, cblock) : -ENOENT;
+}
+
+static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+       return p->remove_mapping(p, oblock);
+}
+
+static inline void policy_force_mapping(struct dm_cache_policy *p,
+                                       dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+       return p->force_mapping(p, current_oblock, new_oblock);
+}
+
+static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
+{
+       return p->residency(p);
+}
+
+static inline void policy_tick(struct dm_cache_policy *p)
+{
+       if (p->tick)
+               return p->tick(p);
+}
+
+static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+{
+       ssize_t sz = 0;
+       if (p->emit_config_values)
+               return p->emit_config_values(p, result, maxlen);
+
+       DMEMIT("0");
+       return 0;
+}
+
+static inline int policy_set_config_value(struct dm_cache_policy *p,
+                                         const char *key, const char *value)
+{
+       return p->set_config_value ? p->set_config_value(p, key, value) : -EINVAL;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Creates a new cache policy given a policy name, a cache size, an origin size and the block size.
+ */
+struct dm_cache_policy *dm_cache_policy_create(const char *name, dm_cblock_t cache_size,
+                                              sector_t origin_size, sector_t block_size);
+
+/*
+ * Destroys the policy.  This drops references to the policy module as well
+ * as calling its destroy method.  So always use this rather than calling
+ * the policy->destroy method directly.
+ */
+void dm_cache_policy_destroy(struct dm_cache_policy *p);
+
+/*
+ * In case we've forgotten.
+ */
+const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
+
+size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_POLICY_INTERNAL_H */
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
new file mode 100644 (file)
index 0000000..9641532
--- /dev/null
@@ -0,0 +1,1195 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache-policy-mq"
+#define MQ_VERSION     "1.0.0"
+
+static struct kmem_cache *mq_entry_cache;
+
+/*----------------------------------------------------------------*/
+
+static unsigned next_power(unsigned n, unsigned min)
+{
+       return roundup_pow_of_two(max(n, min));
+}
+
+/*----------------------------------------------------------------*/
+
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+       size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+       return vzalloc(s);
+}
+
+static void free_bitset(unsigned long *bits)
+{
+       vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Large, sequential ios are probably better left on the origin device since
+ * spindles tend to have good bandwidth.
+ *
+ * The io_tracker tries to spot when the io is in one of these sequential
+ * modes.
+ *
+ * The two thresholds used to switch between random and sequential io mode
+ * default as follows and can be adjusted via the constructor and message interfaces.
+ */
+#define RANDOM_THRESHOLD_DEFAULT 4
+#define SEQUENTIAL_THRESHOLD_DEFAULT 512
+
+enum io_pattern {
+       PATTERN_SEQUENTIAL,
+       PATTERN_RANDOM
+};
+
+struct io_tracker {
+       enum io_pattern pattern;
+
+       unsigned nr_seq_samples;
+       unsigned nr_rand_samples;
+       unsigned thresholds[2];
+
+       dm_oblock_t last_end_oblock;
+};
+
+static void iot_init(struct io_tracker *t,
+                    int sequential_threshold, int random_threshold)
+{
+       t->pattern = PATTERN_RANDOM;
+       t->nr_seq_samples = 0;
+       t->nr_rand_samples = 0;
+       t->last_end_oblock = 0;
+       t->thresholds[PATTERN_RANDOM] = random_threshold;
+       t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
+}
+
+static enum io_pattern iot_pattern(struct io_tracker *t)
+{
+       return t->pattern;
+}
+
+static void iot_update_stats(struct io_tracker *t, struct bio *bio)
+{
+       if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+               t->nr_seq_samples++;
+       else {
+               /*
+                * Just one non-sequential IO is enough to reset the
+                * counters.
+                */
+               if (t->nr_seq_samples) {
+                       t->nr_seq_samples = 0;
+                       t->nr_rand_samples = 0;
+               }
+
+               t->nr_rand_samples++;
+       }
+
+       t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+}
+
+static void iot_check_for_pattern_switch(struct io_tracker *t)
+{
+       switch (t->pattern) {
+       case PATTERN_SEQUENTIAL:
+               if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
+                       t->pattern = PATTERN_RANDOM;
+                       t->nr_seq_samples = t->nr_rand_samples = 0;
+               }
+               break;
+
+       case PATTERN_RANDOM:
+               if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
+                       t->pattern = PATTERN_SEQUENTIAL;
+                       t->nr_seq_samples = t->nr_rand_samples = 0;
+               }
+               break;
+       }
+}
+
+static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
+{
+       iot_update_stats(t, bio);
+       iot_check_for_pattern_switch(t);
+}
+
+/*----------------------------------------------------------------*/
+
+
+/*
+ * This queue is divided up into different levels.  Allowing us to push
+ * entries to the back of any of the levels.  Think of it as a partially
+ * sorted queue.
+ */
+#define NR_QUEUE_LEVELS 16u
+
+struct queue {
+       struct list_head qs[NR_QUEUE_LEVELS];
+};
+
+static void queue_init(struct queue *q)
+{
+       unsigned i;
+
+       for (i = 0; i < NR_QUEUE_LEVELS; i++)
+               INIT_LIST_HEAD(q->qs + i);
+}
+
+/*
+ * Insert an entry to the back of the given level.
+ */
+static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
+{
+       list_add_tail(elt, q->qs + level);
+}
+
+static void queue_remove(struct list_head *elt)
+{
+       list_del(elt);
+}
+
+/*
+ * Shifts all regions down one level.  This has no effect on the order of
+ * the queue.
+ */
+static void queue_shift_down(struct queue *q)
+{
+       unsigned level;
+
+       for (level = 1; level < NR_QUEUE_LEVELS; level++)
+               list_splice_init(q->qs + level, q->qs + level - 1);
+}
+
+/*
+ * Gives us the oldest entry of the lowest populated level.  If the first
+ * level is emptied then we shift down one level.
+ */
+static struct list_head *queue_pop(struct queue *q)
+{
+       unsigned level;
+       struct list_head *r;
+
+       for (level = 0; level < NR_QUEUE_LEVELS; level++)
+               if (!list_empty(q->qs + level)) {
+                       r = q->qs[level].next;
+                       list_del(r);
+
+                       /* have we just emptied the bottom level? */
+                       if (level == 0 && list_empty(q->qs))
+                               queue_shift_down(q);
+
+                       return r;
+               }
+
+       return NULL;
+}
+
+static struct list_head *list_pop(struct list_head *lh)
+{
+       struct list_head *r = lh->next;
+
+       BUG_ON(!r);
+       list_del_init(r);
+
+       return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Describes a cache entry.  Used in both the cache and the pre_cache.
+ */
+struct entry {
+       struct hlist_node hlist;
+       struct list_head list;
+       dm_oblock_t oblock;
+       dm_cblock_t cblock;     /* valid iff in_cache */
+
+       /*
+        * FIXME: pack these better
+        */
+       bool in_cache:1;
+       unsigned hit_count;
+       unsigned generation;
+       unsigned tick;
+};
+
+struct mq_policy {
+       struct dm_cache_policy policy;
+
+       /* protects everything */
+       struct mutex lock;
+       dm_cblock_t cache_size;
+       struct io_tracker tracker;
+
+       /*
+        * We maintain two queues of entries.  The cache proper contains
+        * the currently active mappings.  Whereas the pre_cache tracks
+        * blocks that are being hit frequently and potential candidates
+        * for promotion to the cache.
+        */
+       struct queue pre_cache;
+       struct queue cache;
+
+       /*
+        * Keeps track of time, incremented by the core.  We use this to
+        * avoid attributing multiple hits within the same tick.
+        *
+        * Access to tick_protected should be done with the spin lock held.
+        * It's copied to tick at the start of the map function (within the
+        * mutex).
+        */
+       spinlock_t tick_lock;
+       unsigned tick_protected;
+       unsigned tick;
+
+       /*
+        * A count of the number of times the map function has been called
+        * and found an entry in the pre_cache or cache.  Currently used to
+        * calculate the generation.
+        */
+       unsigned hit_count;
+
+       /*
+        * A generation is a longish period that is used to trigger some
+        * book keeping effects.  eg, decrementing hit counts on entries.
+        * This is needed to allow the cache to evolve as io patterns
+        * change.
+        */
+       unsigned generation;
+       unsigned generation_period; /* in lookups (will probably change) */
+
+       /*
+        * Entries in the pre_cache whose hit count passes the promotion
+        * threshold move to the cache proper.  Working out the correct
+        * value for the promotion_threshold is crucial to this policy.
+        */
+       unsigned promote_threshold;
+
+       /*
+        * We need cache_size entries for the cache, and choose to have
+        * cache_size entries for the pre_cache too.  One motivation for
+        * using the same size is to make the hit counts directly
+        * comparable between pre_cache and cache.
+        */
+       unsigned nr_entries;
+       unsigned nr_entries_allocated;
+       struct list_head free;
+
+       /*
+        * Cache blocks may be unallocated.  We store this info in a
+        * bitset.
+        */
+       unsigned long *allocation_bitset;
+       unsigned nr_cblocks_allocated;
+       unsigned find_free_nr_words;
+       unsigned find_free_last_word;
+
+       /*
+        * The hash table allows us to quickly find an entry by origin
+        * block.  Both pre_cache and cache entries are in here.
+        */
+       unsigned nr_buckets;
+       dm_block_t hash_bits;
+       struct hlist_head *table;
+};
+
+/*----------------------------------------------------------------*/
+/* Free/alloc mq cache entry structures. */
+static void takeout_queue(struct list_head *lh, struct queue *q)
+{
+       unsigned level;
+
+       for (level = 0; level < NR_QUEUE_LEVELS; level++)
+               list_splice(q->qs + level, lh);
+}
+
+static void free_entries(struct mq_policy *mq)
+{
+       struct entry *e, *tmp;
+
+       takeout_queue(&mq->free, &mq->pre_cache);
+       takeout_queue(&mq->free, &mq->cache);
+
+       list_for_each_entry_safe(e, tmp, &mq->free, list)
+               kmem_cache_free(mq_entry_cache, e);
+}
+
+static int alloc_entries(struct mq_policy *mq, unsigned elts)
+{
+       unsigned u = mq->nr_entries;
+
+       INIT_LIST_HEAD(&mq->free);
+       mq->nr_entries_allocated = 0;
+
+       while (u--) {
+               struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);
+
+               if (!e) {
+                       free_entries(mq);
+                       return -ENOMEM;
+               }
+
+
+               list_add(&e->list, &mq->free);
+       }
+
+       return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Simple hash table implementation.  Should replace with the standard hash
+ * table that's making its way upstream.
+ */
+static void hash_insert(struct mq_policy *mq, struct entry *e)
+{
+       unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);
+
+       hlist_add_head(&e->hlist, mq->table + h);
+}
+
+static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
+{
+       unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
+       struct hlist_head *bucket = mq->table + h;
+       struct entry *e;
+
+       hlist_for_each_entry(e, bucket, hlist)
+               if (e->oblock == oblock) {
+                       hlist_del(&e->hlist);
+                       hlist_add_head(&e->hlist, bucket);
+                       return e;
+               }
+
+       return NULL;
+}
+
+static void hash_remove(struct entry *e)
+{
+       hlist_del(&e->hlist);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Allocates a new entry structure.  The memory is allocated in one lump,
+ * so we just hand it out here.  Returns NULL if all entries have
+ * already been allocated.  Cannot fail otherwise.
+ */
+static struct entry *alloc_entry(struct mq_policy *mq)
+{
+       struct entry *e;
+
+       if (mq->nr_entries_allocated >= mq->nr_entries) {
+               BUG_ON(!list_empty(&mq->free));
+               return NULL;
+       }
+
+       e = list_entry(list_pop(&mq->free), struct entry, list);
+       INIT_LIST_HEAD(&e->list);
+       INIT_HLIST_NODE(&e->hlist);
+
+       mq->nr_entries_allocated++;
+       return e;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Mark cache blocks allocated or not in the bitset.
+ */
+static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+       BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
+       BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));
+
+       set_bit(from_cblock(cblock), mq->allocation_bitset);
+       mq->nr_cblocks_allocated++;
+}
+
+static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+       BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
+       BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));
+
+       clear_bit(from_cblock(cblock), mq->allocation_bitset);
+       mq->nr_cblocks_allocated--;
+}
+
+static bool any_free_cblocks(struct mq_policy *mq)
+{
+       return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
+}
+
+/*
+ * Fills result out with a cache block that isn't in use, or return
+ * -ENOSPC.  This does _not_ mark the cblock as allocated, the caller is
+ * responsible for that.
+ */
+static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
+                             dm_cblock_t *result, unsigned *last_word)
+{
+       int r = -ENOSPC;
+       unsigned w;
+
+       for (w = begin; w < end; w++) {
+               /*
+                * ffz is undefined if no zero exists
+                */
+               if (mq->allocation_bitset[w] != ~0UL) {
+                       *last_word = w;
+                       *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
+                       if (from_cblock(*result) < from_cblock(mq->cache_size))
+                               r = 0;
+
+                       break;
+               }
+       }
+
+       return r;
+}
+
+static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
+{
+       int r;
+
+       if (!any_free_cblocks(mq))
+               return -ENOSPC;
+
+       r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word);
+       if (r == -ENOSPC && mq->find_free_last_word)
+               r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word);
+
+       return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Now we get to the meat of the policy.  This section deals with deciding
+ * when to add entries to the pre_cache and cache, and move between
+ * them.
+ */
+
+/*
+ * The queue level is based on the log2 of the hit count.
+ */
+static unsigned queue_level(struct entry *e)
+{
+       return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
+}
+
+/*
+ * Inserts the entry into the pre_cache or the cache.  Ensures the cache
+ * block is marked as allocated if necessary.  Inserts into the hash table.  Sets the
+ * tick which records when the entry was last moved about.
+ */
+static void push(struct mq_policy *mq, struct entry *e)
+{
+       e->tick = mq->tick;
+       hash_insert(mq, e);
+
+       if (e->in_cache) {
+               alloc_cblock(mq, e->cblock);
+               queue_push(&mq->cache, queue_level(e), &e->list);
+       } else
+               queue_push(&mq->pre_cache, queue_level(e), &e->list);
+}
+
+/*
+ * Removes an entry from pre_cache or cache.  Removes from the hash table.
+ * Frees off the cache block if necessary.
+ */
+static void del(struct mq_policy *mq, struct entry *e)
+{
+       queue_remove(&e->list);
+       hash_remove(e);
+       if (e->in_cache)
+               free_cblock(mq, e->cblock);
+}
+
+/*
+ * Like del, except it removes the first entry in the queue (ie. the least
+ * recently used).
+ */
+static struct entry *pop(struct mq_policy *mq, struct queue *q)
+{
+       struct entry *e = container_of(queue_pop(q), struct entry, list);
+
+       if (e) {
+               hash_remove(e);
+
+               if (e->in_cache)
+                       free_cblock(mq, e->cblock);
+       }
+
+       return e;
+}
+
+/*
+ * Has this entry already been updated?
+ */
+static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
+{
+       return mq->tick == e->tick;
+}
+
+/*
+ * The promotion threshold is adjusted every generation.  As are the counts
+ * of the entries.
+ *
+ * At the moment the threshold is taken by averaging the hit counts of some
+ * of the entries in the cache (the first 20 entries of the first level).
+ *
+ * We can be much cleverer than this though.  For example, each promotion
+ * could bump up the threshold helping to prevent churn.  Much more to do
+ * here.
+ */
+
+#define MAX_TO_AVERAGE 20
+
+static void check_generation(struct mq_policy *mq)
+{
+       unsigned total = 0, nr = 0, count = 0, level;
+       struct list_head *head;
+       struct entry *e;
+
+       if ((mq->hit_count >= mq->generation_period) &&
+           (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {
+
+               mq->hit_count = 0;
+               mq->generation++;
+
+               for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
+                       head = mq->cache.qs + level;
+                       list_for_each_entry(e, head, list) {
+                               nr++;
+                               total += e->hit_count;
+
+                               if (++count >= MAX_TO_AVERAGE)
+                                       break;
+                       }
+               }
+
+               mq->promote_threshold = nr ? total / nr : 1;
+               if (mq->promote_threshold * nr < total)
+                       mq->promote_threshold++;
+       }
+}
+
+/*
+ * Whenever we use an entry we bump up its hit counter, and push it to the
+ * back of its current level.
+ */
+static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
+{
+       if (updated_this_tick(mq, e))
+               return;
+
+       e->hit_count++;
+       mq->hit_count++;
+       check_generation(mq);
+
+       /* generation adjustment, to stop the counts increasing forever. */
+       /* FIXME: divide? */
+       /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
+       e->generation = mq->generation;
+
+       del(mq, e);
+       push(mq, e);
+}
+
+/*
+ * Demote the least recently used entry from the cache to the pre_cache.
+ * Returns the new cache entry to use, and the old origin block it was
+ * mapped to.
+ *
+ * We drop the hit count on the demoted entry back to 1 to stop it bouncing
+ * straight back into the cache if it's subsequently hit.  There are
+ * various options here, and more experimentation would be good:
+ *
+ * - just forget about the demoted entry completely (ie. don't insert it
+     into the pre_cache).
+ * - divide the hit count rather than setting it to some hard coded value.
+ * - set the hit count to a hard coded value other than 1, eg, is it better
+ *   if it goes in at level 2?
+ */
+static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+{
+       dm_cblock_t result;
+       struct entry *demoted = pop(mq, &mq->cache);
+
+       BUG_ON(!demoted);
+       result = demoted->cblock;
+       *oblock = demoted->oblock;
+       demoted->in_cache = false;
+       demoted->hit_count = 1;
+       push(mq, demoted);
+
+       return result;
+}
+
+/*
+ * We modify the basic promotion_threshold depending on the specific io.
+ *
+ * If the origin block has been discarded then there's no cost to copy it
+ * to the cache.
+ *
+ * We bias towards reads, since they can be demoted at no cost if they
+ * haven't been dirtied.
+ */
+#define DISCARDED_PROMOTE_THRESHOLD 1
+#define READ_PROMOTE_THRESHOLD 4
+#define WRITE_PROMOTE_THRESHOLD 8
+
+static unsigned adjusted_promote_threshold(struct mq_policy *mq,
+                                          bool discarded_oblock, int data_dir)
+{
+       if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+               /*
+                * We don't need to do any copying at all, so give this a
+                * very low threshold.  In practice this only triggers
+                * during initial population after a format.
+                */
+               return DISCARDED_PROMOTE_THRESHOLD;
+
+       return data_dir == READ ?
+               (mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
+               (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+}
+
+static bool should_promote(struct mq_policy *mq, struct entry *e,
+                          bool discarded_oblock, int data_dir)
+{
+       return e->hit_count >=
+               adjusted_promote_threshold(mq, discarded_oblock, data_dir);
+}
+
+static int cache_entry_found(struct mq_policy *mq,
+                            struct entry *e,
+                            struct policy_result *result)
+{
+       requeue_and_update_tick(mq, e);
+
+       if (e->in_cache) {
+               result->op = POLICY_HIT;
+               result->cblock = e->cblock;
+       }
+
+       return 0;
+}
+
+/*
+ * Moves an entry from the pre_cache to the cache.  The main work is
+ * finding which cache block to use.
+ */
+static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
+                             struct policy_result *result)
+{
+       dm_cblock_t cblock;
+
+       if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+               result->op = POLICY_REPLACE;
+               cblock = demote_cblock(mq, &result->old_oblock);
+       } else
+               result->op = POLICY_NEW;
+
+       result->cblock = e->cblock = cblock;
+
+       del(mq, e);
+       e->in_cache = true;
+       push(mq, e);
+
+       return 0;
+}
+
+static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
+                                bool can_migrate, bool discarded_oblock,
+                                int data_dir, struct policy_result *result)
+{
+       int r = 0;
+       bool updated = updated_this_tick(mq, e);
+
+       requeue_and_update_tick(mq, e);
+
+       if ((!discarded_oblock && updated) ||
+           !should_promote(mq, e, discarded_oblock, data_dir))
+               result->op = POLICY_MISS;
+       else if (!can_migrate)
+               r = -EWOULDBLOCK;
+       else
+               r = pre_cache_to_cache(mq, e, result);
+
+       return r;
+}
+
+static void insert_in_pre_cache(struct mq_policy *mq,
+                               dm_oblock_t oblock)
+{
+       struct entry *e = alloc_entry(mq);
+
+       if (!e)
+               /*
+                * There's no spare entry structure, so we grab the least
+                * used one from the pre_cache.
+                */
+               e = pop(mq, &mq->pre_cache);
+
+       if (unlikely(!e)) {
+               DMWARN("couldn't pop from pre cache");
+               return;
+       }
+
+       e->in_cache = false;
+       e->oblock = oblock;
+       e->hit_count = 1;
+       e->generation = mq->generation;
+       push(mq, e);
+}
+
+static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
+                           struct policy_result *result)
+{
+       struct entry *e;
+       dm_cblock_t cblock;
+
+       if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+               result->op = POLICY_MISS;
+               insert_in_pre_cache(mq, oblock);
+               return;
+       }
+
+       e = alloc_entry(mq);
+       if (unlikely(!e)) {
+               result->op = POLICY_MISS;
+               return;
+       }
+
+       e->oblock = oblock;
+       e->cblock = cblock;
+       e->in_cache = true;
+       e->hit_count = 1;
+       e->generation = mq->generation;
+       push(mq, e);
+
+       result->op = POLICY_NEW;
+       result->cblock = e->cblock;
+}
+
+static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
+                         bool can_migrate, bool discarded_oblock,
+                         int data_dir, struct policy_result *result)
+{
+       if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
+               if (can_migrate)
+                       insert_in_cache(mq, oblock, result);
+               else
+                       return -EWOULDBLOCK;
+       } else {
+               insert_in_pre_cache(mq, oblock);
+               result->op = POLICY_MISS;
+       }
+
+       return 0;
+}
+
+/*
+ * Looks the oblock up in the hash table, then decides whether to put in
+ * pre_cache, or cache etc.
+ */
+static int map(struct mq_policy *mq, dm_oblock_t oblock,
+              bool can_migrate, bool discarded_oblock,
+              int data_dir, struct policy_result *result)
+{
+       int r = 0;
+       struct entry *e = hash_lookup(mq, oblock);
+
+       if (e && e->in_cache)
+               r = cache_entry_found(mq, e, result);
+       else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
+               result->op = POLICY_MISS;
+       else if (e)
+               r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
+                                         data_dir, result);
+       else
+               r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
+                                  data_dir, result);
+
+       if (r == -EWOULDBLOCK)
+               result->op = POLICY_MISS;
+
+       return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Public interface, via the policy struct.  See dm-cache-policy.h for a
+ * description of these.
+ */
+
+static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
+{
+       return container_of(p, struct mq_policy, policy);
+}
+
+static void mq_destroy(struct dm_cache_policy *p)
+{
+       struct mq_policy *mq = to_mq_policy(p);
+
+       free_bitset(mq->allocation_bitset);
+       kfree(mq->table);
+       free_entries(mq);
+       kfree(mq);
+}
+
+static void copy_tick(struct mq_policy *mq)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&mq->tick_lock, flags);
+       mq->tick = mq->tick_protected;
+       spin_unlock_irqrestore(&mq->tick_lock, flags);
+}
+
+static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+                 bool can_block, bool can_migrate, bool discarded_oblock,
+                 struct bio *bio, struct policy_result *result)
+{
+       int r;
+       struct mq_policy *mq = to_mq_policy(p);
+
+       result->op = POLICY_MISS;
+
+       if (can_block)
+               mutex_lock(&mq->lock);
+       else if (!mutex_trylock(&mq->lock))
+               return -EWOULDBLOCK;
+
+       copy_tick(mq);
+
+       iot_examine_bio(&mq->tracker, bio);
+       r = map(mq, oblock, can_migrate, discarded_oblock,
+               bio_data_dir(bio), result);
+
+       mutex_unlock(&mq->lock);
+
+       return r;
+}
+
+static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+       int r;
+       struct mq_policy *mq = to_mq_policy(p);
+       struct entry *e;
+
+       if (!mutex_trylock(&mq->lock))
+               return -EWOULDBLOCK;
+
+       e = hash_lookup(mq, oblock);
+       if (e && e->in_cache) {
+               *cblock = e->cblock;
+               r = 0;
+       } else
+               r = -ENOENT;
+
+       mutex_unlock(&mq->lock);
+
+       return r;
+}
+
+/*
+ * Policy 'load_mapping' method: recreate an in-cache entry from the
+ * on-disk metadata.  A valid hint seeds the entry's hit count, otherwise
+ * it starts at 1.  Returns -ENOMEM if the preallocated entry pool is
+ * exhausted.
+ *
+ * NOTE(review): mq->lock is not taken here -- presumably this is only
+ * called while the target is being constructed, before any io; confirm
+ * against the core target.
+ */
+static int mq_load_mapping(struct dm_cache_policy *p,
+                          dm_oblock_t oblock, dm_cblock_t cblock,
+                          uint32_t hint, bool hint_valid)
+{
+       struct mq_policy *mq = to_mq_policy(p);
+       struct entry *e;
+
+       e = alloc_entry(mq);
+       if (!e)
+               return -ENOMEM;
+
+       e->cblock = cblock;
+       e->oblock = oblock;
+       e->in_cache = true;
+       e->hit_count = hint_valid ? hint : 1;
+       e->generation = mq->generation;
+       push(mq, e);
+
+       return 0;
+}
+
+/*
+ * Policy 'walk_mappings' method: invoke @fn for every entry in the cache
+ * queue, level by level, passing the hit count as the hint to persist.
+ * Stops early and returns fn's value on the first non-zero return.
+ */
+static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
+                           void *context)
+{
+       struct mq_policy *mq = to_mq_policy(p);
+       int r = 0;
+       struct entry *e;
+       unsigned level;
+
+       mutex_lock(&mq->lock);
+
+       for (level = 0; level < NR_QUEUE_LEVELS; level++)
+               list_for_each_entry(e, &mq->cache.qs[level], list) {
+                       r = fn(context, e->cblock, e->oblock, e->hit_count);
+                       if (r)
+                               goto out;
+               }
+
+out:
+       mutex_unlock(&mq->lock);
+
+       return r;
+}
+
+/*
+ * Drop the cache mapping for @oblock: the entry is requeued with
+ * in_cache cleared (so push() presumably files it on the pre-cache
+ * side -- confirm against push()).  The block must be mapped; this is
+ * an error-path override and must not fail.
+ */
+static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+{
+       struct entry *e = hash_lookup(mq, oblock);
+
+       BUG_ON(!e || !e->in_cache);
+
+       del(mq, e);
+       e->in_cache = false;
+       push(mq, e);
+}
+
+/* Locked wrapper implementing the policy 'remove_mapping' method. */
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+       struct mq_policy *mq = to_mq_policy(p);
+
+       mutex_lock(&mq->lock);
+       remove_mapping(mq, oblock);
+       mutex_unlock(&mq->lock);
+}
+
+/*
+ * Retarget the entry for @current_oblock at @new_oblock.  del()/push()
+ * around the change keeps the hash/queue placement consistent with the
+ * new origin block.  current_oblock must be mapped; must not fail.
+ */
+static void force_mapping(struct mq_policy *mq,
+                         dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+       struct entry *e = hash_lookup(mq, current_oblock);
+
+       BUG_ON(!e || !e->in_cache);
+
+       del(mq, e);
+       e->oblock = new_oblock;
+       push(mq, e);
+}
+
+/* Locked wrapper implementing the policy 'force_mapping' method. */
+static void mq_force_mapping(struct dm_cache_policy *p,
+                            dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+       struct mq_policy *mq = to_mq_policy(p);
+
+       mutex_lock(&mq->lock);
+       force_mapping(mq, current_oblock, new_oblock);
+       mutex_unlock(&mq->lock);
+}
+
+/*
+ * Policy 'residency' method: how many cache blocks are currently
+ * allocated.  Reads nr_cblocks_allocated without the mutex (see FIXME).
+ */
+static dm_cblock_t mq_residency(struct dm_cache_policy *p)
+{
+       struct mq_policy *mq = to_mq_policy(p);
+
+       /* FIXME: lock mutex, not sure we can block here */
+       return to_cblock(mq->nr_cblocks_allocated);
+}
+
+/*
+ * Policy 'tick' method: bump the tick count under the irq-safe spinlock.
+ * The protected counter is presumably folded into mq->tick by
+ * copy_tick() (called with the mutex held from the map path) -- confirm.
+ */
+static void mq_tick(struct dm_cache_policy *p)
+{
+       struct mq_policy *mq = to_mq_policy(p);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mq->tick_lock, flags);
+       mq->tick_protected++;
+       spin_unlock_irqrestore(&mq->tick_lock, flags);
+}
+
+/*
+ * Policy 'set_config_value' method.  Recognised keys:
+ *   "random_threshold", "sequential_threshold"
+ * The value must parse as a base-10 unsigned long; returns -EINVAL on an
+ * unknown key or unparsable value.
+ *
+ * NOTE(review): the threshold is stored without taking mq->lock --
+ * presumably a benign racy word-sized store; confirm.
+ */
+static int mq_set_config_value(struct dm_cache_policy *p,
+                              const char *key, const char *value)
+{
+       struct mq_policy *mq = to_mq_policy(p);
+       enum io_pattern pattern;
+       unsigned long tmp;
+
+       if (!strcasecmp(key, "random_threshold"))
+               pattern = PATTERN_RANDOM;
+       else if (!strcasecmp(key, "sequential_threshold"))
+               pattern = PATTERN_SEQUENTIAL;
+       else
+               return -EINVAL;
+
+       if (kstrtoul(value, 10, &tmp))
+               return -EINVAL;
+
+       mq->tracker.thresholds[pattern] = tmp;
+
+       return 0;
+}
+
+/*
+ * Policy 'emit_config_values' method: report the tunables set above.
+ * sz/result/maxlen are consumed implicitly by the DMEMIT() macro; the
+ * leading "4" is presumably the count of words that follow -- confirm
+ * against the status-line format consumed by userspace.
+ */
+static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+{
+       ssize_t sz = 0;
+       struct mq_policy *mq = to_mq_policy(p);
+
+       DMEMIT("4 random_threshold %u sequential_threshold %u",
+              mq->tracker.thresholds[PATTERN_RANDOM],
+              mq->tracker.thresholds[PATTERN_SEQUENTIAL]);
+
+       return 0;
+}
+
+/*
+ * Init the policy plugin interface function pointers.  writeback_work is
+ * deliberately left NULL (NOTE(review): presumably the core target
+ * treats a NULL method as "no writeback work" -- confirm).
+ */
+static void init_policy_functions(struct mq_policy *mq)
+{
+       mq->policy.destroy = mq_destroy;
+       mq->policy.map = mq_map;
+       mq->policy.lookup = mq_lookup;
+       mq->policy.load_mapping = mq_load_mapping;
+       mq->policy.walk_mappings = mq_walk_mappings;
+       mq->policy.remove_mapping = mq_remove_mapping;
+       mq->policy.writeback_work = NULL;
+       mq->policy.force_mapping = mq_force_mapping;
+       mq->policy.residency = mq_residency;
+       mq->policy.tick = mq_tick;
+       mq->policy.emit_config_values = mq_emit_config_values;
+       mq->policy.set_config_value = mq_set_config_value;
+}
+
+/*
+ * Instantiate an mq policy sized for @cache_size cache blocks.  Returns
+ * a pointer to the embedded dm_cache_policy, or NULL on allocation
+ * failure.  origin_size and cache_block_size are accepted to satisfy the
+ * create() interface but are not used by this policy.
+ */
+static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+                                        sector_t origin_size,
+                                        sector_t cache_block_size)
+{
+       int r;
+       struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+       if (!mq)
+               return NULL;
+
+       init_policy_functions(mq);
+       iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
+
+       mq->cache_size = cache_size;
+       mq->tick_protected = 0;
+       mq->tick = 0;
+       mq->hit_count = 0;
+       mq->generation = 0;
+       mq->promote_threshold = 0;
+       mutex_init(&mq->lock);
+       spin_lock_init(&mq->tick_lock);
+       mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
+       mq->find_free_last_word = 0;
+
+       queue_init(&mq->pre_cache);
+       queue_init(&mq->cache);
+       mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
+
+       /*
+        * Twice as many entries as cache blocks -- presumably so the
+        * pre-cache can grow as large as the cache proper; confirm.
+        */
+       mq->nr_entries = 2 * from_cblock(cache_size);
+       r = alloc_entries(mq, mq->nr_entries);
+       if (r)
+               goto bad_cache_alloc;
+
+       mq->nr_entries_allocated = 0;
+       mq->nr_cblocks_allocated = 0;
+
+       mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
+       mq->hash_bits = ffs(mq->nr_buckets) - 1;
+       /*
+        * kcalloc rather than kzalloc(a * b): it checks the
+        * multiplication for overflow.
+        */
+       mq->table = kcalloc(mq->nr_buckets, sizeof(*mq->table), GFP_KERNEL);
+       if (!mq->table)
+               goto bad_alloc_table;
+
+       mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
+       if (!mq->allocation_bitset)
+               goto bad_alloc_bitset;
+
+       return &mq->policy;
+
+bad_alloc_bitset:
+       kfree(mq->table);
+bad_alloc_table:
+       free_entries(mq);
+bad_cache_alloc:
+       kfree(mq);
+
+       return NULL;
+}
+
+/*----------------------------------------------------------------*/
+
+/* The "mq" policy proper. */
+static struct dm_cache_policy_type mq_policy_type = {
+       .name = "mq",
+       .hint_size = 4,
+       .owner = THIS_MODULE,
+       .create = mq_create
+};
+
+/* Same implementation registered under the name "default". */
+static struct dm_cache_policy_type default_policy_type = {
+       .name = "default",
+       .hint_size = 4,
+       .owner = THIS_MODULE,
+       .create = mq_create
+};
+
+/*
+ * Module init: create the entry slab cache and register the policy,
+ * both as "mq" and under the alias "default".  Everything set up so far
+ * is torn down on failure.
+ */
+static int __init mq_init(void)
+{
+       int r = -ENOMEM;
+
+       mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
+                                          sizeof(struct entry),
+                                          __alignof__(struct entry),
+                                          0, NULL);
+       if (!mq_entry_cache)
+               goto bad;
+
+       r = dm_cache_policy_register(&mq_policy_type);
+       if (r) {
+               DMERR("register failed %d", r);
+               goto bad_register_mq;
+       }
+
+       r = dm_cache_policy_register(&default_policy_type);
+       if (!r) {
+               DMINFO("version " MQ_VERSION " loaded");
+               return 0;
+       }
+
+       DMERR("register failed (as default) %d", r);
+
+       dm_cache_policy_unregister(&mq_policy_type);
+bad_register_mq:
+       kmem_cache_destroy(mq_entry_cache);
+bad:
+       /*
+        * Previously this path always returned -ENOMEM, masking the
+        * real errno when registration failed (e.g. -EINVAL for a
+        * duplicate policy name).  Propagate the actual error.
+        */
+       return r;
+}
+
+/* Module exit: mirror of mq_init() -- unregister both names, free slab. */
+static void __exit mq_exit(void)
+{
+       dm_cache_policy_unregister(&mq_policy_type);
+       dm_cache_policy_unregister(&default_policy_type);
+
+       kmem_cache_destroy(mq_entry_cache);
+}
+
+module_init(mq_init);
+module_exit(mq_exit);
+
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("mq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
new file mode 100644 (file)
index 0000000..2cbf5fd
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy-internal.h"
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache-policy"
+
+static DEFINE_SPINLOCK(register_lock);
+static LIST_HEAD(register_list);
+
+/* Linear scan of the registry.  Caller must hold register_lock. */
+static struct dm_cache_policy_type *__find_policy(const char *name)
+{
+       struct dm_cache_policy_type *t;
+
+       list_for_each_entry(t, &register_list, list)
+               if (!strcmp(t->name, name))
+                       return t;
+
+       return NULL;
+}
+
+/*
+ * Find a policy and take a reference on its owning module.  Caller must
+ * hold register_lock.  Returns NULL if the name is unknown, or
+ * ERR_PTR(-EINVAL) if the module reference couldn't be taken (module
+ * being unloaded).
+ */
+static struct dm_cache_policy_type *__get_policy_once(const char *name)
+{
+       struct dm_cache_policy_type *t = __find_policy(name);
+
+       if (t && !try_module_get(t->owner)) {
+               DMWARN("couldn't get module %s", name);
+               t = ERR_PTR(-EINVAL);
+       }
+
+       return t;
+}
+
+/* Locked wrapper around __get_policy_once(). */
+static struct dm_cache_policy_type *get_policy_once(const char *name)
+{
+       struct dm_cache_policy_type *t;
+
+       spin_lock(&register_lock);
+       t = __get_policy_once(name);
+       spin_unlock(&register_lock);
+
+       return t;
+}
+
+/*
+ * As get_policy_once(), but on a miss try to demand-load the module
+ * "dm-cache-<name>" and look again.  Returns NULL on any failure
+ * (the ERR_PTR from the helper is flattened to NULL here).
+ */
+static struct dm_cache_policy_type *get_policy(const char *name)
+{
+       struct dm_cache_policy_type *t;
+
+       t = get_policy_once(name);
+       if (IS_ERR(t))
+               return NULL;
+
+       if (t)
+               return t;
+
+       request_module("dm-cache-%s", name);
+
+       t = get_policy_once(name);
+       if (IS_ERR(t))
+               return NULL;
+
+       return t;
+}
+
+/* Drop the module reference taken in __get_policy_once(). */
+static void put_policy(struct dm_cache_policy_type *t)
+{
+       module_put(t->owner);
+}
+
+/*
+ * Add a policy type to the registry.  Returns -EINVAL for an
+ * unsupported hint size or a duplicate name, 0 on success.
+ */
+int dm_cache_policy_register(struct dm_cache_policy_type *type)
+{
+       int r;
+
+       /* One size fits all for now */
+       if (type->hint_size != 0 && type->hint_size != 4) {
+               DMWARN("hint size must be 0 or 4 but %llu supplied.", (unsigned long long) type->hint_size);
+               return -EINVAL;
+       }
+
+       spin_lock(&register_lock);
+       if (__find_policy(type->name)) {
+               DMWARN("attempt to register policy under duplicate name %s", type->name);
+               r = -EINVAL;
+       } else {
+               list_add(&type->list, &register_list);
+               r = 0;
+       }
+       spin_unlock(&register_lock);
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_register);
+
+/* Remove a policy type from the registry (safe even if never added:
+ * list_del_init on an initialised list_head). */
+void dm_cache_policy_unregister(struct dm_cache_policy_type *type)
+{
+       spin_lock(&register_lock);
+       list_del_init(&type->list);
+       spin_unlock(&register_lock);
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_unregister);
+
+/*
+ * Look up @name (demand-loading module dm-cache-<name> if necessary) and
+ * instantiate the policy.  The type is stashed in p->private so the
+ * destroy path can drop the module reference.  Returns NULL on failure;
+ * the module reference is released if create() itself fails.
+ */
+struct dm_cache_policy *dm_cache_policy_create(const char *name,
+                                              dm_cblock_t cache_size,
+                                              sector_t origin_size,
+                                              sector_t cache_block_size)
+{
+       struct dm_cache_policy *p = NULL;
+       struct dm_cache_policy_type *type;
+
+       type = get_policy(name);
+       if (!type) {
+               DMWARN("unknown policy type");
+               return NULL;
+       }
+
+       p = type->create(cache_size, origin_size, cache_block_size);
+       if (!p) {
+               put_policy(type);
+               return NULL;
+       }
+       p->private = type;
+
+       return p;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_create);
+
+/* Destroy a policy instance and drop its module reference. */
+void dm_cache_policy_destroy(struct dm_cache_policy *p)
+{
+       struct dm_cache_policy_type *t = p->private;
+
+       p->destroy(p);
+       put_policy(t);
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_destroy);
+
+/* Accessor: registered name of the policy backing @p. */
+const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
+{
+       struct dm_cache_policy_type *t = p->private;
+
+       return t->name;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
+
+/* Accessor: per-cblock hint size (0 or 4 bytes) of the policy backing @p. */
+size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
+{
+       struct dm_cache_policy_type *t = p->private;
+
+       return t->hint_size;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_hint_size);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
new file mode 100644 (file)
index 0000000..f0f51b2
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_POLICY_H
+#define DM_CACHE_POLICY_H
+
+#include "dm-cache-block-types.h"
+
+#include <linux/device-mapper.h>
+
+/*----------------------------------------------------------------*/
+
+/* FIXME: make it clear which methods are optional.  Get debug policy to
+ * double check this at start.
+ */
+
+/*
+ * The cache policy makes the important decisions about which blocks get to
+ * live on the faster cache device.
+ *
+ * When the core target has to remap a bio it calls the 'map' method of the
+ * policy.  This returns an instruction telling the core target what to do.
+ *
+ * POLICY_HIT:
+ *   That block is in the cache.  Remap to the cache and carry on.
+ *
+ * POLICY_MISS:
+ *   This block is on the origin device.  Remap and carry on.
+ *
+ * POLICY_NEW:
+ *   This block is currently on the origin device, but the policy wants to
+ *   move it.  The core should:
+ *
+ *   - hold any further io to this origin block
+ *   - copy the origin to the given cache block
+ *   - release all the held blocks
+ *   - remap the original block to the cache
+ *
+ * POLICY_REPLACE:
+ *   This block is currently on the origin device.  The policy wants to
+ *   move it to the cache, with the added complication that the destination
+ *   cache block needs a writeback first.  The core should:
+ *
+ *   - hold any further io to this origin block
+ *   - hold any further io to the origin block that's being written back
+ *   - writeback
+ *   - copy new block to cache
+ *   - release held blocks
+ *   - remap bio to cache and reissue.
+ *
+ * Should the core run into trouble while processing a POLICY_NEW or
+ * POLICY_REPLACE instruction it will roll back the policy's mapping using
+ * remove_mapping() or force_mapping().  These methods must not fail.  This
+ * approach avoids having transactional semantics in the policy (ie, the
+ * core informing the policy when a migration is complete), and hence makes
+ * it easier to write new policies.
+ *
+ * In general policy methods should never block, except in the case of the
+ * map function when can_migrate is set.  So be careful to implement using
+ * bounded, preallocated memory.
+ */
+/* See the large comment above for the full semantics of each value. */
+enum policy_operation {
+       POLICY_HIT,     /* block is in the cache: remap and carry on */
+       POLICY_MISS,    /* block is on the origin: remap and carry on */
+       POLICY_NEW,     /* promote block into a free cache block */
+       POLICY_REPLACE  /* promote, evicting/writing back old_oblock first */
+};
+
+/*
+ * This is the instruction passed back to the core target.
+ */
+struct policy_result {
+       enum policy_operation op;
+       dm_oblock_t old_oblock; /* POLICY_REPLACE */
+       dm_cblock_t cblock;     /* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
+};
+
+/* Callback invoked per mapping by walk_mappings(); non-zero aborts the walk. */
+typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
+                             dm_oblock_t oblock, uint32_t hint);
+
+/*
+ * The cache policy object.  Just a bunch of methods.  It is envisaged that
+ * this structure will be embedded in a bigger, policy specific structure
+ * (ie. use container_of()).
+ */
+struct dm_cache_policy {
+
+       /*
+        * FIXME: make it clear which methods are optional, and which may
+        * block.
+        */
+
+       /*
+        * Destroys this object.
+        */
+       void (*destroy)(struct dm_cache_policy *p);
+
+       /*
+        * See large comment above.
+        *
+        * oblock      - the origin block we're interested in.
+        *
+        * can_block - indicates whether the current thread is allowed to
+        *             block.  -EWOULDBLOCK returned if it can't and would.
+        *
+        * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
+        *               instructions.  If denied and the policy would have
+        *               returned one of these instructions it should
+        *               return -EWOULDBLOCK.
+        *
+        * discarded_oblock - indicates whether the whole origin block is
+        *               in a discarded state (FIXME: better to tell the
+        *               policy about this sooner, so it can recycle that
+        *               cache block if it wants.)
+        * bio         - the bio that triggered this call.
+        * result      - gets filled in with the instruction.
+        *
+        * May only return 0, or -EWOULDBLOCK (if !can_migrate)
+        */
+       int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
+                  bool can_block, bool can_migrate, bool discarded_oblock,
+                  struct bio *bio, struct policy_result *result);
+
+       /*
+        * Sometimes we want to see if a block is in the cache, without
+        * triggering any update of stats.  (ie. it's not a real hit).
+        *
+        * Must not block.
+        *
+        * Returns 0 if in cache (*cblock filled in), -ENOENT if not,
+        * < 0 for other errors (-EWOULDBLOCK would be typical).
+        * NOTE(review): this comment previously claimed 1/0; the mq
+        * policy's lookup actually returns 0 on hit and -ENOENT on miss.
+        */
+       int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
+
+       /*
+        * oblock must be a mapped block.  Must not block.
+        */
+       void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+       void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+
+       /*
+        * Called when a cache target is first created.  Used to load a
+        * mapping from the metadata device into the policy.
+        */
+       int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
+                           dm_cblock_t cblock, uint32_t hint, bool hint_valid);
+
+       int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
+                            void *context);
+
+       /*
+        * Override functions used on the error paths of the core target.
+        * They must succeed.
+        */
+       void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
+       void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
+                             dm_oblock_t new_oblock);
+
+       /* Optional: may be NULL (the mq policy leaves it unset). */
+       int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+
+
+       /*
+        * How full is the cache?
+        */
+       dm_cblock_t (*residency)(struct dm_cache_policy *p);
+
+       /*
+        * Because of where we sit in the block layer, we can be asked to
+        * map a lot of little bios that are all in the same block (no
+        * queue merging has occurred).  To stop the policy being fooled by
+        * these the core target sends regular tick() calls to the policy.
+        * The policy should only count an entry as hit once per tick.
+        */
+       void (*tick)(struct dm_cache_policy *p);
+
+       /*
+        * Configuration.
+        */
+       int (*emit_config_values)(struct dm_cache_policy *p,
+                                 char *result, unsigned maxlen);
+       int (*set_config_value)(struct dm_cache_policy *p,
+                               const char *key, const char *value);
+
+       /*
+        * Book keeping ptr for the policy register, not for general use.
+        */
+       void *private;
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We maintain a little register of the different policy types.
+ */
+#define CACHE_POLICY_NAME_SIZE 16
+
+struct dm_cache_policy_type {
+       /* For use by the register code only. */
+       struct list_head list;
+
+       /*
+        * Policy writers should fill in these fields.  The name field is
+        * what gets passed on the target line to select your policy.
+        */
+       char name[CACHE_POLICY_NAME_SIZE];
+
+       /*
+        * Policies may store a hint for each cache block.
+        * Currently the size of this hint must be 0 or 4 bytes but we
+        * expect to relax this in future.
+        */
+       size_t hint_size;
+
+       struct module *owner;
+       /* Factory method; returns NULL on failure. */
+       struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
+                                         sector_t origin_size,
+                                         sector_t block_size);
+};
+
+int dm_cache_policy_register(struct dm_cache_policy_type *type);
+void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_POLICY_H */
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
new file mode 100644 (file)
index 0000000..0f4e84b
--- /dev/null
@@ -0,0 +1,2584 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-bio-prison.h"
+#include "dm-cache-metadata.h"
+
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache"
+
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
+       "A percentage of time allocated for copying to and/or from cache");
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Glossary:
+ *
+ * oblock: index of an origin block
+ * cblock: index of a cache block
+ * promotion: movement of a block from origin to cache
+ * demotion: movement of a block from cache to origin
+ * migration: movement of a block between the origin and cache device,
+ *           either direction
+ */
+
+/*----------------------------------------------------------------*/
+
+/* Bytes needed for a bitset of nr_entries bits, rounded up to whole longs. */
+static size_t bitset_size_in_bytes(unsigned nr_entries)
+{
+       return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+}
+
+/* Allocate a zero-filled bitset (vmalloc-backed, so may be large). */
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+       size_t s = bitset_size_in_bytes(nr_entries);
+       return vzalloc(s);
+}
+
+/* Reset every bit in an existing bitset. */
+static void clear_bitset(void *bitset, unsigned nr_entries)
+{
+       size_t s = bitset_size_in_bytes(nr_entries);
+       memset(bitset, 0, s);
+}
+
+/* Counterpart to alloc_bitset(). */
+static void free_bitset(unsigned long *bits)
+{
+       vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+#define PRISON_CELLS 1024
+#define MIGRATION_POOL_SIZE 128
+#define COMMIT_PERIOD HZ
+#define MIGRATION_COUNT_WINDOW 10
+
+/*
+ * The block size of the device holding cache data must be >= 32KB
+ */
+#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+
+/*
+ * FIXME: the cache is read/write for the time being.
+ */
+enum cache_mode {
+       CM_WRITE,               /* metadata may be changed */
+       CM_READ_ONLY,           /* metadata may not be changed */
+};
+
+/* Feature flags parsed from the target line. */
+struct cache_features {
+       enum cache_mode mode;
+       bool write_through:1;
+};
+
+/* Hit/miss, migration and housekeeping counters for this target. */
+struct cache_stats {
+       atomic_t read_hit;
+       atomic_t read_miss;
+       atomic_t write_hit;
+       atomic_t write_miss;
+       atomic_t demotion;
+       atomic_t promotion;
+       atomic_t copies_avoided;
+       atomic_t cache_cell_clash;
+       atomic_t commit_count;
+       atomic_t discard_count;
+};
+
+/*
+ * Per-target state: the three underlying devices (metadata, origin,
+ * cache), geometry, policy instance, worker machinery and bookkeeping
+ * bitsets.
+ */
+struct cache {
+       struct dm_target *ti;
+       struct dm_target_callbacks callbacks;
+
+       /*
+        * Metadata is written to this device.
+        */
+       struct dm_dev *metadata_dev;
+
+       /*
+        * The slower of the two data devices.  Typically a spindle.
+        */
+       struct dm_dev *origin_dev;
+
+       /*
+        * The faster of the two data devices.  Typically an SSD.
+        */
+       struct dm_dev *cache_dev;
+
+       /*
+        * Cache features such as write-through.
+        */
+       struct cache_features features;
+
+       /*
+        * Size of the origin device in _complete_ blocks and native sectors.
+        */
+       dm_oblock_t origin_blocks;
+       sector_t origin_sectors;
+
+       /*
+        * Size of the cache device in blocks.
+        */
+       dm_cblock_t cache_size;
+
+       /*
+        * Fields for converting from sectors to blocks.
+        * sectors_per_block_shift is < 0 when the block size is not a
+        * power of two (see block_size_is_power_of_two()).
+        */
+       uint32_t sectors_per_block;
+       int sectors_per_block_shift;
+
+       struct dm_cache_metadata *cmd;
+
+       spinlock_t lock;
+       struct bio_list deferred_bios;
+       struct bio_list deferred_flush_bios;
+       struct list_head quiesced_migrations;
+       struct list_head completed_migrations;
+       struct list_head need_commit_migrations;
+       sector_t migration_threshold;
+       atomic_t nr_migrations;
+       wait_queue_head_t migration_wait;
+
+       /*
+        * cache_size entries, dirty if set
+        */
+       dm_cblock_t nr_dirty;
+       unsigned long *dirty_bitset;
+
+       /*
+        * origin_blocks entries, discarded if set.
+        */
+       sector_t discard_block_size; /* a power of 2 times sectors per block */
+       dm_dblock_t discard_nr_blocks;
+       unsigned long *discard_bitset;
+
+       struct dm_kcopyd_client *copier;
+       struct workqueue_struct *wq;
+       struct work_struct worker;
+
+       struct delayed_work waker;
+       unsigned long last_commit_jiffies;
+
+       struct dm_bio_prison *prison;
+       struct dm_deferred_set *all_io_ds;
+
+       mempool_t *migration_pool;
+       struct dm_cache_migration *next_migration;
+
+       struct dm_cache_policy *policy;
+       unsigned policy_nr_args;
+
+       bool need_tick_bio:1;
+       bool sized:1;
+       bool quiescing:1;
+       bool commit_requested:1;
+       bool loaded_mappings:1;
+       bool loaded_discards:1;
+
+       struct cache_stats stats;
+
+       /*
+        * Rather than reconstructing the table line for the status we just
+        * save it and regurgitate.
+        */
+       unsigned nr_ctr_args;
+       const char **ctr_args;
+};
+
+/* State carried in each bio's per-bio-data area. */
+struct per_bio_data {
+       bool tick:1;
+       unsigned req_nr:2;
+       struct dm_deferred_entry *all_io_entry;
+};
+
+/* One in-flight block copy between origin and cache (either direction). */
+struct dm_cache_migration {
+       struct list_head list;
+       struct cache *cache;
+
+       unsigned long start_jiffies;
+       dm_oblock_t old_oblock;
+       dm_oblock_t new_oblock;
+       dm_cblock_t cblock;
+
+       bool err:1;
+       bool writeback:1;
+       bool demote:1;
+       bool promote:1;
+
+       struct dm_bio_prison_cell *old_ocell;
+       struct dm_bio_prison_cell *new_ocell;
+};
+
+/*
+ * Processing a bio in the worker thread may require these memory
+ * allocations.  We prealloc to avoid deadlocks (the same worker thread
+ * frees them back to the mempool).
+ */
+struct prealloc {
+       struct dm_cache_migration *mg;
+       struct dm_bio_prison_cell *cell1;
+       struct dm_bio_prison_cell *cell2;
+};
+
+/* Kick the single worker thread. */
+static void wake_worker(struct cache *cache)
+{
+       queue_work(cache->wq, &cache->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+/* Allocate a prison cell; may return NULL (GFP_NOWAIT). */
+static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
+{
+       /* FIXME: change to use a local slab. */
+       return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
+}
+
+/* Counterpart to alloc_prison_cell(). */
+static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
+{
+       dm_bio_prison_free_cell(cache->prison, cell);
+}
+
+/*
+ * Top up @p with a migration struct and two prison cells.  On -ENOMEM
+ * anything already allocated stays in @p, so the caller can simply
+ * retry later or hand @p to prealloc_free_structs().
+ */
+static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
+{
+       if (!p->mg) {
+               p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+               if (!p->mg)
+                       return -ENOMEM;
+       }
+
+       if (!p->cell1) {
+               p->cell1 = alloc_prison_cell(cache);
+               if (!p->cell1)
+                       return -ENOMEM;
+       }
+
+       if (!p->cell2) {
+               p->cell2 = alloc_prison_cell(cache);
+               if (!p->cell2)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* Release whatever prealloc_data_structs() managed to allocate. */
+static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
+{
+       if (p->cell2)
+               free_prison_cell(cache, p->cell2);
+
+       if (p->cell1)
+               free_prison_cell(cache, p->cell1);
+
+       if (p->mg)
+               mempool_free(p->mg, cache->migration_pool);
+}
+
+/* Take ownership of the preallocated migration; BUGs if it's absent. */
+static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
+{
+       struct dm_cache_migration *mg = p->mg;
+
+       BUG_ON(!mg);
+       p->mg = NULL;
+
+       return mg;
+}
+
+/*
+ * You must have a cell within the prealloc struct to return.  If not this
+ * function will BUG() rather than returning NULL.
+ */
+static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
+{
+       struct dm_bio_prison_cell *r = NULL;
+
+       if (p->cell1) {
+               r = p->cell1;
+               p->cell1 = NULL;
+
+       } else if (p->cell2) {
+               r = p->cell2;
+               p->cell2 = NULL;
+       } else
+               BUG();
+
+       return r;
+}
+
+/*
+ * You can't have more than two cells in a prealloc struct.  BUG() will be
+ * called if you try and overfill.
+ */
+static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
+{
+       /* refill cell2 first, mirroring the take order in prealloc_get_cell() */
+       if (!p->cell2)
+               p->cell2 = cell;
+
+       else if (!p->cell1)
+               p->cell1 = cell;
+
+       else
+               BUG();
+}
+
+/*----------------------------------------------------------------*/
+
+/* Build a prison key for an origin block (single device, no virtual cells). */
+static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
+{
+       key->virtual = 0;
+       key->dev = 0;
+       key->block = from_oblock(oblock);
+}
+
+/*
+ * The caller hands in a preallocated cell, and a free function for it.
+ * The cell will be freed if there's an error, or if it wasn't used because
+ * a cell with that key already exists.
+ */
+typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
+
+/*
+ * Detain @bio against @oblock.  Non-zero return means the prealloc cell
+ * was not consumed and has been handed to @free_fn.
+ */
+static int bio_detain(struct cache *cache, dm_oblock_t oblock,
+                     struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
+                     cell_free_fn free_fn, void *free_context,
+                     struct dm_bio_prison_cell **cell_result)
+{
+       int r;
+       struct dm_cell_key key;
+
+       build_key(oblock, &key);
+       r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
+       if (r)
+               free_fn(free_context, cell_prealloc);
+
+       return r;
+}
+
+/*
+ * Lock @oblock with a cell taken from @structs (no bio attached).  On
+ * failure the cell is returned to the prealloc struct.
+ */
+static int get_cell(struct cache *cache,
+                   dm_oblock_t oblock,
+                   struct prealloc *structs,
+                   struct dm_bio_prison_cell **cell_result)
+{
+       int r;
+       struct dm_cell_key key;
+       struct dm_bio_prison_cell *cell_prealloc;
+
+       cell_prealloc = prealloc_get_cell(structs);
+
+       build_key(oblock, &key);
+       r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
+       if (r)
+               prealloc_put_cell(structs, cell_prealloc);
+
+       return r;
+}
+
+ /*----------------------------------------------------------------*/
+
+/* Is cache block @b marked dirty?  (atomic bitop, no lock needed) */
+static bool is_dirty(struct cache *cache, dm_cblock_t b)
+{
+       return test_bit(from_cblock(b), cache->dirty_bitset);
+}
+
+/*
+ * Mark @cblock dirty and tell the policy.  test_and_set keeps nr_dirty
+ * accurate under repeated calls.  NOTE(review): nr_dirty itself is
+ * updated without a lock -- presumably only the worker thread calls
+ * this; confirm.
+ */
+static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+{
+       if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
+               cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+               policy_set_dirty(cache->policy, oblock);
+       }
+}
+
+/* Inverse of set_dirty(); raises a table event when the cache goes clean. */
+static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+{
+       if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
+               policy_clear_dirty(cache->policy, oblock);
+               cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
+               if (!from_cblock(cache->nr_dirty))
+                       dm_table_event(cache->ti->table);
+       }
+}
+
+/*----------------------------------------------------------------*/
+/* A negative shift means the block size is not a power of two. */
+static bool block_size_is_power_of_two(struct cache *cache)
+{
+       return cache->sectors_per_block_shift >= 0;
+}
+
+/*
+ * Map an origin block to its discard-bitset block: first convert
+ * discard_block_size from sectors to origin blocks (shift when the
+ * block size is a power of two, sector_div otherwise), then divide.
+ */
+static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
+{
+       sector_t discard_blocks = cache->discard_block_size;
+       dm_block_t b = from_oblock(oblock);
+
+       if (!block_size_is_power_of_two(cache))
+               (void) sector_div(discard_blocks, cache->sectors_per_block);
+       else
+               discard_blocks >>= cache->sectors_per_block_shift;
+
+       (void) sector_div(b, discard_blocks);
+
+       return to_dblock(b);
+}
+
+/* Mark discard block @b discarded (irq-safe: bios complete in irq context). */
+static void set_discard(struct cache *cache, dm_dblock_t b)
+{
+       unsigned long flags;
+
+       atomic_inc(&cache->stats.discard_count);
+
+       spin_lock_irqsave(&cache->lock, flags);
+       set_bit(from_dblock(b), cache->discard_bitset);
+       spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/* Inverse of set_discard() (no stats counter for clears). */
+static void clear_discard(struct cache *cache, dm_dblock_t b)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       clear_bit(from_dblock(b), cache->discard_bitset);
+       spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/* Test the discard bit for discard block @b. */
+static bool is_discarded(struct cache *cache, dm_dblock_t b)
+{
+       int r;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       r = test_bit(from_dblock(b), cache->discard_bitset);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       return r;
+}
+
+/* As is_discarded(), but takes an origin block and converts it first. */
+static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
+{
+       int r;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
+                    cache->discard_bitset);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/* Seed the in-core hit/miss counters from the persisted metadata. */
+static void load_stats(struct cache *cache)
+{
+       struct dm_cache_statistics stats;
+
+       dm_cache_metadata_get_stats(cache->cmd, &stats);
+       atomic_set(&cache->stats.read_hit, stats.read_hits);
+       atomic_set(&cache->stats.read_miss, stats.read_misses);
+       atomic_set(&cache->stats.write_hit, stats.write_hits);
+       atomic_set(&cache->stats.write_miss, stats.write_misses);
+}
+
+/* Write the in-core hit/miss counters back to the metadata device. */
+static void save_stats(struct cache *cache)
+{
+       struct dm_cache_statistics stats;
+
+       stats.read_hits = atomic_read(&cache->stats.read_hit);
+       stats.read_misses = atomic_read(&cache->stats.read_miss);
+       stats.write_hits = atomic_read(&cache->stats.write_hit);
+       stats.write_misses = atomic_read(&cache->stats.write_miss);
+
+       dm_cache_metadata_set_stats(cache->cmd, &stats);
+}
+
+/*----------------------------------------------------------------
+ * Per bio data
+ *--------------------------------------------------------------*/
+/* Fetch the per-bio data dm core reserves for this target's bios. */
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
+{
+       struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+       BUG_ON(!pb);
+       return pb;
+}
+
+/* Initialise the per-bio data for a bio entering the target. */
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
+{
+       struct per_bio_data *pb = get_per_bio_data(bio);
+
+       pb->tick = false;
+       pb->req_nr = dm_bio_get_target_bio_nr(bio);
+       pb->all_io_entry = NULL;
+
+       return pb;
+}
+
+/*----------------------------------------------------------------
+ * Remapping
+ *--------------------------------------------------------------*/
+/* Redirect the bio to the origin (slow) device; sector unchanged. */
+static void remap_to_origin(struct cache *cache, struct bio *bio)
+{
+       bio->bi_bdev = cache->origin_dev->bdev;
+}
+
+/*
+ * Redirect the bio to @cblock on the cache (fast) device, preserving
+ * the offset within the block.  The power-of-two path avoids a 64 bit
+ * division.
+ */
+static void remap_to_cache(struct cache *cache, struct bio *bio,
+                          dm_cblock_t cblock)
+{
+       sector_t bi_sector = bio->bi_sector;
+
+       bio->bi_bdev = cache->cache_dev->bdev;
+       if (!block_size_is_power_of_two(cache))
+               bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
+                               sector_div(bi_sector, cache->sectors_per_block);
+       else
+               bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
+                               (bi_sector & (cache->sectors_per_block - 1));
+}
+
+/*
+ * If a policy tick is pending, tag this bio to carry it.  Flush, FUA
+ * and discard bios are never used for the tick.
+ */
+static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
+{
+       unsigned long flags;
+       struct per_bio_data *pb = get_per_bio_data(bio);
+
+       spin_lock_irqsave(&cache->lock, flags);
+       if (cache->need_tick_bio &&
+           !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+               pb->tick = true;
+               cache->need_tick_bio = false;
+       }
+       spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/* Remap to the origin; a write also clears the block's discard flag. */
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+                                 dm_oblock_t oblock)
+{
+       check_if_tick_bio_needed(cache, bio);
+       remap_to_origin(cache, bio);
+       if (bio_data_dir(bio) == WRITE)
+               clear_discard(cache, oblock_to_dblock(cache, oblock));
+}
+
+/*
+ * Remap to the cache; a write marks the cache block dirty and clears
+ * the discard flag.
+ */
+static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
+                                dm_oblock_t oblock, dm_cblock_t cblock)
+{
+       remap_to_cache(cache, bio, cblock);
+       if (bio_data_dir(bio) == WRITE) {
+               set_dirty(cache, oblock, cblock);
+               clear_discard(cache, oblock_to_dblock(cache, oblock));
+       }
+}
+
+/* Which origin block does this bio's start sector fall in? */
+static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
+{
+       sector_t block_nr = bio->bi_sector;
+
+       if (!block_size_is_power_of_two(cache))
+               (void) sector_div(block_nr, cache->sectors_per_block);
+       else
+               block_nr >>= cache->sectors_per_block_shift;
+
+       return to_oblock(block_nr);
+}
+
+/* Does this bio require a metadata commit before it completes? */
+static int bio_triggers_commit(struct cache *cache, struct bio *bio)
+{
+       return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+}
+
+/*
+ * Issue the bio immediately, unless it needs a commit first, in which
+ * case it is deferred for do_worker() to handle after committing.
+ */
+static void issue(struct cache *cache, struct bio *bio)
+{
+       unsigned long flags;
+
+       if (!bio_triggers_commit(cache, bio)) {
+               generic_make_request(bio);
+               return;
+       }
+
+       /*
+        * Batch together any bios that trigger commits and then issue a
+        * single commit for them in do_worker().
+        */
+       spin_lock_irqsave(&cache->lock, flags);
+       cache->commit_requested = true;
+       bio_list_add(&cache->deferred_flush_bios, bio);
+       spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/*----------------------------------------------------------------
+ * Migration processing
+ *
+ * Migration covers moving data from the origin device to the cache, or
+ * vice versa.
+ *--------------------------------------------------------------*/
+/* Return a migration struct to its cache's mempool. */
+static void free_migration(struct dm_cache_migration *mg)
+{
+       mempool_free(mg, mg->cache->migration_pool);
+}
+
+/* Account one more in-flight migration. */
+static void inc_nr_migrations(struct cache *cache)
+{
+       atomic_inc(&cache->nr_migrations);
+}
+
+/* Account one fewer in-flight migration. */
+static void dec_nr_migrations(struct cache *cache)
+{
+       atomic_dec(&cache->nr_migrations);
+
+       /*
+        * Wake the worker in case we're suspending the target.
+        */
+       wake_up(&cache->migration_wait);
+}
+
+/*
+ * Release a prison cell, moving its bios onto the deferred list; with
+ * @holder false the holder bio is excluded from the release.  Called
+ * with cache->lock held (see cell_defer()).
+ */
+static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+                        bool holder)
+{
+       (holder ? dm_cell_release : dm_cell_release_no_holder)
+               (cache->prison, cell, &cache->deferred_bios);
+       free_prison_cell(cache, cell);
+}
+
+/* Locked wrapper around __cell_defer() that also wakes the worker. */
+static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+                      bool holder)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       __cell_defer(cache, cell, holder);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       wake_worker(cache);
+}
+
+/* Drop the migration's accounting and free it. */
+static void cleanup_migration(struct dm_cache_migration *mg)
+{
+       dec_nr_migrations(mg->cache);
+       free_migration(mg);
+}
+
+/*
+ * The copy failed: undo any policy state changed for this migration
+ * and release the cells that were held.
+ */
+static void migration_failure(struct dm_cache_migration *mg)
+{
+       struct cache *cache = mg->cache;
+
+       if (mg->writeback) {
+               DMWARN_LIMIT("writeback failed; couldn't copy block");
+               /* The block is still dirty -- put the dirty bit back. */
+               set_dirty(cache, mg->old_oblock, mg->cblock);
+               cell_defer(cache, mg->old_ocell, false);
+
+       } else if (mg->demote) {
+               DMWARN_LIMIT("demotion failed; couldn't copy block");
+               policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
+
+               cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+               if (mg->promote)
+                       cell_defer(cache, mg->new_ocell, 1);
+       } else {
+               DMWARN_LIMIT("promotion failed; couldn't copy block");
+               policy_remove_mapping(cache->policy, mg->new_oblock);
+               cell_defer(cache, mg->new_ocell, 1);
+       }
+
+       cleanup_migration(mg);
+}
+
+/*
+ * The data copy succeeded; update the on-disk metadata.  demote and
+ * promote results are then parked on need_commit_migrations so their
+ * cells are only released after the metadata commit.
+ */
+static void migration_success_pre_commit(struct dm_cache_migration *mg)
+{
+       unsigned long flags;
+       struct cache *cache = mg->cache;
+
+       if (mg->writeback) {
+               /* Writebacks change no mapping, so need no commit. */
+               cell_defer(cache, mg->old_ocell, false);
+               clear_dirty(cache, mg->old_oblock, mg->cblock);
+               cleanup_migration(mg);
+               return;
+
+       } else if (mg->demote) {
+               if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
+                       DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
+                       policy_force_mapping(cache->policy, mg->new_oblock,
+                                            mg->old_oblock);
+                       if (mg->promote)
+                               cell_defer(cache, mg->new_ocell, true);
+                       cleanup_migration(mg);
+                       return;
+               }
+       } else {
+               if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
+                       DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
+                       policy_remove_mapping(cache->policy, mg->new_oblock);
+                       cleanup_migration(mg);
+                       return;
+               }
+       }
+
+       spin_lock_irqsave(&cache->lock, flags);
+       list_add_tail(&mg->list, &cache->need_commit_migrations);
+       cache->commit_requested = true;
+       spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/*
+ * Called for migrations on need_commit_migrations once the metadata
+ * commit covering them has landed.
+ */
+static void migration_success_post_commit(struct dm_cache_migration *mg)
+{
+       unsigned long flags;
+       struct cache *cache = mg->cache;
+
+       if (mg->writeback) {
+               /* Writebacks complete in the pre-commit step. */
+               DMWARN("writeback unexpectedly triggered commit");
+               return;
+
+       } else if (mg->demote) {
+               cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+
+               if (mg->promote) {
+                       /* Demote half done; requeue as a pure promotion. */
+                       mg->demote = false;
+
+                       spin_lock_irqsave(&cache->lock, flags);
+                       list_add_tail(&mg->list, &cache->quiesced_migrations);
+                       spin_unlock_irqrestore(&cache->lock, flags);
+
+               } else
+                       cleanup_migration(mg);
+
+       } else {
+               cell_defer(cache, mg->new_ocell, true);
+               clear_dirty(cache, mg->new_oblock, mg->cblock);
+               cleanup_migration(mg);
+       }
+}
+
+/* kcopyd completion callback: note any error and pass to the worker. */
+static void copy_complete(int read_err, unsigned long write_err, void *context)
+{
+       unsigned long flags;
+       struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
+       struct cache *cache = mg->cache;
+
+       if (read_err || write_err)
+               mg->err = true;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       list_add_tail(&mg->list, &cache->completed_migrations);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       wake_worker(cache);
+}
+
+/*
+ * Start the data copy with kcopyd.  Writeback/demote copy from the
+ * cache device to the origin; promote copies the other way.
+ */
+static void issue_copy_real(struct dm_cache_migration *mg)
+{
+       int r;
+       struct dm_io_region o_region, c_region;
+       struct cache *cache = mg->cache;
+
+       o_region.bdev = cache->origin_dev->bdev;
+       o_region.count = cache->sectors_per_block;
+
+       c_region.bdev = cache->cache_dev->bdev;
+       c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+       c_region.count = cache->sectors_per_block;
+
+       if (mg->writeback || mg->demote) {
+               /* demote */
+               o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
+               r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
+       } else {
+               /* promote */
+               o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
+               r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
+       }
+
+       if (r < 0)
+               migration_failure(mg);
+}
+
+/* Skip the data copy but still take the normal success path. */
+static void avoid_copy(struct dm_cache_migration *mg)
+{
+       atomic_inc(&mg->cache->stats.copies_avoided);
+       migration_success_pre_commit(mg);
+}
+
+/*
+ * Decide whether the copy can be elided: a clean or discarded source
+ * block carries no data worth moving.
+ */
+static void issue_copy(struct dm_cache_migration *mg)
+{
+       bool avoid;
+       struct cache *cache = mg->cache;
+
+       if (mg->writeback || mg->demote)
+               avoid = !is_dirty(cache, mg->cblock) ||
+                       is_discarded_oblock(cache, mg->old_oblock);
+       else
+               avoid = is_discarded_oblock(cache, mg->new_oblock);
+
+       avoid ? avoid_copy(mg) : issue_copy_real(mg);
+}
+
+/* Route a finished copy to its failure or success handler. */
+static void complete_migration(struct dm_cache_migration *mg)
+{
+       if (mg->err)
+               migration_failure(mg);
+       else
+               migration_success_pre_commit(mg);
+}
+
+/*
+ * Splice @head onto a private list under the lock, then run @fn on
+ * each migration with the lock dropped.
+ */
+static void process_migrations(struct cache *cache, struct list_head *head,
+                              void (*fn)(struct dm_cache_migration *))
+{
+       unsigned long flags;
+       struct list_head list;
+       struct dm_cache_migration *mg, *tmp;
+
+       INIT_LIST_HEAD(&list);
+       spin_lock_irqsave(&cache->lock, flags);
+       list_splice_init(head, &list);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       list_for_each_entry_safe(mg, tmp, &list, list)
+               fn(mg);
+}
+
+/* Caller must hold cache->lock. */
+static void __queue_quiesced_migration(struct dm_cache_migration *mg)
+{
+       list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
+}
+
+/* Queue a single quiesced migration and wake the worker. */
+static void queue_quiesced_migration(struct dm_cache_migration *mg)
+{
+       unsigned long flags;
+       struct cache *cache = mg->cache;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       __queue_quiesced_migration(mg);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       wake_worker(cache);
+}
+
+/* Queue a whole list of quiesced migrations and wake the worker. */
+static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
+{
+       unsigned long flags;
+       struct dm_cache_migration *mg, *tmp;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       list_for_each_entry_safe(mg, tmp, work, list)
+               __queue_quiesced_migration(mg);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       wake_worker(cache);
+}
+
+/*
+ * On bio completion: drop the bio's deferred-set entry and queue any
+ * migrations whose quiescing period that completes.
+ *
+ * The early return already guarantees pb->all_io_entry is non-NULL,
+ * so the second test the original code made before calling
+ * dm_deferred_entry_dec() was redundant and has been dropped.
+ */
+static void check_for_quiesced_migrations(struct cache *cache,
+                                         struct per_bio_data *pb)
+{
+       struct list_head work;
+
+       if (!pb->all_io_entry)
+               return;
+
+       INIT_LIST_HEAD(&work);
+       dm_deferred_entry_dec(pb->all_io_entry, &work);
+
+       if (!list_empty(&work))
+               queue_quiesced_migrations(cache, &work);
+}
+
+/*
+ * Hold the migration until outstanding deferred io has drained; if
+ * nothing is outstanding it is queued immediately.
+ */
+static void quiesce_migration(struct dm_cache_migration *mg)
+{
+       if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
+               queue_quiesced_migration(mg);
+}
+
+/* Launch a promotion: copy @oblock from the origin into @cblock. */
+static void promote(struct cache *cache, struct prealloc *structs,
+                   dm_oblock_t oblock, dm_cblock_t cblock,
+                   struct dm_bio_prison_cell *cell)
+{
+       struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+       mg->err = false;
+       mg->writeback = false;
+       mg->demote = false;
+       mg->promote = true;
+       mg->cache = cache;
+       mg->new_oblock = oblock;
+       mg->cblock = cblock;
+       mg->old_ocell = NULL;
+       mg->new_ocell = cell;
+       mg->start_jiffies = jiffies;
+
+       inc_nr_migrations(cache);
+       quiesce_migration(mg);
+}
+
+/* Launch a writeback: copy dirty @cblock back to @oblock on the origin. */
+static void writeback(struct cache *cache, struct prealloc *structs,
+                     dm_oblock_t oblock, dm_cblock_t cblock,
+                     struct dm_bio_prison_cell *cell)
+{
+       struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+       mg->err = false;
+       mg->writeback = true;
+       mg->demote = false;
+       mg->promote = false;
+       mg->cache = cache;
+       mg->old_oblock = oblock;
+       mg->cblock = cblock;
+       mg->old_ocell = cell;
+       mg->new_ocell = NULL;
+       mg->start_jiffies = jiffies;
+
+       inc_nr_migrations(cache);
+       quiesce_migration(mg);
+}
+
+/*
+ * Launch a combined migration that evicts @old_oblock from @cblock and
+ * then promotes @new_oblock into it.
+ */
+static void demote_then_promote(struct cache *cache, struct prealloc *structs,
+                               dm_oblock_t old_oblock, dm_oblock_t new_oblock,
+                               dm_cblock_t cblock,
+                               struct dm_bio_prison_cell *old_ocell,
+                               struct dm_bio_prison_cell *new_ocell)
+{
+       struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+       mg->err = false;
+       mg->writeback = false;
+       mg->demote = true;
+       mg->promote = true;
+       mg->cache = cache;
+       mg->old_oblock = old_oblock;
+       mg->new_oblock = new_oblock;
+       mg->cblock = cblock;
+       mg->old_ocell = old_ocell;
+       mg->new_ocell = new_ocell;
+       mg->start_jiffies = jiffies;
+
+       inc_nr_migrations(cache);
+       quiesce_migration(mg);
+}
+
+/*----------------------------------------------------------------
+ * bio processing
+ *--------------------------------------------------------------*/
+/* Park a bio on the deferred list and poke the worker thread. */
+static void defer_bio(struct cache *cache, struct bio *bio)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       bio_list_add(&cache->deferred_bios, bio);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       wake_worker(cache);
+}
+
+/*
+ * Flushes are duplicated per underlying device: req_nr 0 goes to the
+ * origin, any other copy to the cache device.
+ */
+static void process_flush_bio(struct cache *cache, struct bio *bio)
+{
+       struct per_bio_data *pb = get_per_bio_data(bio);
+
+       /* Flushes handled here must carry no data. */
+       BUG_ON(bio->bi_size);
+       if (!pb->req_nr)
+               remap_to_origin(cache, bio);
+       else
+               remap_to_cache(cache, bio, 0);
+
+       issue(cache, bio);
+}
+
+/*
+ * People generally discard large parts of a device, eg, the whole device
+ * when formatting.  Splitting these large discards up into cache block
+ * sized ios and then quiescing (always necessary for discard) takes too
+ * long.
+ *
+ * We keep it simple, and allow any size of discard to come in, and just
+ * mark off blocks on the discard bitset.  No passdown occurs!
+ *
+ * To implement passdown we need to change the bio_prison such that a cell
+ * can have a key that spans many blocks.
+ */
+static void process_discard_bio(struct cache *cache, struct bio *bio)
+{
+       /* Round the start up and the end down: partial blocks are skipped. */
+       dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+                                                 cache->discard_block_size);
+       dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+       dm_block_t b;
+
+       (void) sector_div(end_block, cache->discard_block_size);
+
+       for (b = start_block; b < end_block; b++)
+               set_discard(cache, to_dblock(b));
+
+       bio_endio(bio, 0);
+}
+
+/*
+ * Is there headroom under migration_threshold (in sectors) for one
+ * more migration?
+ */
+static bool spare_migration_bandwidth(struct cache *cache)
+{
+       sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+               cache->sectors_per_block;
+       return current_volume < cache->migration_threshold;
+}
+
+/* Is this a write to a clean block while in write-through mode? */
+static bool is_writethrough_io(struct cache *cache, struct bio *bio,
+                              dm_cblock_t cblock)
+{
+       return bio_data_dir(bio) == WRITE &&
+               cache->features.write_through && !is_dirty(cache, cblock);
+}
+
+/* Bump the read or write hit counter, depending on the bio direction. */
+static void inc_hit_counter(struct cache *cache, struct bio *bio)
+{
+       if (bio_data_dir(bio) == READ)
+               atomic_inc(&cache->stats.read_hit);
+       else
+               atomic_inc(&cache->stats.write_hit);
+}
+
+/* Bump the read or write miss counter, depending on the bio direction. */
+static void inc_miss_counter(struct cache *cache, struct bio *bio)
+{
+       if (bio_data_dir(bio) == READ)
+               atomic_inc(&cache->stats.read_miss);
+       else
+               atomic_inc(&cache->stats.write_miss);
+}
+
+/*
+ * Handle a normal (non flush, non discard) bio: look the block up in
+ * the policy and remap, promote or demote+promote accordingly.
+ */
+static void process_bio(struct cache *cache, struct prealloc *structs,
+                       struct bio *bio)
+{
+       int r;
+       bool release_cell = true;
+       dm_oblock_t block = get_bio_block(cache, bio);
+       struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
+       struct policy_result lookup_result;
+       struct per_bio_data *pb = get_per_bio_data(bio);
+       bool discarded_block = is_discarded_oblock(cache, block);
+       bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
+
+       /*
+        * Check to see if that block is currently migrating.
+        */
+       cell_prealloc = prealloc_get_cell(structs);
+       r = bio_detain(cache, block, bio, cell_prealloc,
+                      (cell_free_fn) prealloc_put_cell,
+                      structs, &new_ocell);
+       if (r > 0)
+               return;
+
+       r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+                      bio, &lookup_result);
+
+       if (r == -EWOULDBLOCK)
+               /* migration has been denied */
+               lookup_result.op = POLICY_MISS;
+
+       switch (lookup_result.op) {
+       case POLICY_HIT:
+               inc_hit_counter(cache, bio);
+               pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+               if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
+                       /*
+                        * No need to mark anything dirty in write through mode.
+                        */
+                       pb->req_nr == 0 ?
+                               remap_to_cache(cache, bio, lookup_result.cblock) :
+                               remap_to_origin_clear_discard(cache, bio, block);
+               } else
+                       remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+
+               issue(cache, bio);
+               break;
+
+       case POLICY_MISS:
+               inc_miss_counter(cache, bio);
+               pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+               if (pb->req_nr != 0) {
+                       /*
+                        * This is a duplicate writethrough io that is no
+                        * longer needed because the block has been demoted.
+                        */
+                       bio_endio(bio, 0);
+               } else {
+                       remap_to_origin_clear_discard(cache, bio, block);
+                       issue(cache, bio);
+               }
+               break;
+
+       /* The policy wants the block promoted into the cache. */
+       case POLICY_NEW:
+               atomic_inc(&cache->stats.promotion);
+               promote(cache, structs, block, lookup_result.cblock, new_ocell);
+               /* The migration owns the cell now. */
+               release_cell = false;
+               break;
+
+       /* Evict old_oblock from its cache block, then promote this one. */
+       case POLICY_REPLACE:
+               cell_prealloc = prealloc_get_cell(structs);
+               r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
+                              (cell_free_fn) prealloc_put_cell,
+                              structs, &old_ocell);
+               if (r > 0) {
+                       /*
+                        * We have to be careful to avoid lock inversion of
+                        * the cells.  So we back off, and wait for the
+                        * old_ocell to become free.
+                        */
+                       policy_force_mapping(cache->policy, block,
+                                            lookup_result.old_oblock);
+                       atomic_inc(&cache->stats.cache_cell_clash);
+                       break;
+               }
+               atomic_inc(&cache->stats.demotion);
+               atomic_inc(&cache->stats.promotion);
+
+               demote_then_promote(cache, structs, lookup_result.old_oblock,
+                                   block, lookup_result.cblock,
+                                   old_ocell, new_ocell);
+               release_cell = false;
+               break;
+
+       default:
+               DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
+                           (unsigned) lookup_result.op);
+               bio_io_error(bio);
+       }
+
+       if (release_cell)
+               cell_defer(cache, new_ocell, false);
+}
+
+/*
+ * Has COMMIT_PERIOD elapsed since the last metadata commit?
+ *
+ * Use the jiffies wrap-safe time_before()/time_after() helpers: the
+ * previous open-coded comparison (jiffies < last_commit_jiffies ||
+ * jiffies > last_commit_jiffies + COMMIT_PERIOD) gives wrong answers
+ * around a jiffies wrap.
+ */
+static int need_commit_due_to_time(struct cache *cache)
+{
+       return time_before(jiffies, cache->last_commit_jiffies) ||
+              time_after(jiffies, cache->last_commit_jiffies + COMMIT_PERIOD);
+}
+
+/*
+ * Commit the metadata if there are uncommitted changes and either a
+ * commit was requested or the commit period has elapsed.  Returns the
+ * commit's error code, or 0 when no commit was needed.
+ */
+static int commit_if_needed(struct cache *cache)
+{
+       if (dm_cache_changed_this_transaction(cache->cmd) &&
+           (cache->commit_requested || need_commit_due_to_time(cache))) {
+               atomic_inc(&cache->stats.commit_count);
+               cache->last_commit_jiffies = jiffies;
+               cache->commit_requested = false;
+               return dm_cache_commit(cache->cmd, false);
+       }
+
+       return 0;
+}
+
+/* Drain the deferred bio list, dispatching each bio by its type. */
+static void process_deferred_bios(struct cache *cache)
+{
+       unsigned long flags;
+       struct bio_list bios;
+       struct bio *bio;
+       struct prealloc structs;
+
+       memset(&structs, 0, sizeof(structs));
+       bio_list_init(&bios);
+
+       spin_lock_irqsave(&cache->lock, flags);
+       bio_list_merge(&bios, &cache->deferred_bios);
+       bio_list_init(&cache->deferred_bios);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       while (!bio_list_empty(&bios)) {
+               /*
+                * If we've got no free migration structs, and processing
+                * this bio might require one, we pause until there are some
+                * prepared mappings to process.
+                */
+               if (prealloc_data_structs(cache, &structs)) {
+                       /* Put the unprocessed bios back for next time. */
+                       spin_lock_irqsave(&cache->lock, flags);
+                       bio_list_merge(&cache->deferred_bios, &bios);
+                       spin_unlock_irqrestore(&cache->lock, flags);
+                       break;
+               }
+
+               bio = bio_list_pop(&bios);
+
+               if (bio->bi_rw & REQ_FLUSH)
+                       process_flush_bio(cache, bio);
+               else if (bio->bi_rw & REQ_DISCARD)
+                       process_discard_bio(cache, bio);
+               else
+                       process_bio(cache, &structs, bio);
+       }
+
+       prealloc_free_structs(cache, &structs);
+}
+
+/*
+ * Submit the bios that were waiting on a metadata commit, or error
+ * them if the commit failed (@submit_bios false).
+ */
+static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
+{
+       unsigned long flags;
+       struct bio_list bios;
+       struct bio *bio;
+
+       bio_list_init(&bios);
+
+       spin_lock_irqsave(&cache->lock, flags);
+       bio_list_merge(&bios, &cache->deferred_flush_bios);
+       bio_list_init(&cache->deferred_flush_bios);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       while ((bio = bio_list_pop(&bios)))
+               submit_bios ? generic_make_request(bio) : bio_io_error(bio);
+}
+
+/*
+ * Use any spare migration bandwidth to write dirty blocks back to the
+ * origin, as directed by the policy.
+ */
+static void writeback_some_dirty_blocks(struct cache *cache)
+{
+       int r = 0;
+       dm_oblock_t oblock;
+       dm_cblock_t cblock;
+       struct prealloc structs;
+       struct dm_bio_prison_cell *old_ocell;
+
+       memset(&structs, 0, sizeof(structs));
+
+       while (spare_migration_bandwidth(cache)) {
+               if (prealloc_data_structs(cache, &structs))
+                       break;
+
+               r = policy_writeback_work(cache->policy, &oblock, &cblock);
+               if (r)
+                       break;
+
+               r = get_cell(cache, oblock, &structs, &old_ocell);
+               if (r) {
+                       /* Couldn't lock the block; give it back to the policy. */
+                       policy_set_dirty(cache->policy, oblock);
+                       break;
+               }
+
+               writeback(cache, &structs, oblock, cblock, old_ocell);
+       }
+
+       prealloc_free_structs(cache, &structs);
+}
+
+/*----------------------------------------------------------------
+ * Main worker loop
+ *--------------------------------------------------------------*/
+/* Raise the quiescing flag under cache->lock. */
+static void start_quiescing(struct cache *cache)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       cache->quiescing = 1;
+       spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/* Drop the quiescing flag under cache->lock. */
+static void stop_quiescing(struct cache *cache)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       cache->quiescing = 0;
+       spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/* Read the quiescing flag under cache->lock. */
+static bool is_quiescing(struct cache *cache)
+{
+       int r;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       r = cache->quiescing;
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       return r;
+}
+
+/* Sleep until every in-flight migration has completed. */
+static void wait_for_migrations(struct cache *cache)
+{
+       wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+}
+
+/* Cancel the periodic waker and flush any queued worker runs. */
+static void stop_worker(struct cache *cache)
+{
+       cancel_delayed_work(&cache->waker);
+       flush_workqueue(cache->wq);
+}
+
+/* End all deferred bios with DM_ENDIO_REQUEUE so they get resubmitted. */
+static void requeue_deferred_io(struct cache *cache)
+{
+       struct bio *bio;
+       struct bio_list bios;
+
+       bio_list_init(&bios);
+       bio_list_merge(&bios, &cache->deferred_bios);
+       bio_list_init(&cache->deferred_bios);
+
+       while ((bio = bio_list_pop(&bios)))
+               bio_endio(bio, DM_ENDIO_REQUEUE);
+}
+
+/*
+ * Is there anything for the worker to do?  While quiescing only
+ * migration work counts; otherwise deferred bios count too.
+ */
+static int more_work(struct cache *cache)
+{
+       if (is_quiescing(cache))
+               return !list_empty(&cache->quiesced_migrations) ||
+                       !list_empty(&cache->completed_migrations) ||
+                       !list_empty(&cache->need_commit_migrations);
+       else
+               return !bio_list_empty(&cache->deferred_bios) ||
+                       !bio_list_empty(&cache->deferred_flush_bios) ||
+                       !list_empty(&cache->quiesced_migrations) ||
+                       !list_empty(&cache->completed_migrations) ||
+                       !list_empty(&cache->need_commit_migrations);
+}
+
+/* The main worker: drives deferred bios, migrations and commits. */
+static void do_worker(struct work_struct *ws)
+{
+       struct cache *cache = container_of(ws, struct cache, worker);
+
+       do {
+               if (!is_quiescing(cache))
+                       process_deferred_bios(cache);
+
+               process_migrations(cache, &cache->quiesced_migrations, issue_copy);
+               process_migrations(cache, &cache->completed_migrations, complete_migration);
+
+               writeback_some_dirty_blocks(cache);
+
+               if (commit_if_needed(cache)) {
+                       /* Commit failed: error the bios that depended on it. */
+                       process_deferred_flush_bios(cache, false);
+
+                       /*
+                        * FIXME: rollback metadata or just go into a
+                        * failure mode and error everything
+                        */
+               } else {
+                       process_deferred_flush_bios(cache, true);
+                       process_migrations(cache, &cache->need_commit_migrations,
+                                          migration_success_post_commit);
+               }
+       } while (more_work(cache));
+}
+
+/*
+ * We want to commit periodically so that not too much
+ * unwritten metadata builds up.
+ */
+static void do_waker(struct work_struct *ws)
+{
+       struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+       wake_worker(cache);
+       queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
+}
+
+/*----------------------------------------------------------------*/
+
+/* Query one underlying device's backing_dev_info for congestion. */
+static int is_congested(struct dm_dev *dev, int bdi_bits)
+{
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+       return bdi_congested(&q->backing_dev_info, bdi_bits);
+}
+
+/* dm callback: the target is congested if either device is. */
+static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
+{
+       struct cache *cache = container_of(cb, struct cache, callbacks);
+
+       return is_congested(cache->origin_dev, bdi_bits) ||
+               is_congested(cache->cache_dev, bdi_bits);
+}
+
+/*----------------------------------------------------------------
+ * Target methods
+ *--------------------------------------------------------------*/
+
+/*
+ * This function gets called on the error paths of the constructor, so we
+ * have to cope with a partially initialised struct.
+ */
+static void destroy(struct cache *cache)
+{
+       unsigned i;
+
+       /* Resources are released roughly in reverse order of creation. */
+       if (cache->next_migration)
+               mempool_free(cache->next_migration, cache->migration_pool);
+
+       if (cache->migration_pool)
+               mempool_destroy(cache->migration_pool);
+
+       if (cache->all_io_ds)
+               dm_deferred_set_destroy(cache->all_io_ds);
+
+       if (cache->prison)
+               dm_bio_prison_destroy(cache->prison);
+
+       if (cache->wq)
+               destroy_workqueue(cache->wq);
+
+       if (cache->dirty_bitset)
+               free_bitset(cache->dirty_bitset);
+
+       if (cache->discard_bitset)
+               free_bitset(cache->discard_bitset);
+
+       if (cache->copier)
+               dm_kcopyd_client_destroy(cache->copier);
+
+       if (cache->cmd)
+               dm_cache_metadata_close(cache->cmd);
+
+       if (cache->metadata_dev)
+               dm_put_device(cache->ti, cache->metadata_dev);
+
+       if (cache->origin_dev)
+               dm_put_device(cache->ti, cache->origin_dev);
+
+       if (cache->cache_dev)
+               dm_put_device(cache->ti, cache->cache_dev);
+
+       if (cache->policy)
+               dm_cache_policy_destroy(cache->policy);
+
+       /* Free the saved copies of the constructor arguments. */
+       for (i = 0; i < cache->nr_ctr_args ; i++)
+               kfree(cache->ctr_args[i]);
+       kfree(cache->ctr_args);
+
+       kfree(cache);
+}
+
+/* dm target destructor: tear the whole cache down. */
+static void cache_dtr(struct dm_target *ti)
+{
+       struct cache *cache = ti->private;
+
+       destroy(cache);
+}
+
+static sector_t get_dev_size(struct dm_dev *dev)
+{
+       return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Construct a cache device mapping.
+ *
+ * cache <metadata dev> <cache dev> <origin dev> <block size>
+ *       <#feature args> [<feature arg>]*
+ *       <policy> <#policy args> [<policy arg>]*
+ *
+ * metadata dev    : fast device holding the persistent metadata
+ * cache dev      : fast device holding cached data blocks
+ * origin dev     : slow device holding original data blocks
+ * block size     : cache unit size in sectors
+ *
+ * #feature args   : number of feature arguments passed
+ * feature args    : writethrough.  (The default is writeback.)
+ *
+ * policy         : the replacement policy to use
+ * #policy args    : an even number of policy arguments corresponding
+ *                  to key/value pairs passed to the policy
+ * policy args    : key/value pairs passed to the policy
+ *                  E.g. 'sequential_threshold 1024'
+ *                  See cache-policies.txt for details.
+ *
+ * Optional feature arguments are:
+ *   writethrough  : write through caching that prohibits cache block
+ *                  content from being different from origin block content.
+ *                  Without this argument, the default behaviour is to write
+ *                  back cache block contents later for performance reasons,
+ *                  so they may differ from the corresponding origin blocks.
+ */
/*
 * Parsed constructor arguments.  Device references held here are dropped
 * by destroy_cache_args() unless ownership has been transferred to the
 * cache (cache_create() NULLs the fields it takes over).
 */
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;		/* size of cache_dev in sectors */

	struct dm_dev *origin_dev;
	sector_t origin_sectors;	/* size of origin_dev in sectors */

	uint32_t block_size;		/* cache block size in sectors */

	const char *policy_name;
	int policy_argc;		/* count of key/value strings below */
	const char **policy_argv;	/* points into the ctr argv, not copied */

	struct cache_features features;
};
+
+static void destroy_cache_args(struct cache_args *ca)
+{
+       if (ca->metadata_dev)
+               dm_put_device(ca->ti, ca->metadata_dev);
+
+       if (ca->cache_dev)
+               dm_put_device(ca->ti, ca->cache_dev);
+
+       if (ca->origin_dev)
+               dm_put_device(ca->ti, ca->origin_dev);
+
+       kfree(ca);
+}
+
+static bool at_least_one_arg(struct dm_arg_set *as, char **error)
+{
+       if (!as->argc) {
+               *error = "Insufficient args";
+               return false;
+       }
+
+       return true;
+}
+
+static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
+                             char **error)
+{
+       int r;
+       sector_t metadata_dev_size;
+       char b[BDEVNAME_SIZE];
+
+       if (!at_least_one_arg(as, error))
+               return -EINVAL;
+
+       r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+                         &ca->metadata_dev);
+       if (r) {
+               *error = "Error opening metadata device";
+               return r;
+       }
+
+       metadata_dev_size = get_dev_size(ca->metadata_dev);
+       if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
+               DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+                      bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+
+       return 0;
+}
+
+static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
+                          char **error)
+{
+       int r;
+
+       if (!at_least_one_arg(as, error))
+               return -EINVAL;
+
+       r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+                         &ca->cache_dev);
+       if (r) {
+               *error = "Error opening cache device";
+               return r;
+       }
+       ca->cache_sectors = get_dev_size(ca->cache_dev);
+
+       return 0;
+}
+
+static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
+                           char **error)
+{
+       int r;
+
+       if (!at_least_one_arg(as, error))
+               return -EINVAL;
+
+       r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+                         &ca->origin_dev);
+       if (r) {
+               *error = "Error opening origin device";
+               return r;
+       }
+
+       ca->origin_sectors = get_dev_size(ca->origin_dev);
+       if (ca->ti->len > ca->origin_sectors) {
+               *error = "Device size larger than cached device";
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
+                           char **error)
+{
+       unsigned long tmp;
+
+       if (!at_least_one_arg(as, error))
+               return -EINVAL;
+
+       if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
+           tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+           tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+               *error = "Invalid data block size";
+               return -EINVAL;
+       }
+
+       if (tmp > ca->cache_sectors) {
+               *error = "Data block size is larger than the cache device";
+               return -EINVAL;
+       }
+
+       ca->block_size = tmp;
+
+       return 0;
+}
+
/* Feature defaults: metadata mode CM_WRITE, writeback (not writethrough). */
static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->write_through = false;
}
+
+static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
+                         char **error)
+{
+       static struct dm_arg _args[] = {
+               {0, 1, "Invalid number of cache feature arguments"},
+       };
+
+       int r;
+       unsigned argc;
+       const char *arg;
+       struct cache_features *cf = &ca->features;
+
+       init_features(cf);
+
+       r = dm_read_arg_group(_args, as, &argc, error);
+       if (r)
+               return -EINVAL;
+
+       while (argc--) {
+               arg = dm_shift_arg(as);
+
+               if (!strcasecmp(arg, "writeback"))
+                       cf->write_through = false;
+
+               else if (!strcasecmp(arg, "writethrough"))
+                       cf->write_through = true;
+
+               else {
+                       *error = "Unrecognised cache feature requested";
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
+                       char **error)
+{
+       static struct dm_arg _args[] = {
+               {0, 1024, "Invalid number of policy arguments"},
+       };
+
+       int r;
+
+       if (!at_least_one_arg(as, error))
+               return -EINVAL;
+
+       ca->policy_name = dm_shift_arg(as);
+
+       r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
+       if (r)
+               return -EINVAL;
+
+       ca->policy_argv = (const char **)as->argv;
+       dm_consume_args(as, ca->policy_argc);
+
+       return 0;
+}
+
+static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
+                           char **error)
+{
+       int r;
+       struct dm_arg_set as;
+
+       as.argc = argc;
+       as.argv = argv;
+
+       r = parse_metadata_dev(ca, &as, error);
+       if (r)
+               return r;
+
+       r = parse_cache_dev(ca, &as, error);
+       if (r)
+               return r;
+
+       r = parse_origin_dev(ca, &as, error);
+       if (r)
+               return r;
+
+       r = parse_block_size(ca, &as, error);
+       if (r)
+               return r;
+
+       r = parse_features(ca, &as, error);
+       if (r)
+               return r;
+
+       r = parse_policy(ca, &as, error);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct kmem_cache *migration_cache;
+
+static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+{
+       int r = 0;
+
+       if (argc & 1) {
+               DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
+               return -EINVAL;
+       }
+
+       while (argc) {
+               r = policy_set_config_value(p, argv[0], argv[1]);
+               if (r) {
+                       DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
+                              argv[0], argv[1]);
+                       return r;
+               }
+
+               argc -= 2;
+               argv += 2;
+       }
+
+       return r;
+}
+
+static int create_cache_policy(struct cache *cache, struct cache_args *ca,
+                              char **error)
+{
+       int r;
+
+       cache->policy = dm_cache_policy_create(ca->policy_name,
+                                              cache->cache_size,
+                                              cache->origin_sectors,
+                                              cache->sectors_per_block);
+       if (!cache->policy) {
+               *error = "Error creating cache's policy";
+               return -ENOMEM;
+       }
+
+       r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
+       if (r)
+               dm_cache_policy_destroy(cache->policy);
+
+       return r;
+}
+
+/*
+ * We want the discard block size to be a power of two, at least the size
+ * of the cache block size, and have no more than 2^14 discard blocks
+ * across the origin.
+ */
+#define MAX_DISCARD_BLOCKS (1 << 14)
+
+static bool too_many_discard_blocks(sector_t discard_block_size,
+                                   sector_t origin_size)
+{
+       (void) sector_div(origin_size, discard_block_size);
+
+       return origin_size > MAX_DISCARD_BLOCKS;
+}
+
+static sector_t calculate_discard_block_size(sector_t cache_block_size,
+                                            sector_t origin_size)
+{
+       sector_t discard_block_size;
+
+       discard_block_size = roundup_pow_of_two(cache_block_size);
+
+       if (origin_size)
+               while (too_many_discard_blocks(discard_block_size, origin_size))
+                       discard_block_size *= 2;
+
+       return discard_block_size;
+}
+
/*
 * Default for cache->migration_threshold, run-time tunable via the
 * "migration_threshold" target message (see process_config_option()).
 */
#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)

/* Forward declaration: only hooked to ti->num_write_bios in writethrough mode. */
static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
+
+static int cache_create(struct cache_args *ca, struct cache **result)
+{
+       int r = 0;
+       char **error = &ca->ti->error;
+       struct cache *cache;
+       struct dm_target *ti = ca->ti;
+       dm_block_t origin_blocks;
+       struct dm_cache_metadata *cmd;
+       bool may_format = ca->features.mode == CM_WRITE;
+
+       cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+       if (!cache)
+               return -ENOMEM;
+
+       cache->ti = ca->ti;
+       ti->private = cache;
+       ti->per_bio_data_size = sizeof(struct per_bio_data);
+       ti->num_flush_bios = 2;
+       ti->flush_supported = true;
+
+       ti->num_discard_bios = 1;
+       ti->discards_supported = true;
+       ti->discard_zeroes_data_unsupported = true;
+
+       memcpy(&cache->features, &ca->features, sizeof(cache->features));
+
+       if (cache->features.write_through)
+               ti->num_write_bios = cache_num_write_bios;
+
+       cache->callbacks.congested_fn = cache_is_congested;
+       dm_table_add_target_callbacks(ti->table, &cache->callbacks);
+
+       cache->metadata_dev = ca->metadata_dev;
+       cache->origin_dev = ca->origin_dev;
+       cache->cache_dev = ca->cache_dev;
+
+       ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
+
+       /* FIXME: factor out this whole section */
+       origin_blocks = cache->origin_sectors = ca->origin_sectors;
+       (void) sector_div(origin_blocks, ca->block_size);
+       cache->origin_blocks = to_oblock(origin_blocks);
+
+       cache->sectors_per_block = ca->block_size;
+       if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
+               r = -EINVAL;
+               goto bad;
+       }
+
+       if (ca->block_size & (ca->block_size - 1)) {
+               dm_block_t cache_size = ca->cache_sectors;
+
+               cache->sectors_per_block_shift = -1;
+               (void) sector_div(cache_size, ca->block_size);
+               cache->cache_size = to_cblock(cache_size);
+       } else {
+               cache->sectors_per_block_shift = __ffs(ca->block_size);
+               cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
+       }
+
+       r = create_cache_policy(cache, ca, error);
+       if (r)
+               goto bad;
+       cache->policy_nr_args = ca->policy_argc;
+
+       cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
+                                    ca->block_size, may_format,
+                                    dm_cache_policy_get_hint_size(cache->policy));
+       if (IS_ERR(cmd)) {
+               *error = "Error creating metadata object";
+               r = PTR_ERR(cmd);
+               goto bad;
+       }
+       cache->cmd = cmd;
+
+       spin_lock_init(&cache->lock);
+       bio_list_init(&cache->deferred_bios);
+       bio_list_init(&cache->deferred_flush_bios);
+       INIT_LIST_HEAD(&cache->quiesced_migrations);
+       INIT_LIST_HEAD(&cache->completed_migrations);
+       INIT_LIST_HEAD(&cache->need_commit_migrations);
+       cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+       atomic_set(&cache->nr_migrations, 0);
+       init_waitqueue_head(&cache->migration_wait);
+
+       cache->nr_dirty = 0;
+       cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
+       if (!cache->dirty_bitset) {
+               *error = "could not allocate dirty bitset";
+               goto bad;
+       }
+       clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
+
+       cache->discard_block_size =
+               calculate_discard_block_size(cache->sectors_per_block,
+                                            cache->origin_sectors);
+       cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
+       cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
+       if (!cache->discard_bitset) {
+               *error = "could not allocate discard bitset";
+               goto bad;
+       }
+       clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+
+       cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
+       if (IS_ERR(cache->copier)) {
+               *error = "could not create kcopyd client";
+               r = PTR_ERR(cache->copier);
+               goto bad;
+       }
+
+       cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+       if (!cache->wq) {
+               *error = "could not create workqueue for metadata object";
+               goto bad;
+       }
+       INIT_WORK(&cache->worker, do_worker);
+       INIT_DELAYED_WORK(&cache->waker, do_waker);
+       cache->last_commit_jiffies = jiffies;
+
+       cache->prison = dm_bio_prison_create(PRISON_CELLS);
+       if (!cache->prison) {
+               *error = "could not create bio prison";
+               goto bad;
+       }
+
+       cache->all_io_ds = dm_deferred_set_create();
+       if (!cache->all_io_ds) {
+               *error = "could not create all_io deferred set";
+               goto bad;
+       }
+
+       cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
+                                                        migration_cache);
+       if (!cache->migration_pool) {
+               *error = "Error creating cache's migration mempool";
+               goto bad;
+       }
+
+       cache->next_migration = NULL;
+
+       cache->need_tick_bio = true;
+       cache->sized = false;
+       cache->quiescing = false;
+       cache->commit_requested = false;
+       cache->loaded_mappings = false;
+       cache->loaded_discards = false;
+
+       load_stats(cache);
+
+       atomic_set(&cache->stats.demotion, 0);
+       atomic_set(&cache->stats.promotion, 0);
+       atomic_set(&cache->stats.copies_avoided, 0);
+       atomic_set(&cache->stats.cache_cell_clash, 0);
+       atomic_set(&cache->stats.commit_count, 0);
+       atomic_set(&cache->stats.discard_count, 0);
+
+       *result = cache;
+       return 0;
+
+bad:
+       destroy(cache);
+       return r;
+}
+
+static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
+{
+       unsigned i;
+       const char **copy;
+
+       copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
+       if (!copy)
+               return -ENOMEM;
+       for (i = 0; i < argc; i++) {
+               copy[i] = kstrdup(argv[i], GFP_KERNEL);
+               if (!copy[i]) {
+                       while (i--)
+                               kfree(copy[i]);
+                       kfree(copy);
+                       return -ENOMEM;
+               }
+       }
+
+       cache->nr_ctr_args = argc;
+       cache->ctr_args = copy;
+
+       return 0;
+}
+
+static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+       int r = -EINVAL;
+       struct cache_args *ca;
+       struct cache *cache = NULL;
+
+       ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+       if (!ca) {
+               ti->error = "Error allocating memory for cache";
+               return -ENOMEM;
+       }
+       ca->ti = ti;
+
+       r = parse_cache_args(ca, argc, argv, &ti->error);
+       if (r)
+               goto out;
+
+       r = cache_create(ca, &cache);
+
+       r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
+       if (r) {
+               destroy(cache);
+               goto out;
+       }
+
+       ti->private = cache;
+
+out:
+       destroy_cache_args(ca);
+       return r;
+}
+
+static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
+{
+       int r;
+       struct cache *cache = ti->private;
+       dm_oblock_t block = get_bio_block(cache, bio);
+       dm_cblock_t cblock;
+
+       r = policy_lookup(cache->policy, block, &cblock);
+       if (r < 0)
+               return 2;       /* assume the worst */
+
+       return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
+}
+
+static int cache_map(struct dm_target *ti, struct bio *bio)
+{
+       struct cache *cache = ti->private;
+
+       int r;
+       dm_oblock_t block = get_bio_block(cache, bio);
+       bool can_migrate = false;
+       bool discarded_block;
+       struct dm_bio_prison_cell *cell;
+       struct policy_result lookup_result;
+       struct per_bio_data *pb;
+
+       if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+               /*
+                * This can only occur if the io goes to a partial block at
+                * the end of the origin device.  We don't cache these.
+                * Just remap to the origin and carry on.
+                */
+               remap_to_origin_clear_discard(cache, bio, block);
+               return DM_MAPIO_REMAPPED;
+       }
+
+       pb = init_per_bio_data(bio);
+
+       if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+               defer_bio(cache, bio);
+               return DM_MAPIO_SUBMITTED;
+       }
+
+       /*
+        * Check to see if that block is currently migrating.
+        */
+       cell = alloc_prison_cell(cache);
+       if (!cell) {
+               defer_bio(cache, bio);
+               return DM_MAPIO_SUBMITTED;
+       }
+
+       r = bio_detain(cache, block, bio, cell,
+                      (cell_free_fn) free_prison_cell,
+                      cache, &cell);
+       if (r) {
+               if (r < 0)
+                       defer_bio(cache, bio);
+
+               return DM_MAPIO_SUBMITTED;
+       }
+
+       discarded_block = is_discarded_oblock(cache, block);
+
+       r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+                      bio, &lookup_result);
+       if (r == -EWOULDBLOCK) {
+               cell_defer(cache, cell, true);
+               return DM_MAPIO_SUBMITTED;
+
+       } else if (r) {
+               DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
+               bio_io_error(bio);
+               return DM_MAPIO_SUBMITTED;
+       }
+
+       switch (lookup_result.op) {
+       case POLICY_HIT:
+               inc_hit_counter(cache, bio);
+               pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+               if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
+                       /*
+                        * No need to mark anything dirty in write through mode.
+                        */
+                       pb->req_nr == 0 ?
+                               remap_to_cache(cache, bio, lookup_result.cblock) :
+                               remap_to_origin_clear_discard(cache, bio, block);
+                       cell_defer(cache, cell, false);
+               } else {
+                       remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+                       cell_defer(cache, cell, false);
+               }
+               break;
+
+       case POLICY_MISS:
+               inc_miss_counter(cache, bio);
+               pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+               if (pb->req_nr != 0) {
+                       /*
+                        * This is a duplicate writethrough io that is no
+                        * longer needed because the block has been demoted.
+                        */
+                       bio_endio(bio, 0);
+                       cell_defer(cache, cell, false);
+                       return DM_MAPIO_SUBMITTED;
+               } else {
+                       remap_to_origin_clear_discard(cache, bio, block);
+                       cell_defer(cache, cell, false);
+               }
+               break;
+
+       default:
+               DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
+                           (unsigned) lookup_result.op);
+               bio_io_error(bio);
+               return DM_MAPIO_SUBMITTED;
+       }
+
+       return DM_MAPIO_REMAPPED;
+}
+
+static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
+{
+       struct cache *cache = ti->private;
+       unsigned long flags;
+       struct per_bio_data *pb = get_per_bio_data(bio);
+
+       if (pb->tick) {
+               policy_tick(cache->policy);
+
+               spin_lock_irqsave(&cache->lock, flags);
+               cache->need_tick_bio = true;
+               spin_unlock_irqrestore(&cache->lock, flags);
+       }
+
+       check_for_quiesced_migrations(cache, pb);
+
+       return 0;
+}
+
+static int write_dirty_bitset(struct cache *cache)
+{
+       unsigned i, r;
+
+       for (i = 0; i < from_cblock(cache->cache_size); i++) {
+               r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
+                                      is_dirty(cache, to_cblock(i)));
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
+static int write_discard_bitset(struct cache *cache)
+{
+       unsigned i, r;
+
+       r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
+                                          cache->discard_nr_blocks);
+       if (r) {
+               DMERR("could not resize on-disk discard bitset");
+               return r;
+       }
+
+       for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
+               r = dm_cache_set_discard(cache->cmd, to_dblock(i),
+                                        is_discarded(cache, to_dblock(i)));
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
+static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
+                    uint32_t hint)
+{
+       struct cache *cache = context;
+       return dm_cache_save_hint(cache->cmd, cblock, hint);
+}
+
+static int write_hints(struct cache *cache)
+{
+       int r;
+
+       r = dm_cache_begin_hints(cache->cmd, cache->policy);
+       if (r) {
+               DMERR("dm_cache_begin_hints failed");
+               return r;
+       }
+
+       r = policy_walk_mappings(cache->policy, save_hint, cache);
+       if (r)
+               DMERR("policy_walk_mappings failed");
+
+       return r;
+}
+
/*
 * Flush all in-core metadata (dirty bits, discard bits, stats, policy
 * hints) out to disk and commit.
 *
 * returns true on success (i.e. every step including the commit worked)
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("could not write dirty bitset");

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("could not write discard bitset");

	save_stats(cache);

	r3 = write_hints(cache);
	if (r3)
		DMERR("could not write hints");

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
	if (r4)
		DMERR("could not write cache metadata.  Data loss may occur.");

	return !r1 && !r2 && !r3 && !r4;
}
+
/*
 * Suspend: quiesce, drain in-flight migrations, park the worker and
 * requeue deferred bios for after resume, then write out all metadata.
 * The (void) cast shows sync_metadata()'s result is intentionally
 * discarded -- it logs its own errors, and a failed commit merely forces
 * a fully-dirty reload.
 */
static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_io(cache);
	stop_quiescing(cache);

	(void) sync_metadata(cache);
}
+
+static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
+                       bool dirty, uint32_t hint, bool hint_valid)
+{
+       int r;
+       struct cache *cache = context;
+
+       r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
+       if (r)
+               return r;
+
+       if (dirty)
+               set_dirty(cache, oblock, cblock);
+       else
+               clear_dirty(cache, oblock, cblock);
+
+       return 0;
+}
+
/*
 * dm_cache_load_discards() callback: replay one persisted discard bit
 * into the in-core discard bitset.
 */
static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct cache *cache = context;

	/* FIXME: handle mis-matched block size */

	if (discard)
		set_discard(cache, dblock);
	else
		clear_discard(cache, dblock);

	return 0;
}
+
/*
 * Called before resume.  Handles cache-device resizes and, on the first
 * resume of this target, replays persisted mappings and discards into
 * the in-core structures.
 */
static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	sector_t actual_cache_size = get_dev_size(cache->cache_dev);
	(void) sector_div(actual_cache_size, cache->sectors_per_block);

	/*
	 * Check to see if the cache has resized.
	 */
	if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
		cache->cache_size = to_cblock(actual_cache_size);

		r = dm_cache_resize(cache->cmd, cache->cache_size);
		if (r) {
			DMERR("could not resize cache metadata");
			return r;
		}

		cache->sized = true;
	}

	/* Mappings and discards are only loaded once per target lifetime. */
	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd,
					   dm_cache_policy_get_name(cache->policy),
					   load_mapping, cache);
		if (r) {
			DMERR("could not load cache mappings");
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		r = dm_cache_load_discards(cache->cmd, load_discard, cache);
		if (r) {
			DMERR("could not load origin discards");
			return r;
		}

		cache->loaded_discards = true;
	}

	return r;
}
+
static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	/* Request a fresh tick bio and restart the periodic waker/worker. */
	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}
+
/*
 * Status format:
 *
 * <#used metadata blocks>/<#total metadata blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <#policy args> <policy args>*
 *
 * STATUSTYPE_TABLE re-emits the original ctr line from the saved args.
 */
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;

	switch (type) {
	case STATUSTYPE_INFO:
		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
			r = dm_cache_commit(cache->cmd, false);
			if (r)
				DMERR("could not commit metadata for accurate status");
		}

		r = dm_cache_get_free_metadata_block_count(cache->cmd,
							   &nr_free_blocks_metadata);
		if (r) {
			DMERR("could not get metadata free block count");
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("could not get metadata device size");
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long long) from_cblock(residency),
		       cache->nr_dirty);

		/* Feature args: only writethrough is reported; writeback is the default. */
		if (cache->features.write_through)
			DMEMIT("1 writethrough ");
		else
			DMEMIT("0 ");

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
			if (r)
				DMERR("policy_emit_config_values returned %d", r);
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		/*
		 * NOTE(review): this relies on nr_ctr_args >= 1 (an unsigned
		 * 'nr_ctr_args - 1' would wrap at 0) -- the ctr's mandatory
		 * arguments appear to guarantee this; confirm.
		 */
		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}
+
+/* Sentinel: the key is not handled by the cache core itself. */
+#define NOT_CORE_OPTION 1
+
+/*
+ * Handle a <key> <value> pair understood by the cache target core.
+ *
+ * Returns 0 on success, -EINVAL if the value fails to parse, or
+ * NOT_CORE_OPTION when the key should be offered to the policy instead.
+ */
+static int process_config_option(struct cache *cache, char **argv)
+{
+       unsigned long tmp;
+
+       if (!strcasecmp(argv[0], "migration_threshold")) {
+               if (kstrtoul(argv[1], 10, &tmp))
+                       return -EINVAL;
+
+               cache->migration_threshold = tmp;
+               return 0;
+       }
+
+       return NOT_CORE_OPTION;
+}
+
+/*
+ * Supports <key> <value>.
+ *
+ * The key migration_threshold is supported by the cache target core;
+ * any other key is forwarded to the cache policy via
+ * policy_set_config_value().
+ */
+static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+       int r;
+       struct cache *cache = ti->private;
+
+       /* Only <key> <value> pairs are accepted. */
+       if (argc != 2)
+               return -EINVAL;
+
+       r = process_config_option(cache, argv);
+       if (r == NOT_CORE_OPTION)
+               return policy_set_config_value(cache->policy, argv[0], argv[1]);
+
+       return r;
+}
+
+/*
+ * Invoke @fn for each underlying device: first the whole cache device,
+ * then the origin device over the target's length.  Stops at the first
+ * non-zero return from @fn.
+ */
+static int cache_iterate_devices(struct dm_target *ti,
+                                iterate_devices_callout_fn fn, void *data)
+{
+       int r = 0;
+       struct cache *cache = ti->private;
+
+       r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
+       if (!r)
+               r = fn(ti, cache->origin_dev, 0, ti->len, data);
+
+       return r;
+}
+
+/*
+ * We assume I/O is going to the origin (which is the volume
+ * more likely to have restrictions e.g. by being striped).
+ * (Looking up the exact location of the data would be expensive
+ * and could always be out of date by the time the bio is submitted.)
+ *
+ * Delegates the merge decision to the origin's queue when it has a
+ * merge_bvec_fn; otherwise allows up to max_size.
+ */
+static int cache_bvec_merge(struct dm_target *ti,
+                           struct bvec_merge_data *bvm,
+                           struct bio_vec *biovec, int max_size)
+{
+       struct cache *cache = ti->private;
+       struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
+
+       if (!q->merge_bvec_fn)
+               return max_size;
+
+       /* Redirect the query so the origin's limits are applied. */
+       bvm->bi_bdev = cache->origin_dev->bdev;
+       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+/*
+ * Derive discard limits from the cache's discard block size.
+ * max_discard_sectors is capped at 1024 discard blocks per request.
+ */
+static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
+{
+       /*
+        * FIXME: these limits may be incompatible with the cache device
+        */
+       limits->max_discard_sectors = cache->discard_block_size * 1024;
+       limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+}
+
+/*
+ * Advertise queue limits: no minimum I/O constraint, optimal I/O equal
+ * to one cache block, plus the discard limits above.
+ */
+static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+       struct cache *cache = ti->private;
+
+       blk_limits_io_min(limits, 0);
+       blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+       set_discard_limits(cache, limits);
+}
+
+/*----------------------------------------------------------------*/
+
+/* Device-mapper target registration table for the "cache" target. */
+static struct target_type cache_target = {
+       .name = "cache",
+       .version = {1, 0, 0},
+       .module = THIS_MODULE,
+       .ctr = cache_ctr,
+       .dtr = cache_dtr,
+       .map = cache_map,
+       .end_io = cache_end_io,
+       .postsuspend = cache_postsuspend,
+       .preresume = cache_preresume,
+       .resume = cache_resume,
+       .status = cache_status,
+       .message = cache_message,
+       .iterate_devices = cache_iterate_devices,
+       .merge = cache_bvec_merge,
+       .io_hints = cache_io_hints,
+};
+
+/*
+ * Module init: register the target, then create the slab cache used
+ * for migration structures.  Unregisters the target on allocation
+ * failure so no half-initialised state is left behind.
+ */
+static int __init dm_cache_init(void)
+{
+       int r;
+
+       r = dm_register_target(&cache_target);
+       if (r) {
+               DMERR("cache target registration failed: %d", r);
+               return r;
+       }
+
+       migration_cache = KMEM_CACHE(dm_cache_migration, 0);
+       if (!migration_cache) {
+               dm_unregister_target(&cache_target);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* Module exit: tear down in reverse order of dm_cache_init(). */
+static void __exit dm_cache_exit(void)
+{
+       dm_unregister_target(&cache_target);
+       kmem_cache_destroy(migration_cache);
+}
+
+module_init(dm_cache_init);
+module_exit(dm_cache_exit);
+
+MODULE_DESCRIPTION(DM_NAME " cache target");
+MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
+MODULE_LICENSE("GPL");
index f7369f9..13c1548 100644 (file)
@@ -1234,20 +1234,6 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
        return 0;
 }
 
-/*
- * Encode key into its hex representation
- */
-static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
-{
-       unsigned int i;
-
-       for (i = 0; i < size; i++) {
-               sprintf(hex, "%02x", *key);
-               hex += 2;
-               key++;
-       }
-}
-
 static void crypt_free_tfms(struct crypt_config *cc)
 {
        unsigned i;
@@ -1651,7 +1637,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
                if (opt_params == 1 && opt_string &&
                    !strcasecmp(opt_string, "allow_discards"))
-                       ti->num_discard_requests = 1;
+                       ti->num_discard_bios = 1;
                else if (opt_params) {
                        ret = -EINVAL;
                        ti->error = "Invalid feature arguments";
@@ -1679,7 +1665,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       ti->num_flush_requests = 1;
+       ti->num_flush_bios = 1;
        ti->discard_zeroes_data_unsupported = true;
 
        return 0;
@@ -1717,11 +1703,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_SUBMITTED;
 }
 
-static int crypt_status(struct dm_target *ti, status_type_t type,
-                       unsigned status_flags, char *result, unsigned maxlen)
+static void crypt_status(struct dm_target *ti, status_type_t type,
+                        unsigned status_flags, char *result, unsigned maxlen)
 {
        struct crypt_config *cc = ti->private;
-       unsigned int sz = 0;
+       unsigned i, sz = 0;
 
        switch (type) {
        case STATUSTYPE_INFO:
@@ -1731,27 +1717,20 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
        case STATUSTYPE_TABLE:
                DMEMIT("%s ", cc->cipher_string);
 
-               if (cc->key_size > 0) {
-                       if ((maxlen - sz) < ((cc->key_size << 1) + 1))
-                               return -ENOMEM;
-
-                       crypt_encode_key(result + sz, cc->key, cc->key_size);
-                       sz += cc->key_size << 1;
-               } else {
-                       if (sz >= maxlen)
-                               return -ENOMEM;
-                       result[sz++] = '-';
-               }
+               if (cc->key_size > 0)
+                       for (i = 0; i < cc->key_size; i++)
+                               DMEMIT("%02x", cc->key[i]);
+               else
+                       DMEMIT("-");
 
                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                                cc->dev->name, (unsigned long long)cc->start);
 
-               if (ti->num_discard_requests)
+               if (ti->num_discard_bios)
                        DMEMIT(" 1 allow_discards");
 
                break;
        }
-       return 0;
 }
 
 static void crypt_postsuspend(struct dm_target *ti)
@@ -1845,7 +1824,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version = {1, 12, 0},
+       .version = {1, 12, 1},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
index cc1bd04..496d5f3 100644 (file)
@@ -198,8 +198,8 @@ out:
        mutex_init(&dc->timer_lock);
        atomic_set(&dc->may_delay, 1);
 
-       ti->num_flush_requests = 1;
-       ti->num_discard_requests = 1;
+       ti->num_flush_bios = 1;
+       ti->num_discard_bios = 1;
        ti->private = dc;
        return 0;
 
@@ -293,8 +293,8 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
        return delay_bio(dc, dc->read_delay, bio);
 }
 
-static int delay_status(struct dm_target *ti, status_type_t type,
-                       unsigned status_flags, char *result, unsigned maxlen)
+static void delay_status(struct dm_target *ti, status_type_t type,
+                        unsigned status_flags, char *result, unsigned maxlen)
 {
        struct delay_c *dc = ti->private;
        int sz = 0;
@@ -314,8 +314,6 @@ static int delay_status(struct dm_target *ti, status_type_t type,
                               dc->write_delay);
                break;
        }
-
-       return 0;
 }
 
 static int delay_iterate_devices(struct dm_target *ti,
@@ -337,7 +335,7 @@ out:
 
 static struct target_type delay_target = {
        .name        = "delay",
-       .version     = {1, 2, 0},
+       .version     = {1, 2, 1},
        .module      = THIS_MODULE,
        .ctr         = delay_ctr,
        .dtr         = delay_dtr,
index 9721f2f..7fcf21c 100644 (file)
@@ -216,8 +216,8 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       ti->num_flush_requests = 1;
-       ti->num_discard_requests = 1;
+       ti->num_flush_bios = 1;
+       ti->num_discard_bios = 1;
        ti->per_bio_data_size = sizeof(struct per_bio_data);
        ti->private = fc;
        return 0;
@@ -337,8 +337,8 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
        return error;
 }
 
-static int flakey_status(struct dm_target *ti, status_type_t type,
-                        unsigned status_flags, char *result, unsigned maxlen)
+static void flakey_status(struct dm_target *ti, status_type_t type,
+                         unsigned status_flags, char *result, unsigned maxlen)
 {
        unsigned sz = 0;
        struct flakey_c *fc = ti->private;
@@ -368,7 +368,6 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
 
                break;
        }
-       return 0;
 }
 
 static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
@@ -411,7 +410,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
 
 static struct target_type flakey_target = {
        .name   = "flakey",
-       .version = {1, 3, 0},
+       .version = {1, 3, 1},
        .module = THIS_MODULE,
        .ctr    = flakey_ctr,
        .dtr    = flakey_dtr,
index 0666b5d..aa04f02 100644 (file)
@@ -1067,6 +1067,7 @@ static void retrieve_status(struct dm_table *table,
        num_targets = dm_table_get_num_targets(table);
        for (i = 0; i < num_targets; i++) {
                struct dm_target *ti = dm_table_get_target(table, i);
+               size_t l;
 
                remaining = len - (outptr - outbuf);
                if (remaining <= sizeof(struct dm_target_spec)) {
@@ -1093,14 +1094,17 @@ static void retrieve_status(struct dm_table *table,
                if (ti->type->status) {
                        if (param->flags & DM_NOFLUSH_FLAG)
                                status_flags |= DM_STATUS_NOFLUSH_FLAG;
-                       if (ti->type->status(ti, type, status_flags, outptr, remaining)) {
-                               param->flags |= DM_BUFFER_FULL_FLAG;
-                               break;
-                       }
+                       ti->type->status(ti, type, status_flags, outptr, remaining);
                } else
                        outptr[0] = '\0';
 
-               outptr += strlen(outptr) + 1;
+               l = strlen(outptr) + 1;
+               if (l == remaining) {
+                       param->flags |= DM_BUFFER_FULL_FLAG;
+                       break;
+               }
+
+               outptr += l;
                used = param->data_start + (outptr - outbuf);
 
                outptr = align_ptr(outptr);
@@ -1410,6 +1414,22 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
        return 0;
 }
 
+static bool buffer_test_overflow(char *result, unsigned maxlen)
+{
+       return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
+/*
+ * Process device-mapper dependent messages.
+ * Returns a number <= 1 if message was processed by device mapper.
+ * Returns 2 if message should be delivered to the target.
+ */
+static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
+                         char *result, unsigned maxlen)
+{
+       return 2;
+}
+
 /*
  * Pass a message to the target that's at the supplied device offset.
  */
@@ -1421,6 +1441,8 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
        struct dm_table *table;
        struct dm_target *ti;
        struct dm_target_msg *tmsg = (void *) param + param->data_start;
+       size_t maxlen;
+       char *result = get_result_buffer(param, param_size, &maxlen);
 
        md = find_device(param);
        if (!md)
@@ -1444,6 +1466,10 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
                goto out_argv;
        }
 
+       r = message_for_md(md, argc, argv, result, maxlen);
+       if (r <= 1)
+               goto out_argv;
+
        table = dm_get_live_table(md);
        if (!table)
                goto out_argv;
@@ -1469,44 +1495,68 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
  out_argv:
        kfree(argv);
  out:
-       param->data_size = 0;
+       if (r >= 0)
+               __dev_status(md, param);
+
+       if (r == 1) {
+               param->flags |= DM_DATA_OUT_FLAG;
+               if (buffer_test_overflow(result, maxlen))
+                       param->flags |= DM_BUFFER_FULL_FLAG;
+               else
+                       param->data_size = param->data_start + strlen(result) + 1;
+               r = 0;
+       }
+
        dm_put(md);
        return r;
 }
 
+/*
+ * The ioctl parameter block consists of two parts, a dm_ioctl struct
+ * followed by a data buffer.  This flag is set if the second part,
+ * which has a variable size, is not used by the function processing
+ * the ioctl.
+ */
+#define IOCTL_FLAGS_NO_PARAMS  1
+
 /*-----------------------------------------------------------------
  * Implementation of open/close/ioctl on the special char
  * device.
  *---------------------------------------------------------------*/
-static ioctl_fn lookup_ioctl(unsigned int cmd)
+static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
 {
        static struct {
                int cmd;
+               int flags;
                ioctl_fn fn;
        } _ioctls[] = {
-               {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */
-               {DM_REMOVE_ALL_CMD, remove_all},
-               {DM_LIST_DEVICES_CMD, list_devices},
-
-               {DM_DEV_CREATE_CMD, dev_create},
-               {DM_DEV_REMOVE_CMD, dev_remove},
-               {DM_DEV_RENAME_CMD, dev_rename},
-               {DM_DEV_SUSPEND_CMD, dev_suspend},
-               {DM_DEV_STATUS_CMD, dev_status},
-               {DM_DEV_WAIT_CMD, dev_wait},
-
-               {DM_TABLE_LOAD_CMD, table_load},
-               {DM_TABLE_CLEAR_CMD, table_clear},
-               {DM_TABLE_DEPS_CMD, table_deps},
-               {DM_TABLE_STATUS_CMD, table_status},
-
-               {DM_LIST_VERSIONS_CMD, list_versions},
-
-               {DM_TARGET_MSG_CMD, target_message},
-               {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
+               {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
+               {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+               {DM_LIST_DEVICES_CMD, 0, list_devices},
+
+               {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
+               {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
+               {DM_DEV_RENAME_CMD, 0, dev_rename},
+               {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
+               {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
+               {DM_DEV_WAIT_CMD, 0, dev_wait},
+
+               {DM_TABLE_LOAD_CMD, 0, table_load},
+               {DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear},
+               {DM_TABLE_DEPS_CMD, 0, table_deps},
+               {DM_TABLE_STATUS_CMD, 0, table_status},
+
+               {DM_LIST_VERSIONS_CMD, 0, list_versions},
+
+               {DM_TARGET_MSG_CMD, 0, target_message},
+               {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}
        };
 
-       return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
+       if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
+               return NULL;
+
+       *ioctl_flags = _ioctls[cmd].flags;
+       return _ioctls[cmd].fn;
 }
 
 /*
@@ -1543,7 +1593,8 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
        return r;
 }
 
-#define DM_PARAMS_VMALLOC      0x0001  /* Params alloced with vmalloc not kmalloc */
+#define DM_PARAMS_KMALLOC      0x0001  /* Params alloced with kmalloc */
+#define DM_PARAMS_VMALLOC      0x0002  /* Params alloced with vmalloc */
 #define DM_WIPE_BUFFER         0x0010  /* Wipe input buffer before returning from ioctl */
 
 static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
@@ -1551,66 +1602,80 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
        if (param_flags & DM_WIPE_BUFFER)
                memset(param, 0, param_size);
 
+       if (param_flags & DM_PARAMS_KMALLOC)
+               kfree(param);
        if (param_flags & DM_PARAMS_VMALLOC)
                vfree(param);
-       else
-               kfree(param);
 }
 
-static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param, int *param_flags)
+static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
+                      int ioctl_flags,
+                      struct dm_ioctl **param, int *param_flags)
 {
-       struct dm_ioctl tmp, *dmi;
+       struct dm_ioctl *dmi;
        int secure_data;
+       const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
 
-       if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
+       if (copy_from_user(param_kernel, user, minimum_data_size))
                return -EFAULT;
 
-       if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
+       if (param_kernel->data_size < minimum_data_size)
                return -EINVAL;
 
-       secure_data = tmp.flags & DM_SECURE_DATA_FLAG;
+       secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
 
        *param_flags = secure_data ? DM_WIPE_BUFFER : 0;
 
+       if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) {
+               dmi = param_kernel;
+               dmi->data_size = minimum_data_size;
+               goto data_copied;
+       }
+
        /*
         * Try to avoid low memory issues when a device is suspended.
         * Use kmalloc() rather than vmalloc() when we can.
         */
        dmi = NULL;
-       if (tmp.data_size <= KMALLOC_MAX_SIZE)
-               dmi = kmalloc(tmp.data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+       if (param_kernel->data_size <= KMALLOC_MAX_SIZE) {
+               dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+               if (dmi)
+                       *param_flags |= DM_PARAMS_KMALLOC;
+       }
 
        if (!dmi) {
-               dmi = __vmalloc(tmp.data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
-               *param_flags |= DM_PARAMS_VMALLOC;
+               dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+               if (dmi)
+                       *param_flags |= DM_PARAMS_VMALLOC;
        }
 
        if (!dmi) {
-               if (secure_data && clear_user(user, tmp.data_size))
+               if (secure_data && clear_user(user, param_kernel->data_size))
                        return -EFAULT;
                return -ENOMEM;
        }
 
-       if (copy_from_user(dmi, user, tmp.data_size))
+       if (copy_from_user(dmi, user, param_kernel->data_size))
                goto bad;
 
+data_copied:
        /*
         * Abort if something changed the ioctl data while it was being copied.
         */
-       if (dmi->data_size != tmp.data_size) {
+       if (dmi->data_size != param_kernel->data_size) {
                DMERR("rejecting ioctl: data size modified while processing parameters");
                goto bad;
        }
 
        /* Wipe the user buffer so we do not return it to userspace */
-       if (secure_data && clear_user(user, tmp.data_size))
+       if (secure_data && clear_user(user, param_kernel->data_size))
                goto bad;
 
        *param = dmi;
        return 0;
 
 bad:
-       free_params(dmi, tmp.data_size, *param_flags);
+       free_params(dmi, param_kernel->data_size, *param_flags);
 
        return -EFAULT;
 }
@@ -1621,6 +1686,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
        param->flags &= ~DM_BUFFER_FULL_FLAG;
        param->flags &= ~DM_UEVENT_GENERATED_FLAG;
        param->flags &= ~DM_SECURE_DATA_FLAG;
+       param->flags &= ~DM_DATA_OUT_FLAG;
 
        /* Ignores parameters */
        if (cmd == DM_REMOVE_ALL_CMD ||
@@ -1648,11 +1714,13 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
 static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
 {
        int r = 0;
+       int ioctl_flags;
        int param_flags;
        unsigned int cmd;
        struct dm_ioctl *uninitialized_var(param);
        ioctl_fn fn = NULL;
        size_t input_param_size;
+       struct dm_ioctl param_kernel;
 
        /* only root can play with this */
        if (!capable(CAP_SYS_ADMIN))
@@ -1677,7 +1745,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
        if (cmd == DM_VERSION_CMD)
                return 0;
 
-       fn = lookup_ioctl(cmd);
+       fn = lookup_ioctl(cmd, &ioctl_flags);
        if (!fn) {
                DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
                return -ENOTTY;
@@ -1686,7 +1754,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
        /*
         * Copy the parameters into kernel space.
         */
-       r = copy_params(user, &param, &param_flags);
+       r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);
 
        if (r)
                return r;
@@ -1699,6 +1767,10 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
        param->data_size = sizeof(*param);
        r = fn(param, input_param_size);
 
+       if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
+           unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
+               DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+
        /*
         * Copy the results back to userland.
         */
index 68c0267..d581fe5 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/delay.h>
 #include <linux/device-mapper.h>
 #include <linux/dm-kcopyd.h>
 
@@ -51,6 +52,8 @@ struct dm_kcopyd_client {
        struct workqueue_struct *kcopyd_wq;
        struct work_struct kcopyd_work;
 
+       struct dm_kcopyd_throttle *throttle;
+
 /*
  * We maintain three lists of jobs:
  *
@@ -68,6 +71,117 @@ struct dm_kcopyd_client {
 
 static struct page_list zero_page_list;
 
+static DEFINE_SPINLOCK(throttle_spinlock);
+
+/*
+ * IO/IDLE accounting slowly decays after (1 << ACCOUNT_INTERVAL_SHIFT) period.
+ * When total_period >= (1 << ACCOUNT_INTERVAL_SHIFT) the counters are divided
+ * by 2.
+ */
+#define ACCOUNT_INTERVAL_SHIFT         SHIFT_HZ
+
+/*
+ * Sleep this number of milliseconds.
+ *
+ * The value was decided experimentally.
+ * Smaller values seem to cause an increased copy rate above the limit.
+ * The reason for this is unknown but possibly due to jiffies rounding errors
+ * or read/write cache inside the disk.
+ */
+#define SLEEP_MSEC                     100
+
+/*
+ * Maximum number of sleep events. There is a theoretical livelock if more
+ * kcopyd clients do work simultaneously which this limit avoids.
+ */
+#define MAX_SLEEPS                     10
+
+/*
+ * Account the start of a kcopyd I/O job against throttle @t and, if the
+ * measured busy fraction exceeds t->throttle percent, sleep (up to
+ * MAX_SLEEPS times) before letting the job proceed.  A NULL throttle
+ * means unthrottled.  All counter state is protected by
+ * throttle_spinlock.
+ */
+static void io_job_start(struct dm_kcopyd_throttle *t)
+{
+       unsigned throttle, now, difference;
+       int slept = 0, skew;
+
+       if (unlikely(!t))
+               return;
+
+try_again:
+       spin_lock_irq(&throttle_spinlock);
+
+       /* Snapshot once; userspace may change t->throttle concurrently. */
+       throttle = ACCESS_ONCE(t->throttle);
+
+       /* >= 100% means no limit is in force. */
+       if (likely(throttle >= 100))
+               goto skip_limit;
+
+       now = jiffies;
+       difference = now - t->last_jiffies;
+       t->last_jiffies = now;
+       /* io_period only accrues while at least one job is in flight. */
+       if (t->num_io_jobs)
+               t->io_period += difference;
+       t->total_period += difference;
+
+       /*
+        * Maintain sane values if we got a temporary overflow.
+        */
+       if (unlikely(t->io_period > t->total_period))
+               t->io_period = t->total_period;
+
+       /* Decay both counters so old history fades (see ACCOUNT_INTERVAL_SHIFT). */
+       if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
+               int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
+               t->total_period >>= shift;
+               t->io_period >>= shift;
+       }
+
+       /* Positive skew: busier than the configured percentage allows. */
+       skew = t->io_period - throttle * t->total_period / 100;
+
+       if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
+               slept++;
+               spin_unlock_irq(&throttle_spinlock);
+               msleep(SLEEP_MSEC);
+               goto try_again;
+       }
+
+skip_limit:
+       t->num_io_jobs++;
+
+       spin_unlock_irq(&throttle_spinlock);
+}
+
+/*
+ * Account the completion of a kcopyd I/O job against throttle @t.
+ * Uses irqsave locking because it may run from I/O completion context.
+ * When the last in-flight job finishes, the elapsed time is folded
+ * into the busy/total period counters.
+ */
+static void io_job_finish(struct dm_kcopyd_throttle *t)
+{
+       unsigned long flags;
+
+       if (unlikely(!t))
+               return;
+
+       spin_lock_irqsave(&throttle_spinlock, flags);
+
+       t->num_io_jobs--;
+
+       /* Unthrottled: nothing further to account. */
+       if (likely(ACCESS_ONCE(t->throttle) >= 100))
+               goto skip_limit;
+
+       if (!t->num_io_jobs) {
+               unsigned now, difference;
+
+               now = jiffies;
+               difference = now - t->last_jiffies;
+               t->last_jiffies = now;
+
+               t->io_period += difference;
+               t->total_period += difference;
+
+               /*
+                * Maintain sane values if we got a temporary overflow.
+                */
+               if (unlikely(t->io_period > t->total_period))
+                       t->io_period = t->total_period;
+       }
+
+skip_limit:
+       spin_unlock_irqrestore(&throttle_spinlock, flags);
+}
+
+
 static void wake(struct dm_kcopyd_client *kc)
 {
        queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
@@ -348,6 +462,8 @@ static void complete_io(unsigned long error, void *context)
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;
 
+       io_job_finish(kc->throttle);
+
        if (error) {
                if (job->rw & WRITE)
                        job->write_err |= error;
@@ -389,6 +505,8 @@ static int run_io_job(struct kcopyd_job *job)
                .client = job->kc->io_client,
        };
 
+       io_job_start(job->kc->throttle);
+
        if (job->rw == READ)
                r = dm_io(&io_req, 1, &job->source, NULL);
        else
@@ -695,7 +813,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 /*-----------------------------------------------------------------
  * Client setup
  *---------------------------------------------------------------*/
-struct dm_kcopyd_client *dm_kcopyd_client_create(void)
+struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
 {
        int r = -ENOMEM;
        struct dm_kcopyd_client *kc;
@@ -708,6 +826,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(void)
        INIT_LIST_HEAD(&kc->complete_jobs);
        INIT_LIST_HEAD(&kc->io_jobs);
        INIT_LIST_HEAD(&kc->pages_jobs);
+       kc->throttle = throttle;
 
        kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
        if (!kc->job_pool)
index 328cad5..4f99d26 100644 (file)
@@ -53,9 +53,9 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       ti->num_flush_requests = 1;
-       ti->num_discard_requests = 1;
-       ti->num_write_same_requests = 1;
+       ti->num_flush_bios = 1;
+       ti->num_discard_bios = 1;
+       ti->num_write_same_bios = 1;
        ti->private = lc;
        return 0;
 
@@ -95,8 +95,8 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_REMAPPED;
 }
 
-static int linear_status(struct dm_target *ti, status_type_t type,
-                        unsigned status_flags, char *result, unsigned maxlen)
+static void linear_status(struct dm_target *ti, status_type_t type,
+                         unsigned status_flags, char *result, unsigned maxlen)
 {
        struct linear_c *lc = (struct linear_c *) ti->private;
 
@@ -110,7 +110,6 @@ static int linear_status(struct dm_target *ti, status_type_t type,
                                (unsigned long long)lc->start);
                break;
        }
-       return 0;
 }
 
 static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
@@ -155,7 +154,7 @@ static int linear_iterate_devices(struct dm_target *ti,
 
 static struct target_type linear_target = {
        .name   = "linear",
-       .version = {1, 2, 0},
+       .version = {1, 2, 1},
        .module = THIS_MODULE,
        .ctr    = linear_ctr,
        .dtr    = linear_dtr,
index 573bd04..51bb816 100644 (file)
@@ -905,8 +905,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
                goto bad;
        }
 
-       ti->num_flush_requests = 1;
-       ti->num_discard_requests = 1;
+       ti->num_flush_bios = 1;
+       ti->num_discard_bios = 1;
 
        return 0;
 
@@ -1378,8 +1378,8 @@ static void multipath_resume(struct dm_target *ti)
  *     [priority selector-name num_ps_args [ps_args]*
  *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
  */
-static int multipath_status(struct dm_target *ti, status_type_t type,
-                           unsigned status_flags, char *result, unsigned maxlen)
+static void multipath_status(struct dm_target *ti, status_type_t type,
+                            unsigned status_flags, char *result, unsigned maxlen)
 {
        int sz = 0;
        unsigned long flags;
@@ -1485,8 +1485,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
        }
 
        spin_unlock_irqrestore(&m->lock, flags);
-
-       return 0;
 }
 
 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
@@ -1695,7 +1693,7 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
        .name = "multipath",
-       .version = {1, 5, 0},
+       .version = {1, 5, 1},
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
index 9e58dbd..311e3d3 100644 (file)
@@ -91,15 +91,44 @@ static struct raid_type {
        {"raid6_nc", "RAID6 (N continue)",              2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
 };
 
+static char *raid10_md_layout_to_format(int layout)
+{
+       /*
+        * Bit 16 and 17 stand for "offset" and "use_far_sets"
+        * Refer to MD's raid10.c for details
+        */
+       if ((layout & 0x10000) && (layout & 0x20000))
+               return "offset";
+
+       if ((layout & 0xFF) > 1)
+               return "near";
+
+       return "far";
+}
+
 static unsigned raid10_md_layout_to_copies(int layout)
 {
-       return layout & 0xFF;
+       if ((layout & 0xFF) > 1)
+               return layout & 0xFF;
+       return (layout >> 8) & 0xFF;
 }
 
 static int raid10_format_to_md_layout(char *format, unsigned copies)
 {
-       /* 1 "far" copy, and 'copies' "near" copies */
-       return (1 << 8) | (copies & 0xFF);
+       unsigned n = 1, f = 1;
+
+       if (!strcmp("near", format))
+               n = copies;
+       else
+               f = copies;
+
+       if (!strcmp("offset", format))
+               return 0x30000 | (f << 8) | n;
+
+       if (!strcmp("far", format))
+               return 0x20000 | (f << 8) | n;
+
+       return (f << 8) | n;
 }
 
 static struct raid_type *get_raid_type(char *name)
@@ -352,6 +381,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
 {
        unsigned i, rebuild_cnt = 0;
        unsigned rebuilds_per_group, copies, d;
+       unsigned group_size, last_group_start;
 
        for (i = 0; i < rs->md.raid_disks; i++)
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@@ -379,9 +409,6 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 * as long as the failed devices occur in different mirror
                 * groups (i.e. different stripes).
                 *
-                * Right now, we only allow for "near" copies.  When other
-                * formats are added, we will have to check those too.
-                *
                 * When checking "near" format, make sure no adjacent devices
                 * have failed beyond what can be handled.  In addition to the
                 * simple case where the number of devices is a multiple of the
@@ -391,14 +418,41 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
-               for (i = 0; i < rs->md.raid_disks * copies; i++) {
-                       if (!(i % copies))
+               if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
+                       for (i = 0; i < rs->md.raid_disks * copies; i++) {
+                               if (!(i % copies))
+                                       rebuilds_per_group = 0;
+                               d = i % rs->md.raid_disks;
+                               if ((!rs->dev[d].rdev.sb_page ||
+                                    !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+                                   (++rebuilds_per_group >= copies))
+                                       goto too_many;
+                       }
+                       break;
+               }
+
+               /*
+                * When checking "far" and "offset" formats, we need to ensure
+                * that the device that holds its copy is not also dead or
+                * being rebuilt.  (Note that "far" and "offset" formats only
+                * support two copies right now.  These formats also only ever
+                * use the 'use_far_sets' variant.)
+                *
+                * This check is somewhat complicated by the need to account
+                * for arrays that are not a multiple of (far) copies.  This
+                * results in the need to treat the last (potentially larger)
+                * set differently.
+                */
+               group_size = (rs->md.raid_disks / copies);
+               last_group_start = (rs->md.raid_disks / group_size) - 1;
+               last_group_start *= group_size;
+               for (i = 0; i < rs->md.raid_disks; i++) {
+                       if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
-                       d = i % rs->md.raid_disks;
-                       if ((!rs->dev[d].rdev.sb_page ||
-                            !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+                       if ((!rs->dev[i].rdev.sb_page ||
+                            !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
                            (++rebuilds_per_group >= copies))
-                               goto too_many;
+                                       goto too_many;
                }
                break;
        default:
@@ -433,7 +487,7 @@ too_many:
  *
  * RAID10-only options:
  *    [raid10_copies <# copies>]        Number of copies.  (Default: 2)
- *    [raid10_format <near>]            Layout algorithm.  (Default: near)
+ *    [raid10_format <near|far|offset>] Layout algorithm.  (Default: near)
  */
 static int parse_raid_params(struct raid_set *rs, char **argv,
                             unsigned num_raid_params)
@@ -520,7 +574,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                                rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
                                return -EINVAL;
                        }
-                       if (strcmp("near", argv[i])) {
+                       if (strcmp("near", argv[i]) &&
+                           strcmp("far", argv[i]) &&
+                           strcmp("offset", argv[i])) {
                                rs->ti->error = "Invalid 'raid10_format' value given";
                                return -EINVAL;
                        }
@@ -644,6 +700,15 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
                        return -EINVAL;
                }
 
+               /*
+                * If the format is not "near", we only support
+                * two copies at the moment.
+                */
+               if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
+                       rs->ti->error = "Too many copies for given RAID10 format.";
+                       return -EINVAL;
+               }
+
                /* (Len * #mirrors) / #devices */
                sectors_per_dev = rs->ti->len * raid10_copies;
                sector_div(sectors_per_dev, rs->md.raid_disks);
@@ -854,17 +919,30 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
        /*
         * Reshaping is not currently allowed
         */
-       if ((le32_to_cpu(sb->level) != mddev->level) ||
-           (le32_to_cpu(sb->layout) != mddev->layout) ||
-           (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
-               DMERR("Reshaping arrays not yet supported.");
+       if (le32_to_cpu(sb->level) != mddev->level) {
+               DMERR("Reshaping arrays not yet supported. (RAID level change)");
+               return -EINVAL;
+       }
+       if (le32_to_cpu(sb->layout) != mddev->layout) {
+               DMERR("Reshaping arrays not yet supported. (RAID layout change)");
+               DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
+               DMERR("  Old layout: %s w/ %d copies",
+                     raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
+                     raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
+               DMERR("  New layout: %s w/ %d copies",
+                     raid10_md_layout_to_format(mddev->layout),
+                     raid10_md_layout_to_copies(mddev->layout));
+               return -EINVAL;
+       }
+       if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+               DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
                return -EINVAL;
        }
 
        /* We can only change the number of devices in RAID1 right now */
        if ((rs->raid_type->level != 1) &&
            (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
-               DMERR("Reshaping arrays not yet supported.");
+               DMERR("Reshaping arrays not yet supported. (device count change)");
                return -EINVAL;
        }
 
@@ -1151,7 +1229,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        INIT_WORK(&rs->md.event_work, do_table_event);
        ti->private = rs;
-       ti->num_flush_requests = 1;
+       ti->num_flush_bios = 1;
 
        mutex_lock(&rs->md.reconfig_mutex);
        ret = md_run(&rs->md);
@@ -1201,8 +1279,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_SUBMITTED;
 }
 
-static int raid_status(struct dm_target *ti, status_type_t type,
-                      unsigned status_flags, char *result, unsigned maxlen)
+static void raid_status(struct dm_target *ti, status_type_t type,
+                       unsigned status_flags, char *result, unsigned maxlen)
 {
        struct raid_set *rs = ti->private;
        unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
@@ -1329,7 +1407,8 @@ static int raid_status(struct dm_target *ti, status_type_t type,
                               raid10_md_layout_to_copies(rs->md.layout));
 
                if (rs->print_flags & DMPF_RAID10_FORMAT)
-                       DMEMIT(" raid10_format near");
+                       DMEMIT(" raid10_format %s",
+                              raid10_md_layout_to_format(rs->md.layout));
 
                DMEMIT(" %d", rs->md.raid_disks);
                for (i = 0; i < rs->md.raid_disks; i++) {
@@ -1344,8 +1423,6 @@ static int raid_status(struct dm_target *ti, status_type_t type,
                                DMEMIT(" -");
                }
        }
-
-       return 0;
 }
 
 static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -1405,7 +1482,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 4, 1},
+       .version = {1, 4, 2},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
@@ -1420,6 +1497,10 @@ static struct target_type raid_target = {
 
 static int __init dm_raid_init(void)
 {
+       DMINFO("Loading target version %u.%u.%u",
+              raid_target.version[0],
+              raid_target.version[1],
+              raid_target.version[2]);
        return dm_register_target(&raid_target);
 }
 
index fa51918..d053098 100644 (file)
@@ -82,6 +82,9 @@ struct mirror_set {
        struct mirror mirror[0];
 };
 
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
+               "A percentage of time allocated for raid resynchronization");
+
 static void wakeup_mirrord(void *context)
 {
        struct mirror_set *ms = context;
@@ -1072,8 +1075,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        if (r)
                goto err_free_context;
 
-       ti->num_flush_requests = 1;
-       ti->num_discard_requests = 1;
+       ti->num_flush_bios = 1;
+       ti->num_discard_bios = 1;
        ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
        ti->discard_zeroes_data_unsupported = true;
 
@@ -1111,7 +1114,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto err_destroy_wq;
        }
 
-       ms->kcopyd_client = dm_kcopyd_client_create();
+       ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(ms->kcopyd_client)) {
                r = PTR_ERR(ms->kcopyd_client);
                goto err_destroy_wq;
@@ -1347,8 +1350,8 @@ static char device_status_char(struct mirror *m)
 }
 
 
-static int mirror_status(struct dm_target *ti, status_type_t type,
-                        unsigned status_flags, char *result, unsigned maxlen)
+static void mirror_status(struct dm_target *ti, status_type_t type,
+                         unsigned status_flags, char *result, unsigned maxlen)
 {
        unsigned int m, sz = 0;
        struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1383,8 +1386,6 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
                if (ms->features & DM_RAID1_HANDLE_ERRORS)
                        DMEMIT(" 1 handle_errors");
        }
-
-       return 0;
 }
 
 static int mirror_iterate_devices(struct dm_target *ti,
@@ -1403,7 +1404,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
 
 static struct target_type mirror_target = {
        .name    = "mirror",
-       .version = {1, 13, 1},
+       .version = {1, 13, 2},
        .module  = THIS_MODULE,
        .ctr     = mirror_ctr,
        .dtr     = mirror_dtr,
index 59fc18a..c0e0702 100644 (file)
@@ -124,6 +124,9 @@ struct dm_snapshot {
 #define RUNNING_MERGE          0
 #define SHUTDOWN_MERGE         1
 
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+               "A percentage of time allocated for copy on write");
+
 struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
 {
        return s->origin;
@@ -227,12 +230,11 @@ static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
 {
        struct dm_snap_tracked_chunk *c;
-       struct hlist_node *hn;
        int found = 0;
 
        spin_lock_irq(&s->tracked_chunk_lock);
 
-       hlist_for_each_entry(c, hn,
+       hlist_for_each_entry(c,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
@@ -1038,7 +1040,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
-       unsigned args_used, num_flush_requests = 1;
+       unsigned args_used, num_flush_bios = 1;
        fmode_t origin_mode = FMODE_READ;
 
        if (argc != 4) {
@@ -1048,7 +1050,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        if (dm_target_is_snapshot_merge(ti)) {
-               num_flush_requests = 2;
+               num_flush_bios = 2;
                origin_mode = FMODE_WRITE;
        }
 
@@ -1109,7 +1111,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad_hash_tables;
        }
 
-       s->kcopyd_client = dm_kcopyd_client_create();
+       s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
@@ -1128,7 +1130,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        spin_lock_init(&s->tracked_chunk_lock);
 
        ti->private = s;
-       ti->num_flush_requests = num_flush_requests;
+       ti->num_flush_bios = num_flush_bios;
        ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
 
        /* Add snapshot to the list of snapshots for this origin */
@@ -1692,7 +1694,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
        init_tracked_chunk(bio);
 
        if (bio->bi_rw & REQ_FLUSH) {
-               if (!dm_bio_get_target_request_nr(bio))
+               if (!dm_bio_get_target_bio_nr(bio))
                        bio->bi_bdev = s->origin->bdev;
                else
                        bio->bi_bdev = s->cow->bdev;
@@ -1837,8 +1839,8 @@ static void snapshot_merge_resume(struct dm_target *ti)
        start_merge(s);
 }
 
-static int snapshot_status(struct dm_target *ti, status_type_t type,
-                          unsigned status_flags, char *result, unsigned maxlen)
+static void snapshot_status(struct dm_target *ti, status_type_t type,
+                           unsigned status_flags, char *result, unsigned maxlen)
 {
        unsigned sz = 0;
        struct dm_snapshot *snap = ti->private;
@@ -1884,8 +1886,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
                                          maxlen - sz);
                break;
        }
-
-       return 0;
 }
 
 static int snapshot_iterate_devices(struct dm_target *ti,
@@ -2105,7 +2105,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        ti->private = dev;
-       ti->num_flush_requests = 1;
+       ti->num_flush_bios = 1;
 
        return 0;
 }
@@ -2139,8 +2139,8 @@ static void origin_resume(struct dm_target *ti)
        ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
 }
 
-static int origin_status(struct dm_target *ti, status_type_t type,
-                        unsigned status_flags, char *result, unsigned maxlen)
+static void origin_status(struct dm_target *ti, status_type_t type,
+                         unsigned status_flags, char *result, unsigned maxlen)
 {
        struct dm_dev *dev = ti->private;
 
@@ -2153,8 +2153,6 @@ static int origin_status(struct dm_target *ti, status_type_t type,
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }
-
-       return 0;
 }
 
 static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
@@ -2181,7 +2179,7 @@ static int origin_iterate_devices(struct dm_target *ti,
 
 static struct target_type origin_target = {
        .name    = "snapshot-origin",
-       .version = {1, 8, 0},
+       .version = {1, 8, 1},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
@@ -2194,7 +2192,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
        .name    = "snapshot",
-       .version = {1, 11, 0},
+       .version = {1, 11, 1},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
@@ -2307,3 +2305,5 @@ module_exit(dm_snapshot_exit);
 MODULE_DESCRIPTION(DM_NAME " snapshot target");
 MODULE_AUTHOR("Joe Thornber");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("dm-snapshot-origin");
+MODULE_ALIAS("dm-snapshot-merge");
index c89cde8..d8837d3 100644 (file)
@@ -160,9 +160,9 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        if (r)
                return r;
 
-       ti->num_flush_requests = stripes;
-       ti->num_discard_requests = stripes;
-       ti->num_write_same_requests = stripes;
+       ti->num_flush_bios = stripes;
+       ti->num_discard_bios = stripes;
+       ti->num_write_same_bios = stripes;
 
        sc->chunk_size = chunk_size;
        if (chunk_size & (chunk_size - 1))
@@ -276,19 +276,19 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 {
        struct stripe_c *sc = ti->private;
        uint32_t stripe;
-       unsigned target_request_nr;
+       unsigned target_bio_nr;
 
        if (bio->bi_rw & REQ_FLUSH) {
-               target_request_nr = dm_bio_get_target_request_nr(bio);
-               BUG_ON(target_request_nr >= sc->stripes);
-               bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
+               target_bio_nr = dm_bio_get_target_bio_nr(bio);
+               BUG_ON(target_bio_nr >= sc->stripes);
+               bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
                return DM_MAPIO_REMAPPED;
        }
        if (unlikely(bio->bi_rw & REQ_DISCARD) ||
            unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
-               target_request_nr = dm_bio_get_target_request_nr(bio);
-               BUG_ON(target_request_nr >= sc->stripes);
-               return stripe_map_range(sc, bio, target_request_nr);
+               target_bio_nr = dm_bio_get_target_bio_nr(bio);
+               BUG_ON(target_bio_nr >= sc->stripes);
+               return stripe_map_range(sc, bio, target_bio_nr);
        }
 
        stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
@@ -312,8 +312,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
  *
  */
 
-static int stripe_status(struct dm_target *ti, status_type_t type,
-                        unsigned status_flags, char *result, unsigned maxlen)
+static void stripe_status(struct dm_target *ti, status_type_t type,
+                         unsigned status_flags, char *result, unsigned maxlen)
 {
        struct stripe_c *sc = (struct stripe_c *) ti->private;
        char buffer[sc->stripes + 1];
@@ -340,7 +340,6 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
                            (unsigned long long)sc->stripe[i].physical_start);
                break;
        }
-       return 0;
 }
 
 static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
@@ -428,7 +427,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 
 static struct target_type stripe_target = {
        .name   = "striped",
-       .version = {1, 5, 0},
+       .version = {1, 5, 1},
        .module = THIS_MODULE,
        .ctr    = stripe_ctr,
        .dtr    = stripe_dtr,
index daf25d0..e50dad0 100644 (file)
@@ -217,7 +217,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 
        if (alloc_targets(t, num_targets)) {
                kfree(t);
-               t = NULL;
                return -ENOMEM;
        }
 
@@ -823,8 +822,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 
        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
 
-       if (!tgt->num_discard_requests && tgt->discards_supported)
-               DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
+       if (!tgt->num_discard_bios && tgt->discards_supported)
+               DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                       dm_device_name(t->md), type);
 
        return 0;
@@ -1360,7 +1359,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);
 
-               if (!ti->num_flush_requests)
+               if (!ti->num_flush_bios)
                        continue;
 
                if (ti->flush_supported)
@@ -1439,7 +1438,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);
 
-               if (!ti->num_write_same_requests)
+               if (!ti->num_write_same_bios)
                        return false;
 
                if (!ti->type->iterate_devices ||
@@ -1657,7 +1656,7 @@ bool dm_table_supports_discards(struct dm_table *t)
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);
 
-               if (!ti->num_discard_requests)
+               if (!ti->num_discard_bios)
                        continue;
 
                if (ti->discards_supported)
index 617d21a..37ba5db 100644 (file)
@@ -116,7 +116,7 @@ static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
        /*
         * Return error for discards instead of -EOPNOTSUPP
         */
-       tt->num_discard_requests = 1;
+       tt->num_discard_bios = 1;
 
        return 0;
 }
index 4d6e853..00cee02 100644 (file)
@@ -280,7 +280,7 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
        *t = v & ((1 << 24) - 1);
 }
 
-static void data_block_inc(void *context, void *value_le)
+static void data_block_inc(void *context, const void *value_le)
 {
        struct dm_space_map *sm = context;
        __le64 v_le;
@@ -292,7 +292,7 @@ static void data_block_inc(void *context, void *value_le)
        dm_sm_inc_block(sm, b);
 }
 
-static void data_block_dec(void *context, void *value_le)
+static void data_block_dec(void *context, const void *value_le)
 {
        struct dm_space_map *sm = context;
        __le64 v_le;
@@ -304,7 +304,7 @@ static void data_block_dec(void *context, void *value_le)
        dm_sm_dec_block(sm, b);
 }
 
-static int data_block_equal(void *context, void *value1_le, void *value2_le)
+static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
 {
        __le64 v1_le, v2_le;
        uint64_t b1, b2;
@@ -318,7 +318,7 @@ static int data_block_equal(void *context, void *value1_le, void *value2_le)
        return b1 == b2;
 }
 
-static void subtree_inc(void *context, void *value)
+static void subtree_inc(void *context, const void *value)
 {
        struct dm_btree_info *info = context;
        __le64 root_le;
@@ -329,7 +329,7 @@ static void subtree_inc(void *context, void *value)
        dm_tm_inc(info->tm, root);
 }
 
-static void subtree_dec(void *context, void *value)
+static void subtree_dec(void *context, const void *value)
 {
        struct dm_btree_info *info = context;
        __le64 root_le;
@@ -341,7 +341,7 @@ static void subtree_dec(void *context, void *value)
                DMERR("btree delete failed\n");
 }
 
-static int subtree_equal(void *context, void *value1_le, void *value2_le)
+static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
 {
        __le64 v1_le, v2_le;
        memcpy(&v1_le, value1_le, sizeof(v1_le));
index 5409607..009339d 100644 (file)
@@ -26,6 +26,9 @@
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
 
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+               "A percentage of time allocated for copy on write");
+
 /*
  * The block size of the device holding pool data must be
  * between 64KB and 1GB.
@@ -226,6 +229,78 @@ struct thin_c {
 
 /*----------------------------------------------------------------*/
 
+/*
+ * wake_worker() is used when new work is queued and when pool_resume is
+ * ready to continue deferred IO processing.
+ */
+static void wake_worker(struct pool *pool)
+{
+       queue_work(pool->wq, &pool->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
+                     struct dm_bio_prison_cell **cell_result)
+{
+       int r;
+       struct dm_bio_prison_cell *cell_prealloc;
+
+       /*
+        * Allocate a cell from the prison's mempool.
+        * This might block but it can't fail.
+        */
+       cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
+
+       r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
+       if (r)
+               /*
+                * We reused an old cell; we can get rid of
+                * the new one.
+                */
+               dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+
+       return r;
+}
+
+static void cell_release(struct pool *pool,
+                        struct dm_bio_prison_cell *cell,
+                        struct bio_list *bios)
+{
+       dm_cell_release(pool->prison, cell, bios);
+       dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_release_no_holder(struct pool *pool,
+                                  struct dm_bio_prison_cell *cell,
+                                  struct bio_list *bios)
+{
+       dm_cell_release_no_holder(pool->prison, cell, bios);
+       dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_defer_no_holder_no_free(struct thin_c *tc,
+                                        struct dm_bio_prison_cell *cell)
+{
+       struct pool *pool = tc->pool;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pool->lock, flags);
+       dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
+       spin_unlock_irqrestore(&pool->lock, flags);
+
+       wake_worker(pool);
+}
+
+static void cell_error(struct pool *pool,
+                      struct dm_bio_prison_cell *cell)
+{
+       dm_cell_error(pool->prison, cell);
+       dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+/*----------------------------------------------------------------*/
+
 /*
  * A global list of pools that uses a struct mapped_device as a key.
  */
@@ -330,14 +405,20 @@ static void requeue_io(struct thin_c *tc)
  * target.
  */
 
+static bool block_size_is_power_of_two(struct pool *pool)
+{
+       return pool->sectors_per_block_shift >= 0;
+}
+
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
+       struct pool *pool = tc->pool;
        sector_t block_nr = bio->bi_sector;
 
-       if (tc->pool->sectors_per_block_shift < 0)
-               (void) sector_div(block_nr, tc->pool->sectors_per_block);
+       if (block_size_is_power_of_two(pool))
+               block_nr >>= pool->sectors_per_block_shift;
        else
-               block_nr >>= tc->pool->sectors_per_block_shift;
+               (void) sector_div(block_nr, pool->sectors_per_block);
 
        return block_nr;
 }
@@ -348,12 +429,12 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
        sector_t bi_sector = bio->bi_sector;
 
        bio->bi_bdev = tc->pool_dev->bdev;
-       if (tc->pool->sectors_per_block_shift < 0)
-               bio->bi_sector = (block * pool->sectors_per_block) +
-                                sector_div(bi_sector, pool->sectors_per_block);
-       else
+       if (block_size_is_power_of_two(pool))
                bio->bi_sector = (block << pool->sectors_per_block_shift) |
                                (bi_sector & (pool->sectors_per_block - 1));
+       else
+               bio->bi_sector = (block * pool->sectors_per_block) +
+                                sector_div(bi_sector, pool->sectors_per_block);
 }
 
 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -420,15 +501,6 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
        issue(tc, bio);
 }
 
-/*
- * wake_worker() is used when new work is queued and when pool_resume is
- * ready to continue deferred IO processing.
- */
-static void wake_worker(struct pool *pool)
-{
-       queue_work(pool->wq, &pool->worker);
-}
-
 /*----------------------------------------------------------------*/
 
 /*
@@ -515,14 +587,14 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
        unsigned long flags;
 
        spin_lock_irqsave(&pool->lock, flags);
-       dm_cell_release(cell, &pool->deferred_bios);
+       cell_release(pool, cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
 
        wake_worker(pool);
 }
 
 /*
- * Same as cell_defer except it omits the original holder of the cell.
+ * Same as cell_defer above, except it omits the original holder of the cell.
  */
 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
@@ -530,7 +602,7 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
        unsigned long flags;
 
        spin_lock_irqsave(&pool->lock, flags);
-       dm_cell_release_no_holder(cell, &pool->deferred_bios);
+       cell_release_no_holder(pool, cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);
 
        wake_worker(pool);
@@ -540,13 +612,15 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
        if (m->bio)
                m->bio->bi_end_io = m->saved_bi_end_io;
-       dm_cell_error(m->cell);
+       cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
 }
+
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
        struct thin_c *tc = m->tc;
+       struct pool *pool = tc->pool;
        struct bio *bio;
        int r;
 
@@ -555,7 +629,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
                bio->bi_end_io = m->saved_bi_end_io;
 
        if (m->err) {
-               dm_cell_error(m->cell);
+               cell_error(pool, m->cell);
                goto out;
        }
 
@@ -567,7 +641,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
        if (r) {
                DMERR_LIMIT("dm_thin_insert_block() failed");
-               dm_cell_error(m->cell);
+               cell_error(pool, m->cell);
                goto out;
        }
 
@@ -585,7 +659,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 
 out:
        list_del(&m->list);
-       mempool_free(m, tc->pool->mapping_pool);
+       mempool_free(m, pool->mapping_pool);
 }
 
 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
@@ -736,7 +810,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
                        DMERR_LIMIT("dm_kcopyd_copy() failed");
-                       dm_cell_error(cell);
+                       cell_error(pool, cell);
                }
        }
 }
@@ -802,7 +876,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
                        DMERR_LIMIT("dm_kcopyd_zero() failed");
-                       dm_cell_error(cell);
+                       cell_error(pool, cell);
                }
        }
 }
@@ -908,13 +982,13 @@ static void retry_on_resume(struct bio *bio)
        spin_unlock_irqrestore(&pool->lock, flags);
 }
 
-static void no_space(struct dm_bio_prison_cell *cell)
+static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
        struct bio *bio;
        struct bio_list bios;
 
        bio_list_init(&bios);
-       dm_cell_release(cell, &bios);
+       cell_release(pool, cell, &bios);
 
        while ((bio = bio_list_pop(&bios)))
                retry_on_resume(bio);
@@ -932,7 +1006,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
        struct dm_thin_new_mapping *m;
 
        build_virtual_key(tc->td, block, &key);
-       if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+       if (bio_detain(tc->pool, &key, bio, &cell))
                return;
 
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -944,7 +1018,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                 * on this block.
                 */
                build_data_key(tc->td, lookup_result.block, &key2);
-               if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+               if (bio_detain(tc->pool, &key2, bio, &cell2)) {
                        cell_defer_no_holder(tc, cell);
                        break;
                }
@@ -1020,13 +1094,13 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                break;
 
        case -ENOSPC:
-               no_space(cell);
+               no_space(tc->pool, cell);
                break;
 
        default:
                DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                            __func__, r);
-               dm_cell_error(cell);
+               cell_error(tc->pool, cell);
                break;
        }
 }
@@ -1044,7 +1118,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
         * of being broken so we have nothing further to do here.
         */
        build_data_key(tc->td, lookup_result->block, &key);
-       if (dm_bio_detain(pool->prison, &key, bio, &cell))
+       if (bio_detain(pool, &key, bio, &cell))
                return;
 
        if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1065,12 +1139,13 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 {
        int r;
        dm_block_t data_block;
+       struct pool *pool = tc->pool;
 
        /*
         * Remap empty bios (flushes) immediately, without provisioning.
         */
        if (!bio->bi_size) {
-               inc_all_io_entry(tc->pool, bio);
+               inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);
 
                remap_and_issue(tc, bio, 0);
@@ -1097,14 +1172,14 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
                break;
 
        case -ENOSPC:
-               no_space(cell);
+               no_space(pool, cell);
                break;
 
        default:
                DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                            __func__, r);
-               set_pool_mode(tc->pool, PM_READ_ONLY);
-               dm_cell_error(cell);
+               set_pool_mode(pool, PM_READ_ONLY);
+               cell_error(pool, cell);
                break;
        }
 }
@@ -1112,6 +1187,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 static void process_bio(struct thin_c *tc, struct bio *bio)
 {
        int r;
+       struct pool *pool = tc->pool;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_bio_prison_cell *cell;
        struct dm_cell_key key;
@@ -1122,7 +1198,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
         * being provisioned so we have nothing further to do here.
         */
        build_virtual_key(tc->td, block, &key);
-       if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+       if (bio_detain(pool, &key, bio, &cell))
                return;
 
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1130,9 +1206,9 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
        case 0:
                if (lookup_result.shared) {
                        process_shared_bio(tc, bio, block, &lookup_result);
-                       cell_defer_no_holder(tc, cell);
+                       cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
                } else {
-                       inc_all_io_entry(tc->pool, bio);
+                       inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);
 
                        remap_and_issue(tc, bio, lookup_result.block);
@@ -1141,7 +1217,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
        case -ENODATA:
                if (bio_data_dir(bio) == READ && tc->origin_dev) {
-                       inc_all_io_entry(tc->pool, bio);
+                       inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);
 
                        remap_to_origin_and_issue(tc, bio);
@@ -1378,7 +1454,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_device *td = tc->td;
        struct dm_thin_lookup_result result;
-       struct dm_bio_prison_cell *cell1, *cell2;
+       struct dm_bio_prison_cell cell1, cell2;
+       struct dm_bio_prison_cell *cell_result;
        struct dm_cell_key key;
 
        thin_hook_bio(tc, bio);
@@ -1420,18 +1497,18 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                }
 
                build_virtual_key(tc->td, block, &key);
-               if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+               if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
                        return DM_MAPIO_SUBMITTED;
 
                build_data_key(tc->td, result.block, &key);
-               if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
-                       cell_defer_no_holder(tc, cell1);
+               if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
+                       cell_defer_no_holder_no_free(tc, &cell1);
                        return DM_MAPIO_SUBMITTED;
                }
 
                inc_all_io_entry(tc->pool, bio);
-               cell_defer_no_holder(tc, cell2);
-               cell_defer_no_holder(tc, cell1);
+               cell_defer_no_holder_no_free(tc, &cell2);
+               cell_defer_no_holder_no_free(tc, &cell1);
 
                remap(tc, bio, result.block);
                return DM_MAPIO_REMAPPED;
@@ -1636,7 +1713,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
                goto bad_prison;
        }
 
-       pool->copier = dm_kcopyd_client_create();
+       pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(pool->copier)) {
                r = PTR_ERR(pool->copier);
                *error = "Error creating pool's kcopyd client";
@@ -1938,7 +2015,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        pt->data_dev = data_dev;
        pt->low_water_blocks = low_water_blocks;
        pt->adjusted_pf = pt->requested_pf = pf;
-       ti->num_flush_requests = 1;
+       ti->num_flush_bios = 1;
 
        /*
         * Only need to enable discards if the pool should pass
@@ -1946,7 +2023,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         * processing will cause mappings to be removed from the btree.
         */
        if (pf.discard_enabled && pf.discard_passdown) {
-               ti->num_discard_requests = 1;
+               ti->num_discard_bios = 1;
 
                /*
                 * Setting 'discards_supported' circumvents the normal
@@ -2299,8 +2376,8 @@ static void emit_flags(struct pool_features *pf, char *result,
  *    <transaction id> <used metadata sectors>/<total metadata sectors>
  *    <used data sectors>/<total data sectors> <held metadata root>
  */
-static int pool_status(struct dm_target *ti, status_type_t type,
-                      unsigned status_flags, char *result, unsigned maxlen)
+static void pool_status(struct dm_target *ti, status_type_t type,
+                       unsigned status_flags, char *result, unsigned maxlen)
 {
        int r;
        unsigned sz = 0;
@@ -2326,32 +2403,41 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
                        (void) commit_or_fallback(pool);
 
-               r = dm_pool_get_metadata_transaction_id(pool->pmd,
-                                                       &transaction_id);
-               if (r)
-                       return r;
+               r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
+               if (r) {
+                       DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+                       goto err;
+               }
 
-               r = dm_pool_get_free_metadata_block_count(pool->pmd,
-                                                         &nr_free_blocks_metadata);
-               if (r)
-                       return r;
+               r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
+               if (r) {
+                       DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+                       goto err;
+               }
 
                r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
-               if (r)
-                       return r;
+               if (r) {
+                       DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+                       goto err;
+               }
 
-               r = dm_pool_get_free_block_count(pool->pmd,
-                                                &nr_free_blocks_data);
-               if (r)
-                       return r;
+               r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
+               if (r) {
+                       DMERR("dm_pool_get_free_block_count returned %d", r);
+                       goto err;
+               }
 
                r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
-               if (r)
-                       return r;
+               if (r) {
+                       DMERR("dm_pool_get_data_dev_size returned %d", r);
+                       goto err;
+               }
 
                r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
-               if (r)
-                       return r;
+               if (r) {
+                       DMERR("dm_pool_get_metadata_snap returned %d", r);
+                       goto err;
+               }
 
                DMEMIT("%llu %llu/%llu %llu/%llu ",
                       (unsigned long long)transaction_id,
@@ -2388,8 +2474,10 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                emit_flags(&pt->requested_pf, result, sz, maxlen);
                break;
        }
+       return;
 
-       return 0;
+err:
+       DMEMIT("Error");
 }
 
 static int pool_iterate_devices(struct dm_target *ti,
@@ -2414,11 +2502,6 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
 
-static bool block_size_is_power_of_two(struct pool *pool)
-{
-       return pool->sectors_per_block_shift >= 0;
-}
-
 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 {
        struct pool *pool = pt->pool;
@@ -2432,15 +2515,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
        if (pt->adjusted_pf.discard_passdown) {
                data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
                limits->discard_granularity = data_limits->discard_granularity;
-       } else if (block_size_is_power_of_two(pool))
+       } else
                limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-       else
-               /*
-                * Use largest power of 2 that is a factor of sectors_per_block
-                * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
-                */
-               limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
-                                                 DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2468,7 +2544,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 6, 0},
+       .version = {1, 6, 1},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2588,17 +2664,17 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        if (r)
                goto bad_thin_open;
 
-       ti->num_flush_requests = 1;
+       ti->num_flush_bios = 1;
        ti->flush_supported = true;
        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
 
        /* In case the pool supports discards, pass them on. */
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = true;
-               ti->num_discard_requests = 1;
+               ti->num_discard_bios = 1;
                ti->discard_zeroes_data_unsupported = true;
-               /* Discard requests must be split on a block boundary */
-               ti->split_discard_requests = true;
+               /* Discard bios must be split on a block boundary */
+               ti->split_discard_bios = true;
        }
 
        dm_put(pool_md);
@@ -2676,8 +2752,8 @@ static void thin_postsuspend(struct dm_target *ti)
 /*
  * <nr mapped sectors> <highest mapped sector>
  */
-static int thin_status(struct dm_target *ti, status_type_t type,
-                      unsigned status_flags, char *result, unsigned maxlen)
+static void thin_status(struct dm_target *ti, status_type_t type,
+                       unsigned status_flags, char *result, unsigned maxlen)
 {
        int r;
        ssize_t sz = 0;
@@ -2687,7 +2763,7 @@ static int thin_status(struct dm_target *ti, status_type_t type,
 
        if (get_pool_mode(tc->pool) == PM_FAIL) {
                DMEMIT("Fail");
-               return 0;
+               return;
        }
 
        if (!tc->td)
@@ -2696,12 +2772,16 @@ static int thin_status(struct dm_target *ti, status_type_t type,
                switch (type) {
                case STATUSTYPE_INFO:
                        r = dm_thin_get_mapped_count(tc->td, &mapped);
-                       if (r)
-                               return r;
+                       if (r) {
+                               DMERR("dm_thin_get_mapped_count returned %d", r);
+                               goto err;
+                       }
 
                        r = dm_thin_get_highest_mapped_block(tc->td, &highest);
-                       if (r < 0)
-                               return r;
+                       if (r < 0) {
+                               DMERR("dm_thin_get_highest_mapped_block returned %d", r);
+                               goto err;
+                       }
 
                        DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
                        if (r)
@@ -2721,7 +2801,10 @@ static int thin_status(struct dm_target *ti, status_type_t type,
                }
        }
 
-       return 0;
+       return;
+
+err:
+       DMEMIT("Error");
 }
 
 static int thin_iterate_devices(struct dm_target *ti,
@@ -2748,7 +2831,7 @@ static int thin_iterate_devices(struct dm_target *ti,
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 7, 0},
+       .version = {1, 7, 1},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
index 52cde98..6ad5383 100644 (file)
@@ -508,8 +508,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 /*
  * Status: V (valid) or C (corruption found)
  */
-static int verity_status(struct dm_target *ti, status_type_t type,
-                        unsigned status_flags, char *result, unsigned maxlen)
+static void verity_status(struct dm_target *ti, status_type_t type,
+                         unsigned status_flags, char *result, unsigned maxlen)
 {
        struct dm_verity *v = ti->private;
        unsigned sz = 0;
@@ -540,8 +540,6 @@ static int verity_status(struct dm_target *ti, status_type_t type,
                                DMEMIT("%02x", v->salt[x]);
                break;
        }
-
-       return 0;
 }
 
 static int verity_ioctl(struct dm_target *ti, unsigned cmd,
@@ -860,7 +858,7 @@ bad:
 
 static struct target_type verity_target = {
        .name           = "verity",
-       .version        = {1, 1, 0},
+       .version        = {1, 1, 1},
        .module         = THIS_MODULE,
        .ctr            = verity_ctr,
        .dtr            = verity_dtr,
index 69a5c3b..c99003e 100644 (file)
@@ -25,7 +25,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        /*
         * Silently drop discards, avoiding -EOPNOTSUPP.
         */
-       ti->num_discard_requests = 1;
+       ti->num_discard_bios = 1;
 
        return 0;
 }
index 314a0e2..7e46926 100644 (file)
@@ -163,7 +163,6 @@ struct mapped_device {
         * io objects are allocated from here.
         */
        mempool_t *io_pool;
-       mempool_t *tio_pool;
 
        struct bio_set *bs;
 
@@ -197,7 +196,6 @@ struct mapped_device {
  */
 struct dm_md_mempools {
        mempool_t *io_pool;
-       mempool_t *tio_pool;
        struct bio_set *bs;
 };
 
@@ -205,12 +203,6 @@ struct dm_md_mempools {
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_rq_tio_cache;
 
-/*
- * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
- * still used for _io_cache, I'm leaving this for a later cleanup
- */
-static struct kmem_cache *_rq_bio_info_cache;
-
 static int __init local_init(void)
 {
        int r = -ENOMEM;
@@ -224,13 +216,9 @@ static int __init local_init(void)
        if (!_rq_tio_cache)
                goto out_free_io_cache;
 
-       _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
-       if (!_rq_bio_info_cache)
-               goto out_free_rq_tio_cache;
-
        r = dm_uevent_init();
        if (r)
-               goto out_free_rq_bio_info_cache;
+               goto out_free_rq_tio_cache;
 
        _major = major;
        r = register_blkdev(_major, _name);
@@ -244,8 +232,6 @@ static int __init local_init(void)
 
 out_uevent_exit:
        dm_uevent_exit();
-out_free_rq_bio_info_cache:
-       kmem_cache_destroy(_rq_bio_info_cache);
 out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
 out_free_io_cache:
@@ -256,7 +242,6 @@ out_free_io_cache:
 
 static void local_exit(void)
 {
-       kmem_cache_destroy(_rq_bio_info_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);
@@ -318,7 +303,6 @@ static void __exit dm_exit(void)
        /*
         * Should be empty by this point.
         */
-       idr_remove_all(&_minor_idr);
        idr_destroy(&_minor_idr);
 }
 
@@ -449,12 +433,12 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
                                            gfp_t gfp_mask)
 {
-       return mempool_alloc(md->tio_pool, gfp_mask);
+       return mempool_alloc(md->io_pool, gfp_mask);
 }
 
 static void free_rq_tio(struct dm_rq_target_io *tio)
 {
-       mempool_free(tio, tio->md->tio_pool);
+       mempool_free(tio, tio->md->io_pool);
 }
 
 static int md_in_flight(struct mapped_device *md)
@@ -627,7 +611,6 @@ static void dec_pending(struct dm_io *io, int error)
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
-                       trace_block_bio_complete(md->queue, bio, io_error);
                        bio_endio(bio, io_error);
                }
        }
@@ -987,12 +970,13 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 }
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
-static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
+static void __map_bio(struct dm_target_io *tio)
 {
        int r;
        sector_t sector;
        struct mapped_device *md;
        struct bio *clone = &tio->clone;
+       struct dm_target *ti = tio->ti;
 
        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;
@@ -1033,32 +1017,54 @@ struct clone_info {
        unsigned short idx;
 };
 
+static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
+{
+       bio->bi_sector = sector;
+       bio->bi_size = to_bytes(len);
+}
+
+static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
+{
+       bio->bi_idx = idx;
+       bio->bi_vcnt = idx + bv_count;
+       bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+}
+
+static void clone_bio_integrity(struct bio *bio, struct bio *clone,
+                               unsigned short idx, unsigned len, unsigned offset,
+                               unsigned trim)
+{
+       if (!bio_integrity(bio))
+               return;
+
+       bio_integrity_clone(clone, bio, GFP_NOIO);
+
+       if (trim)
+               bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
+}
+
 /*
  * Creates a little bio that just does part of a bvec.
  */
-static void split_bvec(struct dm_target_io *tio, struct bio *bio,
-                      sector_t sector, unsigned short idx, unsigned int offset,
-                      unsigned int len, struct bio_set *bs)
+static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
+                           sector_t sector, unsigned short idx,
+                           unsigned offset, unsigned len)
 {
        struct bio *clone = &tio->clone;
        struct bio_vec *bv = bio->bi_io_vec + idx;
 
        *clone->bi_io_vec = *bv;
 
-       clone->bi_sector = sector;
+       bio_setup_sector(clone, sector, len);
+
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw;
        clone->bi_vcnt = 1;
-       clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;
        clone->bi_flags |= 1 << BIO_CLONED;
 
-       if (bio_integrity(bio)) {
-               bio_integrity_clone(clone, bio, GFP_NOIO);
-               bio_integrity_trim(clone,
-                                  bio_sector_offset(bio, idx, offset), len);
-       }
+       clone_bio_integrity(bio, clone, idx, len, offset, 1);
 }
 
 /*
@@ -1066,29 +1072,23 @@ static void split_bvec(struct dm_target_io *tio, struct bio *bio,
  */
 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
                      sector_t sector, unsigned short idx,
-                     unsigned short bv_count, unsigned int len,
-                     struct bio_set *bs)
+                     unsigned short bv_count, unsigned len)
 {
        struct bio *clone = &tio->clone;
+       unsigned trim = 0;
 
        __bio_clone(clone, bio);
-       clone->bi_sector = sector;
-       clone->bi_idx = idx;
-       clone->bi_vcnt = idx + bv_count;
-       clone->bi_size = to_bytes(len);
-       clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-       if (bio_integrity(bio)) {
-               bio_integrity_clone(clone, bio, GFP_NOIO);
-
-               if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
-                       bio_integrity_trim(clone,
-                                          bio_sector_offset(bio, idx, 0), len);
-       }
+       bio_setup_sector(clone, sector, len);
+       bio_setup_bv(clone, idx, bv_count);
+
+       if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
+               trim = 1;
+       clone_bio_integrity(bio, clone, idx, len, 0, trim);
 }
 
 static struct dm_target_io *alloc_tio(struct clone_info *ci,
-                                     struct dm_target *ti, int nr_iovecs)
+                                     struct dm_target *ti, int nr_iovecs,
+                                     unsigned target_bio_nr)
 {
        struct dm_target_io *tio;
        struct bio *clone;
@@ -1099,96 +1099,104 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
        tio->io = ci->io;
        tio->ti = ti;
        memset(&tio->info, 0, sizeof(tio->info));
-       tio->target_request_nr = 0;
+       tio->target_bio_nr = target_bio_nr;
 
        return tio;
 }
 
-static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
-                                  unsigned request_nr, sector_t len)
+static void __clone_and_map_simple_bio(struct clone_info *ci,
+                                      struct dm_target *ti,
+                                      unsigned target_bio_nr, sector_t len)
 {
-       struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
+       struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
        struct bio *clone = &tio->clone;
 
-       tio->target_request_nr = request_nr;
-
        /*
         * Discard requests require the bio's inline iovecs be initialized.
         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
         * and discard, so no need for concern about wasted bvec allocations.
         */
-
         __bio_clone(clone, ci->bio);
-       if (len) {
-               clone->bi_sector = ci->sector;
-               clone->bi_size = to_bytes(len);
-       }
+       if (len)
+               bio_setup_sector(clone, ci->sector, len);
 
-       __map_bio(ti, tio);
+       __map_bio(tio);
 }
 
-static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
-                                   unsigned num_requests, sector_t len)
+static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+                                 unsigned num_bios, sector_t len)
 {
-       unsigned request_nr;
+       unsigned target_bio_nr;
 
-       for (request_nr = 0; request_nr < num_requests; request_nr++)
-               __issue_target_request(ci, ti, request_nr, len);
+       for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
+               __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
 }
 
-static int __clone_and_map_empty_flush(struct clone_info *ci)
+static int __send_empty_flush(struct clone_info *ci)
 {
        unsigned target_nr = 0;
        struct dm_target *ti;
 
        BUG_ON(bio_has_data(ci->bio));
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
-               __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
+               __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
 
        return 0;
 }
 
-/*
- * Perform all io with a single clone.
- */
-static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+                                    sector_t sector, int nr_iovecs,
+                                    unsigned short idx, unsigned short bv_count,
+                                    unsigned offset, unsigned len,
+                                    unsigned split_bvec)
 {
        struct bio *bio = ci->bio;
        struct dm_target_io *tio;
+       unsigned target_bio_nr;
+       unsigned num_target_bios = 1;
 
-       tio = alloc_tio(ci, ti, bio->bi_max_vecs);
-       clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
-                 ci->sector_count, ci->md->bs);
-       __map_bio(ti, tio);
-       ci->sector_count = 0;
+       /*
+        * Does the target want to receive duplicate copies of the bio?
+        */
+       if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
+               num_target_bios = ti->num_write_bios(ti, bio);
+
+       for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
+               tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
+               if (split_bvec)
+                       clone_split_bio(tio, bio, sector, idx, offset, len);
+               else
+                       clone_bio(tio, bio, sector, idx, bv_count, len);
+               __map_bio(tio);
+       }
 }
 
-typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);
+typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
 
-static unsigned get_num_discard_requests(struct dm_target *ti)
+static unsigned get_num_discard_bios(struct dm_target *ti)
 {
-       return ti->num_discard_requests;
+       return ti->num_discard_bios;
 }
 
-static unsigned get_num_write_same_requests(struct dm_target *ti)
+static unsigned get_num_write_same_bios(struct dm_target *ti)
 {
-       return ti->num_write_same_requests;
+       return ti->num_write_same_bios;
 }
 
 typedef bool (*is_split_required_fn)(struct dm_target *ti);
 
 static bool is_split_required_for_discard(struct dm_target *ti)
 {
-       return ti->split_discard_requests;
+       return ti->split_discard_bios;
 }
 
-static int __clone_and_map_changing_extent_only(struct clone_info *ci,
-                                               get_num_requests_fn get_num_requests,
-                                               is_split_required_fn is_split_required)
+static int __send_changing_extent_only(struct clone_info *ci,
+                                      get_num_bios_fn get_num_bios,
+                                      is_split_required_fn is_split_required)
 {
        struct dm_target *ti;
        sector_t len;
-       unsigned num_requests;
+       unsigned num_bios;
 
        do {
                ti = dm_table_find_target(ci->map, ci->sector);
@@ -1201,8 +1209,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
                 * reconfiguration might also have changed that since the
                 * check was performed.
                 */
-               num_requests = get_num_requests ? get_num_requests(ti) : 0;
-               if (!num_requests)
+               num_bios = get_num_bios ? get_num_bios(ti) : 0;
+               if (!num_bios)
                        return -EOPNOTSUPP;
 
                if (is_split_required && !is_split_required(ti))
@@ -1210,7 +1218,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
                else
                        len = min(ci->sector_count, max_io_len(ci->sector, ti));
 
-               __issue_target_requests(ci, ti, num_requests, len);
+               __send_duplicate_bios(ci, ti, num_bios, len);
 
                ci->sector += len;
        } while (ci->sector_count -= len);
@@ -1218,108 +1226,129 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
        return 0;
 }
 
-static int __clone_and_map_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci)
 {
-       return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
-                                                   is_split_required_for_discard);
+       return __send_changing_extent_only(ci, get_num_discard_bios,
+                                          is_split_required_for_discard);
 }
 
-static int __clone_and_map_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci)
 {
-       return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
+       return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
 }
 
-static int __clone_and_map(struct clone_info *ci)
+/*
+ * Find maximum number of sectors / bvecs we can process with a single bio.
+ */
+static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
 {
        struct bio *bio = ci->bio;
-       struct dm_target *ti;
-       sector_t len = 0, max;
-       struct dm_target_io *tio;
+       sector_t bv_len, total_len = 0;
 
-       if (unlikely(bio->bi_rw & REQ_DISCARD))
-               return __clone_and_map_discard(ci);
-       else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
-               return __clone_and_map_write_same(ci);
+       for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
+               bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
 
-       ti = dm_table_find_target(ci->map, ci->sector);
-       if (!dm_target_is_valid(ti))
-               return -EIO;
-
-       max = max_io_len(ci->sector, ti);
+               if (bv_len > max)
+                       break;
 
-       if (ci->sector_count <= max) {
-               /*
-                * Optimise for the simple case where we can do all of
-                * the remaining io with a single clone.
-                */
-               __clone_and_map_simple(ci, ti);
+               max -= bv_len;
+               total_len += bv_len;
+       }
 
-       } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
-               /*
-                * There are some bvecs that don't span targets.
-                * Do as many of these as possible.
-                */
-               int i;
-               sector_t remaining = max;
-               sector_t bv_len;
+       return total_len;
+}
 
-               for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
-                       bv_len = to_sector(bio->bi_io_vec[i].bv_len);
+static int __split_bvec_across_targets(struct clone_info *ci,
+                                      struct dm_target *ti, sector_t max)
+{
+       struct bio *bio = ci->bio;
+       struct bio_vec *bv = bio->bi_io_vec + ci->idx;
+       sector_t remaining = to_sector(bv->bv_len);
+       unsigned offset = 0;
+       sector_t len;
 
-                       if (bv_len > remaining)
-                               break;
+       do {
+               if (offset) {
+                       ti = dm_table_find_target(ci->map, ci->sector);
+                       if (!dm_target_is_valid(ti))
+                               return -EIO;
 
-                       remaining -= bv_len;
-                       len += bv_len;
+                       max = max_io_len(ci->sector, ti);
                }
 
-               tio = alloc_tio(ci, ti, bio->bi_max_vecs);
-               clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len,
-                         ci->md->bs);
-               __map_bio(ti, tio);
+               len = min(remaining, max);
+
+               __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
+                                        bv->bv_offset + offset, len, 1);
 
                ci->sector += len;
                ci->sector_count -= len;
-               ci->idx = i;
+               offset += to_bytes(len);
+       } while (remaining -= len);
 
-       } else {
-               /*
-                * Handle a bvec that must be split between two or more targets.
-                */
-               struct bio_vec *bv = bio->bi_io_vec + ci->idx;
-               sector_t remaining = to_sector(bv->bv_len);
-               unsigned int offset = 0;
+       ci->idx++;
 
-               do {
-                       if (offset) {
-                               ti = dm_table_find_target(ci->map, ci->sector);
-                               if (!dm_target_is_valid(ti))
-                                       return -EIO;
+       return 0;
+}
 
-                               max = max_io_len(ci->sector, ti);
-                       }
+/*
+ * Select the correct strategy for processing a non-flush bio.
+ */
+static int __split_and_process_non_flush(struct clone_info *ci)
+{
+       struct bio *bio = ci->bio;
+       struct dm_target *ti;
+       sector_t len, max;
+       int idx;
 
-                       len = min(remaining, max);
+       if (unlikely(bio->bi_rw & REQ_DISCARD))
+               return __send_discard(ci);
+       else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+               return __send_write_same(ci);
 
-                       tio = alloc_tio(ci, ti, 1);
-                       split_bvec(tio, bio, ci->sector, ci->idx,
-                                  bv->bv_offset + offset, len, ci->md->bs);
+       ti = dm_table_find_target(ci->map, ci->sector);
+       if (!dm_target_is_valid(ti))
+               return -EIO;
 
-                       __map_bio(ti, tio);
+       max = max_io_len(ci->sector, ti);
+
+       /*
+        * Optimise for the simple case where we can do all of
+        * the remaining io with a single clone.
+        */
+       if (ci->sector_count <= max) {
+               __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+                                        ci->idx, bio->bi_vcnt - ci->idx, 0,
+                                        ci->sector_count, 0);
+               ci->sector_count = 0;
+               return 0;
+       }
+
+       /*
+        * There are some bvecs that don't span targets.
+        * Do as many of these as possible.
+        */
+       if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
+               len = __len_within_target(ci, max, &idx);
 
-                       ci->sector += len;
-                       ci->sector_count -= len;
-                       offset += to_bytes(len);
-               } while (remaining -= len);
+               __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+                                        ci->idx, idx - ci->idx, 0, len, 0);
 
-               ci->idx++;
+               ci->sector += len;
+               ci->sector_count -= len;
+               ci->idx = idx;
+
+               return 0;
        }
 
-       return 0;
+       /*
+        * Handle a bvec that must be split between two or more targets.
+        */
+       return __split_bvec_across_targets(ci, ti, max);
 }
 
 /*
- * Split the bio into several clones and submit it to targets.
+ * Entry point to split a bio into clones and submit them to the targets.
  */
 static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 {
@@ -1343,16 +1372,17 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
        ci.idx = bio->bi_idx;
 
        start_io_acct(ci.io);
+
        if (bio->bi_rw & REQ_FLUSH) {
                ci.bio = &ci.md->flush_bio;
                ci.sector_count = 0;
-               error = __clone_and_map_empty_flush(&ci);
+               error = __send_empty_flush(&ci);
                /* dec_pending submits any data associated with flush */
        } else {
                ci.bio = bio;
                ci.sector_count = bio_sectors(bio);
                while (ci.sector_count && !error)
-                       error = __clone_and_map(&ci);
+                       error = __split_and_process_non_flush(&ci);
        }
 
        /* drop the extra reference count */
@@ -1756,62 +1786,38 @@ static void free_minor(int minor)
  */
 static int specific_minor(int minor)
 {
-       int r, m;
+       int r;
 
        if (minor >= (1 << MINORBITS))
                return -EINVAL;
 
-       r = idr_pre_get(&_minor_idr, GFP_KERNEL);
-       if (!r)
-               return -ENOMEM;
-
+       idr_preload(GFP_KERNEL);
        spin_lock(&_minor_lock);
 
-       if (idr_find(&_minor_idr, minor)) {
-               r = -EBUSY;
-               goto out;
-       }
+       r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
 
-       r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
-       if (r)
-               goto out;
-
-       if (m != minor) {
-               idr_remove(&_minor_idr, m);
-               r = -EBUSY;
-               goto out;
-       }
-
-out:
        spin_unlock(&_minor_lock);
-       return r;
+       idr_preload_end();
+       if (r < 0)
+               return r == -ENOSPC ? -EBUSY : r;
+       return 0;
 }
 
 static int next_free_minor(int *minor)
 {
-       int r, m;
-
-       r = idr_pre_get(&_minor_idr, GFP_KERNEL);
-       if (!r)
-               return -ENOMEM;
+       int r;
 
+       idr_preload(GFP_KERNEL);
        spin_lock(&_minor_lock);
 
-       r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
-       if (r)
-               goto out;
+       r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
 
-       if (m >= (1 << MINORBITS)) {
-               idr_remove(&_minor_idr, m);
-               r = -ENOSPC;
-               goto out;
-       }
-
-       *minor = m;
-
-out:
        spin_unlock(&_minor_lock);
-       return r;
+       idr_preload_end();
+       if (r < 0)
+               return r;
+       *minor = r;
+       return 0;
 }
 
 static const struct block_device_operations dm_blk_dops;
@@ -1949,8 +1955,6 @@ static void free_dev(struct mapped_device *md)
        unlock_fs(md);
        bdput(md->bdev);
        destroy_workqueue(md->wq);
-       if (md->tio_pool)
-               mempool_destroy(md->tio_pool);
        if (md->io_pool)
                mempool_destroy(md->io_pool);
        if (md->bs)
@@ -1973,24 +1977,33 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
        struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-       if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
-               /*
-                * The md already has necessary mempools. Reload just the
-                * bioset because front_pad may have changed because
-                * a different table was loaded.
-                */
-               bioset_free(md->bs);
-               md->bs = p->bs;
-               p->bs = NULL;
+       if (md->io_pool && md->bs) {
+               /* The md already has necessary mempools. */
+               if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
+                       /*
+                        * Reload bioset because front_pad may have changed
+                        * because a different table was loaded.
+                        */
+                       bioset_free(md->bs);
+                       md->bs = p->bs;
+                       p->bs = NULL;
+               } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
+                       /*
+                        * There's no need to reload with request-based dm
+                        * because the size of front_pad doesn't change.
+                        * Note for future: If you are to reload bioset,
+                        * prep-ed requests in the queue may refer
+                        * to bio from the old bioset, so you must walk
+                        * through the queue to unprep.
+                        */
+               }
                goto out;
        }
 
-       BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+       BUG_ON(!p || md->io_pool || md->bs);
 
        md->io_pool = p->io_pool;
        p->io_pool = NULL;
-       md->tio_pool = p->tio_pool;
-       p->tio_pool = NULL;
        md->bs = p->bs;
        p->bs = NULL;
 
@@ -2421,7 +2434,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
-       struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
+       struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
        struct queue_limits limits;
        int r;
 
@@ -2444,10 +2457,12 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
                dm_table_put(live_map);
        }
 
-       r = dm_calculate_queue_limits(table, &limits);
-       if (r) {
-               map = ERR_PTR(r);
-               goto out;
+       if (!live_map) {
+               r = dm_calculate_queue_limits(table, &limits);
+               if (r) {
+                       map = ERR_PTR(r);
+                       goto out;
+               }
        }
 
        map = __bind(md, table, &limits);
@@ -2745,52 +2760,42 @@ EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
 {
-       struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
-       unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
+       struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+       struct kmem_cache *cachep;
+       unsigned int pool_size;
+       unsigned int front_pad;
 
        if (!pools)
                return NULL;
 
-       per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+       if (type == DM_TYPE_BIO_BASED) {
+               cachep = _io_cache;
+               pool_size = 16;
+               front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+       } else if (type == DM_TYPE_REQUEST_BASED) {
+               cachep = _rq_tio_cache;
+               pool_size = MIN_IOS;
+               front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+               /* per_bio_data_size is not used. See __bind_mempools(). */
+               WARN_ON(per_bio_data_size != 0);
+       } else
+               goto out;
 
-       pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
-                        mempool_create_slab_pool(MIN_IOS, _io_cache) :
-                        mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+       pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
        if (!pools->io_pool)
-               goto free_pools_and_out;
-
-       pools->tio_pool = NULL;
-       if (type == DM_TYPE_REQUEST_BASED) {
-               pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
-               if (!pools->tio_pool)
-                       goto free_io_pool_and_out;
-       }
+               goto out;
 
-       pools->bs = (type == DM_TYPE_BIO_BASED) ?
-               bioset_create(pool_size,
-                             per_bio_data_size + offsetof(struct dm_target_io, clone)) :
-               bioset_create(pool_size,
-                             offsetof(struct dm_rq_clone_bio_info, clone));
+       pools->bs = bioset_create(pool_size, front_pad);
        if (!pools->bs)
-               goto free_tio_pool_and_out;
+               goto out;
 
        if (integrity && bioset_integrity_create(pools->bs, pool_size))
-               goto free_bioset_and_out;
+               goto out;
 
        return pools;
 
-free_bioset_and_out:
-       bioset_free(pools->bs);
-
-free_tio_pool_and_out:
-       if (pools->tio_pool)
-               mempool_destroy(pools->tio_pool);
-
-free_io_pool_and_out:
-       mempool_destroy(pools->io_pool);
-
-free_pools_and_out:
-       kfree(pools);
+out:
+       dm_free_md_mempools(pools);
 
        return NULL;
 }
@@ -2803,9 +2808,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
        if (pools->io_pool)
                mempool_destroy(pools->io_pool);
 
-       if (pools->tio_pool)
-               mempool_destroy(pools->tio_pool);
-
        if (pools->bs)
                bioset_free(pools->bs);
 
index 3db3d1b..fcb878f 100644 (file)
@@ -307,6 +307,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
                bio_io_error(bio);
                return;
        }
+       if (mddev->ro == 1 && unlikely(rw == WRITE)) {
+               bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
+               return;
+       }
        smp_rmb(); /* Ensure implications of  'active' are visible */
        rcu_read_lock();
        if (mddev->suspended) {
@@ -2994,6 +2998,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
                } else if (!sectors)
                        sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
                                rdev->data_offset;
+               if (!my_mddev->pers->resize)
+                       /* Cannot change size for RAID0 or Linear etc */
+                       return -EINVAL;
        }
        if (sectors < my_mddev->dev_sectors)
                return -EINVAL; /* component must fit device */
@@ -6525,7 +6532,17 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        mddev->ro = 0;
                        sysfs_notify_dirent_safe(mddev->sysfs_state);
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-                       md_wakeup_thread(mddev->thread);
+                       /* mddev_unlock will wake thread */
+                       /* If a device failed while we were read-only, we
+                        * need to make sure the metadata is updated now.
+                        */
+                       if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+                               mddev_unlock(mddev);
+                               wait_event(mddev->sb_wait,
+                                          !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
+                                          !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+                               mddev_lock(mddev);
+                       }
                } else {
                        err = -EROFS;
                        goto abort_unlock;
index ceb3590..19b2687 100644 (file)
@@ -1,6 +1,6 @@
 config DM_PERSISTENT_DATA
        tristate
-       depends on BLK_DEV_DM && EXPERIMENTAL
+       depends on BLK_DEV_DM
        select LIBCRC32C
        select DM_BUFIO
        ---help---
index d8e7cb7..ff52879 100644 (file)
@@ -1,5 +1,7 @@
 obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o
 dm-persistent-data-objs := \
+       dm-array.o \
+       dm-bitset.o \
        dm-block-manager.o \
        dm-space-map-common.o \
        dm-space-map-disk.o \
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
new file mode 100644 (file)
index 0000000..172147e
--- /dev/null
@@ -0,0 +1,808 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-array.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "array"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The array is implemented as a fully populated btree, which points to
+ * blocks that contain the packed values.  This is more space efficient
+ * than just using a btree since we don't store 1 key per value.
+ */
+struct array_block {
+       __le32 csum;
+       __le32 max_entries;
+       __le32 nr_entries;
+       __le32 value_size;
+       __le64 blocknr; /* Block this node is supposed to live in. */
+} __packed;
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Validator methods.  As usual we calculate a checksum, and also write the
+ * block location into the header (paranoia about ssds remapping areas by
+ * mistake).
+ */
+#define CSUM_XOR 595846735
+
+static void array_block_prepare_for_write(struct dm_block_validator *v,
+                                         struct dm_block *b,
+                                         size_t size_of_block)
+{
+       struct array_block *bh_le = dm_block_data(b);
+
+       bh_le->blocknr = cpu_to_le64(dm_block_location(b));
+       bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
+                                                size_of_block - sizeof(__le32),
+                                                CSUM_XOR));
+}
+
+static int array_block_check(struct dm_block_validator *v,
+                            struct dm_block *b,
+                            size_t size_of_block)
+{
+       struct array_block *bh_le = dm_block_data(b);
+       __le32 csum_disk;
+
+       if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
+               DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
+                           (unsigned long long) le64_to_cpu(bh_le->blocknr),
+                           (unsigned long long) dm_block_location(b));
+               return -ENOTBLK;
+       }
+
+       csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
+                                              size_of_block - sizeof(__le32),
+                                              CSUM_XOR));
+       if (csum_disk != bh_le->csum) {
+               DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
+                           (unsigned) le32_to_cpu(csum_disk),
+                           (unsigned) le32_to_cpu(bh_le->csum));
+               return -EILSEQ;
+       }
+
+       return 0;
+}
+
+static struct dm_block_validator array_validator = {
+       .name = "array",
+       .prepare_for_write = array_block_prepare_for_write,
+       .check = array_block_check
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Functions for manipulating the array blocks.
+ */
+
+/*
+ * Returns a pointer to a value within an array block.
+ *
+ * index - The index into _this_ specific block.
+ */
+static void *element_at(struct dm_array_info *info, struct array_block *ab,
+                       unsigned index)
+{
+       unsigned char *entry = (unsigned char *) (ab + 1);
+
+       entry += index * info->value_type.size;
+
+       return entry;
+}
+
+/*
+ * Utility function that calls one of the value_type methods on every value
+ * in an array block.
+ */
+static void on_entries(struct dm_array_info *info, struct array_block *ab,
+                      void (*fn)(void *, const void *))
+{
+       unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);
+
+       for (i = 0; i < nr_entries; i++)
+               fn(info->value_type.context, element_at(info, ab, i));
+}
+
+/*
+ * Increment every value in an array block.
+ */
+static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
+{
+       struct dm_btree_value_type *vt = &info->value_type;
+
+       if (vt->inc)
+               on_entries(info, ab, vt->inc);
+}
+
+/*
+ * Decrement every value in an array block.
+ */
+static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
+{
+       struct dm_btree_value_type *vt = &info->value_type;
+
+       if (vt->dec)
+               on_entries(info, ab, vt->dec);
+}
+
+/*
+ * Each array block can hold this many values.
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
+{
+       return (size_of_block - sizeof(struct array_block)) / value_size;
+}
+
+/*
+ * Allocate a new array block.  The caller will need to unlock block.
+ */
+static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
+                       uint32_t max_entries,
+                       struct dm_block **block, struct array_block **ab)
+{
+       int r;
+
+       r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
+       if (r)
+               return r;
+
+       (*ab) = dm_block_data(*block);
+       (*ab)->max_entries = cpu_to_le32(max_entries);
+       (*ab)->nr_entries = cpu_to_le32(0);
+       (*ab)->value_size = cpu_to_le32(info->value_type.size);
+
+       return 0;
+}
+
+/*
+ * Pad an array block out with a particular value.  Every instance will
+ * cause an increment of the value_type.  new_nr must always be more than
+ * the current number of entries.
+ */
+static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
+                       const void *value, unsigned new_nr)
+{
+       unsigned i;
+       uint32_t nr_entries;
+       struct dm_btree_value_type *vt = &info->value_type;
+
+       BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+       BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));
+
+       nr_entries = le32_to_cpu(ab->nr_entries);
+       for (i = nr_entries; i < new_nr; i++) {
+               if (vt->inc)
+                       vt->inc(vt->context, value);
+               memcpy(element_at(info, ab, i), value, vt->size);
+       }
+       ab->nr_entries = cpu_to_le32(new_nr);
+}
+
+/*
+ * Remove some entries from the back of an array block.  Every value
+ * removed will be decremented.  new_nr must be <= the current number of
+ * entries.
+ */
+static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
+                       unsigned new_nr)
+{
+       unsigned i;
+       uint32_t nr_entries;
+       struct dm_btree_value_type *vt = &info->value_type;
+
+       BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+       BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));
+
+       nr_entries = le32_to_cpu(ab->nr_entries);
+       for (i = nr_entries; i > new_nr; i--)
+               if (vt->dec)
+                       vt->dec(vt->context, element_at(info, ab, i - 1));
+       ab->nr_entries = cpu_to_le32(new_nr);
+}
+
+/*
+ * Read locks a block, and coerces it to an array block.  The caller must
+ * unlock 'block' when finished.
+ */
+static int get_ablock(struct dm_array_info *info, dm_block_t b,
+                     struct dm_block **block, struct array_block **ab)
+{
+       int r;
+
+       r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
+       if (r)
+               return r;
+
+       *ab = dm_block_data(*block);
+       return 0;
+}
+
+/*
+ * Unlocks an array block.
+ */
+static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+{
+       return dm_tm_unlock(info->btree_info.tm, block);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Btree manipulation.
+ */
+
+/*
+ * Looks up an array block in the btree, and then read locks it.
+ *
+ * index is the index of the index of the array_block, (ie. the array index
+ * / max_entries).
+ */
+static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
+                        unsigned index, struct dm_block **block,
+                        struct array_block **ab)
+{
+       int r;
+       uint64_t key = index;
+       __le64 block_le;
+
+       r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
+       if (r)
+               return r;
+
+       return get_ablock(info, le64_to_cpu(block_le), block, ab);
+}
+
+/*
+ * Insert an array block into the btree.  The block is _not_ unlocked.
+ */
+static int insert_ablock(struct dm_array_info *info, uint64_t index,
+                        struct dm_block *block, dm_block_t *root)
+{
+       __le64 block_le = cpu_to_le64(dm_block_location(block));
+
+       __dm_bless_for_disk(block_le);
+       return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
+}
+
+/*
+ * Looks up an array block in the btree.  Then shadows it, and updates the
+ * btree to point to this new shadow.  'root' is an input/output parameter
+ * for both the current root block, and the new one.
+ */
+static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+                        unsigned index, struct dm_block **block,
+                        struct array_block **ab)
+{
+       int r, inc;
+       uint64_t key = index;
+       dm_block_t b;
+       __le64 block_le;
+
+       /*
+        * lookup
+        */
+       r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
+       if (r)
+               return r;
+       b = le64_to_cpu(block_le);
+
+       /*
+        * shadow
+        */
+       r = dm_tm_shadow_block(info->btree_info.tm, b,
+                              &array_validator, block, &inc);
+       if (r)
+               return r;
+
+       *ab = dm_block_data(*block);
+       if (inc)
+               inc_ablock_entries(info, *ab);
+
+       /*
+        * Reinsert.
+        *
+        * The shadow op will often be a noop.  Only insert if it really
+        * copied data.
+        */
+       if (dm_block_location(*block) != b)
+               r = insert_ablock(info, index, *block, root);
+
+       return r;
+}
+
+/*
+ * Allocate a new array block, and fill it with some values.
+ */
+static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
+                            uint32_t max_entries,
+                            unsigned block_index, uint32_t nr,
+                            const void *value, dm_block_t *root)
+{
+       int r;
+       struct dm_block *block;
+       struct array_block *ab;
+
+       r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
+       if (r)
+               return r;
+
+       fill_ablock(info, ab, value, nr);
+       r = insert_ablock(info, block_index, block, root);
+       unlock_ablock(info, block);
+
+       return r;
+}
+
+static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
+                              unsigned begin_block, unsigned end_block,
+                              unsigned max_entries, const void *value,
+                              dm_block_t *root)
+{
+       int r = 0;
+
+       for (; !r && begin_block != end_block; begin_block++)
+               r = insert_new_ablock(info, size_of_block, max_entries, begin_block, max_entries, value, root);
+
+       return r;
+}
+
+/*
+ * There are a bunch of functions involved with resizing an array.  This
+ * structure holds information that is commonly needed by them.  Purely here
+ * to reduce parameter count.
+ */
+struct resize {
+       /*
+        * Describes the array.
+        */
+       struct dm_array_info *info;
+
+       /*
+        * The current root of the array.  This gets updated.
+        */
+       dm_block_t root;
+
+       /*
+        * Metadata block size.  Used to calculate the nr entries in an
+        * array block.
+        */
+       size_t size_of_block;
+
+       /*
+        * Maximum nr entries in an array block.
+        */
+       unsigned max_entries;
+
+       /*
+        * nr of completely full blocks in the array.
+        *
+        * 'old' refers to before the resize, 'new' after.
+        */
+       unsigned old_nr_full_blocks, new_nr_full_blocks;
+
+       /*
+        * Number of entries in the final block.  0 iff only full blocks in
+        * the array.
+        */
+       unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
+
+       /*
+        * The default value used when growing the array.
+        */
+       const void *value;
+};
+
+/*
+ * Removes a consecutive set of array blocks from the btree.  The values
+ * in block are decremented as a side effect of the btree remove.
+ *
+ * begin_index - the index of the first array block to remove.
+ * end_index - the one-past-the-end value.  ie. this block is not removed.
+ */
+static int drop_blocks(struct resize *resize, unsigned begin_index,
+                      unsigned end_index)
+{
+       int r;
+
+       while (begin_index != end_index) {
+               uint64_t key = begin_index++;
+               /* The btree's value_type dec (block_dec) releases the ablock. */
+               r = dm_btree_remove(&resize->info->btree_info, resize->root,
+                                   &key, &resize->root);
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
+/*
+ * Calculates how many blocks are needed for an array: all the full blocks,
+ * plus one partial tail block if any entries are left over.
+ */
+static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
+                                      unsigned nr_entries_in_last_block)
+{
+       return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
+}
+
+/*
+ * Shrink an array.
+ */
+static int shrink(struct resize *resize)
+{
+       int r;
+       unsigned begin, end;
+       struct dm_block *block;
+       struct array_block *ab;
+
+       /*
+        * Lose some blocks from the back?
+        */
+       if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
+               begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
+                                              resize->new_nr_entries_in_last_block);
+               end = total_nr_blocks_needed(resize->old_nr_full_blocks,
+                                            resize->old_nr_entries_in_last_block);
+
+               /* Drop blocks [begin, end); refcounts fall via block_dec. */
+               r = drop_blocks(resize, begin, end);
+               if (r)
+                       return r;
+       }
+
+       /*
+        * Trim the new tail block
+        */
+       if (resize->new_nr_entries_in_last_block) {
+               /* Shadow before mutating; resize->root is updated in place. */
+               r = shadow_ablock(resize->info, &resize->root,
+                                 resize->new_nr_full_blocks, &block, &ab);
+               if (r)
+                       return r;
+
+               trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
+               unlock_ablock(resize->info, block);
+       }
+
+       return 0;
+}
+
+/*
+ * Grow an array.
+ */
+static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
+{
+       int r;
+       struct dm_block *block;
+       struct array_block *ab;
+
+       /* Shadow the existing partial tail block so we can append to it. */
+       r = shadow_ablock(resize->info, &resize->root,
+                         resize->old_nr_full_blocks, &block, &ab);
+       if (r)
+               return r;
+
+       /* Pad with the default value up to new_nr_entries. */
+       fill_ablock(resize->info, ab, resize->value, new_nr_entries);
+       unlock_ablock(resize->info, block);
+
+       return r;
+}
+
+/* Appends a brand new, partially filled tail block to the array. */
+static int grow_add_tail_block(struct resize *resize)
+{
+       return insert_new_ablock(resize->info, resize->size_of_block,
+                                resize->max_entries,
+                                resize->new_nr_full_blocks,
+                                resize->new_nr_entries_in_last_block,
+                                resize->value, &resize->root);
+}
+
+/* Grow when the new size needs more array blocks than the old one. */
+static int grow_needs_more_blocks(struct resize *resize)
+{
+       int r;
+
+       /* First fill out the old, partial tail block (if there is one) ... */
+       if (resize->old_nr_entries_in_last_block > 0) {
+               r = grow_extend_tail_block(resize, resize->max_entries);
+               if (r)
+                       return r;
+       }
+
+       /* ... then insert the run of new, completely full blocks ... */
+       r = insert_full_ablocks(resize->info, resize->size_of_block,
+                               resize->old_nr_full_blocks,
+                               resize->new_nr_full_blocks,
+                               resize->max_entries, resize->value,
+                               &resize->root);
+       if (r)
+               return r;
+
+       /* ... and finally a new partial tail block, if one is needed. */
+       if (resize->new_nr_entries_in_last_block)
+               r = grow_add_tail_block(resize);
+
+       return r;
+}
+
+/* Dispatches to the appropriate grow strategy. */
+static int grow(struct resize *resize)
+{
+       /* More full blocks required? */
+       if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
+               return grow_needs_more_blocks(resize);
+
+       /* Same nr of full blocks: just lengthen the existing tail block. */
+       else if (resize->old_nr_entries_in_last_block)
+               return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);
+
+       /* No tail block yet: add one. */
+       else
+               return grow_add_tail_block(resize);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * These are the value_type functions for the btree elements, which point
+ * to array blocks.
+ */
+static void block_inc(void *context, const void *value)
+{
+       __le64 block_le;
+       struct dm_array_info *info = context;
+
+       /* 'value' points into a btree node; copy out rather than cast. */
+       memcpy(&block_le, value, sizeof(block_le));
+       dm_tm_inc(info->btree_info.tm, le64_to_cpu(block_le));
+}
+
+static void block_dec(void *context, const void *value)
+{
+       int r;
+       uint64_t b;
+       __le64 block_le;
+       uint32_t ref_count;
+       struct dm_block *block;
+       struct array_block *ab;
+       struct dm_array_info *info = context;
+
+       memcpy(&block_le, value, sizeof(block_le));
+       b = le64_to_cpu(block_le);
+
+       r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
+       if (r) {
+               DMERR_LIMIT("couldn't get reference count for block %llu",
+                           (unsigned long long) b);
+               return;
+       }
+
+       if (ref_count == 1) {
+               /*
+                * We're about to drop the last reference to this ablock.
+                * So we need to decrement the ref count of the contents.
+                */
+               r = get_ablock(info, b, &block, &ab);
+               if (r) {
+                       DMERR_LIMIT("couldn't get array block %llu",
+                                   (unsigned long long) b);
+                       return;
+               }
+
+               dec_ablock_entries(info, ab);
+               unlock_ablock(info, block);
+       }
+
+       /* Drop our reference to the ablock itself. */
+       dm_tm_dec(info->btree_info.tm, b);
+}
+
+/* Two btree values are equal iff they point at the same ablock. */
+static int block_equal(void *context, const void *value1, const void *value2)
+{
+       return !memcmp(value1, value2, sizeof(__le64));
+}
+
+/*----------------------------------------------------------------*/
+
+void dm_array_info_init(struct dm_array_info *info,
+                       struct dm_transaction_manager *tm,
+                       struct dm_btree_value_type *vt)
+{
+       struct dm_btree_value_type *bvt = &info->btree_info.value_type;
+
+       /* Take a copy of the caller's leaf value type. */
+       memcpy(&info->value_type, vt, sizeof(info->value_type));
+       info->btree_info.tm = tm;
+       info->btree_info.levels = 1;
+
+       /* The btree's own values are __le64 pointers to array blocks. */
+       bvt->context = info;
+       bvt->size = sizeof(__le64);
+       bvt->inc = block_inc;
+       bvt->dec = block_dec;
+       bvt->equal = block_equal;
+}
+EXPORT_SYMBOL_GPL(dm_array_info_init);
+
+/* An empty array is just an empty btree of ablock pointers. */
+int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
+{
+       return dm_btree_empty(&info->btree_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_array_empty);
+
+/*
+ * Common implementation for dm_array_resize(): fills out a struct resize
+ * and dispatches to grow() or shrink().
+ */
+static int array_resize(struct dm_array_info *info, dm_block_t root,
+                       uint32_t old_size, uint32_t new_size,
+                       const void *value, dm_block_t *new_root)
+{
+       int r;
+       struct resize resize;
+
+       if (old_size == new_size) {
+               /*
+                * Fix: still hand the (unchanged) root back to the caller;
+                * previously *new_root was left uninitialized on this path.
+                */
+               *new_root = root;
+               return 0;
+       }
+
+       resize.info = info;
+       resize.root = root;
+       resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+       resize.max_entries = calc_max_entries(info->value_type.size,
+                                             resize.size_of_block);
+
+       resize.old_nr_full_blocks = old_size / resize.max_entries;
+       resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
+       resize.new_nr_full_blocks = new_size / resize.max_entries;
+       resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
+       resize.value = value;
+
+       r = ((new_size > old_size) ? grow : shrink)(&resize);
+       if (r)
+               return r;
+
+       /* grow()/shrink() updated resize.root as they went. */
+       *new_root = resize.root;
+       return 0;
+}
+
+int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+                   uint32_t old_size, uint32_t new_size,
+                   const void *value, dm_block_t *new_root)
+                   __dm_written_to_disk(value)
+{
+       int r = array_resize(info, root, old_size, new_size, value, new_root);
+       /* Pairs with the caller's __dm_bless_for_disk(), success or not. */
+       __dm_unbless_for_disk(value);
+       return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_resize);
+
+/* Deletes the whole btree; ablock contents are released via block_dec. */
+int dm_array_del(struct dm_array_info *info, dm_block_t root)
+{
+       return dm_btree_del(&info->btree_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_array_del);
+
+int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+                      uint32_t index, void *value_le)
+{
+       int r;
+       struct dm_block *block;
+       struct array_block *ab;
+       size_t size_of_block;
+       unsigned entry, max_entries;
+
+       size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+       max_entries = calc_max_entries(info->value_type.size, size_of_block);
+
+       /* Read-lock the ablock holding this index. */
+       r = lookup_ablock(info, root, index / max_entries, &block, &ab);
+       if (r)
+               return r;
+
+       entry = index % max_entries;
+       if (entry >= le32_to_cpu(ab->nr_entries))
+               /* Index lands beyond the entries in the tail block. */
+               r = -ENODATA;
+       else
+               /* Value is copied out in its on-disk (little endian) form. */
+               memcpy(value_le, element_at(info, ab, entry),
+                      info->value_type.size);
+
+       unlock_ablock(info, block);
+       return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_get_value);
+
+/*
+ * Implementation of dm_array_set_value(): shadows the relevant ablock and
+ * overwrites one entry, maintaining value reference counts.
+ */
+static int array_set_value(struct dm_array_info *info, dm_block_t root,
+                          uint32_t index, const void *value, dm_block_t *new_root)
+{
+       int r;
+       struct dm_block *block;
+       struct array_block *ab;
+       size_t size_of_block;
+       unsigned max_entries;
+       unsigned entry;
+       void *old_value;
+       struct dm_btree_value_type *vt = &info->value_type;
+
+       size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+       max_entries = calc_max_entries(info->value_type.size, size_of_block);
+
+       /* Shadowing may move the ablock, so the root can change. */
+       r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
+       if (r)
+               return r;
+       *new_root = root;
+
+       entry = index % max_entries;
+       if (entry >= le32_to_cpu(ab->nr_entries)) {
+               r = -ENODATA;
+               goto out;
+       }
+
+       old_value = element_at(info, ab, entry);
+       /*
+        * Only adjust ref counts when the value actually changes.
+        * NOTE(review): inc is only reached when dec is non-NULL; value
+        * types are presumably expected to define both or neither.
+        */
+       if (vt->dec &&
+           (!vt->equal || !vt->equal(vt->context, old_value, value))) {
+               vt->dec(vt->context, old_value);
+               if (vt->inc)
+                       vt->inc(vt->context, value);
+       }
+
+       memcpy(old_value, value, info->value_type.size);
+
+out:
+       unlock_ablock(info, block);
+       return r;
+}
+
+int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
+                uint32_t index, const void *value, dm_block_t *new_root)
+                __dm_written_to_disk(value)
+{
+       int r;
+
+       r = array_set_value(info, root, index, value, new_root);
+       /* Pairs with the caller's __dm_bless_for_disk(), success or not. */
+       __dm_unbless_for_disk(value);
+       return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_set_value);
+
+/* Context handed to walk_ablock() via dm_btree_walk(). */
+struct walk_info {
+       struct dm_array_info *info;
+       int (*fn)(void *context, uint64_t key, void *leaf);
+       void *context;
+};
+
+/* Called for every btree leaf, ie. every ablock pointer. */
+static int walk_ablock(void *context, uint64_t *keys, void *leaf)
+{
+       struct walk_info *wi = context;
+
+       int r;
+       unsigned i;
+       __le64 block_le;
+       unsigned nr_entries, max_entries;
+       struct dm_block *block;
+       struct array_block *ab;
+
+       memcpy(&block_le, leaf, sizeof(block_le));
+       r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
+       if (r)
+               return r;
+
+       max_entries = le32_to_cpu(ab->max_entries);
+       nr_entries = le32_to_cpu(ab->nr_entries);
+       for (i = 0; i < nr_entries; i++) {
+               /* keys[0] is the ablock index, so this is the array index. */
+               r = wi->fn(wi->context, keys[0] * max_entries + i,
+                          element_at(wi->info, ab, i));
+
+               if (r)
+                       break;
+       }
+
+       unlock_ablock(wi->info, block);
+       return r;
+}
+
+int dm_array_walk(struct dm_array_info *info, dm_block_t root,
+                 int (*fn)(void *, uint64_t key, void *leaf),
+                 void *context)
+{
+       struct walk_info wi;
+
+       wi.info = info;
+       wi.fn = fn;
+       wi.context = context;
+
+       /* Visit every ablock; walk_ablock() fans out to individual entries. */
+       return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
+}
+EXPORT_SYMBOL_GPL(dm_array_walk);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
new file mode 100644 (file)
index 0000000..ea177d6
--- /dev/null
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_ARRAY_H
+#define _LINUX_DM_ARRAY_H
+
+#include "dm-btree.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The dm-array is a persistent version of an array.  It packs the data
+ * more efficiently than a btree which will result in less disk space use,
+ * and a performance boost.  The element get and set operations are still
+ * O(ln(n)), but with a much smaller constant.
+ *
+ * The value type structure is reused from the btree type to support proper
+ * reference counting of values.
+ *
+ * The arrays implicitly know their length, and bounds are checked for
+ * lookups and updates.  It doesn't store this in an accessible place
+ * because it would waste a whole metadata block.  Make sure you store the
+ * size along with the array root in your encompassing data.
+ *
+ * Array entries are indexed via an unsigned integer starting from zero.
+ * Arrays are not sparse; if you resize an array to have 'n' entries then
+ * 'n - 1' will be the last valid index.
+ *
+ * Typical use:
+ *
+ * a) initialise a dm_array_info structure.  This describes the array
+ *    values and ties it into a specific transaction manager.  It holds no
+ *    instance data; the same info can be used for many similar arrays if
+ *    you wish.
+ *
+ * b) Get yourself a root.  The root is the index of a block of data on the
+ *    disk that holds a particular instance of an array.  You may have a
+ *    pre existing root in your metadata that you wish to use, or you may
+ *    want to create a brand new, empty array with dm_array_empty().
+ *
+ * Like the other data structures in this library, dm_array objects are
+ * immutable between transactions.  Update functions will return you the
+ * root for a _new_ array.  If you've incremented the old root, via
+ * dm_tm_inc(), before calling the update function you may continue to use
+ * it in parallel with the new root.
+ *
+ * c) resize an array with dm_array_resize().
+ *
+ * d) Get a value from the array with dm_array_get_value().
+ *
+ * e) Set a value in the array with dm_array_set_value().
+ *
+ * f) Walk an array of values in index order with dm_array_walk().  More
+ *    efficient than making many calls to dm_array_get_value().
+ *
+ * g) Destroy the array with dm_array_del().  This tells the transaction
+ *    manager that you're no longer using this data structure so it can
+ *    recycle its blocks.  (dm_array_dec() would be a better name for it,
+ *    but del is in keeping with dm_btree_del()).
+ */
+
+/*
+ * Describes an array.  Don't initialise this structure yourself, use the
+ * init function below.
+ */
+struct dm_array_info {
+       struct dm_transaction_manager *tm;
+       struct dm_btree_value_type value_type;
+       struct dm_btree_info btree_info;
+};
+
+/*
+ * Sets up a dm_array_info structure.  You don't need to do anything with
+ * this structure when you finish using it.
+ *
+ * info - the structure being filled in.
+ * tm   - the transaction manager that should supervise this structure.
+ * vt   - describes the leaf values.
+ */
+void dm_array_info_init(struct dm_array_info *info,
+                       struct dm_transaction_manager *tm,
+                       struct dm_btree_value_type *vt);
+
+/*
+ * Create an empty, zero length array.
+ *
+ * info - describes the array
+ * root - on success this will be filled out with the root block
+ */
+int dm_array_empty(struct dm_array_info *info, dm_block_t *root);
+
+/*
+ * Resizes the array.
+ *
+ * info - describes the array
+ * root - the root block of the array on disk
+ * old_size - the caller is responsible for remembering the size of
+ *            the array
+ * new_size - can be bigger or smaller than old_size
+ * value - if we're growing the array the new entries will have this value
+ * new_root - on success, points to the new root block
+ *
+ * If growing the inc function for 'value' will be called the appropriate
+ * number of times.  So if the caller is holding a reference they may want
+ * to drop it.
+ */
+int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+                   uint32_t old_size, uint32_t new_size,
+                   const void *value, dm_block_t *new_root)
+       __dm_written_to_disk(value);
+
+/*
+ * Frees a whole array.  The value_type's decrement operation will be called
+ * for all values in the array
+ */
+int dm_array_del(struct dm_array_info *info, dm_block_t root);
+
+/*
+ * Lookup a value in the array
+ *
+ * info - describes the array
+ * root - root block of the array
+ * index - array index
+ * value - the value to be read.  Will be in on-disk format of course.
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+                      uint32_t index, void *value);
+
+/*
+ * Set an entry in the array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * index - array index
+ * value - value to be written to disk.  Make sure you confirm the value is
+ *         in on-disk format with __dm_bless_for_disk() before calling.
+ * new_root - the new root block
+ *
+ * The old value being overwritten will be decremented, the new value
+ * incremented.
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
+                      uint32_t index, const void *value, dm_block_t *new_root)
+       __dm_written_to_disk(value);
+
+/*
+ * Walk through all the entries in an array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * fn - called back for every element
+ * context - passed to the callback
+ */
+int dm_array_walk(struct dm_array_info *info, dm_block_t root,
+                 int (*fn)(void *context, uint64_t key, void *leaf),
+                 void *context);
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_ARRAY_H */
diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
new file mode 100644 (file)
index 0000000..cd9a86d
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-bitset.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "bitset"
+#define BITS_PER_ARRAY_ENTRY 64
+
+/*----------------------------------------------------------------*/
+
+/* The bitset's array stores plain __le64 words: no ref counting needed. */
+static struct dm_btree_value_type bitset_bvt = {
+       .context = NULL,
+       .size = sizeof(__le64),
+       .inc = NULL,
+       .dec = NULL,
+       .equal = NULL,
+};
+
+/*----------------------------------------------------------------*/
+
+void dm_disk_bitset_init(struct dm_transaction_manager *tm,
+                        struct dm_disk_bitset *info)
+{
+       dm_array_info_init(&info->array_info, tm, &bitset_bvt);
+       /* The one-word cache starts out empty. */
+       info->current_index_set = false;
+}
+EXPORT_SYMBOL_GPL(dm_disk_bitset_init);
+
+/* An empty bitset is just an empty array of words. */
+int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *root)
+{
+       return dm_array_empty(&info->array_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_empty);
+
+int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t root,
+                    uint32_t old_nr_entries, uint32_t new_nr_entries,
+                    bool default_value, dm_block_t *new_root)
+{
+       /* Sizes are in bits; the underlying array stores 64 bit words. */
+       uint32_t old_blocks = dm_div_up(old_nr_entries, BITS_PER_ARRAY_ENTRY);
+       uint32_t new_blocks = dm_div_up(new_nr_entries, BITS_PER_ARRAY_ENTRY);
+       /* New words are all ones or all zeroes depending on default_value. */
+       __le64 value = default_value ? cpu_to_le64(~0) : cpu_to_le64(0);
+
+       __dm_bless_for_disk(&value);
+       return dm_array_resize(&info->array_info, root, old_blocks, new_blocks,
+                              &value, new_root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_resize);
+
+/* Frees the underlying array. */
+int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root)
+{
+       return dm_array_del(&info->array_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_del);
+
+int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
+                   dm_block_t *new_root)
+{
+       int r;
+       __le64 value;
+
+       /* Nothing cached, nothing to write back. */
+       if (!info->current_index_set)
+               return 0;
+
+       value = cpu_to_le64(info->current_bits);
+
+       __dm_bless_for_disk(&value);
+       r = dm_array_set_value(&info->array_info, root, info->current_index,
+                              &value, new_root);
+       if (r)
+               return r;
+
+       /* Cache is clean; the next access will re-read from the array. */
+       info->current_index_set = false;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_flush);
+
+/* Loads the 64 bit word at array_index into the one-word cache. */
+static int read_bits(struct dm_disk_bitset *info, dm_block_t root,
+                    uint32_t array_index)
+{
+       int r;
+       __le64 value;
+
+       r = dm_array_get_value(&info->array_info, root, array_index, &value);
+       if (r)
+               return r;
+
+       info->current_bits = le64_to_cpu(value);
+       info->current_index_set = true;
+       info->current_index = array_index;
+       return 0;
+}
+
+/*
+ * Makes sure the cached word is the one covering bit 'index', flushing
+ * any previously cached word first.
+ */
+static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
+                          uint32_t index, dm_block_t *new_root)
+{
+       int r;
+       unsigned array_index = index / BITS_PER_ARRAY_ENTRY;
+
+       if (info->current_index_set) {
+               /* Cache hit? */
+               if (info->current_index == array_index)
+                       return 0;
+
+               r = dm_bitset_flush(info, root, new_root);
+               if (r)
+                       return r;
+       }
+
+       return read_bits(info, root, array_index);
+}
+
+int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+                     uint32_t index, dm_block_t *new_root)
+{
+       int r;
+       unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+       r = get_array_entry(info, root, index, new_root);
+       if (r)
+               return r;
+
+       /*
+        * Only the cached copy is modified; dm_bitset_flush() persists it.
+        * NOTE(review): casting a uint64_t to unsigned long * assumes the
+        * bitops word layout matches -- confirm on 32-bit big endian.
+        */
+       set_bit(b, (unsigned long *) &info->current_bits);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_set_bit);
+
+int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+                       uint32_t index, dm_block_t *new_root)
+{
+       int r;
+       unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+       r = get_array_entry(info, root, index, new_root);
+       if (r)
+               return r;
+
+       /* Only the cached copy is modified; dm_bitset_flush() persists it. */
+       clear_bit(b, (unsigned long *) &info->current_bits);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_clear_bit);
+
+int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+                      uint32_t index, dm_block_t *new_root, bool *result)
+{
+       int r;
+       unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+       /* May flush the currently cached word, hence new_root can change. */
+       r = get_array_entry(info, root, index, new_root);
+       if (r)
+               return r;
+
+       *result = test_bit(b, (unsigned long *) &info->current_bits);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_test_bit);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h
new file mode 100644 (file)
index 0000000..e1b9bea
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_BITSET_H
+#define _LINUX_DM_BITSET_H
+
+#include "dm-array.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * This bitset type is a thin wrapper round a dm_array of 64bit words.  It
+ * uses a tiny, one word cache to reduce the number of array lookups and so
+ * increase performance.
+ *
+ * Like the dm-array that it's based on, the caller needs to keep track of
+ * the size of the bitset separately.  The underlying dm-array implicitly
+ * knows how many words it's storing and will return -ENODATA if you try
+ * and access an out of bounds word.  However, an out of bounds bit in the
+ * final word will _not_ be detected, you have been warned.
+ *
+ * Bits are indexed from zero.
+ *
+ * Typical use:
+ *
+ * a) Initialise a dm_disk_bitset structure with dm_disk_bitset_init().
+ *    This describes the bitset and includes the cache.  It's not called
+ *    dm_bitset_info in line with other data structures because it does
+ *    include instance data.
+ *
+ * b) Get yourself a root.  The root is the index of a block of data on the
+ *    disk that holds a particular instance of a bitset.  You may have a
+ *    pre existing root in your metadata that you wish to use, or you may
+ *    want to create a brand new, empty bitset with dm_bitset_empty().
+ *
+ * Like the other data structures in this library, dm_bitset objects are
+ * immutable between transactions.  Update functions will return you the
+ * root for a _new_ array.  If you've incremented the old root, via
+ * dm_tm_inc(), before calling the update function you may continue to use
+ * it in parallel with the new root.
+ *
+ * Even read operations may trigger the cache to be flushed and as such
+ * return a root for a new, updated bitset.
+ *
+ * c) resize a bitset with dm_bitset_resize().
+ *
+ * d) Set a bit with dm_bitset_set_bit().
+ *
+ * e) Clear a bit with dm_bitset_clear_bit().
+ *
+ * f) Test a bit with dm_bitset_test_bit().
+ *
+ * g) Flush all updates from the cache with dm_bitset_flush().
+ *
+ * h) Destroy the bitset with dm_bitset_del().  This tells the transaction
+ *    manager that you're no longer using this data structure so it can
+ *    recycle its blocks.  (dm_bitset_dec() would be a better name for it,
+ *    but del is in keeping with dm_btree_del()).
+ */
+
+/*
+ * Opaque object.  Unlike dm_array_info, you should have one of these per
+ * bitset.  Initialise with dm_disk_bitset_init().
+ */
+struct dm_disk_bitset {
+       struct dm_array_info array_info;
+
+       uint32_t current_index;
+       uint64_t current_bits;
+
+       bool current_index_set:1;
+};
+
+/*
+ * Sets up a dm_disk_bitset structure.  You don't need to do anything with
+ * this structure when you finish using it.
+ *
+ * tm - the transaction manager that should supervise this structure
+ * info - the structure being initialised
+ */
+void dm_disk_bitset_init(struct dm_transaction_manager *tm,
+                        struct dm_disk_bitset *info);
+
+/*
+ * Create an empty, zero length bitset.
+ *
+ * info - describes the bitset
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *new_root);
+
+/*
+ * Resize the bitset.
+ *
+ * info - describes the bitset
+ * old_root - the root block of the array on disk
+ * old_nr_entries - the number of bits in the old bitset
+ * new_nr_entries - the number of bits you want in the new bitset
+ * default_value - the value for any new bits
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t old_root,
+                    uint32_t old_nr_entries, uint32_t new_nr_entries,
+                    bool default_value, dm_block_t *new_root);
+
+/*
+ * Frees the bitset.
+ */
+int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root);
+
+/*
+ * Set a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+                     uint32_t index, dm_block_t *new_root);
+
+/*
+ * Clears a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+                       uint32_t index, dm_block_t *new_root);
+
+/*
+ * Tests a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block (cached values may have been written)
+ * result - the bit value you're after
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+                      uint32_t index, dm_block_t *new_root, bool *result);
+
+/*
+ * Flush any cached changes to disk.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
+                   dm_block_t *new_root);
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_BITSET_H */
index 28c3ed0..81b5138 100644 (file)
@@ -613,6 +613,7 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
 
        return dm_bufio_write_dirty_buffers(bm->bufio);
 }
+EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
 
 void dm_bm_set_read_only(struct dm_block_manager *bm)
 {
index accbb05..37d367b 100644 (file)
@@ -64,6 +64,7 @@ struct ro_spine {
 void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
 int exit_ro_spine(struct ro_spine *s);
 int ro_step(struct ro_spine *s, dm_block_t new_child);
+void ro_pop(struct ro_spine *s);
 struct btree_node *ro_node(struct ro_spine *s);
 
 struct shadow_spine {
index f199a0c..cf9fd67 100644 (file)
@@ -164,6 +164,13 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
        return r;
 }
 
+/* Undoes the most recent ro_step(): unlocks and drops the top node. */
+void ro_pop(struct ro_spine *s)
+{
+       BUG_ON(!s->count);
+       --s->count;
+       unlock_block(s->info, s->nodes[s->count]);
+}
+
 struct btree_node *ro_node(struct ro_spine *s)
 {
        struct dm_block *block;
index 4caf669..3586542 100644 (file)
@@ -807,3 +807,55 @@ int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
        return r ? r : count;
 }
 EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
+
+/*
+ * FIXME: We shouldn't use a recursive algorithm when we have limited stack
+ * space.  Also this only works for single level trees.
+ */
+static int walk_node(struct ro_spine *s, dm_block_t block,
+                    int (*fn)(void *context, uint64_t *keys, void *leaf),
+                    void *context)
+{
+       int r;
+       unsigned i, nr;
+       struct btree_node *n;
+       uint64_t keys;
+
+       r = ro_step(s, block);
+       if (r)
+               /*
+                * Fix: previously the error was ignored and ro_node()/
+                * ro_pop() ran against a spine that was never advanced.
+                */
+               return r;
+       n = ro_node(s);
+
+       nr = le32_to_cpu(n->header.nr_entries);
+       for (i = 0; i < nr; i++) {
+               if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
+                       /* Recurse into the child node. */
+                       r = walk_node(s, value64(n, i), fn, context);
+                       if (r)
+                               goto out;
+               } else {
+                       /* Leaf entry: hand the key/value pair to the caller. */
+                       keys = le64_to_cpu(*key_ptr(n, i));
+                       r = fn(context, &keys, value_ptr(n, i));
+                       if (r)
+                               goto out;
+               }
+       }
+
+out:
+       ro_pop(s);
+       return r;
+}
+
+/*
+ * Walks all leaf entries of a single level btree in key order, calling
+ * fn() on each.  See the comment in dm-btree.h.
+ */
+int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+                 int (*fn)(void *context, uint64_t *keys, void *leaf),
+                 void *context)
+{
+       int r;
+       struct ro_spine spine;
+
+       /* Multi-level (multi-key) trees are not supported by this walker. */
+       BUG_ON(info->levels > 1);
+
+       init_ro_spine(&spine, info);
+       r = walk_node(&spine, root, fn, context);
+       exit_ro_spine(&spine);
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_walk);
index a2cd504..8672d15 100644 (file)
@@ -58,21 +58,21 @@ struct dm_btree_value_type {
         * somewhere.) This method is _not_ called for insertion of a new
         * value: It is assumed the ref count is already 1.
         */
-       void (*inc)(void *context, void *value);
+       void (*inc)(void *context, const void *value);
 
        /*
         * This value is being deleted.  The btree takes care of freeing
         * the memory pointed to by @value.  Often the del function just
         * needs to decrement a reference count somewhere.
         */
-       void (*dec)(void *context, void *value);
+       void (*dec)(void *context, const void *value);
 
        /*
         * A test for equality between two values.  When a value is
         * overwritten with a new one, the old one has the dec method
         * called _unless_ the new and old value are deemed equal.
         */
-       int (*equal)(void *context, void *value1, void *value2);
+       int (*equal)(void *context, const void *value1, const void *value2);
 };
 
 /*
@@ -142,4 +142,13 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
 int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
                              uint64_t *result_keys);
 
+/*
+ * Iterate through the a btree, calling fn() on each entry.
+ * It only works for single level trees and is internally recursive, so
+ * monitor stack usage carefully.
+ */
+int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+                 int (*fn)(void *context, uint64_t *keys, void *leaf),
+                 void *context);
+
 #endif /* _LINUX_DM_BTREE_H */
index 7b17a1f..81da1a2 100644 (file)
@@ -46,10 +46,9 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
        int r = 0;
        unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
        struct shadow_info *si;
-       struct hlist_node *n;
 
        spin_lock(&tm->lock);
-       hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+       hlist_for_each_entry(si, tm->buckets + bucket, hlist)
                if (si->where == b) {
                        r = 1;
                        break;
@@ -81,14 +80,14 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
 static void wipe_shadow_table(struct dm_transaction_manager *tm)
 {
        struct shadow_info *si;
-       struct hlist_node *n, *tmp;
+       struct hlist_node *tmp;
        struct hlist_head *bucket;
        int i;
 
        spin_lock(&tm->lock);
        for (i = 0; i < DM_HASH_SIZE; i++) {
                bucket = tm->buckets + i;
-               hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+               hlist_for_each_entry_safe(si, tmp, bucket, hlist)
                        kfree(si);
 
                INIT_HLIST_HEAD(bucket);
index 24b3597..0505452 100644 (file)
@@ -175,7 +175,13 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
                        rdev1->new_raid_disk = j;
                }
 
-               if (j < 0 || j >= mddev->raid_disks) {
+               if (j < 0) {
+                       printk(KERN_ERR
+                              "md/raid0:%s: remove inactive devices before converting to RAID0\n",
+                              mdname(mddev));
+                       goto abort;
+               }
+               if (j >= mddev->raid_disks) {
                        printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
@@ -289,7 +295,7 @@ abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
-       *private_conf = NULL;
+       *private_conf = ERR_PTR(err);
        return err;
 }
 
@@ -411,7 +417,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
                  "%s does not support generic reshape\n", __func__);
 
        rdev_for_each(rdev, mddev)
-               array_sectors += rdev->sectors;
+               array_sectors += (rdev->sectors &
+                                 ~(sector_t)(mddev->chunk_sectors-1));
 
        return array_sectors;
 }
index d5bddfc..fd86b37 100644 (file)
@@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
                spin_unlock_irq(&conf->device_lock);
+               wake_up(&conf->wait_barrier);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
@@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid1_plug_cb *plug = NULL;
@@ -1301,7 +1303,8 @@ read_again:
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+               mbio->bi_rw =
+                       WRITE | do_flush_fua | do_sync | do_discard | do_same;
                mbio->bi_private = r1_bio;
 
                atomic_inc(&r1_bio->remaining);
@@ -2818,6 +2821,9 @@ static int run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
+       if (mddev->queue)
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
        rdev_for_each(rdev, mddev) {
                if (!mddev->gendisk)
                        continue;
index 64d4824..77b562d 100644 (file)
  *    near_copies (stored in low byte of layout)
  *    far_copies (stored in second byte of layout)
  *    far_offset (stored in bit 16 of layout )
+ *    use_far_sets (stored in bit 17 of layout )
  *
- * The data to be stored is divided into chunks using chunksize.
- * Each device is divided into far_copies sections.
- * In each section, chunks are laid out in a style similar to raid0, but
- * near_copies copies of each chunk is stored (each on a different drive).
- * The starting device for each section is offset near_copies from the starting
- * device of the previous section.
- * Thus they are (near_copies*far_copies) of each chunk, and each is on a different
- * drive.
- * near_copies and far_copies must be at least one, and their product is at most
- * raid_disks.
+ * The data to be stored is divided into chunks using chunksize.  Each device
+ * is divided into far_copies sections.   In each section, chunks are laid out
+ * in a style similar to raid0, but near_copies copies of each chunk is stored
+ * (each on a different drive).  The starting device for each section is offset
+ * near_copies from the starting device of the previous section.  Thus there
+ * are (near_copies * far_copies) of each chunk, and each is on a different
+ * drive.  near_copies and far_copies must be at least one, and their product
+ * is at most raid_disks.
  *
  * If far_offset is true, then the far_copies are handled a bit differently.
- * The copies are still in different stripes, but instead of be very far apart
- * on disk, there are adjacent stripes.
+ * The copies are still in different stripes, but instead of being very far
+ * apart on disk, there are adjacent stripes.
+ *
+ * The far and offset algorithms are handled slightly differently if
+ * 'use_far_sets' is true.  In this case, the array's devices are grouped into
+ * sets that are (near_copies * far_copies) in size.  The far copied stripes
+ * are still shifted by 'near_copies' devices, but this shifting stays confined
+ * to the set rather than the entire array.  This is done to improve the number
+ * of device combinations that can fail without causing the array to fail.
+ * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
+ * on a device):
+ *    A B C D    A B C D E
+ *      ...         ...
+ *    D A B C    E A B C D
+ * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
+ *    [A B] [C D]    [A B] [C D E]
+ *    |...| |...|    |...| | ... |
+ *    [B A] [D C]    [B A] [E C D]
  */
 
 /*
@@ -535,6 +550,13 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
        sector_t stripe;
        int dev;
        int slot = 0;
+       int last_far_set_start, last_far_set_size;
+
+       last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+       last_far_set_start *= geo->far_set_size;
+
+       last_far_set_size = geo->far_set_size;
+       last_far_set_size += (geo->raid_disks % geo->far_set_size);
 
        /* now calculate first sector/dev */
        chunk = r10bio->sector >> geo->chunk_shift;
@@ -551,15 +573,25 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
        /* and calculate all the others */
        for (n = 0; n < geo->near_copies; n++) {
                int d = dev;
+               int set;
                sector_t s = sector;
-               r10bio->devs[slot].addr = sector;
                r10bio->devs[slot].devnum = d;
+               r10bio->devs[slot].addr = s;
                slot++;
 
                for (f = 1; f < geo->far_copies; f++) {
+                       set = d / geo->far_set_size;
                        d += geo->near_copies;
-                       if (d >= geo->raid_disks)
-                               d -= geo->raid_disks;
+
+                       if ((geo->raid_disks % geo->far_set_size) &&
+                           (d > last_far_set_start)) {
+                               d -= last_far_set_start;
+                               d %= last_far_set_size;
+                               d += last_far_set_start;
+                       } else {
+                               d %= geo->far_set_size;
+                               d += geo->far_set_size * set;
+                       }
                        s += geo->stride;
                        r10bio->devs[slot].devnum = d;
                        r10bio->devs[slot].addr = s;
@@ -595,6 +627,20 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
         * or recovery, so reshape isn't happening
         */
        struct geom *geo = &conf->geo;
+       int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
+       int far_set_size = geo->far_set_size;
+       int last_far_set_start;
+
+       if (geo->raid_disks % geo->far_set_size) {
+               last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+               last_far_set_start *= geo->far_set_size;
+
+               if (dev >= last_far_set_start) {
+                       far_set_size = geo->far_set_size;
+                       far_set_size += (geo->raid_disks % geo->far_set_size);
+                       far_set_start = last_far_set_start;
+               }
+       }
 
        offset = sector & geo->chunk_mask;
        if (geo->far_offset) {
@@ -602,13 +648,13 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
                chunk = sector >> geo->chunk_shift;
                fc = sector_div(chunk, geo->far_copies);
                dev -= fc * geo->near_copies;
-               if (dev < 0)
-                       dev += geo->raid_disks;
+               if (dev < far_set_start)
+                       dev += far_set_size;
        } else {
                while (sector >= geo->stride) {
                        sector -= geo->stride;
-                       if (dev < geo->near_copies)
-                               dev += geo->raid_disks - geo->near_copies;
+                       if (dev < (geo->near_copies + far_set_start))
+                               dev += far_set_size - geo->near_copies;
                        else
                                dev -= geo->near_copies;
                }
@@ -1073,6 +1119,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
                spin_unlock_irq(&conf->device_lock);
+               wake_up(&conf->wait_barrier);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
@@ -1105,6 +1152,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
@@ -1460,7 +1508,8 @@ retry_write:
                                                              rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_rw =
+                               WRITE | do_sync | do_fua | do_discard | do_same;
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1551,8 @@ retry_write:
                                                   r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
-                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_rw =
+                               WRITE | do_sync | do_fua | do_discard | do_same;
                        mbio->bi_private = r10_bio;
 
                        atomic_inc(&r10_bio->remaining);
@@ -3436,7 +3486,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
                disks = mddev->raid_disks + mddev->delta_disks;
                break;
        }
-       if (layout >> 17)
+       if (layout >> 18)
                return -1;
        if (chunk < (PAGE_SIZE >> 9) ||
            !is_power_of_2(chunk))
@@ -3448,6 +3498,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
        geo->near_copies = nc;
        geo->far_copies = fc;
        geo->far_offset = fo;
+       geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
        geo->chunk_mask = chunk - 1;
        geo->chunk_shift = ffz(~chunk);
        return nc*fc;
@@ -3569,6 +3620,8 @@ static int run(struct mddev *mddev)
        if (mddev->queue) {
                blk_queue_max_discard_sectors(mddev->queue,
                                              mddev->chunk_sectors);
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
                blk_queue_io_min(mddev->queue, chunk_size);
                if (conf->geo.raid_disks % conf->geo.near_copies)
                        blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
index 1054cf6..157d69e 100644 (file)
@@ -33,6 +33,11 @@ struct r10conf {
                                               * far_offset, in which case it is
                                               * 1 stripe.
                                               */
+               int             far_set_size; /* The number of devices in a set,
+                                              * where a 'set' are devices that
+                                              * contain far/offset copies of
+                                              * each other.
+                                              */
                int             chunk_shift; /* shift from chunks to sectors */
                sector_t        chunk_mask;
        } prev, geo;
index 19d77a0..3ee2912 100644 (file)
@@ -184,8 +184,6 @@ static void return_io(struct bio *return_bi)
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
-               trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
-                                        bi, 0);
                bio_endio(bi, 0);
                bi = return_bi;
        }
@@ -365,10 +363,9 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
                                         short generation)
 {
        struct stripe_head *sh;
-       struct hlist_node *hn;
 
        pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
-       hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
+       hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
                if (sh->sector == sector && sh->generation == generation)
                        return sh;
        pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -1406,7 +1403,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
 }
 
-static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
        int overlap_clear = 0, i, disks = sh->disks;
        struct dma_async_tx_descriptor *tx = NULL;
@@ -1471,36 +1468,6 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
-#ifdef CONFIG_MULTICORE_RAID456
-static void async_run_ops(void *param, async_cookie_t cookie)
-{
-       struct stripe_head *sh = param;
-       unsigned long ops_request = sh->ops.request;
-
-       clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
-       wake_up(&sh->ops.wait_for_ops);
-
-       __raid_run_ops(sh, ops_request);
-       release_stripe(sh);
-}
-
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
-{
-       /* since handle_stripe can be called outside of raid5d context
-        * we need to ensure sh->ops.request is de-staged before another
-        * request arrives
-        */
-       wait_event(sh->ops.wait_for_ops,
-                  !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
-       sh->ops.request = ops_request;
-
-       atomic_inc(&sh->count);
-       async_schedule(async_run_ops, sh);
-}
-#else
-#define raid_run_ops __raid_run_ops
-#endif
-
 static int grow_one_stripe(struct r5conf *conf)
 {
        struct stripe_head *sh;
@@ -1509,9 +1476,6 @@ static int grow_one_stripe(struct r5conf *conf)
                return 0;
 
        sh->raid_conf = conf;
-       #ifdef CONFIG_MULTICORE_RAID456
-       init_waitqueue_head(&sh->ops.wait_for_ops);
-       #endif
 
        spin_lock_init(&sh->stripe_lock);
 
@@ -1630,9 +1594,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                        break;
 
                nsh->raid_conf = conf;
-               #ifdef CONFIG_MULTICORE_RAID456
-               init_waitqueue_head(&nsh->ops.wait_for_ops);
-               #endif
                spin_lock_init(&nsh->stripe_lock);
 
                list_add(&nsh->lru, &newstripes);
@@ -3917,8 +3878,6 @@ static void raid5_align_endio(struct bio *bi, int error)
        rdev_dec_pending(rdev, conf->mddev);
 
        if (!error && uptodate) {
-               trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
-                                        raid_bi, 0);
                bio_endio(raid_bi, 0);
                if (atomic_dec_and_test(&conf->active_aligned_reads))
                        wake_up(&conf->wait_for_stripe);
@@ -4377,8 +4336,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                if ( rw == WRITE )
                        md_write_end(mddev);
 
-               trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
-                                        bi, 0);
                bio_endio(bi, 0);
        }
 }
@@ -4755,11 +4712,8 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
                handled++;
        }
        remaining = raid5_dec_bi_active_stripes(raid_bio);
-       if (remaining == 0) {
-               trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
-                                        raid_bio, 0);
+       if (remaining == 0)
                bio_endio(raid_bio, 0);
-       }
        if (atomic_dec_and_test(&conf->active_aligned_reads))
                wake_up(&conf->wait_for_stripe);
        return handled;
index f1423b7..e084b0a 100644 (file)
@@ -137,7 +137,7 @@ static int zoran_open(struct inode *inode, struct file *file)
 static ssize_t zoran_write(struct file *file, const char __user *buffer,
                        size_t count, loff_t *ppos)
 {
-       struct zoran *zr = PDE(file->f_path.dentry->d_inode)->data;
+       struct zoran *zr = PDE(file_inode(file))->data;
        char *string, *sp;
        char *line, *ldelim, *varname, *svar, *tdelim;
 
index 383a727..6e5ad8e 100644 (file)
@@ -1338,28 +1338,15 @@ static int isp_enable_clocks(struct isp_device *isp)
 {
        int r;
        unsigned long rate;
-       int divisor;
-
-       /*
-        * cam_mclk clock chain:
-        *   dpll4 -> dpll4_m5 -> dpll4_m5x2 -> cam_mclk
-        *
-        * In OMAP3630 dpll4_m5x2 != 2 x dpll4_m5 but both are
-        * set to the same value. Hence the rate set for dpll4_m5
-        * has to be twice of what is set on OMAP3430 to get
-        * the required value for cam_mclk
-        */
-       divisor = isp->revision == ISP_REVISION_15_0 ? 1 : 2;
 
        r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
        if (r) {
                dev_err(isp->dev, "failed to enable cam_ick clock\n");
                goto out_clk_enable_ick;
        }
-       r = clk_set_rate(isp->clock[ISP_CLK_DPLL4_M5_CK],
-                        CM_CAM_MCLK_HZ/divisor);
+       r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
        if (r) {
-               dev_err(isp->dev, "clk_set_rate for dpll4_m5_ck failed\n");
+               dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
                goto out_clk_enable_mclk;
        }
        r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
@@ -1401,7 +1388,6 @@ static void isp_disable_clocks(struct isp_device *isp)
 static const char *isp_clocks[] = {
        "cam_ick",
        "cam_mclk",
-       "dpll4_m5_ck",
        "csi2_96m_fck",
        "l3_ick",
 };
index 517d348..c77e1f2 100644 (file)
@@ -147,7 +147,6 @@ struct isp_platform_callback {
  * @ref_count: Reference count for handling multiple ISP requests.
  * @cam_ick: Pointer to camera interface clock structure.
  * @cam_mclk: Pointer to camera functional clock structure.
- * @dpll4_m5_ck: Pointer to DPLL4 M5 clock structure.
  * @csi2_fck: Pointer to camera CSI2 complexIO clock structure.
  * @l3_ick: Pointer to OMAP3 L3 bus interface clock.
  * @irq: Currently attached ISP ISR callbacks information structure.
@@ -189,10 +188,9 @@ struct isp_device {
        u32 xclk_divisor[2];    /* Two clocks, a and b. */
 #define ISP_CLK_CAM_ICK                0
 #define ISP_CLK_CAM_MCLK       1
-#define ISP_CLK_DPLL4_M5_CK    2
-#define ISP_CLK_CSI2_FCK       3
-#define ISP_CLK_L3_ICK         4
-       struct clk *clock[5];
+#define ISP_CLK_CSI2_FCK       2
+#define ISP_CLK_L3_ICK         3
+       struct clk *clock[4];
 
        /* ISP modules */
        struct ispstat isp_af;
index ca12d32..5247d94 100644 (file)
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(lirc_dev_fop_close);
 
 unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
 {
-       struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+       struct irctl *ir = irctls[iminor(file_inode(file))];
        unsigned int ret;
 
        if (!ir) {
@@ -565,7 +565,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        __u32 mode;
        int result = 0;
-       struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+       struct irctl *ir = irctls[iminor(file_inode(file))];
 
        if (!ir) {
                printk(KERN_ERR "lirc_dev: %s: no irctl found!\n", __func__);
@@ -650,7 +650,7 @@ ssize_t lirc_dev_fop_read(struct file *file,
                          size_t length,
                          loff_t *ppos)
 {
-       struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+       struct irctl *ir = irctls[iminor(file_inode(file))];
        unsigned char *buf;
        int ret = 0, written = 0;
        DECLARE_WAITQUEUE(wait, current);
@@ -752,16 +752,7 @@ EXPORT_SYMBOL(lirc_dev_fop_read);
 
 void *lirc_get_pdata(struct file *file)
 {
-       void *data = NULL;
-
-       if (file && file->f_dentry && file->f_dentry->d_inode &&
-           file->f_dentry->d_inode->i_rdev) {
-               struct irctl *ir;
-               ir = irctls[iminor(file->f_dentry->d_inode)];
-               data = ir->d.data;
-       }
-
-       return data;
+       return irctls[iminor(file_inode(file))]->d.data;
 }
 EXPORT_SYMBOL(lirc_get_pdata);
 
@@ -769,7 +760,7 @@ EXPORT_SYMBOL(lirc_get_pdata);
 ssize_t lirc_dev_fop_write(struct file *file, const char __user *buffer,
                           size_t length, loff_t *ppos)
 {
-       struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)];
+       struct irctl *ir = irctls[iminor(file_inode(file))];
 
        if (!ir) {
                printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
index 51b3a77..de1e9ab 100644 (file)
@@ -222,7 +222,7 @@ static struct class video_class = {
 
 struct video_device *video_devdata(struct file *file)
 {
-       return video_device[iminor(file->f_path.dentry->d_inode)];
+       return video_device[iminor(file_inode(file))];
 }
 EXPORT_SYMBOL(video_devdata);
 
index 56ff19c..ffcb10a 100644 (file)
@@ -512,18 +512,17 @@ int memstick_add_host(struct memstick_host *host)
 {
        int rc;
 
-       while (1) {
-               if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
-                       return -ENOMEM;
+       idr_preload(GFP_KERNEL);
+       spin_lock(&memstick_host_lock);
 
-               spin_lock(&memstick_host_lock);
-               rc = idr_get_new(&memstick_host_idr, host, &host->id);
-               spin_unlock(&memstick_host_lock);
-               if (!rc)
-                       break;
-               else if (rc != -EAGAIN)
-                       return rc;
-       }
+       rc = idr_alloc(&memstick_host_idr, host, 0, 0, GFP_NOWAIT);
+       if (rc >= 0)
+               host->id = rc;
+
+       spin_unlock(&memstick_host_lock);
+       idr_preload_end();
+       if (rc < 0)
+               return rc;
 
        dev_set_name(&host->dev, "memstick%u", host->id);
 
index 9729b92..f12b78d 100644 (file)
@@ -1213,21 +1213,10 @@ static int mspro_block_init_disk(struct memstick_dev *card)
        msb->page_size = be16_to_cpu(sys_info->unit_size);
 
        mutex_lock(&mspro_block_disk_lock);
-       if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) {
-               mutex_unlock(&mspro_block_disk_lock);
-               return -ENOMEM;
-       }
-
-       rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id);
+       disk_id = idr_alloc(&mspro_block_disk_idr, card, 0, 256, GFP_KERNEL);
        mutex_unlock(&mspro_block_disk_lock);
-
-       if (rc)
-               return rc;
-
-       if ((disk_id << MSPRO_BLOCK_PART_SHIFT) > 255) {
-               rc = -ENOSPC;
-               goto out_release_id;
-       }
+       if (disk_id < 0)
+               return disk_id;
 
        msb->disk = alloc_disk(1 << MSPRO_BLOCK_PART_SHIFT);
        if (!msb->disk) {
index 29b2172..a7c5b31 100644 (file)
@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
 /* Executes one TPC (data is read/written from small or large fifo) */
 static void r592_execute_tpc(struct r592_device *dev)
 {
-       bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
+       bool is_write;
        int len, error;
        u32 status, reg;
 
@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
                return;
        }
 
+       is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
        len = dev->req->long_data ?
                dev->req->sg.length : dev->req->data_len;
 
index 481a98a..2f12cc1 100644 (file)
@@ -1091,15 +1091,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        }
        handle->pcr = pcr;
 
-       if (!idr_pre_get(&rtsx_pci_idr, GFP_KERNEL)) {
-               ret = -ENOMEM;
-               goto free_handle;
-       }
-
+       idr_preload(GFP_KERNEL);
        spin_lock(&rtsx_pci_lock);
-       ret = idr_get_new(&rtsx_pci_idr, pcr, &pcr->id);
+       ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
+       if (ret >= 0)
+               pcr->id = ret;
        spin_unlock(&rtsx_pci_lock);
-       if (ret)
+       idr_preload_end();
+       if (ret < 0)
                goto free_handle;
 
        pcr->pci = pcidev;
index f428d86..f32550a 100644 (file)
@@ -885,7 +885,7 @@ struct c2port_device *c2port_device_register(char *name,
                                        struct c2port_ops *ops, void *devdata)
 {
        struct c2port_device *c2dev;
-       int id, ret;
+       int ret;
 
        if (unlikely(!ops) || unlikely(!ops->access) || \
                unlikely(!ops->c2d_dir) || unlikely(!ops->c2ck_set) || \
@@ -897,22 +897,18 @@ struct c2port_device *c2port_device_register(char *name,
        if (unlikely(!c2dev))
                return ERR_PTR(-ENOMEM);
 
-       ret = idr_pre_get(&c2port_idr, GFP_KERNEL);
-       if (!ret) {
-               ret = -ENOMEM;
-               goto error_idr_get_new;
-       }
-
+       idr_preload(GFP_KERNEL);
        spin_lock_irq(&c2port_idr_lock);
-       ret = idr_get_new(&c2port_idr, c2dev, &id);
+       ret = idr_alloc(&c2port_idr, c2dev, 0, 0, GFP_NOWAIT);
        spin_unlock_irq(&c2port_idr_lock);
+       idr_preload_end();
 
        if (ret < 0)
-               goto error_idr_get_new;
-       c2dev->id = id;
+               goto error_idr_alloc;
+       c2dev->id = ret;
 
        c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
-                                       "c2port%d", id);
+                                  "c2port%d", c2dev->id);
        if (unlikely(IS_ERR(c2dev->dev))) {
                ret = PTR_ERR(c2dev->dev);
                goto error_device_create;
@@ -946,10 +942,10 @@ error_device_create_bin_file:
 
 error_device_create:
        spin_lock_irq(&c2port_idr_lock);
-       idr_remove(&c2port_idr, id);
+       idr_remove(&c2port_idr, c2dev->id);
        spin_unlock_irq(&c2port_idr_lock);
 
-error_idr_get_new:
+error_idr_alloc:
        kfree(c2dev);
 
        return ERR_PTR(ret);
index 3aa9a96..36f5d52 100644 (file)
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
+#include <asm/sections.h>
 
 #define v1printk(a...) do { \
        if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
                addr = (unsigned long)do_fork;
        else if (!strcmp(arg, "hw_break_val"))
                addr = (unsigned long)&hw_break_val;
+       addr = (unsigned long) dereference_function_descriptor((void *)addr);
        return addr;
 }
 
index 240a6d3..2129274 100644 (file)
@@ -280,11 +280,10 @@ static struct mmu_notifier *mmu_find_ops(struct mm_struct *mm,
                        const struct mmu_notifier_ops *ops)
 {
        struct mmu_notifier *mn, *gru_mn = NULL;
-       struct hlist_node *n;
 
        if (mm->mmu_notifier_mm) {
                rcu_read_lock();
-               hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list,
+               hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list,
                                         hlist)
                    if (mn->ops == ops) {
                        gru_mn = mn;
index 0bd5349..0ab7c92 100644 (file)
@@ -196,13 +196,14 @@ int tifm_add_adapter(struct tifm_adapter *fm)
 {
        int rc;
 
-       if (!idr_pre_get(&tifm_adapter_idr, GFP_KERNEL))
-               return -ENOMEM;
-
+       idr_preload(GFP_KERNEL);
        spin_lock(&tifm_adapter_lock);
-       rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id);
+       rc = idr_alloc(&tifm_adapter_idr, fm, 0, 0, GFP_NOWAIT);
+       if (rc >= 0)
+               fm->id = rc;
        spin_unlock(&tifm_adapter_lock);
-       if (rc)
+       idr_preload_end();
+       if (rc < 0)
                return rc;
 
        dev_set_name(&fm->dev, "tifm%u", fm->id);
index c3e8397..a8cee33 100644 (file)
@@ -127,9 +127,8 @@ static struct dbell_entry *dbell_index_table_find(u32 idx)
 {
        u32 bucket = VMCI_DOORBELL_HASH(idx);
        struct dbell_entry *dbell;
-       struct hlist_node *node;
 
-       hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+       hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
                             node) {
                if (idx == dbell->idx)
                        return dbell;
@@ -359,12 +358,10 @@ static void dbell_fire_entries(u32 notify_idx)
 {
        u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
        struct dbell_entry *dbell;
-       struct hlist_node *node;
 
        spin_lock_bh(&vmci_doorbell_it.lock);
 
-       hlist_for_each_entry(dbell, node,
-                            &vmci_doorbell_it.entries[bucket], node) {
+       hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
                if (dbell->idx == notify_idx &&
                    atomic_read(&dbell->active) == 1) {
                        if (dbell->run_delayed) {
index a196f84..9a53a30 100644 (file)
@@ -46,11 +46,10 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
                                                  enum vmci_resource_type type)
 {
        struct vmci_resource *r, *resource = NULL;
-       struct hlist_node *node;
        unsigned int idx = vmci_resource_hash(handle);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(r, node,
+       hlist_for_each_entry_rcu(r,
                                 &vmci_resource_table.entries[idx], node) {
                u32 cid = r->handle.context;
                u32 rid = r->handle.resource;
@@ -146,12 +145,11 @@ void vmci_resource_remove(struct vmci_resource *resource)
        struct vmci_handle handle = resource->handle;
        unsigned int idx = vmci_resource_hash(handle);
        struct vmci_resource *r;
-       struct hlist_node *node;
 
        /* Remove resource from hash table. */
        spin_lock(&vmci_resource_table.lock);
 
-       hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+       hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
                if (vmci_handle_is_equal(r->handle, resource->handle)) {
                        hlist_del_init_rcu(&r->node);
                        break;
index 821cd82..2a3593d 100644 (file)
@@ -429,19 +429,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        int err;
        struct mmc_host *host;
 
-       if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
-               return NULL;
-
        host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
        if (!host)
                return NULL;
 
        /* scanning will be enabled when we're ready */
        host->rescan_disable = 1;
+       idr_preload(GFP_KERNEL);
        spin_lock(&mmc_host_lock);
-       err = idr_get_new(&mmc_host_idr, host, &host->index);
+       err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
+       if (err >= 0)
+               host->index = err;
        spin_unlock(&mmc_host_lock);
-       if (err)
+       idr_preload_end();
+       if (err < 0)
                goto free;
 
        dev_set_name(&host->class_dev, "mmc%d", host->index);
index 60063cc..9834221 100644 (file)
@@ -1453,7 +1453,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
                if (!sg_miter_next(sg_miter))
                        goto done;
 
-               host->sg = sg_miter->__sg;
+               host->sg = sg_miter->piter.sg;
                buf = sg_miter->addr;
                remain = sg_miter->length;
                offset = 0;
@@ -1508,7 +1508,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
                if (!sg_miter_next(sg_miter))
                        goto done;
 
-               host->sg = sg_miter->__sg;
+               host->sg = sg_miter->piter.sg;
                buf = sg_miter->addr;
                remain = sg_miter->length;
                offset = 0;
index 03f2eb5..557bec5 100644 (file)
@@ -74,8 +74,8 @@ config MTD_REDBOOT_PARTS_READONLY
 endif # MTD_REDBOOT_PARTS
 
 config MTD_CMDLINE_PARTS
-       bool "Command line partition table parsing"
-       depends on MTD = "y"
+       tristate "Command line partition table parsing"
+       depends on MTD
        ---help---
          Allow generic configuration of the MTD partition tables via the kernel
          command line. Multiple flash resources are supported for hardware where
index 7c057a0..ddc0a42 100644 (file)
@@ -142,7 +142,13 @@ static int __init ar7_parser_init(void)
        return register_mtd_parser(&ar7_parser);
 }
 
+static void __exit ar7_parser_exit(void)
+{
+       deregister_mtd_parser(&ar7_parser);
+}
+
 module_init(ar7_parser_init);
+module_exit(ar7_parser_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
index e06d782..63feb75 100644 (file)
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
 
 /* 10 parts were found on sflash on Netgear WNDR4500 */
 #define BCM47XXPART_MAX_PARTS          12
 
-/*
- * Amount of bytes we read when analyzing each block of flash memory.
- * Set it big enough to allow detecting partition and reading important data.
- */
-#define BCM47XXPART_BYTES_TO_READ      0x404
-
 /* Magics */
 #define BOARD_DATA_MAGIC               0x5246504D      /* MPFR */
 #define POT_MAGIC1                     0x54544f50      /* POTT */
@@ -59,13 +53,21 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        uint32_t *buf;
        size_t bytes_read;
        uint32_t offset;
-       uint32_t blocksize = 0x10000;
+       uint32_t blocksize = master->erasesize;
        struct trx_header *trx;
+       int trx_part = -1;
+       int last_trx_part = -1;
+       int max_bytes_to_read = 0x8004;
+
+       if (blocksize <= 0x10000)
+               blocksize = 0x10000;
+       if (blocksize == 0x20000)
+               max_bytes_to_read = 0x18004;
 
        /* Alloc */
        parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
                        GFP_KERNEL);
-       buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+       buf = kzalloc(max_bytes_to_read, GFP_KERNEL);
 
        /* Parse block by block looking for magics */
        for (offset = 0; offset <= master->size - blocksize;
@@ -80,7 +82,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Read beginning of the block */
-               if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+               if (mtd_read(master, offset, max_bytes_to_read,
                             &bytes_read, (uint8_t *)buf) < 0) {
                        pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
                               offset);
@@ -95,9 +97,16 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Standard NVRAM */
-               if (buf[0x000 / 4] == NVRAM_HEADER) {
+               if (buf[0x000 / 4] == NVRAM_HEADER ||
+                   buf[0x1000 / 4] == NVRAM_HEADER ||
+                   buf[0x8000 / 4] == NVRAM_HEADER ||
+                   (blocksize == 0x20000 && (
+                     buf[0x10000 / 4] == NVRAM_HEADER ||
+                     buf[0x11000 / 4] == NVRAM_HEADER ||
+                     buf[0x18000 / 4] == NVRAM_HEADER))) {
                        bcm47xxpart_add_part(&parts[curr_part++], "nvram",
                                             offset, 0);
+                       offset = rounddown(offset, blocksize);
                        continue;
                }
 
@@ -131,6 +140,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                if (buf[0x000 / 4] == TRX_MAGIC) {
                        trx = (struct trx_header *)buf;
 
+                       trx_part = curr_part;
+                       bcm47xxpart_add_part(&parts[curr_part++], "firmware",
+                                            offset, 0);
+
                        i = 0;
                        /* We have LZMA loader if offset[2] points to sth */
                        if (trx->offset[2]) {
@@ -154,6 +167,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                                             offset + trx->offset[i], 0);
                        i++;
 
+                       last_trx_part = curr_part - 1;
+
                        /*
                         * We have whole TRX scanned, skip to the next part. Use
                         * roundown (not roundup), as the loop will increase
@@ -169,11 +184,15 @@ static int bcm47xxpart_parse(struct mtd_info *master,
         * Assume that partitions end at the beginning of the one they are
         * followed by.
         */
-       for (i = 0; i < curr_part - 1; i++)
-               parts[i].size = parts[i + 1].offset - parts[i].offset;
-       if (curr_part > 0)
-               parts[curr_part - 1].size =
-                               master->size - parts[curr_part - 1].offset;
+       for (i = 0; i < curr_part; i++) {
+               u64 next_part_offset = (i < curr_part - 1) ?
+                                      parts[i + 1].offset : master->size;
+
+               parts[i].size = next_part_offset - parts[i].offset;
+               if (i == last_trx_part && trx_part >= 0)
+                       parts[trx_part].size = next_part_offset -
+                                              parts[trx_part].offset;
+       }
 
        *pparts = parts;
        return curr_part;
index b861972..fff665d 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/mtd/map.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/cfi.h>
@@ -74,6 +76,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_amdstd_destroy,
@@ -496,6 +502,7 @@ static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 {
        struct cfi_private *cfi = map->fldrv_priv;
+       struct device_node __maybe_unused *np = map->device_node;
        struct mtd_info *mtd;
        int i;
 
@@ -570,6 +577,17 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
                        cfi_tell_features(extp);
 #endif
 
+#ifdef CONFIG_OF
+                       if (np && of_property_read_bool(
+                                   np, "use-advanced-sector-protection")
+                           && extp->BlkProtUnprot == 8) {
+                               printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
+                               mtd->_lock = cfi_ppb_lock;
+                               mtd->_unlock = cfi_ppb_unlock;
+                               mtd->_is_locked = cfi_ppb_is_locked;
+                       }
+#endif
+
                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
@@ -2172,6 +2190,205 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
        return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
 }
 
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
+
+struct ppb_lock {
+       struct flchip *chip;
+       loff_t offset;
+       int locked;
+};
+
+#define MAX_SECTORS                    512
+
+#define DO_XXLOCK_ONEBLOCK_LOCK                ((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK      ((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK     ((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+                                       struct flchip *chip,
+                                       unsigned long adr, int len, void *thunk)
+{
+       struct cfi_private *cfi = map->fldrv_priv;
+       unsigned long timeo;
+       int ret;
+
+       mutex_lock(&chip->mutex);
+       ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+       if (ret) {
+               mutex_unlock(&chip->mutex);
+               return ret;
+       }
+
+       pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+       cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+                        cfi->device_type, NULL);
+       cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+                        cfi->device_type, NULL);
+       /* PPB entry command */
+       cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+                        cfi->device_type, NULL);
+
+       if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+               chip->state = FL_LOCKING;
+               map_write(map, CMD(0xA0), chip->start + adr);
+               map_write(map, CMD(0x00), chip->start + adr);
+       } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+               /*
+                * Unlocking of one specific sector is not supported, so we
+                * have to unlock all sectors of this device instead
+                */
+               chip->state = FL_UNLOCKING;
+               map_write(map, CMD(0x80), chip->start);
+               map_write(map, CMD(0x30), chip->start);
+       } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+               chip->state = FL_JEDEC_QUERY;
+               /* Return locked status: 0->locked, 1->unlocked */
+               ret = !cfi_read_query(map, adr);
+       } else
+               BUG();
+
+       /*
+        * Wait for some time as unlocking of all sectors takes quite long
+        */
+       timeo = jiffies + msecs_to_jiffies(2000);       /* 2s max (un)locking */
+       for (;;) {
+               if (chip_ready(map, adr))
+                       break;
+
+               if (time_after(jiffies, timeo)) {
+                       printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+                       ret = -EIO;
+                       break;
+               }
+
+               UDELAY(map, chip, adr, 1);
+       }
+
+       /* Exit BC commands */
+       map_write(map, CMD(0x90), chip->start);
+       map_write(map, CMD(0x00), chip->start);
+
+       chip->state = FL_READY;
+       put_chip(map, chip, adr + chip->start);
+       mutex_unlock(&chip->mutex);
+
+       return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+                                      uint64_t len)
+{
+       return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+                               DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+                                        uint64_t len)
+{
+       struct mtd_erase_region_info *regions = mtd->eraseregions;
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+       struct ppb_lock *sect;
+       unsigned long adr;
+       loff_t offset;
+       uint64_t length;
+       int chipnum;
+       int i;
+       int sectors;
+       int ret;
+
+       /*
+        * PPB unlocking always unlocks all sectors of the flash chip.
+        * We need to re-lock all previously locked sectors. So lets
+        * first check the locking status of all sectors and save
+        * it for future use.
+        */
+       sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
+       if (!sect)
+               return -ENOMEM;
+
+       /*
+        * This code to walk all sectors is a slightly modified version
+        * of the cfi_varsize_frob() code.
+        */
+       i = 0;
+       chipnum = 0;
+       adr = 0;
+       sectors = 0;
+       offset = 0;
+       length = mtd->size;
+
+       while (length) {
+               int size = regions[i].erasesize;
+
+               /*
+                * Only test sectors that shall not be unlocked. The other
+                * sectors shall be unlocked, so lets keep their locking
+                * status at "unlocked" (locked=0) for the final re-locking.
+                */
+               if ((adr < ofs) || (adr >= (ofs + len))) {
+                       sect[sectors].chip = &cfi->chips[chipnum];
+                       sect[sectors].offset = offset;
+                       sect[sectors].locked = do_ppb_xxlock(
+                               map, &cfi->chips[chipnum], adr, 0,
+                               DO_XXLOCK_ONEBLOCK_GETLOCK);
+               }
+
+               adr += size;
+               offset += size;
+               length -= size;
+
+               if (offset == regions[i].offset + size * regions[i].numblocks)
+                       i++;
+
+               if (adr >> cfi->chipshift) {
+                       adr = 0;
+                       chipnum++;
+
+                       if (chipnum >= cfi->numchips)
+                               break;
+               }
+
+               sectors++;
+               if (sectors >= MAX_SECTORS) {
+                       printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
+                              MAX_SECTORS);
+                       kfree(sect);
+                       return -EINVAL;
+               }
+       }
+
+       /* Now unlock the whole chip */
+       ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+                              DO_XXLOCK_ONEBLOCK_UNLOCK);
+       if (ret) {
+               kfree(sect);
+               return ret;
+       }
+
+       /*
+        * PPB unlocking always unlocks all sectors of the flash chip.
+        * We need to re-lock all previously locked sectors.
+        */
+       for (i = 0; i < sectors; i++) {
+               if (sect[i].locked)
+                       do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+                                     DO_XXLOCK_ONEBLOCK_LOCK);
+       }
+
+       kfree(sect);
+       return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+                                           uint64_t len)
+{
+       return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+                               DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
 
 static void cfi_amdstd_sync (struct mtd_info *mtd)
 {
index c533f27..721caeb 100644 (file)
  *
  * mtdparts=<mtddef>[;<mtddef]
  * <mtddef>  := <mtd-id>:<partdef>[,<partdef>]
- *              where <mtd-id> is the name from the "cat /proc/mtd" command
- * <partdef> := <size>[@offset][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk]
  * <mtd-id>  := unique name used in mapping driver/device (mtd->name)
  * <size>    := standard linux memsize OR "-" to denote all remaining space
+ *              size is automatically truncated at end of device
+ *              if specified or trucated size is 0 the part is skipped
+ * <offset>  := standard linux memsize
+ *              if omitted the part will immediately follow the previous part
+ *              or 0 if the first part
  * <name>    := '(' NAME ')'
+ *              NAME will appear in /proc/mtd
+ *
+ * <size> and <offset> can be specified such that the parts are out of order
+ * in physical memory and may even overlap.
+ *
+ * The parts are assigned MTD numbers in the order they are specified in the
+ * command line regardless of their order in physical memory.
  *
  * Examples:
  *
@@ -70,6 +81,7 @@ struct cmdline_mtd_partition {
 static struct cmdline_mtd_partition *partitions;
 
 /* the command line passed to mtdpart_setup() */
+static char *mtdparts;
 static char *cmdline;
 static int cmdline_parsed;
 
@@ -330,6 +342,14 @@ static int parse_cmdline_partitions(struct mtd_info *master,
                if (part->parts[i].size == SIZE_REMAINING)
                        part->parts[i].size = master->size - offset;
 
+               if (offset + part->parts[i].size > master->size) {
+                       printk(KERN_WARNING ERRP
+                              "%s: partitioning exceeds flash size, truncating\n",
+                              part->mtd_id);
+                       part->parts[i].size = master->size - offset;
+               }
+               offset += part->parts[i].size;
+
                if (part->parts[i].size == 0) {
                        printk(KERN_WARNING ERRP
                               "%s: skipping zero sized partition\n",
@@ -337,16 +357,8 @@ static int parse_cmdline_partitions(struct mtd_info *master,
                        part->num_parts--;
                        memmove(&part->parts[i], &part->parts[i + 1],
                                sizeof(*part->parts) * (part->num_parts - i));
-                       continue;
-               }
-
-               if (offset + part->parts[i].size > master->size) {
-                       printk(KERN_WARNING ERRP
-                              "%s: partitioning exceeds flash size, truncating\n",
-                              part->mtd_id);
-                       part->parts[i].size = master->size - offset;
+                       i--;
                }
-               offset += part->parts[i].size;
        }
 
        *pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
@@ -365,7 +377,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
  *
  * This function needs to be visible for bootloaders.
  */
-static int mtdpart_setup(char *s)
+static int __init mtdpart_setup(char *s)
 {
        cmdline = s;
        return 1;
@@ -381,10 +393,21 @@ static struct mtd_part_parser cmdline_parser = {
 
 static int __init cmdline_parser_init(void)
 {
+       if (mtdparts)
+               mtdpart_setup(mtdparts);
        return register_mtd_parser(&cmdline_parser);
 }
 
+static void __exit cmdline_parser_exit(void)
+{
+       deregister_mtd_parser(&cmdline_parser);
+}
+
 module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
+MODULE_PARM_DESC(mtdparts, "Partitioning specification");
+module_param(mtdparts, charp, 0);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
index 395733a..369a194 100644 (file)
@@ -17,8 +17,10 @@ obj-$(CONFIG_MTD_LART)               += lart.o
 obj-$(CONFIG_MTD_BLOCK2MTD)    += block2mtd.o
 obj-$(CONFIG_MTD_DATAFLASH)    += mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)       += m25p80.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH)        += elm.o
 obj-$(CONFIG_MTD_SPEAR_SMI)    += spear_smi.o
 obj-$(CONFIG_MTD_SST25L)       += sst25l.o
 obj-$(CONFIG_MTD_BCM47XXSFLASH)        += bcm47xxsflash.o
 
-CFLAGS_docg3.o                 += -I$(src)
\ No newline at end of file
+
+CFLAGS_docg3.o                 += -I$(src)
index 4714584..9526628 100644 (file)
@@ -5,6 +5,8 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
+#include "bcm47xxsflash.h"
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
 
@@ -13,26 +15,28 @@ static const char *probes[] = { "bcm47xxpart", NULL };
 static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
                              size_t *retlen, u_char *buf)
 {
-       struct bcma_sflash *sflash = mtd->priv;
+       struct bcm47xxsflash *b47s = mtd->priv;
 
        /* Check address range */
        if ((from + len) > mtd->size)
                return -EINVAL;
 
-       memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from),
+       memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
                      len);
+       *retlen = len;
 
        return len;
 }
 
-static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
-                                  struct mtd_info *mtd)
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
 {
-       mtd->priv = sflash;
+       struct mtd_info *mtd = &b47s->mtd;
+
+       mtd->priv = b47s;
        mtd->name = "bcm47xxsflash";
        mtd->owner = THIS_MODULE;
        mtd->type = MTD_ROM;
-       mtd->size = sflash->size;
+       mtd->size = b47s->size;
        mtd->_read = bcm47xxsflash_read;
 
        /* TODO: implement writing support and verify/change following code */
@@ -40,19 +44,30 @@ static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
        mtd->writebufsize = mtd->writesize = 1;
 }
 
-static int bcm47xxsflash_probe(struct platform_device *pdev)
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
 {
        struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+       struct bcm47xxsflash *b47s;
        int err;
 
-       sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
-       if (!sflash->mtd) {
+       b47s = kzalloc(sizeof(*b47s), GFP_KERNEL);
+       if (!b47s) {
                err = -ENOMEM;
                goto out;
        }
-       bcm47xxsflash_fill_mtd(sflash, sflash->mtd);
+       sflash->priv = b47s;
 
-       err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0);
+       b47s->window = sflash->window;
+       b47s->blocksize = sflash->blocksize;
+       b47s->numblocks = sflash->numblocks;
+       b47s->size = sflash->size;
+       bcm47xxsflash_fill_mtd(b47s);
+
+       err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
        if (err) {
                pr_err("Failed to register MTD device: %d\n", err);
                goto err_dev_reg;
@@ -61,34 +76,40 @@ static int bcm47xxsflash_probe(struct platform_device *pdev)
        return 0;
 
 err_dev_reg:
-       kfree(sflash->mtd);
+       kfree(&b47s->mtd);
 out:
        return err;
 }
 
-static int bcm47xxsflash_remove(struct platform_device *pdev)
+static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
 {
        struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+       struct bcm47xxsflash *b47s = sflash->priv;
 
-       mtd_device_unregister(sflash->mtd);
-       kfree(sflash->mtd);
+       mtd_device_unregister(&b47s->mtd);
+       kfree(b47s);
 
        return 0;
 }
 
 static struct platform_driver bcma_sflash_driver = {
-       .remove = bcm47xxsflash_remove,
+       .probe  = bcm47xxsflash_bcma_probe,
+       .remove = bcm47xxsflash_bcma_remove,
        .driver = {
                .name = "bcma_sflash",
                .owner = THIS_MODULE,
        },
 };
 
+/**************************************************
+ * Init
+ **************************************************/
+
 static int __init bcm47xxsflash_init(void)
 {
        int err;
 
-       err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe);
+       err = platform_driver_register(&bcma_sflash_driver);
        if (err)
                pr_err("Failed to register BCMA serial flash driver: %d\n",
                       err);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
new file mode 100644 (file)
index 0000000..ebf6f71
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef __BCM47XXSFLASH_H
+#define __BCM47XXSFLASH_H
+
+#include <linux/mtd/mtd.h>
+
+struct bcm47xxsflash {
+       u32 window;
+       u32 blocksize;
+       u16 numblocks;
+       u32 size;
+
+       struct mtd_info mtd;
+};
+
+#endif /* BCM47XXSFLASH */
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
new file mode 100644 (file)
index 0000000..2ec5da9
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_IRQSTATUS                  0x018
+#define ELM_IRQENABLE                  0x01c
+#define ELM_LOCATION_CONFIG            0x020
+#define ELM_PAGE_CTRL                  0x080
+#define ELM_SYNDROME_FRAGMENT_0                0x400
+#define ELM_SYNDROME_FRAGMENT_6                0x418
+#define ELM_LOCATION_STATUS            0x800
+#define ELM_ERROR_LOCATION_0           0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID         BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK              BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK             0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID             BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK           BIT(8)
+#define ECC_NB_ERRORS_MASK             0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK                0x1fff
+
+#define ELM_ECC_SIZE                   0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE     0x40
+#define ERROR_LOCATION_SIZE            0x100
+
+struct elm_info {
+       struct device *dev;
+       void __iomem *elm_base;
+       struct completion elm_completion;
+       struct list_head list;
+       enum bch_ecc bch_type;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+       writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+       return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev:       ELM device
+ * @bch_type:  Type of BCH ecc
+ */
+void elm_config(struct device *dev, enum bch_ecc bch_type)
+{
+       u32 reg_val;
+       struct elm_info *info = dev_get_drvdata(dev);
+
+       reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+       elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+       info->bch_type = bch_type;
+}
+EXPORT_SYMBOL(elm_config);
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info:      elm info
+ * @index:     index number of syndrome fragment vector
+ * @enable:    enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+               bool enable)
+{
+       u32 reg_val;
+
+       reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+       if (enable)
+               reg_val |= BIT(index);  /* enable page mode */
+       else
+               reg_val &= ~BIT(index); /* disable page mode */
+
+       elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info:      elm info
+ * @err_vec:   elm error vectors
+ * @ecc:       buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+               struct elm_errorvec *err_vec, u8 *ecc)
+{
+       int i, offset;
+       u32 val;
+
+       for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+               /* Check error reported */
+               if (err_vec[i].error_reported) {
+                       elm_configure_page_mode(info, i, true);
+                       offset = ELM_SYNDROME_FRAGMENT_0 +
+                               SYNDROME_FRAGMENT_REG_SIZE * i;
+
+                       /* BCH8 */
+                       if (info->bch_type) {
+
+                               /* syndrome fragment 0 = ecc[9-12B] */
+                               val = cpu_to_be32(*(u32 *) &ecc[9]);
+                               elm_write_reg(info, offset, val);
+
+                               /* syndrome fragment 1 = ecc[5-8B] */
+                               offset += 4;
+                               val = cpu_to_be32(*(u32 *) &ecc[5]);
+                               elm_write_reg(info, offset, val);
+
+                               /* syndrome fragment 2 = ecc[1-4B] */
+                               offset += 4;
+                               val = cpu_to_be32(*(u32 *) &ecc[1]);
+                               elm_write_reg(info, offset, val);
+
+                               /* syndrome fragment 3 = ecc[0B] */
+                               offset += 4;
+                               val = ecc[0];
+                               elm_write_reg(info, offset, val);
+                       } else {
+                               /* syndrome fragment 0 = ecc[20-52b] bits */
+                               val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+                                       ((ecc[2] & 0xf) << 28);
+                               elm_write_reg(info, offset, val);
+
+                               /* syndrome fragment 1 = ecc[0-20b] bits */
+                               offset += 4;
+                               val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+                               elm_write_reg(info, offset, val);
+                       }
+               }
+
+               /* Update ecc pointer with ecc byte size */
+               ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE;
+       }
+}
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info:      elm info
+ * @err_vec:   elm error vectors
+ *
+ * Set syndrome valid bit for syndrome fragment registers for which
+ * elm syndrome fragment registers are loaded. This enables elm module
+ * to start processing syndrome vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+               struct elm_errorvec *err_vec)
+{
+       int i, offset;
+       u32 reg_val;
+
+       /*
+        * Set syndrome vector valid, so that ELM module
+        * will process it for vectors error is reported
+        */
+       for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+               if (err_vec[i].error_reported) {
+                       offset = ELM_SYNDROME_FRAGMENT_6 +
+                               SYNDROME_FRAGMENT_REG_SIZE * i;
+                       reg_val = elm_read_reg(info, offset);
+                       reg_val |= ELM_SYNDROME_VALID;
+                       elm_write_reg(info, offset, reg_val);
+               }
+       }
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info:      elm info
+ * @err_vec:   elm error vectors
+ *
+ * On completion of processing by elm module, error location status
+ * register updated with correctable/uncorrectable error information.
+ * In case of correctable errors, number of errors located from
+ * elm location status register & read the positions from
+ * elm error location register.
+ */
+static void elm_error_correction(struct elm_info *info,
+               struct elm_errorvec *err_vec)
+{
+       int i, j, errors = 0;
+       int offset;
+       u32 reg_val;
+
+       for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+               /* Check error reported */
+               if (err_vec[i].error_reported) {
+                       offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+                       reg_val = elm_read_reg(info, offset);
+
+                       /* Check correctable error or not */
+                       if (reg_val & ECC_CORRECTABLE_MASK) {
+                               offset = ELM_ERROR_LOCATION_0 +
+                                       ERROR_LOCATION_SIZE * i;
+
+                               /* Read count of correctable errors */
+                               err_vec[i].error_count = reg_val &
+                                       ECC_NB_ERRORS_MASK;
+
+                               /* Update the error locations in error vector */
+                               for (j = 0; j < err_vec[i].error_count; j++) {
+
+                                       reg_val = elm_read_reg(info, offset);
+                                       err_vec[i].error_loc[j] = reg_val &
+                                               ECC_ERROR_LOCATION_MASK;
+
+                                       /* Update error location register */
+                                       offset += 4;
+                               }
+
+                               errors += err_vec[i].error_count;
+                       } else {
+                               err_vec[i].error_uncorrectable = true;
+                       }
+
+                       /* Clearing interrupts for processed error vectors */
+                       elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+                       /* Disable page mode */
+                       elm_configure_page_mode(info, i, false);
+               }
+       }
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev:       device pointer
+ * @ecc_calc:  calculated ECC bytes from GPMC
+ * @err_vec:   elm error vectors
+ *
+ * Called with one or more error reported vectors & vectors with
+ * error reported is updated in err_vec[].error_reported
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+               struct elm_errorvec *err_vec)
+{
+       struct elm_info *info = dev_get_drvdata(dev);
+       u32 reg_val;
+
+       /* Enable page mode interrupt */
+       reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+       elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+       elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+       /* Load valid ecc byte to syndrome fragment register */
+       elm_load_syndrome(info, err_vec, ecc_calc);
+
+       /* Enable syndrome processing for which syndrome fragment is updated */
+       elm_start_processing(info, err_vec);
+
+       /* Wait for ELM module to finish locating error correction */
+       wait_for_completion(&info->elm_completion);
+
+       /* Disable page mode interrupt */
+       reg_val = elm_read_reg(info, ELM_IRQENABLE);
+       elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+       elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+       u32 reg_val;
+       struct elm_info *info = dev_id;
+
+       reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+       /* All error vectors processed */
+       if (reg_val & INTR_STATUS_PAGE_VALID) {
+               elm_write_reg(info, ELM_IRQSTATUS,
+                               reg_val & INTR_STATUS_PAGE_VALID);
+               complete(&info->elm_completion);
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct resource *res, *irq;
+       struct elm_info *info;
+
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info) {
+               dev_err(&pdev->dev, "failed to allocate memory\n");
+               return -ENOMEM;
+       }
+
+       info->dev = &pdev->dev;
+
+       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!irq) {
+               dev_err(&pdev->dev, "no irq resource defined\n");
+               return -ENODEV;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "no memory resource defined\n");
+               return -ENODEV;
+       }
+
+       info->elm_base = devm_request_and_ioremap(&pdev->dev, res);
+       if (!info->elm_base)
+               return -EADDRNOTAVAIL;
+
+       ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+                       pdev->name, info);
+       if (ret) {
+               dev_err(&pdev->dev, "failure requesting irq %i\n",
+                       (int)irq->start);
+               return ret;
+       }
+
+       pm_runtime_enable(&pdev->dev);
+       if (pm_runtime_get_sync(&pdev->dev) < 0) {
+               ret = -EINVAL;
+               pm_runtime_disable(&pdev->dev);
+               dev_err(&pdev->dev, "can't enable clock\n");
+               return ret;
+       }
+
+       init_completion(&info->elm_completion);
+       INIT_LIST_HEAD(&info->list);
+       list_add(&info->list, &elm_devices);
+       platform_set_drvdata(pdev, info);
+       return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+       { .compatible = "ti,am3352-elm" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+       .driver = {
+               .name   = "elm",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(elm_of_match),
+       },
+       .probe  = elm_probe,
+       .remove = elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform:elm");
+MODULE_LICENSE("GPL v2");
index 4eeeb2d..5b6b072 100644 (file)
@@ -565,6 +565,96 @@ time_out:
        return ret;
 }
 
+static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+       struct m25p *flash = mtd_to_m25p(mtd);
+       uint32_t offset = ofs;
+       uint8_t status_old, status_new;
+       int res = 0;
+
+       mutex_lock(&flash->lock);
+       /* Wait until finished previous command */
+       if (wait_till_ready(flash)) {
+               res = 1;
+               goto err;
+       }
+
+       status_old = read_sr(flash);
+
+       if (offset < flash->mtd.size-(flash->mtd.size/2))
+               status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
+       else if (offset < flash->mtd.size-(flash->mtd.size/4))
+               status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+       else if (offset < flash->mtd.size-(flash->mtd.size/8))
+               status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+       else if (offset < flash->mtd.size-(flash->mtd.size/16))
+               status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+       else if (offset < flash->mtd.size-(flash->mtd.size/32))
+               status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+       else if (offset < flash->mtd.size-(flash->mtd.size/64))
+               status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+       else
+               status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+
+       /* Only modify protection if it will not unlock other areas */
+       if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) >
+                                       (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+               write_enable(flash);
+               if (write_sr(flash, status_new) < 0) {
+                       res = 1;
+                       goto err;
+               }
+       }
+
+err:   mutex_unlock(&flash->lock);
+       return res;
+}
+
+static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+       struct m25p *flash = mtd_to_m25p(mtd);
+       uint32_t offset = ofs;
+       uint8_t status_old, status_new;
+       int res = 0;
+
+       mutex_lock(&flash->lock);
+       /* Wait until finished previous command */
+       if (wait_till_ready(flash)) {
+               res = 1;
+               goto err;
+       }
+
+       status_old = read_sr(flash);
+
+       if (offset+len > flash->mtd.size-(flash->mtd.size/64))
+               status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0);
+       else if (offset+len > flash->mtd.size-(flash->mtd.size/32))
+               status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+       else if (offset+len > flash->mtd.size-(flash->mtd.size/16))
+               status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+       else if (offset+len > flash->mtd.size-(flash->mtd.size/8))
+               status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+       else if (offset+len > flash->mtd.size-(flash->mtd.size/4))
+               status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+       else if (offset+len > flash->mtd.size-(flash->mtd.size/2))
+               status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+       else
+               status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+
+       /* Only modify protection if it will not lock other areas */
+       if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) <
+                                       (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+               write_enable(flash);
+               if (write_sr(flash, status_new) < 0) {
+                       res = 1;
+                       goto err;
+               }
+       }
+
+err:   mutex_unlock(&flash->lock);
+       return res;
+}
+
 /****************************************************************************/
 
 /*
@@ -642,6 +732,10 @@ static const struct spi_device_id m25p_ids[] = {
        /* Everspin */
        { "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2) },
 
+       /* GigaDevice */
+       { "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) },
+       { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+
        /* Intel/Numonyx -- xxxs33b */
        { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
        { "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
@@ -899,6 +993,12 @@ static int m25p_probe(struct spi_device *spi)
        flash->mtd._erase = m25p80_erase;
        flash->mtd._read = m25p80_read;
 
+       /* flash protection support for STmicro chips */
+       if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+               flash->mtd._lock = m25p80_lock;
+               flash->mtd._unlock = m25p80_unlock;
+       }
+
        /* sst flash chips use AAI word program */
        if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
                flash->mtd._write = sst_write;
index 62ba82c..3ed17c4 100644 (file)
@@ -429,7 +429,7 @@ config MTD_GPIO_ADDR
 
 config MTD_UCLINUX
        bool "Generic uClinux RAM/ROM filesystem support"
-       depends on MTD_RAM=y && (!MMU || COLDFIRE)
+       depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
        help
          Map driver to support image based filesystems for uClinux.
 
index 7901d72..363939d 100644 (file)
@@ -68,9 +68,6 @@ static int of_flash_remove(struct platform_device *dev)
                        kfree(info->list[i].res);
                }
        }
-
-       kfree(info);
-
        return 0;
 }
 
@@ -199,8 +196,9 @@ static int of_flash_probe(struct platform_device *dev)
        map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
 
        err = -ENOMEM;
-       info = kzalloc(sizeof(struct of_flash) +
-                      sizeof(struct of_flash_list) * count, GFP_KERNEL);
+       info = devm_kzalloc(&dev->dev,
+                           sizeof(struct of_flash) +
+                           sizeof(struct of_flash_list) * count, GFP_KERNEL);
        if (!info)
                goto err_flash_remove;
 
@@ -241,6 +239,7 @@ static int of_flash_probe(struct platform_device *dev)
                info->list[i].map.phys = res.start;
                info->list[i].map.size = res_size;
                info->list[i].map.bankwidth = be32_to_cpup(width);
+               info->list[i].map.device_node = dp;
 
                err = -ENOMEM;
                info->list[i].map.virt = ioremap(info->list[i].map.phys,
index 299bf88..c1af83d 100644 (file)
 
 /****************************************************************************/
 
+#ifdef CONFIG_MTD_ROM
+#define MAP_NAME "rom"
+#else
+#define MAP_NAME "ram"
+#endif
+
+/*
+ * Blackfin uses uclinux_ram_map during startup, so it must not be static.
+ * Provide a dummy declaration to make sparse happy.
+ */
+extern struct map_info uclinux_ram_map;
+
 struct map_info uclinux_ram_map = {
-       .name = "RAM",
-       .phys = (unsigned long)__bss_stop,
+       .name = MAP_NAME,
        .size = 0,
 };
 
+static unsigned long physaddr = -1;
+module_param(physaddr, ulong, S_IRUGO);
+
 static struct mtd_info *uclinux_ram_mtdinfo;
 
 /****************************************************************************/
@@ -60,11 +74,17 @@ static int __init uclinux_mtd_init(void)
        struct map_info *mapp;
 
        mapp = &uclinux_ram_map;
+
+       if (physaddr == -1)
+               mapp->phys = (resource_size_t)__bss_stop;
+       else
+               mapp->phys = physaddr;
+
        if (!mapp->size)
                mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
        mapp->bankwidth = 4;
 
-       printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
+       printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
                (int) mapp->phys, (int) mapp->size);
 
        /*
@@ -82,7 +102,7 @@ static int __init uclinux_mtd_init(void)
 
        simple_map_init(mapp);
 
-       mtd = do_map_probe("map_ram", mapp);
+       mtd = do_map_probe("map_" MAP_NAME, mapp);
        if (!mtd) {
                printk("uclinux[mtd]: failed to find a mapping?\n");
                return(-ENXIO);
@@ -118,6 +138,6 @@ module_exit(uclinux_mtd_cleanup);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic RAM based MTD for uClinux");
+MODULE_DESCRIPTION("Generic MTD for uClinux");
 
 /****************************************************************************/
index ec794a7..61d5f56 100644 (file)
@@ -349,13 +349,8 @@ int add_mtd_device(struct mtd_info *mtd)
        BUG_ON(mtd->writesize == 0);
        mutex_lock(&mtd_table_mutex);
 
-       do {
-               if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
-                       goto fail_locked;
-               error = idr_get_new(&mtd_idr, mtd, &i);
-       } while (error == -EAGAIN);
-
-       if (error)
+       i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+       if (i < 0)
                goto fail_locked;
 
        mtd->index = i;
index c516a94..ffcbcca 100644 (file)
@@ -101,6 +101,8 @@ struct atmel_nand_host {
        u8                      pmecc_corr_cap;
        u16                     pmecc_sector_size;
        u32                     pmecc_lookup_table_offset;
+       u32                     pmecc_lookup_table_offset_512;
+       u32                     pmecc_lookup_table_offset_1024;
 
        int                     pmecc_bytes_per_sector;
        int                     pmecc_sector_number;
@@ -908,6 +910,84 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
        pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
 }
 
+/*
+ * Get ECC requirement in ONFI parameters, returns -1 if ONFI
+ * parameters is not supported.
+ * return 0 if success to get the ECC requirement.
+ */
+static int get_onfi_ecc_param(struct nand_chip *chip,
+               int *ecc_bits, int *sector_size)
+{
+       *ecc_bits = *sector_size = 0;
+
+       if (chip->onfi_params.ecc_bits == 0xff)
+               /* TODO: the sector_size and ecc_bits need to be find in
+                * extended ecc parameter, currently we don't support it.
+                */
+               return -1;
+
+       *ecc_bits = chip->onfi_params.ecc_bits;
+
+       /* The default sector size (ecc codeword size) is 512 */
+       *sector_size = 512;
+
+       return 0;
+}
+
+/*
+ * Get ecc requirement from ONFI parameters ecc requirement.
+ * If pmecc-cap, pmecc-sector-size in DTS are not specified, this function
+ * will set them according to ONFI ecc requirement. Otherwise, use the
+ * value in DTS file.
+ * return 0 if success. otherwise return error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+               int *cap, int *sector_size)
+{
+       /* Get ECC requirement from ONFI parameters */
+       *cap = *sector_size = 0;
+       if (host->nand_chip.onfi_version) {
+               if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
+                       dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+                               *cap, *sector_size);
+               else
+                       dev_info(host->dev, "NAND chip ECC requirement is in Extended ONFI parameter, we don't support yet.\n");
+       } else {
+               dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
+       }
+       if (*cap == 0 && *sector_size == 0) {
+               *cap = 2;
+               *sector_size = 512;
+       }
+
+       /* If dts file doesn't specify then use the one in ONFI parameters */
+       if (host->pmecc_corr_cap == 0) {
+               /* use the most fitable ecc bits (the near bigger one ) */
+               if (*cap <= 2)
+                       host->pmecc_corr_cap = 2;
+               else if (*cap <= 4)
+                       host->pmecc_corr_cap = 4;
+               else if (*cap < 8)
+                       host->pmecc_corr_cap = 8;
+               else if (*cap < 12)
+                       host->pmecc_corr_cap = 12;
+               else if (*cap < 24)
+                       host->pmecc_corr_cap = 24;
+               else
+                       return -EINVAL;
+       }
+       if (host->pmecc_sector_size == 0) {
+               /* use the most fitable sector size (the near smaller one ) */
+               if (*sector_size >= 1024)
+                       host->pmecc_sector_size = 1024;
+               else if (*sector_size >= 512)
+                       host->pmecc_sector_size = 512;
+               else
+                       return -EINVAL;
+       }
+       return 0;
+}
+
 static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
                                         struct atmel_nand_host *host)
 {
@@ -916,8 +996,22 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
        struct resource *regs, *regs_pmerr, *regs_rom;
        int cap, sector_size, err_no;
 
+       err_no = pmecc_choose_ecc(host, &cap, &sector_size);
+       if (err_no) {
+               dev_err(host->dev, "The NAND flash's ECC requirement is not supported!\n");
+               return err_no;
+       }
+
+       if (cap != host->pmecc_corr_cap ||
+                       sector_size != host->pmecc_sector_size)
+               dev_info(host->dev, "WARNING: Using different PMECC parameters from NAND ONFI ECC requirement.\n");
+
        cap = host->pmecc_corr_cap;
        sector_size = host->pmecc_sector_size;
+       host->pmecc_lookup_table_offset = (sector_size == 512) ?
+                       host->pmecc_lookup_table_offset_512 :
+                       host->pmecc_lookup_table_offset_1024;
+
        dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
                 cap, sector_size);
 
@@ -1215,7 +1309,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
 static int atmel_of_init_port(struct atmel_nand_host *host,
                              struct device_node *np)
 {
-       u32 val, table_offset;
+       u32 val;
        u32 offset[2];
        int ecc_mode;
        struct atmel_nand_data *board = &host->board;
@@ -1259,42 +1353,41 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
 
        /* use PMECC, get correction capability, sector size and lookup
         * table offset.
+        * If correction bits and sector size are not specified, then find
+        * them from NAND ONFI parameters.
         */
-       if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) {
-               dev_err(host->dev, "Cannot decide PMECC Capability\n");
-               return -EINVAL;
-       } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
-           (val != 24)) {
-               dev_err(host->dev,
-                       "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
-                       val);
-               return -EINVAL;
+       if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
+               if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
+                               (val != 24)) {
+                       dev_err(host->dev,
+                               "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+                               val);
+                       return -EINVAL;
+               }
+               host->pmecc_corr_cap = (u8)val;
        }
-       host->pmecc_corr_cap = (u8)val;
 
-       if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) {
-               dev_err(host->dev, "Cannot decide PMECC Sector Size\n");
-               return -EINVAL;
-       } else if ((val != 512) && (val != 1024)) {
-               dev_err(host->dev,
-                       "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
-                       val);
-               return -EINVAL;
+       if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
+               if ((val != 512) && (val != 1024)) {
+                       dev_err(host->dev,
+                               "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+                               val);
+                       return -EINVAL;
+               }
+               host->pmecc_sector_size = (u16)val;
        }
-       host->pmecc_sector_size = (u16)val;
 
        if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
                        offset, 2) != 0) {
                dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
                return -EINVAL;
        }
-       table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1];
-
-       if (!table_offset) {
+       if (!offset[0] && !offset[1]) {
                dev_err(host->dev, "Invalid PMECC lookup table offset\n");
                return -EINVAL;
        }
-       host->pmecc_lookup_table_offset = table_offset;
+       host->pmecc_lookup_table_offset_512 = offset[0];
+       host->pmecc_lookup_table_offset_1024 = offset[1];
 
        return 0;
 }
index 0bdb2ce..c005a62 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef __BCM47XXNFLASH_H
 #define __BCM47XXNFLASH_H
 
+#ifndef pr_fmt
+#define pr_fmt(fmt)            KBUILD_MODNAME ": " fmt
+#endif
+
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 
index 8363a9a..7bae569 100644 (file)
@@ -9,14 +9,14 @@
  *
  */
 
+#include "bcm47xxnflash.h"
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-#include "bcm47xxnflash.h"
-
 MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("RafaÅ‚ MiÅ‚ecki");
@@ -77,6 +77,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver bcm47xxnflash_driver = {
+       .probe  = bcm47xxnflash_probe,
        .remove = bcm47xxnflash_remove,
        .driver = {
                .name = "bcma_nflash",
@@ -88,13 +89,10 @@ static int __init bcm47xxnflash_init(void)
 {
        int err;
 
-       /*
-        * Platform device "bcma_nflash" exists on SoCs and is registered very
-        * early, it won't be added during runtime (use platform_driver_probe).
-        */
-       err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
+       err = platform_driver_register(&bcm47xxnflash_driver);
        if (err)
-               pr_err("Failed to register serial flash driver: %d\n", err);
+               pr_err("Failed to register bcm47xx nand flash driver: %d\n",
+                      err);
 
        return err;
 }
index 595de40..b2ab373 100644 (file)
@@ -9,13 +9,13 @@
  *
  */
 
+#include "bcm47xxnflash.h"
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/bcma/bcma.h>
 
-#include "bcm47xxnflash.h"
-
 /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
  * shown ~1000 retries as maxiumum. */
 #define NFLASH_READY_RETRIES           10000
index feae55c..94e17af 100644 (file)
@@ -606,7 +606,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
        if (pdev->id < 0 || pdev->id > 3)
                return -ENODEV;
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info) {
                dev_err(&pdev->dev, "unable to allocate memory\n");
                ret = -ENOMEM;
@@ -623,11 +623,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
                goto err_nomem;
        }
 
-       vaddr = ioremap(res1->start, resource_size(res1));
-       base = ioremap(res2->start, resource_size(res2));
+       vaddr = devm_request_and_ioremap(&pdev->dev, res1);
+       base = devm_request_and_ioremap(&pdev->dev, res2);
        if (!vaddr || !base) {
                dev_err(&pdev->dev, "ioremap failed\n");
-               ret = -EINVAL;
+               ret = -EADDRNOTAVAIL;
                goto err_ioremap;
        }
 
@@ -717,7 +717,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
        }
        info->chip.ecc.mode = ecc_mode;
 
-       info->clk = clk_get(&pdev->dev, "aemif");
+       info->clk = devm_clk_get(&pdev->dev, "aemif");
        if (IS_ERR(info->clk)) {
                ret = PTR_ERR(info->clk);
                dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
@@ -845,8 +845,6 @@ err_timing:
        clk_disable_unprepare(info->clk);
 
 err_clk_enable:
-       clk_put(info->clk);
-
        spin_lock_irq(&davinci_nand_lock);
        if (ecc_mode == NAND_ECC_HW_SYNDROME)
                ecc4_busy = false;
@@ -855,13 +853,7 @@ err_clk_enable:
 err_ecc:
 err_clk:
 err_ioremap:
-       if (base)
-               iounmap(base);
-       if (vaddr)
-               iounmap(vaddr);
-
 err_nomem:
-       kfree(info);
        return ret;
 }
 
@@ -874,15 +866,9 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);
 
-       iounmap(info->base);
-       iounmap(info->vaddr);
-
        nand_release(&info->mtd);
 
        clk_disable_unprepare(info->clk);
-       clk_put(info->clk);
-
-       kfree(info);
 
        return 0;
 }
index ad62226..f1f7f12 100644 (file)
@@ -176,8 +176,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
 
        ifc_nand_ctrl->page = page_addr;
        /* Program ROW0/COL0 */
-       out_be32(&ifc->ifc_nand.row0, page_addr);
-       out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
+       iowrite32be(page_addr, &ifc->ifc_nand.row0);
+       iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
 
        buf_num = page_addr & priv->bufnum_mask;
 
@@ -239,18 +239,19 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
        int i;
 
        /* set the chip select for NAND Transaction */
-       out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
+       iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
+                   &ifc->ifc_nand.nand_csel);
 
        dev_vdbg(priv->dev,
                        "%s: fir0=%08x fcr0=%08x\n",
                        __func__,
-                       in_be32(&ifc->ifc_nand.nand_fir0),
-                       in_be32(&ifc->ifc_nand.nand_fcr0));
+                       ioread32be(&ifc->ifc_nand.nand_fir0),
+                       ioread32be(&ifc->ifc_nand.nand_fcr0));
 
        ctrl->nand_stat = 0;
 
        /* start read/write seq */
-       out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+       iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
 
        /* wait for command complete flag or timeout */
        wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -273,7 +274,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
                int sector_end = sector + chip->ecc.steps - 1;
 
                for (i = sector / 4; i <= sector_end / 4; i++)
-                       eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]);
+                       eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
 
                for (i = sector; i <= sector_end; i++) {
                        errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -313,31 +314,33 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
 
        /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
        if (mtd->writesize > 512) {
-               out_be32(&ifc->ifc_nand.nand_fir0,
-                        (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                        (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-                        (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-                        (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
-                        (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
-               out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
-
-               out_be32(&ifc->ifc_nand.nand_fcr0,
-                       (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
-                       (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
+               iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                           (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+                           (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+                           (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+                           (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+                           &ifc->ifc_nand.nand_fir0);
+               iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+
+               iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+                           (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+                           &ifc->ifc_nand.nand_fcr0);
        } else {
-               out_be32(&ifc->ifc_nand.nand_fir0,
-                        (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                        (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-                        (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
-                        (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
-               out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
+               iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                           (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+                           (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
+                           (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+                           &ifc->ifc_nand.nand_fir0);
+               iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
 
                if (oob)
-                       out_be32(&ifc->ifc_nand.nand_fcr0,
-                                NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
+                       iowrite32be(NAND_CMD_READOOB <<
+                                   IFC_NAND_FCR0_CMD0_SHIFT,
+                                   &ifc->ifc_nand.nand_fcr0);
                else
-                       out_be32(&ifc->ifc_nand.nand_fcr0,
-                               NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
+                       iowrite32be(NAND_CMD_READ0 <<
+                                   IFC_NAND_FCR0_CMD0_SHIFT,
+                                   &ifc->ifc_nand.nand_fcr0);
        }
 }
 
@@ -357,7 +360,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
        switch (command) {
        /* READ0 read the entire buffer to use hardware ECC. */
        case NAND_CMD_READ0:
-               out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+               iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
                set_addr(mtd, 0, page_addr, 0);
 
                ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -372,7 +375,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
 
        /* READOOB reads only the OOB because no ECC is performed. */
        case NAND_CMD_READOOB:
-               out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
+               iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
                set_addr(mtd, column, page_addr, 1);
 
                ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -388,19 +391,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                if (command == NAND_CMD_PARAM)
                        timing = IFC_FIR_OP_RBCD;
 
-               out_be32(&ifc->ifc_nand.nand_fir0,
-                               (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                               (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-                               (timing << IFC_NAND_FIR0_OP2_SHIFT));
-               out_be32(&ifc->ifc_nand.nand_fcr0,
-                               command << IFC_NAND_FCR0_CMD0_SHIFT);
-               out_be32(&ifc->ifc_nand.row3, column);
+               iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                           (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+                           (timing << IFC_NAND_FIR0_OP2_SHIFT),
+                           &ifc->ifc_nand.nand_fir0);
+               iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
+                           &ifc->ifc_nand.nand_fcr0);
+               iowrite32be(column, &ifc->ifc_nand.row3);
 
                /*
                 * although currently it's 8 bytes for READID, we always read
                 * the maximum 256 bytes(for PARAM)
                 */
-               out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+               iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
                ifc_nand_ctrl->read_bytes = 256;
 
                set_addr(mtd, 0, 0, 0);
@@ -415,16 +418,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
 
        /* ERASE2 uses the block and page address from ERASE1 */
        case NAND_CMD_ERASE2:
-               out_be32(&ifc->ifc_nand.nand_fir0,
-                        (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                        (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-                        (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));
+               iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                           (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+                           (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+                           &ifc->ifc_nand.nand_fir0);
 
-               out_be32(&ifc->ifc_nand.nand_fcr0,
-                        (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
-                        (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));
+               iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+                           (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+                           &ifc->ifc_nand.nand_fcr0);
 
-               out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+               iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
                ifc_nand_ctrl->read_bytes = 0;
                fsl_ifc_run_command(mtd);
                return;
@@ -440,26 +443,28 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                                (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
                                (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
 
-                       out_be32(&ifc->ifc_nand.nand_fir0,
-                                (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                                (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-                                (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-                                (IFC_FIR_OP_WBCD  << IFC_NAND_FIR0_OP3_SHIFT) |
-                                (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT));
+                       iowrite32be(
+                               (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                               (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+                               (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+                               (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+                               (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
+                               &ifc->ifc_nand.nand_fir0);
                } else {
                        nand_fcr0 = ((NAND_CMD_PAGEPROG <<
                                        IFC_NAND_FCR0_CMD1_SHIFT) |
                                    (NAND_CMD_SEQIN <<
                                        IFC_NAND_FCR0_CMD2_SHIFT));
 
-                       out_be32(&ifc->ifc_nand.nand_fir0,
-                                (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                                (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
-                                (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-                                (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
-                                (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
-                       out_be32(&ifc->ifc_nand.nand_fir1,
-                                (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT));
+                       iowrite32be(
+                               (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                               (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+                               (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+                               (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+                               (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+                               &ifc->ifc_nand.nand_fir0);
+                       iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
+                                   &ifc->ifc_nand.nand_fir1);
 
                        if (column >= mtd->writesize)
                                nand_fcr0 |=
@@ -474,7 +479,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                        column -= mtd->writesize;
                        ifc_nand_ctrl->oob = 1;
                }
-               out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
+               iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
                set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
                return;
        }
@@ -482,10 +487,11 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
        /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
        case NAND_CMD_PAGEPROG: {
                if (ifc_nand_ctrl->oob) {
-                       out_be32(&ifc->ifc_nand.nand_fbcr,
-                               ifc_nand_ctrl->index - ifc_nand_ctrl->column);
+                       iowrite32be(ifc_nand_ctrl->index -
+                                   ifc_nand_ctrl->column,
+                                   &ifc->ifc_nand.nand_fbcr);
                } else {
-                       out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+                       iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
                }
 
                fsl_ifc_run_command(mtd);
@@ -493,12 +499,12 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
        }
 
        case NAND_CMD_STATUS:
-               out_be32(&ifc->ifc_nand.nand_fir0,
-                               (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                               (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
-               out_be32(&ifc->ifc_nand.nand_fcr0,
-                               NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
-               out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+               iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                           (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+                           &ifc->ifc_nand.nand_fir0);
+               iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+                           &ifc->ifc_nand.nand_fcr0);
+               iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
                set_addr(mtd, 0, 0, 0);
                ifc_nand_ctrl->read_bytes = 1;
 
@@ -512,10 +518,10 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                return;
 
        case NAND_CMD_RESET:
-               out_be32(&ifc->ifc_nand.nand_fir0,
-                               IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
-               out_be32(&ifc->ifc_nand.nand_fcr0,
-                               NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
+               iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+                           &ifc->ifc_nand.nand_fir0);
+               iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+                           &ifc->ifc_nand.nand_fcr0);
                fsl_ifc_run_command(mtd);
                return;
 
@@ -639,18 +645,18 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
        u32 nand_fsr;
 
        /* Use READ_STATUS command, but wait for the device to be ready */
-       out_be32(&ifc->ifc_nand.nand_fir0,
-                (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
-       out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
-                       IFC_NAND_FCR0_CMD0_SHIFT);
-       out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+       iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                   (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+                   &ifc->ifc_nand.nand_fir0);
+       iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+                   &ifc->ifc_nand.nand_fcr0);
+       iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
        set_addr(mtd, 0, 0, 0);
        ifc_nand_ctrl->read_bytes = 1;
 
        fsl_ifc_run_command(mtd);
 
-       nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr);
+       nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
 
        /*
         * The chip always seems to report that it is
@@ -744,34 +750,34 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
        uint32_t cs = priv->bank;
 
        /* Save CSOR and CSOR_ext */
-       csor = in_be32(&ifc->csor_cs[cs].csor);
-       csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
+       csor = ioread32be(&ifc->csor_cs[cs].csor);
+       csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
 
        /* chage PageSize 8K and SpareSize 1K*/
        csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
-       out_be32(&ifc->csor_cs[cs].csor, csor_8k);
-       out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
+       iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
+       iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
 
        /* READID */
-       out_be32(&ifc->ifc_nand.nand_fir0,
-                       (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                       (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-                       (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
-       out_be32(&ifc->ifc_nand.nand_fcr0,
-                       NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
-       out_be32(&ifc->ifc_nand.row3, 0x0);
+       iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                   (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+                   (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+                   &ifc->ifc_nand.nand_fir0);
+       iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+                   &ifc->ifc_nand.nand_fcr0);
+       iowrite32be(0x0, &ifc->ifc_nand.row3);
 
-       out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
+       iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
 
        /* Program ROW0/COL0 */
-       out_be32(&ifc->ifc_nand.row0, 0x0);
-       out_be32(&ifc->ifc_nand.col0, 0x0);
+       iowrite32be(0x0, &ifc->ifc_nand.row0);
+       iowrite32be(0x0, &ifc->ifc_nand.col0);
 
        /* set the chip select for NAND Transaction */
-       out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
+       iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
 
        /* start read seq */
-       out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+       iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
 
        /* wait for command complete flag or timeout */
        wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -781,8 +787,8 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
                printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
 
        /* Restore CSOR and CSOR_ext */
-       out_be32(&ifc->csor_cs[cs].csor, csor);
-       out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
+       iowrite32be(csor, &ifc->csor_cs[cs].csor);
+       iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
 }
 
 static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -799,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
 
        /* fill in nand_chip structure */
        /* set up function call table */
-       if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+       if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
                chip->read_byte = fsl_ifc_read_byte16;
        else
                chip->read_byte = fsl_ifc_read_byte;
@@ -813,13 +819,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
        chip->bbt_td = &bbt_main_descr;
        chip->bbt_md = &bbt_mirror_descr;
 
-       out_be32(&ifc->ifc_nand.ncfgr, 0x0);
+       iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
 
        /* set up nand options */
        chip->bbt_options = NAND_BBT_USE_FLASH;
 
 
-       if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+       if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
                chip->read_byte = fsl_ifc_read_byte16;
                chip->options |= NAND_BUSWIDTH_16;
        } else {
@@ -832,7 +838,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
        chip->ecc.read_page = fsl_ifc_read_page;
        chip->ecc.write_page = fsl_ifc_write_page;
 
-       csor = in_be32(&ifc->csor_cs[priv->bank].csor);
+       csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
 
        /* Hardware generates ECC per 512 Bytes */
        chip->ecc.size = 512;
@@ -884,7 +890,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
                chip->ecc.mode = NAND_ECC_SOFT;
        }
 
-       ver = in_be32(&ifc->ifc_rev);
+       ver = ioread32be(&ifc->ifc_rev);
        if (ver == FSL_IFC_V1_1_0)
                fsl_ifc_sram_init(priv);
 
@@ -910,7 +916,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
 static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
                      phys_addr_t addr)
 {
-       u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr);
+       u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
 
        if (!(cspr & CSPR_V))
                return 0;
@@ -997,17 +1003,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
 
        dev_set_drvdata(priv->dev, priv);
 
-       out_be32(&ifc->ifc_nand.nand_evter_en,
-                       IFC_NAND_EVTER_EN_OPC_EN |
-                       IFC_NAND_EVTER_EN_FTOER_EN |
-                       IFC_NAND_EVTER_EN_WPER_EN);
+       iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
+                   IFC_NAND_EVTER_EN_FTOER_EN |
+                   IFC_NAND_EVTER_EN_WPER_EN,
+                   &ifc->ifc_nand.nand_evter_en);
 
        /* enable NAND Machine Interrupts */
-       out_be32(&ifc->ifc_nand.nand_evter_intr_en,
-                       IFC_NAND_EVTER_INTR_OPCIR_EN |
-                       IFC_NAND_EVTER_INTR_FTOERIR_EN |
-                       IFC_NAND_EVTER_INTR_WPERIR_EN);
-
+       iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
+                   IFC_NAND_EVTER_INTR_FTOERIR_EN |
+                   IFC_NAND_EVTER_INTR_WPERIR_EN,
+                   &ifc->ifc_nand.nand_evter_intr_en);
        priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
        if (!priv->mtd.name) {
                ret = -ENOMEM;
index a092451..588f537 100644 (file)
                        & BM_BCH_FLASH0LAYOUT0_ECC0)            \
        )
 
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14     10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14                     \
+                               (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x)                          \
+       ((GPMI_IS_MX6Q(x) && ((v) == 14))                       \
+               ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)  \
+                       & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14)   \
+               : 0                                             \
+       )
+
 #define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE                0
 #define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE                \
                        (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
                        & BM_BCH_FLASH0LAYOUT1_ECCN)            \
        )
 
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14     10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14                     \
+                               (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x)                          \
+       ((GPMI_IS_MX6Q(x) && ((v) == 14))                       \
+               ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)  \
+                       & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14)   \
+               : 0                                             \
+       )
+
 #define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE                0
 #define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE                \
                        (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
                ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)   \
                : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)               \
        )
+
+#define HW_BCH_VERSION                         0x00000160
 #endif
index d84699c..4f8857f 100644 (file)
@@ -208,6 +208,11 @@ void gpmi_dump_info(struct gpmi_nand_data *this)
        }
 
        /* start to print out the BCH info */
+       pr_err("Show BCH registers :\n");
+       for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+               reg = readl(r->bch_regs + i * 0x10);
+               pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+       }
        pr_err("BCH Geometry :\n");
        pr_err("GF length              : %u\n", geo->gf_len);
        pr_err("ECC Strength           : %u\n", geo->ecc_strength);
@@ -232,6 +237,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
        unsigned int metadata_size;
        unsigned int ecc_strength;
        unsigned int page_size;
+       unsigned int gf_len;
        int ret;
 
        if (common_nfc_set_geometry(this))
@@ -242,6 +248,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
        metadata_size = bch_geo->metadata_size;
        ecc_strength  = bch_geo->ecc_strength >> 1;
        page_size     = bch_geo->page_size;
+       gf_len        = bch_geo->gf_len;
 
        ret = gpmi_enable_clk(this);
        if (ret)
@@ -263,11 +270,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
        writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
                        | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
                        | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+                       | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
                        | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
                        r->bch_regs + HW_BCH_FLASH0LAYOUT0);
 
        writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
                        | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+                       | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
                        | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
                        r->bch_regs + HW_BCH_FLASH0LAYOUT1);
 
index e9b1c47..717881a 100644 (file)
@@ -94,6 +94,25 @@ static inline int get_ecc_strength(struct gpmi_nand_data *this)
        return round_down(ecc_strength, 2);
 }
 
+static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+       struct bch_geometry *geo = &this->bch_geometry;
+
+       /* Do the sanity check. */
+       if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+               /* The mx23/mx28 only support the GF13. */
+               if (geo->gf_len == 14)
+                       return false;
+
+               if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
+                       return false;
+       } else if (GPMI_IS_MX6Q(this)) {
+               if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
+                       return false;
+       }
+       return true;
+}
+
 int common_nfc_set_geometry(struct gpmi_nand_data *this)
 {
        struct bch_geometry *geo = &this->bch_geometry;
@@ -112,17 +131,24 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
        /* The default for the length of Galois Field. */
        geo->gf_len = 13;
 
-       /* The default for chunk size. There is no oobsize greater then 512. */
+       /* The default for chunk size. */
        geo->ecc_chunk_size = 512;
-       while (geo->ecc_chunk_size < mtd->oobsize)
+       while (geo->ecc_chunk_size < mtd->oobsize) {
                geo->ecc_chunk_size *= 2; /* keep C >= O */
+               geo->gf_len = 14;
+       }
 
        geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
 
        /* We use the same ECC strength for all chunks. */
        geo->ecc_strength = get_ecc_strength(this);
-       if (!geo->ecc_strength) {
-               pr_err("wrong ECC strength.\n");
+       if (!gpmi_check_ecc(this)) {
+               dev_err(this->dev,
+                       "We can not support this nand chip."
+                       " Its required ecc strength(%d) is beyond our"
+                       " capability(%d).\n", geo->ecc_strength,
+                       (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX
+                                       : MXS_ECC_STRENGTH_MAX));
                return -EINVAL;
        }
 
@@ -920,8 +946,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
        dma_addr_t    auxiliary_phys;
        unsigned int  i;
        unsigned char *status;
-       unsigned int  failed;
-       unsigned int  corrected;
+       unsigned int  max_bitflips = 0;
        int           ret;
 
        pr_debug("page number is : %d\n", page);
@@ -945,35 +970,25 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                        payload_virt, payload_phys);
        if (ret) {
                pr_err("Error in ECC-based read: %d\n", ret);
-               goto exit_nfc;
+               return ret;
        }
 
        /* handle the block mark swapping */
        block_mark_swapping(this, payload_virt, auxiliary_virt);
 
        /* Loop over status bytes, accumulating ECC status. */
-       failed          = 0;
-       corrected       = 0;
-       status          = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+       status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
 
        for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
                if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
                        continue;
 
                if (*status == STATUS_UNCORRECTABLE) {
-                       failed++;
+                       mtd->ecc_stats.failed++;
                        continue;
                }
-               corrected += *status;
-       }
-
-       /*
-        * Propagate ECC status to the owning MTD only when failed or
-        * corrected times nearly reaches our ECC correction threshold.
-        */
-       if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
-               mtd->ecc_stats.failed    += failed;
-               mtd->ecc_stats.corrected += corrected;
+               mtd->ecc_stats.corrected += *status;
+               max_bitflips = max_t(unsigned int, max_bitflips, *status);
        }
 
        if (oob_required) {
@@ -995,8 +1010,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                        this->payload_virt, this->payload_phys,
                        nfc_geo->payload_size,
                        payload_virt, payload_phys);
-exit_nfc:
-       return ret;
+
+       return max_bitflips;
 }
 
 static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1668,8 +1683,8 @@ exit_nfc_init:
        release_resources(this);
 exit_acquire_resources:
        platform_set_drvdata(pdev, NULL);
-       kfree(this);
        dev_err(this->dev, "driver registration failed: %d\n", ret);
+       kfree(this);
 
        return ret;
 }
index 3d93a5e..0729477 100644 (file)
@@ -284,6 +284,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
 #define STATUS_ERASED          0xff
 #define STATUS_UNCORRECTABLE   0xfe
 
+/* BCH's bit correction capability. */
+#define MXS_ECC_STRENGTH_MAX   20      /* mx23 and mx28 */
+#define MX6_ECC_STRENGTH_MAX   40
+
 /* Use the platform_id to distinguish different Archs. */
 #define IS_MX23                        0x0
 #define IS_MX28                        0x1
index 60ac5b9..07e5784 100644 (file)
@@ -530,12 +530,23 @@ static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
 
 static void send_read_id_v3(struct mxc_nand_host *host)
 {
+       struct nand_chip *this = &host->nand;
+
        /* Read ID into main buffer */
        writel(NFC_ID, NFC_V3_LAUNCH);
 
        wait_op_done(host, true);
 
        memcpy32_fromio(host->data_buf, host->main_area0, 16);
+
+       if (this->options & NAND_BUSWIDTH_16) {
+               /* compress the ID info */
+               host->data_buf[1] = host->data_buf[2];
+               host->data_buf[2] = host->data_buf[4];
+               host->data_buf[3] = host->data_buf[6];
+               host->data_buf[4] = host->data_buf[8];
+               host->data_buf[5] = host->data_buf[10];
+       }
 }
 
 /* Request the NANDFC to perform a read of the NAND device ID. */
index 3766682..4321415 100644 (file)
@@ -825,13 +825,8 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
 {
 
-       unsigned long timeo = jiffies;
        int status, state = chip->state;
-
-       if (state == FL_ERASING)
-               timeo += (HZ * 400) / 1000;
-       else
-               timeo += (HZ * 20) / 1000;
+       unsigned long timeo = (state == FL_ERASING ? 400 : 20);
 
        led_trigger_event(nand_led_trigger, LED_FULL);
 
@@ -849,6 +844,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
        if (in_interrupt() || oops_in_progress)
                panic_nand_wait(mtd, chip, timeo);
        else {
+               timeo = jiffies + msecs_to_jiffies(timeo);
                while (time_before(jiffies, timeo)) {
                        if (chip->dev_ready) {
                                if (chip->dev_ready(mtd))
index b7cfe0d..053c9a2 100644 (file)
@@ -55,8 +55,7 @@ struct mtd_info;
 #define MODULE_AUTHOR(x)       /* x */
 #define MODULE_DESCRIPTION(x)  /* x */
 
-#define printk printf
-#define KERN_ERR               ""
+#define pr_err printf
 #endif
 
 /*
@@ -507,7 +506,7 @@ int __nand_correct_data(unsigned char *buf,
        if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
                return 1;       /* error in ECC data; no action needed */
 
-       printk(KERN_ERR "uncorrectable error : ");
+       pr_err("%s: uncorrectable ECC error", __func__);
        return -1;
 }
 EXPORT_SYMBOL(__nand_correct_data);
index 818b65c..891c52a 100644 (file)
@@ -1408,40 +1408,32 @@ static void clear_memalloc(int memalloc)
                current->flags &= ~PF_MEMALLOC;
 }
 
-static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
 {
-       mm_segment_t old_fs;
        ssize_t tx;
        int err, memalloc;
 
-       err = get_pages(ns, file, count, *pos);
+       err = get_pages(ns, file, count, pos);
        if (err)
                return err;
-       old_fs = get_fs();
-       set_fs(get_ds());
        memalloc = set_memalloc();
-       tx = vfs_read(file, (char __user *)buf, count, pos);
+       tx = kernel_read(file, pos, buf, count);
        clear_memalloc(memalloc);
-       set_fs(old_fs);
        put_pages(ns);
        return tx;
 }
 
-static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
 {
-       mm_segment_t old_fs;
        ssize_t tx;
        int err, memalloc;
 
-       err = get_pages(ns, file, count, *pos);
+       err = get_pages(ns, file, count, pos);
        if (err)
                return err;
-       old_fs = get_fs();
-       set_fs(get_ds());
        memalloc = set_memalloc();
-       tx = vfs_write(file, (char __user *)buf, count, pos);
+       tx = kernel_write(file, buf, count, pos);
        clear_memalloc(memalloc);
-       set_fs(old_fs);
        put_pages(ns);
        return tx;
 }
@@ -1476,12 +1468,12 @@ int do_read_error(struct nandsim *ns, int num)
 
 void do_bit_flips(struct nandsim *ns, int num)
 {
-       if (bitflips && random32() < (1 << 22)) {
+       if (bitflips && prandom_u32() < (1 << 22)) {
                int flips = 1;
                if (bitflips > 1)
-                       flips = (random32() % (int) bitflips) + 1;
+                       flips = (prandom_u32() % (int) bitflips) + 1;
                while (flips--) {
-                       int pos = random32() % (num * 8);
+                       int pos = prandom_u32() % (num * 8);
                        ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
                        NS_WARN("read_page: flipping bit %d in page %d "
                                "reading from %d ecc: corrected=%u failed=%u\n",
@@ -1511,7 +1503,7 @@ static void read_page(struct nandsim *ns, int num)
                        if (do_read_error(ns, num))
                                return;
                        pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
-                       tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
+                       tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
                        if (tx != num) {
                                NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return;
@@ -1573,7 +1565,7 @@ static int prog_page(struct nandsim *ns, int num)
        u_char *pg_off;
 
        if (ns->cfile) {
-               loff_t off, pos;
+               loff_t off;
                ssize_t tx;
                int all;
 
@@ -1585,8 +1577,7 @@ static int prog_page(struct nandsim *ns, int num)
                        memset(ns->file_buf, 0xff, ns->geom.pgszoob);
                } else {
                        all = 0;
-                       pos = off;
-                       tx = read_file(ns, ns->cfile, pg_off, num, &pos);
+                       tx = read_file(ns, ns->cfile, pg_off, num, off);
                        if (tx != num) {
                                NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return -1;
@@ -1595,16 +1586,15 @@ static int prog_page(struct nandsim *ns, int num)
                for (i = 0; i < num; i++)
                        pg_off[i] &= ns->buf.byte[i];
                if (all) {
-                       pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
-                       tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
+                       loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+                       tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
                        if (tx != ns->geom.pgszoob) {
                                NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return -1;
                        }
                        ns->pages_written[ns->regs.row] = 1;
                } else {
-                       pos = off;
-                       tx = write_file(ns, ns->cfile, pg_off, num, &pos);
+                       tx = write_file(ns, ns->cfile, pg_off, num, off);
                        if (tx != num) {
                                NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return -1;
index 1d33349..8e820dd 100644 (file)
 #include <linux/omap-dma.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
 #include <linux/bch.h>
+#include <linux/platform_data/elm.h>
 #endif
 
 #include <linux/platform_data/mtd-nand-omap2.h>
 
 #define OMAP24XX_DMA_GPMC              4
 
+#define BCH8_MAX_ERROR         8       /* up to 8 bits correctable */
+#define BCH4_MAX_ERROR         4       /* up to 4 bits correctable */
+
+#define SECTOR_BYTES           512
+/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD           4
+#define BCH8_ECC_MAX           ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)
+#define BCH4_ECC_MAX           ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1         1       /* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0                0x1a    /* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1                0x2     /* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0                0xd     /* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1                0x3     /* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6         6       /* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0          0x0     /* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1          0x20    /* ecc_size1 = 32 */
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+       0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
+#endif
+
 /* oob info generated runtime depending on ecc algorithm and layout selected */
 static struct nand_ecclayout omap_oobinfo;
 /* Define some generic bad / good block scan pattern which are used
@@ -156,6 +186,9 @@ struct omap_nand_info {
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
        struct bch_control             *bch;
        struct nand_ecclayout           ecclayout;
+       bool                            is_elm_used;
+       struct device                   *elm_dev;
+       struct device_node              *of_node;
 #endif
 };
 
@@ -1031,6 +1064,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
  * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
  * @mtd: MTD device structure
  * @mode: Read/Write mode
+ *
+ * When using BCH, sector size is hardcoded to 512 bytes.
+ * Using wrapping mode 6 for both reading and writing if the ELM module is
+ * not used for error correction.
+ * On writing,
+ * eccsize0 = 0  (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
  */
 static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
 {
@@ -1039,32 +1079,57 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
        struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                                                   mtd);
        struct nand_chip *chip = mtd->priv;
-       u32 val;
+       u32 val, wr_mode;
+       unsigned int ecc_size1, ecc_size0;
+
+       /* Using wrapping mode 6 for writing */
+       wr_mode = BCH_WRAPMODE_6;
 
-       nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
-       dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
-       nsectors = 1;
        /*
-        * Program GPMC to perform correction on one 512-byte sector at a time.
-        * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
-        * gives a slight (5%) performance gain (but requires additional code).
+        * ECC engine enabled for valid ecc_size0 nibbles
+        * and disabled for ecc_size1 nibbles.
         */
+       ecc_size0 = BCH_ECC_SIZE0;
+       ecc_size1 = BCH_ECC_SIZE1;
+
+       /* Perform ecc calculation on 512-byte sector */
+       nsectors = 1;
+
+       /* Update number of error correction */
+       nerrors = info->nand.ecc.strength;
+
+       /* Multi sector reading/writing for NAND flash with page size < 4096 */
+       if (info->is_elm_used && (mtd->writesize <= 4096)) {
+               if (mode == NAND_ECC_READ) {
+                       /* Using wrapping mode 1 for reading */
+                       wr_mode = BCH_WRAPMODE_1;
+
+                       /*
+                        * ECC engine enabled for ecc_size0 nibbles
+                        * and disabled for ecc_size1 nibbles.
+                        */
+                       ecc_size0 = (nerrors == 8) ?
+                               BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0;
+                       ecc_size1 = (nerrors == 8) ?
+                               BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1;
+               }
+
+               /* Perform ecc calculation for one page (< 4096) */
+               nsectors = info->nand.ecc.steps;
+       }
 
        writel(ECC1, info->reg.gpmc_ecc_control);
 
-       /*
-        * When using BCH, sector size is hardcoded to 512 bytes.
-        * Here we are using wrapping mode 6 both for reading and writing, with:
-        *  size0 = 0  (no additional protected byte in spare area)
-        *  size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
-        */
-       val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT);
+       /* Configure ecc size for BCH */
+       val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
        writel(val, info->reg.gpmc_ecc_size_config);
 
+       dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
        /* BCH configuration */
        val = ((1                        << 16) | /* enable BCH */
               (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
-              (0x06                     <<  8) | /* wrap mode = 6 */
+              (wr_mode                  <<  8) | /* wrap mode */
               (dev_width                <<  7) | /* bus width */
               (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
               (info->gpmc_cs            <<  1) | /* ECC CS */
@@ -1072,7 +1137,7 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
 
        writel(val, info->reg.gpmc_ecc_config);
 
-       /* clear ecc and enable bits */
+       /* Clear ecc and enable bits */
        writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
 }
 
@@ -1161,6 +1226,298 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
        return 0;
 }
 
+/**
+ * omap3_calculate_ecc_bch - Generate bytes of ECC bytes
+ * @mtd:       MTD device structure
+ * @dat:       The pointer to data on which ecc is computed
+ * @ecc_code:  The ecc_code buffer
+ *
+ * Support calculating of BCH4/8 ecc vectors for the page
+ */
+static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
+                                   u_char *ecc_code)
+{
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                                                  mtd);
+       unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+       int i, eccbchtsel;
+
+       nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+       /*
+        * find BCH scheme used
+        * 0 -> BCH4
+        * 1 -> BCH8
+        */
+       eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3);
+
+       for (i = 0; i < nsectors; i++) {
+
+               /* Read hw-computed remainder */
+               bch_val1 = readl(info->reg.gpmc_bch_result0[i]);
+               bch_val2 = readl(info->reg.gpmc_bch_result1[i]);
+               if (eccbchtsel) {
+                       bch_val3 = readl(info->reg.gpmc_bch_result2[i]);
+                       bch_val4 = readl(info->reg.gpmc_bch_result3[i]);
+               }
+
+               if (eccbchtsel) {
+                       /* BCH8 ecc scheme */
+                       *ecc_code++ = (bch_val4 & 0xFF);
+                       *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+                       *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+                       *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+                       *ecc_code++ = (bch_val3 & 0xFF);
+                       *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+                       *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+                       *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+                       *ecc_code++ = (bch_val2 & 0xFF);
+                       *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+                       *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+                       *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+                       *ecc_code++ = (bch_val1 & 0xFF);
+                       /*
+                        * Setting 14th byte to zero to handle
+                        * erased page & maintain compatibility
+                        * with RBL
+                        */
+                       *ecc_code++ = 0x0;
+               } else {
+                       /* BCH4 ecc scheme */
+                       *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+                       *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+                       *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+                               ((bch_val1 >> 28) & 0xF);
+                       *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+                       *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+                       *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+                       *ecc_code++ = ((bch_val1 & 0xF) << 4);
+                       /*
+                        * Setting 8th byte to zero to handle
+                        * erased page
+                        */
+                       *ecc_code++ = 0x0;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data:      data sector buffer
+ * @oob:       oob buffer
+ * @info:      omap_nand_info
+ *
+ * Check whether the bit flips in an erased page fall below the correctable
+ * level. If so, report the page as erased with correctable bit
+ * flips, else report it as an uncorrectable page.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+               struct omap_nand_info *info)
+{
+       int flip_bits = 0, i;
+
+       for (i = 0; i < info->nand.ecc.size; i++) {
+               flip_bits += hweight8(~data[i]);
+               if (flip_bits > info->nand.ecc.strength)
+                       return 0;
+       }
+
+       for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+               flip_bits += hweight8(~oob[i]);
+               if (flip_bits > info->nand.ecc.strength)
+                       return 0;
+       }
+
+       /*
+        * Bit flips fall within the correctable level.
+        * Fill data area with 0xFF
+        */
+       if (flip_bits) {
+               memset(data, 0xFF, info->nand.ecc.size);
+               memset(oob, 0xFF, info->nand.ecc.bytes);
+       }
+
+       return flip_bits;
+}
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @mtd:       MTD device structure
+ * @data:      page data
+ * @read_ecc:  ecc read from nand flash
+ * @calc_ecc:  ecc read from HW ECC registers
+ *
+ * Calculated ecc vector reported as zero in case of non-error pages.
+ * In case of error/erased pages non-zero error vector is reported.
+ * In case of non-zero ecc vector, check read_ecc at fixed offset
+ * (x = 13 for BCH8, x = 7 for BCH4) to find whether the page is programmed.
+ * To handle bit flips in this data, count the number of 0's in
+ * read_ecc[x] and check if it is greater than 4. If it is less, it is a
+ * programmed page, else an erased page.
+ *
+ * 1. If page is erased, check with standard ecc vector (ecc vector
+ * for erased page to find any bit flip). If check fails, bit flip
+ * is present in erased page. Count the bit flips in erased page and
+ * if it falls under correctable level, report page with 0xFF and
+ * update the correctable bit information.
+ * 2. If error is reported on programmed page, update elm error
+ * vector and correct the page with ELM error correction routine.
+ *
+ */
+static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+                               u_char *read_ecc, u_char *calc_ecc)
+{
+       struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+                       mtd);
+       int eccsteps = info->nand.ecc.steps;
+       int i , j, stat = 0;
+       int eccsize, eccflag, ecc_vector_size;
+       struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+       u_char *ecc_vec = calc_ecc;
+       u_char *spare_ecc = read_ecc;
+       u_char *erased_ecc_vec;
+       enum bch_ecc type;
+       bool is_error_reported = false;
+
+       /* Initialize elm error vector to zero */
+       memset(err_vec, 0, sizeof(err_vec));
+
+       if (info->nand.ecc.strength == BCH8_MAX_ERROR) {
+               type = BCH8_ECC;
+               erased_ecc_vec = bch8_vector;
+       } else {
+               type = BCH4_ECC;
+               erased_ecc_vec = bch4_vector;
+       }
+
+       ecc_vector_size = info->nand.ecc.bytes;
+
+       /*
+        * Remove extra byte padding for BCH8 RBL
+        * compatibility and erased page handling
+        */
+       eccsize = ecc_vector_size - 1;
+
+       for (i = 0; i < eccsteps ; i++) {
+               eccflag = 0;    /* initialize eccflag */
+
+               /*
+                * Check whether any error was reported;
+                * in case of error, a non-zero ecc is reported.
+                */
+
+               for (j = 0; (j < eccsize); j++) {
+                       if (calc_ecc[j] != 0) {
+                               eccflag = 1; /* non zero ecc, error present */
+                               break;
+                       }
+               }
+
+               if (eccflag == 1) {
+                       /*
+                        * Set threshold to min(4, ecc.strength / 2) to limit
+                        * the maximum number of bit flips allowed per byte
+                        */
+                       unsigned int threshold = min_t(unsigned int, 4,
+                                       info->nand.ecc.strength / 2);
+
+                       /*
+                        * Check data area is programmed by counting
+                        * number of 0's at fixed offset in spare area.
+                        * Checking count of 0's against threshold.
+                        * In case programmed page expects at least threshold
+                        * zeros in byte.
+                        * If zeros are less than threshold for programmed page/
+                        * zeros are more than threshold erased page, either
+                        * case page reported as uncorrectable.
+                        */
+                       if (hweight8(~read_ecc[eccsize]) >= threshold) {
+                               /*
+                                * Update elm error vector as
+                                * data area is programmed
+                                */
+                               err_vec[i].error_reported = true;
+                               is_error_reported = true;
+                       } else {
+                               /* Error reported in erased page */
+                               int bitflip_count;
+                               u_char *buf = &data[info->nand.ecc.size * i];
+
+                               if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) {
+                                       bitflip_count = erased_sector_bitflips(
+                                                       buf, read_ecc, info);
+
+                                       if (bitflip_count)
+                                               stat += bitflip_count;
+                                       else
+                                               return -EINVAL;
+                               }
+                       }
+               }
+
+               /* Update the ecc vector */
+               calc_ecc += ecc_vector_size;
+               read_ecc += ecc_vector_size;
+       }
+
+       /* Check if any error reported */
+       if (!is_error_reported)
+               return 0;
+
+       /* Decode BCH error using ELM module */
+       elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+       for (i = 0; i < eccsteps; i++) {
+               if (err_vec[i].error_reported) {
+                       for (j = 0; j < err_vec[i].error_count; j++) {
+                               u32 bit_pos, byte_pos, error_max, pos;
+
+                               if (type == BCH8_ECC)
+                                       error_max = BCH8_ECC_MAX;
+                               else
+                                       error_max = BCH4_ECC_MAX;
+
+                               if (info->nand.ecc.strength == BCH8_MAX_ERROR)
+                                       pos = err_vec[i].error_loc[j];
+                               else
+                                       /* Add 4 to take care 4 bit padding */
+                                       pos = err_vec[i].error_loc[j] +
+                                               BCH4_BIT_PAD;
+
+                               /* Calculate bit position of error */
+                               bit_pos = pos % 8;
+
+                               /* Calculate byte position of error */
+                               byte_pos = (error_max - pos - 1) / 8;
+
+                               if (pos < error_max) {
+                                       if (byte_pos < 512)
+                                               data[byte_pos] ^= 1 << bit_pos;
+                                       else
+                                               spare_ecc[byte_pos - 512] ^=
+                                                       1 << bit_pos;
+                               }
+                               /* else, not interested to correct ecc */
+                       }
+               }
+
+               /* Update number of correctable errors */
+               stat += err_vec[i].error_count;
+
+               /* Update page data with sector size */
+               data += info->nand.ecc.size;
+               spare_ecc += ecc_vector_size;
+       }
+
+       for (i = 0; i < eccsteps; i++)
+               /* Return error if uncorrectable error present */
+               if (err_vec[i].error_uncorrectable)
+                       return -EINVAL;
+
+       return stat;
+}
+
 /**
  * omap3_correct_data_bch - Decode received data and correct errors
  * @mtd: MTD device structure
@@ -1193,6 +1550,92 @@ static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
        return count;
 }
 
+/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @mtd:               mtd info structure
+ * @chip:              nand chip info structure
+ * @buf:               data buffer
+ * @oob_required:      must write chip->oob_poi to OOB
+ *
+ * Custom write page method evolved to support multi sector writing in one shot
+ */
+static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+                                 const uint8_t *buf, int oob_required)
+{
+       int i;
+       uint8_t *ecc_calc = chip->buffers->ecccalc;
+       uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+       /* Enable GPMC ecc engine */
+       chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+       /* Write data */
+       chip->write_buf(mtd, buf, mtd->writesize);
+
+       /* Update ecc vector from GPMC result registers */
+       chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+
+       for (i = 0; i < chip->ecc.total; i++)
+               chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+       /* Write ecc vector to OOB area */
+       chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+       return 0;
+}
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @mtd:               mtd info structure
+ * @chip:              nand chip info structure
+ * @buf:               buffer to store read data
+ * @oob_required:      caller requires OOB data read to chip->oob_poi
+ * @page:              page number to read
+ *
+ * For BCH ecc scheme, GPMC used for syndrome calculation and ELM module
+ * used for error correction.
+ * Custom method evolved to support ELM error correction & multi sector
+ * reading. On reading page data area is read along with OOB data with
+ * ecc engine enabled. ecc vector updated after read of OOB data.
+ * For non error pages ecc vector reported as zero.
+ */
+static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+                               uint8_t *buf, int oob_required, int page)
+{
+       uint8_t *ecc_calc = chip->buffers->ecccalc;
+       uint8_t *ecc_code = chip->buffers->ecccode;
+       uint32_t *eccpos = chip->ecc.layout->eccpos;
+       uint8_t *oob = &chip->oob_poi[eccpos[0]];
+       uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
+       int stat;
+       unsigned int max_bitflips = 0;
+
+       /* Enable GPMC ecc engine */
+       chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+       /* Read data */
+       chip->read_buf(mtd, buf, mtd->writesize);
+
+       /* Read oob bytes */
+       chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
+       chip->read_buf(mtd, oob, chip->ecc.total);
+
+       /* Calculate ecc bytes */
+       chip->ecc.calculate(mtd, buf, ecc_calc);
+
+       memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
+
+       stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
+
+       if (stat < 0) {
+               mtd->ecc_stats.failed++;
+       } else {
+               mtd->ecc_stats.corrected += stat;
+               max_bitflips = max_t(unsigned int, max_bitflips, stat);
+       }
+
+       return max_bitflips;
+}
+
 /**
  * omap3_free_bch - Release BCH ecc resources
  * @mtd: MTD device structure
@@ -1218,43 +1661,86 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
        struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                                                   mtd);
 #ifdef CONFIG_MTD_NAND_OMAP_BCH8
-       const int hw_errors = 8;
+       const int hw_errors = BCH8_MAX_ERROR;
 #else
-       const int hw_errors = 4;
+       const int hw_errors = BCH4_MAX_ERROR;
 #endif
+       enum bch_ecc bch_type;
+       const __be32 *parp;
+       int lenp;
+       struct device_node *elm_node;
+
        info->bch = NULL;
 
-       max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+       max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
+               BCH8_MAX_ERROR : BCH4_MAX_ERROR;
        if (max_errors != hw_errors) {
                pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
                       max_errors, hw_errors);
                goto fail;
        }
 
-       /* software bch library is only used to detect and locate errors */
-       info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
-       if (!info->bch)
-               goto fail;
+       info->nand.ecc.size = 512;
+       info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
+       info->nand.ecc.mode = NAND_ECC_HW;
+       info->nand.ecc.strength = max_errors;
 
-       info->nand.ecc.size    = 512;
-       info->nand.ecc.hwctl   = omap3_enable_hwecc_bch;
-       info->nand.ecc.correct = omap3_correct_data_bch;
-       info->nand.ecc.mode    = NAND_ECC_HW;
+       if (hw_errors == BCH8_MAX_ERROR)
+               bch_type = BCH8_ECC;
+       else
+               bch_type = BCH4_ECC;
 
-       /*
-        * The number of corrected errors in an ecc block that will trigger
-        * block scrubbing defaults to the ecc strength (4 or 8).
-        * Set mtd->bitflip_threshold here to define a custom threshold.
-        */
+       /* Detect availability of ELM module */
+       parp = of_get_property(info->of_node, "elm_id", &lenp);
+       if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
+               pr_err("Missing elm_id property, fall back to Software BCH\n");
+               info->is_elm_used = false;
+       } else {
+               struct platform_device *pdev;
 
-       if (max_errors == 8) {
-               info->nand.ecc.strength  = 8;
-               info->nand.ecc.bytes     = 13;
-               info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+               elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
+               pdev = of_find_device_by_node(elm_node);
+               info->elm_dev = &pdev->dev;
+               elm_config(info->elm_dev, bch_type);
+               info->is_elm_used = true;
+       }
+
+       if (info->is_elm_used && (mtd->writesize <= 4096)) {
+
+               if (hw_errors == BCH8_MAX_ERROR)
+                       info->nand.ecc.bytes = BCH8_SIZE;
+               else
+                       info->nand.ecc.bytes = BCH4_SIZE;
+
+               info->nand.ecc.correct = omap_elm_correct_data;
+               info->nand.ecc.calculate = omap3_calculate_ecc_bch;
+               info->nand.ecc.read_page = omap_read_page_bch;
+               info->nand.ecc.write_page = omap_write_page_bch;
        } else {
-               info->nand.ecc.strength  = 4;
-               info->nand.ecc.bytes     = 7;
-               info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+               /*
+                * software bch library is only used to detect and
+                * locate errors
+                */
+               info->bch = init_bch(13, max_errors,
+                               0x201b /* hw polynomial */);
+               if (!info->bch)
+                       goto fail;
+
+               info->nand.ecc.correct = omap3_correct_data_bch;
+
+               /*
+                * The number of corrected errors in an ecc block that will
+                * trigger block scrubbing defaults to the ecc strength (4 or
+                * 8). Set mtd->bitflip_threshold to define a custom threshold.
+                */
+
+               if (max_errors == 8) {
+                       info->nand.ecc.bytes = 13;
+                       info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+               } else {
+                       info->nand.ecc.bytes = 7;
+                       info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+               }
        }
 
        pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
@@ -1270,7 +1756,7 @@ fail:
  */
 static int omap3_init_bch_tail(struct mtd_info *mtd)
 {
-       int i, steps;
+       int i, steps, offset;
        struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                                                   mtd);
        struct nand_ecclayout *layout = &info->ecclayout;
@@ -1292,11 +1778,21 @@ static int omap3_init_bch_tail(struct mtd_info *mtd)
                goto fail;
        }
 
+       /* ECC layout compatible with RBL for BCH8 */
+       if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
+               offset = 2;
+       else
+               offset = mtd->oobsize - layout->eccbytes;
+
        /* put ecc bytes at oob tail */
        for (i = 0; i < layout->eccbytes; i++)
-               layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+               layout->eccpos[i] = offset + i;
+
+       if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
+               layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
+       else
+               layout->oobfree[0].offset = 2;
 
-       layout->oobfree[0].offset = 2;
        layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
        info->nand.ecc.layout = layout;
 
@@ -1360,6 +1856,9 @@ static int omap_nand_probe(struct platform_device *pdev)
 
        info->nand.options      = pdata->devsize;
        info->nand.options      |= NAND_SKIP_BBTSCAN;
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+       info->of_node           = pdata->of_node;
+#endif
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
index dbd3aa5..30bd907 100644 (file)
@@ -174,7 +174,14 @@ out:
        return rc;
 }
 
+static void __exit ofpart_parser_exit(void)
+{
+       deregister_mtd_parser(&ofpart_parser);
+       deregister_mtd_parser(&ofoldpart_parser);
+}
+
 module_init(ofpart_parser_init);
+module_exit(ofpart_parser_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
index 1eee264..7010660 100644 (file)
@@ -44,7 +44,7 @@ struct nand_ecc_test {
 static void single_bit_error_data(void *error_data, void *correct_data,
                                size_t size)
 {
-       unsigned int offset = random32() % (size * BITS_PER_BYTE);
+       unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
 
        memcpy(error_data, correct_data, size);
        __change_bit_le(offset, error_data);
@@ -55,9 +55,9 @@ static void double_bit_error_data(void *error_data, void *correct_data,
 {
        unsigned int offset[2];
 
-       offset[0] = random32() % (size * BITS_PER_BYTE);
+       offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
        do {
-               offset[1] = random32() % (size * BITS_PER_BYTE);
+               offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
        } while (offset[0] == offset[1]);
 
        memcpy(error_data, correct_data, size);
@@ -68,7 +68,7 @@ static void double_bit_error_data(void *error_data, void *correct_data,
 
 static unsigned int random_ecc_bit(size_t size)
 {
-       unsigned int offset = random32() % (3 * BITS_PER_BYTE);
+       unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
 
        if (size == 256) {
                /*
@@ -76,7 +76,7 @@ static unsigned int random_ecc_bit(size_t size)
                 * and 17th bit) in ECC code for 256 byte data block
                 */
                while (offset == 16 || offset == 17)
-                       offset = random32() % (3 * BITS_PER_BYTE);
+                       offset = prandom_u32() % (3 * BITS_PER_BYTE);
        }
 
        return offset;
@@ -256,7 +256,7 @@ static int nand_ecc_test_run(const size_t size)
                goto error;
        }
 
-       get_random_bytes(correct_data, size);
+       prandom_bytes(correct_data, size);
        __nand_calculate_ecc(correct_data, size, correct_ecc);
 
        for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
index e827fa8..3e24b37 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
@@ -46,26 +47,7 @@ static int use_offset;
 static int use_len;
 static int use_len_max;
 static int vary_offset;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
-       next = next * 1103515245 + 12345;
-       return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
-       next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
-       size_t i;
-
-       for (i = 0; i < len; ++i)
-               buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
 
 static int erase_eraseblock(int ebnum)
 {
@@ -129,7 +111,7 @@ static int write_eraseblock(int ebnum)
        loff_t addr = ebnum * mtd->erasesize;
 
        for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
-               set_random_data(writebuf, use_len);
+               prandom_bytes_state(&rnd_state, writebuf, use_len);
                ops.mode      = MTD_OPS_AUTO_OOB;
                ops.len       = 0;
                ops.retlen    = 0;
@@ -182,7 +164,7 @@ static int verify_eraseblock(int ebnum)
        loff_t addr = ebnum * mtd->erasesize;
 
        for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
-               set_random_data(writebuf, use_len);
+               prandom_bytes_state(&rnd_state, writebuf, use_len);
                ops.mode      = MTD_OPS_AUTO_OOB;
                ops.len       = 0;
                ops.retlen    = 0;
@@ -273,7 +255,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
        loff_t addr = ebnum * mtd->erasesize;
        size_t len = mtd->ecclayout->oobavail * pgcnt;
 
-       set_random_data(writebuf, len);
+       prandom_bytes_state(&rnd_state, writebuf, len);
        ops.mode      = MTD_OPS_AUTO_OOB;
        ops.len       = 0;
        ops.retlen    = 0;
@@ -424,12 +406,12 @@ static int __init mtd_oobtest_init(void)
        if (err)
                goto out;
 
-       simple_srand(1);
+       prandom_seed_state(&rnd_state, 1);
        err = write_whole_device();
        if (err)
                goto out;
 
-       simple_srand(1);
+       prandom_seed_state(&rnd_state, 1);
        err = verify_all_eraseblocks();
        if (err)
                goto out;
@@ -444,13 +426,13 @@ static int __init mtd_oobtest_init(void)
        if (err)
                goto out;
 
-       simple_srand(3);
+       prandom_seed_state(&rnd_state, 3);
        err = write_whole_device();
        if (err)
                goto out;
 
        /* Check all eraseblocks */
-       simple_srand(3);
+       prandom_seed_state(&rnd_state, 3);
        pr_info("verifying all eraseblocks\n");
        for (i = 0; i < ebcnt; ++i) {
                if (bbt[i])
@@ -479,7 +461,7 @@ static int __init mtd_oobtest_init(void)
        use_len = mtd->ecclayout->oobavail;
        use_len_max = mtd->ecclayout->oobavail;
        vary_offset = 1;
-       simple_srand(5);
+       prandom_seed_state(&rnd_state, 5);
 
        err = write_whole_device();
        if (err)
@@ -490,7 +472,7 @@ static int __init mtd_oobtest_init(void)
        use_len = mtd->ecclayout->oobavail;
        use_len_max = mtd->ecclayout->oobavail;
        vary_offset = 1;
-       simple_srand(5);
+       prandom_seed_state(&rnd_state, 5);
        err = verify_all_eraseblocks();
        if (err)
                goto out;
@@ -649,7 +631,7 @@ static int __init mtd_oobtest_init(void)
                goto out;
 
        /* Write all eraseblocks */
-       simple_srand(11);
+       prandom_seed_state(&rnd_state, 11);
        pr_info("writing OOBs of whole device\n");
        for (i = 0; i < ebcnt - 1; ++i) {
                int cnt = 2;
@@ -659,7 +641,7 @@ static int __init mtd_oobtest_init(void)
                        continue;
                addr = (i + 1) * mtd->erasesize - mtd->writesize;
                for (pg = 0; pg < cnt; ++pg) {
-                       set_random_data(writebuf, sz);
+                       prandom_bytes_state(&rnd_state, writebuf, sz);
                        ops.mode      = MTD_OPS_AUTO_OOB;
                        ops.len       = 0;
                        ops.retlen    = 0;
@@ -680,12 +662,13 @@ static int __init mtd_oobtest_init(void)
        pr_info("written %u eraseblocks\n", i);
 
        /* Check all eraseblocks */
-       simple_srand(11);
+       prandom_seed_state(&rnd_state, 11);
        pr_info("verifying all eraseblocks\n");
        for (i = 0; i < ebcnt - 1; ++i) {
                if (bbt[i] || bbt[i + 1])
                        continue;
-               set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
+               prandom_bytes_state(&rnd_state, writebuf,
+                                       mtd->ecclayout->oobavail * 2);
                addr = (i + 1) * mtd->erasesize - mtd->writesize;
                ops.mode      = MTD_OPS_AUTO_OOB;
                ops.len       = 0;
index f93a76f..0c1140b 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
@@ -45,26 +46,7 @@ static int bufsize;
 static int ebcnt;
 static int pgcnt;
 static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
-       next = next * 1103515245 + 12345;
-       return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
-       next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
-       size_t i;
-
-       for (i = 0; i < len; ++i)
-               buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
 
 static int erase_eraseblock(int ebnum)
 {
@@ -98,7 +80,7 @@ static int write_eraseblock(int ebnum)
        size_t written;
        loff_t addr = ebnum * mtd->erasesize;
 
-       set_random_data(writebuf, mtd->erasesize);
+       prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
        cond_resched();
        err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
        if (err || written != mtd->erasesize)
@@ -124,7 +106,7 @@ static int verify_eraseblock(int ebnum)
        for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
                addrn -= mtd->erasesize;
 
-       set_random_data(writebuf, mtd->erasesize);
+       prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
        for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
                /* Do a read to set the internal dataRAMs to different data */
                err = mtd_read(mtd, addr0, bufsize, &read, twopages);
@@ -160,7 +142,8 @@ static int verify_eraseblock(int ebnum)
        }
        /* Check boundary between eraseblocks */
        if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
-               unsigned long oldnext = next;
+               struct rnd_state old_state = rnd_state;
+
                /* Do a read to set the internal dataRAMs to different data */
                err = mtd_read(mtd, addr0, bufsize, &read, twopages);
                if (mtd_is_bitflip(err))
@@ -188,13 +171,13 @@ static int verify_eraseblock(int ebnum)
                        return err;
                }
                memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
-               set_random_data(boundary + pgsize, pgsize);
+               prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
                if (memcmp(twopages, boundary, bufsize)) {
                        pr_err("error: verify failed at %#llx\n",
                               (long long)addr);
                        errcnt += 1;
                }
-               next = oldnext;
+               rnd_state = old_state;
        }
        return err;
 }
@@ -326,7 +309,7 @@ static int erasecrosstest(void)
                return err;
 
        pr_info("writing 1st page of block %d\n", ebnum);
-       set_random_data(writebuf, pgsize);
+       prandom_bytes_state(&rnd_state, writebuf, pgsize);
        strcpy(writebuf, "There is no data like this!");
        err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
        if (err || written != pgsize) {
@@ -359,7 +342,7 @@ static int erasecrosstest(void)
                return err;
 
        pr_info("writing 1st page of block %d\n", ebnum);
-       set_random_data(writebuf, pgsize);
+       prandom_bytes_state(&rnd_state, writebuf, pgsize);
        strcpy(writebuf, "There is no data like this!");
        err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
        if (err || written != pgsize) {
@@ -417,7 +400,7 @@ static int erasetest(void)
                return err;
 
        pr_info("writing 1st page of block %d\n", ebnum);
-       set_random_data(writebuf, pgsize);
+       prandom_bytes_state(&rnd_state, writebuf, pgsize);
        err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
        if (err || written != pgsize) {
                pr_err("error: write failed at %#llx\n",
@@ -565,7 +548,7 @@ static int __init mtd_pagetest_init(void)
        pr_info("erased %u eraseblocks\n", i);
 
        /* Write all eraseblocks */
-       simple_srand(1);
+       prandom_seed_state(&rnd_state, 1);
        pr_info("writing whole device\n");
        for (i = 0; i < ebcnt; ++i) {
                if (bbt[i])
@@ -580,7 +563,7 @@ static int __init mtd_pagetest_init(void)
        pr_info("written %u eraseblocks\n", i);
 
        /* Check all eraseblocks */
-       simple_srand(1);
+       prandom_seed_state(&rnd_state, 1);
        pr_info("verifying all eraseblocks\n");
        for (i = 0; i < ebcnt; ++i) {
                if (bbt[i])
index 596cbea..a6ce9c1 100644 (file)
@@ -49,13 +49,6 @@ static int pgcnt;
 static int goodebcnt;
 static struct timeval start, finish;
 
-static void set_random_data(unsigned char *buf, size_t len)
-{
-       size_t i;
-
-       for (i = 0; i < len; ++i)
-               buf[i] = random32();
-}
 
 static int erase_eraseblock(int ebnum)
 {
@@ -396,7 +389,7 @@ static int __init mtd_speedtest_init(void)
                goto out;
        }
 
-       set_random_data(iobuf, mtd->erasesize);
+       prandom_bytes(iobuf, mtd->erasesize);
 
        err = scan_for_bad_eraseblocks();
        if (err)
index 3729f67..787f539 100644 (file)
@@ -55,7 +55,7 @@ static int rand_eb(void)
        unsigned int eb;
 
 again:
-       eb = random32();
+       eb = prandom_u32();
        /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
        eb %= (ebcnt - 1);
        if (bbt[eb])
@@ -67,7 +67,7 @@ static int rand_offs(void)
 {
        unsigned int offs;
 
-       offs = random32();
+       offs = prandom_u32();
        offs %= bufsize;
        return offs;
 }
@@ -76,7 +76,7 @@ static int rand_len(int offs)
 {
        unsigned int len;
 
-       len = random32();
+       len = prandom_u32();
        len %= (bufsize - offs);
        return len;
 }
@@ -191,7 +191,7 @@ static int do_write(void)
 
 static int do_operation(void)
 {
-       if (random32() & 1)
+       if (prandom_u32() & 1)
                return do_read();
        else
                return do_write();
@@ -282,8 +282,7 @@ static int __init mtd_stresstest_init(void)
        }
        for (i = 0; i < ebcnt; i++)
                offsets[i] = mtd->erasesize;
-       for (i = 0; i < bufsize; i++)
-               writebuf[i] = random32();
+       prandom_bytes(writebuf, bufsize);
 
        err = scan_for_bad_eraseblocks();
        if (err)
index c880c22..aade56f 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
@@ -43,26 +44,7 @@ static int bufsize;
 static int ebcnt;
 static int pgcnt;
 static int errcnt;
-static unsigned long next = 1;
-
-static inline unsigned int simple_rand(void)
-{
-       next = next * 1103515245 + 12345;
-       return (unsigned int)((next / 65536) % 32768);
-}
-
-static inline void simple_srand(unsigned long seed)
-{
-       next = seed;
-}
-
-static void set_random_data(unsigned char *buf, size_t len)
-{
-       size_t i;
-
-       for (i = 0; i < len; ++i)
-               buf[i] = simple_rand();
-}
+static struct rnd_state rnd_state;
 
 static inline void clear_data(unsigned char *buf, size_t len)
 {
@@ -119,7 +101,7 @@ static int write_eraseblock(int ebnum)
        int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
-       set_random_data(writebuf, subpgsize);
+       prandom_bytes_state(&rnd_state, writebuf, subpgsize);
        err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
        if (unlikely(err || written != subpgsize)) {
                pr_err("error: write failed at %#llx\n",
@@ -133,7 +115,7 @@ static int write_eraseblock(int ebnum)
 
        addr += subpgsize;
 
-       set_random_data(writebuf, subpgsize);
+       prandom_bytes_state(&rnd_state, writebuf, subpgsize);
        err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
        if (unlikely(err || written != subpgsize)) {
                pr_err("error: write failed at %#llx\n",
@@ -157,7 +139,7 @@ static int write_eraseblock2(int ebnum)
        for (k = 1; k < 33; ++k) {
                if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
                        break;
-               set_random_data(writebuf, subpgsize * k);
+               prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
                err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
                if (unlikely(err || written != subpgsize * k)) {
                        pr_err("error: write failed at %#llx\n",
@@ -193,7 +175,7 @@ static int verify_eraseblock(int ebnum)
        int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
-       set_random_data(writebuf, subpgsize);
+       prandom_bytes_state(&rnd_state, writebuf, subpgsize);
        clear_data(readbuf, subpgsize);
        err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
        if (unlikely(err || read != subpgsize)) {
@@ -220,7 +202,7 @@ static int verify_eraseblock(int ebnum)
 
        addr += subpgsize;
 
-       set_random_data(writebuf, subpgsize);
+       prandom_bytes_state(&rnd_state, writebuf, subpgsize);
        clear_data(readbuf, subpgsize);
        err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
        if (unlikely(err || read != subpgsize)) {
@@ -257,7 +239,7 @@ static int verify_eraseblock2(int ebnum)
        for (k = 1; k < 33; ++k) {
                if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
                        break;
-               set_random_data(writebuf, subpgsize * k);
+               prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
                clear_data(readbuf, subpgsize * k);
                err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
                if (unlikely(err || read != subpgsize * k)) {
@@ -430,7 +412,7 @@ static int __init mtd_subpagetest_init(void)
                goto out;
 
        pr_info("writing whole device\n");
-       simple_srand(1);
+       prandom_seed_state(&rnd_state, 1);
        for (i = 0; i < ebcnt; ++i) {
                if (bbt[i])
                        continue;
@@ -443,7 +425,7 @@ static int __init mtd_subpagetest_init(void)
        }
        pr_info("written %u eraseblocks\n", i);
 
-       simple_srand(1);
+       prandom_seed_state(&rnd_state, 1);
        pr_info("verifying all eraseblocks\n");
        for (i = 0; i < ebcnt; ++i) {
                if (bbt[i])
@@ -466,7 +448,7 @@ static int __init mtd_subpagetest_init(void)
                goto out;
 
        /* Write all eraseblocks */
-       simple_srand(3);
+       prandom_seed_state(&rnd_state, 3);
        pr_info("writing whole device\n");
        for (i = 0; i < ebcnt; ++i) {
                if (bbt[i])
@@ -481,7 +463,7 @@ static int __init mtd_subpagetest_init(void)
        pr_info("written %u eraseblocks\n", i);
 
        /* Check all eraseblocks */
-       simple_srand(3);
+       prandom_seed_state(&rnd_state, 3);
        pr_info("verifying all eraseblocks\n");
        for (i = 0; i < ebcnt; ++i) {
                if (bbt[i])
index c4cde1e..3a9f6a6 100644 (file)
@@ -208,7 +208,7 @@ static inline int write_pattern(int ebnum, void *buf)
 static int __init tort_init(void)
 {
        int err = 0, i, infinite = !cycles_count;
-       int bad_ebs[ebcnt];
+       int *bad_ebs;
 
        printk(KERN_INFO "\n");
        printk(KERN_INFO "=================================================\n");
@@ -250,28 +250,24 @@ static int __init tort_init(void)
 
        err = -ENOMEM;
        patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!patt_5A5) {
-               pr_err("error: cannot allocate memory\n");
+       if (!patt_5A5)
                goto out_mtd;
-       }
 
        patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!patt_A5A) {
-               pr_err("error: cannot allocate memory\n");
+       if (!patt_A5A)
                goto out_patt_5A5;
-       }
 
        patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!patt_FF) {
-               pr_err("error: cannot allocate memory\n");
+       if (!patt_FF)
                goto out_patt_A5A;
-       }
 
        check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!check_buf) {
-               pr_err("error: cannot allocate memory\n");
+       if (!check_buf)
                goto out_patt_FF;
-       }
+
+       bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
+       if (!bad_ebs)
+               goto out_check_buf;
 
        err = 0;
 
@@ -290,7 +286,6 @@ static int __init tort_init(void)
        /*
         * Check if there is a bad eraseblock among those we are going to test.
         */
-       memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
        if (mtd_can_have_bb(mtd)) {
                for (i = eb; i < eb + ebcnt; i++) {
                        err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
@@ -394,6 +389,8 @@ out:
 
        pr_info("finished after %u erase cycles\n",
               erase_cycles);
+       kfree(bad_ebs);
+out_check_buf:
        kfree(check_buf);
 out_patt_FF:
        kfree(patt_FF);
index dfcc65b..4f02848 100644 (file)
@@ -194,7 +194,7 @@ static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
 {
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_device *ubi = desc->vol->ubi;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int err;
        mutex_lock(&inode->i_mutex);
        err = ubi_sync(ubi->ubi_num);
index 33f8f3b..cba89fc 100644 (file)
@@ -86,7 +86,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
 static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
 {
        if (ubi->dbg.emulate_bitflips)
-               return !(random32() % 200);
+               return !(prandom_u32() % 200);
        return 0;
 }
 
@@ -100,7 +100,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
 static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
 {
        if (ubi->dbg.emulate_io_failures)
-               return !(random32() % 500);
+               return !(prandom_u32() % 500);
        return 0;
 }
 
@@ -114,7 +114,7 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
 static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 {
        if (ubi->dbg.emulate_io_failures)
-               return !(random32() % 400);
+               return !(prandom_u32() % 400);
        return 0;
 }
 
index a7efec2..9b017d9 100644 (file)
@@ -381,7 +381,7 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
 }
 
 #ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
 static void b44_wap54g10_workaround(struct b44 *bp)
 {
        char buf[20];
@@ -393,7 +393,7 @@ static void b44_wap54g10_workaround(struct b44 *bp)
         * see https://dev.openwrt.org/ticket/146
         * check and reset bit "isolate"
         */
-       if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
+       if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
                return;
        if (simple_strtoul(buf, NULL, 0) == 2) {
                err = __b44_readphy(bp, 0, MII_BMCR, &val);
index bf985c0..da5f439 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/mii.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
 
 static const struct bcma_device_id bgmac_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
                        bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
                                  ring->start);
                } else {
+                       /* Omit CRC. */
+                       len -= ETH_FCS_LEN;
+
                        new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
                        if (new_skb) {
                                skb_put(new_skb, len);
                                skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
                                                                 new_skb->data,
                                                                 len);
+                               skb_checksum_none_assert(skb);
                                new_skb->protocol =
                                        eth_type_trans(new_skb, bgmac->net_dev);
                                netif_receive_skb(new_skb);
@@ -908,7 +912,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
                             BGMAC_CHIPCTL_1_IF_TYPE_RMII;
                char buf[2];
 
-               if (nvram_getenv("et_swtype", buf, 1) > 0) {
+               if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
                        if (kstrtou8(buf, 0, &et_swtype))
                                bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
                                          buf);
@@ -1386,7 +1390,7 @@ static int bgmac_probe(struct bcma_device *core)
        }
 
        bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
-       if (nvram_getenv("et0_no_txint", NULL, 0) == 0)
+       if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
                bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
 
        /* TODO: reset the external phy. Specs are needed */
index ecac04a..a923bc4 100644 (file)
@@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
                tsum = ~csum_fold(csum_add((__force __wsum) csum,
                                  csum_partial(t_header, -fix, 0)));
 
-       return bswab16(csum);
+       return bswab16(tsum);
 }
 
 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
index 9a674b1..edfa67a 100644 (file)
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
                if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
                        cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+               if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+                       cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
        }
 
        cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                                ADVERTISED_10000baseKR_Full))
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+                       if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+                               bp->link_params.speed_cap_mask[cfg_idx] |=
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
                }
        } else { /* forced speed */
                /* advertise the requested speed and duplex if supported */
index 1663e0b..31c5787 100644 (file)
@@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
                                         0x0);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Disable MI_INT interrupt before setting LED4
+                                * source to constant off.
+                                */
+                               if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+                                          params->port*4) &
+                                   NIG_MASK_MI_INT) {
+                                       params->link_flags |=
+                                       LINK_FLAGS_INT_DISABLED;
+
+                                       bnx2x_bits_dis(
+                                               bp,
+                                               NIG_REG_MASK_INTERRUPT_PORT0 +
+                                               params->port*4,
+                                               NIG_MASK_MI_INT);
+                               }
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x0);
+                       }
                }
                break;
        case LED_MODE_ON:
@@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
                                         0x20);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Disable MI_INT interrupt before setting LED4
+                                * source to constant on.
+                                */
+                               if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+                                          params->port*4) &
+                                   NIG_MASK_MI_INT) {
+                                       params->link_flags |=
+                                       LINK_FLAGS_INT_DISABLED;
+
+                                       bnx2x_bits_dis(
+                                               bp,
+                                               NIG_REG_MASK_INTERRUPT_PORT0 +
+                                               params->port*4,
+                                               NIG_MASK_MI_INT);
+                               }
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x20);
+                       }
                }
                break;
 
@@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LINK_SIGNAL,
                                         val);
+                       if (phy->type ==
+                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+                               /* Restore LED4 source to external link,
+                                * and re-enable interrupts.
+                                */
+                               bnx2x_cl45_write(bp, phy,
+                                                MDIO_PMA_DEVAD,
+                                                MDIO_PMA_REG_8481_SIGNAL_MASK,
+                                                0x40);
+                               if (params->link_flags &
+                                   LINK_FLAGS_INT_DISABLED) {
+                                       bnx2x_link_int_enable(params);
+                                       params->link_flags &=
+                                               ~LINK_FLAGS_INT_DISABLED;
+                               }
+                       }
                }
                break;
        }
@@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                        phy->media_type = ETH_PHY_KR;
                        phy->flags |= FLAGS_WC_DUAL_MODE;
                        phy->supported &= (SUPPORTED_20000baseKR2_Full |
+                                          SUPPORTED_10000baseT_Full |
+                                          SUPPORTED_1000baseT_Full |
                                           SUPPORTED_Autoneg |
                                           SUPPORTED_FIBRE |
                                           SUPPORTED_Pause |
@@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
                struct bnx2x_phy *phy = &params->phy[INT_PHY];
                bnx2x_set_aer_mmd(params, phy);
                if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
-                   (phy->speed_cap_mask & SPEED_20000))
+                   (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
                        bnx2x_check_kr2_wa(params, vars, phy);
                bnx2x_check_over_curr(params, vars);
                if (vars->rx_tx_asic_rst)
index d25c7d7..be5c195 100644 (file)
@@ -307,7 +307,8 @@ struct link_params {
        struct bnx2x *bp;
        u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
                                req_flow_ctrl is set to AUTO */
-       u16 rsrv1;
+       u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED                (1<<0)
        u32 lfa_base;
 };
 
index c6c05bf..e707e31 100644 (file)
@@ -2347,7 +2347,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
                        loff_t *ppos)
 {
        loff_t pos = *ppos;
-       loff_t avail = file->f_path.dentry->d_inode->i_size;
+       loff_t avail = file_inode(file)->i_size;
        unsigned int mem = (uintptr_t)file->private_data & 3;
        struct adapter *adap = file->private_data - mem;
 
index 28ceb84..29aff55 100644 (file)
@@ -349,6 +349,7 @@ struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;
 
+       u8 __iomem *csr;        /* CSR BAR used only for BE2/3 */
        u8 __iomem *db;         /* Door Bell */
 
        struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
index 071aea7..3c9b4f1 100644 (file)
@@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
        return 0;
 }
 
-static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
+static u16 be_POST_stage_get(struct be_adapter *adapter)
 {
        u32 sem;
-       u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
-                                         SLIPORT_SEMAPHORE_OFFSET_BE;
 
-       pci_read_config_dword(adapter->pdev, reg, &sem);
-       *stage = sem & POST_STAGE_MASK;
-
-       if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
-               return -1;
+       if (BEx_chip(adapter))
+               sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
        else
-               return 0;
+               pci_read_config_dword(adapter->pdev,
+                                     SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+
+       return sem & POST_STAGE_MASK;
 }
 
 int lancer_wait_ready(struct be_adapter *adapter)
@@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
        }
 
        do {
-               status = be_POST_stage_get(adapter, &stage);
-               if (status) {
-                       dev_err(dev, "POST error; stage=0x%x\n", stage);
-                       return -1;
-               } else if (stage != POST_STAGE_ARMFW_RDY) {
-                       if (msleep_interruptible(2000)) {
-                               dev_err(dev, "Waiting for POST aborted\n");
-                               return -EINTR;
-                       }
-                       timeout += 2;
-               } else {
+               stage = be_POST_stage_get(adapter);
+               if (stage == POST_STAGE_ARMFW_RDY)
                        return 0;
+
+               dev_info(dev, "Waiting for POST, %ds elapsed\n",
+                        timeout);
+               if (msleep_interruptible(2000)) {
+                       dev_err(dev, "Waiting for POST aborted\n");
+                       return -EINTR;
                }
+               timeout += 2;
        } while (timeout < 60);
 
        dev_err(dev, "POST timeout; stage=0x%x\n", stage);
index 541d453..62dc220 100644 (file)
@@ -32,8 +32,8 @@
 #define MPU_EP_CONTROL                 0
 
 /********** MPU semphore: used for SH & BE  *************/
-#define SLIPORT_SEMAPHORE_OFFSET_BE            0x7c
-#define SLIPORT_SEMAPHORE_OFFSET_SH            0x94
+#define SLIPORT_SEMAPHORE_OFFSET_BEx           0xac  /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH            0x94  /* PCI-CFG offset */
 #define POST_STAGE_MASK                                0x0000FFFF
 #define POST_ERR_MASK                          0x1
 #define POST_ERR_SHIFT                         31
index 3860888..08e54f3 100644 (file)
@@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
 {
+       if (adapter->csr)
+               pci_iounmap(adapter->pdev, adapter->csr);
        if (adapter->db)
                pci_iounmap(adapter->pdev, adapter->db);
 }
@@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
        adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                SLI_INTF_IF_TYPE_SHIFT;
 
+       if (BEx_chip(adapter) && be_physfn(adapter)) {
+               adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+               if (adapter->csr == NULL)
+                       return -ENOMEM;
+       }
+
        addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
        if (addr == NULL)
                goto pci_map_err;
@@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
        pci_restore_state(pdev);
 
        /* Check if card is ok and fw is ready */
+       dev_info(&adapter->pdev->dev,
+                "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;
index fccc3bf..069a155 100644 (file)
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct bufdesc *bdp;
        void *bufaddr;
        unsigned short  status;
-       unsigned long flags;
+       unsigned int index;
 
        if (!fep->link) {
                /* Link is down or autonegotiation is in progress. */
                return NETDEV_TX_BUSY;
        }
 
-       spin_lock_irqsave(&fep->hw_lock, flags);
        /* Fill in a Tx ring entry */
        bdp = fep->cur_tx;
 
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 * This should not happen, since ndev->tbusy should be set.
                 */
                printk("%s: tx queue full!.\n", ndev->name);
-               spin_unlock_irqrestore(&fep->hw_lock, flags);
                return NETDEV_TX_BUSY;
        }
 
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         * 4-byte boundaries. Use bounce buffers to copy data
         * and get it aligned. Ugh.
         */
+       if (fep->bufdesc_ex)
+               index = (struct bufdesc_ex *)bdp -
+                       (struct bufdesc_ex *)fep->tx_bd_base;
+       else
+               index = bdp - fep->tx_bd_base;
+
        if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-               unsigned int index;
-               if (fep->bufdesc_ex)
-                       index = (struct bufdesc_ex *)bdp -
-                               (struct bufdesc_ex *)fep->tx_bd_base;
-               else
-                       index = bdp - fep->tx_bd_base;
                memcpy(fep->tx_bounce[index], skb->data, skb->len);
                bufaddr = fep->tx_bounce[index];
        }
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                swap_buffer(bufaddr, skb->len);
 
        /* Save skb pointer */
-       fep->tx_skbuff[fep->skb_cur] = skb;
-
-       ndev->stats.tx_bytes += skb->len;
-       fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+       fep->tx_skbuff[index] = skb;
 
        /* Push the data cache so the CPM does not get stale memory
         * data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        ebdp->cbd_esc = BD_ENET_TX_INT;
                }
        }
-       /* Trigger transmission start */
-       writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
        /* If this was the last BD in the ring, start at the beginning again. */
        if (status & BD_ENET_TX_WRAP)
                bdp = fep->tx_bd_base;
        else
                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
-       if (bdp == fep->dirty_tx) {
-               fep->tx_full = 1;
+       fep->cur_tx = bdp;
+
+       if (fep->cur_tx == fep->dirty_tx)
                netif_stop_queue(ndev);
-       }
 
-       fep->cur_tx = bdp;
+       /* Trigger transmission start */
+       writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
        skb_tx_timestamp(skb);
 
-       spin_unlock_irqrestore(&fep->hw_lock, flags);
-
        return NETDEV_TX_OK;
 }
 
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-       fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->cur_rx = fep->rx_bd_base;
 
-       /* Reset SKB transmit buffers. */
-       fep->skb_cur = fep->skb_dirty = 0;
        for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                if (fep->tx_skbuff[i]) {
                        dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
+       int     index = 0;
 
        fep = netdev_priv(ndev);
-       spin_lock(&fep->hw_lock);
        bdp = fep->dirty_tx;
 
+       /* get next bdp of dirty_tx */
+       if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+               bdp = fep->tx_bd_base;
+       else
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
        while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-               if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+               /* current queue is empty */
+               if (bdp == fep->cur_tx)
                        break;
 
+               if (fep->bufdesc_ex)
+                       index = (struct bufdesc_ex *)bdp -
+                               (struct bufdesc_ex *)fep->tx_bd_base;
+               else
+                       index = bdp - fep->tx_bd_base;
+
                dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                bdp->cbd_bufaddr = 0;
 
-               skb = fep->tx_skbuff[fep->skb_dirty];
+               skb = fep->tx_skbuff[index];
+
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                   BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
-               fep->tx_skbuff[fep->skb_dirty] = NULL;
-               fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+               fep->tx_skbuff[index] = NULL;
+
+               fep->dirty_tx = bdp;
 
                /* Update pointer to next buffer descriptor to be transmitted */
                if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Since we have freed up a buffer, the ring is no longer full
                 */
-               if (fep->tx_full) {
-                       fep->tx_full = 0;
+               if (fep->dirty_tx != fep->cur_tx) {
                        if (netif_queue_stopped(ndev))
                                netif_wake_queue(ndev);
                }
        }
-       fep->dirty_tx = bdp;
-       spin_unlock(&fep->hw_lock);
+       return;
 }
 
 
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
                int_events = readl(fep->hwp + FEC_IEVENT);
                writel(int_events, fep->hwp + FEC_IEVENT);
 
-               if (int_events & FEC_ENET_RXF) {
+               if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
                        ret = IRQ_HANDLED;
 
                        /* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
                        }
                }
 
-               /* Transmit OK, or non-fatal error. Update the buffer
-                * descriptors. FEC handles all errors, we just discover
-                * them as part of the transmit process.
-                */
-               if (int_events & FEC_ENET_TXF) {
-                       ret = IRQ_HANDLED;
-                       fec_enet_tx(ndev);
-               }
-
                if (int_events & FEC_ENET_MII) {
                        ret = IRQ_HANDLED;
                        complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
        int pkts = fec_enet_rx(ndev, budget);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       fec_enet_tx(ndev);
+
        if (pkts < budget) {
                napi_complete(napi);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
 
        /* ...and the same for transmit */
        bdp = fep->tx_bd_base;
+       fep->cur_tx = bdp;
        for (i = 0; i < TX_RING_SIZE; i++) {
 
                /* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
        /* Set the last buffer to wrap */
        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
        bdp->cbd_sc |= BD_SC_WRAP;
+       fep->dirty_tx = bdp;
 
        fec_restart(ndev, 0);
 
index 01579b8..f539007 100644 (file)
@@ -97,6 +97,13 @@ struct bufdesc {
        unsigned short cbd_sc;  /* Control and status info */
        unsigned long cbd_bufaddr;      /* Buffer address */
 };
+#else
+struct bufdesc {
+       unsigned short  cbd_sc;                 /* Control and status info */
+       unsigned short  cbd_datlen;             /* Data length */
+       unsigned long   cbd_bufaddr;            /* Buffer address */
+};
+#endif
 
 struct bufdesc_ex {
        struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
        unsigned short res0[4];
 };
 
-#else
-struct bufdesc {
-       unsigned short  cbd_sc;                 /* Control and status info */
-       unsigned short  cbd_datlen;             /* Data length */
-       unsigned long   cbd_bufaddr;            /* Buffer address */
-};
-#endif
-
 /*
  *     The following definitions courtesy of commproc.h, which where
  *     Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
        unsigned char *tx_bounce[TX_RING_SIZE];
        struct  sk_buff *tx_skbuff[TX_RING_SIZE];
        struct  sk_buff *rx_skbuff[RX_RING_SIZE];
-       ushort  skb_cur;
-       ushort  skb_dirty;
 
        /* CPM dual port RAM relative addresses */
        dma_addr_t      bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
        /* The ring entries to be free()ed */
        struct bufdesc  *dirty_tx;
 
-       uint    tx_full;
        /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
        spinlock_t hw_lock;
 
index 2c18137..f91a8f3 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/delay.h>
 #include <linux/vmalloc.h>
 #include <linux/mdio.h>
+#include <linux/pm_runtime.h>
 
 #include "e1000.h"
 
@@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
        return 0;
 }
 
+static int e1000e_ethtool_begin(struct net_device *netdev)
+{
+       return pm_runtime_get_sync(netdev->dev.parent);
+}
+
+static void e1000e_ethtool_complete(struct net_device *netdev)
+{
+       pm_runtime_put_sync(netdev->dev.parent);
+}
+
 static const struct ethtool_ops e1000_ethtool_ops = {
+       .begin                  = e1000e_ethtool_begin,
+       .complete               = e1000e_ethtool_complete,
        .get_settings           = e1000_get_settings,
        .set_settings           = e1000_set_settings,
        .get_drvinfo            = e1000_get_drvinfo,
index dff7bff..121a865 100644 (file)
@@ -781,6 +781,59 @@ release:
        return ret_val;
 }
 
+/**
+ *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ *  @hw:   pointer to the HW structure
+ *  @link: link up bool flag
+ *
+ *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
+ *  preventing further DMA write requests.  Workaround the issue by disabling
+ *  the de-assertion of the clock request when in 1Gpbs mode.
+ **/
+static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+       u32 fextnvm6 = er32(FEXTNVM6);
+       s32 ret_val = 0;
+
+       if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
+               u16 kmrn_reg;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       return ret_val;
+
+               ret_val =
+                   e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+                                               &kmrn_reg);
+               if (ret_val)
+                       goto release;
+
+               ret_val =
+                   e1000e_write_kmrn_reg_locked(hw,
+                                                E1000_KMRNCTRLSTA_K1_CONFIG,
+                                                kmrn_reg &
+                                                ~E1000_KMRNCTRLSTA_K1_ENABLE);
+               if (ret_val)
+                       goto release;
+
+               usleep_range(10, 20);
+
+               ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+               ret_val =
+                   e1000e_write_kmrn_reg_locked(hw,
+                                                E1000_KMRNCTRLSTA_K1_CONFIG,
+                                                kmrn_reg);
+release:
+               hw->phy.ops.release(hw);
+       } else {
+               /* clear FEXTNVM6 bit 8 on link down or 10/100 */
+               ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+       }
+
+       return ret_val;
+}
+
 /**
  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
  *  @hw: pointer to the HW structure
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        return ret_val;
        }
 
+       /* Work-around I218 hang issue */
+       if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+               ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+               if (ret_val)
+                       return ret_val;
+       }
+
        /* Clear link partner's EEE ability */
        hw->dev_spec.ich8lan.eee_lp_ability = 0;
 
@@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 
        phy_ctrl = er32(PHY_CTRL);
        phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
        if (hw->phy.type == e1000_phy_i217) {
-               u16 phy_reg;
+               u16 phy_reg, device_id = hw->adapter->pdev->device;
+
+               if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+                   (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+                       u32 fextnvm6 = er32(FEXTNVM6);
+
+                       ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+               }
 
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
index b6d3174..8bf4655 100644 (file)
@@ -92,6 +92,8 @@
 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
 
+#define E1000_FEXTNVM6_REQ_PLL_CLK     0x00000100
+
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES  7
index a177b8b..948b86f 100644 (file)
@@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev)
        netif_start_queue(netdev);
 
        adapter->idle_check = true;
+       hw->mac.get_link_status = true;
        pm_runtime_put(&pdev->dev);
 
        /* fire a link status change interrupt to start the watchdog */
@@ -4662,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
            (adapter->hw.phy.media_type == e1000_media_type_copper)) {
                int ret_val;
 
+               pm_runtime_get_sync(&adapter->pdev->dev);
                ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
                ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
                ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4672,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
                ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
                if (ret_val)
                        e_warn("Error reading PHY register\n");
+               pm_runtime_put_sync(&adapter->pdev->dev);
        } else {
                /* Do not read PHY registers if link is not up
                 * Set values to typical power-on defaults
@@ -5887,8 +5890,7 @@ release:
        return retval;
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
-                           bool runtime)
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5912,10 +5914,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
        }
        e1000e_reset_interrupt_capability(adapter);
 
-       retval = pci_save_state(pdev);
-       if (retval)
-               return retval;
-
        status = er32(STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
@@ -5971,13 +5969,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
                ew32(WUFC, 0);
        }
 
-       *enable_wake = !!wufc;
-
-       /* make sure adapter isn't asleep if manageability is enabled */
-       if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
-           (hw->mac.ops.check_mng_mode(hw)))
-               *enable_wake = true;
-
        if (adapter->hw.phy.type == e1000_phy_igp_3)
                e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
 
@@ -5986,27 +5977,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
         */
        e1000e_release_hw_control(adapter);
 
-       pci_disable_device(pdev);
-
-       return 0;
-}
-
-static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
-{
-       if (sleep && wake) {
-               pci_prepare_to_sleep(pdev);
-               return;
-       }
-
-       pci_wake_from_d3(pdev, wake);
-       pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
-                                    bool wake)
-{
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct e1000_adapter *adapter = netdev_priv(netdev);
+       pci_clear_master(pdev);
 
        /* The pci-e switch on some quad port adapters will report a
         * correctable error when the MAC transitions from D0 to D3.  To
@@ -6021,12 +5992,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
                pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
                                           (devctl & ~PCI_EXP_DEVCTL_CERE));
 
-               e1000_power_off(pdev, sleep, wake);
+               pci_save_state(pdev);
+               pci_prepare_to_sleep(pdev);
 
                pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
-       } else {
-               e1000_power_off(pdev, sleep, wake);
        }
+
+       return 0;
 }
 
 #ifdef CONFIG_PCIEASPM
@@ -6084,9 +6056,7 @@ static int __e1000_resume(struct pci_dev *pdev)
        if (aspm_disable_flag)
                e1000e_disable_aspm(pdev, aspm_disable_flag);
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       pci_save_state(pdev);
+       pci_set_master(pdev);
 
        e1000e_set_interrupt_capability(adapter);
        if (netif_running(netdev)) {
@@ -6152,14 +6122,8 @@ static int __e1000_resume(struct pci_dev *pdev)
 static int e1000_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       int retval;
-       bool wake;
-
-       retval = __e1000_shutdown(pdev, &wake, false);
-       if (!retval)
-               e1000_complete_shutdown(pdev, true, wake);
 
-       return retval;
+       return __e1000_shutdown(pdev, false);
 }
 
 static int e1000_resume(struct device *dev)
@@ -6182,13 +6146,10 @@ static int e1000_runtime_suspend(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       if (e1000e_pm_ready(adapter)) {
-               bool wake;
-
-               __e1000_shutdown(pdev, &wake, true);
-       }
+       if (!e1000e_pm_ready(adapter))
+               return 0;
 
-       return 0;
+       return __e1000_shutdown(pdev, true);
 }
 
 static int e1000_idle(struct device *dev)
@@ -6226,12 +6187,7 @@ static int e1000_runtime_resume(struct device *dev)
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
-       bool wake = false;
-
-       __e1000_shutdown(pdev, &wake, false);
-
-       if (system_state == SYSTEM_POWER_OFF)
-               e1000_complete_shutdown(pdev, false, wake);
+       __e1000_shutdown(pdev, false);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6352,9 +6308,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
-               pci_set_master(pdev);
                pdev->state_saved = true;
                pci_restore_state(pdev);
+               pci_set_master(pdev);
 
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6783,7 +6739,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;
-       device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+       /* make sure adapter isn't asleep if manageability is enabled */
+       if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
+           (hw->mac.ops.check_mng_mode(hw)))
+               device_wakeup_enable(&pdev->dev);
 
        /* save off EEPROM version number */
        e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
index 794fe14..a7e6a3e 100644 (file)
@@ -42,6 +42,7 @@
 #define E1000_FEXTNVM  0x00028 /* Future Extended NVM - RW */
 #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
 #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
 #define E1000_FCT      0x00030 /* Flow Control Type - RW */
 #define E1000_VET      0x00038 /* VLAN Ether Type - RW */
index 84e7e09..b64542a 100644 (file)
@@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
        switch (hw->phy.type) {
        case e1000_phy_i210:
        case e1000_phy_m88:
-               if (hw->phy.id == I347AT4_E_PHY_ID ||
-                   hw->phy.id == M88E1112_E_PHY_ID)
+               switch (hw->phy.id) {
+               case I347AT4_E_PHY_ID:
+               case M88E1112_E_PHY_ID:
+               case I210_I_PHY_ID:
                        ret_val = igb_copper_link_setup_m88_gen2(hw);
-               else
+                       break;
+               default:
                        ret_val = igb_copper_link_setup_m88(hw);
+                       break;
+               }
                break;
        case e1000_phy_igp_3:
                ret_val = igb_copper_link_setup_igp(hw);
index d27edbc..2515140 100644 (file)
@@ -447,7 +447,7 @@ struct igb_adapter {
 #endif
        struct i2c_algo_bit_data i2c_algo;
        struct i2c_adapter i2c_adap;
-       struct igb_i2c_client_list *i2c_clients;
+       struct i2c_client *i2c_client;
 };
 
 #define IGB_FLAG_HAS_MSI               (1 << 0)
index 0a9b073..4623502 100644 (file)
 #include <linux/pci.h>
 
 #ifdef CONFIG_IGB_HWMON
+struct i2c_board_info i350_sensor_info = {
+       I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
+};
+
 /* hwmon callback functions */
 static ssize_t igb_hwmon_show_location(struct device *dev,
                                         struct device_attribute *attr,
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
        unsigned int i;
        int n_attrs;
        int rc = 0;
+       struct i2c_client *client = NULL;
 
        /* If this method isn't defined we don't support thermals */
        if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
                if (rc)
                        goto exit;
 
+       /* init i2c_client */
+       client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+       if (client == NULL) {
+               dev_info(&adapter->pdev->dev,
+                       "Failed to create new i2c device..\n");
+               goto exit;
+       }
+       adapter->i2c_client = client;
+
        /* Allocation space for max attributes
         * max num sensors * values (loc, temp, max, caution)
         */
index ed79a1c..4dbd629 100644 (file)
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
        return;
 }
 
-static const struct i2c_board_info i350_sensor_info = {
-       I2C_BOARD_INFO("i350bb", 0Xf8),
-};
-
 /*  igb_init_i2c - Init I2C interface
  *  @adapter: pointer to adapter structure
  *
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
        /* If we spanned a buffer we have a huge mess so test for it */
        BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
 
-       /* Guarantee this function can be used by verifying buffer sizes */
-       BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
-                                                       NET_IP_ALIGN +
-                                                       IGB_TS_HDR_LEN +
-                                                       ETH_FRAME_LEN +
-                                                       ETH_FCS_LEN));
-
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
        page = rx_buffer->page;
        prefetchw(page);
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
        }
 }
 
-static DEFINE_SPINLOCK(i2c_clients_lock);
-
-/*  igb_get_i2c_client - returns matching client
- *  in adapters's client list.
- *  @adapter: adapter struct
- *  @dev_addr: device address of i2c needed.
- */
-static struct i2c_client *
-igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
-{
-       ulong flags;
-       struct igb_i2c_client_list *client_list;
-       struct i2c_client *client = NULL;
-       struct i2c_board_info client_info = {
-               I2C_BOARD_INFO("igb", 0x00),
-       };
-
-       spin_lock_irqsave(&i2c_clients_lock, flags);
-       client_list = adapter->i2c_clients;
-
-       /* See if we already have an i2c_client */
-       while (client_list) {
-               if (client_list->client->addr == (dev_addr >> 1)) {
-                       client = client_list->client;
-                       goto exit;
-               } else {
-                       client_list = client_list->next;
-               }
-       }
-
-       /* no client_list found, create a new one */
-       client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
-       if (client_list == NULL)
-               goto exit;
-
-       /* dev_addr passed to us is left-shifted by 1 bit
-        * i2c_new_device call expects it to be flush to the right.
-        */
-       client_info.addr = dev_addr >> 1;
-       client_info.platform_data = adapter;
-       client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
-       if (client_list->client == NULL) {
-               dev_info(&adapter->pdev->dev,
-                       "Failed to create new i2c device..\n");
-               goto err_no_client;
-       }
-
-       /* insert new client at head of list */
-       client_list->next = adapter->i2c_clients;
-       adapter->i2c_clients = client_list;
-
-       client = client_list->client;
-       goto exit;
-
-err_no_client:
-       kfree(client_list);
-exit:
-       spin_unlock_irqrestore(&i2c_clients_lock, flags);
-       return client;
-}
-
 /*  igb_read_i2c_byte - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to read
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                                u8 dev_addr, u8 *data)
 {
        struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
-       struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+       struct i2c_client *this_client = adapter->i2c_client;
        s32 status;
        u16 swfw_mask = 0;
 
@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                                 u8 dev_addr, u8 data)
 {
        struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
-       struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+       struct i2c_client *this_client = adapter->i2c_client;
        s32 status;
        u16 swfw_mask = E1000_SWFW_PHY0_SM;
 
index f4d2e9e..c3f1afd 100644 (file)
@@ -2197,13 +2197,13 @@ static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
        union ixgbe_atr_input *mask = &adapter->fdir_mask;
        struct ethtool_rx_flow_spec *fsp =
                (struct ethtool_rx_flow_spec *)&cmd->fs;
-       struct hlist_node *node, *node2;
+       struct hlist_node *node2;
        struct ixgbe_fdir_filter *rule = NULL;
 
        /* report total rule count */
        cmd->data = (1024 << adapter->fdir_pballoc) - 2;
 
-       hlist_for_each_entry_safe(rule, node, node2,
+       hlist_for_each_entry_safe(rule, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                if (fsp->location <= rule->sw_idx)
                        break;
@@ -2264,14 +2264,14 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
                                      struct ethtool_rxnfc *cmd,
                                      u32 *rule_locs)
 {
-       struct hlist_node *node, *node2;
+       struct hlist_node *node2;
        struct ixgbe_fdir_filter *rule;
        int cnt = 0;
 
        /* report total rule count */
        cmd->data = (1024 << adapter->fdir_pballoc) - 2;
 
-       hlist_for_each_entry_safe(rule, node, node2,
+       hlist_for_each_entry_safe(rule, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                if (cnt == cmd->rule_cnt)
                        return -EMSGSIZE;
@@ -2358,19 +2358,19 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                           u16 sw_idx)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       struct hlist_node *node, *node2, *parent;
-       struct ixgbe_fdir_filter *rule;
+       struct hlist_node *node2;
+       struct ixgbe_fdir_filter *rule, *parent;
        int err = -EINVAL;
 
        parent = NULL;
        rule = NULL;
 
-       hlist_for_each_entry_safe(rule, node, node2,
+       hlist_for_each_entry_safe(rule, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                /* hash found, or no matching entry */
                if (rule->sw_idx >= sw_idx)
                        break;
-               parent = node;
+               parent = rule;
        }
 
        /* if there is an old rule occupying our place remove it */
@@ -2399,7 +2399,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 
        /* add filter to the list */
        if (parent)
-               hlist_add_after(parent, &input->fdir_node);
+               hlist_add_after(&parent->fdir_node, &input->fdir_node);
        else
                hlist_add_head(&input->fdir_node,
                               &adapter->fdir_filter_list);
index 68478d6..db5611a 100644 (file)
@@ -3891,7 +3891,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       struct hlist_node *node, *node2;
+       struct hlist_node *node2;
        struct ixgbe_fdir_filter *filter;
 
        spin_lock(&adapter->fdir_perfect_lock);
@@ -3899,7 +3899,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
        if (!hlist_empty(&adapter->fdir_filter_list))
                ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
 
-       hlist_for_each_entry_safe(filter, node, node2,
+       hlist_for_each_entry_safe(filter, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                ixgbe_fdir_write_perfect_filter_82599(hw,
                                &filter->filter,
@@ -4356,12 +4356,12 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 
 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
 {
-       struct hlist_node *node, *node2;
+       struct hlist_node *node2;
        struct ixgbe_fdir_filter *filter;
 
        spin_lock(&adapter->fdir_perfect_lock);
 
-       hlist_for_each_entry_safe(filter, node, node2,
+       hlist_for_each_entry_safe(filter, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                hlist_del(&filter->fdir_node);
                kfree(filter);
index 5385474..bb4d8d9 100644 (file)
@@ -225,11 +225,10 @@ static inline struct mlx4_en_filter *
 mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                    __be16 src_port, __be16 dst_port)
 {
-       struct hlist_node *elem;
        struct mlx4_en_filter *filter;
        struct mlx4_en_filter *ret = NULL;
 
-       hlist_for_each_entry(filter, elem,
+       hlist_for_each_entry(filter,
                             filter_hash_bucket(priv, src_ip, dst_ip,
                                                src_port, dst_port),
                             filter_chain) {
@@ -574,13 +573,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
 
        if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
                struct mlx4_mac_entry *entry;
-               struct hlist_node *n, *tmp;
+               struct hlist_node *tmp;
                struct hlist_head *bucket;
                unsigned int mac_hash;
 
                mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
                bucket = &priv->mac_hash[mac_hash];
-               hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+               hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac,
                                                    priv->dev->dev_addr)) {
                                en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
@@ -609,11 +608,11 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
                struct hlist_head *bucket;
                unsigned int mac_hash;
                struct mlx4_mac_entry *entry;
-               struct hlist_node *n, *tmp;
+               struct hlist_node *tmp;
                u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
 
                bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
-               hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+               hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         qpn, entry->reg_id);
@@ -1019,7 +1018,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
 {
        struct netdev_hw_addr *ha;
        struct mlx4_mac_entry *entry;
-       struct hlist_node *n, *tmp;
+       struct hlist_node *tmp;
        bool found;
        u64 mac;
        int err = 0;
@@ -1035,7 +1034,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
        /* find what to remove */
        for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                bucket = &priv->mac_hash[i];
-               hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+               hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        found = false;
                        netdev_for_each_uc_addr(ha, dev) {
                                if (ether_addr_equal_64bits(entry->mac,
@@ -1078,7 +1077,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
        netdev_for_each_uc_addr(ha, dev) {
                found = false;
                bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
-               hlist_for_each_entry(entry, n, bucket, hlist) {
+               hlist_for_each_entry(entry, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
                                found = true;
                                break;
index ce38654..c7f8563 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/mlx4/qp.h>
 #include <linux/skbuff.h>
+#include <linux/rculist.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
@@ -617,7 +618,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 
                        if (is_multicast_ether_addr(ethh->h_dest)) {
                                struct mlx4_mac_entry *entry;
-                               struct hlist_node *n;
                                struct hlist_head *bucket;
                                unsigned int mac_hash;
 
@@ -625,7 +625,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
                                bucket = &priv->mac_hash[mac_hash];
                                rcu_read_lock();
-                               hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+                               hlist_for_each_entry_rcu(entry, bucket, hlist) {
                                        if (ether_addr_equal_64bits(entry->mac,
                                                                    ethh->h_source)) {
                                                rcu_read_unlock();
index 325e11e..f89cc7a 100644 (file)
@@ -576,7 +576,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
 void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_filter *tmp_fil;
-       struct hlist_node *tmp_hnode, *n;
+       struct hlist_node *n;
        struct hlist_head *head;
        int i;
        unsigned long time;
@@ -584,7 +584,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 
        for (i = 0; i < adapter->fhash.fbucket_size; i++) {
                head = &(adapter->fhash.fhead[i]);
-               hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                        cmd =  tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
                                                  QLCNIC_MAC_DEL;
                        time = tmp_fil->ftime;
@@ -604,7 +604,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
        for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
                head = &(adapter->rx_fhash.fhead[i]);
 
-               hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+               hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
                {
                        time = tmp_fil->ftime;
                        if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
@@ -621,14 +621,14 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_filter *tmp_fil;
-       struct hlist_node *tmp_hnode, *n;
+       struct hlist_node *n;
        struct hlist_head *head;
        int i;
        u8 cmd;
 
        for (i = 0; i < adapter->fhash.fbucket_size; i++) {
                head = &(adapter->fhash.fhead[i]);
-               hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                        cmd =  tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
                                                  QLCNIC_MAC_DEL;
                        qlcnic_sre_macaddr_change(adapter,
index 6387e0c..0e63006 100644 (file)
@@ -162,7 +162,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
 {
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
        struct qlcnic_filter *fil, *tmp_fil;
-       struct hlist_node *tmp_hnode, *n;
+       struct hlist_node *n;
        struct hlist_head *head;
        unsigned long time;
        u64 src_addr = 0;
@@ -179,7 +179,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                         (adapter->fhash.fbucket_size - 1);
                head = &(adapter->rx_fhash.fhead[hindex]);
 
-               hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                        if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                            tmp_fil->vlan_id == vlan_id) {
                                time = tmp_fil->ftime;
@@ -205,7 +205,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                         (adapter->fhash.fbucket_size - 1);
                head = &(adapter->rx_fhash.fhead[hindex]);
                spin_lock(&adapter->rx_mac_learn_lock);
-               hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+               hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                        if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                            tmp_fil->vlan_id == vlan_id) {
                                found = 1;
@@ -272,7 +272,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                               struct sk_buff *skb)
 {
        struct qlcnic_filter *fil, *tmp_fil;
-       struct hlist_node *tmp_hnode, *n;
+       struct hlist_node *n;
        struct hlist_head *head;
        struct net_device *netdev = adapter->netdev;
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -294,7 +294,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
        head = &(adapter->fhash.fhead[hindex]);
 
-       hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+       hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                    tmp_fil->vlan_id == vlan_id) {
                        if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
index 8900398..28fb50a 100644 (file)
@@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 
-       rtl_tx_performance_tweak(pdev,
-               (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+       if (tp->dev->mtu <= ETH_DATA_LEN) {
+               rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
+                                        PCI_EXP_DEVCTL_NOSNOOP_EN);
+       }
 }
 
 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        rtl_disable_clock_request(pdev);
 
@@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 
        RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
 
        rtl_csi_access_enable_1(tp);
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
@@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        RTL_W8(MaxTxPacketSize, TxPacketMax);
 
@@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
 
-       rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+       if (tp->dev->mtu <= ETH_DATA_LEN)
+               rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
index 50247df..d2f790d 100644 (file)
@@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
         * TX scheduler is stopped when we're done and before
         * netif_device_present() becomes false.
         */
-       netif_tx_lock(dev);
+       netif_tx_lock_bh(dev);
        netif_device_detach(dev);
-       netif_tx_unlock(dev);
+       netif_tx_unlock_bh(dev);
 }
 
 #endif /* EFX_EFX_H */
index 879ff58..bb579a6 100644 (file)
@@ -215,7 +215,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->u.page = page;
-               rx_buf->page_offset = page_offset;
+               rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                rx_buf->flags = EFX_RX_BUF_PAGE;
                ++rx_queue->added_count;
index 289b4ee..1df0ff3 100644 (file)
@@ -614,10 +614,9 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
        unsigned int hash = vnet_hashfn(skb->data);
        struct hlist_head *hp = &vp->port_hash[hash];
-       struct hlist_node *n;
        struct vnet_port *port;
 
-       hlist_for_each_entry(port, n, hp, hash) {
+       hlist_for_each_entry(port, hp, hash) {
                if (ether_addr_equal(port->raddr, skb->data))
                        return port;
        }
index defcd8a..417b2af 100644 (file)
@@ -55,9 +55,8 @@ static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
                                               const unsigned char *addr)
 {
        struct macvlan_dev *vlan;
-       struct hlist_node *n;
 
-       hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
+       hlist_for_each_entry_rcu(vlan, &port->vlan_hash[addr[5]], hlist) {
                if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
                        return vlan;
        }
@@ -149,7 +148,6 @@ static void macvlan_broadcast(struct sk_buff *skb,
 {
        const struct ethhdr *eth = eth_hdr(skb);
        const struct macvlan_dev *vlan;
-       struct hlist_node *n;
        struct sk_buff *nskb;
        unsigned int i;
        int err;
@@ -159,7 +157,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
                return;
 
        for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
-               hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
+               hlist_for_each_entry_rcu(vlan, &port->vlan_hash[i], hlist) {
                        if (vlan->dev == src || !(vlan->mode & mode))
                                continue;
 
index 9724301..a449439 100644 (file)
@@ -279,28 +279,17 @@ static int macvtap_receive(struct sk_buff *skb)
 static int macvtap_get_minor(struct macvlan_dev *vlan)
 {
        int retval = -ENOMEM;
-       int id;
 
        mutex_lock(&minor_lock);
-       if (idr_pre_get(&minor_idr, GFP_KERNEL) == 0)
-               goto exit;
-
-       retval = idr_get_new_above(&minor_idr, vlan, 1, &id);
-       if (retval < 0) {
-               if (retval == -EAGAIN)
-                       retval = -ENOMEM;
-               goto exit;
-       }
-       if (id < MACVTAP_NUM_DEVS) {
-               vlan->minor = id;
-       } else {
+       retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
+       if (retval >= 0) {
+               vlan->minor = retval;
+       } else if (retval == -ENOSPC) {
                printk(KERN_ERR "too many macvtap devices\n");
                retval = -EINVAL;
-               idr_remove(&minor_idr, id);
        }
-exit:
        mutex_unlock(&minor_lock);
-       return retval;
+       return retval < 0 ? retval : 0;
 }
 
 static void macvtap_free_minor(struct macvlan_dev *vlan)
index 2993444..abf7b61 100644 (file)
@@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ9021,
        .phy_id_mask    = 0x000ffffe,
        .name           = "Micrel KSZ9021 Gigabit PHY",
-       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
-                               | SUPPORTED_Asym_Pause),
+       .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
index 9930f99..3657b4a 100644 (file)
@@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");
 
 void phy_device_free(struct phy_device *phydev)
 {
-       kfree(phydev);
+       put_device(&phydev->dev);
 }
 EXPORT_SYMBOL(phy_device_free);
 
 static void phy_device_release(struct device *dev)
 {
-       phy_device_free(to_phy_device(dev));
+       kfree(to_phy_device(dev));
 }
 
 static struct phy_driver genphy_driver;
@@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
           there's no driver _already_ loaded. */
        request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
 
+       device_initialize(&dev->dev);
+
        return dev;
 }
 EXPORT_SYMBOL(phy_device_create);
@@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
        /* Run all of the fixups for this PHY */
        phy_scan_fixups(phydev);
 
-       err = device_register(&phydev->dev);
+       err = device_add(&phydev->dev);
        if (err) {
-               pr_err("phy %d failed to register\n", phydev->addr);
+               pr_err("PHY %d failed to add\n", phydev->addr);
                goto out;
        }
 
index 3db9131..72ff14b 100644 (file)
@@ -2953,46 +2953,21 @@ static void __exit ppp_cleanup(void)
  * by holding all_ppp_mutex
  */
 
-static int __unit_alloc(struct idr *p, void *ptr, int n)
-{
-       int unit, err;
-
-again:
-       if (!idr_pre_get(p, GFP_KERNEL)) {
-               pr_err("PPP: No free memory for idr\n");
-               return -ENOMEM;
-       }
-
-       err = idr_get_new_above(p, ptr, n, &unit);
-       if (err < 0) {
-               if (err == -EAGAIN)
-                       goto again;
-               return err;
-       }
-
-       return unit;
-}
-
 /* associate pointer with specified number */
 static int unit_set(struct idr *p, void *ptr, int n)
 {
        int unit;
 
-       unit = __unit_alloc(p, ptr, n);
-       if (unit < 0)
-               return unit;
-       else if (unit != n) {
-               idr_remove(p, unit);
-               return -EINVAL;
-       }
-
+       unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL);
+       if (unit == -ENOSPC)
+               unit = -EINVAL;
        return unit;
 }
 
 /* get new free unit number and associate pointer with it */
 static int unit_get(struct idr *p, void *ptr)
 {
-       return __unit_alloc(p, ptr, 0);
+       return idr_alloc(p, ptr, 0, 0, GFP_KERNEL);
 }
 
 /* put unit number back to a pool */
index b6f45c5..b7c457a 100644 (file)
@@ -197,9 +197,8 @@ static inline u32 tun_hashfn(u32 rxhash)
 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 {
        struct tun_flow_entry *e;
-       struct hlist_node *n;
 
-       hlist_for_each_entry_rcu(e, n, head, hash_link) {
+       hlist_for_each_entry_rcu(e, head, hash_link) {
                if (e->rxhash == rxhash)
                        return e;
        }
@@ -241,9 +240,9 @@ static void tun_flow_flush(struct tun_struct *tun)
        spin_lock_bh(&tun->lock);
        for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                struct tun_flow_entry *e;
-               struct hlist_node *h, *n;
+               struct hlist_node *n;
 
-               hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+               hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
                        tun_flow_delete(tun, e);
        }
        spin_unlock_bh(&tun->lock);
@@ -256,9 +255,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
        spin_lock_bh(&tun->lock);
        for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                struct tun_flow_entry *e;
-               struct hlist_node *h, *n;
+               struct hlist_node *n;
 
-               hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+               hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
                        if (e->queue_index == queue_index)
                                tun_flow_delete(tun, e);
                }
@@ -279,9 +278,9 @@ static void tun_flow_cleanup(unsigned long data)
        spin_lock_bh(&tun->lock);
        for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
                struct tun_flow_entry *e;
-               struct hlist_node *h, *n;
+               struct hlist_node *n;
 
-               hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+               hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
                        unsigned long this_timer;
                        count++;
                        this_timer = e->updated + delay;
@@ -748,6 +747,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
                goto drop;
        skb_orphan(skb);
 
+       nf_reset(skb);
+
        /* Enqueue packet */
        skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
 
index da92ed3..3b6e9b8 100644 (file)
@@ -156,6 +156,24 @@ config USB_NET_AX8817X
          This driver creates an interface named "ethX", where X depends on
          what other networking devices you have in use.
 
+config USB_NET_AX88179_178A
+       tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
+       depends on USB_USBNET
+       select CRC32
+       select PHYLIB
+       default y
+       help
+         This option adds support for ASIX AX88179 based USB 3.0/2.0
+         to Gigabit Ethernet adapters.
+
+         This driver should work with at least the following devices:
+           * ASIX AX88179
+           * ASIX AX88178A
+           * Sitcomm LN-032
+
+         This driver creates an interface named "ethX", where X depends on
+         what other networking devices you have in use.
+
 config USB_NET_CDCETHER
        tristate "CDC Ethernet support (smart devices such as cable modems)"
        depends on USB_USBNET
index 4786913..119b06c 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150)       += rtl8150.o
 obj-$(CONFIG_USB_HSO)          += hso.o
 obj-$(CONFIG_USB_NET_AX8817X)  += asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
+obj-$(CONFIG_USB_NET_AX88179_178A)      += ax88179_178a.o
 obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
 obj-$(CONFIG_USB_NET_CDC_EEM)  += cdc_eem.o
 obj-$(CONFIG_USB_NET_DM9601)   += dm9601.o
index 2205dbc..7097534 100644 (file)
@@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
        .tx_fixup = asix_tx_fixup,
 };
 
+/*
+ * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
+ * no-name packaging.
+ * USB device strings are:
+ *   1: Manufacturer: USBLINK
+ *   2: Product: HG20F9 USB2.0
+ *   3: Serial: 000003
+ * Appears to be compatible with Asix 88772B.
+ */
+static const struct driver_info hg20f9_info = {
+       .description = "HG20F9 USB 2.0 Ethernet",
+       .bind = ax88772_bind,
+       .unbind = ax88772_unbind,
+       .status = asix_status,
+       .link_reset = ax88772_link_reset,
+       .reset = ax88772_reset,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+                FLAG_MULTI_PACKET,
+       .rx_fixup = asix_rx_fixup_common,
+       .tx_fixup = asix_tx_fixup,
+       .data = FLAG_EEPROM_MAC,
+};
+
 extern const struct driver_info ax88172a_info;
 
 static const struct usb_device_id      products [] = {
@@ -1063,6 +1086,14 @@ static const struct usb_device_id        products [] = {
        /* ASIX 88172a demo board */
        USB_DEVICE(0x0b95, 0x172a),
        .driver_info = (unsigned long) &ax88172a_info,
+}, {
+       /*
+        * USBLINK HG20F9 "USB 2.0 LAN"
+        * Appears to have gazumped Linksys's manufacturer ID but
+        * doesn't (yet) conflict with any known Linksys product.
+        */
+       USB_DEVICE(0x066b, 0x20f9),
+       .driver_info = (unsigned long) &hg20f9_info,
 },
        { },            // END
 };
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
new file mode 100644 (file)
index 0000000..71c27d8
--- /dev/null
@@ -0,0 +1,1448 @@
+/*
+ * ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet Devices
+ *
+ * Copyright (C) 2011-2013 ASIX
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+
+#define AX88179_PHY_ID                         0x03
+#define AX_EEPROM_LEN                          0x100
+#define AX88179_EEPROM_MAGIC                   0x17900b95
+#define AX_MCAST_FLTSIZE                       8
+#define AX_MAX_MCAST                           64
+#define AX_INT_PPLS_LINK                       ((u32)BIT(16))
+#define AX_RXHDR_L4_TYPE_MASK                  0x1c
+#define AX_RXHDR_L4_TYPE_UDP                   4
+#define AX_RXHDR_L4_TYPE_TCP                   16
+#define AX_RXHDR_L3CSUM_ERR                    2
+#define AX_RXHDR_L4CSUM_ERR                    1
+#define AX_RXHDR_CRC_ERR                       ((u32)BIT(31))
+#define AX_RXHDR_DROP_ERR                      ((u32)BIT(30))
+#define AX_ACCESS_MAC                          0x01
+#define AX_ACCESS_PHY                          0x02
+#define AX_ACCESS_EEPROM                       0x04
+#define AX_ACCESS_EFUS                         0x05
+#define AX_PAUSE_WATERLVL_HIGH                 0x54
+#define AX_PAUSE_WATERLVL_LOW                  0x55
+
+#define PHYSICAL_LINK_STATUS                   0x02
+       #define AX_USB_SS               0x04
+       #define AX_USB_HS               0x02
+
+#define GENERAL_STATUS                         0x03
+/* Check AX88179 version. UA1:Bit2 = 0,  UA2:Bit2 = 1 */
+       #define AX_SECLD                0x04
+
+#define AX_SROM_ADDR                           0x07
+#define AX_SROM_CMD                            0x0a
+       #define EEP_RD                  0x04
+       #define EEP_BUSY                0x10
+
+#define AX_SROM_DATA_LOW                       0x08
+#define AX_SROM_DATA_HIGH                      0x09
+
+#define AX_RX_CTL                              0x0b
+       #define AX_RX_CTL_DROPCRCERR    0x0100
+       #define AX_RX_CTL_IPE           0x0200
+       #define AX_RX_CTL_START         0x0080
+       #define AX_RX_CTL_AP            0x0020
+       #define AX_RX_CTL_AM            0x0010
+       #define AX_RX_CTL_AB            0x0008
+       #define AX_RX_CTL_AMALL         0x0002
+       #define AX_RX_CTL_PRO           0x0001
+       #define AX_RX_CTL_STOP          0x0000
+
+#define AX_NODE_ID                             0x10
+#define AX_MULFLTARY                           0x16
+
+#define AX_MEDIUM_STATUS_MODE                  0x22
+       #define AX_MEDIUM_GIGAMODE      0x01
+       #define AX_MEDIUM_FULL_DUPLEX   0x02
+       #define AX_MEDIUM_ALWAYS_ONE    0x04
+       #define AX_MEDIUM_EN_125MHZ     0x08
+       #define AX_MEDIUM_RXFLOW_CTRLEN 0x10
+       #define AX_MEDIUM_TXFLOW_CTRLEN 0x20
+       #define AX_MEDIUM_RECEIVE_EN    0x100
+       #define AX_MEDIUM_PS            0x200
+       #define AX_MEDIUM_JUMBO_EN      0x8040
+
+#define AX_MONITOR_MOD                         0x24
+       #define AX_MONITOR_MODE_RWLC    0x02
+       #define AX_MONITOR_MODE_RWMP    0x04
+       #define AX_MONITOR_MODE_PMEPOL  0x20
+       #define AX_MONITOR_MODE_PMETYPE 0x40
+
+#define AX_GPIO_CTRL                           0x25
+       #define AX_GPIO_CTRL_GPIO3EN    0x80
+       #define AX_GPIO_CTRL_GPIO2EN    0x40
+       #define AX_GPIO_CTRL_GPIO1EN    0x20
+
+#define AX_PHYPWR_RSTCTL                       0x26
+       #define AX_PHYPWR_RSTCTL_BZ     0x0010
+       #define AX_PHYPWR_RSTCTL_IPRL   0x0020
+       #define AX_PHYPWR_RSTCTL_AT     0x1000
+
+#define AX_RX_BULKIN_QCTRL                     0x2e
+#define AX_CLK_SELECT                          0x33
+       #define AX_CLK_SELECT_BCS       0x01
+       #define AX_CLK_SELECT_ACS       0x02
+       #define AX_CLK_SELECT_ULR       0x08
+
+#define AX_RXCOE_CTL                           0x34
+       #define AX_RXCOE_IP             0x01
+       #define AX_RXCOE_TCP            0x02
+       #define AX_RXCOE_UDP            0x04
+       #define AX_RXCOE_TCPV6          0x20
+       #define AX_RXCOE_UDPV6          0x40
+
+#define AX_TXCOE_CTL                           0x35
+       #define AX_TXCOE_IP             0x01
+       #define AX_TXCOE_TCP            0x02
+       #define AX_TXCOE_UDP            0x04
+       #define AX_TXCOE_TCPV6          0x20
+       #define AX_TXCOE_UDPV6          0x40
+
+#define AX_LEDCTRL                             0x73
+
+#define GMII_PHY_PHYSR                         0x11
+       #define GMII_PHY_PHYSR_SMASK    0xc000
+       #define GMII_PHY_PHYSR_GIGA     0x8000
+       #define GMII_PHY_PHYSR_100      0x4000
+       #define GMII_PHY_PHYSR_FULL     0x2000
+       #define GMII_PHY_PHYSR_LINK     0x400
+
+#define GMII_LED_ACT                           0x1a
+       #define GMII_LED_ACTIVE_MASK    0xff8f
+       #define GMII_LED0_ACTIVE        BIT(4)
+       #define GMII_LED1_ACTIVE        BIT(5)
+       #define GMII_LED2_ACTIVE        BIT(6)
+
+#define GMII_LED_LINK                          0x1c
+       #define GMII_LED_LINK_MASK      0xf888
+       #define GMII_LED0_LINK_10       BIT(0)
+       #define GMII_LED0_LINK_100      BIT(1)
+       #define GMII_LED0_LINK_1000     BIT(2)
+       #define GMII_LED1_LINK_10       BIT(4)
+       #define GMII_LED1_LINK_100      BIT(5)
+       #define GMII_LED1_LINK_1000     BIT(6)
+       #define GMII_LED2_LINK_10       BIT(8)
+       #define GMII_LED2_LINK_100      BIT(9)
+       #define GMII_LED2_LINK_1000     BIT(10)
+       #define LED0_ACTIVE             BIT(0)
+       #define LED0_LINK_10            BIT(1)
+       #define LED0_LINK_100           BIT(2)
+       #define LED0_LINK_1000          BIT(3)
+       #define LED0_FD                 BIT(4)
+       #define LED0_USB3_MASK          0x001f
+       #define LED1_ACTIVE             BIT(5)
+       #define LED1_LINK_10            BIT(6)
+       #define LED1_LINK_100           BIT(7)
+       #define LED1_LINK_1000          BIT(8)
+       #define LED1_FD                 BIT(9)
+       #define LED1_USB3_MASK          0x03e0
+       #define LED2_ACTIVE             BIT(10)
+       #define LED2_LINK_1000          BIT(13)
+       #define LED2_LINK_100           BIT(12)
+       #define LED2_LINK_10            BIT(11)
+       #define LED2_FD                 BIT(14)
+       #define LED_VALID               BIT(15)
+       #define LED2_USB3_MASK          0x7c00
+
+#define GMII_PHYPAGE                           0x1e
+#define GMII_PHY_PAGE_SELECT                   0x1f
+       #define GMII_PHY_PGSEL_EXT      0x0007
+       #define GMII_PHY_PGSEL_PAGE0    0x0000
+
+struct ax88179_data {
+       u16 rxctl;
+       u16 reserved;
+};
+
+struct ax88179_int_data {
+       __le32 intdata1;
+       __le32 intdata2;
+};
+
+static const struct {
+       unsigned char ctrl, timer_l, timer_h, size, ifg;
+} AX88179_BULKIN_SIZE[] =      {
+       {7, 0x4f, 0,    0x12, 0xff},
+       {7, 0x20, 3,    0x16, 0xff},
+       {7, 0xae, 7,    0x18, 0xff},
+       {7, 0xcc, 0x4c, 0x18, 8},
+};
+
+static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+                             u16 size, void *data, int in_pm)
+{
+       int ret;
+       int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
+
+       BUG_ON(!dev);
+
+       if (!in_pm)
+               fn = usbnet_read_cmd;
+       else
+               fn = usbnet_read_cmd_nopm;
+
+       ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                value, index, data, size);
+
+       if (unlikely(ret < 0))
+               netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
+                           index, ret);
+
+       return ret;
+}
+
+static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+                              u16 size, void *data, int in_pm)
+{
+       int ret;
+       int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
+
+       BUG_ON(!dev);
+
+       if (!in_pm)
+               fn = usbnet_write_cmd;
+       else
+               fn = usbnet_write_cmd_nopm;
+
+       ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                value, index, data, size);
+
+       if (unlikely(ret < 0))
+               netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
+                           index, ret);
+
+       return ret;
+}
+
+static void ax88179_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+                                   u16 index, u16 size, void *data)
+{
+       u16 buf;
+
+       if (2 == size) {
+               buf = *((u16 *)data);
+               cpu_to_le16s(&buf);
+               usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+                                      USB_RECIP_DEVICE, value, index, &buf,
+                                      size);
+       } else {
+               usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+                                      USB_RECIP_DEVICE, value, index, data,
+                                      size);
+       }
+}
+
+static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+                                u16 index, u16 size, void *data)
+{
+       int ret;
+
+       if (2 == size) {
+               u16 buf;
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
+               le16_to_cpus(&buf);
+               *((u16 *)data) = buf;
+       } else if (4 == size) {
+               u32 buf;
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
+               le32_to_cpus(&buf);
+               *((u32 *)data) = buf;
+       } else {
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 1);
+       }
+
+       return ret;
+}
+
+static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+                                 u16 index, u16 size, void *data)
+{
+       int ret;
+
+       if (2 == size) {
+               u16 buf;
+               buf = *((u16 *)data);
+               cpu_to_le16s(&buf);
+               ret = __ax88179_write_cmd(dev, cmd, value, index,
+                                         size, &buf, 1);
+       } else {
+               ret = __ax88179_write_cmd(dev, cmd, value, index,
+                                         size, data, 1);
+       }
+
+       return ret;
+}
+
+static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+                           u16 size, void *data)
+{
+       int ret;
+
+       if (2 == size) {
+               u16 buf;
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+               le16_to_cpus(&buf);
+               *((u16 *)data) = buf;
+       } else if (4 == size) {
+               u32 buf;
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+               le32_to_cpus(&buf);
+               *((u32 *)data) = buf;
+       } else {
+               ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 0);
+       }
+
+       return ret;
+}
+
+static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+                            u16 size, void *data)
+{
+       int ret;
+
+       if (2 == size) {
+               u16 buf;
+               buf = *((u16 *)data);
+               cpu_to_le16s(&buf);
+               ret = __ax88179_write_cmd(dev, cmd, value, index,
+                                         size, &buf, 0);
+       } else {
+               ret = __ax88179_write_cmd(dev, cmd, value, index,
+                                         size, data, 0);
+       }
+
+       return ret;
+}
+
+static void ax88179_status(struct usbnet *dev, struct urb *urb)
+{
+       struct ax88179_int_data *event;
+       u32 link;
+
+       if (urb->actual_length < 8)
+               return;
+
+       event = urb->transfer_buffer;
+       le32_to_cpus((void *)&event->intdata1);
+
+       link = (((__force u32)event->intdata1) & AX_INT_PPLS_LINK) >> 16;
+
+       if (netif_carrier_ok(dev->net) != link) {
+               if (link)
+                       usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+               else
+                       netif_carrier_off(dev->net);
+
+               netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
+       }
+}
+
+static int ax88179_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       u16 res;
+
+       ax88179_read_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
+       return res;
+}
+
+static void ax88179_mdio_write(struct net_device *netdev, int phy_id, int loc,
+                              int val)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       u16 res = (u16) val;
+
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
+}
+
+static int ax88179_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       u16 tmp16;
+       u8 tmp8;
+
+       usbnet_suspend(intf, message);
+
+       /* Disable RX path */
+       ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                             2, 2, &tmp16);
+       tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                              2, 2, &tmp16);
+
+       /* Force bulk-in zero length */
+       ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+                             2, 2, &tmp16);
+
+       tmp16 |= AX_PHYPWR_RSTCTL_BZ | AX_PHYPWR_RSTCTL_IPRL;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+                              2, 2, &tmp16);
+
+       /* change clock */
+       tmp8 = 0;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+
+       /* Configure RX control register => stop operation */
+       tmp16 = AX_RX_CTL_STOP;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+       return 0;
+}
+
+/* This function is used to enable the autodetach function. */
+/* This function is determined by offset 0x43 of EEPROM */
+static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
+{
+       u16 tmp16;
+       u8 tmp8;
+       int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
+       int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *);
+
+       if (!in_pm) {
+               fnr = ax88179_read_cmd;
+               fnw = ax88179_write_cmd;
+       } else {
+               fnr = ax88179_read_cmd_nopm;
+               fnw = ax88179_write_cmd_nopm;
+       }
+
+       if (fnr(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
+               return 0;
+
+       if ((tmp16 == 0xFFFF) || (!(tmp16 & 0x0100)))
+               return 0;
+
+       /* Enable Auto Detach bit */
+       tmp8 = 0;
+       fnr(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+       tmp8 |= AX_CLK_SELECT_ULR;
+       fnw(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+
+       fnr(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+       tmp16 |= AX_PHYPWR_RSTCTL_AT;
+       fnw(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+
+       return 0;
+}
+
+static int ax88179_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       u16 tmp16;
+       u8 tmp8;
+
+       netif_carrier_off(dev->net);
+
+       /* Power up ethernet PHY */
+       tmp16 = 0;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+                              2, 2, &tmp16);
+       udelay(1000);
+
+       tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+                              2, 2, &tmp16);
+       msleep(200);
+
+       /* Ethernet PHY Auto Detach*/
+       ax88179_auto_detach(dev, 1);
+
+       /* Enable clock */
+       ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC,  AX_CLK_SELECT, 1, 1, &tmp8);
+       tmp8 |= AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+       msleep(100);
+
+       /* Configure RX control register => start operation */
+       tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
+               AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
+       ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+       return usbnet_resume(intf);
+}
+
+static void
+ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       u8 opt;
+
+       if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+                            1, 1, &opt) < 0) {
+               wolinfo->supported = 0;
+               wolinfo->wolopts = 0;
+               return;
+       }
+
+       wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+       wolinfo->wolopts = 0;
+       if (opt & AX_MONITOR_MODE_RWLC)
+               wolinfo->wolopts |= WAKE_PHY;
+       if (opt & AX_MONITOR_MODE_RWMP)
+               wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       u8 opt = 0;
+
+       if (wolinfo->wolopts & WAKE_PHY)
+               opt |= AX_MONITOR_MODE_RWLC;
+       if (wolinfo->wolopts & WAKE_MAGIC)
+               opt |= AX_MONITOR_MODE_RWMP;
+
+       if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+                             1, 1, &opt) < 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ax88179_get_eeprom_len(struct net_device *net)
+{
+       return AX_EEPROM_LEN;
+}
+
+static int
+ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+                  u8 *data)
+{
+       struct usbnet *dev = netdev_priv(net);
+       u16 *eeprom_buff;
+       int first_word, last_word;
+       int i, ret;
+
+       if (eeprom->len == 0)
+               return -EINVAL;
+
+       eeprom->magic = AX88179_EEPROM_MAGIC;
+
+       first_word = eeprom->offset >> 1;
+       last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+       eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+                             GFP_KERNEL);
+       if (!eeprom_buff)
+               return -ENOMEM;
+
+       /* ax88179/178A returns 2 bytes from eeprom on read */
+       for (i = first_word; i <= last_word; i++) {
+               ret = __ax88179_read_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
+                                        &eeprom_buff[i - first_word],
+                                        0);
+               if (ret < 0) {
+                       kfree(eeprom_buff);
+                       return -EIO;
+               }
+       }
+
+       memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+       kfree(eeprom_buff);
+       return 0;
+}
+
+static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+       struct usbnet *dev = netdev_priv(net);
+       return mii_ethtool_gset(&dev->mii, cmd);
+}
+
+static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+       struct usbnet *dev = netdev_priv(net);
+       return mii_ethtool_sset(&dev->mii, cmd);
+}
+
+
+static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+       struct usbnet *dev = netdev_priv(net);
+       return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static const struct ethtool_ops ax88179_ethtool_ops = {
+       .get_link               = ethtool_op_get_link,
+       .get_msglevel           = usbnet_get_msglevel,
+       .set_msglevel           = usbnet_set_msglevel,
+       .get_wol                = ax88179_get_wol,
+       .set_wol                = ax88179_set_wol,
+       .get_eeprom_len         = ax88179_get_eeprom_len,
+       .get_eeprom             = ax88179_get_eeprom,
+       .get_settings           = ax88179_get_settings,
+       .set_settings           = ax88179_set_settings,
+       .nway_reset             = usbnet_nway_reset,
+};
+
+static void ax88179_set_multicast(struct net_device *net)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct ax88179_data *data = (struct ax88179_data *)dev->data;
+       u8 *m_filter = ((u8 *)dev->data) + 12;
+
+       data->rxctl = (AX_RX_CTL_START | AX_RX_CTL_AB | AX_RX_CTL_IPE);
+
+       if (net->flags & IFF_PROMISC) {
+               data->rxctl |= AX_RX_CTL_PRO;
+       } else if (net->flags & IFF_ALLMULTI ||
+                  netdev_mc_count(net) > AX_MAX_MCAST) {
+               data->rxctl |= AX_RX_CTL_AMALL;
+       } else if (netdev_mc_empty(net)) {
+               /* just broadcast and directed */
+       } else {
+               /* We use the 20 byte dev->data for our 8 byte filter buffer
+                * to avoid allocating memory that is tricky to free later
+                */
+               u32 crc_bits;
+               struct netdev_hw_addr *ha;
+
+               memset(m_filter, 0, AX_MCAST_FLTSIZE);
+
+               netdev_for_each_mc_addr(ha, net) {
+                       crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+                       *(m_filter + (crc_bits >> 3)) |= (1 << (crc_bits & 7));
+               }
+
+               ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_MULFLTARY,
+                                       AX_MCAST_FLTSIZE, AX_MCAST_FLTSIZE,
+                                       m_filter);
+
+               data->rxctl |= AX_RX_CTL_AM;
+       }
+
+       ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_RX_CTL,
+                               2, 2, &data->rxctl);
+}
+
+static int
+ax88179_set_features(struct net_device *net, netdev_features_t features)
+{
+       u8 tmp;
+       struct usbnet *dev = netdev_priv(net);
+       netdev_features_t changed = net->features ^ features;
+
+       if (changed & NETIF_F_IP_CSUM) {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+               tmp ^= AX_TXCOE_TCP | AX_TXCOE_UDP;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+       }
+
+       if (changed & NETIF_F_IPV6_CSUM) {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+               tmp ^= AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+       }
+
+       if (changed & NETIF_F_RXCSUM) {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
+               tmp ^= AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+                      AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
+       }
+
+       return 0;
+}
+
+static int ax88179_change_mtu(struct net_device *net, int new_mtu)
+{
+       struct usbnet *dev = netdev_priv(net);
+       u16 tmp16;
+
+       if (new_mtu <= 0 || new_mtu > 4088)
+               return -EINVAL;
+
+       net->mtu = new_mtu;
+       dev->hard_mtu = net->mtu + net->hard_header_len;
+
+       if (net->mtu > 1500) {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                                2, 2, &tmp16);
+               tmp16 |= AX_MEDIUM_JUMBO_EN;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                                 2, 2, &tmp16);
+       } else {
+               ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                                2, 2, &tmp16);
+               tmp16 &= ~AX_MEDIUM_JUMBO_EN;
+               ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                                 2, 2, &tmp16);
+       }
+
+       return 0;
+}
+
+static int ax88179_set_mac_addr(struct net_device *net, void *p)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct sockaddr *addr = p;
+
+       if (netif_running(net))
+               return -EBUSY;
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+       /* Set the MAC address */
+       return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+                                ETH_ALEN, net->dev_addr);
+}
+
+static const struct net_device_ops ax88179_netdev_ops = {
+       .ndo_open               = usbnet_open,
+       .ndo_stop               = usbnet_stop,
+       .ndo_start_xmit         = usbnet_start_xmit,
+       .ndo_tx_timeout         = usbnet_tx_timeout,
+       .ndo_change_mtu         = ax88179_change_mtu,
+       .ndo_set_mac_address    = ax88179_set_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = ax88179_ioctl,
+       .ndo_set_rx_mode        = ax88179_set_multicast,
+       .ndo_set_features       = ax88179_set_features,
+};
+
/*
 * ax88179_check_eeprom - validate the external EEPROM content
 *
 * Reads EEPROM words 0-5 through the SROM command interface, polling the
 * busy flag with a ~100 ms timeout per word.  eeprom[10] must hold the
 * 8-bit complement of the folded sum of bytes 6-9.
 *
 * Returns 0 when the EEPROM looks valid, -EINVAL on a register access
 * failure, poll timeout, blank first byte or checksum mismatch.
 */
static int ax88179_check_eeprom(struct usbnet *dev)
{
	u8 i, buf, eeprom[20];
	u16 csum, delay = HZ / 10;
	unsigned long jtimeout;

	/* Read EEPROM content */
	for (i = 0; i < 6; i++) {
		/* Latch the word address to read */
		buf = i;
		if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
				      1, 1, &buf) < 0)
			return -EINVAL;

		/* Trigger an EEPROM read cycle */
		buf = EEP_RD;
		if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
				      1, 1, &buf) < 0)
			return -EINVAL;

		/* Poll until the SROM controller clears the busy bit,
		 * bailing out after ~100 ms.
		 */
		jtimeout = jiffies + delay;
		do {
			ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
					 1, 1, &buf);

			if (time_after(jiffies, jtimeout))
				return -EINVAL;

		} while (buf & EEP_BUSY);

		/* Fetch the 16-bit data word just read.
		 * NOTE(review): the trailing 0 argument presumably selects
		 * the non-sleeping variant — confirm __ax88179_read_cmd.
		 */
		__ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
				   2, 2, &eeprom[i * 2], 0);

		/* A blank (erased) EEPROM reads back 0xFF: reject it */
		if ((i == 0) && (eeprom[0] == 0xFF))
			return -EINVAL;
	}

	/* Fold the 16-bit sum of bytes 6-9 down to 8 bits; eeprom[10]
	 * is its complement, so the two must add up to 0xff.
	 */
	csum = eeprom[6] + eeprom[7] + eeprom[8] + eeprom[9];
	csum = (csum >> 8) + (csum & 0xff);
	if ((csum + eeprom[10]) != 0xff)
		return -EINVAL;

	return 0;
}
+
+static int ax88179_check_efuse(struct usbnet *dev, u16 *ledmode)
+{
+       u8      i;
+       u8      efuse[64];
+       u16     csum = 0;
+
+       if (ax88179_read_cmd(dev, AX_ACCESS_EFUS, 0, 64, 64, efuse) < 0)
+               return -EINVAL;
+
+       if (*efuse == 0xFF)
+               return -EINVAL;
+
+       for (i = 0; i < 64; i++)
+               csum = csum + efuse[i];
+
+       while (csum > 255)
+               csum = (csum & 0x00FF) + ((csum >> 8) & 0x00FF);
+
+       if (csum != 0xFF)
+               return -EINVAL;
+
+       *ledmode = (efuse[51] << 8) | efuse[52];
+
+       return 0;
+}
+
+static int ax88179_convert_old_led(struct usbnet *dev, u16 *ledvalue)
+{
+       u16 led;
+
+       /* Loaded the old eFuse LED Mode */
+       if (ax88179_read_cmd(dev, AX_ACCESS_EEPROM, 0x3C, 1, 2, &led) < 0)
+               return -EINVAL;
+
+       led >>= 8;
+       switch (led) {
+       case 0xFF:
+               led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
+                     LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
+                     LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
+               break;
+       case 0xFE:
+               led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 | LED_VALID;
+               break;
+       case 0xFD:
+               led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 |
+                     LED2_LINK_10 | LED_VALID;
+               break;
+       case 0xFC:
+               led = LED0_ACTIVE | LED1_ACTIVE | LED1_LINK_1000 | LED2_ACTIVE |
+                     LED2_LINK_100 | LED2_LINK_10 | LED_VALID;
+               break;
+       default:
+               led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
+                     LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
+                     LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
+               break;
+       }
+
+       *ledvalue = led;
+
+       return 0;
+}
+
/*
 * ax88179_led_setting - program the LED mux from EEPROM/eFuse configuration
 *
 * Resolves the LED mode word in priority order: EEPROM (offset 0x42 via
 * the SROM interface), then eFuse, then the legacy conversion table when
 * neither source carries a LED_VALID entry.  The mode word is translated
 * into the PHY's LED activity/link registers (on the extended register
 * page) and the MAC's full-duplex LED control register.
 *
 * Returns 0 on success, -EINVAL on a register write failure or timeout.
 */
static int ax88179_led_setting(struct usbnet *dev)
{
	u8 ledfd, value = 0;
	u16 tmp, ledact, ledlink, ledvalue = 0, delay = HZ / 10;
	unsigned long jtimeout;

	/* Check AX88179 version: UA1 or UA2 */
	ax88179_read_cmd(dev, AX_ACCESS_MAC, GENERAL_STATUS, 1, 1, &value);

	if (!(value & AX_SECLD)) {	/* UA1: enable GPIO 1-3 outputs */
		value = AX_GPIO_CTRL_GPIO3EN | AX_GPIO_CTRL_GPIO2EN |
			AX_GPIO_CTRL_GPIO1EN;
		if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_GPIO_CTRL,
				      1, 1, &value) < 0)
			return -EINVAL;
	}

	/* Check EEPROM */
	if (!ax88179_check_eeprom(dev)) {
		/* 0x42: EEPROM offset of the LED mode word */
		value = 0x42;
		if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
				      1, 1, &value) < 0)
			return -EINVAL;

		value = EEP_RD;
		if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
				      1, 1, &value) < 0)
			return -EINVAL;

		/* Poll SROM busy with a ~100 ms timeout */
		jtimeout = jiffies + delay;
		do {
			ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
					 1, 1, &value);

			if (time_after(jiffies, jtimeout))
				return -EINVAL;

		} while (value & EEP_BUSY);

		/* Assemble the 16-bit LED word from the two data bytes */
		ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_HIGH,
				 1, 1, &value);
		ledvalue = (value << 8);

		ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
				 1, 1, &value);
		ledvalue |= value;

		/* load internal ROM for default setting */
		if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
			ax88179_convert_old_led(dev, &ledvalue);

	} else if (!ax88179_check_efuse(dev, &ledvalue)) {
		if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
			ax88179_convert_old_led(dev, &ledvalue);
	} else {
		ax88179_convert_old_led(dev, &ledvalue);
	}

	/* Switch the PHY to the extended register page to reach the
	 * LED activity/link control registers.
	 */
	tmp = GMII_PHY_PGSEL_EXT;
	ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
			  GMII_PHY_PAGE_SELECT, 2, &tmp);

	tmp = 0x2c;
	ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
			  GMII_PHYPAGE, 2, &tmp);

	ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
			 GMII_LED_ACT, 2, &ledact);

	ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
			 GMII_LED_LINK, 2, &ledlink);

	/* Keep only the bits outside our control, then OR in the
	 * per-LED activity and link-speed selections from ledvalue.
	 */
	ledact &= GMII_LED_ACTIVE_MASK;
	ledlink &= GMII_LED_LINK_MASK;

	if (ledvalue & LED0_ACTIVE)
		ledact |= GMII_LED0_ACTIVE;

	if (ledvalue & LED1_ACTIVE)
		ledact |= GMII_LED1_ACTIVE;

	if (ledvalue & LED2_ACTIVE)
		ledact |= GMII_LED2_ACTIVE;

	if (ledvalue & LED0_LINK_10)
		ledlink |= GMII_LED0_LINK_10;

	if (ledvalue & LED1_LINK_10)
		ledlink |= GMII_LED1_LINK_10;

	if (ledvalue & LED2_LINK_10)
		ledlink |= GMII_LED2_LINK_10;

	if (ledvalue & LED0_LINK_100)
		ledlink |= GMII_LED0_LINK_100;

	if (ledvalue & LED1_LINK_100)
		ledlink |= GMII_LED1_LINK_100;

	if (ledvalue & LED2_LINK_100)
		ledlink |= GMII_LED2_LINK_100;

	if (ledvalue & LED0_LINK_1000)
		ledlink |= GMII_LED0_LINK_1000;

	if (ledvalue & LED1_LINK_1000)
		ledlink |= GMII_LED1_LINK_1000;

	if (ledvalue & LED2_LINK_1000)
		ledlink |= GMII_LED2_LINK_1000;

	tmp = ledact;
	ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
			  GMII_LED_ACT, 2, &tmp);

	tmp = ledlink;
	ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
			  GMII_LED_LINK, 2, &tmp);

	/* Restore the default PHY register page */
	tmp = GMII_PHY_PGSEL_PAGE0;
	ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
			  GMII_PHY_PAGE_SELECT, 2, &tmp);

	/* LED full duplex setting */
	ledfd = 0;
	if (ledvalue & LED0_FD)
		ledfd |= 0x01;
	else if ((ledvalue & LED0_USB3_MASK) == 0)
		ledfd |= 0x02;

	if (ledvalue & LED1_FD)
		ledfd |= 0x04;
	else if ((ledvalue & LED1_USB3_MASK) == 0)
		ledfd |= 0x08;

	if (ledvalue & LED2_FD)
		ledfd |= 0x10;
	else if ((ledvalue & LED2_USB3_MASK) == 0)
		ledfd |= 0x20;

	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_LEDCTRL, 1, 1, &ledfd);

	return 0;
}
+
/*
 * ax88179_bind - one-time device setup when the USB interface is probed
 *
 * Powers up the PHY, selects chip clocks, reads the MAC address from the
 * chip, programs bulk-in aggregation and pause watermarks, wires up the
 * netdev/ethtool ops and MII accessors, enables RX/TX checksum offload,
 * starts the receiver and kicks off autonegotiation with the carrier
 * initially off.  Always returns 0.
 */
static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
{
	/* buf doubles as u8 and u16 scratch space for register writes */
	u8 buf[5];
	u16 *tmp16;
	u8 *tmp;
	struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;

	usbnet_get_endpoints(dev, intf);

	tmp16 = (u16 *)buf;
	tmp = (u8 *)buf;

	memset(ax179_data, 0, sizeof(*ax179_data));

	/* Power up ethernet PHY: clear the reset register, then assert
	 * IPRL and give the PHY time to come up.
	 */
	*tmp16 = 0;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
	*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
	msleep(200);

	/* Select internal clocks */
	*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
	msleep(100);

	/* Load the factory MAC address from the chip */
	ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
			 ETH_ALEN, dev->net->dev_addr);
	memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);

	/* RX bulk configuration (default/SS aggregation parameters) */
	memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);

	dev->rx_urb_size = 1024 * 20;

	/* Pause-frame watermarks */
	*tmp = 0x34;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);

	*tmp = 0x52;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
			  1, 1, tmp);

	dev->net->netdev_ops = &ax88179_netdev_ops;
	dev->net->ethtool_ops = &ax88179_ethtool_ops;
	dev->net->needed_headroom = 8;	/* room for the 8-byte TX header */

	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = ax88179_mdio_read;
	dev->mii.mdio_write = ax88179_mdio_write;
	dev->mii.phy_id_mask = 0xff;
	dev->mii.reg_num_mask = 0xff;
	dev->mii.phy_id = 0x03;
	dev->mii.supports_gmii = 1;

	dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;

	dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;

	/* Enable checksum offload */
	*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
	       AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);

	*tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
	       AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);

	/* Configure RX control register => start operation.
	 * NOTE(review): multi-byte values are passed host-endian here;
	 * presumably ax88179_write_cmd handles the little-endian
	 * conversion — confirm.
	 */
	*tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
		 AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);

	*tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
	       AX_MONITOR_MODE_RWMP;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);

	/* Configure default medium type => giga */
	*tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
		 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
		 AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
			  2, 2, tmp16);

	ax88179_led_setting(dev);

	/* Restart autoneg */
	mii_nway_restart(&dev->mii);

	netif_carrier_off(dev->net);

	return 0;
}
+
/*
 * ax88179_unbind - quiesce the adapter when the interface is released
 *
 * Stops the receiver, writes 0 to the clock-select register and powers
 * down the ethernet PHY.
 */
static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	u16 tmp16;

	/* Configure RX control register => stop operation */
	tmp16 = AX_RX_CTL_STOP;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);

	/* NOTE(review): a 1-byte write sourced from a u16; the value is 0,
	 * so the byte sent is 0 regardless of host endianness.
	 */
	tmp16 = 0;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp16);

	/* Power down ethernet PHY */
	tmp16 = 0;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
}
+
+static void
+ax88179_rx_checksum(struct sk_buff *skb, u32 *pkt_hdr)
+{
+       skb->ip_summed = CHECKSUM_NONE;
+
+       /* checksum error bit is set */
+       if ((*pkt_hdr & AX_RXHDR_L3CSUM_ERR) ||
+           (*pkt_hdr & AX_RXHDR_L4CSUM_ERR))
+               return;
+
+       /* It must be a TCP or UDP packet with a valid checksum */
+       if (((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_TCP) ||
+           ((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_UDP))
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
/*
 * ax88179_rx_fixup - split an aggregated bulk-in URB into packets
 *
 * The device appends a 4-byte little-endian trailer to each URB:
 * low 16 bits = packet count, high 16 bits = offset of the per-packet
 * 32-bit header array within the buffer.  Each per-packet header carries
 * error flags, checksum status and the packet length (bits 16-28).
 * Packets are 8-byte aligned in the buffer.  All packets but the last
 * are cloned and handed to usbnet individually; the last one is returned
 * in the original skb.
 *
 * Returns 1 on success, 0 when a clone allocation fails.
 */
static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	struct sk_buff *ax_skb;
	int pkt_cnt;
	u32 rx_hdr;
	u16 hdr_off;
	u32 *pkt_hdr;

	/* Strip the 4-byte trailer and decode it */
	skb_trim(skb, skb->len - 4);
	memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
	le32_to_cpus(&rx_hdr);

	pkt_cnt = (u16)rx_hdr;
	hdr_off = (u16)(rx_hdr >> 16);
	pkt_hdr = (u32 *)(skb->data + hdr_off);

	while (pkt_cnt--) {
		u16 pkt_len;

		le32_to_cpus(pkt_hdr);
		pkt_len = (*pkt_hdr >> 16) & 0x1fff;

		/* Check CRC or runt packet: skip over it (8-byte aligned) */
		if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
		    (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
			skb_pull(skb, (pkt_len + 7) & 0xFFF8);
			pkt_hdr++;
			continue;
		}

		if (pkt_cnt == 0) {
			/* Last packet: reuse the original skb.
			 * Skip IP alignment pseudo header.
			 * NOTE(review): truesize is set to
			 * pkt_len + sizeof(struct sk_buff), which ignores
			 * skb_shared_info overhead — verify accounting.
			 */
			skb_pull(skb, 2);
			skb->len = pkt_len;
			skb_set_tail_pointer(skb, pkt_len);
			skb->truesize = pkt_len + sizeof(struct sk_buff);
			ax88179_rx_checksum(skb, pkt_hdr);
			return 1;
		}

		/* Intermediate packet: clone, trim to this packet's
		 * bounds and hand it to the usbnet core.
		 */
		ax_skb = skb_clone(skb, GFP_ATOMIC);
		if (ax_skb) {
			ax_skb->len = pkt_len;
			ax_skb->data = skb->data + 2;
			skb_set_tail_pointer(ax_skb, pkt_len);
			ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
			ax88179_rx_checksum(ax_skb, pkt_hdr);
			usbnet_skb_return(dev, ax_skb);
		} else {
			return 0;
		}

		/* Advance past this packet (8-byte aligned) */
		skb_pull(skb, (pkt_len + 7) & 0xFFF8);
		pkt_hdr++;
	}
	return 1;
}
+
/*
 * ax88179_tx_fixup - prepend the 8-byte TX descriptor to an outgoing skb
 *
 * The hardware expects two little-endian 32-bit words ahead of the frame:
 * word 0 = frame length, word 1 = TSO MSS, with flag bits 0x80008000 set
 * when (len + 8) is an exact multiple of the USB max packet size
 * (NOTE(review): presumably to request padding so the transfer does not
 * end exactly on a packet boundary — confirm against the datasheet).
 *
 * Returns the (possibly reallocated) skb ready for submission, or NULL
 * on allocation failure; the original skb is consumed in that case.
 */
static struct sk_buff *
ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	u32 tx_hdr1, tx_hdr2;
	int frame_size = dev->maxpacket;
	int mss = skb_shinfo(skb)->gso_size;
	int headroom;
	int tailroom;

	tx_hdr1 = skb->len;
	tx_hdr2 = mss;
	if (((skb->len + 8) % frame_size) == 0)
		tx_hdr2 |= 0x80008000;	/* Enable padding */

	skb_linearize(skb);
	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	/* Make room for the 8-byte header: shift data within the existing
	 * buffer when it is exclusively owned and has 8 spare bytes in
	 * total, otherwise reallocate with expanded headroom.
	 */
	if (!skb_header_cloned(skb) &&
	    !skb_cloned(skb) &&
	    (headroom + tailroom) >= 8) {
		if (headroom < 8) {
			skb->data = memmove(skb->head + 8, skb->data, skb->len);
			skb_set_tail_pointer(skb, skb->len);
		}
	} else {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, 8, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	/* Push word 1 (MSS/padding flags), then word 0 (length), both
	 * converted to little-endian.
	 */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_hdr2);
	skb_copy_to_linear_data(skb, &tx_hdr2, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_hdr1);
	skb_copy_to_linear_data(skb, &tx_hdr1, 4);

	return skb;
}
+
/*
 * ax88179_link_reset - reconfigure the MAC after a PHY link change
 *
 * First loops until the device-side TX FIFO empties (vendor request
 * 0x81/0x8c, bit 30), toggling the RX control register each pass, with a
 * ~100 ms timeout.  Then reads the USB and PHY link status and programs
 * the medium register plus per-speed bulk-in aggregation parameters
 * before marking the carrier up.  Always returns 0, including when the
 * PHY reports no link.
 */
static int ax88179_link_reset(struct usbnet *dev)
{
	struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
	u8 tmp[5], link_sts;
	u16 mode, tmp16, delay = HZ / 10;
	u32 tmp32 = 0x40000000;
	unsigned long jtimeout;

	jtimeout = jiffies + delay;
	while (tmp32 & 0x40000000) {
		/* Disable RX, then restore the saved RX control value */
		mode = 0;
		ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &mode);
		ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2,
				  &ax179_data->rxctl);

		/*link up, check the usb device control TX FIFO full or empty*/
		ax88179_read_cmd(dev, 0x81, 0x8c, 0, 4, &tmp32);

		if (time_after(jiffies, jtimeout))
			return 0;
	}

	mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
	       AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE;

	ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS,
			 1, 1, &link_sts);

	ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
			 GMII_PHY_PHYSR, 2, &tmp16);

	/* Pick medium flags and bulk-in aggregation table entry based on
	 * the negotiated ethernet speed and the USB bus speed.
	 */
	if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
		return 0;
	} else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
		mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
		if (dev->net->mtu > 1500)
			mode |= AX_MEDIUM_JUMBO_EN;

		if (link_sts & AX_USB_SS)
			memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
		else if (link_sts & AX_USB_HS)
			memcpy(tmp, &AX88179_BULKIN_SIZE[1], 5);
		else
			memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
	} else if (GMII_PHY_PHYSR_100 == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
		mode |= AX_MEDIUM_PS;

		if (link_sts & (AX_USB_SS | AX_USB_HS))
			memcpy(tmp, &AX88179_BULKIN_SIZE[2], 5);
		else
			memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
	} else {
		memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
	}

	/* RX bulk configuration */
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);

	/* URB size follows the aggregation table's size byte */
	dev->rx_urb_size = (1024 * (tmp[3] + 2));

	if (tmp16 & GMII_PHY_PHYSR_FULL)
		mode |= AX_MEDIUM_FULL_DUPLEX;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
			  2, 2, &mode);

	netif_carrier_on(dev->net);

	return 0;
}
+
/*
 * ax88179_reset - reinitialize the adapter (usbnet .reset callback)
 *
 * Repeats most of the bind-time bring-up: PHY power-up, clock selection,
 * PHY auto-detach, MAC address reload, bulk-in aggregation, pause
 * watermarks, checksum offload, RX start and default gigabit medium,
 * finishing with LED setup and an autoneg restart.  Always returns 0.
 */
static int ax88179_reset(struct usbnet *dev)
{
	/* buf doubles as u8 and u16 scratch space for register writes */
	u8 buf[5];
	u16 *tmp16;
	u8 *tmp;

	tmp16 = (u16 *)buf;
	tmp = (u8 *)buf;

	/* Power up ethernet PHY: clear reset, assert IPRL, settle */
	*tmp16 = 0;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);

	*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
	msleep(200);

	/* Select internal clocks */
	*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
	msleep(100);

	/* Ethernet PHY Auto Detach */
	ax88179_auto_detach(dev, 0);

	/* Reload the MAC address from the chip */
	ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
			 dev->net->dev_addr);
	memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);

	/* RX bulk configuration (default/SS aggregation parameters) */
	memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);

	dev->rx_urb_size = 1024 * 20;

	/* Pause-frame watermarks */
	*tmp = 0x34;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);

	*tmp = 0x52;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
			  1, 1, tmp);

	dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;

	dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;

	/* Enable checksum offload */
	*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
	       AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);

	*tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
	       AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);

	/* Configure RX control register => start operation */
	*tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
		 AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);

	*tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
	       AX_MONITOR_MODE_RWMP;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);

	/* Configure default medium type => giga */
	*tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
		 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
		 AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
			  2, 2, tmp16);

	ax88179_led_setting(dev);

	/* Restart autoneg */
	mii_nway_restart(&dev->mii);

	netif_carrier_off(dev->net);

	return 0;
}
+
+static int ax88179_stop(struct usbnet *dev)
+{
+       u16 tmp16;
+
+       ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                        2, 2, &tmp16);
+       tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
+       ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+                         2, 2, &tmp16);
+
+       return 0;
+}
+
+static const struct driver_info ax88179_info = {
+       .description = "ASIX AX88179 USB 3.0 Gigibit Ethernet",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info ax88178a_info = {
+       .description = "ASIX AX88178A USB 2.0 Gigibit Ethernet",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
/* Sitecom-branded adapter based on the same AX88179 silicon; shares all
 * callbacks with the reference design.
 */
static const struct driver_info sitecom_info = {
	.description = "Sitecom USB 3.0 to Gigabit Adapter",
	.bind = ax88179_bind,
	.unbind = ax88179_unbind,
	.status = ax88179_status,
	.link_reset = ax88179_link_reset,
	.reset = ax88179_reset,
	.stop = ax88179_stop,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX,
	.rx_fixup = ax88179_rx_fixup,
	.tx_fixup = ax88179_tx_fixup,
};
+
/* USB vendor/product IDs handled by this driver */
static const struct usb_device_id products[] = {
{
	/* ASIX AX88179 10/100/1000 */
	USB_DEVICE(0x0b95, 0x1790),
	.driver_info = (unsigned long)&ax88179_info,
}, {
	/* ASIX AX88178A 10/100/1000 */
	USB_DEVICE(0x0b95, 0x178a),
	.driver_info = (unsigned long)&ax88178a_info,
}, {
	/* Sitecom USB 3.0 to Gigabit Adapter */
	USB_DEVICE(0x0df6, 0x0072),
	.driver_info = (unsigned long) &sitecom_info,
},
	{ },	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
+
/* USB driver glue: probe/disconnect are handled by the generic usbnet
 * layer; suspend/resume use AX88179-specific handlers.
 */
static struct usb_driver ax88179_178a_driver = {
	.name =		"ax88179_178a",
	.id_table =	products,
	.probe =	usbnet_probe,
	.suspend =	ax88179_suspend,
	.resume =	ax88179_resume,
	.disconnect =	usbnet_disconnect,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};
+
+module_usb_driver(ax88179_178a_driver);
+
+MODULE_DESCRIPTION("ASIX AX88179/178A based USB 3.0/2.0 Gigabit Ethernet Devices");
+MODULE_LICENSE("GPL");
index 4a8c25a..61b74a2 100644 (file)
@@ -1213,6 +1213,14 @@ static const struct usb_device_id cdc_devs[] = {
          .driver_info = (unsigned long) &wwan_info,
        },
 
+       /* tag Huawei devices as wwan */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
+                                       USB_CLASS_COMM,
+                                       USB_CDC_SUBCLASS_NCM,
+                                       USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&wwan_info,
+       },
+
        /* Huawei NCM devices disguised as vendor specific */
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
          .driver_info = (unsigned long)&wwan_info,
index 192c91c..57ac4b0 100644 (file)
@@ -1736,17 +1736,7 @@ static struct virtio_driver virtio_net_driver = {
 #endif
 };
 
-static int __init init(void)
-{
-       return register_virtio_driver(&virtio_net_driver);
-}
-
-static void __exit fini(void)
-{
-       unregister_virtio_driver(&virtio_net_driver);
-}
-module_init(init);
-module_exit(fini);
+module_virtio_driver(virtio_net_driver);
 
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio network driver");
index f736823..c3e3d29 100644 (file)
@@ -145,9 +145,8 @@ static inline struct hlist_head *vni_head(struct net *net, u32 id)
 static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
 {
        struct vxlan_dev *vxlan;
-       struct hlist_node *node;
 
-       hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
+       hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
                if (vxlan->vni == id)
                        return vxlan;
        }
@@ -292,9 +291,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
 {
        struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
        struct vxlan_fdb *f;
-       struct hlist_node *node;
 
-       hlist_for_each_entry_rcu(f, node, head, hlist) {
+       hlist_for_each_entry_rcu(f, head, hlist) {
                if (compare_ether_addr(mac, f->eth_addr) == 0)
                        return f;
        }
@@ -422,10 +420,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
 
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct vxlan_fdb *f;
-               struct hlist_node *n;
                int err;
 
-               hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
+               hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
                        if (idx < cb->args[0])
                                goto skip;
 
@@ -483,11 +480,10 @@ static bool vxlan_group_used(struct vxlan_net *vn,
                             const struct vxlan_dev *this)
 {
        const struct vxlan_dev *vxlan;
-       struct hlist_node *node;
        unsigned h;
 
        for (h = 0; h < VNI_HASH_SIZE; ++h)
-               hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
+               hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
                        if (vxlan == this)
                                continue;
 
@@ -965,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
        iph->ttl        = ttl ? : ip4_dst_hoplimit(&rt->dst);
        tunnel_ip_select_ident(skb, old_iph, &rt->dst);
 
+       nf_reset(skb);
+
        vxlan_set_owner(dev, skb);
 
        /* See iptunnel_xmit() */
index 0179cef..84734a8 100644 (file)
@@ -938,14 +938,14 @@ static int cosa_open(struct inode *inode, struct file *file)
        int ret = 0;
 
        mutex_lock(&cosa_chardev_mutex);
-       if ((n=iminor(file->f_path.dentry->d_inode)>>CARD_MINOR_BITS)
+       if ((n=iminor(file_inode(file))>>CARD_MINOR_BITS)
                >= nr_cards) {
                ret = -ENODEV;
                goto out;
        }
        cosa = cosa_cards+n;
 
-       if ((n=iminor(file->f_path.dentry->d_inode)
+       if ((n=iminor(file_inode(file))
                & ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels) {
                ret = -ENODEV;
                goto out;
index 5f845be..050ca4a 100644 (file)
@@ -27,7 +27,7 @@
 #define WME_MAX_BA              WME_BA_BMP_SIZE
 #define ATH_TID_MAX_BUFS        (2 * WME_MAX_BA)
 
-#define ATH_RSSI_DUMMY_MARKER   0x127
+#define ATH_RSSI_DUMMY_MARKER   127
 #define ATH_RSSI_LPF_LEN               10
 #define RSSI_LPF_THRESHOLD             -20
 #define ATH_RSSI_EP_MULTIPLIER     (1<<7)
index 96bfb18..d3b099d 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/firmware.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/leds.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
index 3ad1fd0..bd8251c 100644 (file)
@@ -1067,15 +1067,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
 
        last_rssi = priv->rx.last_rssi;
 
-       if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-               rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
-                                                    ATH_RSSI_EP_MULTIPLIER);
+       if (ieee80211_is_beacon(hdr->frame_control) &&
+           !is_zero_ether_addr(common->curbssid) &&
+           ether_addr_equal(hdr->addr3, common->curbssid)) {
+               s8 rssi = rxbuf->rxstatus.rs_rssi;
 
-       if (rxbuf->rxstatus.rs_rssi < 0)
-               rxbuf->rxstatus.rs_rssi = 0;
+               if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+                       rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
 
-       if (ieee80211_is_beacon(fc))
-               priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
+               if (rssi < 0)
+                       rssi = 0;
+
+               priv->ah->stats.avgbrssi = rssi;
+       }
 
        rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
        rx_status->band = hw->conf.channel->band;
index 2a2ae40..07e2526 100644 (file)
@@ -1463,7 +1463,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
                        reset_type = ATH9K_RESET_POWER_ON;
                else
                        reset_type = ATH9K_RESET_COLD;
-       }
+       } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
+                  (REG_READ(ah, AR_CR) & AR_CR_RXE))
+               reset_type = ATH9K_RESET_COLD;
 
        if (!ath9k_hw_set_reset_reg(ah, reset_type))
                return false;
index 94ef338..b775769 100644 (file)
@@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
 
        if (!(flags & CMD_ASYNC)) {
-               cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
+               cmd.flags |= CMD_WANT_SKB;
                might_sleep();
        }
 
index 9a0f45e..81aa91f 100644 (file)
@@ -349,25 +349,23 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
 TRACE_EVENT(iwlwifi_dev_hcmd,
        TP_PROTO(const struct device *dev,
                 struct iwl_host_cmd *cmd, u16 total_size,
-                const void *hdr, size_t hdr_len),
-       TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
+                struct iwl_cmd_header *hdr),
+       TP_ARGS(dev, cmd, total_size, hdr),
        TP_STRUCT__entry(
                DEV_ENTRY
                __dynamic_array(u8, hcmd, total_size)
                __field(u32, flags)
        ),
        TP_fast_assign(
-               int i, offset = hdr_len;
+               int i, offset = sizeof(*hdr);
 
                DEV_ASSIGN;
                __entry->flags = cmd->flags;
-               memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
+               memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
 
-               for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+               for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                        if (!cmd->len[i])
                                continue;
-                       if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
-                               continue;
                        memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
                               cmd->data[i], cmd->len[i]);
                        offset += cmd->len[i];
index 6f228bb..fbfd2d1 100644 (file)
@@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
 
 /* shared module parameters */
 struct iwl_mod_params iwlwifi_mod_params = {
-       .amsdu_size_8K = 1,
        .restart_fw = 1,
        .plcp_check = true,
        .bt_coex_active = true,
@@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable,
        "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
 module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
                   int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
 module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
 
index e5e3a79..2c2a729 100644 (file)
@@ -91,7 +91,7 @@ enum iwl_power_level {
  * @sw_crypto: using hardware encryption, default = 0
  * @disable_11n: disable 11n capabilities, default = 0,
  *     use IWL_DISABLE_HT_* constants
- * @amsdu_size_8K: enable 8K amsdu size, default = 1
+ * @amsdu_size_8K: enable 8K amsdu size, default = 0
  * @restart_fw: restart firmware, default = 1
  * @plcp_check: enable plcp health check, default = true
  * @wd_disable: enable stuck queue check, default = 0
index 14fc8d3..3392011 100644 (file)
@@ -136,12 +136,6 @@ struct iwl_calib_res_notif_phy_db {
        u8 data[];
 } __packed;
 
-#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
-static inline void iwl_phy_db_test_pic(__le32 pic)
-{
-       WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
-}
-
 struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
 {
        struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
@@ -260,11 +254,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
                        (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
        }
 
-       /* Test PIC */
-       if (type != IWL_PHY_DB_CFG)
-               iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
-                                     (size / sizeof(__le32)) - 1));
-
        IWL_DEBUG_INFO(phy_db->trans,
                       "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
                       __func__, __LINE__, type, size);
@@ -372,11 +361,6 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
                *size = entry->size;
        }
 
-       /* Test PIC */
-       if (type != IWL_PHY_DB_CFG)
-               iwl_phy_db_test_pic(*(((__le32 *)*data) +
-                                     (*size / sizeof(__le32)) - 1));
-
        IWL_DEBUG_INFO(phy_db->trans,
                       "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
                       __func__, __LINE__, type, *size);
index 8c7bec6..0cac2b7 100644 (file)
@@ -186,19 +186,13 @@ struct iwl_rx_packet {
  * @CMD_ASYNC: Return right away and don't want for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
  *     response. The caller needs to call iwl_free_resp when done.
- * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
- *     response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
- *     copied. The pointer passed to the response handler is in the transport
- *     ownership and don't need to be freed by the op_mode. This also means
- *     that the pointer is invalidated after the op_mode's handler returns.
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
        CMD_SYNC                = 0,
        CMD_ASYNC               = BIT(0),
        CMD_WANT_SKB            = BIT(1),
-       CMD_WANT_HCMD           = BIT(2),
-       CMD_ON_DEMAND           = BIT(3),
+       CMD_ON_DEMAND           = BIT(2),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -217,7 +211,11 @@ struct iwl_device_cmd {
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
 
-#define IWL_MAX_CMD_TFDS       2
+/*
+ * number of transfer buffers (fragments) per transmit frame descriptor;
+ * this is just the driver's idea, the hardware supports 20
+ */
+#define IWL_MAX_CMD_TBS_PER_TFD        2
 
 /**
  * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag {
  * @id: id of the host command
  */
 struct iwl_host_cmd {
-       const void *data[IWL_MAX_CMD_TFDS];
+       const void *data[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_rx_packet *resp_pkt;
        unsigned long _rx_page_addr;
        u32 _rx_page_order;
        int handler_status;
 
        u32 flags;
-       u16 len[IWL_MAX_CMD_TFDS];
-       u8 dataflags[IWL_MAX_CMD_TFDS];
+       u16 len[IWL_MAX_CMD_TBS_PER_TFD];
+       u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
        u8 id;
 };
 
index c64d864..994c8c2 100644 (file)
@@ -61,6 +61,7 @@
  *
  *****************************************************************************/
 
+#include <linux/etherdevice.h>
 #include <net/cfg80211.h>
 #include <net/ipv6.h>
 #include "iwl-modparams.h"
@@ -192,6 +193,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
                                           sizeof(wkc), &wkc);
                data->error = ret != 0;
 
+               mvm->ptk_ivlen = key->iv_len;
+               mvm->ptk_icvlen = key->icv_len;
+               mvm->gtk_ivlen = key->iv_len;
+               mvm->gtk_icvlen = key->icv_len;
+
                /* don't upload key again */
                goto out_unlock;
        }
@@ -304,9 +310,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
         */
        if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
                key->hw_key_idx = 0;
+               mvm->ptk_ivlen = key->iv_len;
+               mvm->ptk_icvlen = key->icv_len;
        } else {
                data->gtk_key_idx++;
                key->hw_key_idx = data->gtk_key_idx;
+               mvm->gtk_ivlen = key->iv_len;
+               mvm->gtk_icvlen = key->icv_len;
        }
 
        ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
@@ -649,6 +659,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        /* We reprogram keys and shouldn't allocate new key indices */
        memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 
+       mvm->ptk_ivlen = 0;
+       mvm->ptk_icvlen = 0;
+       mvm->ptk_ivlen = 0;
+       mvm->ptk_icvlen = 0;
+
        /*
         * The D3 firmware still hardcodes the AP station ID for the
         * BSS we're associated with as 0. As a result, we have to move
@@ -783,7 +798,6 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
        struct iwl_wowlan_status *status;
        u32 reasons;
        int ret, len;
-       bool pkt8023 = false;
        struct sk_buff *pkt = NULL;
 
        iwl_trans_read_mem_bytes(mvm->trans, base,
@@ -824,7 +838,8 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
        status = (void *)cmd.resp_pkt->data;
 
        if (len - sizeof(struct iwl_cmd_header) !=
-           sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) {
+           sizeof(*status) +
+           ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
                goto out;
        }
@@ -836,61 +851,96 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                goto report;
        }
 
-       if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
                wakeup.magic_pkt = true;
-               pkt8023 = true;
-       }
 
-       if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
                wakeup.pattern_idx =
                        le16_to_cpu(status->pattern_number);
-               pkt8023 = true;
-       }
 
        if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
                       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
                wakeup.disconnect = true;
 
-       if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
                wakeup.gtk_rekey_failure = true;
-               pkt8023 = true;
-       }
 
-       if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
                wakeup.rfkill_release = true;
-               pkt8023 = true;
-       }
 
-       if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
                wakeup.eap_identity_req = true;
-               pkt8023 = true;
-       }
 
-       if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
+       if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
                wakeup.four_way_handshake = true;
-               pkt8023 = true;
-       }
 
        if (status->wake_packet_bufsize) {
-               u32 pktsize = le32_to_cpu(status->wake_packet_bufsize);
-               u32 pktlen = le32_to_cpu(status->wake_packet_length);
+               int pktsize = le32_to_cpu(status->wake_packet_bufsize);
+               int pktlen = le32_to_cpu(status->wake_packet_length);
+               const u8 *pktdata = status->wake_packet;
+               struct ieee80211_hdr *hdr = (void *)pktdata;
+               int truncated = pktlen - pktsize;
+
+               /* this would be a firmware bug */
+               if (WARN_ON_ONCE(truncated < 0))
+                       truncated = 0;
+
+               if (ieee80211_is_data(hdr->frame_control)) {
+                       int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+                       int ivlen = 0, icvlen = 4; /* also FCS */
 
-               if (pkt8023) {
                        pkt = alloc_skb(pktsize, GFP_KERNEL);
                        if (!pkt)
                                goto report;
-                       memcpy(skb_put(pkt, pktsize), status->wake_packet,
-                              pktsize);
+
+                       memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
+                       pktdata += hdrlen;
+                       pktsize -= hdrlen;
+
+                       if (ieee80211_has_protected(hdr->frame_control)) {
+                               if (is_multicast_ether_addr(hdr->addr1)) {
+                                       ivlen = mvm->gtk_ivlen;
+                                       icvlen += mvm->gtk_icvlen;
+                               } else {
+                                       ivlen = mvm->ptk_ivlen;
+                                       icvlen += mvm->ptk_icvlen;
+                               }
+                       }
+
+                       /* if truncated, FCS/ICV is (partially) gone */
+                       if (truncated >= icvlen) {
+                               icvlen = 0;
+                               truncated -= icvlen;
+                       } else {
+                               icvlen -= truncated;
+                               truncated = 0;
+                       }
+
+                       pktsize -= ivlen + icvlen;
+                       pktdata += ivlen;
+
+                       memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
+
                        if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
                                goto report;
                        wakeup.packet = pkt->data;
                        wakeup.packet_present_len = pkt->len;
-                       wakeup.packet_len = pkt->len - (pktlen - pktsize);
+                       wakeup.packet_len = pkt->len - truncated;
                        wakeup.packet_80211 = false;
                } else {
+                       int fcslen = 4;
+
+                       if (truncated >= 4) {
+                               truncated -= 4;
+                               fcslen = 0;
+                       } else {
+                               fcslen -= truncated;
+                               truncated = 0;
+                       }
+                       pktsize -= fcslen;
                        wakeup.packet = status->wake_packet;
                        wakeup.packet_present_len = pktsize;
-                       wakeup.packet_len = pktlen;
+                       wakeup.packet_len = pktlen - truncated;
                        wakeup.packet_80211 = true;
                }
        }
index 23eebda..2adb61f 100644 (file)
@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd {
 #define IWL_RX_INFO_PHY_CNT 8
 #define IWL_RX_INFO_AGC_IDX 1
 #define IWL_RX_INFO_RSSI_AB_IDX 2
-#define IWL_RX_INFO_RSSI_C_IDX 3
-#define IWL_OFDM_AGC_DB_MSK 0xfe00
-#define IWL_OFDM_AGC_DB_POS 9
+#define IWL_OFDM_AGC_A_MSK 0x0000007f
+#define IWL_OFDM_AGC_A_POS 0
+#define IWL_OFDM_AGC_B_MSK 0x00003f80
+#define IWL_OFDM_AGC_B_POS 7
+#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
+#define IWL_OFDM_AGC_CODE_POS 20
 #define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
-#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
 #define IWL_OFDM_RSSI_A_POS 0
+#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
 #define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
-#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
 #define IWL_OFDM_RSSI_B_POS 16
-#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff
-#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00
-#define IWL_OFDM_RSSI_C_POS 0
+#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
 
 /**
  * struct iwl_rx_phy_info - phy info
index d3d959d..500f818 100644 (file)
 #define UCODE_VALID_OK cpu_to_le32(0x1)
 
 /* Default calibration values for WkP - set to INIT image w/o running */
-static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
-                                                0x00, 0x18, 0x00 };
-static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
-                                            0x7f, 0x7f, 0x7f };
-static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
-static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
-                                            0x00 };
-static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
 static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
 static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
-static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
 
 struct iwl_calib_default_data {
        u16 size;
@@ -99,12 +90,7 @@ struct iwl_calib_default_data {
 #define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
 
 static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
-       [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
-       [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
-       [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
-       [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
        [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
-       [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
        [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
 };
 
@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
        return 0;
 }
-#define IWL_HW_REV_ID_RAINBOW  0x2
-#define IWL_PROJ_TYPE_LHP      0x5
-
-static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
-{
-       struct iwl_nvm_data *data = mvm->nvm_data;
-       /* Temp calls to static definitions, will be changed to CSR calls */
-       u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
-       u8 project_type = IWL_PROJ_TYPE_LHP;
-
-       return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
-               (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
-               (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
-}
 
 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 {
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
        enum iwl_ucode_type ucode_type = mvm->cur_ucode;
 
        /* Set parameters */
-       phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm));
+       phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
        phy_cfg_cmd.calib_control.event_trigger =
                mvm->fw->default_calib[ucode_type].event_trigger;
        phy_cfg_cmd.calib_control.flow_trigger =
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
                                    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
 }
 
-/* Starting with the new PHY DB implementation - New calibs are enabled */
-/* Value - 0x405e7 */
-#define IWL_CALIB_DEFAULT_FLOW_INIT    (IWL_CALIB_CFG_XTAL_IDX         |\
-                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
-                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                        IWL_CALIB_CFG_DC_IDX           |\
-                                        IWL_CALIB_CFG_BB_FILTER_IDX    |\
-                                        IWL_CALIB_CFG_LO_LEAKAGE_IDX   |\
-                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_RX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_AGC_IDX)
-
-#define IWL_CALIB_DEFAULT_EVENT_INIT   0x0
-
-/* Value 0x41567 */
-#define IWL_CALIB_DEFAULT_FLOW_RUN     (IWL_CALIB_CFG_XTAL_IDX         |\
-                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
-                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                        IWL_CALIB_CFG_BB_FILTER_IDX    |\
-                                        IWL_CALIB_CFG_DC_IDX           |\
-                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_RX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_SENSITIVITY_IDX  |\
-                                        IWL_CALIB_CFG_AGC_IDX)
-
-#define IWL_CALIB_DEFAULT_EVENT_RUN    (IWL_CALIB_CFG_XTAL_IDX         |\
-                                        IWL_CALIB_CFG_TEMPERATURE_IDX  |\
-                                        IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
-                                        IWL_CALIB_CFG_TX_PWR_IDX       |\
-                                        IWL_CALIB_CFG_DC_IDX           |\
-                                        IWL_CALIB_CFG_TX_IQ_IDX        |\
-                                        IWL_CALIB_CFG_SENSITIVITY_IDX)
-
-/*
- * Sets the calibrations trigger values that will be sent to the FW for runtime
- * and init calibrations.
- * The ones given in the FW TLV are not correct.
- */
-static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
-{
-       struct iwl_tlv_calib_ctrl default_calib;
-
-       /*
-        * WkP FW TLV calib bits are wrong, overwrite them.
-        * This defines the dynamic calibrations which are implemented in the
-        * uCode both for init(flow) calculation and event driven calibs.
-        */
-
-       /* Init Image */
-       default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
-       default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
-
-       if (default_calib.event_trigger !=
-           mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
-               IWL_ERR(mvm,
-                       "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
-                       mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
-                       default_calib.event_trigger);
-       if (default_calib.flow_trigger !=
-           mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
-               IWL_ERR(mvm,
-                       "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
-                       mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
-                       default_calib.flow_trigger);
-
-       memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
-              &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
-       IWL_ERR(mvm,
-               "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
-               default_calib.event_trigger,
-               default_calib.flow_trigger);
-
-       /* Run time image */
-       default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
-       default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
-
-       if (default_calib.event_trigger !=
-           mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
-               IWL_ERR(mvm,
-                       "Updating the event calib for RT image: 0x%x -> 0x%x\n",
-                       mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
-                       default_calib.event_trigger);
-       if (default_calib.flow_trigger !=
-           mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
-               IWL_ERR(mvm,
-                       "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
-                       mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
-                       default_calib.flow_trigger);
-
-       memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
-              &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
-       IWL_ERR(mvm,
-               "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
-               default_calib.event_trigger,
-               default_calib.flow_trigger);
-}
-
 static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
 {
        u8 cmd_raw[16]; /* holds the variable size commands */
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        WARN_ON(ret);
 
-       /* Override the calibrations from TLV and the const of fw */
-       iwl_set_default_calib_trigger(mvm);
+       /* Send TX valid antennas before triggering calibrations */
+       ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
+       if (ret)
+               goto error;
 
        /* WkP doesn't have all calibrations, need to set default values */
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
index e8264e1..7e169b0 100644 (file)
@@ -557,11 +557,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        return ret;
 }
 
-static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
-                                        struct ieee80211_vif *vif)
+static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif)
 {
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        u32 tfd_msk = 0, ac;
 
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -594,12 +592,21 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
                 */
                flush_work(&mvm->sta_drained_wk);
        }
+}
+
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       iwl_mvm_prepare_mac_removal(mvm, vif);
 
        mutex_lock(&mvm->mutex);
 
        /*
         * For AP/GO interface, the tear down of the resources allocated to the
-        * interface should be handled as part of the bss_info_changed flow.
+        * interface is be handled as part of the stop_ap flow.
         */
        if (vif->type == NL80211_IFTYPE_AP) {
                iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
@@ -763,6 +770,8 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
+       iwl_mvm_prepare_mac_removal(mvm, vif);
+
        mutex_lock(&mvm->mutex);
 
        mvmvif->ap_active = false;
index 4e339cc..bdae700 100644 (file)
@@ -80,7 +80,8 @@
 
 #define IWL_INVALID_MAC80211_QUEUE     0xff
 #define IWL_MVM_MAX_ADDRESSES          2
-#define IWL_RSSI_OFFSET 44
+/* RSSI offset for WkP */
+#define IWL_RSSI_OFFSET 50
 
 enum iwl_mvm_tx_fifo {
        IWL_MVM_TX_FIFO_BK = 0,
@@ -327,6 +328,10 @@ struct iwl_mvm {
        struct led_classdev led;
 
        struct ieee80211_vif *p2p_device_vif;
+
+#ifdef CONFIG_PM_SLEEP
+       int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+#endif
 };
 
 /* Extract MVM priv from op_mode and _hw */
index aa59adf..d0f9c1e 100644 (file)
@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
        ieee80211_free_txskb(mvm->hw, skb);
 }
 
-static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
-       iwl_mvm_dump_nic_error_log(mvm);
-
        iwl_abort_notification_waits(&mvm->notif_wait);
 
        /*
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
        }
 }
 
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       iwl_mvm_dump_nic_error_log(mvm);
+
+       iwl_mvm_nic_restart(mvm);
+}
+
 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
 {
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
        WARN_ON(1);
+       iwl_mvm_nic_restart(mvm);
 }
 
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
index 3f40ab0..b0b190d 100644 (file)
@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
                             struct iwl_rx_phy_info *phy_info)
 {
-       u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db;
+       int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
+       int rssi_all_band_a, rssi_all_band_b;
+       u32 agc_a, agc_b, max_agc;
        u32 val;
 
-       /* Find max rssi among 3 possible receivers.
+       /* Find max rssi among 2 possible receivers.
         * These values are measured by the Digital Signal Processor (DSP).
         * They should stay fairly constant even as the signal strength varies,
         * if the radio's Automatic Gain Control (AGC) is working right.
         * AGC value (see below) will provide the "interesting" info.
         */
+       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
+       agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
+       agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
+       max_agc = max_t(u32, agc_a, agc_b);
+
        val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
        rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
        rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]);
-       rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS;
-
-       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
-       agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
+       rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
+                               IWL_OFDM_RSSI_ALLBAND_A_POS;
+       rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
+                               IWL_OFDM_RSSI_ALLBAND_B_POS;
 
-       max_rssi = max_t(u32, rssi_a, rssi_b);
-       max_rssi = max_t(u32, max_rssi, rssi_c);
+       /*
+        * dBm = rssi dB - agc dB - constant.
+        * Higher AGC (higher radio gain) means lower signal.
+        */
+       rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
+       rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
+       max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
 
-       IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
-                       rssi_a, rssi_b, rssi_c, max_rssi, agc_db);
+       IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
+                       rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
 
-       /* dBm = max_rssi dB - agc dB - constant.
-        * Higher AGC (higher radio gain) means lower signal. */
-       return max_rssi - agc_db - IWL_RSSI_OFFSET;
+       return max_rssi_dbm;
 }
 
 /*
index 861a7f9..274f44e 100644 (file)
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        u16 txq_id;
        int err;
 
+
+       /*
+        * If mac80211 is cleaning its state, then say that we finished since
+        * our state has been cleared anyway.
+        */
+       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               return 0;
+       }
+
        spin_lock_bh(&mvmsta->lock);
 
        txq_id = tid_data->txq_id;
index 6b67ce3..6645efe 100644 (file)
@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 
                /* Single frame failure in an AMPDU queue => send BAR */
                if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
-                   !(info->flags & IEEE80211_TX_STAT_ACK)) {
-                       /* there must be only one skb in the skb_list */
-                       WARN_ON_ONCE(skb_freed > 1 ||
-                                    !skb_queue_empty(&skbs));
+                   !(info->flags & IEEE80211_TX_STAT_ACK))
                        info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-               }
 
                /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
                if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
index aa2a39a..148843e 100644 (file)
@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
 struct iwl_cmd_meta {
        /* only for SYNC commands, iff the reply skb is wanted */
        struct iwl_host_cmd *source;
-
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-       DEFINE_DMA_UNMAP_LEN(len);
-
        u32 flags;
 };
 
@@ -182,19 +178,39 @@ struct iwl_queue {
 #define TFD_TX_CMD_SLOTS 256
 #define TFD_CMD_SLOTS 32
 
+/*
+ * The FH will write back to the first TB only, so we need
+ * to copy some data into the buffer regardless of whether
+ * it should be mapped or not. This indicates how big the
+ * first TB must be to include the scratch buffer. Since
+ * the scratch is 4 bytes at offset 12, it's 16 now. If we
+ * make it bigger then allocations will be bigger and copy
+ * slower, so that's probably not useful.
+ */
+#define IWL_HCMD_SCRATCHBUF_SIZE       16
+
 struct iwl_pcie_txq_entry {
        struct iwl_device_cmd *cmd;
-       struct iwl_device_cmd *copy_cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
        struct iwl_cmd_meta meta;
 };
 
+struct iwl_pcie_txq_scratch_buf {
+       struct iwl_cmd_header hdr;
+       u8 buf[8];
+       __le32 scratch;
+};
+
 /**
  * struct iwl_txq - Tx Queue for DMA
  * @q: generic Rx/Tx queue descriptor
  * @tfds: transmit frame descriptors (DMA memory)
+ * @scratchbufs: start of command headers, including scratch buffers, for
+ *     the writeback -- this is DMA memory and an array holding one buffer
+ *     for each command on the queue
+ * @scratchbufs_dma: DMA address for the scratchbufs start
  * @entries: transmit entries (driver state)
  * @lock: queue lock
  * @stuck_timer: timer that fires if queue gets stuck
@@ -208,6 +224,8 @@ struct iwl_pcie_txq_entry {
 struct iwl_txq {
        struct iwl_queue q;
        struct iwl_tfd *tfds;
+       struct iwl_pcie_txq_scratch_buf *scratchbufs;
+       dma_addr_t scratchbufs_dma;
        struct iwl_pcie_txq_entry *entries;
        spinlock_t lock;
        struct timer_list stuck_timer;
@@ -216,6 +234,13 @@ struct iwl_txq {
        u8 active;
 };
 
+static inline dma_addr_t
+iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
+{
+       return txq->scratchbufs_dma +
+              sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
+}
+
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
index b0ae06d..567e67a 100644 (file)
@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               if (reclaim) {
-                       struct iwl_pcie_txq_entry *ent;
-                       ent = &txq->entries[cmd_index];
-                       cmd = ent->copy_cmd;
-                       WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
-               } else {
+               if (reclaim)
+                       cmd = txq->entries[cmd_index].cmd;
+               else
                        cmd = NULL;
-               }
 
                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
                if (reclaim) {
-                       /* The original command isn't needed any more */
-                       kfree(txq->entries[cmd_index].copy_cmd);
-                       txq->entries[cmd_index].copy_cmd = NULL;
-                       /* nor is the duplicated part of the command */
                        kfree(txq->entries[cmd_index].free_buf);
                        txq->entries[cmd_index].free_buf = NULL;
                }
index 8e9e321..8595c16 100644 (file)
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
        }
 
        for (i = q->read_ptr; i != q->write_ptr;
-            i = iwl_queue_inc_wrap(i, q->n_bd)) {
-               struct iwl_tx_cmd *tx_cmd =
-                       (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+            i = iwl_queue_inc_wrap(i, q->n_bd))
                IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
-                       get_unaligned_le32(&tx_cmd->scratch));
-       }
+                       le32_to_cpu(txq->scratchbufs[i].scratch));
 
        iwl_op_mode_nic_error(trans->op_mode);
 }
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
 }
 
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
-                              struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
-                              enum dma_data_direction dma_dir)
+                              struct iwl_cmd_meta *meta,
+                              struct iwl_tfd *tfd)
 {
        int i;
        int num_tbs;
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
                return;
        }
 
-       /* Unmap tx_cmd */
-       if (num_tbs)
-               dma_unmap_single(trans->dev,
-                               dma_unmap_addr(meta, mapping),
-                               dma_unmap_len(meta, len),
-                               DMA_BIDIRECTIONAL);
+       /* first TB is never freed - it's the scratchbuf data */
 
-       /* Unmap chunks, if any. */
        for (i = 1; i < num_tbs; i++)
                dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-                                iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
+                                iwl_pcie_tfd_tb_get_len(tfd, i),
+                                DMA_TO_DEVICE);
 
        tfd->num_tbs = 0;
 }
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-                                 enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
        struct iwl_tfd *tfd_tmp = txq->tfds;
 
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
        lockdep_assert_held(&txq->lock);
 
        /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
-       iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
-                          dma_dir);
+       iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
 
        /* free SKB */
        if (txq->entries) {
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+       size_t scratchbuf_sz;
        int i;
 
        if (WARN_ON(txq->entries || txq->tfds))
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
                IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
+
+       BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
+       BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
+                       sizeof(struct iwl_cmd_header) +
+                       offsetof(struct iwl_tx_cmd, scratch));
+
+       scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
+
+       txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
+                                             &txq->scratchbufs_dma,
+                                             GFP_KERNEL);
+       if (!txq->scratchbufs)
+               goto err_free_tfds;
+
        txq->q.id = txq_id;
 
        return 0;
+err_free_tfds:
+       dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
 error:
        if (txq->entries && txq_id == trans_pcie->cmd_queue)
                for (i = 0; i < slots_num; i++)
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = &trans_pcie->txq[txq_id];
        struct iwl_queue *q = &txq->q;
-       enum dma_data_direction dma_dir;
 
        if (!q->n_bd)
                return;
 
-       /* In the command queue, all the TBs are mapped as BIDI
-        * so unmap them as such.
-        */
-       if (txq_id == trans_pcie->cmd_queue)
-               dma_dir = DMA_BIDIRECTIONAL;
-       else
-               dma_dir = DMA_TO_DEVICE;
-
        spin_lock_bh(&txq->lock);
        while (q->write_ptr != q->read_ptr) {
-               iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
+               iwl_pcie_txq_free_tfd(trans, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
        spin_unlock_bh(&txq->lock);
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
        if (txq_id == trans_pcie->cmd_queue)
                for (i = 0; i < txq->q.n_window; i++) {
                        kfree(txq->entries[i].cmd);
-                       kfree(txq->entries[i].copy_cmd);
                        kfree(txq->entries[i].free_buf);
                }
 
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
                dma_free_coherent(dev, sizeof(struct iwl_tfd) *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
                txq->q.dma_addr = 0;
+
+               dma_free_coherent(dev,
+                                 sizeof(*txq->scratchbufs) * txq->q.n_window,
+                                 txq->scratchbufs, txq->scratchbufs_dma);
        }
 
        kfree(txq->entries);
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
                iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
-               iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+               iwl_pcie_txq_free_tfd(trans, txq);
        }
 
        iwl_pcie_txq_progress(trans_pcie, txq);
@@ -1152,20 +1153,37 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        void *dup_buf = NULL;
        dma_addr_t phys_addr;
        int idx;
-       u16 copy_size, cmd_size;
+       u16 copy_size, cmd_size, scratch_size;
        bool had_nocopy = false;
        int i;
        u32 cmd_pos;
+       const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+       u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
        copy_size = sizeof(out_cmd->hdr);
        cmd_size = sizeof(out_cmd->hdr);
 
        /* need one for the header if the first is NOCOPY */
-       BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+       BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
+
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+               cmddata[i] = cmd->data[i];
+               cmdlen[i] = cmd->len[i];
 
-       for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
+
+               /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+               if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+                       int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+                       if (copy > cmdlen[i])
+                               copy = cmdlen[i];
+                       cmdlen[i] -= copy;
+                       cmddata[i] += copy;
+                       copy_size += copy;
+               }
+
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
                        had_nocopy = true;
                        if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1185,7 +1203,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                                goto free_dup_buf;
                        }
 
-                       dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+                       dup_buf = kmemdup(cmddata[i], cmdlen[i],
                                          GFP_ATOMIC);
                        if (!dup_buf)
                                return -ENOMEM;
@@ -1195,7 +1213,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }
-                       copy_size += cmd->len[i];
+                       copy_size += cmdlen[i];
                }
                cmd_size += cmd->len[i];
        }
@@ -1242,30 +1260,30 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
        /* and copy the data that needs to be copied */
        cmd_pos = offsetof(struct iwl_device_cmd, payload);
-       for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
-               if (!cmd->len[i])
+       copy_size = sizeof(out_cmd->hdr);
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+               int copy = 0;
+
+               if (!cmd->len)
                        continue;
-               if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
-                                        IWL_HCMD_DFL_DUP))
-                       break;
-               memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
-               cmd_pos += cmd->len[i];
-       }
 
-       WARN_ON_ONCE(txq->entries[idx].copy_cmd);
+               /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+               if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+                       copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
 
-       /*
-        * since out_cmd will be the source address of the FH, it will write
-        * the retry count there. So when the user needs to receivce the HCMD
-        * that corresponds to the response in the response handler, it needs
-        * to set CMD_WANT_HCMD.
-        */
-       if (cmd->flags & CMD_WANT_HCMD) {
-               txq->entries[idx].copy_cmd =
-                       kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
-               if (unlikely(!txq->entries[idx].copy_cmd)) {
-                       idx = -ENOMEM;
-                       goto out;
+                       if (copy > cmd->len[i])
+                               copy = cmd->len[i];
+               }
+
+               /* copy everything if not nocopy/dup */
+               if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+                                          IWL_HCMD_DFL_DUP)))
+                       copy = cmd->len[i];
+
+               if (copy) {
+                       memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+                       cmd_pos += copy;
+                       copy_size += copy;
                }
        }
 
@@ -1275,22 +1293,35 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
                     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
-       phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
-                                  DMA_BIDIRECTIONAL);
-       if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-               idx = -ENOMEM;
-               goto out;
-       }
-
-       dma_unmap_addr_set(out_meta, mapping, phys_addr);
-       dma_unmap_len_set(out_meta, len, copy_size);
+       /* start the TFD with the scratchbuf */
+       scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
+       memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
+       iwl_pcie_txq_build_tfd(trans, txq,
+                              iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
+                              scratch_size, 1);
+
+       /* map first command fragment, if any remains */
+       if (copy_size > scratch_size) {
+               phys_addr = dma_map_single(trans->dev,
+                                          ((u8 *)&out_cmd->hdr) + scratch_size,
+                                          copy_size - scratch_size,
+                                          DMA_TO_DEVICE);
+               if (dma_mapping_error(trans->dev, phys_addr)) {
+                       iwl_pcie_tfd_unmap(trans, out_meta,
+                                          &txq->tfds[q->write_ptr]);
+                       idx = -ENOMEM;
+                       goto out;
+               }
 
-       iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
+               iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+                                      copy_size - scratch_size, 0);
+       }
 
-       for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
-               const void *data = cmd->data[i];
+       /* map the remaining (adjusted) nocopy/dup fragments */
+       for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+               const void *data = cmddata[i];
 
-               if (!cmd->len[i])
+               if (!cmdlen[i])
                        continue;
                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
                                           IWL_HCMD_DFL_DUP)))
@@ -1298,16 +1329,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
                        data = dup_buf;
                phys_addr = dma_map_single(trans->dev, (void *)data,
-                                          cmd->len[i], DMA_BIDIRECTIONAL);
+                                          cmdlen[i], DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        iwl_pcie_tfd_unmap(trans, out_meta,
-                                          &txq->tfds[q->write_ptr],
-                                          DMA_BIDIRECTIONAL);
+                                          &txq->tfds[q->write_ptr]);
                        idx = -ENOMEM;
                        goto out;
                }
 
-               iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
+               iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
        }
 
        out_meta->flags = cmd->flags;
@@ -1317,8 +1347,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
        txq->need_update = 1;
 
-       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
-                              &out_cmd->hdr, copy_size);
+       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
@@ -1377,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
        cmd = txq->entries[cmd_index].cmd;
        meta = &txq->entries[cmd_index].meta;
 
-       iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+       iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
 
        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
@@ -1556,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq;
        struct iwl_queue *q;
-       dma_addr_t phys_addr = 0;
-       dma_addr_t txcmd_phys;
-       dma_addr_t scratch_phys;
-       u16 len, firstlen, secondlen;
+       dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+       void *tb1_addr;
+       u16 len, tb1_len, tb2_len;
        u8 wait_write_ptr = 0;
        __le16 fc = hdr->frame_control;
        u8 hdr_len = ieee80211_hdrlen(fc);
@@ -1597,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                            INDEX_TO_SEQ(q->write_ptr)));
 
+       tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
+       scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+                      offsetof(struct iwl_tx_cmd, scratch);
+
+       tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+       tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->entries[q->write_ptr].meta;
 
        /*
-        * Use the first empty entry in this queue's command buffer array
-        * to contain the Tx command and MAC header concatenated together
-        * (payload data will be in another buffer).
-        * Size of this varies, due to varying MAC header length.
-        * If end is not dword aligned, we'll have 2 extra bytes at the end
-        * of the MAC header (device reads on dword boundaries).
-        * We'll tell device about this padding later.
+        * The second TB (tb1) points to the remainder of the TX command
+        * and the 802.11 header - dword aligned size
+        * (This calculation modifies the TX command, so do it before the
+        * setup of the first TB)
         */
-       len = sizeof(struct iwl_tx_cmd) +
-               sizeof(struct iwl_cmd_header) + hdr_len;
-       firstlen = (len + 3) & ~3;
+       len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+             hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
+       tb1_len = (len + 3) & ~3;
 
        /* Tell NIC about any 2-byte padding after MAC header */
-       if (firstlen != len)
+       if (tb1_len != len)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
 
-       /* Physical address of this Tx command's header (not MAC header!),
-        * within command buffer array. */
-       txcmd_phys = dma_map_single(trans->dev,
-                                   &dev_cmd->hdr, firstlen,
-                                   DMA_BIDIRECTIONAL);
-       if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-               goto out_err;
-       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, firstlen);
+       /* The first TB points to the scratchbuf data - min_copy bytes */
+       memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
+              IWL_HCMD_SCRATCHBUF_SIZE);
+       iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+                              IWL_HCMD_SCRATCHBUF_SIZE, 1);
 
-       if (!ieee80211_has_morefrags(fc)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
+       /* there must be data left over for TB1 or this code must be changed */
+       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
+
+       /* map the data for TB1 */
+       tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
+       tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+               goto out_err;
+       iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
 
-       /* Set up TFD's 2nd entry to point directly to remainder of skb,
-        * if any (802.11 null frames have no payload). */
-       secondlen = skb->len - hdr_len;
-       if (secondlen > 0) {
-               phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
-                                          secondlen, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-                       dma_unmap_single(trans->dev,
-                                        dma_unmap_addr(out_meta, mapping),
-                                        dma_unmap_len(out_meta, len),
-                                        DMA_BIDIRECTIONAL);
+       /*
+        * Set up TFD's third entry to point directly to remainder
+        * of skb, if any (802.11 null frames have no payload).
+        */
+       tb2_len = skb->len - hdr_len;
+       if (tb2_len > 0) {
+               dma_addr_t tb2_phys = dma_map_single(trans->dev,
+                                                    skb->data + hdr_len,
+                                                    tb2_len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
+                       iwl_pcie_tfd_unmap(trans, out_meta,
+                                          &txq->tfds[q->write_ptr]);
                        goto out_err;
                }
+               iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
        }
 
-       /* Attach buffers to TFD */
-       iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
-       if (secondlen > 0)
-               iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
-
-       scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-                               offsetof(struct iwl_tx_cmd, scratch);
-
-       /* take back ownership of DMA buffer to enable update */
-       dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
-                               DMA_BIDIRECTIONAL);
-       tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-       tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
-       dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
-                                  DMA_BIDIRECTIONAL);
-
        trace_iwlwifi_dev_tx(trans->dev, skb,
                             &txq->tfds[txq->q.write_ptr],
                             sizeof(struct iwl_tfd),
-                            &dev_cmd->hdr, firstlen,
-                            skb->data + hdr_len, secondlen);
+                            &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+                            skb->data + hdr_len, tb2_len);
        trace_iwlwifi_dev_tx_data(trans->dev, skb,
-                                 skb->data + hdr_len, secondlen);
+                                 skb->data + hdr_len, tb2_len);
+
+       if (!ieee80211_has_morefrags(fc)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
+       }
 
        /* start timer if queue currently empty */
        if (txq->need_update && q->read_ptr == q->write_ptr &&
index 739309e..4557833 100644 (file)
@@ -825,6 +825,11 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
 
        sdio_release_host(func);
 
+       /* Set fw_ready before queuing any commands so that
+        * lbs_thread won't block from sending them to firmware.
+        */
+       priv->fw_ready = 1;
+
        /*
         * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
         */
@@ -839,7 +844,6 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
                        netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
        }
 
-       priv->fw_ready = 1;
        wake_up(&card->pwron_waitq);
 
        if (!card->started) {
index 35c7972..5c395e2 100644 (file)
@@ -302,7 +302,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
                i++;
                usleep_range(10, 20);
                /* 50ms max wait */
-               if (i == 50000)
+               if (i == 5000)
                        break;
        }
 
index e7cf37f..3109c0d 100644 (file)
@@ -2778,7 +2778,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
                nr = nr * 10 + c;
                p++;
        } while (--len);
-       *(int *)PDE(file->f_path.dentry->d_inode)->data = nr;
+       *(int *)PDE(file_inode(file))->data = nr;
        return count;
 }
 
index 1031db6..189744d 100644 (file)
@@ -1236,8 +1236,10 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
         */
        if_limit = &rt2x00dev->if_limits_ap;
        if_limit->max = rt2x00dev->ops->max_ap_intf;
-       if_limit->types = BIT(NL80211_IFTYPE_AP) |
-                       BIT(NL80211_IFTYPE_MESH_POINT);
+       if_limit->types = BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_MAC80211_MESH
+       if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT);
+#endif
 
        /*
         * Build up AP interface combinations structure.
@@ -1309,7 +1311,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
                rt2x00dev->hw->wiphy->interface_modes |=
                    BIT(NL80211_IFTYPE_ADHOC) |
                    BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
                    BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
                    BIT(NL80211_IFTYPE_WDS);
 
        rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
index 48273dd..4941f20 100644 (file)
@@ -309,7 +309,6 @@ static void zd1201_usbrx(struct urb *urb)
        if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
                int datalen = urb->actual_length-1;
                unsigned short len, fc, seq;
-               struct hlist_node *node;
 
                len = ntohs(*(__be16 *)&data[datalen-2]);
                if (len>datalen)
@@ -362,7 +361,7 @@ static void zd1201_usbrx(struct urb *urb)
                                hlist_add_head(&frag->fnode, &zd->fraglist);
                                goto resubmit;
                        }
-                       hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
+                       hlist_for_each_entry(frag, &zd->fraglist, fnode)
                                if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
                                        break;
                        if (!frag)
@@ -1831,14 +1830,14 @@ err_zd:
 static void zd1201_disconnect(struct usb_interface *interface)
 {
        struct zd1201 *zd = usb_get_intfdata(interface);
-       struct hlist_node *node, *node2;
+       struct hlist_node *node2;
        struct zd1201_frag *frag;
 
        if (!zd)
                return;
        usb_set_intfdata(interface, NULL);
 
-       hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) {
+       hlist_for_each_entry_safe(frag, node2, &zd->fraglist, fnode) {
                hlist_del_init(&frag->fnode);
                kfree_skb(frag->skb);
                kfree(frag);
index 849357c..445ffda 100644 (file)
@@ -139,17 +139,22 @@ static int __oprofilefs_create_file(struct super_block *sb,
        struct dentry *dentry;
        struct inode *inode;
 
+       mutex_lock(&root->d_inode->i_mutex);
        dentry = d_alloc_name(root, name);
-       if (!dentry)
+       if (!dentry) {
+               mutex_unlock(&root->d_inode->i_mutex);
                return -ENOMEM;
+       }
        inode = oprofilefs_get_inode(sb, S_IFREG | perm);
        if (!inode) {
                dput(dentry);
+               mutex_unlock(&root->d_inode->i_mutex);
                return -ENOMEM;
        }
        inode->i_fop = fops;
+       inode->i_private = priv;
        d_add(dentry, inode);
-       dentry->d_inode->i_private = priv;
+       mutex_unlock(&root->d_inode->i_mutex);
        return 0;
 }
 
@@ -212,17 +217,22 @@ struct dentry *oprofilefs_mkdir(struct super_block *sb,
        struct dentry *dentry;
        struct inode *inode;
 
+       mutex_lock(&root->d_inode->i_mutex);
        dentry = d_alloc_name(root, name);
-       if (!dentry)
+       if (!dentry) {
+               mutex_unlock(&root->d_inode->i_mutex);
                return NULL;
+       }
        inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
        if (!inode) {
                dput(dentry);
+               mutex_unlock(&root->d_inode->i_mutex);
                return NULL;
        }
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
        d_add(dentry, inode);
+       mutex_unlock(&root->d_inode->i_mutex);
        return dentry;
 }
 
index f2f501e..d4d800c 100644 (file)
@@ -179,7 +179,7 @@ static int led_proc_open(struct inode *inode, struct file *file)
 static ssize_t led_proc_write(struct file *file, const char *buf,
        size_t count, loff_t *pos)
 {
-       void *data = PDE(file->f_path.dentry->d_inode)->data;
+       void *data = PDE(file_inode(file))->data;
        char *cur, lbuf[32];
        int d;
 
index 924e466..b099e00 100644 (file)
@@ -842,9 +842,8 @@ static struct pci_cap_saved_state *pci_find_saved_cap(
        struct pci_dev *pci_dev, char cap)
 {
        struct pci_cap_saved_state *tmp;
-       struct hlist_node *pos;
 
-       hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+       hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
                if (tmp->cap.cap_nr == cap)
                        return tmp;
        }
@@ -1041,7 +1040,6 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
        struct pci_saved_state *state;
        struct pci_cap_saved_state *tmp;
        struct pci_cap_saved_data *cap;
-       struct hlist_node *pos;
        size_t size;
 
        if (!dev->state_saved)
@@ -1049,7 +1047,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
 
        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
 
-       hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
+       hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
 
        state = kzalloc(size, GFP_KERNEL);
@@ -1060,7 +1058,7 @@ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
               sizeof(state->config_space));
 
        cap = state->cap;
-       hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
+       hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                memcpy(cap, &tmp->cap, len);
                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
@@ -2038,9 +2036,9 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 void pci_free_cap_save_buffers(struct pci_dev *dev)
 {
        struct pci_cap_saved_state *tmp;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
 
-       hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
+       hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
                kfree(tmp);
 }
 
index 9b8505c..0b00947 100644 (file)
@@ -21,7 +21,7 @@ static loff_t
 proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
 {
        loff_t new = -1;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        mutex_lock(&inode->i_mutex);
        switch (whence) {
@@ -46,7 +46,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
 static ssize_t
 proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
-       const struct inode *ino = file->f_path.dentry->d_inode;
+       const struct inode *ino = file_inode(file);
        const struct proc_dir_entry *dp = PDE(ino);
        struct pci_dev *dev = dp->data;
        unsigned int pos = *ppos;
@@ -132,7 +132,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp
 static ssize_t
 proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos)
 {
-       struct inode *ino = file->f_path.dentry->d_inode;
+       struct inode *ino = file_inode(file);
        const struct proc_dir_entry *dp = PDE(ino);
        struct pci_dev *dev = dp->data;
        int pos = *ppos;
@@ -212,7 +212,7 @@ struct pci_filp_private {
 static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
                               unsigned long arg)
 {
-       const struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
+       const struct proc_dir_entry *dp = PDE(file_inode(file));
        struct pci_dev *dev = dp->data;
 #ifdef HAVE_PCI_MMAP
        struct pci_filp_private *fpriv = file->private_data;
@@ -253,7 +253,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
 #ifdef HAVE_PCI_MMAP
 static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        const struct proc_dir_entry *dp = PDE(inode);
        struct pci_dev *dev = dp->data;
        struct pci_filp_private *fpriv = file->private_data;
index 7ab0b2f..3338437 100644 (file)
@@ -79,6 +79,17 @@ config ASUS_LAPTOP
 
          If you have an ACPI-compatible ASUS laptop, say Y or M here.
 
+config CHROMEOS_LAPTOP
+       tristate "Chrome OS Laptop"
+       depends on I2C
+       depends on DMI
+       ---help---
+         This driver instantiates i2c and smbus devices such as
+         light sensors and touchpads.
+
+         If you have a supported Chromebook, choose Y or M here.
+         The module will be called chromeos_laptop.
+
 config DELL_LAPTOP
        tristate "Dell Laptop Extras"
        depends on X86
@@ -288,9 +299,11 @@ config IDEAPAD_LAPTOP
        depends on ACPI
        depends on RFKILL && INPUT
        depends on SERIO_I8042
+       depends on BACKLIGHT_CLASS_DEVICE
        select INPUT_SPARSEKMAP
        help
-         This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
+         This is a driver for Lenovo IdeaPad netbooks contains drivers for
+         rfkill switch, hotkey, fan control and backlight control.
 
 config THINKPAD_ACPI
        tristate "ThinkPad ACPI Laptop Extras"
index bf7e4f9..ace2b38 100644 (file)
@@ -50,3 +50,4 @@ obj-$(CONFIG_INTEL_MID_POWER_BUTTON)  += intel_mid_powerbtn.o
 obj-$(CONFIG_INTEL_OAKTRAIL)   += intel_oaktrail.o
 obj-$(CONFIG_SAMSUNG_Q10)      += samsung-q10.o
 obj-$(CONFIG_APPLE_GMUX)       += apple-gmux.o
+obj-$(CONFIG_CHROMEOS_LAPTOP)  += chromeos_laptop.o
index afed701..c9076bd 100644 (file)
@@ -511,6 +511,24 @@ static struct dmi_system_id acer_quirks[] = {
                },
                .driver_data = &quirk_fujitsu_amilo_li_1718,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "Lenovo Ideapad S205-10382JG",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "10382JG"),
+               },
+               .driver_data = &quirk_lenovo_ideapad_s205,
+       },
+       {
+               .callback = dmi_matched,
+               .ident = "Lenovo Ideapad S205-1038DPG",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "1038DPG"),
+               },
+               .driver_data = &quirk_lenovo_ideapad_s205,
+       },
        {}
 };
 
@@ -1204,6 +1222,9 @@ static acpi_status WMID_set_capabilities(void)
                        devices = *((u32 *) obj->buffer.pointer);
                } else if (obj->type == ACPI_TYPE_INTEGER) {
                        devices = (u32) obj->integer.value;
+               } else {
+                       kfree(out.pointer);
+                       return AE_ERROR;
                }
        } else {
                kfree(out.pointer);
index d9f9a0d..0eea09c 100644 (file)
@@ -128,10 +128,12 @@ MODULE_PARM_DESC(als_status, "Set the ALS status on boot "
 /*
  * Some events we use, same for all Asus
  */
-#define ATKD_BR_UP     0x10    /* (event & ~ATKD_BR_UP) = brightness level */
-#define ATKD_BR_DOWN   0x20    /* (event & ~ATKD_BR_DOWN) = britghness level */
-#define ATKD_BR_MIN    ATKD_BR_UP
-#define ATKD_BR_MAX    (ATKD_BR_DOWN | 0xF)    /* 0x2f */
+#define ATKD_BRNUP_MIN         0x10
+#define ATKD_BRNUP_MAX         0x1f
+#define ATKD_BRNDOWN_MIN       0x20
+#define ATKD_BRNDOWN_MAX       0x2f
+#define ATKD_BRNDOWN           0x20
+#define ATKD_BRNUP             0x2f
 #define ATKD_LCD_ON    0x33
 #define ATKD_LCD_OFF   0x34
 
@@ -301,40 +303,65 @@ static const struct key_entry asus_keymap[] = {
        {KE_KEY, 0x17, { KEY_ZOOM } },
        {KE_KEY, 0x1f, { KEY_BATTERY } },
        /* End of Lenovo SL Specific keycodes */
+       {KE_KEY, ATKD_BRNDOWN, { KEY_BRIGHTNESSDOWN } },
+       {KE_KEY, ATKD_BRNUP, { KEY_BRIGHTNESSUP } },
        {KE_KEY, 0x30, { KEY_VOLUMEUP } },
        {KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
        {KE_KEY, 0x32, { KEY_MUTE } },
-       {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
-       {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
+       {KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
+       {KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
        {KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
        {KE_KEY, 0x41, { KEY_NEXTSONG } },
-       {KE_KEY, 0x43, { KEY_STOPCD } },
+       {KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
        {KE_KEY, 0x45, { KEY_PLAYPAUSE } },
-       {KE_KEY, 0x4c, { KEY_MEDIA } },
+       {KE_KEY, 0x4c, { KEY_MEDIA } }, /* WMP Key */
        {KE_KEY, 0x50, { KEY_EMAIL } },
        {KE_KEY, 0x51, { KEY_WWW } },
        {KE_KEY, 0x55, { KEY_CALC } },
+       {KE_IGNORE, 0x57, },  /* Battery mode */
+       {KE_IGNORE, 0x58, },  /* AC mode */
        {KE_KEY, 0x5C, { KEY_SCREENLOCK } },  /* Screenlock */
-       {KE_KEY, 0x5D, { KEY_WLAN } },
-       {KE_KEY, 0x5E, { KEY_WLAN } },
-       {KE_KEY, 0x5F, { KEY_WLAN } },
-       {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
-       {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
-       {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
-       {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
-       {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
+       {KE_KEY, 0x5D, { KEY_WLAN } }, /* WLAN Toggle */
+       {KE_KEY, 0x5E, { KEY_WLAN } }, /* WLAN Enable */
+       {KE_KEY, 0x5F, { KEY_WLAN } }, /* WLAN Disable */
+       {KE_KEY, 0x60, { KEY_TOUCHPAD_ON } },
+       {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD only */
+       {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT only */
+       {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT */
+       {KE_KEY, 0x64, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV */
+       {KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
+       {KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
+       {KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
+       {KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad */
        {KE_KEY, 0x6C, { KEY_SLEEP } }, /* Suspend */
        {KE_KEY, 0x6D, { KEY_SLEEP } }, /* Hibernate */
-       {KE_KEY, 0x7E, { KEY_BLUETOOTH } },
-       {KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+       {KE_IGNORE, 0x6E, },  /* Low Battery notification */
+       {KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+       {KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
        {KE_KEY, 0x82, { KEY_CAMERA } },
-       {KE_KEY, 0x88, { KEY_WLAN  } },
-       {KE_KEY, 0x8A, { KEY_PROG1 } },
+       {KE_KEY, 0x88, { KEY_RFKILL  } }, /* Radio Toggle Key */
+       {KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+       {KE_KEY, 0x8C, { KEY_SWITCHVIDEOMODE } }, /* SDSP DVI only */
+       {KE_KEY, 0x8D, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + DVI */
+       {KE_KEY, 0x8E, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + DVI */
+       {KE_KEY, 0x8F, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + DVI */
+       {KE_KEY, 0x90, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + DVI */
+       {KE_KEY, 0x91, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + DVI */
+       {KE_KEY, 0x92, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + DVI */
+       {KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
        {KE_KEY, 0x95, { KEY_MEDIA } },
        {KE_KEY, 0x99, { KEY_PHONE } },
-       {KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
-       {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
-       {KE_KEY, 0xb5, { KEY_CALC } },
+       {KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
+       {KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
+       {KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
+       {KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
+       {KE_KEY, 0xA4, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + HDMI */
+       {KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
+       {KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
+       {KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+       {KE_KEY, 0xB5, { KEY_CALC } },
+       {KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+       {KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
        {KE_END, 0},
 };
 
@@ -1521,15 +1548,19 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
                                        dev_name(&asus->device->dev), event,
                                        count);
 
-       /* Brightness events are special */
-       if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
+       if (event >= ATKD_BRNUP_MIN && event <= ATKD_BRNUP_MAX)
+               event = ATKD_BRNUP;
+       else if (event >= ATKD_BRNDOWN_MIN &&
+                event <= ATKD_BRNDOWN_MAX)
+               event = ATKD_BRNDOWN;
 
-               /* Ignore them completely if the acpi video driver is used */
+       /* Brightness events are special */
+       if (event == ATKD_BRNDOWN || event == ATKD_BRNUP) {
                if (asus->backlight_device != NULL) {
                        /* Update the backlight device. */
                        asus_backlight_notify(asus);
+                       return ;
                }
-               return ;
        }
 
        /* Accelerometer "coarse orientation change" event */
index be79040..210b5b8 100644 (file)
@@ -59,6 +59,17 @@ static struct quirk_entry quirk_asus_unknown = {
        .wapf = 0,
 };
 
+/*
+ * For those machines that need software to control bt/wifi status
+ * and can't adjust brightness through ACPI interface
+ * and have duplicate events(ACPI and WMI) for display toggle
+ */
+static struct quirk_entry quirk_asus_x55u = {
+       .wapf = 4,
+       .wmi_backlight_power = true,
+       .no_display_toggle = true,
+};
+
 static struct quirk_entry quirk_asus_x401u = {
        .wapf = 4,
 };
@@ -77,6 +88,15 @@ static struct dmi_system_id asus_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "X401U"),
                },
+               .driver_data = &quirk_asus_x55u,
+       },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUSTeK COMPUTER INC. X401A",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X401A"),
+               },
                .driver_data = &quirk_asus_x401u,
        },
        {
@@ -95,6 +115,15 @@ static struct dmi_system_id asus_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "X501U"),
                },
+               .driver_data = &quirk_asus_x55u,
+       },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUSTeK COMPUTER INC. X501A",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X501A"),
+               },
                .driver_data = &quirk_asus_x401u,
        },
        {
@@ -131,7 +160,7 @@ static struct dmi_system_id asus_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "X55U"),
                },
-               .driver_data = &quirk_asus_x401u,
+               .driver_data = &quirk_asus_x55u,
        },
        {
                .callback = dmi_matched,
@@ -161,6 +190,8 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
 }
 
 static const struct key_entry asus_nb_wmi_keymap[] = {
+       { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+       { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
        { KE_KEY, 0x30, { KEY_VOLUMEUP } },
        { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
        { KE_KEY, 0x32, { KEY_MUTE } },
@@ -168,9 +199,9 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
        { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
        { KE_KEY, 0x41, { KEY_NEXTSONG } },
-       { KE_KEY, 0x43, { KEY_STOPCD } },
+       { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
        { KE_KEY, 0x45, { KEY_PLAYPAUSE } },
-       { KE_KEY, 0x4c, { KEY_MEDIA } },
+       { KE_KEY, 0x4c, { KEY_MEDIA } }, /* WMP Key */
        { KE_KEY, 0x50, { KEY_EMAIL } },
        { KE_KEY, 0x51, { KEY_WWW } },
        { KE_KEY, 0x55, { KEY_CALC } },
@@ -180,25 +211,42 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */
        { KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */
        { KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */
-       { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
-       { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
-       { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
-       { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
+       { KE_KEY, 0x60, { KEY_TOUCHPAD_ON } },
+       { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD only */
+       { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT only */
+       { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT */
+       { KE_KEY, 0x64, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV */
+       { KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
+       { KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
+       { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
        { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
-       { KE_KEY, 0x7D, { KEY_BLUETOOTH } },
-       { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
+       { KE_IGNORE, 0x6E, },  /* Low Battery notification */
+       { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+       { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
        { KE_KEY, 0x82, { KEY_CAMERA } },
-       { KE_KEY, 0x88, { KEY_RFKILL  } },
-       { KE_KEY, 0x8A, { KEY_PROG1 } },
+       { KE_KEY, 0x88, { KEY_RFKILL  } }, /* Radio Toggle Key */
+       { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+       { KE_KEY, 0x8C, { KEY_SWITCHVIDEOMODE } }, /* SDSP DVI only */
+       { KE_KEY, 0x8D, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + DVI */
+       { KE_KEY, 0x8E, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + DVI */
+       { KE_KEY, 0x8F, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + DVI */
+       { KE_KEY, 0x90, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + DVI */
+       { KE_KEY, 0x91, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + DVI */
+       { KE_KEY, 0x92, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + DVI */
+       { KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
        { KE_KEY, 0x95, { KEY_MEDIA } },
        { KE_KEY, 0x99, { KEY_PHONE } },
        { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
        { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
        { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
        { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
-       { KE_KEY, 0xb5, { KEY_CALC } },
-       { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
-       { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
+       { KE_KEY, 0xA4, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + HDMI */
+       { KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
+       { KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
+       { KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+       { KE_KEY, 0xB5, { KEY_CALC } },
+       { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+       { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
        { KE_END, 0},
 };
 
index f80ae4d..c11b242 100644 (file)
@@ -187,6 +187,8 @@ struct asus_wmi {
        struct device *hwmon_device;
        struct platform_device *platform_device;
 
+       struct led_classdev wlan_led;
+       int wlan_led_wk;
        struct led_classdev tpd_led;
        int tpd_led_wk;
        struct led_classdev kbd_led;
@@ -194,6 +196,7 @@ struct asus_wmi {
        struct workqueue_struct *led_workqueue;
        struct work_struct tpd_led_work;
        struct work_struct kbd_led_work;
+       struct work_struct wlan_led_work;
 
        struct asus_rfkill wlan;
        struct asus_rfkill bluetooth;
@@ -456,12 +459,65 @@ static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
        return value;
 }
 
+static int wlan_led_unknown_state(struct asus_wmi *asus)
+{
+       u32 result;
+
+       asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+       return result & ASUS_WMI_DSTS_UNKNOWN_BIT;
+}
+
+static int wlan_led_presence(struct asus_wmi *asus)
+{
+       u32 result;
+
+       asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+       return result & ASUS_WMI_DSTS_PRESENCE_BIT;
+}
+
+static void wlan_led_update(struct work_struct *work)
+{
+       int ctrl_param;
+       struct asus_wmi *asus;
+
+       asus = container_of(work, struct asus_wmi, wlan_led_work);
+
+       ctrl_param = asus->wlan_led_wk;
+       asus_wmi_set_devstate(ASUS_WMI_DEVID_WIRELESS_LED, ctrl_param, NULL);
+}
+
+static void wlan_led_set(struct led_classdev *led_cdev,
+                        enum led_brightness value)
+{
+       struct asus_wmi *asus;
+
+       asus = container_of(led_cdev, struct asus_wmi, wlan_led);
+
+       asus->wlan_led_wk = !!value;
+       queue_work(asus->led_workqueue, &asus->wlan_led_work);
+}
+
+static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
+{
+       struct asus_wmi *asus;
+       u32 result;
+
+       asus = container_of(led_cdev, struct asus_wmi, wlan_led);
+       asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+       return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
+}
+
 static void asus_wmi_led_exit(struct asus_wmi *asus)
 {
        if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
                led_classdev_unregister(&asus->kbd_led);
        if (!IS_ERR_OR_NULL(asus->tpd_led.dev))
                led_classdev_unregister(&asus->tpd_led);
+       if (!IS_ERR_OR_NULL(asus->wlan_led.dev))
+               led_classdev_unregister(&asus->wlan_led);
        if (asus->led_workqueue)
                destroy_workqueue(asus->led_workqueue);
 }
@@ -498,6 +554,23 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
 
                rv = led_classdev_register(&asus->platform_device->dev,
                                           &asus->kbd_led);
+               if (rv)
+                       goto error;
+       }
+
+       if (wlan_led_presence(asus)) {
+               INIT_WORK(&asus->wlan_led_work, wlan_led_update);
+
+               asus->wlan_led.name = "asus::wlan";
+               asus->wlan_led.brightness_set = wlan_led_set;
+               if (!wlan_led_unknown_state(asus))
+                       asus->wlan_led.brightness_get = wlan_led_get;
+               asus->wlan_led.flags = LED_CORE_SUSPENDRESUME;
+               asus->wlan_led.max_brightness = 1;
+               asus->wlan_led.default_trigger = "asus-wlan";
+
+               rv = led_classdev_register(&asus->platform_device->dev,
+                                          &asus->wlan_led);
        }
 
 error:
@@ -813,6 +886,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
        if (!*rfkill)
                return -EINVAL;
 
+       if (dev_id == ASUS_WMI_DEVID_WLAN)
+               rfkill_set_led_trigger_name(*rfkill, "asus-wlan");
+
        rfkill_init_sw_state(*rfkill, !result);
        result = rfkill_register(*rfkill);
        if (result) {
@@ -1265,6 +1341,18 @@ static void asus_wmi_backlight_exit(struct asus_wmi *asus)
        asus->backlight_device = NULL;
 }
 
+static int is_display_toggle(int code)
+{
+       /* display toggle keys */
+       if ((code >= 0x61 && code <= 0x67) ||
+           (code >= 0x8c && code <= 0x93) ||
+           (code >= 0xa0 && code <= 0xa7) ||
+           (code >= 0xd0 && code <= 0xd5))
+               return 1;
+
+       return 0;
+}
+
 static void asus_wmi_notify(u32 value, void *context)
 {
        struct asus_wmi *asus = context;
@@ -1298,16 +1386,24 @@ static void asus_wmi_notify(u32 value, void *context)
        }
 
        if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
-               code = NOTIFY_BRNUP_MIN;
+               code = ASUS_WMI_BRN_UP;
        else if (code >= NOTIFY_BRNDOWN_MIN &&
                 code <= NOTIFY_BRNDOWN_MAX)
-               code = NOTIFY_BRNDOWN_MIN;
+               code = ASUS_WMI_BRN_DOWN;
 
-       if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
-               if (!acpi_video_backlight_support())
+       if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
+               if (!acpi_video_backlight_support()) {
                        asus_wmi_backlight_notify(asus, orig_code);
-       } else if (!sparse_keymap_report_event(asus->inputdev, code,
-                                              key_value, autorelease))
+                       goto exit;
+               }
+       }
+
+       if (is_display_toggle(code) &&
+           asus->driver->quirks->no_display_toggle)
+               goto exit;
+
+       if (!sparse_keymap_report_event(asus->inputdev, code,
+                                       key_value, autorelease))
                pr_info("Unknown key %x pressed\n", code);
 
 exit:
index 4c9bd38..4da4c8b 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/platform_device.h>
 
 #define ASUS_WMI_KEY_IGNORE (-1)
+#define ASUS_WMI_BRN_DOWN      0x20
+#define ASUS_WMI_BRN_UP                0x2f
 
 struct module;
 struct key_entry;
@@ -41,6 +43,13 @@ struct quirk_entry {
        bool store_backlight_power;
        bool wmi_backlight_power;
        int wapf;
+       /*
+        * For machines with AMD graphic chips, it will send out WMI event
+        * and ACPI interrupt at the same time while hitting the hotkey.
+        * To simplify the problem, we just have to ignore the WMI event,
+        * and let the ACPI interrupt to send out the key event.
+        */
+       int no_display_toggle;
 };
 
 struct asus_wmi_driver {
diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
new file mode 100644 (file)
index 0000000..93d6680
--- /dev/null
@@ -0,0 +1,371 @@
+/*
+ *  chromeos_laptop.c - Driver to instantiate Chromebook i2c/smbus devices.
+ *
+ *  Author : Benson Leung <bleung@chromium.org>
+ *
+ *  Copyright (C) 2012 Google, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#define ATMEL_TP_I2C_ADDR      0x4b
+#define ATMEL_TP_I2C_BL_ADDR   0x25
+#define ATMEL_TS_I2C_ADDR      0x4a
+#define ATMEL_TS_I2C_BL_ADDR   0x26
+#define CYAPA_TP_I2C_ADDR      0x67
+#define ISL_ALS_I2C_ADDR       0x44
+#define TAOS_ALS_I2C_ADDR      0x29
+
+static struct i2c_client *als;
+static struct i2c_client *tp;
+static struct i2c_client *ts;
+
+const char *i2c_adapter_names[] = {
+       "SMBus I801 adapter",
+       "i915 gmbus vga",
+       "i915 gmbus panel",
+};
+
+/* Keep this enum consistent with i2c_adapter_names */
+enum i2c_adapter_type {
+       I2C_ADAPTER_SMBUS = 0,
+       I2C_ADAPTER_VGADDC,
+       I2C_ADAPTER_PANEL,
+};
+
+static struct i2c_board_info __initdata cyapa_device = {
+       I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+       .flags          = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_board_info __initdata isl_als_device = {
+       I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata tsl2583_als_device = {
+       I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata tsl2563_als_device = {
+       I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata atmel_224s_tp_device = {
+       I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
+       .platform_data = NULL,
+       .flags          = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_board_info __initdata atmel_1664s_device = {
+       I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
+       .platform_data = NULL,
+       .flags          = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_client __init *__add_probed_i2c_device(
+               const char *name,
+               int bus,
+               struct i2c_board_info *info,
+               const unsigned short *addrs)
+{
+       const struct dmi_device *dmi_dev;
+       const struct dmi_dev_onboard *dev_data;
+       struct i2c_adapter *adapter;
+       struct i2c_client *client;
+
+       if (bus < 0)
+               return NULL;
+       /*
+        * If a name is specified, look for irq platform information stashed
+        * in DMI_DEV_TYPE_DEV_ONBOARD by the Chrome OS custom system firmware.
+        */
+       if (name) {
+               dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, name, NULL);
+               if (!dmi_dev) {
+                       pr_err("%s failed to dmi find device %s.\n",
+                              __func__,
+                              name);
+                       return NULL;
+               }
+               dev_data = (struct dmi_dev_onboard *)dmi_dev->device_data;
+               if (!dev_data) {
+                       pr_err("%s failed to get data from dmi for %s.\n",
+                              __func__, name);
+                       return NULL;
+               }
+               info->irq = dev_data->instance;
+       }
+
+       adapter = i2c_get_adapter(bus);
+       if (!adapter) {
+               pr_err("%s failed to get i2c adapter %d.\n", __func__, bus);
+               return NULL;
+       }
+
+       /* add the i2c device */
+       client = i2c_new_probed_device(adapter, info, addrs, NULL);
+       if (!client)
+               pr_err("%s failed to register device %d-%02x\n",
+                      __func__, bus, info->addr);
+       else
+               pr_debug("%s added i2c device %d-%02x\n",
+                        __func__, bus, info->addr);
+
+       i2c_put_adapter(adapter);
+       return client;
+}
+
+static int __init __find_i2c_adap(struct device *dev, void *data)
+{
+       const char *name = data;
+       static const char *prefix = "i2c-";
+       struct i2c_adapter *adapter;
+       if (strncmp(dev_name(dev), prefix, strlen(prefix)) != 0)
+               return 0;
+       adapter = to_i2c_adapter(dev);
+       return (strncmp(adapter->name, name, strlen(name)) == 0);
+}
+
+static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
+{
+       struct device *dev = NULL;
+       struct i2c_adapter *adapter;
+       const char *name = i2c_adapter_names[type];
+       /* find the adapter by name */
+       dev = bus_find_device(&i2c_bus_type, NULL, (void *)name,
+                             __find_i2c_adap);
+       if (!dev) {
+               pr_err("%s: i2c adapter %s not found on system.\n", __func__,
+                      name);
+               return -ENODEV;
+       }
+       adapter = to_i2c_adapter(dev);
+       return adapter->nr;
+}
+
+/*
+ * Takes a list of addresses in addrs as such :
+ * { addr1, ... , addrn, I2C_CLIENT_END };
+ * add_probed_i2c_device will use i2c_new_probed_device
+ * and probe for devices at all of the addresses listed.
+ * Returns NULL if no devices found.
+ * See Documentation/i2c/instantiating-devices for more information.
+ */
+static __init struct i2c_client *add_probed_i2c_device(
+               const char *name,
+               enum i2c_adapter_type type,
+               struct i2c_board_info *info,
+               const unsigned short *addrs)
+{
+       return __add_probed_i2c_device(name,
+                                      find_i2c_adapter_num(type),
+                                      info,
+                                      addrs);
+}
+
+/*
+ * Probes for a device at a single address, the one provided by
+ * info->addr.
+ * Returns NULL if no device found.
+ */
+static __init struct i2c_client *add_i2c_device(const char *name,
+                                               enum i2c_adapter_type type,
+                                               struct i2c_board_info *info)
+{
+       const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
+       return __add_probed_i2c_device(name,
+                                      find_i2c_adapter_num(type),
+                                      info,
+                                      addr_list);
+}
+
+
+static struct i2c_client __init *add_smbus_device(const char *name,
+                                                 struct i2c_board_info *info)
+{
+       return add_i2c_device(name, I2C_ADAPTER_SMBUS, info);
+}
+
+static int __init setup_cyapa_smbus_tp(const struct dmi_system_id *id)
+{
+       /* add cyapa touchpad on smbus */
+       tp = add_smbus_device("trackpad", &cyapa_device);
+       return 0;
+}
+
+static int __init setup_atmel_224s_tp(const struct dmi_system_id *id)
+{
+       const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
+                                            ATMEL_TP_I2C_ADDR,
+                                            I2C_CLIENT_END };
+
+       /* add atmel mxt touchpad on VGA DDC GMBus */
+       tp = add_probed_i2c_device("trackpad", I2C_ADAPTER_VGADDC,
+                                  &atmel_224s_tp_device, addr_list);
+       return 0;
+}
+
+static int __init setup_atmel_1664s_ts(const struct dmi_system_id *id)
+{
+       const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
+                                            ATMEL_TS_I2C_ADDR,
+                                            I2C_CLIENT_END };
+
+       /* add atmel mxt touch device on PANEL GMBus */
+       ts = add_probed_i2c_device("touchscreen", I2C_ADAPTER_PANEL,
+                                  &atmel_1664s_device, addr_list);
+       return 0;
+}
+
+
+static int __init setup_isl29018_als(const struct dmi_system_id *id)
+{
+       /* add isl29018 light sensor */
+       als = add_smbus_device("lightsensor", &isl_als_device);
+       return 0;
+}
+
+/*
+ * DMI callback: register the isl29023 ambient light sensor on the panel
+ * GMBus (same board info as the isl29018 variant).
+ */
+static int __init setup_isl29023_als(const struct dmi_system_id *id)
+{
+       /* add isl29023 light sensor on Panel GMBus */
+       als = add_i2c_device("lightsensor", I2C_ADAPTER_PANEL,
+                            &isl_als_device);
+       return 0;
+}
+
+/*
+ * DMI callback: register the tsl2583 ambient light sensor on SMBus.
+ * NOTE(review): name is NULL here, unlike the other devices which pass
+ * a label used in probe-failure messages — confirm this is intended.
+ */
+static int __init setup_tsl2583_als(const struct dmi_system_id *id)
+{
+       /* add tsl2583 light sensor on smbus */
+       als = add_smbus_device(NULL, &tsl2583_als_device);
+       return 0;
+}
+
+/*
+ * DMI callback: register the tsl2563 ambient light sensor on SMBus.
+ * NOTE(review): passes a NULL name like setup_tsl2583_als — confirm.
+ */
+static int __init setup_tsl2563_als(const struct dmi_system_id *id)
+{
+       /* add tsl2563 light sensor on smbus */
+       als = add_smbus_device(NULL, &tsl2563_als_device);
+       return 0;
+}
+
+/*
+ * Map DMI identity (vendor/product) to the setup callback that registers
+ * that machine's peripherals.  All callbacks return 0, so every matching
+ * entry runs — a single machine (e.g. "Link") can register a touchscreen,
+ * a touchpad and a light sensor from separate entries.
+ */
+static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
+       {
+               .ident = "Samsung Series 5 550 - Touchpad",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+               },
+               .callback = setup_cyapa_smbus_tp,
+       },
+       {
+               .ident = "Chromebook Pixel - Touchscreen",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+               },
+               .callback = setup_atmel_1664s_ts,
+       },
+       {
+               .ident = "Chromebook Pixel - Touchpad",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+               },
+               .callback = setup_atmel_224s_tp,
+       },
+       {
+               .ident = "Samsung Series 5 550 - Light Sensor",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+               },
+               .callback = setup_isl29018_als,
+       },
+       {
+               .ident = "Chromebook Pixel - Light Sensor",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+               },
+               .callback = setup_isl29023_als,
+       },
+       {
+               .ident = "Acer C7 Chromebook - Touchpad",
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
+               },
+               .callback = setup_cyapa_smbus_tp,
+       },
+       {
+               .ident = "HP Pavilion 14 Chromebook - Touchpad",
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
+               },
+               .callback = setup_cyapa_smbus_tp,
+       },
+       {
+               .ident = "Samsung Series 5 - Light Sensor",
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+               },
+               .callback = setup_tsl2583_als,
+       },
+       {
+               .ident = "Cr-48 - Light Sensor",
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+               },
+               .callback = setup_tsl2563_als,
+       },
+       {
+               .ident = "Acer AC700 - Light Sensor",
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+               },
+               .callback = setup_tsl2563_als,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
+
+/*
+ * Module init: match the running machine against the DMI table.  The
+ * matched entries' callbacks register the i2c peripherals as a side
+ * effect; a machine with no match gets -ENODEV.
+ */
+static int __init chromeos_laptop_init(void)
+{
+       if (!dmi_check_system(chromeos_laptop_dmi_table)) {
+               pr_debug("%s unsupported system.\n", __func__);
+               return -ENODEV;
+       }
+       return 0;
+}
+
+/*
+ * Module exit: unregister whichever of the three i2c clients (light
+ * sensor, trackpad, touchscreen) the DMI callbacks created at init.
+ */
+static void __exit chromeos_laptop_exit(void)
+{
+       if (als)
+               i2c_unregister_device(als);
+       if (tp)
+               i2c_unregister_device(tp);
+       if (ts)
+               i2c_unregister_device(ts);
+}
+
+module_init(chromeos_laptop_init);
+module_exit(chromeos_laptop_exit);
+
+MODULE_DESCRIPTION("Chrome OS Laptop driver");
+MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
+MODULE_LICENSE("GPL");
index 60cb76a..af67e6e 100644 (file)
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(hotplug_wireless,
 #define HOME_RELEASE   0xe5
 
 static const struct key_entry eeepc_wmi_keymap[] = {
+       { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+       { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
        /* Sleep already handled via generic ACPI code */
        { KE_KEY, 0x30, { KEY_VOLUMEUP } },
        { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
index 1dde7ac..45cacf7 100644 (file)
@@ -60,6 +60,7 @@ enum hp_wmi_radio {
        HPWMI_WIFI = 0,
        HPWMI_BLUETOOTH = 1,
        HPWMI_WWAN = 2,
+       HPWMI_GPS = 3,
 };
 
 enum hp_wmi_event_ids {
@@ -72,10 +73,6 @@ enum hp_wmi_event_ids {
        HPWMI_LOCK_SWITCH = 7,
 };
 
-static int hp_wmi_bios_setup(struct platform_device *device);
-static int __exit hp_wmi_bios_remove(struct platform_device *device);
-static int hp_wmi_resume_handler(struct device *device);
-
 struct bios_args {
        u32 signature;
        u32 command;
@@ -137,6 +134,7 @@ static const struct key_entry hp_wmi_keymap[] = {
        { KE_KEY, 0x2142, { KEY_MEDIA } },
        { KE_KEY, 0x213b, { KEY_INFO } },
        { KE_KEY, 0x2169, { KEY_DIRECTION } },
+       { KE_KEY, 0x216a, { KEY_SETUP } },
        { KE_KEY, 0x231b, { KEY_HELP } },
        { KE_END, 0 }
 };
@@ -147,6 +145,7 @@ static struct platform_device *hp_wmi_platform_dev;
 static struct rfkill *wifi_rfkill;
 static struct rfkill *bluetooth_rfkill;
 static struct rfkill *wwan_rfkill;
+static struct rfkill *gps_rfkill;
 
 struct rfkill2_device {
        u8 id;
@@ -157,21 +156,6 @@ struct rfkill2_device {
 static int rfkill2_count;
 static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
 
-static const struct dev_pm_ops hp_wmi_pm_ops = {
-       .resume  = hp_wmi_resume_handler,
-       .restore  = hp_wmi_resume_handler,
-};
-
-static struct platform_driver hp_wmi_driver = {
-       .driver = {
-               .name = "hp-wmi",
-               .owner = THIS_MODULE,
-               .pm = &hp_wmi_pm_ops,
-       },
-       .probe = hp_wmi_bios_setup,
-       .remove = hp_wmi_bios_remove,
-};
-
 /*
  * hp_wmi_perform_query
  *
@@ -543,6 +527,10 @@ static void hp_wmi_notify(u32 value, void *context)
                        rfkill_set_states(wwan_rfkill,
                                          hp_wmi_get_sw_state(HPWMI_WWAN),
                                          hp_wmi_get_hw_state(HPWMI_WWAN));
+               if (gps_rfkill)
+                       rfkill_set_states(gps_rfkill,
+                                         hp_wmi_get_sw_state(HPWMI_GPS),
+                                         hp_wmi_get_hw_state(HPWMI_GPS));
                break;
        case HPWMI_CPU_BATTERY_THROTTLE:
                pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
@@ -670,7 +658,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
                                           (void *) HPWMI_WWAN);
                if (!wwan_rfkill) {
                        err = -ENOMEM;
-                       goto register_bluetooth_error;
+                       goto register_gps_error;
                }
                rfkill_init_sw_state(wwan_rfkill,
                                     hp_wmi_get_sw_state(HPWMI_WWAN));
@@ -681,10 +669,33 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
                        goto register_wwan_err;
        }
 
+       if (wireless & 0x8) {
+               gps_rfkill = rfkill_alloc("hp-gps", &device->dev,
+                                               RFKILL_TYPE_GPS,
+                                               &hp_wmi_rfkill_ops,
+                                               (void *) HPWMI_GPS);
+               if (!gps_rfkill) {
+                       err = -ENOMEM;
+                       goto register_bluetooth_error;
+               }
+               rfkill_init_sw_state(gps_rfkill,
+                                    hp_wmi_get_sw_state(HPWMI_GPS));
+               /* Initialize the GPS rfkill's own hw state; the original
+                * patch mistakenly wrote the GPS state into
+                * bluetooth_rfkill here.
+                */
+               rfkill_set_hw_state(gps_rfkill,
+                                   hp_wmi_get_hw_state(HPWMI_GPS));
+               err = rfkill_register(gps_rfkill);
+               if (err)
+                       goto register_gps_error;
+       }
+
        return 0;
 register_wwan_err:
        rfkill_destroy(wwan_rfkill);
        wwan_rfkill = NULL;
+       if (gps_rfkill)
+               rfkill_unregister(gps_rfkill);
+register_gps_error:
+       rfkill_destroy(gps_rfkill);
+       gps_rfkill = NULL;
        if (bluetooth_rfkill)
                rfkill_unregister(bluetooth_rfkill);
 register_bluetooth_error:
@@ -729,6 +740,10 @@ static int hp_wmi_rfkill2_setup(struct platform_device *device)
                        type = RFKILL_TYPE_WWAN;
                        name = "hp-wwan";
                        break;
+               case HPWMI_GPS:
+                       type = RFKILL_TYPE_GPS;
+                       name = "hp-gps";
+                       break;
                default:
                        pr_warn("unknown device type 0x%x\n",
                                state.device[i].radio_type);
@@ -778,7 +793,7 @@ fail:
        return err;
 }
 
-static int hp_wmi_bios_setup(struct platform_device *device)
+static int __init hp_wmi_bios_setup(struct platform_device *device)
 {
        int err;
 
@@ -786,6 +801,7 @@ static int hp_wmi_bios_setup(struct platform_device *device)
        wifi_rfkill = NULL;
        bluetooth_rfkill = NULL;
        wwan_rfkill = NULL;
+       gps_rfkill = NULL;
        rfkill2_count = 0;
 
        if (hp_wmi_rfkill_setup(device))
@@ -835,6 +851,10 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
                rfkill_unregister(wwan_rfkill);
                rfkill_destroy(wwan_rfkill);
        }
+       if (gps_rfkill) {
+               rfkill_unregister(gps_rfkill);
+               rfkill_destroy(gps_rfkill);
+       }
 
        return 0;
 }
@@ -870,51 +890,70 @@ static int hp_wmi_resume_handler(struct device *device)
                rfkill_set_states(wwan_rfkill,
                                  hp_wmi_get_sw_state(HPWMI_WWAN),
                                  hp_wmi_get_hw_state(HPWMI_WWAN));
+       if (gps_rfkill)
+               rfkill_set_states(gps_rfkill,
+                                 hp_wmi_get_sw_state(HPWMI_GPS),
+                                 hp_wmi_get_hw_state(HPWMI_GPS));
 
        return 0;
 }
 
+static const struct dev_pm_ops hp_wmi_pm_ops = {
+       .resume  = hp_wmi_resume_handler,
+       .restore  = hp_wmi_resume_handler,
+};
+
+static struct platform_driver hp_wmi_driver = {
+       .driver = {
+               .name = "hp-wmi",
+               .owner = THIS_MODULE,
+               .pm = &hp_wmi_pm_ops,
+       },
+       .remove = __exit_p(hp_wmi_bios_remove),
+};
+
 static int __init hp_wmi_init(void)
 {
        int err;
        int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
        int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
 
+       if (!bios_capable && !event_capable)
+               return -ENODEV;
+
        if (event_capable) {
                err = hp_wmi_input_setup();
                if (err)
                        return err;
+               
+               //Enable magic for hotkeys that run on the SMBus
+               ec_write(0xe6,0x6e);
        }
 
        if (bios_capable) {
-               err = platform_driver_register(&hp_wmi_driver);
-               if (err)
-                       goto err_driver_reg;
-               hp_wmi_platform_dev = platform_device_alloc("hp-wmi", -1);
-               if (!hp_wmi_platform_dev) {
-                       err = -ENOMEM;
-                       goto err_device_alloc;
+               hp_wmi_platform_dev =
+                       platform_device_register_simple("hp-wmi", -1, NULL, 0);
+               if (IS_ERR(hp_wmi_platform_dev)) {
+                       err = PTR_ERR(hp_wmi_platform_dev);
+                       goto err_destroy_input;
                }
-               err = platform_device_add(hp_wmi_platform_dev);
+
+               err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);
                if (err)
-                       goto err_device_add;
+                       goto err_unregister_device;
        }
 
-       if (!bios_capable && !event_capable)
-               return -ENODEV;
-
        return 0;
 
-err_device_add:
-       platform_device_put(hp_wmi_platform_dev);
-err_device_alloc:
-       platform_driver_unregister(&hp_wmi_driver);
-err_driver_reg:
+err_unregister_device:
+       platform_device_unregister(hp_wmi_platform_dev);
+err_destroy_input:
        if (event_capable)
                hp_wmi_input_destroy();
 
        return err;
 }
+module_init(hp_wmi_init);
 
 static void __exit hp_wmi_exit(void)
 {
@@ -926,6 +965,4 @@ static void __exit hp_wmi_exit(void)
                platform_driver_unregister(&hp_wmi_driver);
        }
 }
-
-module_init(hp_wmi_init);
 module_exit(hp_wmi_exit);
index 2111dbb..6b22938 100644 (file)
 #define MSI_STANDARD_EC_SCM_LOAD_ADDRESS       0x2d
 #define MSI_STANDARD_EC_SCM_LOAD_MASK          (1 << 0)
 
-#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS       0xe4
+#define MSI_STANDARD_EC_FUNCTIONS_ADDRESS      0xe4
+/* Power LED is orange - Turbo mode */
+#define MSI_STANDARD_EC_TURBO_MASK             (1 << 1)
+/* Power LED is green - ECO mode */
+#define MSI_STANDARD_EC_ECO_MASK               (1 << 3)
+/* Touchpad is turned on */
 #define MSI_STANDARD_EC_TOUCHPAD_MASK          (1 << 4)
+/* If this bit != bit 1, turbo mode can't be toggled */
+#define MSI_STANDARD_EC_TURBO_COOLDOWN_MASK    (1 << 7)
+
+#define MSI_STANDARD_EC_FAN_ADDRESS            0x33
+/* If zero, fan rotates at maximal speed */
+#define MSI_STANDARD_EC_AUTOFAN_MASK           (1 << 0)
 
 #ifdef CONFIG_PM_SLEEP
 static int msi_laptop_resume(struct device *device);
@@ -108,23 +119,38 @@ static const struct key_entry msi_laptop_keymap[] = {
 
 static struct input_dev *msi_laptop_input_dev;
 
-static bool old_ec_model;
 static int wlan_s, bluetooth_s, threeg_s;
 static int threeg_exists;
-
-/* Some MSI 3G netbook only have one fn key to control Wlan/Bluetooth/3G,
- * those netbook will load the SCM (windows app) to disable the original
- * Wlan/Bluetooth control by BIOS when user press fn key, then control
- * Wlan/Bluetooth/3G by SCM (software control by OS). Without SCM, user
- * cann't on/off 3G module on those 3G netbook.
- * On Linux, msi-laptop driver will do the same thing to disable the
- * original BIOS control, then might need use HAL or other userland
- * application to do the software control that simulate with SCM.
- * e.g. MSI N034 netbook
- */
-static bool load_scm_model;
 static struct rfkill *rfk_wlan, *rfk_bluetooth, *rfk_threeg;
 
+/* MSI laptop quirks */
+struct quirk_entry {
+       /* EC speaks the old command interface (ec_transaction with
+        * MSI_EC_COMMAND_*) instead of the standard EC registers.
+        */
+       bool old_ec_model;
+
+       /* Some MSI 3G netbook only have one fn key to control
+        * Wlan/Bluetooth/3G, those netbook will load the SCM (windows app) to
+        * disable the original Wlan/Bluetooth control by BIOS when user press
+        * fn key, then control Wlan/Bluetooth/3G by SCM (software control by
+        * OS). Without SCM, user cann't on/off 3G module on those 3G netbook.
+        * On Linux, msi-laptop driver will do the same thing to disable the
+        * original BIOS control, then might need use HAL or other userland
+        * application to do the software control that simulate with SCM.
+        * e.g. MSI N034 netbook
+        */
+       bool load_scm_model;
+
+       /* Some MSI laptops need delay before reading from EC */
+       bool ec_delay;
+
+       /* Some MSI Wind netbooks (e.g. MSI Wind U100) need loading SCM to get
+        * some features working (e.g. ECO mode), but we cannot change
+        * Wlan/Bluetooth state in software and we can only read its state.
+        */
+       bool ec_read_only;
+};
+
+/* Active quirk set; assigned by dmi_check_cb from the matched DMI
+ * entry's driver_data (with a fallback chosen in msi_init).
+ */
+static struct quirk_entry *quirks;
+
 /* Hardware access */
 
 static int set_lcd_level(int level)
@@ -195,10 +221,13 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
        if (sscanf(buf, "%i", &status) != 1 || (status < 0 || status > 1))
                return -EINVAL;
 
+       if (quirks->ec_read_only)
+               return -EOPNOTSUPP;
+
        /* read current device state */
        result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
        if (result < 0)
-               return -EINVAL;
+               return result;
 
        if (!!(rdata & mask) != status) {
                /* reverse device bit */
@@ -209,7 +238,7 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
 
                result = ec_write(MSI_STANDARD_EC_COMMAND_ADDRESS, wdata);
                if (result < 0)
-                       return -EINVAL;
+                       return result;
        }
 
        return count;
@@ -222,7 +251,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
 
        result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
        if (result < 0)
-               return -1;
+               return result;
 
        if (wlan)
                *wlan = !!(rdata & 8);
@@ -240,7 +269,7 @@ static int get_wireless_state_ec_standard(void)
 
        result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
        if (result < 0)
-               return -1;
+               return result;
 
        wlan_s = !!(rdata & MSI_STANDARD_EC_WLAN_MASK);
 
@@ -258,7 +287,7 @@ static int get_threeg_exists(void)
 
        result = ec_read(MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS, &rdata);
        if (result < 0)
-               return -1;
+               return result;
 
        threeg_exists = !!(rdata & MSI_STANDARD_EC_3G_MASK);
 
@@ -291,9 +320,9 @@ static ssize_t show_wlan(struct device *dev,
        struct device_attribute *attr, char *buf)
 {
 
-       int ret, enabled;
+       int ret, enabled = 0;
 
-       if (old_ec_model) {
+       if (quirks->old_ec_model) {
                ret = get_wireless_state(&enabled, NULL);
        } else {
                ret = get_wireless_state_ec_standard();
@@ -315,9 +344,9 @@ static ssize_t show_bluetooth(struct device *dev,
        struct device_attribute *attr, char *buf)
 {
 
-       int ret, enabled;
+       int ret, enabled = 0;
 
-       if (old_ec_model) {
+       if (quirks->old_ec_model) {
                ret = get_wireless_state(NULL, &enabled);
        } else {
                ret = get_wireless_state_ec_standard();
@@ -342,8 +371,8 @@ static ssize_t show_threeg(struct device *dev,
        int ret;
 
        /* old msi ec not support 3G */
-       if (old_ec_model)
-               return -1;
+       if (quirks->old_ec_model)
+               return -ENODEV;
 
        ret = get_wireless_state_ec_standard();
        if (ret < 0)
@@ -417,18 +446,119 @@ static ssize_t store_auto_brightness(struct device *dev,
        return count;
 }
 
+/* sysfs show: 1 if the touchpad is enabled (EC functions register, bit 4). */
+static ssize_t show_touchpad(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       u8 rdata;
+       int result;
+
+       result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+       if (result < 0)
+               return result;
+
+       return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK));
+}
+
+/* sysfs show: 1 if turbo mode is active (power LED orange; bit 1). */
+static ssize_t show_turbo(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       u8 rdata;
+       int result;
+
+       result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+       if (result < 0)
+               return result;
+
+       return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TURBO_MASK));
+}
+
+/* sysfs show: 1 if ECO mode is active (power LED green; bit 3). */
+static ssize_t show_eco(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       u8 rdata;
+       int result;
+
+       result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+       if (result < 0)
+               return result;
+
+       return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_ECO_MASK));
+}
+
+/*
+ * sysfs show: packed value — bit 0 = turbo state, bit 1 = cooldown flag.
+ * Per the mask comment, turbo can only be toggled when the two EC bits
+ * agree.
+ */
+static ssize_t show_turbo_cooldown(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       u8 rdata;
+       int result;
+
+       result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+       if (result < 0)
+               return result;
+
+       return sprintf(buf, "%i\n", (!!(rdata & MSI_STANDARD_EC_TURBO_MASK)) |
+               (!!(rdata & MSI_STANDARD_EC_TURBO_COOLDOWN_MASK) << 1));
+}
+
+/* sysfs show: 1 = automatic fan control; 0 = fan at maximal speed. */
+static ssize_t show_auto_fan(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       u8 rdata;
+       int result;
+
+       result = ec_read(MSI_STANDARD_EC_FAN_ADDRESS, &rdata);
+       if (result < 0)
+               return result;
+
+       return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_AUTOFAN_MASK));
+}
+
+/*
+ * sysfs store: accepts only 0 or 1 and writes it as the whole fan
+ * register value.
+ * NOTE(review): this is not a read-modify-write — any other bits in EC
+ * register 0x33 are cleared; confirm they are don't-care.
+ */
+static ssize_t store_auto_fan(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+
+       int enable, result;
+
+       if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1)))
+               return -EINVAL;
+
+       result = ec_write(MSI_STANDARD_EC_FAN_ADDRESS, enable);
+       if (result < 0)
+               return result;
+
+       return count;
+}
+
 static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
 static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness,
                   store_auto_brightness);
 static DEVICE_ATTR(bluetooth, 0444, show_bluetooth, NULL);
 static DEVICE_ATTR(wlan, 0444, show_wlan, NULL);
 static DEVICE_ATTR(threeg, 0444, show_threeg, NULL);
+static DEVICE_ATTR(touchpad, 0444, show_touchpad, NULL);
+static DEVICE_ATTR(turbo_mode, 0444, show_turbo, NULL);
+static DEVICE_ATTR(eco_mode, 0444, show_eco, NULL);
+static DEVICE_ATTR(turbo_cooldown, 0444, show_turbo_cooldown, NULL);
+static DEVICE_ATTR(auto_fan, 0644, show_auto_fan, store_auto_fan);
 
 static struct attribute *msipf_attributes[] = {
-       &dev_attr_lcd_level.attr,
-       &dev_attr_auto_brightness.attr,
        &dev_attr_bluetooth.attr,
        &dev_attr_wlan.attr,
+       &dev_attr_touchpad.attr,
+       &dev_attr_turbo_mode.attr,
+       &dev_attr_eco_mode.attr,
+       &dev_attr_turbo_cooldown.attr,
+       &dev_attr_auto_fan.attr,
+       NULL
+};
+
+static struct attribute *msipf_old_attributes[] = {
+       &dev_attr_lcd_level.attr,
+       &dev_attr_auto_brightness.attr,
        NULL
 };
 
@@ -436,6 +566,10 @@ static struct attribute_group msipf_attribute_group = {
        .attrs = msipf_attributes
 };
 
+static struct attribute_group msipf_old_attribute_group = {
+       .attrs = msipf_old_attributes
+};
+
 static struct platform_driver msipf_driver = {
        .driver = {
                .name = "msi-laptop-pf",
@@ -448,9 +582,26 @@ static struct platform_device *msipf_device;
 
 /* Initialization */
 
-static int dmi_check_cb(const struct dmi_system_id *id)
+/* Models whose EC uses the old command interface. */
+static struct quirk_entry quirk_old_ec_model = {
+       .old_ec_model = true,
+};
+
+/* SCM-loading netbooks; their EC also needs a delay before reads. */
+static struct quirk_entry quirk_load_scm_model = {
+       .load_scm_model = true,
+       .ec_delay = true,
+};
+
+/* SCM-loading netbooks whose wireless EC state is read-only (e.g. U100). */
+static struct quirk_entry quirk_load_scm_ro_model = {
+       .load_scm_model = true,
+       .ec_read_only = true,
+};
+
+static int dmi_check_cb(const struct dmi_system_id *dmi)
 {
-       pr_info("Identified laptop model '%s'\n", id->ident);
+       pr_info("Identified laptop model '%s'\n", dmi->ident);
+
+       quirks = dmi->driver_data;
+
        return 1;
 }
 
@@ -464,6 +615,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
                        DMI_MATCH(DMI_CHASSIS_VENDOR,
                                  "MICRO-STAR INT'L CO.,LTD")
                },
+               .driver_data = &quirk_old_ec_model,
                .callback = dmi_check_cb
        },
        {
@@ -474,6 +626,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "0581"),
                        DMI_MATCH(DMI_BOARD_NAME, "MS-1058")
                },
+               .driver_data = &quirk_old_ec_model,
                .callback = dmi_check_cb
        },
        {
@@ -484,6 +637,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
                        DMI_MATCH(DMI_BOARD_NAME, "MS-1412")
                },
+               .driver_data = &quirk_old_ec_model,
                .callback = dmi_check_cb
        },
        {
@@ -495,12 +649,9 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
                        DMI_MATCH(DMI_CHASSIS_VENDOR,
                                  "MICRO-STAR INT'L CO.,LTD")
                },
+               .driver_data = &quirk_old_ec_model,
                .callback = dmi_check_cb
        },
-       { }
-};
-
-static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
        {
                .ident = "MSI N034",
                .matches = {
@@ -510,6 +661,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
                        DMI_MATCH(DMI_CHASSIS_VENDOR,
                        "MICRO-STAR INTERNATIONAL CO., LTD")
                },
+               .driver_data = &quirk_load_scm_model,
                .callback = dmi_check_cb
        },
        {
@@ -521,6 +673,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
                        DMI_MATCH(DMI_CHASSIS_VENDOR,
                        "MICRO-STAR INTERNATIONAL CO., LTD")
                },
+               .driver_data = &quirk_load_scm_model,
                .callback = dmi_check_cb
        },
        {
@@ -530,6 +683,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
                                "MICRO-STAR INTERNATIONAL CO., LTD"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "MS-N014"),
                },
+               .driver_data = &quirk_load_scm_model,
                .callback = dmi_check_cb
        },
        {
@@ -539,6 +693,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
                                "Micro-Star International"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "CR620"),
                },
+               .driver_data = &quirk_load_scm_model,
                .callback = dmi_check_cb
        },
        {
@@ -548,6 +703,17 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
                                "Micro-Star International Co., Ltd."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
                },
+               .driver_data = &quirk_load_scm_model,
+               .callback = dmi_check_cb
+       },
+       {
+               .ident = "MSI U90/U100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR,
+                               "MICRO-STAR INTERNATIONAL CO., LTD"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "U90/U100"),
+               },
+               .driver_data = &quirk_load_scm_ro_model,
                .callback = dmi_check_cb
        },
        { }
@@ -560,32 +726,26 @@ static int rfkill_bluetooth_set(void *data, bool blocked)
         * blocked == false is on
         * blocked == true is off
         */
-       if (blocked)
-               set_device_state("0", 0, MSI_STANDARD_EC_BLUETOOTH_MASK);
-       else
-               set_device_state("1", 0, MSI_STANDARD_EC_BLUETOOTH_MASK);
+       int result = set_device_state(blocked ? "0" : "1", 0,
+                       MSI_STANDARD_EC_BLUETOOTH_MASK);
 
-       return 0;
+       return min(result, 0);
 }
 
 static int rfkill_wlan_set(void *data, bool blocked)
 {
-       if (blocked)
-               set_device_state("0", 0, MSI_STANDARD_EC_WLAN_MASK);
-       else
-               set_device_state("1", 0, MSI_STANDARD_EC_WLAN_MASK);
+       int result = set_device_state(blocked ? "0" : "1", 0,
+                       MSI_STANDARD_EC_WLAN_MASK);
 
-       return 0;
+       return min(result, 0);
 }
 
 static int rfkill_threeg_set(void *data, bool blocked)
 {
-       if (blocked)
-               set_device_state("0", 0, MSI_STANDARD_EC_3G_MASK);
-       else
-               set_device_state("1", 0, MSI_STANDARD_EC_3G_MASK);
+       int result = set_device_state(blocked ? "0" : "1", 0,
+                       MSI_STANDARD_EC_3G_MASK);
 
-       return 0;
+       return min(result, 0);
 }
 
 static const struct rfkill_ops rfkill_bluetooth_ops = {
@@ -618,25 +778,34 @@ static void rfkill_cleanup(void)
        }
 }
 
+/*
+ * Report an rfkill state change.  On ec_read_only models software cannot
+ * change the radio state (see quirk_entry), so the change is reported as
+ * a hardware block; otherwise it is mirrored as a software block.
+ */
+static bool msi_rfkill_set_state(struct rfkill *rfkill, bool blocked)
+{
+       if (quirks->ec_read_only)
+               return rfkill_set_hw_state(rfkill, blocked);
+       else
+               return rfkill_set_sw_state(rfkill, blocked);
+}
+
 static void msi_update_rfkill(struct work_struct *ignored)
 {
        get_wireless_state_ec_standard();
 
        if (rfk_wlan)
-               rfkill_set_sw_state(rfk_wlan, !wlan_s);
+               msi_rfkill_set_state(rfk_wlan, !wlan_s);
        if (rfk_bluetooth)
-               rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
+               msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
        if (rfk_threeg)
-               rfkill_set_sw_state(rfk_threeg, !threeg_s);
+               msi_rfkill_set_state(rfk_threeg, !threeg_s);
 }
-static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill);
+static DECLARE_DELAYED_WORK(msi_rfkill_dwork, msi_update_rfkill);
+static DECLARE_WORK(msi_rfkill_work, msi_update_rfkill);
 
 static void msi_send_touchpad_key(struct work_struct *ignored)
 {
        u8 rdata;
        int result;
 
-       result = ec_read(MSI_STANDARD_EC_TOUCHPAD_ADDRESS, &rdata);
+       result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
        if (result < 0)
                return;
 
@@ -644,7 +813,8 @@ static void msi_send_touchpad_key(struct work_struct *ignored)
                (rdata & MSI_STANDARD_EC_TOUCHPAD_MASK) ?
                KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true);
 }
-static DECLARE_DELAYED_WORK(msi_touchpad_work, msi_send_touchpad_key);
+static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
+static DECLARE_WORK(msi_touchpad_work, msi_send_touchpad_key);
 
 static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
                                struct serio *port)
@@ -662,14 +832,20 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
                extended = false;
                switch (data) {
                case 0xE4:
-                       schedule_delayed_work(&msi_touchpad_work,
-                               round_jiffies_relative(0.5 * HZ));
+                       if (quirks->ec_delay) {
+                               schedule_delayed_work(&msi_touchpad_dwork,
+                                       round_jiffies_relative(0.5 * HZ));
+                       } else
+                               schedule_work(&msi_touchpad_work);
                        break;
                case 0x54:
                case 0x62:
                case 0x76:
-                       schedule_delayed_work(&msi_rfkill_work,
-                               round_jiffies_relative(0.5 * HZ));
+                       if (quirks->ec_delay) {
+                               schedule_delayed_work(&msi_rfkill_dwork,
+                                       round_jiffies_relative(0.5 * HZ));
+                       } else
+                               schedule_work(&msi_rfkill_work);
                        break;
                }
        }
@@ -736,8 +912,11 @@ static int rfkill_init(struct platform_device *sdev)
        }
 
        /* schedule to run rfkill state initial */
-       schedule_delayed_work(&msi_rfkill_init,
-                               round_jiffies_relative(1 * HZ));
+       if (quirks->ec_delay) {
+               schedule_delayed_work(&msi_rfkill_init,
+                       round_jiffies_relative(1 * HZ));
+       } else
+               schedule_work(&msi_rfkill_work);
 
        return 0;
 
@@ -761,7 +940,7 @@ static int msi_laptop_resume(struct device *device)
        u8 data;
        int result;
 
-       if (!load_scm_model)
+       if (!quirks->load_scm_model)
                return 0;
 
        /* set load SCM to disable hardware control by fn key */
@@ -819,13 +998,15 @@ static int __init load_scm_model_init(struct platform_device *sdev)
        u8 data;
        int result;
 
-       /* allow userland write sysfs file  */
-       dev_attr_bluetooth.store = store_bluetooth;
-       dev_attr_wlan.store = store_wlan;
-       dev_attr_threeg.store = store_threeg;
-       dev_attr_bluetooth.attr.mode |= S_IWUSR;
-       dev_attr_wlan.attr.mode |= S_IWUSR;
-       dev_attr_threeg.attr.mode |= S_IWUSR;
+       if (!quirks->ec_read_only) {
+               /* allow userland write sysfs file  */
+               dev_attr_bluetooth.store = store_bluetooth;
+               dev_attr_wlan.store = store_wlan;
+               dev_attr_threeg.store = store_threeg;
+               dev_attr_bluetooth.attr.mode |= S_IWUSR;
+               dev_attr_wlan.attr.mode |= S_IWUSR;
+               dev_attr_threeg.attr.mode |= S_IWUSR;
+       }
 
        /* disable hardware control by fn key */
        result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
@@ -874,21 +1055,22 @@ static int __init msi_init(void)
        if (acpi_disabled)
                return -ENODEV;
 
-       if (force || dmi_check_system(msi_dmi_table))
-               old_ec_model = 1;
+       dmi_check_system(msi_dmi_table);
+       if (!quirks)
+               /* quirks may be NULL if no match in DMI table */
+               quirks = &quirk_load_scm_model;
+       if (force)
+               quirks = &quirk_old_ec_model;
 
-       if (!old_ec_model)
+       if (!quirks->old_ec_model)
                get_threeg_exists();
 
-       if (!old_ec_model && dmi_check_system(msi_load_scm_models_dmi_table))
-               load_scm_model = 1;
-
        if (auto_brightness < 0 || auto_brightness > 2)
                return -EINVAL;
 
        /* Register backlight stuff */
 
-       if (acpi_video_backlight_support()) {
+       if (!quirks->old_ec_model || acpi_video_backlight_support()) {
                pr_info("Brightness ignored, must be controlled by ACPI video driver\n");
        } else {
                struct backlight_properties props;
@@ -918,7 +1100,7 @@ static int __init msi_init(void)
        if (ret)
                goto fail_platform_device1;
 
-       if (load_scm_model && (load_scm_model_init(msipf_device) < 0)) {
+       if (quirks->load_scm_model && (load_scm_model_init(msipf_device) < 0)) {
                ret = -EINVAL;
                goto fail_platform_device1;
        }
@@ -928,20 +1110,25 @@ static int __init msi_init(void)
        if (ret)
                goto fail_platform_device2;
 
-       if (!old_ec_model) {
+       if (!quirks->old_ec_model) {
                if (threeg_exists)
                        ret = device_create_file(&msipf_device->dev,
                                                &dev_attr_threeg);
                if (ret)
                        goto fail_platform_device2;
-       }
+       } else {
+               ret = sysfs_create_group(&msipf_device->dev.kobj,
+                                        &msipf_old_attribute_group);
+               if (ret)
+                       goto fail_platform_device2;
 
-       /* Disable automatic brightness control by default because
-        * this module was probably loaded to do brightness control in
-        * software. */
+               /* Disable automatic brightness control by default because
+                * this module was probably loaded to do brightness control in
+                * software. */
 
-       if (auto_brightness != 2)
-               set_auto_brightness(auto_brightness);
+               if (auto_brightness != 2)
+                       set_auto_brightness(auto_brightness);
+       }
 
        pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
 
@@ -949,9 +1136,10 @@ static int __init msi_init(void)
 
 fail_platform_device2:
 
-       if (load_scm_model) {
+       if (quirks->load_scm_model) {
                i8042_remove_filter(msi_laptop_i8042_filter);
-               cancel_delayed_work_sync(&msi_rfkill_work);
+               cancel_delayed_work_sync(&msi_rfkill_dwork);
+               cancel_work_sync(&msi_rfkill_work);
                rfkill_cleanup();
        }
        platform_device_del(msipf_device);
@@ -973,23 +1161,26 @@ fail_backlight:
 
 static void __exit msi_cleanup(void)
 {
-       if (load_scm_model) {
+       if (quirks->load_scm_model) {
                i8042_remove_filter(msi_laptop_i8042_filter);
                msi_laptop_input_destroy();
-               cancel_delayed_work_sync(&msi_rfkill_work);
+               cancel_delayed_work_sync(&msi_rfkill_dwork);
+               cancel_work_sync(&msi_rfkill_work);
                rfkill_cleanup();
        }
 
        sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
-       if (!old_ec_model && threeg_exists)
+       if (!quirks->old_ec_model && threeg_exists)
                device_remove_file(&msipf_device->dev, &dev_attr_threeg);
        platform_device_unregister(msipf_device);
        platform_driver_unregister(&msipf_driver);
        backlight_device_unregister(msibl_device);
 
-       /* Enable automatic brightness control again */
-       if (auto_brightness != 2)
-               set_auto_brightness(1);
+       if (quirks->old_ec_model) {
+               /* Enable automatic brightness control again */
+               if (auto_brightness != 2)
+                       set_auto_brightness(1);
+       }
 
        pr_info("driver unloaded\n");
 }
@@ -1011,3 +1202,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
 MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
 MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
 MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
+MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnU90/U100:*");
index 2264331..70222f2 100644 (file)
@@ -34,29 +34,65 @@ MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
 MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver");
 MODULE_LICENSE("GPL");
 
-MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
-MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
-
 #define DRV_NAME "msi-wmi"
 
 #define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
-#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
-
-#define SCANCODE_BASE 0xD0
-#define MSI_WMI_BRIGHTNESSUP   SCANCODE_BASE
-#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
-#define MSI_WMI_VOLUMEUP       (SCANCODE_BASE + 2)
-#define MSI_WMI_VOLUMEDOWN     (SCANCODE_BASE + 3)
-#define MSI_WMI_MUTE           (SCANCODE_BASE + 4)
+#define MSIWMI_MSI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
+#define MSIWMI_WIND_EVENT_GUID "5B3CC38A-40D9-7245-8AE6-1145B751BE3F"
+
+MODULE_ALIAS("wmi:" MSIWMI_BIOS_GUID);
+MODULE_ALIAS("wmi:" MSIWMI_MSI_EVENT_GUID);
+MODULE_ALIAS("wmi:" MSIWMI_WIND_EVENT_GUID);
+
+enum msi_scancodes {
+       /* Generic MSI keys (not present on MSI Wind) */
+       MSI_KEY_BRIGHTNESSUP    = 0xD0,
+       MSI_KEY_BRIGHTNESSDOWN,
+       MSI_KEY_VOLUMEUP,
+       MSI_KEY_VOLUMEDOWN,
+       MSI_KEY_MUTE,
+       /* MSI Wind keys */
+       WIND_KEY_TOUCHPAD       = 0x08, /* Fn+F3 touchpad toggle */
+       WIND_KEY_BLUETOOTH      = 0x56, /* Fn+F11 Bluetooth toggle */
+       WIND_KEY_CAMERA,                /* Fn+F6 webcam toggle */
+       WIND_KEY_WLAN           = 0x5f, /* Fn+F11 Wi-Fi toggle */
+       WIND_KEY_TURBO,                 /* Fn+F10 turbo mode toggle */
+       WIND_KEY_ECO            = 0x69, /* Fn+F10 ECO mode toggle */
+};
 static struct key_entry msi_wmi_keymap[] = {
-       { KE_KEY, MSI_WMI_BRIGHTNESSUP,   {KEY_BRIGHTNESSUP} },
-       { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
-       { KE_KEY, MSI_WMI_VOLUMEUP,       {KEY_VOLUMEUP} },
-       { KE_KEY, MSI_WMI_VOLUMEDOWN,     {KEY_VOLUMEDOWN} },
-       { KE_KEY, MSI_WMI_MUTE,           {KEY_MUTE} },
-       { KE_END, 0}
+       { KE_KEY, MSI_KEY_BRIGHTNESSUP,         {KEY_BRIGHTNESSUP} },
+       { KE_KEY, MSI_KEY_BRIGHTNESSDOWN,       {KEY_BRIGHTNESSDOWN} },
+       { KE_KEY, MSI_KEY_VOLUMEUP,             {KEY_VOLUMEUP} },
+       { KE_KEY, MSI_KEY_VOLUMEDOWN,           {KEY_VOLUMEDOWN} },
+       { KE_KEY, MSI_KEY_MUTE,                 {KEY_MUTE} },
+
+       /* These keys work without WMI. Ignore them to avoid double keycodes */
+       { KE_IGNORE, WIND_KEY_TOUCHPAD,         {KEY_TOUCHPAD_TOGGLE} },
+       { KE_IGNORE, WIND_KEY_BLUETOOTH,        {KEY_BLUETOOTH} },
+       { KE_IGNORE, WIND_KEY_CAMERA,           {KEY_CAMERA} },
+       { KE_IGNORE, WIND_KEY_WLAN,             {KEY_WLAN} },
+
+       /* These are unknown WMI events found on MSI Wind */
+       { KE_IGNORE, 0x00 },
+       { KE_IGNORE, 0x62 },
+       { KE_IGNORE, 0x63 },
+
+       /* These are MSI Wind keys that should be handled via WMI */
+       { KE_KEY, WIND_KEY_TURBO,               {KEY_PROG1} },
+       { KE_KEY, WIND_KEY_ECO,                 {KEY_PROG2} },
+
+       { KE_END, 0 }
+};
+
+static ktime_t last_pressed;
+
+static const struct {
+       const char *guid;
+       bool quirk_last_pressed;
+} *event_wmi, event_wmis[] = {
+       { MSIWMI_MSI_EVENT_GUID, true },
+       { MSIWMI_WIND_EVENT_GUID, false },
 };
-static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1];
 
 static struct backlight_device *backlight;
 
@@ -149,7 +185,6 @@ static void msi_wmi_notify(u32 value, void *context)
        struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
        static struct key_entry *key;
        union acpi_object *obj;
-       ktime_t cur;
        acpi_status status;
 
        status = wmi_get_event_data(value, &response);
@@ -165,39 +200,67 @@ static void msi_wmi_notify(u32 value, void *context)
                pr_debug("Eventcode: 0x%x\n", eventcode);
                key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
                                eventcode);
-               if (key) {
-                       ktime_t diff;
-                       cur = ktime_get_real();
-                       diff = ktime_sub(cur, last_pressed[key->code -
-                                       SCANCODE_BASE]);
-                       /* Ignore event if the same event happened in a 50 ms
+               if (!key) {
+                       pr_info("Unknown key pressed - %x\n", eventcode);
+                       goto msi_wmi_notify_exit;
+               }
+
+               if (event_wmi->quirk_last_pressed) {
+                       ktime_t cur = ktime_get_real();
+                       ktime_t diff = ktime_sub(cur, last_pressed);
+                       /* Ignore event if any event happened in a 50 ms
                           timeframe -> Key press may result in 10-20 GPEs */
                        if (ktime_to_us(diff) < 1000 * 50) {
                                pr_debug("Suppressed key event 0x%X - "
                                         "Last press was %lld us ago\n",
                                         key->code, ktime_to_us(diff));
-                               return;
-                       }
-                       last_pressed[key->code - SCANCODE_BASE] = cur;
-
-                       if (key->type == KE_KEY &&
-                       /* Brightness is served via acpi video driver */
-                       (!acpi_video_backlight_support() ||
-                       (key->code != MSI_WMI_BRIGHTNESSUP &&
-                       key->code != MSI_WMI_BRIGHTNESSDOWN))) {
-                               pr_debug("Send key: 0x%X - "
-                                        "Input layer keycode: %d\n",
-                                        key->code, key->keycode);
-                               sparse_keymap_report_entry(msi_wmi_input_dev,
-                                               key, 1, true);
+                               goto msi_wmi_notify_exit;
                        }
-               } else
-                       pr_info("Unknown key pressed - %x\n", eventcode);
+                       last_pressed = cur;
+               }
+
+               if (key->type == KE_KEY &&
+               /* Brightness is served via acpi video driver */
+               (backlight ||
+               (key->code != MSI_KEY_BRIGHTNESSUP &&
+               key->code != MSI_KEY_BRIGHTNESSDOWN))) {
+                       pr_debug("Send key: 0x%X - Input layer keycode: %d\n",
+                                key->code, key->keycode);
+                       sparse_keymap_report_entry(msi_wmi_input_dev, key, 1,
+                                                  true);
+               }
        } else
                pr_info("Unknown event received\n");
+
+msi_wmi_notify_exit:
        kfree(response.pointer);
 }
 
+static int __init msi_wmi_backlight_setup(void)
+{
+       int err;
+       struct backlight_properties props;
+
+       memset(&props, 0, sizeof(struct backlight_properties));
+       props.type = BACKLIGHT_PLATFORM;
+       props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
+       backlight = backlight_device_register(DRV_NAME, NULL, NULL,
+                                             &msi_backlight_ops,
+                                             &props);
+       if (IS_ERR(backlight))
+               return PTR_ERR(backlight);
+
+       err = bl_get(NULL);
+       if (err < 0) {
+               backlight_device_unregister(backlight);
+               return err;
+       }
+
+       backlight->props.brightness = err;
+
+       return 0;
+}
+
 static int __init msi_wmi_input_setup(void)
 {
        int err;
@@ -219,7 +282,7 @@ static int __init msi_wmi_input_setup(void)
        if (err)
                goto err_free_keymap;
 
-       memset(last_pressed, 0, sizeof(last_pressed));
+       last_pressed = ktime_set(0, 0);
 
        return 0;
 
@@ -233,61 +296,66 @@ err_free_dev:
 static int __init msi_wmi_init(void)
 {
        int err;
+       int i;
 
-       if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
-               pr_err("This machine doesn't have MSI-hotkeys through WMI\n");
-               return -ENODEV;
-       }
-       err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
-                       msi_wmi_notify, NULL);
-       if (ACPI_FAILURE(err))
-               return -EINVAL;
+       for (i = 0; i < ARRAY_SIZE(event_wmis); i++) {
+               if (!wmi_has_guid(event_wmis[i].guid))
+                       continue;
 
-       err = msi_wmi_input_setup();
-       if (err)
-               goto err_uninstall_notifier;
-
-       if (!acpi_video_backlight_support()) {
-               struct backlight_properties props;
-               memset(&props, 0, sizeof(struct backlight_properties));
-               props.type = BACKLIGHT_PLATFORM;
-               props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
-               backlight = backlight_device_register(DRV_NAME, NULL, NULL,
-                                                     &msi_backlight_ops,
-                                                     &props);
-               if (IS_ERR(backlight)) {
-                       err = PTR_ERR(backlight);
+               err = msi_wmi_input_setup();
+               if (err) {
+                       pr_err("Unable to setup input device\n");
+                       return err;
+               }
+
+               err = wmi_install_notify_handler(event_wmis[i].guid,
+                       msi_wmi_notify, NULL);
+               if (ACPI_FAILURE(err)) {
+                       pr_err("Unable to setup WMI notify handler\n");
                        goto err_free_input;
                }
 
-               err = bl_get(NULL);
-               if (err < 0)
-                       goto err_free_backlight;
+               pr_debug("Event handler installed\n");
+               event_wmi = &event_wmis[i];
+               break;
+       }
 
-               backlight->props.brightness = err;
+       if (wmi_has_guid(MSIWMI_BIOS_GUID) && !acpi_video_backlight_support()) {
+               err = msi_wmi_backlight_setup();
+               if (err) {
+                       pr_err("Unable to setup backlight device\n");
+                       goto err_uninstall_handler;
+               }
+               pr_debug("Backlight device created\n");
+       }
+
+       if (!event_wmi && !backlight) {
+               pr_err("This machine doesn't have neither MSI-hotkeys nor backlight through WMI\n");
+               return -ENODEV;
        }
-       pr_debug("Event handler installed\n");
 
        return 0;
 
-err_free_backlight:
-       backlight_device_unregister(backlight);
+err_uninstall_handler:
+       if (event_wmi)
+               wmi_remove_notify_handler(event_wmi->guid);
 err_free_input:
-       sparse_keymap_free(msi_wmi_input_dev);
-       input_unregister_device(msi_wmi_input_dev);
-err_uninstall_notifier:
-       wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+       if (event_wmi) {
+               sparse_keymap_free(msi_wmi_input_dev);
+               input_unregister_device(msi_wmi_input_dev);
+       }
        return err;
 }
 
 static void __exit msi_wmi_exit(void)
 {
-       if (wmi_has_guid(MSIWMI_EVENT_GUID)) {
-               wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+       if (event_wmi) {
+               wmi_remove_notify_handler(event_wmi->guid);
                sparse_keymap_free(msi_wmi_input_dev);
                input_unregister_device(msi_wmi_input_dev);
-               backlight_device_unregister(backlight);
        }
+       if (backlight)
+               backlight_device_unregister(backlight);
 }
 
 module_init(msi_wmi_init);
index ceb41ef..14d4dce 100644 (file)
@@ -158,6 +158,11 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd);
 static int sony_nc_lid_resume_setup(struct platform_device *pd);
 static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
 
+static int sony_nc_gfx_switch_setup(struct platform_device *pd,
+               unsigned int handle);
+static void sony_nc_gfx_switch_cleanup(struct platform_device *pd);
+static int __sony_nc_gfx_switch_status_get(void);
+
 static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
 static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
 
@@ -1241,17 +1246,13 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                        /* Hybrid GFX switching */
                        sony_call_snc_handle(handle, 0x0000, &result);
                        dprintk("GFX switch event received (reason: %s)\n",
-                                       (result & 0x01) ?
-                                       "switch change" : "unknown");
-
-                       /* verify the switch state
-                        * 1: discrete GFX
-                        * 0: integrated GFX
-                        */
-                       sony_call_snc_handle(handle, 0x0100, &result);
+                                       (result == 0x1) ? "switch change" :
+                                       (result == 0x2) ? "output switch" :
+                                       (result == 0x3) ? "output switch" :
+                                       "");
 
                        ev_type = GFX_SWITCH;
-                       real_ev = result & 0xff;
+                       real_ev = __sony_nc_gfx_switch_status_get();
                        break;
 
                default:
@@ -1350,6 +1351,13 @@ static void sony_nc_function_setup(struct acpi_device *device,
                                pr_err("couldn't set up thermal profile function (%d)\n",
                                                result);
                        break;
+               case 0x0128:
+               case 0x0146:
+                       result = sony_nc_gfx_switch_setup(pf_device, handle);
+                       if (result)
+                               pr_err("couldn't set up GFX Switch status (%d)\n",
+                                               result);
+                       break;
                case 0x0131:
                        result = sony_nc_highspeed_charging_setup(pf_device);
                        if (result)
@@ -1365,6 +1373,8 @@ static void sony_nc_function_setup(struct acpi_device *device,
                        break;
                case 0x0137:
                case 0x0143:
+               case 0x014b:
+               case 0x014c:
                        result = sony_nc_kbd_backlight_setup(pf_device, handle);
                        if (result)
                                pr_err("couldn't set up keyboard backlight function (%d)\n",
@@ -1414,6 +1424,10 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                case 0x0122:
                        sony_nc_thermal_cleanup(pd);
                        break;
+               case 0x0128:
+               case 0x0146:
+                       sony_nc_gfx_switch_cleanup(pd);
+                       break;
                case 0x0131:
                        sony_nc_highspeed_charging_cleanup(pd);
                        break;
@@ -1423,6 +1437,8 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                        break;
                case 0x0137:
                case 0x0143:
+               case 0x014b:
+               case 0x014c:
                        sony_nc_kbd_backlight_cleanup(pd);
                        break;
                default:
@@ -1467,6 +1483,8 @@ static void sony_nc_function_resume(void)
                        break;
                case 0x0137:
                case 0x0143:
+               case 0x014b:
+               case 0x014c:
                        sony_nc_kbd_backlight_resume();
                        break;
                default:
@@ -1534,7 +1552,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
        int argument = sony_rfkill_address[(long) data] + 0x100;
 
        if (!blocked)
-               argument |= 0x030000;
+               argument |= 0x070000;
 
        return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
 }
@@ -2333,7 +2351,7 @@ static int sony_nc_lid_resume_setup(struct platform_device *pd)
        return 0;
 
 liderror:
-       for (; i > 0; i--)
+       for (i--; i >= 0; i--)
                device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
 
        kfree(lid_ctl);
@@ -2355,6 +2373,97 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
        }
 }
 
+/* GFX Switch position */
+enum gfx_switch {
+       SPEED,
+       STAMINA,
+       AUTO
+};
+struct snc_gfx_switch_control {
+       struct device_attribute attr;
+       unsigned int handle;
+};
+static struct snc_gfx_switch_control *gfxs_ctl;
+
+/* returns 0 for speed, 1 for stamina */
+static int __sony_nc_gfx_switch_status_get(void)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result))
+               return -EIO;
+
+       switch (gfxs_ctl->handle) {
+       case 0x0146:
+               /* 1: discrete GFX (speed)
+                * 0: integrated GFX (stamina)
+                */
+               return result & 0x1 ? SPEED : STAMINA;
+               break;
+       case 0x0128:
+               /* it's a more elaborated bitmask, for now:
+                * 2: integrated GFX (stamina)
+                * 0: discrete GFX (speed)
+                */
+               dprintk("GFX Status: 0x%x\n", result);
+               return result & 0x80 ? AUTO :
+                       result & 0x02 ? STAMINA : SPEED;
+               break;
+       }
+       return -EINVAL;
+}
+
+static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buffer)
+{
+       int pos = __sony_nc_gfx_switch_status_get();
+
+       if (pos < 0)
+               return pos;
+
+       return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina");
+}
+
+static int sony_nc_gfx_switch_setup(struct platform_device *pd,
+               unsigned int handle)
+{
+       unsigned int result;
+
+       gfxs_ctl = kzalloc(sizeof(struct snc_gfx_switch_control), GFP_KERNEL);
+       if (!gfxs_ctl)
+               return -ENOMEM;
+
+       gfxs_ctl->handle = handle;
+
+       sysfs_attr_init(&gfxs_ctl->attr.attr);
+       gfxs_ctl->attr.attr.name = "gfx_switch_status";
+       gfxs_ctl->attr.attr.mode = S_IRUGO;
+       gfxs_ctl->attr.show = sony_nc_gfx_switch_status_show;
+
+       result = device_create_file(&pd->dev, &gfxs_ctl->attr);
+       if (result)
+               goto gfxerror;
+
+       return 0;
+
+gfxerror:
+       kfree(gfxs_ctl);
+       gfxs_ctl = NULL;
+
+       return result;
+}
+
+static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
+{
+       if (gfxs_ctl) {
+               device_remove_file(&pd->dev, &gfxs_ctl->attr);
+
+               kfree(gfxs_ctl);
+               gfxs_ctl = NULL;
+       }
+}
+
 /* High speed charging function */
 static struct device_attribute *hsc_handle;
 
@@ -2533,6 +2642,8 @@ static void sony_nc_backlight_ng_read_limits(int handle,
                lvl_table_len = 9;
                break;
        case 0x143:
+       case 0x14b:
+       case 0x14c:
                lvl_table_len = 16;
                break;
        }
@@ -2584,6 +2695,18 @@ static void sony_nc_backlight_setup(void)
                sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
                max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
+       } else if (sony_find_snc_handle(0x14b) >= 0) {
+               ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x3000;
+               sony_nc_backlight_ng_read_limits(0x14b, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+       } else if (sony_find_snc_handle(0x14c) >= 0) {
+               ops = &sony_backlight_ng_ops;
+               sony_bl_props.cmd_base = 0x3000;
+               sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
        } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
                                                &unused))) {
                ops = &sony_backlight_ops;
@@ -3566,7 +3689,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
        }
 
        if (ret > 0) {
-               struct inode *inode = file->f_path.dentry->d_inode;
+               struct inode *inode = file_inode(file);
                inode->i_atime = current_fs_time(inode->i_sb);
        }
 
index ebcb461..9a90756 100644 (file)
@@ -209,9 +209,8 @@ enum tpacpi_hkey_event_t {
        TP_HKEY_EV_ALARM_SENSOR_XHOT    = 0x6022, /* sensor critically hot */
        TP_HKEY_EV_THM_TABLE_CHANGED    = 0x6030, /* thermal table changed */
 
-       TP_HKEY_EV_UNK_6040             = 0x6040, /* Related to AC change?
-                                                    some sort of APM hint,
-                                                    W520 */
+       /* AC-related events */
+       TP_HKEY_EV_AC_CHANGED           = 0x6040, /* AC status changed */
 
        /* Misc */
        TP_HKEY_EV_RFKILL_CHANGED       = 0x7000, /* rfkill switch changed */
@@ -852,7 +851,7 @@ static ssize_t dispatch_proc_write(struct file *file,
                        const char __user *userbuf,
                        size_t count, loff_t *pos)
 {
-       struct ibm_struct *ibm = PDE(file->f_path.dentry->d_inode)->data;
+       struct ibm_struct *ibm = PDE(file_inode(file))->data;
        char *kernbuf;
        int ret;
 
@@ -3629,6 +3628,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
                         "a sensor reports something is extremely hot!\n");
                /* recommended action: immediate sleep/hibernate */
                break;
+       case TP_HKEY_EV_AC_CHANGED:
+               /* X120e, X121e, X220, X220i, X220t, X230, T420, T420s, W520:
+                * AC status changed; can be triggered by plugging or
+                * unplugging AC adapter, docking or undocking. */
+
+               /* fallthrough */
 
        case TP_HKEY_EV_KEY_NUMLOCK:
        case TP_HKEY_EV_KEY_FN:
@@ -8574,7 +8579,8 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
        return s && strlen(s) >= 8 &&
                tpacpi_is_fw_digit(s[0]) &&
                tpacpi_is_fw_digit(s[1]) &&
-               s[2] == t && s[3] == 'T' &&
+               s[2] == t &&
+               (s[3] == 'T' || s[3] == 'N') &&
                tpacpi_is_fw_digit(s[4]) &&
                tpacpi_is_fw_digit(s[5]);
 }
@@ -8607,7 +8613,8 @@ static int __must_check __init get_thinkpad_model_data(
                return -ENOMEM;
 
        /* Really ancient ThinkPad 240X will fail this, which is fine */
-       if (!tpacpi_is_valid_fw_id(tp->bios_version_str, 'E'))
+       if (!(tpacpi_is_valid_fw_id(tp->bios_version_str, 'E') ||
+             tpacpi_is_valid_fw_id(tp->bios_version_str, 'C')))
                return 0;
 
        tp->bios_model = tp->bios_version_str[0]
index 904476b..242abac 100644 (file)
@@ -583,7 +583,7 @@ static int set_lcd_status(struct backlight_device *bd)
 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *pos)
 {
-       struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+       struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
        char cmd[42];
        size_t len;
        int value;
@@ -650,7 +650,7 @@ static int video_proc_open(struct inode *inode, struct file *file)
 static ssize_t video_proc_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
 {
-       struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+       struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
        char *cmd, *buffer;
        int ret;
        int value;
@@ -750,7 +750,7 @@ static int fan_proc_open(struct inode *inode, struct file *file)
 static ssize_t fan_proc_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *pos)
 {
-       struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+       struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
        char cmd[42];
        size_t len;
        int value;
@@ -822,7 +822,7 @@ static int keys_proc_open(struct inode *inode, struct file *file)
 static ssize_t keys_proc_write(struct file *file, const char __user *buf,
                               size_t count, loff_t *pos)
 {
-       struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
+       struct toshiba_acpi_dev *dev = PDE(file_inode(file))->data;
        char cmd[42];
        size_t len;
        int value;
index 315b311..65f735a 100644 (file)
@@ -30,7 +30,7 @@ static struct proc_dir_entry *isapnp_proc_bus_dir = NULL;
 static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
 {
        loff_t new = -1;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        mutex_lock(&inode->i_mutex);
        switch (whence) {
@@ -55,7 +55,7 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
 static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
                                    size_t nbytes, loff_t * ppos)
 {
-       struct inode *ino = file->f_path.dentry->d_inode;
+       struct inode *ino = file_inode(file);
        struct proc_dir_entry *dp = PDE(ino);
        struct pnp_dev *dev = dp->data;
        int pos = *ppos;
index bc89f39..63ddb01 100644 (file)
@@ -244,7 +244,7 @@ static int pnpbios_proc_open(struct inode *inode, struct file *file)
 static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
                                  size_t count, loff_t *pos)
 {
-       void *data = PDE(file->f_path.dentry->d_inode)->data;
+       void *data = PDE(file_inode(file))->data;
        struct pnp_bios_node *node;
        int boot = (long)data >> 8;
        u8 nodenum = (long)data;
index ca91396..0727f92 100644 (file)
@@ -1515,16 +1515,11 @@ static int bq2415x_probe(struct i2c_client *client,
        }
 
        /* Get new ID for the new device */
-       ret = idr_pre_get(&bq2415x_id, GFP_KERNEL);
-       if (ret == 0)
-               return -ENOMEM;
-
        mutex_lock(&bq2415x_id_mutex);
-       ret = idr_get_new(&bq2415x_id, client, &num);
+       num = idr_alloc(&bq2415x_id, client, 0, 0, GFP_KERNEL);
        mutex_unlock(&bq2415x_id_mutex);
-
-       if (ret < 0)
-               return ret;
+       if (num < 0)
+               return num;
 
        name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
        if (!name) {
index 8ccf5d7..26037ca 100644 (file)
@@ -791,14 +791,11 @@ static int bq27x00_battery_probe(struct i2c_client *client,
        int retval = 0;
 
        /* Get new ID for the new battery device */
-       retval = idr_pre_get(&battery_id, GFP_KERNEL);
-       if (retval == 0)
-               return -ENOMEM;
        mutex_lock(&battery_mutex);
-       retval = idr_get_new(&battery_id, client, &num);
+       num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
        mutex_unlock(&battery_mutex);
-       if (retval < 0)
-               return retval;
+       if (num < 0)
+               return num;
 
        name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
        if (!name) {
index e7301b3..c09e772 100644 (file)
@@ -395,17 +395,12 @@ static int ds278x_battery_probe(struct i2c_client *client,
        }
 
        /* Get an ID for this battery */
-       ret = idr_pre_get(&battery_id, GFP_KERNEL);
-       if (ret == 0) {
-               ret = -ENOMEM;
-               goto fail_id;
-       }
-
        mutex_lock(&battery_lock);
-       ret = idr_get_new(&battery_id, client, &num);
+       ret = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
        mutex_unlock(&battery_lock);
        if (ret < 0)
                goto fail_id;
+       num = ret;
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
index 2bf0c1b..d3db26e 100644 (file)
@@ -128,7 +128,8 @@ static int pps_gpio_probe(struct platform_device *pdev)
        }
 
        /* allocate space for device info */
-       data = kzalloc(sizeof(struct pps_gpio_device_data), GFP_KERNEL);
+       data = devm_kzalloc(&pdev->dev, sizeof(struct pps_gpio_device_data),
+                           GFP_KERNEL);
        if (data == NULL) {
                err = -ENOMEM;
                goto return_error;
@@ -150,7 +151,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
                pps_default_params |= PPS_CAPTURECLEAR | PPS_OFFSETCLEAR;
        data->pps = pps_register_source(&data->info, pps_default_params);
        if (data->pps == NULL) {
-               kfree(data);
                pr_err("failed to register IRQ %d as PPS source\n", irq);
                err = -EINVAL;
                goto return_error;
@@ -164,7 +164,6 @@ static int pps_gpio_probe(struct platform_device *pdev)
                        get_irqf_trigger_flags(pdata), data->info.name, data);
        if (ret) {
                pps_unregister_source(data->pps);
-               kfree(data);
                pr_err("failed to acquire IRQ %d\n", irq);
                err = -EINVAL;
                goto return_error;
@@ -190,7 +189,6 @@ static int pps_gpio_remove(struct platform_device *pdev)
        gpio_free(pdata->gpio_pin);
        pps_unregister_source(data->pps);
        pr_info("removed IRQ %d as PPS source\n", data->irq);
-       kfree(data);
        return 0;
 }
 
index f197e8e..cdad4d9 100644 (file)
@@ -102,7 +102,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
                goto pps_register_source_exit;
        }
 
-       /* These initializations must be done before calling idr_get_new()
+       /* These initializations must be done before calling idr_alloc()
         * in order to avoid reces into pps_event().
         */
        pps->params.api_version = PPS_API_VERS;
index 6437703..7173e3a 100644 (file)
@@ -295,29 +295,21 @@ int pps_register_cdev(struct pps_device *pps)
        dev_t devt;
 
        mutex_lock(&pps_idr_lock);
-       /* Get new ID for the new PPS source */
-       if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
-               mutex_unlock(&pps_idr_lock);
-               return -ENOMEM;
-       }
-
-       /* Now really allocate the PPS source.
-        * After idr_get_new() calling the new source will be freely available
-        * into the kernel.
+       /*
+        * Get new ID for the new PPS source.  After idr_alloc() calling
+        * the new source will be freely available into the kernel.
         */
-       err = idr_get_new(&pps_idr, pps, &pps->id);
-       mutex_unlock(&pps_idr_lock);
-
-       if (err < 0)
-               return err;
-
-       pps->id &= MAX_IDR_MASK;
-       if (pps->id >= PPS_MAX_SOURCES) {
-               pr_err("%s: too many PPS sources in the system\n",
-                                       pps->info.name);
-               err = -EBUSY;
-               goto free_idr;
+       err = idr_alloc(&pps_idr, pps, 0, PPS_MAX_SOURCES, GFP_KERNEL);
+       if (err < 0) {
+               if (err == -ENOSPC) {
+                       pr_err("%s: too many PPS sources in the system\n",
+                              pps->info.name);
+                       err = -EBUSY;
+               }
+               goto out_unlock;
        }
+       pps->id = err;
+       mutex_unlock(&pps_idr_lock);
 
        devt = MKDEV(MAJOR(pps_devt), pps->id);
 
@@ -351,8 +343,8 @@ del_cdev:
 free_idr:
        mutex_lock(&pps_idr_lock);
        idr_remove(&pps_idr, pps->id);
+out_unlock:
        mutex_unlock(&pps_idr_lock);
-
        return err;
 }
 
index dd3bfaf..29387df 100644 (file)
@@ -199,11 +199,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
        /* actual size of vring (in bytes) */
        size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
 
-       if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
-               dev_err(dev, "idr_pre_get failed\n");
-               return -ENOMEM;
-       }
-
        /*
         * Allocate non-cacheable memory for the vring. In the future
         * this call will also configure the IOMMU for us
@@ -221,12 +216,13 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
         * TODO: let the rproc know the notifyid of this vring
         * TODO: support predefined notifyids (via resource table)
         */
-       ret = idr_get_new(&rproc->notifyids, rvring, &notifyid);
+       ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
        if (ret) {
-               dev_err(dev, "idr_get_new failed: %d\n", ret);
+               dev_err(dev, "idr_alloc failed: %d\n", ret);
                dma_free_coherent(dev->parent, size, va, dma);
                return ret;
        }
+       notifyid = ret;
 
        /* Store largest notifyid */
        rproc->max_notifyid = max(rproc->max_notifyid, notifyid);
@@ -1180,7 +1176,6 @@ static void rproc_type_release(struct device *dev)
 
        rproc_delete_debug_dir(rproc);
 
-       idr_remove_all(&rproc->notifyids);
        idr_destroy(&rproc->notifyids);
 
        if (rproc->index >= 0)
index 9e198e5..afed9b7 100644 (file)
@@ -222,7 +222,7 @@ static void rproc_virtio_finalize_features(struct virtio_device *vdev)
        rvdev->gfeatures = vdev->features[0];
 }
 
-static struct virtio_config_ops rproc_virtio_config_ops = {
+static const struct virtio_config_ops rproc_virtio_config_ops = {
        .get_features   = rproc_virtio_get_features,
        .finalize_features = rproc_virtio_finalize_features,
        .find_vqs       = rproc_virtio_find_vqs,
index d854460..a59684b 100644 (file)
@@ -213,13 +213,10 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
                struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
                void *priv, u32 addr)
 {
-       int err, tmpaddr, request;
+       int id_min, id_max, id;
        struct rpmsg_endpoint *ept;
        struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;
 
-       if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
-               return NULL;
-
        ept = kzalloc(sizeof(*ept), GFP_KERNEL);
        if (!ept) {
                dev_err(dev, "failed to kzalloc a new ept\n");
@@ -234,31 +231,28 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
        ept->priv = priv;
 
        /* do we need to allocate a local address ? */
-       request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr;
+       if (addr == RPMSG_ADDR_ANY) {
+               id_min = RPMSG_RESERVED_ADDRESSES;
+               id_max = 0;
+       } else {
+               id_min = addr;
+               id_max = addr + 1;
+       }
 
        mutex_lock(&vrp->endpoints_lock);
 
        /* bind the endpoint to an rpmsg address (and allocate one if needed) */
-       err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr);
-       if (err) {
-               dev_err(dev, "idr_get_new_above failed: %d\n", err);
+       id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
+       if (id < 0) {
+               dev_err(dev, "idr_alloc failed: %d\n", id);
                goto free_ept;
        }
-
-       /* make sure the user's address request is fulfilled, if relevant */
-       if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
-               dev_err(dev, "address 0x%x already in use\n", addr);
-               goto rem_idr;
-       }
-
-       ept->addr = tmpaddr;
+       ept->addr = id;
 
        mutex_unlock(&vrp->endpoints_lock);
 
        return ept;
 
-rem_idr:
-       idr_remove(&vrp->endpoints, request);
 free_ept:
        mutex_unlock(&vrp->endpoints_lock);
        kref_put(&ept->refcount, __ept_release);
@@ -1036,7 +1030,6 @@ static void rpmsg_remove(struct virtio_device *vdev)
        if (vrp->ns_ept)
                __rpmsg_destroy_ept(vrp, vrp->ns_ept);
 
-       idr_remove_all(&vrp->endpoints);
        idr_destroy(&vrp->endpoints);
 
        vdev->config->del_vqs(vrp->vdev);
index b2a8ed9..98f0d3c 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/slab.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
+#include <linux/stmp_device.h>
+#include <linux/stmp3xxx_rtc_wdt.h>
 
 #include <mach/common.h>
 
@@ -36,6 +38,7 @@
 #define STMP3XXX_RTC_CTRL_ALARM_IRQ_EN         0x00000001
 #define STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN       0x00000002
 #define STMP3XXX_RTC_CTRL_ALARM_IRQ            0x00000004
+#define STMP3XXX_RTC_CTRL_WATCHDOGEN           0x00000010
 
 #define STMP3XXX_RTC_STAT                      0x10
 #define STMP3XXX_RTC_STAT_STALE_SHIFT          16
@@ -45,6 +48,8 @@
 
 #define STMP3XXX_RTC_ALARM                     0x40
 
+#define STMP3XXX_RTC_WATCHDOG                  0x50
+
 #define STMP3XXX_RTC_PERSISTENT0               0x60
 #define STMP3XXX_RTC_PERSISTENT0_SET           0x64
 #define STMP3XXX_RTC_PERSISTENT0_CLR           0x68
 #define STMP3XXX_RTC_PERSISTENT0_ALARM_EN      0x00000004
 #define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE    0x00000080
 
+#define STMP3XXX_RTC_PERSISTENT1               0x70
+/* missing bitmask in headers */
+#define STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER 0x80000000
+
 struct stmp3xxx_rtc_data {
        struct rtc_device *rtc;
        void __iomem *io;
        int irq_alarm;
 };
 
+#if IS_ENABLED(CONFIG_STMP3XXX_RTC_WATCHDOG)
+/**
+ * stmp3xxx_wdt_set_timeout - configure the watchdog inside the STMP3xxx RTC
+ * @dev: the parent device of the watchdog (= the RTC)
+ * @timeout: the desired value for the timeout register of the watchdog.
+ *           0 disables the watchdog
+ *
+ * The watchdog needs one register and two bits which are in the RTC domain.
+ * To handle the resource conflict, the RTC driver will create another
+ * platform_device for the watchdog driver as a child of the RTC device.
+ * The watchdog driver is passed the below accessor function via platform_data
+ * to configure the watchdog. Locking is not needed because accessing SET/CLR
+ * registers is atomic.
+ */
+
+static void stmp3xxx_wdt_set_timeout(struct device *dev, u32 timeout)
+{
+       struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
+
+       if (timeout) {
+               writel(timeout, rtc_data->io + STMP3XXX_RTC_WATCHDOG);
+               writel(STMP3XXX_RTC_CTRL_WATCHDOGEN,
+                      rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_SET);
+               writel(STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER,
+                      rtc_data->io + STMP3XXX_RTC_PERSISTENT1 + STMP_OFFSET_REG_SET);
+       } else {
+               writel(STMP3XXX_RTC_CTRL_WATCHDOGEN,
+                      rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR);
+               writel(STMP3XXX_RTC_PERSISTENT1_FORCE_UPDATER,
+                      rtc_data->io + STMP3XXX_RTC_PERSISTENT1 + STMP_OFFSET_REG_CLR);
+       }
+}
+
+static struct stmp3xxx_wdt_pdata wdt_pdata = {
+       .wdt_set_timeout = stmp3xxx_wdt_set_timeout,
+};
+
+static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
+{
+       struct platform_device *wdt_pdev =
+               platform_device_alloc("stmp3xxx_rtc_wdt", rtc_pdev->id);
+
+       if (wdt_pdev) {
+               wdt_pdev->dev.parent = &rtc_pdev->dev;
+               wdt_pdev->dev.platform_data = &wdt_pdata;
+               platform_device_add(wdt_pdev);
+       }
+}
+#else
+static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
+{
+}
+#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
+
 static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
 {
        /*
@@ -233,6 +296,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
                goto out_irq_alarm;
        }
 
+       stmp3xxx_wdt_register(pdev);
        return 0;
 
 out_irq_alarm:
index 33f26bf..6999fd9 100644 (file)
@@ -1573,7 +1573,10 @@ static void dasd_eckd_do_validate_server(struct work_struct *work)
 {
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  kick_validate);
-       if (dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST)
+       unsigned long flags = 0;
+
+       set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
+       if (dasd_eckd_validate_server(device, flags)
            == -EAGAIN) {
                /* schedule worker again if failed */
                schedule_work(&device->kick_validate);
@@ -4157,6 +4160,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
        int rc;
        struct dasd_uid temp_uid;
        unsigned long flags;
+       unsigned long cqr_flags = 0;
 
        private = (struct dasd_eckd_private *) device->private;
 
@@ -4178,7 +4182,9 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
        rc = dasd_alias_make_device_known_to_lcu(device);
        if (rc)
                return rc;
-       dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST);
+
+       set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
+       dasd_eckd_validate_server(device, cqr_flags);
 
        /* RE-Read Configuration Data */
        dasd_eckd_read_conf(device);
index 230697a..96e52bf 100644 (file)
@@ -433,9 +433,9 @@ fs3270_open(struct inode *inode, struct file *filp)
        struct idal_buffer *ib;
        int minor, rc = 0;
 
-       if (imajor(filp->f_path.dentry->d_inode) != IBM_FS3270_MAJOR)
+       if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR)
                return -ENODEV;
-       minor = iminor(filp->f_path.dentry->d_inode);
+       minor = iminor(file_inode(filp));
        /* Check for minor 0 multiplexer. */
        if (minor == 0) {
                struct tty_struct *tty = get_current_tty();
index 2d61db3..6dc6072 100644 (file)
@@ -273,13 +273,13 @@ tapechar_open (struct inode *inode, struct file *filp)
        int minor, rc;
 
        DBF_EVENT(6, "TCHAR:open: %i:%i\n",
-               imajor(filp->f_path.dentry->d_inode),
-               iminor(filp->f_path.dentry->d_inode));
+               imajor(file_inode(filp)),
+               iminor(file_inode(filp)));
 
-       if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
+       if (imajor(file_inode(filp)) != tapechar_major)
                return -ENODEV;
 
-       minor = iminor(filp->f_path.dentry->d_inode);
+       minor = iminor(file_inode(filp));
        device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
        if (IS_ERR(device)) {
                DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
index 483f72b..c180e31 100644 (file)
@@ -703,7 +703,7 @@ static int ur_open(struct inode *inode, struct file *file)
         * We treat the minor number as the devno of the ur device
         * to find in the driver tree.
         */
-       devno = MINOR(file->f_dentry->d_inode->i_rdev);
+       devno = MINOR(file_inode(file)->i_rdev);
 
        urd = urdev_get_from_devno(devno);
        if (!urd) {
index e6e0d31..ccaae9d 100644 (file)
@@ -128,7 +128,7 @@ static int qstat_show(struct seq_file *m, void *v)
 static int qstat_seq_open(struct inode *inode, struct file *filp)
 {
        return single_open(filp, qstat_show,
-                          filp->f_path.dentry->d_inode->i_private);
+                          file_inode(filp)->i_private);
 }
 
 static const struct file_operations debugfs_fops = {
@@ -221,7 +221,7 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
 static int qperf_seq_open(struct inode *inode, struct file *filp)
 {
        return single_open(filp, qperf_show,
-                          filp->f_path.dentry->d_inode->i_private);
+                          file_inode(filp)->i_private);
 }
 
 static struct file_operations debugfs_perf_fops = {
@@ -232,7 +232,8 @@ static struct file_operations debugfs_perf_fops = {
        .llseek  = seq_lseek,
        .release = single_release,
 };
-static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
+
+static void setup_debugfs_entry(struct qdio_q *q)
 {
        char name[QDIO_DEBUGFS_NAME_LEN];
 
@@ -263,12 +264,12 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
                irq_ptr->debugfs_perf = NULL;
 
        for_each_input_queue(irq_ptr, q, i)
-               setup_debugfs_entry(q, cdev);
+               setup_debugfs_entry(q);
        for_each_output_queue(irq_ptr, q, i)
-               setup_debugfs_entry(q, cdev);
+               setup_debugfs_entry(q);
 }
 
-void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
 {
        struct qdio_q *q;
        int i;
index 7f8b973..647b422 100644 (file)
@@ -85,8 +85,7 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
                       struct qdio_irq *irq_ptr);
 void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
                              struct ccw_device *cdev);
-void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
-                                struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
 int qdio_debug_init(void);
 void qdio_debug_exit(void);
 
index abc550e..843051b 100644 (file)
@@ -1226,7 +1226,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
 
        tiqdio_remove_input_queues(irq_ptr);
        qdio_shutdown_queues(cdev);
-       qdio_shutdown_debug_entries(irq_ptr, cdev);
+       qdio_shutdown_debug_entries(irq_ptr);
 
        /* cleanup subchannel */
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
index 03a15e0..6711e65 100644 (file)
@@ -275,7 +275,7 @@ static const char *kvm_bus_name(struct virtio_device *vdev)
 /*
  * The config ops structure as defined by virtio config
  */
-static struct virtio_config_ops kvm_vq_configspace_ops = {
+static const struct virtio_config_ops kvm_vq_configspace_ops = {
        .get_features = kvm_get_features,
        .finalize_features = kvm_finalize_features,
        .get = kvm_get,
index e85c803..fc1339c 100644 (file)
@@ -107,7 +107,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        int error = 0;
        u8 ireg = 0;
 
-       if (D7S_MINOR != iminor(file->f_path.dentry->d_inode))
+       if (D7S_MINOR != iminor(file_inode(file)))
                return -ENODEV;
 
        mutex_lock(&d7s_mutex);
index d1f0120..5e1e12c 100644 (file)
@@ -640,7 +640,7 @@ out:
 /* This function handles ioctl for the character device */
 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        long timeout;
        unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
        dma_addr_t dma_handle;
index 52a2f05..c845bdb 100644 (file)
@@ -757,7 +757,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
        dma_addr_t dma_handle;
        int request_id = 0;
        TW_Ioctl_Driver_Command driver_command;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        TW_Ioctl_Buf_Apache *tw_ioctl;
        TW_Command_Full *full_command_packet;
        TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
index 62071d2..56662ae 100644 (file)
@@ -889,7 +889,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
        unsigned long flags;
        unsigned int data_buffer_length = 0;
        unsigned long data_buffer_length_adjusted = 0;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        unsigned long *cpu_addr;
        long timeout;
        TW_New_Ioctl *tw_ioctl;
index 742f5d7..a6f7190 100644 (file)
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 29801
+# define AAC_DRIVER_BUILD 30000
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS 32
 
 #define AAC_NUM_MGT_FIB         8
-#define AAC_NUM_IO_FIB         (512 - AAC_NUM_MGT_FIB)
+#define AAC_NUM_IO_FIB         (1024 - AAC_NUM_MGT_FIB)
 #define AAC_NUM_FIB            (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
 
 #define AAC_MAX_LUN            (8)
 #define CONTAINER_TO_ID(cont)          (cont)
 #define CONTAINER_TO_LUN(cont)         (0)
 
+#define PMC_DEVICE_S7  0x28c
+#define PMC_DEVICE_S8  0x28d
+#define PMC_DEVICE_S9  0x28f
+
 #define aac_phys_to_logical(x)  ((x)+1)
 #define aac_logical_to_phys(x)  ((x)?(x)-1:0)
 
index 8e5d3be..3f75995 100644 (file)
@@ -404,7 +404,13 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
                dev->max_fib_size = status[1] & 0xFFE0;
                host->sg_tablesize = status[2] >> 16;
                dev->sg_tablesize = status[2] & 0xFFFF;
-               host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+               if (dev->pdev->device == PMC_DEVICE_S7 ||
+                   dev->pdev->device == PMC_DEVICE_S8 ||
+                   dev->pdev->device == PMC_DEVICE_S9)
+                       host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
+                               (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
+               else
+                       host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
                dev->max_num_aif = status[4] & 0xFFFF;
                /*
                 *      NOTE:
@@ -452,6 +458,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
                }
        }
 
+       if (host->can_queue > AAC_NUM_IO_FIB)
+               host->can_queue = AAC_NUM_IO_FIB;
+
        /*
         *      Ok now init the communication subsystem
         */
index 3b021ec..e2e3492 100644 (file)
@@ -407,7 +407,7 @@ static int aac_src_deliver_message(struct fib *fib)
                fib->hw_fib_va->header.StructType = FIB_MAGIC2;
                fib->hw_fib_va->header.SenderFibAddress = (u32)address;
                fib->hw_fib_va->header.u.TimeStamp = 0;
-               BUG_ON((u32)(address >> 32) != 0L);
+               BUG_ON(upper_32_bits(address) != 0L);
                address |= fibsize;
        } else {
                /* Calculate the amount to the fibsize bits */
@@ -431,7 +431,7 @@ static int aac_src_deliver_message(struct fib *fib)
                address |= fibsize;
        }
 
-       src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+       src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
        src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
 
        return 0;
index e6bf126..a5f7690 100644 (file)
@@ -1034,7 +1034,7 @@ bfad_start_ops(struct bfad_s *bfad) {
                        sizeof(driver_info.host_os_patch) - 1);
 
        strncpy(driver_info.os_device_name, bfad->pci_name,
-               sizeof(driver_info.os_device_name - 1));
+               sizeof(driver_info.os_device_name) - 1);
 
        /* FCS driver info init */
        spin_lock_irqsave(&bfad->bfad_lock, flags);
index 8f92732..5864f98 100644 (file)
@@ -523,20 +523,13 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
        int error = 1;
 
        mutex_lock(&bfad_mutex);
-       if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
+       error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
+       if (error < 0) {
                mutex_unlock(&bfad_mutex);
-               printk(KERN_WARNING "idr_pre_get failure\n");
+               printk(KERN_WARNING "idr_alloc failure\n");
                goto out;
        }
-
-       error = idr_get_new(&bfad_im_port_index, im_port,
-                                        &im_port->idr_id);
-       if (error) {
-               mutex_unlock(&bfad_mutex);
-               printk(KERN_WARNING "idr_get_new failure\n");
-               goto out;
-       }
-
+       im_port->idr_id = error;
        mutex_unlock(&bfad_mutex);
 
        im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
index 3486845..50fcd01 100644 (file)
@@ -64,7 +64,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.12"
+#define BNX2FC_VERSION         "1.0.13"
 
 #define PFX                    "bnx2fc: "
 
 #define BNX2FC_RELOGIN_WAIT_TIME       200
 #define BNX2FC_RELOGIN_WAIT_CNT                10
 
+#define BNX2FC_STATS(hba, stat, cnt)                                   \
+       do {                                                            \
+               u32 val;                                                \
+                                                                       \
+               val = fw_stats->stat.cnt;                               \
+               if (hba->prev_stats.stat.cnt <= val)                    \
+                       val -= hba->prev_stats.stat.cnt;                \
+               else                                                    \
+                       val += (0xfffffff - hba->prev_stats.stat.cnt);  \
+               hba->bfw_stats.cnt += val;                              \
+       } while (0)
+
 /* bnx2fc driver uses only one instance of fcoe_percpu_s */
 extern struct fcoe_percpu_s bnx2fc_global;
 
@@ -167,6 +179,14 @@ struct bnx2fc_percpu_s {
        spinlock_t fp_work_lock;
 };
 
+struct bnx2fc_fw_stats {
+       u64     fc_crc_cnt;
+       u64     fcoe_tx_pkt_cnt;
+       u64     fcoe_rx_pkt_cnt;
+       u64     fcoe_tx_byte_cnt;
+       u64     fcoe_rx_byte_cnt;
+};
+
 struct bnx2fc_hba {
        struct list_head list;
        struct cnic_dev *cnic;
@@ -207,6 +227,8 @@ struct bnx2fc_hba {
        struct bnx2fc_rport **tgt_ofld_list;
 
        /* statistics */
+       struct bnx2fc_fw_stats bfw_stats;
+       struct fcoe_statistics_params prev_stats;
        struct fcoe_statistics_params *stats_buffer;
        dma_addr_t stats_buf_dma;
        struct completion stat_req_done;
@@ -280,6 +302,7 @@ struct bnx2fc_rport {
 #define BNX2FC_FLAG_UPLD_REQ_COMPL     0x7
 #define BNX2FC_FLAG_EXPL_LOGO          0x8
 #define BNX2FC_FLAG_DISABLE_FAILED     0x9
+#define BNX2FC_FLAG_ENABLED            0xa
 
        u8 src_addr[ETH_ALEN];
        u32 max_sqes;
@@ -468,6 +491,8 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
 int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
                                        struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+                                       struct bnx2fc_rport *tgt);
 int bnx2fc_send_session_disable_req(struct fcoe_port *port,
                                    struct bnx2fc_rport *tgt);
 int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
index 70ecd95..2daf4b0 100644 (file)
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Jun 04, 2012"
+#define DRV_MODULE_RELDATE     "Dec 21, 2012"
 
 
 static char version[] =
@@ -62,6 +62,10 @@ static int bnx2fc_destroy(struct net_device *net_device);
 static int bnx2fc_enable(struct net_device *netdev);
 static int bnx2fc_disable(struct net_device *netdev);
 
+/* fcoe_syfs control interface handlers */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev);
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
 static void bnx2fc_recv_frame(struct sk_buff *skb);
 
 static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
@@ -89,7 +93,6 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
 static void bnx2fc_stop(struct bnx2fc_interface *interface);
 static int __init bnx2fc_mod_init(void);
 static void __exit bnx2fc_mod_exit(void);
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
 
 unsigned int bnx2fc_debug_level;
 module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -107,44 +110,6 @@ static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
                ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
 }
 
-/**
- * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void bnx2fc_get_lesb(struct fc_lport *lport,
-                           struct fc_els_lesb *fc_lesb)
-{
-       struct net_device *netdev = bnx2fc_netdev(lport);
-
-       __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
-       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
-       struct net_device *netdev = bnx2fc_netdev(fip->lp);
-       struct fcoe_fc_els_lesb *fcoe_lesb;
-       struct fc_els_lesb fc_lesb;
-
-       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
-       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
-       ctlr_dev->lesb.lesb_link_fail =
-               ntohl(fcoe_lesb->lesb_link_fail);
-       ctlr_dev->lesb.lesb_vlink_fail =
-               ntohl(fcoe_lesb->lesb_vlink_fail);
-       ctlr_dev->lesb.lesb_miss_fka =
-               ntohl(fcoe_lesb->lesb_miss_fka);
-       ctlr_dev->lesb.lesb_symb_err =
-               ntohl(fcoe_lesb->lesb_symb_err);
-       ctlr_dev->lesb.lesb_err_block =
-               ntohl(fcoe_lesb->lesb_err_block);
-       ctlr_dev->lesb.lesb_fcs_error =
-               ntohl(fcoe_lesb->lesb_fcs_error);
-}
-EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
-
 static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
 {
        struct fcoe_ctlr_device *ctlr_dev =
@@ -687,11 +652,16 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
                BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
                return bnx2fc_stats;
        }
-       bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
-       bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
-       bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
-       bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
-       bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+       BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
+       bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
+       BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
+       bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
+       BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
+       bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
+       BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
+       bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
+       BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
+       bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);
 
        bnx2fc_stats->dumped_frames = 0;
        bnx2fc_stats->lip_count = 0;
@@ -700,6 +670,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
        bnx2fc_stats->loss_of_signal_count = 0;
        bnx2fc_stats->prim_seq_protocol_err_count = 0;
 
+       memcpy(&hba->prev_stats, hba->stats_buffer,
+              sizeof(struct fcoe_statistics_params));
        return bnx2fc_stats;
 }
 
@@ -734,35 +706,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
        return 0;
 }
 
-static void bnx2fc_link_speed_update(struct fc_lport *lport)
-{
-       struct fcoe_port *port = lport_priv(lport);
-       struct bnx2fc_interface *interface = port->priv;
-       struct net_device *netdev = interface->netdev;
-       struct ethtool_cmd ecmd;
-
-       if (!__ethtool_get_settings(netdev, &ecmd)) {
-               lport->link_supported_speeds &=
-                       ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
-               if (ecmd.supported & (SUPPORTED_1000baseT_Half |
-                                     SUPPORTED_1000baseT_Full))
-                       lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
-               if (ecmd.supported & SUPPORTED_10000baseT_Full)
-                       lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-
-               switch (ethtool_cmd_speed(&ecmd)) {
-               case SPEED_1000:
-                       lport->link_speed = FC_PORTSPEED_1GBIT;
-                       break;
-               case SPEED_2500:
-                       lport->link_speed = FC_PORTSPEED_2GBIT;
-                       break;
-               case SPEED_10000:
-                       lport->link_speed = FC_PORTSPEED_10GBIT;
-                       break;
-               }
-       }
-}
 static int bnx2fc_link_ok(struct fc_lport *lport)
 {
        struct fcoe_port *port = lport_priv(lport);
@@ -820,7 +763,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
        port->fcoe_pending_queue_active = 0;
        setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
 
-       bnx2fc_link_speed_update(lport);
+       fcoe_link_speed_update(lport);
 
        if (!lport->vport) {
                if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
@@ -864,6 +807,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                                     u16 vlan_id)
 {
        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+       struct fcoe_ctlr_device *cdev;
        struct fc_lport *lport;
        struct fc_lport *vport;
        struct bnx2fc_interface *interface, *tmp;
@@ -923,30 +867,47 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
                                interface->netdev->name, event);
 
-               bnx2fc_link_speed_update(lport);
+               fcoe_link_speed_update(lport);
+
+               cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
 
                if (link_possible && !bnx2fc_link_ok(lport)) {
-                       /* Reset max recv frame size to default */
-                       fc_set_mfs(lport, BNX2FC_MFS);
-                       /*
-                        * ctlr link up will only be handled during
-                        * enable to avoid sending discovery solicitation
-                        * on a stale vlan
-                        */
-                       if (interface->enabled)
-                               fcoe_ctlr_link_up(ctlr);
+                       switch (cdev->enabled) {
+                       case FCOE_CTLR_DISABLED:
+                               pr_info("Link up while interface is disabled.\n");
+                               break;
+                       case FCOE_CTLR_ENABLED:
+                       case FCOE_CTLR_UNUSED:
+                               /* Reset max recv frame size to default */
+                               fc_set_mfs(lport, BNX2FC_MFS);
+                               /*
+                                * ctlr link up will only be handled during
+                                * enable to avoid sending discovery
+                                * solicitation on a stale vlan
+                                */
+                               if (interface->enabled)
+                                       fcoe_ctlr_link_up(ctlr);
+                       };
                } else if (fcoe_ctlr_link_down(ctlr)) {
-                       mutex_lock(&lport->lp_mutex);
-                       list_for_each_entry(vport, &lport->vports, list)
-                               fc_host_port_type(vport->host) =
-                                                       FC_PORTTYPE_UNKNOWN;
-                       mutex_unlock(&lport->lp_mutex);
-                       fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
-                       per_cpu_ptr(lport->stats,
-                                   get_cpu())->LinkFailureCount++;
-                       put_cpu();
-                       fcoe_clean_pending_queue(lport);
-                       wait_for_upload = 1;
+                       switch (cdev->enabled) {
+                       case FCOE_CTLR_DISABLED:
+                               pr_info("Link down while interface is disabled.\n");
+                               break;
+                       case FCOE_CTLR_ENABLED:
+                       case FCOE_CTLR_UNUSED:
+                               mutex_lock(&lport->lp_mutex);
+                               list_for_each_entry(vport, &lport->vports, list)
+                                       fc_host_port_type(vport->host) =
+                                       FC_PORTTYPE_UNKNOWN;
+                               mutex_unlock(&lport->lp_mutex);
+                               fc_host_port_type(lport->host) =
+                                       FC_PORTTYPE_UNKNOWN;
+                               per_cpu_ptr(lport->stats,
+                                           get_cpu())->LinkFailureCount++;
+                               put_cpu();
+                               fcoe_clean_pending_queue(lport);
+                               wait_for_upload = 1;
+                       };
                }
        }
        mutex_unlock(&bnx2fc_dev_lock);
@@ -1477,6 +1438,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
        port = lport_priv(lport);
        port->lport = lport;
        port->priv = interface;
+       port->get_netdev = bnx2fc_netdev;
        INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
 
        /* Configure fcoe_port */
@@ -1996,7 +1958,9 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
                set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
 }
 
-
+/**
+ * Deprecated: Use bnx2fc_enabled()
+ */
 static int bnx2fc_disable(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface;
@@ -2022,7 +1986,9 @@ static int bnx2fc_disable(struct net_device *netdev)
        return rc;
 }
 
-
+/**
+ * Deprecated: Use bnx2fc_enabled()
+ */
 static int bnx2fc_enable(struct net_device *netdev)
 {
        struct bnx2fc_interface *interface;
@@ -2048,17 +2014,57 @@ static int bnx2fc_enable(struct net_device *netdev)
 }
 
 /**
- * bnx2fc_create - Create bnx2fc FCoE interface
+ * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+       struct fc_lport *lport = ctlr->lp;
+       struct net_device *netdev = bnx2fc_netdev(lport);
+
+       switch (cdev->enabled) {
+       case FCOE_CTLR_ENABLED:
+               return bnx2fc_enable(netdev);
+       case FCOE_CTLR_DISABLED:
+               return bnx2fc_disable(netdev);
+       case FCOE_CTLR_UNUSED:
+       default:
+               return -ENOTSUPP;
+       };
+}
+
+enum bnx2fc_create_link_state {
+       BNX2FC_CREATE_LINK_DOWN,
+       BNX2FC_CREATE_LINK_UP,
+};
+
+/**
+ * _bnx2fc_create() - Create bnx2fc FCoE interface
+ * @netdev  :   The net_device object of the Ethernet interface to create on
+ * @fip_mode:   The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
  *
- * @buffer: The name of Ethernet interface to create on
- * @kp:     The associated kernel param
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
  *
- * Called from sysfs.
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
  *
  * Returns: 0 for success
  */
-static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _bnx2fc_create(struct net_device *netdev,
+                         enum fip_state fip_mode,
+                         enum bnx2fc_create_link_state link_state)
 {
+       struct fcoe_ctlr_device *cdev;
        struct fcoe_ctlr *ctlr;
        struct bnx2fc_interface *interface;
        struct bnx2fc_hba *hba;
@@ -2153,7 +2159,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
        /* Make this master N_port */
        ctlr->lp = lport;
 
-       if (!bnx2fc_link_ok(lport)) {
+       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+       if (link_state == BNX2FC_CREATE_LINK_UP)
+               cdev->enabled = FCOE_CTLR_ENABLED;
+       else
+               cdev->enabled = FCOE_CTLR_DISABLED;
+
+       if (link_state == BNX2FC_CREATE_LINK_UP &&
+           !bnx2fc_link_ok(lport)) {
                fcoe_ctlr_link_up(ctlr);
                fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
                set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
@@ -2161,7 +2175,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
 
        BNX2FC_HBA_DBG(lport, "create: START DISC\n");
        bnx2fc_start_disc(interface);
-       interface->enabled = true;
+
+       if (link_state == BNX2FC_CREATE_LINK_UP)
+               interface->enabled = true;
+
        /*
         * Release from kref_init in bnx2fc_interface_setup, on success
         * lport should be holding a reference taken in bnx2fc_if_create
@@ -2186,6 +2203,37 @@ mod_err:
        return rc;
 }
 
+/**
+ * bnx2fc_create() - Create a bnx2fc interface
+ * @netdev  : The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+       return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
+}
+
+/**
+ * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
+ *
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
+ */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev)
+{
+       return _bnx2fc_create(netdev, FIP_MODE_FABRIC,
+                             BNX2FC_CREATE_LINK_DOWN);
+}
+
 /**
  * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
  *
@@ -2311,6 +2359,7 @@ static struct fcoe_transport bnx2fc_transport = {
        .name = {"bnx2fc"},
        .attached = false,
        .list = LIST_HEAD_INIT(bnx2fc_transport.list),
+       .alloc = bnx2fc_ctlr_alloc,
        .match = bnx2fc_match,
        .create = bnx2fc_create,
        .destroy = bnx2fc_destroy,
@@ -2555,13 +2604,13 @@ module_init(bnx2fc_mod_init);
 module_exit(bnx2fc_mod_exit);
 
 static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
-       .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
-       .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
-       .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+       .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled,
+       .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+       .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
 
        .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
        .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
@@ -2660,7 +2709,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
        .can_queue              = BNX2FC_CAN_QUEUE,
        .use_clustering         = ENABLE_CLUSTERING,
        .sg_tablesize           = BNX2FC_MAX_BDS_PER_CMD,
-       .max_sectors            = 512,
+       .max_sectors            = 1024,
 };
 
 static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
@@ -2668,7 +2717,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
        .elsct_send             = bnx2fc_elsct_send,
        .fcp_abort_io           = bnx2fc_abort_io,
        .fcp_cleanup            = bnx2fc_cleanup,
-       .get_lesb               = bnx2fc_get_lesb,
+       .get_lesb               = fcoe_get_lesb,
        .rport_event_callback   = bnx2fc_rport_event_handler,
 };
 
index ef60afa..85ea98a 100644 (file)
@@ -347,7 +347,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
  * @port:              port structure pointer
  * @tgt:               bnx2fc_rport structure pointer
  */
-static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
                                        struct bnx2fc_rport *tgt)
 {
        struct kwqe *kwqe_arr[2];
@@ -759,8 +759,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
                case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
                        BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
                                   xid);
-                       memset(&io_req->err_entry, 0,
-                              sizeof(struct fcoe_err_report_entry));
                        memcpy(&io_req->err_entry, err_entry,
                               sizeof(struct fcoe_err_report_entry));
                        if (!test_bit(BNX2FC_FLAG_SRR_SENT,
@@ -847,8 +845,6 @@ ret_err_rqe:
                        goto ret_warn_rqe;
                }
 
-               memset(&io_req->err_entry, 0,
-                      sizeof(struct fcoe_err_report_entry));
                memcpy(&io_req->err_entry, err_entry,
                       sizeof(struct fcoe_err_report_entry));
 
@@ -1124,7 +1120,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
        struct bnx2fc_interface         *interface;
        u32                             conn_id;
        u32                             context_id;
-       int                             rc;
 
        conn_id = ofld_kcqe->fcoe_conn_id;
        context_id = ofld_kcqe->fcoe_conn_context_id;
@@ -1153,17 +1148,10 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
                                "resources\n");
                        set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
                }
-               goto ofld_cmpl_err;
        } else {
-
-               /* now enable the session */
-               rc = bnx2fc_send_session_enable_req(port, tgt);
-               if (rc) {
-                       printk(KERN_ERR PFX "enable session failed\n");
-                       goto ofld_cmpl_err;
-               }
+               /* FW offload request successfully completed */
+               set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
        }
-       return;
 ofld_cmpl_err:
        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
        wake_up_interruptible(&tgt->ofld_wait);
@@ -1210,15 +1198,9 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
                printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
                goto enbl_cmpl_err;
        }
-       if (ofld_kcqe->completion_status)
-               goto enbl_cmpl_err;
-       else {
+       if (!ofld_kcqe->completion_status)
                /* enable successful - rport ready for issuing IOs */
-               set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
-               set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
-               wake_up_interruptible(&tgt->ofld_wait);
-       }
-       return;
+               set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
 
 enbl_cmpl_err:
        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1251,6 +1233,7 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
                /* disable successful */
                BNX2FC_TGT_DBG(tgt, "disable successful\n");
                clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+               clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
                set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
                wake_up_interruptible(&tgt->upld_wait);
index 8d4626c..60798e8 100644 (file)
@@ -654,7 +654,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
        mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
                                                 &mp_req->mp_resp_bd_dma,
                                                 GFP_ATOMIC);
-       if (!mp_req->mp_req_bd) {
+       if (!mp_req->mp_resp_bd) {
                printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
                bnx2fc_free_mp_resc(io_req);
                return FAILED;
@@ -685,8 +685,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
 {
        struct fc_lport *lport;
-       struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-       struct fc_rport_libfc_priv *rp = rport->dd_data;
+       struct fc_rport *rport;
+       struct fc_rport_libfc_priv *rp;
        struct fcoe_port *port;
        struct bnx2fc_interface *interface;
        struct bnx2fc_rport *tgt;
@@ -704,6 +704,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
        unsigned long start = jiffies;
 
        lport = shost_priv(host);
+       rport = starget_to_rport(scsi_target(sc_cmd->device));
        port = lport_priv(lport);
        interface = port->priv;
 
@@ -712,6 +713,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
                rc = FAILED;
                goto tmf_err;
        }
+       rp = rport->dd_data;
 
        rc = fc_block_scsi_eh(sc_cmd);
        if (rc)
index b9d0d9c..c57a3bb 100644 (file)
@@ -33,6 +33,7 @@ static void bnx2fc_upld_timer(unsigned long data)
        BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
        /* fake upload completion */
        clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+       clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
        set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
        wake_up_interruptible(&tgt->upld_wait);
 }
@@ -55,10 +56,25 @@ static void bnx2fc_ofld_timer(unsigned long data)
         * resources are freed up in bnx2fc_offload_session
         */
        clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+       clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
        wake_up_interruptible(&tgt->ofld_wait);
 }
 
+static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
+{
+       setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+       mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+       wait_event_interruptible(tgt->ofld_wait,
+                                (test_bit(
+                                 BNX2FC_FLAG_OFLD_REQ_CMPL,
+                                 &tgt->flags)));
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&tgt->ofld_timer);
+}
+
 static void bnx2fc_offload_session(struct fcoe_port *port,
                                        struct bnx2fc_rport *tgt,
                                        struct fc_rport_priv *rdata)
@@ -103,17 +119,7 @@ retry_ofld:
         * wait for the session is offloaded and enabled. 3 Secs
         * should be ample time for this process to complete.
         */
-       setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
-       mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
-       wait_event_interruptible(tgt->ofld_wait,
-                                (test_bit(
-                                 BNX2FC_FLAG_OFLD_REQ_CMPL,
-                                 &tgt->flags)));
-       if (signal_pending(current))
-               flush_signals(current);
-
-       del_timer_sync(&tgt->ofld_timer);
+       bnx2fc_ofld_wait(tgt);
 
        if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
                if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
@@ -131,14 +137,23 @@ retry_ofld:
        }
        if (bnx2fc_map_doorbell(tgt)) {
                printk(KERN_ERR PFX "map doorbell failed - no mem\n");
-               /* upload will take care of cleaning up sess resc */
-               lport->tt.rport_logoff(rdata);
+               goto ofld_err;
        }
+       clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+       rval = bnx2fc_send_session_enable_req(port, tgt);
+       if (rval) {
+               pr_err(PFX "enable session failed\n");
+               goto ofld_err;
+       }
+       bnx2fc_ofld_wait(tgt);
+       if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
+               goto ofld_err;
        return;
 
 ofld_err:
        /* couldn't offload the session. log off from this rport */
        BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+       clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
        /* Free session resources */
        bnx2fc_free_session_resc(hba, tgt);
 tgt_init_err:
@@ -259,6 +274,19 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
        spin_unlock_bh(&tgt->tgt_lock);
 }
 
+static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
+{
+       setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+       mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+       wait_event_interruptible(tgt->upld_wait,
+                                (test_bit(
+                                 BNX2FC_FLAG_UPLD_REQ_COMPL,
+                                 &tgt->flags)));
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&tgt->upld_timer);
+}
+
 static void bnx2fc_upload_session(struct fcoe_port *port,
                                        struct bnx2fc_rport *tgt)
 {
@@ -279,19 +307,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
         * wait for upload to complete. 3 Secs
         * should be sufficient time for this process to complete.
         */
-       setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
-       mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
        BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
-       wait_event_interruptible(tgt->upld_wait,
-                                (test_bit(
-                                 BNX2FC_FLAG_UPLD_REQ_COMPL,
-                                 &tgt->flags)));
-
-       if (signal_pending(current))
-               flush_signals(current);
-
-       del_timer_sync(&tgt->upld_timer);
+       bnx2fc_upld_wait(tgt);
 
        /*
         * traverse thru the active_q and tmf_q and cleanup
@@ -308,24 +325,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
                bnx2fc_send_session_destroy_req(hba, tgt);
 
                /* wait for destroy to complete */
-               setup_timer(&tgt->upld_timer,
-                           bnx2fc_upld_timer, (unsigned long)tgt);
-               mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
-               wait_event_interruptible(tgt->upld_wait,
-                                        (test_bit(
-                                         BNX2FC_FLAG_UPLD_REQ_COMPL,
-                                         &tgt->flags)));
+               bnx2fc_upld_wait(tgt);
 
                if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
                        printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
 
                BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
                        tgt->flags);
-               if (signal_pending(current))
-                       flush_signals(current);
-
-               del_timer_sync(&tgt->upld_timer);
 
        } else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
                printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
@@ -381,7 +387,9 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
        tgt->rq_cons_idx = 0;
        atomic_set(&tgt->num_active_ios, 0);
 
-       if (rdata->flags & FC_RP_FLAGS_RETRY) {
+       if (rdata->flags & FC_RP_FLAGS_RETRY &&
+           rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
+           !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
                tgt->dev_type = TYPE_TAPE;
                tgt->io_timeout = 0; /* use default ULP timeout */
        } else {
@@ -479,7 +487,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
                tgt = (struct bnx2fc_rport *)&rp[1];
 
                /* This can happen when ADISC finds the same target */
-               if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+               if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
                        BNX2FC_TGT_DBG(tgt, "already offloaded\n");
                        mutex_unlock(&hba->hba_mutex);
                        return;
@@ -494,11 +502,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
                BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
                        hba->num_ofld_sess);
 
-               if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
-                       /*
-                        * Session is offloaded and enabled. Map
-                        * doorbell register for this target
-                        */
+               if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
+                       /* Session is offloaded and enabled.  */
                        BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
                        /* This counter is protected with hba mutex */
                        hba->num_ofld_sess++;
@@ -535,7 +540,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
                 */
                tgt = (struct bnx2fc_rport *)&rp[1];
 
-               if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+               if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
                        mutex_unlock(&hba->hba_mutex);
                        break;
                }
index 91eec60..a28b03e 100644 (file)
@@ -1317,7 +1317,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
                (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
        if (error_mask1) {
                iscsi_init2.error_bit_map[0] = error_mask1;
-               mask64 &= (u32)(~mask64);
+               mask64 ^= (u32)(mask64);
                mask64 |= error_mask1;
        } else
                iscsi_init2.error_bit_map[0] = (u32) mask64;
index a15474e..2a32374 100644 (file)
@@ -895,7 +895,7 @@ static int ch_probe(struct device *dev)
 {
        struct scsi_device *sd = to_scsi_device(dev);
        struct device *class_dev;
-       int minor, ret = -ENOMEM;
+       int ret;
        scsi_changer *ch;
 
        if (sd->type != TYPE_MEDIUM_CHANGER)
@@ -905,22 +905,19 @@ static int ch_probe(struct device *dev)
        if (NULL == ch)
                return -ENOMEM;
 
-       if (!idr_pre_get(&ch_index_idr, GFP_KERNEL))
-               goto free_ch;
-
+       idr_preload(GFP_KERNEL);
        spin_lock(&ch_index_lock);
-       ret = idr_get_new(&ch_index_idr, ch, &minor);
+       ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT);
        spin_unlock(&ch_index_lock);
+       idr_preload_end();
 
-       if (ret)
+       if (ret < 0) {
+               if (ret == -ENOSPC)
+                       ret = -ENODEV;
                goto free_ch;
-
-       if (minor > CH_MAX_DEVS) {
-               ret = -ENODEV;
-               goto remove_idr;
        }
 
-       ch->minor = minor;
+       ch->minor = ret;
        sprintf(ch->name,"ch%d",ch->minor);
 
        class_dev = device_create(ch_sysfs_class, dev,
@@ -944,7 +941,7 @@ static int ch_probe(struct device *dev)
 
        return 0;
 remove_idr:
-       idr_remove(&ch_index_idr, minor);
+       idr_remove(&ch_index_idr, ch->minor);
 free_ch:
        kfree(ch);
        return ret;
index 8ecdb94..bdd78fb 100644 (file)
@@ -2131,13 +2131,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
                value_to_add = 4 - (cf->size % 4);
 
        cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
-       if (cfg_data == NULL)
-               return -ENOMEM;
+       if (cfg_data == NULL) {
+               ret = -ENOMEM;
+               goto leave;
+       }
 
        memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
-
-       if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0)
-               return -EINVAL;
+       if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+               ret = -EINVAL;
+               goto leave;
+       }
 
        mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
        maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
@@ -2149,9 +2152,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
                strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
        }
 
+leave:
        kfree(cfg_data);
        release_firmware(cf);
-
        return ret;
 }
 
index b42cbbd..0604b5f 100644 (file)
@@ -60,18 +60,11 @@ static struct scsi_transport_template *csio_fcoe_transport_vport;
 /*
  * debugfs support
  */
-static int
-csio_mem_open(struct inode *inode, struct file *file)
-{
-       file->private_data = inode->i_private;
-       return 0;
-}
-
 static ssize_t
 csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
        loff_t pos = *ppos;
-       loff_t avail = file->f_path.dentry->d_inode->i_size;
+       loff_t avail = file_inode(file)->i_size;
        unsigned int mem = (uintptr_t)file->private_data & 3;
        struct csio_hw *hw = file->private_data - mem;
 
@@ -110,7 +103,7 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
 static const struct file_operations csio_mem_debugfs_fops = {
        .owner   = THIS_MODULE,
-       .open    = csio_mem_open,
+       .open    = simple_open,
        .read    = csio_mem_read,
        .llseek  = default_llseek,
 };
index f924b3c..3fecf35 100644 (file)
@@ -1564,6 +1564,7 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
                break;
        case CXGB4_STATE_DETACH:
                pr_info("cdev 0x%p, DETACH.\n", cdev);
+               cxgbi_device_unregister(cdev);
                break;
        default:
                pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
index 865c64f..fed486b 100644 (file)
@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
        dcb->max_command = 1;
        dcb->target_id = target;
        dcb->target_lun = lun;
+       dcb->dev_mode = eeprom->target[target].cfg0;
 #ifndef DC395x_NO_DISCONNECT
        dcb->identify_msg =
            IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
 #else
        dcb->identify_msg = IDENTIFY(0, lun);
 #endif
-       dcb->dev_mode = eeprom->target[target].cfg0;
        dcb->inquiry7 = 0;
        dcb->sync_mode = 0;
        dcb->min_nego_period = clock_period[period_index];
index b4f6c9a..b6e2700 100644 (file)
@@ -2161,7 +2161,7 @@ static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
        struct inode *inode;
        long ret;
  
-       inode = file->f_dentry->d_inode;
+       inode = file_inode(file);
  
        mutex_lock(&adpt_mutex);
        ret = adpt_ioctl(inode, file, cmd, arg);
@@ -2177,7 +2177,7 @@ static long compat_adpt_ioctl(struct file *file,
        struct inode *inode;
        long ret;
  
-       inode = file->f_dentry->d_inode;
+       inode = file_inode(file);
  
        mutex_lock(&adpt_mutex);
  
index 666b7ac..b5d92fc 100644 (file)
@@ -82,11 +82,11 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *,
                    struct packet_type *, struct net_device *);
 static int fcoe_percpu_receive_thread(void *);
 static void fcoe_percpu_clean(struct fc_lport *);
-static int fcoe_link_speed_update(struct fc_lport *);
 static int fcoe_link_ok(struct fc_lport *);
 
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
 static int fcoe_hostlist_add(const struct fc_lport *);
+static void fcoe_hostlist_del(const struct fc_lport *);
 
 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
 static void fcoe_dev_setup(void);
@@ -117,6 +117,11 @@ static int fcoe_destroy(struct net_device *netdev);
 static int fcoe_enable(struct net_device *netdev);
 static int fcoe_disable(struct net_device *netdev);
 
+/* fcoe_sysfs control interface handlers */
+static int fcoe_ctlr_alloc(struct net_device *netdev);
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
+
 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
                                      u32 did, struct fc_frame *,
                                      unsigned int op,
@@ -126,8 +131,6 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
                                      void *, u32 timeout);
 static void fcoe_recv_frame(struct sk_buff *skb);
 
-static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-
 /* notification function for packets from net device */
 static struct notifier_block fcoe_notifier = {
        .notifier_call = fcoe_device_notification,
@@ -151,11 +154,11 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
 static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
 
 static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
-       .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+       .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode,
+       .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled,
        .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
        .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
        .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
@@ -1112,10 +1115,17 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
        port = lport_priv(lport);
        port->lport = lport;
        port->priv = fcoe;
+       port->get_netdev = fcoe_netdev;
        port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
        port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
        INIT_WORK(&port->destroy_work, fcoe_destroy_work);
 
+       /*
+        * Need to add the lport to the hostlist
+        * so we catch NETDEV_CHANGE events.
+        */
+       fcoe_hostlist_add(lport);
+
        /* configure a fc_lport including the exchange manager */
        rc = fcoe_lport_config(lport);
        if (rc) {
@@ -1187,6 +1197,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
 out_lp_destroy:
        fc_exch_mgr_free(lport);
 out_host_put:
+       fcoe_hostlist_del(lport);
        scsi_host_put(lport->host);
 out:
        return ERR_PTR(rc);
@@ -1964,6 +1975,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
 static int fcoe_device_notification(struct notifier_block *notifier,
                                    ulong event, void *ptr)
 {
+       struct fcoe_ctlr_device *cdev;
        struct fc_lport *lport = NULL;
        struct net_device *netdev = ptr;
        struct fcoe_ctlr *ctlr;
@@ -2020,13 +2032,29 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 
        fcoe_link_speed_update(lport);
 
-       if (link_possible && !fcoe_link_ok(lport))
-               fcoe_ctlr_link_up(ctlr);
-       else if (fcoe_ctlr_link_down(ctlr)) {
-               stats = per_cpu_ptr(lport->stats, get_cpu());
-               stats->LinkFailureCount++;
-               put_cpu();
-               fcoe_clean_pending_queue(lport);
+       cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+       if (link_possible && !fcoe_link_ok(lport)) {
+               switch (cdev->enabled) {
+               case FCOE_CTLR_DISABLED:
+                       pr_info("Link up while interface is disabled.\n");
+                       break;
+               case FCOE_CTLR_ENABLED:
+               case FCOE_CTLR_UNUSED:
+                       fcoe_ctlr_link_up(ctlr);
+               };
+       } else if (fcoe_ctlr_link_down(ctlr)) {
+               switch (cdev->enabled) {
+               case FCOE_CTLR_DISABLED:
+                       pr_info("Link down while interface is disabled.\n");
+                       break;
+               case FCOE_CTLR_ENABLED:
+               case FCOE_CTLR_UNUSED:
+                       stats = per_cpu_ptr(lport->stats, get_cpu());
+                       stats->LinkFailureCount++;
+                       put_cpu();
+                       fcoe_clean_pending_queue(lport);
+               };
        }
 out:
        return rc;
@@ -2039,6 +2067,8 @@ out:
  * Called from fcoe transport.
  *
  * Returns: 0 for success
+ *
+ * Deprecated: use fcoe_ctlr_enabled()
  */
 static int fcoe_disable(struct net_device *netdev)
 {
@@ -2097,6 +2127,33 @@ out:
        return rc;
 }
 
+/**
+ * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+       struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+       struct fc_lport *lport = ctlr->lp;
+       struct net_device *netdev = fcoe_netdev(lport);
+
+       switch (cdev->enabled) {
+       case FCOE_CTLR_ENABLED:
+               return fcoe_enable(netdev);
+       case FCOE_CTLR_DISABLED:
+               return fcoe_disable(netdev);
+       case FCOE_CTLR_UNUSED:
+       default:
+               return -ENOTSUPP;
+       };
+}
+
 /**
  * fcoe_destroy() - Destroy a FCoE interface
  * @netdev  : The net_device object the Ethernet interface to create on
@@ -2139,8 +2196,31 @@ static void fcoe_destroy_work(struct work_struct *work)
 {
        struct fcoe_port *port;
        struct fcoe_interface *fcoe;
+       struct Scsi_Host *shost;
+       struct fc_host_attrs *fc_host;
+       unsigned long flags;
+       struct fc_vport *vport;
+       struct fc_vport *next_vport;
 
        port = container_of(work, struct fcoe_port, destroy_work);
+       shost = port->lport->host;
+       fc_host = shost_to_fc_host(shost);
+
+       /* Loop through all the vports and mark them for deletion */
+       spin_lock_irqsave(shost->host_lock, flags);
+       list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+               if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+                       continue;
+               } else {
+                       vport->flags |= FC_VPORT_DELETING;
+                       queue_work(fc_host_work_q(shost),
+                                  &vport->vport_delete_work);
+               }
+       }
+       spin_unlock_irqrestore(shost->host_lock, flags);
+
+       flush_workqueue(fc_host_work_q(shost));
+
        mutex_lock(&fcoe_config_mutex);
 
        fcoe = port->priv;
@@ -2204,16 +2284,26 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 #endif
 }
 
+enum fcoe_create_link_state {
+       FCOE_CREATE_LINK_DOWN,
+       FCOE_CREATE_LINK_UP,
+};
+
 /**
- * fcoe_create() - Create a fcoe interface
- * @netdev  : The net_device object the Ethernet interface to create on
- * @fip_mode: The FIP mode for this creation
+ * _fcoe_create() - (internal) Create a fcoe interface
+ * @netdev  :   The net_device object the Ethernet interface to create on
+ * @fip_mode:   The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
  *
- * Called from fcoe transport
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
  *
- * Returns: 0 for success
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
  */
-static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
+                       enum fcoe_create_link_state link_state)
 {
        int rc = 0;
        struct fcoe_ctlr_device *ctlr_dev;
@@ -2254,13 +2344,29 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        /* setup DCB priority attributes. */
        fcoe_dcb_create(fcoe);
 
-       /* add to lports list */
-       fcoe_hostlist_add(lport);
-
        /* start FIP Discovery and FLOGI */
        lport->boot_time = jiffies;
        fc_fabric_login(lport);
-       if (!fcoe_link_ok(lport)) {
+
+       /*
+        * If the fcoe_ctlr_device is to be set to DISABLED
+        * it must be done after the lport is added to the
+        * hostlist, but before the rtnl_lock is released.
+        * This is because the rtnl_lock protects the
+        * hostlist that fcoe_device_notification uses. If
+        * the FCoE Controller is intended to be created
+        * DISABLED then 'enabled' needs to be considered
+        * handling link events. 'enabled' must be set
+        * before the lport can be found in the hostlist
+        * when a link up event is received.
+        */
+       if (link_state == FCOE_CREATE_LINK_UP)
+               ctlr_dev->enabled = FCOE_CTLR_ENABLED;
+       else
+               ctlr_dev->enabled = FCOE_CTLR_DISABLED;
+
+       if (link_state == FCOE_CREATE_LINK_UP &&
+           !fcoe_link_ok(lport)) {
                rtnl_unlock();
                fcoe_ctlr_link_up(ctlr);
                mutex_unlock(&fcoe_config_mutex);
@@ -2275,37 +2381,34 @@ out_nortnl:
 }
 
 /**
- * fcoe_link_speed_update() - Update the supported and actual link speeds
- * @lport: The local port to update speeds for
+ * fcoe_create() - Create a fcoe interface
+ * @netdev  : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+       return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP);
+}
+
+/**
+ * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
  *
- * Returns: 0 if the ethtool query was successful
- *          -1 if the ethtool query failed
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
  */
-static int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_ctlr_alloc(struct net_device *netdev)
 {
-       struct net_device *netdev = fcoe_netdev(lport);
-       struct ethtool_cmd ecmd;
-
-       if (!__ethtool_get_settings(netdev, &ecmd)) {
-               lport->link_supported_speeds &=
-                       ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
-               if (ecmd.supported & (SUPPORTED_1000baseT_Half |
-                                     SUPPORTED_1000baseT_Full))
-                       lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
-               if (ecmd.supported & SUPPORTED_10000baseT_Full)
-                       lport->link_supported_speeds |=
-                               FC_PORTSPEED_10GBIT;
-               switch (ethtool_cmd_speed(&ecmd)) {
-               case SPEED_1000:
-                       lport->link_speed = FC_PORTSPEED_1GBIT;
-                       break;
-               case SPEED_10000:
-                       lport->link_speed = FC_PORTSPEED_10GBIT;
-                       break;
-               }
-               return 0;
-       }
-       return -1;
+       return _fcoe_create(netdev, FIP_MODE_FABRIC,
+                           FCOE_CREATE_LINK_DOWN);
 }
 
 /**
@@ -2375,10 +2478,13 @@ static int fcoe_reset(struct Scsi_Host *shost)
        struct fcoe_port *port = lport_priv(lport);
        struct fcoe_interface *fcoe = port->priv;
        struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+       struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
 
        fcoe_ctlr_link_down(ctlr);
        fcoe_clean_pending_queue(ctlr->lp);
-       if (!fcoe_link_ok(ctlr->lp))
+
+       if (cdev->enabled != FCOE_CTLR_DISABLED &&
+           !fcoe_link_ok(ctlr->lp))
                fcoe_ctlr_link_up(ctlr);
        return 0;
 }
@@ -2445,12 +2551,31 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
        return 0;
 }
 
+/**
+ * fcoe_hostlist_del() - Remove the FCoE interface identified by a local
+ *                      port from the hostlist
+ * @lport: The local port that identifies the FCoE interface to be removed
+ *
+ * Locking: must be called with the RTNL mutex held
+ *
+ */
+static void fcoe_hostlist_del(const struct fc_lport *lport)
+{
+       struct fcoe_interface *fcoe;
+       struct fcoe_port *port;
+
+       port = lport_priv(lport);
+       fcoe = port->priv;
+       list_del(&fcoe->list);
+       return;
+}
 
 static struct fcoe_transport fcoe_sw_transport = {
        .name = {FCOE_TRANSPORT_DEFAULT},
        .attached = false,
        .list = LIST_HEAD_INIT(fcoe_sw_transport.list),
        .match = fcoe_match,
+       .alloc = fcoe_ctlr_alloc,
        .create = fcoe_create,
        .destroy = fcoe_destroy,
        .enable = fcoe_enable,
@@ -2534,9 +2659,9 @@ static void __exit fcoe_exit(void)
        /* releases the associated fcoe hosts */
        rtnl_lock();
        list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
-               list_del(&fcoe->list);
                ctlr = fcoe_to_ctlr(fcoe);
                port = lport_priv(ctlr->lp);
+               fcoe_hostlist_del(port->lport);
                queue_work(fcoe_wq, &port->destroy_work);
        }
        rtnl_unlock();
@@ -2776,43 +2901,6 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
                             NULL, NULL, 3 * lport->r_a_tov);
 }
 
-/**
- * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void fcoe_get_lesb(struct fc_lport *lport,
-                        struct fc_els_lesb *fc_lesb)
-{
-       struct net_device *netdev = fcoe_netdev(lport);
-
-       __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
-       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
-       struct net_device *netdev = fcoe_netdev(fip->lp);
-       struct fcoe_fc_els_lesb *fcoe_lesb;
-       struct fc_els_lesb fc_lesb;
-
-       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
-       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
-       ctlr_dev->lesb.lesb_link_fail =
-               ntohl(fcoe_lesb->lesb_link_fail);
-       ctlr_dev->lesb.lesb_vlink_fail =
-               ntohl(fcoe_lesb->lesb_vlink_fail);
-       ctlr_dev->lesb.lesb_miss_fka =
-               ntohl(fcoe_lesb->lesb_miss_fka);
-       ctlr_dev->lesb.lesb_symb_err =
-               ntohl(fcoe_lesb->lesb_symb_err);
-       ctlr_dev->lesb.lesb_err_block =
-               ntohl(fcoe_lesb->lesb_err_block);
-       ctlr_dev->lesb.lesb_fcs_error =
-               ntohl(fcoe_lesb->lesb_fcs_error);
-}
-
 static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
 {
        struct fcoe_ctlr_device *ctlr_dev =
index b42dc32..2b53672 100644 (file)
@@ -55,12 +55,12 @@ do {                                                                \
 
 #define FCOE_DBG(fmt, args...)                                         \
        FCOE_CHECK_LOGGING(FCOE_LOGGING,                                \
-                          printk(KERN_INFO "fcoe: " fmt, ##args);)
+                          pr_info("fcoe: " fmt, ##args);)
 
 #define FCOE_NETDEV_DBG(netdev, fmt, args...)                  \
        FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING,                 \
-                          printk(KERN_INFO "fcoe: %s: " fmt,   \
-                                 netdev->name, ##args);)
+                          pr_info("fcoe: %s: " fmt,            \
+                                  netdev->name, ##args);)
 
 /**
  * struct fcoe_interface - A FCoE interface
index 4a909d7..08c3bc3 100644 (file)
@@ -1291,8 +1291,16 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 
        LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
 
-       if (!fcf || !lport->port_id)
+       if (!fcf || !lport->port_id) {
+               /*
+                * We are yet to select best FCF, but we got CVL in the
+                * meantime. reset the ctlr and let it rediscover the FCF
+                */
+               mutex_lock(&fip->ctlr_mutex);
+               fcoe_ctlr_reset(fip);
+               mutex_unlock(&fip->ctlr_mutex);
                return;
+       }
 
        /*
         * mask of required descriptors.  Validating each one clears its bit.
@@ -1551,15 +1559,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
                                fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
                                fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
                                fcf->flogi_sent, fcf->pri);
-               if (fcf->fabric_name != first->fabric_name ||
-                   fcf->vfid != first->vfid ||
-                   fcf->fc_map != first->fc_map) {
-                       LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
-                                       "or FC-MAP\n");
-                       return NULL;
-               }
-               if (fcf->flogi_sent)
-                       continue;
                if (!fcoe_ctlr_fcf_usable(fcf)) {
                        LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
                                        "map %x %svalid %savailable\n",
@@ -1569,6 +1568,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
                                        "" : "un");
                        continue;
                }
+               if (fcf->fabric_name != first->fabric_name ||
+                   fcf->vfid != first->vfid ||
+                   fcf->fc_map != first->fc_map) {
+                       LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+                                       "or FC-MAP\n");
+                       return NULL;
+               }
+               if (fcf->flogi_sent)
+                       continue;
                if (!best || fcf->pri < best->pri || best->flogi_sent)
                        best = fcf;
        }
@@ -2864,22 +2872,21 @@ void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
 }
 EXPORT_SYMBOL(fcoe_fcf_get_selected);
 
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
 {
        struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
 
        mutex_lock(&ctlr->ctlr_mutex);
-       switch (ctlr->mode) {
-       case FIP_MODE_FABRIC:
-               ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
-               break;
-       case FIP_MODE_VN2VN:
-               ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+       switch (ctlr_dev->mode) {
+       case FIP_CONN_TYPE_VN2VN:
+               ctlr->mode = FIP_MODE_VN2VN;
                break;
+       case FIP_CONN_TYPE_FABRIC:
        default:
-               ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+               ctlr->mode = FIP_MODE_FABRIC;
                break;
        }
+
        mutex_unlock(&ctlr->ctlr_mutex);
 }
-EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
+EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
index 5e75168..8c05ae0 100644 (file)
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
+#include <linux/ctype.h>
 
 #include <scsi/fcoe_sysfs.h>
+#include <scsi/libfcoe.h>
+
+/*
+ * OK to include local libfcoe.h for debug_logging, but cannot include
+ * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would
+ * have to include more than fcoe_sysfs.h.
+ */
+#include "libfcoe.h"
 
 static atomic_t ctlr_num;
 static atomic_t fcf_num;
@@ -71,6 +80,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo,
        ((x)->lesb.lesb_err_block)
 #define fcoe_ctlr_fcs_error(x)                 \
        ((x)->lesb.lesb_fcs_error)
+#define fcoe_ctlr_enabled(x)                   \
+       ((x)->enabled)
 #define fcoe_fcf_state(x)                      \
        ((x)->state)
 #define fcoe_fcf_fabric_name(x)                        \
@@ -210,25 +221,34 @@ static ssize_t show_fcoe_fcf_device_##field(struct device *dev,   \
 #define fcoe_enum_name_search(title, table_type, table)                        \
 static const char *get_fcoe_##title##_name(enum table_type table_key)  \
 {                                                                      \
-       int i;                                                          \
-       char *name = NULL;                                              \
-                                                                       \
-       for (i = 0; i < ARRAY_SIZE(table); i++) {                       \
-               if (table[i].value == table_key) {                      \
-                       name = table[i].name;                           \
-                       break;                                          \
-               }                                                       \
-       }                                                               \
-       return name;                                                    \
+       if (table_key < 0 || table_key >= ARRAY_SIZE(table))            \
+               return NULL;                                            \
+       return table[table_key];                                        \
+}
+
+static char *fip_conn_type_names[] = {
+       [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
+       [ FIP_CONN_TYPE_FABRIC ]  = "Fabric",
+       [ FIP_CONN_TYPE_VN2VN ]   = "VN2VN",
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+
+static enum fip_conn_type fcoe_parse_mode(const char *buf)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
+               if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
+                       return i;
+       }
+
+       return FIP_CONN_TYPE_UNKNOWN;
 }
 
-static struct {
-       enum fcf_state value;
-       char           *name;
-} fcf_state_names[] = {
-       { FCOE_FCF_STATE_UNKNOWN,      "Unknown" },
-       { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
-       { FCOE_FCF_STATE_CONNECTED,    "Connected" },
+static char *fcf_state_names[] = {
+       [ FCOE_FCF_STATE_UNKNOWN ]      = "Unknown",
+       [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
+       [ FCOE_FCF_STATE_CONNECTED ]    = "Connected",
 };
 fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
 #define FCOE_FCF_STATE_MAX_NAMELEN 50
@@ -246,17 +266,7 @@ static ssize_t show_fcf_state(struct device *dev,
 }
 static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
 
-static struct {
-       enum fip_conn_type value;
-       char               *name;
-} fip_conn_type_names[] = {
-       { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
-       { FIP_CONN_TYPE_FABRIC, "Fabric" },
-       { FIP_CONN_TYPE_VN2VN, "VN2VN" },
-};
-fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
-#define FCOE_CTLR_MODE_MAX_NAMELEN 50
-
+#define FCOE_MAX_MODENAME_LEN 20
 static ssize_t show_ctlr_mode(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
@@ -264,17 +274,116 @@ static ssize_t show_ctlr_mode(struct device *dev,
        struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
        const char *name;
 
-       if (ctlr->f->get_fcoe_ctlr_mode)
-               ctlr->f->get_fcoe_ctlr_mode(ctlr);
-
        name = get_fcoe_ctlr_mode_name(ctlr->mode);
        if (!name)
                return -EINVAL;
-       return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+       return snprintf(buf, FCOE_MAX_MODENAME_LEN,
+                       "%s\n", name);
+}
+
+static ssize_t store_ctlr_mode(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       char mode[FCOE_MAX_MODENAME_LEN + 1];
+
+       if (count > FCOE_MAX_MODENAME_LEN)
+               return -EINVAL;
+
+       strncpy(mode, buf, count);
+
+       if (mode[count - 1] == '\n')
+               mode[count - 1] = '\0';
+       else
+               mode[count] = '\0';
+
+       switch (ctlr->enabled) {
+       case FCOE_CTLR_ENABLED:
+               LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+               return -EBUSY;
+       case FCOE_CTLR_DISABLED:
+               if (!ctlr->f->set_fcoe_ctlr_mode) {
+                       LIBFCOE_SYSFS_DBG(ctlr,
+                                         "Mode change not supported by LLD.");
+                       return -ENOTSUPP;
+               }
+
+               ctlr->mode = fcoe_parse_mode(mode);
+               if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+                       LIBFCOE_SYSFS_DBG(ctlr,
+                                         "Unknown mode %s provided.", buf);
+                       return -EINVAL;
+               }
+
+               ctlr->f->set_fcoe_ctlr_mode(ctlr);
+               LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+
+               return count;
+       case FCOE_CTLR_UNUSED:
+       default:
+               LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+               return -ENOTSUPP;
+       };
+}
+
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
+                       show_ctlr_mode, store_ctlr_mode);
+
+static ssize_t store_ctlr_enabled(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       int rc;
+
+       switch (ctlr->enabled) {
+       case FCOE_CTLR_ENABLED:
+               if (*buf == '1')
+                       return count;
+               ctlr->enabled = FCOE_CTLR_DISABLED;
+               break;
+       case FCOE_CTLR_DISABLED:
+               if (*buf == '0')
+                       return count;
+               ctlr->enabled = FCOE_CTLR_ENABLED;
+               break;
+       case FCOE_CTLR_UNUSED:
+               return -ENOTSUPP;
+       };
+
+       rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
+       if (rc)
+               return rc;
+
+       return count;
+}
+
+static char *ctlr_enabled_state_names[] = {
+       [ FCOE_CTLR_ENABLED ]  = "1",
+       [ FCOE_CTLR_DISABLED ] = "0",
+};
+fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
+                     ctlr_enabled_state_names)
+#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_enabled_state(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       const char *name;
+
+       name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
+       if (!name)
+               return -EINVAL;
+       return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
                        "%s\n", name);
 }
-static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
-                       show_ctlr_mode, NULL);
+
+static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
+                       show_ctlr_enabled_state,
+                       store_ctlr_enabled);
 
 static ssize_t
 store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
@@ -359,6 +468,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
 
 static struct attribute *fcoe_ctlr_attrs[] = {
        &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+       &device_attr_fcoe_ctlr_enabled.attr,
        &device_attr_fcoe_ctlr_mode.attr,
        NULL,
 };
@@ -443,9 +553,16 @@ struct device_type fcoe_fcf_device_type = {
        .release = fcoe_fcf_device_release,
 };
 
+struct bus_attribute fcoe_bus_attr_group[] = {
+       __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store),
+       __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store),
+       __ATTR_NULL
+};
+
 struct bus_type fcoe_bus_type = {
        .name = "fcoe",
        .match = &fcoe_bus_match,
+       .bus_attrs = fcoe_bus_attr_group,
 };
 
 /**
@@ -566,6 +683,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
 
        ctlr->id = atomic_inc_return(&ctlr_num) - 1;
        ctlr->f = f;
+       ctlr->mode = FIP_CONN_TYPE_FABRIC;
        INIT_LIST_HEAD(&ctlr->fcfs);
        mutex_init(&ctlr->lock);
        ctlr->dev.parent = parent;
index ac76d8a..f3a5a53 100644 (file)
@@ -83,6 +83,50 @@ static struct notifier_block libfcoe_notifier = {
        .notifier_call = libfcoe_device_notification,
 };
 
+/**
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
+ *
+ * Returns: 0 if the ethtool query was successful
+ *          -1 if the ethtool query failed
+ */
+int fcoe_link_speed_update(struct fc_lport *lport)
+{
+       struct net_device *netdev = fcoe_get_netdev(lport);
+       struct ethtool_cmd ecmd;
+
+       if (!__ethtool_get_settings(netdev, &ecmd)) {
+               lport->link_supported_speeds &=
+                       ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+               if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+                                     SUPPORTED_1000baseT_Full))
+                       lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+               if (ecmd.supported & SUPPORTED_10000baseT_Full)
+                       lport->link_supported_speeds |=
+                               FC_PORTSPEED_10GBIT;
+               switch (ethtool_cmd_speed(&ecmd)) {
+               case SPEED_1000:
+                       lport->link_speed = FC_PORTSPEED_1GBIT;
+                       break;
+               case SPEED_10000:
+                       lport->link_speed = FC_PORTSPEED_10GBIT;
+                       break;
+               }
+               return 0;
+       }
+       return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_speed_update);
+
+/**
+ * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport
+ * @lport: The local port to update speeds for
+ * @fc_lesb: Pointer to the LESB to be filled up
+ * @netdev: Pointer to the netdev that is associated with the lport
+ *
+ * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6
+ * Clause 7.11 in v1.04.
+ */
 void __fcoe_get_lesb(struct fc_lport *lport,
                     struct fc_els_lesb *fc_lesb,
                     struct net_device *netdev)
@@ -112,6 +156,51 @@ void __fcoe_get_lesb(struct fc_lport *lport,
 }
 EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
 
/**
 * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
 * @lport: the local port
 * @fc_lesb: the link error status block
 */
void fcoe_get_lesb(struct fc_lport *lport,
			 struct fc_els_lesb *fc_lesb)
{
	/* Resolve the underlying netdev and delegate to the common helper. */
	__fcoe_get_lesb(lport, fc_lesb, fcoe_get_netdev(lport));
}
EXPORT_SYMBOL_GPL(fcoe_get_lesb);
+
+/**
+ * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given
+ * fcoe controller device
+ * @ctlr_dev: The given fcoe controller device
+ *
+ */
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+       struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+       struct net_device *netdev = fcoe_get_netdev(fip->lp);
+       struct fcoe_fc_els_lesb *fcoe_lesb;
+       struct fc_els_lesb fc_lesb;
+
+       __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+       fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+       ctlr_dev->lesb.lesb_link_fail =
+               ntohl(fcoe_lesb->lesb_link_fail);
+       ctlr_dev->lesb.lesb_vlink_fail =
+               ntohl(fcoe_lesb->lesb_vlink_fail);
+       ctlr_dev->lesb.lesb_miss_fka =
+               ntohl(fcoe_lesb->lesb_miss_fka);
+       ctlr_dev->lesb.lesb_symb_err =
+               ntohl(fcoe_lesb->lesb_symb_err);
+       ctlr_dev->lesb.lesb_err_block =
+               ntohl(fcoe_lesb->lesb_err_block);
+       ctlr_dev->lesb.lesb_fcs_error =
+               ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb);
+
 void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
 {
        u8 wwpn[8];
@@ -627,6 +716,110 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
        return NOTIFY_OK;
 }
 
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+                              const char *buf, size_t count)
+{
+       struct net_device *netdev = NULL;
+       struct fcoe_transport *ft = NULL;
+       struct fcoe_ctlr_device *ctlr_dev = NULL;
+       int rc = 0;
+       int err;
+
+       mutex_lock(&ft_mutex);
+
+       netdev = fcoe_if_to_netdev(buf);
+       if (!netdev) {
+               LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf);
+               rc = -ENODEV;
+               goto out_nodev;
+       }
+
+       ft = fcoe_netdev_map_lookup(netdev);
+       if (ft) {
+               LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+                                     "FCoE instance on %s.\n",
+                                     ft->name, netdev->name);
+               rc = -EEXIST;
+               goto out_putdev;
+       }
+
+       ft = fcoe_transport_lookup(netdev);
+       if (!ft) {
+               LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+                                     netdev->name);
+               rc = -ENODEV;
+               goto out_putdev;
+       }
+
+       /* pass to transport create */
+       err = ft->alloc ? ft->alloc(netdev) : -ENODEV;
+       if (err) {
+               fcoe_del_netdev_mapping(netdev);
+               rc = -ENOMEM;
+               goto out_putdev;
+       }
+
+       err = fcoe_add_netdev_mapping(netdev, ft);
+       if (err) {
+               LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+                                     "for FCoE transport %s for %s.\n",
+                                     ft->name, netdev->name);
+               rc = -ENODEV;
+               goto out_putdev;
+       }
+
+       LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+                             ft->name, (ctlr_dev) ? "succeeded" : "failed",
+                             netdev->name);
+
+out_putdev:
+       dev_put(netdev);
+out_nodev:
+       mutex_unlock(&ft_mutex);
+       if (rc)
+               return rc;
+       return count;
+}
+
/**
 * fcoe_ctlr_destroy_store() - Handle a write to the fcoe bus "ctlr_destroy"
 *			       attribute
 * @bus:   the fcoe bus
 * @buf:   the interface name whose FCoE instance should be destroyed
 * @count: number of bytes written
 *
 * Returns: @count on success, a negative errno otherwise.
 */
ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
				const char *buf, size_t count)
{
	int rc = -ENODEV;
	struct net_device *netdev = NULL;
	struct fcoe_transport *ft = NULL;

	mutex_lock(&ft_mutex);

	netdev = fcoe_if_to_netdev(buf);
	if (!netdev) {
		LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf);
		goto out_nodev;
	}

	ft = fcoe_netdev_map_lookup(netdev);
	if (!ft) {
		LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
				      netdev->name);
		goto out_putdev;
	}

	/* pass to transport destroy */
	rc = ft->destroy(netdev);
	if (rc)
		goto out_putdev;

	fcoe_del_netdev_mapping(netdev);
	/* rc is 0 on this path, so the message always reads "succeeded". */
	LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
			      ft->name, (rc) ? "failed" : "succeeded",
			      netdev->name);
	rc = count; /* required for successful return */
out_putdev:
	dev_put(netdev);
out_nodev:
	mutex_unlock(&ft_mutex);
	return rc;
}
EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
 
 /**
  * fcoe_transport_create() - Create a fcoe interface
@@ -769,11 +962,7 @@ out_putdev:
        dev_put(netdev);
 out_nodev:
        mutex_unlock(&ft_mutex);
-
-       if (rc == -ERESTARTSYS)
-               return restart_syscall();
-       else
-               return rc;
+       return rc;
 }
 
 /**
index 6af5fc3..d3bb16d 100644 (file)
@@ -2,9 +2,10 @@
 #define _FCOE_LIBFCOE_H_
 
 extern unsigned int libfcoe_debug_logging;
-#define LIBFCOE_LOGGING            0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-#define LIBFCOE_TRANSPORT_LOGGING      0x04 /* FCoE transport logging */
+#define LIBFCOE_LOGGING                  0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING       0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_SYSFS_LOGGING     0x08 /* fcoe_sysfs logging */
 
 #define LIBFCOE_CHECK_LOGGING(LEVEL, CMD)              \
 do {                                                   \
@@ -16,16 +17,19 @@ do {                                                        \
 
 #define LIBFCOE_DBG(fmt, args...)                                      \
        LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING,                          \
-                             printk(KERN_INFO "libfcoe: " fmt, ##args);)
+                             pr_info("libfcoe: " fmt, ##args);)
 
 #define LIBFCOE_FIP_DBG(fip, fmt, args...)                             \
        LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING,                      \
-                             printk(KERN_INFO "host%d: fip: " fmt,     \
-                                    (fip)->lp->host->host_no, ##args);)
+                             pr_info("host%d: fip: " fmt,              \
+                                     (fip)->lp->host->host_no, ##args);)
 
 #define LIBFCOE_TRANSPORT_DBG(fmt, args...)                            \
        LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING,                \
-                             printk(KERN_INFO "%s: " fmt,              \
-                                    __func__, ##args);)
+                             pr_info("%s: " fmt, __func__, ##args);)
+
+#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...)                          \
+       LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING,                    \
+                             pr_info("ctlr_%d: " fmt, cdev->id, ##args);)
 
 #endif /* _FCOE_LIBFCOE_H_ */
index 37c3440..383598f 100644 (file)
@@ -7,6 +7,8 @@ fnic-y  := \
        fnic_res.o \
        fnic_fcs.o \
        fnic_scsi.o \
+       fnic_trace.o \
+       fnic_debugfs.o \
        vnic_cq.o \
        vnic_dev.o \
        vnic_intr.o \
index 95a5ba2..98436c3 100644 (file)
@@ -26,6 +26,7 @@
 #include <scsi/libfcoe.h>
 #include "fnic_io.h"
 #include "fnic_res.h"
+#include "fnic_trace.h"
 #include "vnic_dev.h"
 #include "vnic_wq.h"
 #include "vnic_rq.h"
 #define FNIC_TAG_MASK          (BIT(24) - 1)   /* mask for lookup */
 #define FNIC_NO_TAG             -1
 
+/*
+ * Command flags to identify the type of command and for other future
+ * use.
+ */
+#define FNIC_NO_FLAGS                   0
+#define FNIC_IO_INITIALIZED             BIT(0)
+#define FNIC_IO_ISSUED                  BIT(1)
+#define FNIC_IO_DONE                    BIT(2)
+#define FNIC_IO_REQ_NULL                BIT(3)
+#define FNIC_IO_ABTS_PENDING            BIT(4)
+#define FNIC_IO_ABORTED                 BIT(5)
+#define FNIC_IO_ABTS_ISSUED             BIT(6)
+#define FNIC_IO_TERM_ISSUED             BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED    BIT(8)
+#define FNIC_IO_ABT_TERM_DONE           BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL       BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT      BIT(11)
+#define FNIC_DEVICE_RESET               BIT(12)  /* Device reset request */
+#define FNIC_DEV_RST_ISSUED             BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT          BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED        BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED        BIT(16)
+#define FNIC_DEV_RST_DONE               BIT(17)
+#define FNIC_DEV_RST_REQ_NULL           BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE          BIT(19)
+#define FNIC_DEV_RST_TERM_DONE          BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING       BIT(21)
+
 /*
  * Usage of the scsi_cmnd scratchpad.
  * These fields are locked by the hashed io_req_lock.
@@ -64,6 +93,7 @@
 #define CMD_ABTS_STATUS(Cmnd)  ((Cmnd)->SCp.Message)
 #define CMD_LR_STATUS(Cmnd)    ((Cmnd)->SCp.have_data_in)
 #define CMD_TAG(Cmnd)           ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd)         ((Cmnd)->SCp.Status)
 
 #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
 
 #define FNIC_HOST_RESET_TIMEOUT             10000      /* mSec */
 #define FNIC_RMDEVICE_TIMEOUT        1000       /* mSec */
 #define FNIC_HOST_RESET_SETTLE_TIME  30         /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT  500        /* mSec */
 
 #define FNIC_MAX_FCP_TARGET     256
 
+/**
+ * state_flags to identify host state along along with fnic's state
+ **/
+#define __FNIC_FLAGS_FWRESET           BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO          BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE                        (0)
+#define FNIC_FLAGS_FWRESET             (__FNIC_FLAGS_FWRESET | \
+                                       __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED          (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags)  \
+       __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags)  \
+       __fnic_set_state_flags(fnicp, st_flags, 1)
+
 extern unsigned int fnic_log_level;
 
 #define FNIC_MAIN_LOGGING 0x01
@@ -170,6 +219,9 @@ struct fnic {
 
        struct completion *remove_wait; /* device remove thread blocks */
 
+       atomic_t in_flight;             /* io counter */
+       u32 _reserved;                  /* fill hole */
+       unsigned long state_flags;      /* protected by host lock */
        enum fnic_state state;
        spinlock_t fnic_lock;
 
@@ -267,4 +319,12 @@ const char *fnic_state_to_str(unsigned int state);
 void fnic_log_q_error(struct fnic *fnic);
 void fnic_handle_link_event(struct fnic *fnic);
 
+int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
+
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+       return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
 #endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
new file mode 100644 (file)
index 0000000..adc1f7f
--- /dev/null
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "fnic.h"
+
+static struct dentry *fnic_trace_debugfs_root;
+static struct dentry *fnic_trace_debugfs_file;
+static struct dentry *fnic_trace_enable;
+
+/*
+ * fnic_trace_ctrl_open - Open the trace_enable file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the trace enable/disable flag.
+ *
+ * Description:
+ * This routine opens a debugsfs file trace_enable.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+/*
+ * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads value of variable fnic_tracing_enabled
+ * and stores into local @buf. It will start reading file at @ppos and
+ * copy up to @cnt of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_trace_ctrl_read(struct file *filp,
+                                 char __user *ubuf,
+                                 size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       int len;
+       len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from user buffer @ubuf to buffer @buf and
+ * sets fnic_tracing_enabled value as per user input.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_trace_ctrl_write(struct file *filp,
+                                 const char __user *ubuf,
+                                 size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       unsigned long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       fnic_tracing_enabled = val;
+       (*ppos)++;
+
+       return cnt;
+}
+
+/*
+ * fnic_trace_debugfs_open - Open the fnic trace log
+ * @inode: The inode pointer
+ * @file: The file pointer to attach the log output
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation.
+ * It allocates the necessary buffer for the log, fills the buffer from
+ * the in-memory log and then returns a pointer to that log in
+ * the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return
+ * a negative error value.
+ */
+static int fnic_trace_debugfs_open(struct inode *inode,
+                                 struct file *file)
+{
+       fnic_dbgfs_t *fnic_dbg_prt;
+       fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
+       if (!fnic_dbg_prt)
+               return -ENOMEM;
+
+       fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
+       if (!fnic_dbg_prt->buffer) {
+               kfree(fnic_dbg_prt);
+               return -ENOMEM;
+       }
+       memset((void *)fnic_dbg_prt->buffer, 0,
+                         (3*(trace_max_pages * PAGE_SIZE)));
+       fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+       file->private_data = fnic_dbg_prt;
+       return 0;
+}
+
+/*
+ * fnic_trace_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @offset: The offset to seek to or the amount to seek by.
+ * @howto: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation.
+ * The @howto parameter indicates whether @offset is the offset to directly
+ * seek to, or if it is a value to seek forward or reverse by. This function
+ * figures out what the new offset of the debugfs file will be and assigns
+ * that value to the f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ */
+static loff_t fnic_trace_debugfs_lseek(struct file *file,
+                                       loff_t offset,
+                                       int howto)
+{
+       fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+       loff_t pos = -1;
+
+       switch (howto) {
+       case 0:
+               pos = offset;
+               break;
+       case 1:
+               pos = file->f_pos + offset;
+               break;
+       case 2:
+               pos = fnic_dbg_prt->buffer_len - offset;
+       }
+       return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
+                         -EINVAL : (file->f_pos = pos);
+}
+
+/*
+ * fnic_trace_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_trace_debugfs_read(struct file *file,
+                                       char __user *ubuf,
+                                       size_t nbytes,
+                                       loff_t *pos)
+{
+       fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+       int rc = 0;
+       rc = simple_read_from_buffer(ubuf, nbytes, pos,
+                                 fnic_dbg_prt->buffer,
+                                 fnic_dbg_prt->buffer_len);
+       return rc;
+}
+
+/*
+ * fnic_trace_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_trace_debugfs_release(struct inode *inode,
+                                         struct file *file)
+{
+       fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+
+       vfree(fnic_dbg_prt->buffer);
+       kfree(fnic_dbg_prt);
+       return 0;
+}
+
/* File operations for the "tracing_enable" control file. */
static const struct file_operations fnic_trace_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = fnic_trace_ctrl_open,
	.read = fnic_trace_ctrl_read,
	.write = fnic_trace_ctrl_write,
};
+
/* File operations for the "trace" dump file. */
static const struct file_operations fnic_trace_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = fnic_trace_debugfs_open,
	.llseek = fnic_trace_debugfs_lseek,
	.read = fnic_trace_debugfs_read,
	.release = fnic_trace_debugfs_release,
};
+
+/*
+ * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory. It will create file trace to log fnic trace buffer
+ * output into debugfs and it will also create file trace_enable to
+ * control enable/disable of trace logging into trace buffer.
+ */
+int fnic_trace_debugfs_init(void)
+{
+       int rc = -1;
+       fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+       if (!fnic_trace_debugfs_root) {
+               printk(KERN_DEBUG "Cannot create debugfs root\n");
+               return rc;
+       }
+       fnic_trace_enable = debugfs_create_file("tracing_enable",
+                                         S_IFREG|S_IRUGO|S_IWUSR,
+                                         fnic_trace_debugfs_root,
+                                         NULL, &fnic_trace_ctrl_fops);
+
+       if (!fnic_trace_enable) {
+               printk(KERN_DEBUG "Cannot create trace_enable file"
+                                 " under debugfs");
+               return rc;
+       }
+
+       fnic_trace_debugfs_file = debugfs_create_file("trace",
+                                                 S_IFREG|S_IRUGO|S_IWUSR,
+                                                 fnic_trace_debugfs_root,
+                                                 NULL,
+                                                 &fnic_trace_debugfs_fops);
+
+       if (!fnic_trace_debugfs_file) {
+               printk(KERN_DEBUG "Cannot create trace file under debugfs");
+               return rc;
+       }
+       rc = 0;
+       return rc;
+}
+
+/*
+ * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic trace logging.
+ */
+void fnic_trace_debugfs_terminate(void)
+{
+       if (fnic_trace_debugfs_file) {
+               debugfs_remove(fnic_trace_debugfs_file);
+               fnic_trace_debugfs_file = NULL;
+       }
+       if (fnic_trace_enable) {
+               debugfs_remove(fnic_trace_enable);
+               fnic_trace_enable = NULL;
+       }
+       if (fnic_trace_debugfs_root) {
+               debugfs_remove(fnic_trace_debugfs_root);
+               fnic_trace_debugfs_root = NULL;
+       }
+}
index 3c53c34..483eb9d 100644 (file)
@@ -495,7 +495,8 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
        }
 
        fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
-                              fnic->vlan_hw_insert, fnic->vlan_id, 1);
+                              0 /* hw inserts cos value */,
+                              fnic->vlan_id, 1);
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 }
 
@@ -563,7 +564,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
        }
 
        fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
-                          fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
+                          0 /* hw inserts cos value */,
+                          fnic->vlan_id, 1, 1, 1);
 fnic_send_frame_end:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 
index f0b8969..c35b8f1 100644 (file)
@@ -21,7 +21,7 @@
 #include <scsi/fc/fc_fcp.h>
 
 #define FNIC_DFLT_SG_DESC_CNT  32
-#define FNIC_MAX_SG_DESC_CNT        1024    /* Maximum descriptors per sgl */
+#define FNIC_MAX_SG_DESC_CNT        256     /* Maximum descriptors per sgl */
 #define FNIC_SG_DESC_ALIGN          16      /* Descriptor address alignment */
 
 struct host_sg_desc {
@@ -45,7 +45,8 @@ enum fnic_sgl_list_type {
 };
 
 enum fnic_ioreq_state {
-       FNIC_IOREQ_CMD_PENDING = 0,
+       FNIC_IOREQ_NOT_INITED = 0,
+       FNIC_IOREQ_CMD_PENDING,
        FNIC_IOREQ_ABTS_PENDING,
        FNIC_IOREQ_ABTS_COMPLETE,
        FNIC_IOREQ_CMD_COMPLETE,
@@ -60,6 +61,7 @@ struct fnic_io_req {
        u8 sgl_type; /* device DMA descriptor list type */
        u8 io_completed:1; /* set to 1 when fw completes IO */
        u32 port_id; /* remote port DID */
+       unsigned long start_time; /* in jiffies */
        struct completion *abts_done; /* completion for abts */
        struct completion *dr_done; /* completion for device reset */
 };
index fbf3ac6..d601ac5 100644 (file)
@@ -68,6 +68,10 @@ unsigned int fnic_log_level;
 module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
 
+unsigned int fnic_trace_max_pages = 16;
+module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
+                                       "for fnic trace buffer");
 
 static struct libfc_function_template fnic_transport_template = {
        .frame_send = fnic_send,
@@ -624,6 +628,9 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        fnic->state = FNIC_IN_FC_MODE;
 
+       atomic_set(&fnic->in_flight, 0);
+       fnic->state_flags = FNIC_FLAGS_NONE;
+
        /* Enable hardware stripping of vlan header on ingress */
        fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
 
@@ -858,6 +865,14 @@ static int __init fnic_init_module(void)
 
        printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
 
+       /* Allocate memory for trace buffer */
+       err = fnic_trace_buf_init();
+       if (err < 0) {
+               printk(KERN_ERR PFX "Trace buffer initialization Failed "
+                                 "Fnic Tracing utility is disabled\n");
+               fnic_trace_free();
+       }
+
        /* Create a cache for allocation of default size sgls */
        len = sizeof(struct fnic_dflt_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -928,6 +943,7 @@ err_create_fnic_ioreq_slab:
 err_create_fnic_sgl_slab_max:
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 err_create_fnic_sgl_slab_dflt:
+       fnic_trace_free();
        return err;
 }
 
@@ -939,6 +955,7 @@ static void __exit fnic_cleanup_module(void)
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
        kmem_cache_destroy(fnic_io_req_cache);
        fc_release_transport(fnic_fc_transport);
+       fnic_trace_free();
 }
 
 module_init(fnic_init_module);
index c40ce52..be99e75 100644 (file)
@@ -47,6 +47,7 @@ const char *fnic_state_str[] = {
 };
 
 static const char *fnic_ioreq_state_str[] = {
+       [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
        [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
        [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
        [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
@@ -165,6 +166,33 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
 }
 
 
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
			unsigned long clearbits)
{
	struct Scsi_Host *host = fnic->lport->host;
	/*
	 * NOTE(review): spin_is_locked() only reports that *someone*
	 * holds the lock, not that the current CPU does; if another
	 * context holds it, the update below proceeds unlocked.
	 * Confirm the callers' locking contract.
	 */
	int sh_locked = spin_is_locked(host->host_lock);
	unsigned long flags = 0;

	if (!sh_locked)
		spin_lock_irqsave(host->host_lock, flags);

	/* clearbits != 0 clears st_flags; clearbits == 0 sets them. */
	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	if (!sh_locked)
		spin_unlock_irqrestore(host->host_lock, flags);

	return;
}
+
+
 /*
  * fnic_fw_reset_handler
  * Routine to send reset msg to fw
@@ -175,9 +203,16 @@ int fnic_fw_reset_handler(struct fnic *fnic)
        int ret = 0;
        unsigned long flags;
 
+       /* indicate fwreset to io path */
+       fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);
 
+       /* wait for io cmpl */
+       while (atomic_read(&fnic->in_flight))
+               schedule_timeout(msecs_to_jiffies(1));
+
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -193,9 +228,12 @@ int fnic_fw_reset_handler(struct fnic *fnic)
        if (!ret)
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Issued fw reset\n");
-       else
+       else {
+               fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Failed to issue fw reset\n");
+       }
+
        return ret;
 }
 
@@ -312,6 +350,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 
        if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+               FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                         "fnic_queue_wq_copy_desc failure - no descriptors\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
@@ -351,16 +391,20 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
  */
 static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
-       struct fc_lport *lp;
+       struct fc_lport *lp = shost_priv(sc->device->host);
        struct fc_rport *rport;
-       struct fnic_io_req *io_req;
-       struct fnic *fnic;
+       struct fnic_io_req *io_req = NULL;
+       struct fnic *fnic = lport_priv(lp);
        struct vnic_wq_copy *wq;
        int ret;
-       int sg_count;
+       u64 cmd_trace;
+       int sg_count = 0;
        unsigned long flags;
        unsigned long ptr;
 
+       if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+               return SCSI_MLQUEUE_HOST_BUSY;
+
        rport = starget_to_rport(scsi_target(sc->device));
        ret = fc_remote_port_chkready(rport);
        if (ret) {
@@ -369,20 +413,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                return 0;
        }
 
-       lp = shost_priv(sc->device->host);
        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                return SCSI_MLQUEUE_HOST_BUSY;
 
+       atomic_inc(&fnic->in_flight);
+
        /*
         * Release host lock, use driver resource specific locks from here.
         * Don't re-enable interrupts in case they were disabled prior to the
         * caller disabling them.
         */
        spin_unlock(lp->host->host_lock);
+       CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+       CMD_FLAGS(sc) = FNIC_NO_FLAGS;
 
        /* Get a new io_req for this SCSI IO */
-       fnic = lport_priv(lp);
-
        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
        if (!io_req) {
                ret = SCSI_MLQUEUE_HOST_BUSY;
@@ -393,6 +438,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        /* Map the data buffer */
        sg_count = scsi_dma_map(sc);
        if (sg_count < 0) {
+               FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+                         sc->request->tag, sc, 0, sc->cmnd[0],
+                         sg_count, CMD_STATE(sc));
                mempool_free(io_req, fnic->io_req_pool);
                goto out;
        }
@@ -427,8 +475,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 
        /* initialize rest of io_req */
        io_req->port_id = rport->port_id;
+       io_req->start_time = jiffies;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
        CMD_SP(sc) = (char *)io_req;
+       CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
        sc->scsi_done = done;
 
        /* create copy wq desc and enqueue it */
@@ -440,7 +490,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                 * refetch the pointer under the lock.
                 */
                spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
-
+               FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+                         sc->request->tag, sc, 0, 0, 0,
+                         (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                CMD_SP(sc) = NULL;
@@ -450,8 +502,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
                }
+       } else {
+               /* REVISIT: Use per IO lock in the final code */
+               CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
        }
 out:
+       cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
+                       (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
+                       (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
+                       sc->cmnd[5]);
+
+       FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+                 sc->request->tag, sc, io_req,
+                 sg_count, cmd_trace,
+                 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+       atomic_dec(&fnic->in_flight);
        /* acquire host lock before returning to SCSI */
        spin_lock(lp->host->host_lock);
        return ret;
@@ -529,6 +594,8 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
        fnic_flush_tx(fnic);
 
  reset_cmpl_handler_end:
+       fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
        return ret;
 }
 
@@ -622,6 +689,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
        struct vnic_wq_copy *wq;
        u16 request_out = desc->u.ack.request_out;
        unsigned long flags;
+       u64 *ox_id_tag = (u64 *)(void *)desc;
 
        /* mark the ack state */
        wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
@@ -632,6 +700,9 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
                fnic->fw_ack_recd[0] = 1;
        }
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+       FNIC_TRACE(fnic_fcpio_ack_handler,
+                 fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+                 ox_id_tag[4], ox_id_tag[5]);
 }
 
 /*
@@ -651,27 +722,53 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        struct scsi_cmnd *sc;
        unsigned long flags;
        spinlock_t *io_lock;
+       u64 cmd_trace;
+       unsigned long start_time;
 
        /* Decode the cmpl description to get the io_req id */
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);
+       icmnd_cmpl = &desc->u.icmnd_cmpl;
 
-       if (id >= FNIC_MAX_IO_REQ)
+       if (id >= FNIC_MAX_IO_REQ) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+                       "Tag out of range tag %x hdr status = %s\n",
+                            id, fnic_fcpio_status_to_str(hdr_status));
                return;
+       }
 
        sc = scsi_host_find_tag(fnic->lport->host, id);
        WARN_ON_ONCE(!sc);
-       if (!sc)
+       if (!sc) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "icmnd_cmpl sc is null - "
+                         "hdr status = %s tag = 0x%x desc = 0x%p\n",
+                         fnic_fcpio_status_to_str(hdr_status), id, desc);
+               FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+                         fnic->lport->host->host_no, id,
+                         ((u64)icmnd_cmpl->_resvd0[1] << 16 |
+                         (u64)icmnd_cmpl->_resvd0[0]),
+                         ((u64)hdr_status << 16 |
+                         (u64)icmnd_cmpl->scsi_status << 8 |
+                         (u64)icmnd_cmpl->flags), desc,
+                         (u64)icmnd_cmpl->residual, 0);
                return;
+       }
 
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
+               CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
                spin_unlock_irqrestore(io_lock, flags);
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "icmnd_cmpl io_req is null - "
+                         "hdr status = %s tag = 0x%x sc 0x%p\n",
+                         fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
+       start_time = io_req->start_time;
 
        /* firmware completed the io */
        io_req->io_completed = 1;
@@ -682,6 +779,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
         */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                spin_unlock_irqrestore(io_lock, flags);
+               CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+               switch (hdr_status) {
+               case FCPIO_SUCCESS:
+                       CMD_FLAGS(sc) |= FNIC_IO_DONE;
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                 "icmnd_cmpl ABTS pending hdr status = %s "
+                                 "sc  0x%p scsi_status %x  residual %d\n",
+                                 fnic_fcpio_status_to_str(hdr_status), sc,
+                                 icmnd_cmpl->scsi_status,
+                                 icmnd_cmpl->residual);
+                       break;
+               case FCPIO_ABORTED:
+                       CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+                       break;
+               default:
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                         "icmnd_cmpl abts pending "
+                                         "hdr status = %s tag = 0x%x sc = 0x%p\n",
+                                         fnic_fcpio_status_to_str(hdr_status),
+                                         id, sc);
+                       break;
+               }
                return;
        }
 
@@ -765,6 +884,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 
        /* Break link with the SCSI command */
        CMD_SP(sc) = NULL;
+       CMD_FLAGS(sc) |= FNIC_IO_DONE;
 
        spin_unlock_irqrestore(io_lock, flags);
 
@@ -772,6 +892,20 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
 
        mempool_free(io_req, fnic->io_req_pool);
 
+       cmd_trace = ((u64)hdr_status << 56) |
+                 (u64)icmnd_cmpl->scsi_status << 48 |
+                 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
+                 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+                 (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
+
+       FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+                 sc->device->host->host_no, id, sc,
+                 ((u64)icmnd_cmpl->_resvd0[1] << 56 |
+                 (u64)icmnd_cmpl->_resvd0[0] << 48 |
+                 jiffies_to_msecs(jiffies - start_time)),
+                 desc, cmd_trace,
+                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
                fnic->lport->host_stats.fcp_input_requests++;
                fnic->fcp_input_bytes += xfer_len;
@@ -784,7 +918,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        /* Call SCSI completion function to complete the IO */
        if (sc->scsi_done)
                sc->scsi_done(sc);
-
 }
 
 /* fnic_fcpio_itmf_cmpl_handler
@@ -801,28 +934,54 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
        struct fnic_io_req *io_req;
        unsigned long flags;
        spinlock_t *io_lock;
+       unsigned long start_time;
 
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
        fcpio_tag_id_dec(&tag, &id);
 
-       if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+       if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+               "Tag out of range tag %x hdr status = %s\n",
+               id, fnic_fcpio_status_to_str(hdr_status));
                return;
+       }
 
        sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
        WARN_ON_ONCE(!sc);
-       if (!sc)
+       if (!sc) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+                         fnic_fcpio_status_to_str(hdr_status), id);
                return;
-
+       }
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
+               CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+               shost_printk(KERN_ERR, fnic->lport->host,
+                         "itmf_cmpl io_req is null - "
+                         "hdr status = %s tag = 0x%x sc 0x%p\n",
+                         fnic_fcpio_status_to_str(hdr_status), id, sc);
                return;
        }
+       start_time = io_req->start_time;
 
-       if (id & FNIC_TAG_ABORT) {
+       if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+               /* Abort and terminate completion of device reset req */
+               /* REVISIT : Add asserts about various flags */
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                             "dev reset abts cmpl recd. id %x status %s\n",
+                             id, fnic_fcpio_status_to_str(hdr_status));
+               CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+               CMD_ABTS_STATUS(sc) = hdr_status;
+               CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+               if (io_req->abts_done)
+                       complete(io_req->abts_done);
+               spin_unlock_irqrestore(io_lock, flags);
+       } else if (id & FNIC_TAG_ABORT) {
                /* Completion of abort cmd */
                if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
                        /* This is a late completion. Ignore it */
@@ -832,6 +991,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
                CMD_ABTS_STATUS(sc) = hdr_status;
 
+               CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "abts cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
@@ -855,14 +1015,58 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
 
                        fnic_release_ioreq_buf(fnic, io_req, sc);
                        mempool_free(io_req, fnic->io_req_pool);
-                       if (sc->scsi_done)
+                       if (sc->scsi_done) {
+                               FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+                                       sc->device->host->host_no, id,
+                                       sc,
+                                       jiffies_to_msecs(jiffies - start_time),
+                                       desc,
+                                       (((u64)hdr_status << 40) |
+                                       (u64)sc->cmnd[0] << 32 |
+                                       (u64)sc->cmnd[2] << 24 |
+                                       (u64)sc->cmnd[3] << 16 |
+                                       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                                       (((u64)CMD_FLAGS(sc) << 32) |
+                                       CMD_STATE(sc)));
                                sc->scsi_done(sc);
+                       }
                }
 
        } else if (id & FNIC_TAG_DEV_RST) {
                /* Completion of device reset */
                CMD_LR_STATUS(sc) = hdr_status;
+               if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+                       FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+                                 sc->device->host->host_no, id, sc,
+                                 jiffies_to_msecs(jiffies - start_time),
+                                 desc, 0,
+                                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "Terminate pending "
+                               "dev reset cmpl recd. id %d status %s\n",
+                               (int)(id & FNIC_TAG_MASK),
+                               fnic_fcpio_status_to_str(hdr_status));
+                       return;
+               }
+               if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+                       /* Need to wait for terminate completion */
+                       spin_unlock_irqrestore(io_lock, flags);
+                       FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+                                 sc->device->host->host_no, id, sc,
+                                 jiffies_to_msecs(jiffies - start_time),
+                                 desc, 0,
+                                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "dev reset cmpl recd after time out. "
+                               "id %d status %s\n",
+                               (int)(id & FNIC_TAG_MASK),
+                               fnic_fcpio_status_to_str(hdr_status));
+                       return;
+               }
                CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+               CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "dev reset cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
@@ -889,7 +1093,6 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                                   struct fcpio_fw_req *desc)
 {
        struct fnic *fnic = vnic_dev_priv(vdev);
-       int ret = 0;
 
        switch (desc->hdr.type) {
        case FCPIO_ACK: /* fw copied copy wq desc to its queue */
@@ -906,11 +1109,11 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
 
        case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
        case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
-               ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+               fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
                break;
 
        case FCPIO_RESET_CMPL: /* fw completed reset */
-               ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+               fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
                break;
 
        default:
@@ -920,7 +1123,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                break;
        }
 
-       return ret;
+       return 0;
 }
 
 /*
@@ -950,6 +1153,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
        unsigned long flags = 0;
        struct scsi_cmnd *sc;
        spinlock_t *io_lock;
+       unsigned long start_time = 0;
 
        for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
                if (i == exclude_id)
@@ -962,6 +1166,23 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
                io_lock = fnic_io_lock_hash(fnic, sc);
                spin_lock_irqsave(io_lock, flags);
                io_req = (struct fnic_io_req *)CMD_SP(sc);
+               if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                       !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+                       /*
+                        * We will be here only when FW completes reset
+                        * without sending completions for outstanding ios.
+                        */
+                       CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+                       if (io_req && io_req->dr_done)
+                               complete(io_req->dr_done);
+                       else if (io_req && io_req->abts_done)
+                               complete(io_req->abts_done);
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
                if (!io_req) {
                        spin_unlock_irqrestore(io_lock, flags);
                        goto cleanup_scsi_cmd;
@@ -975,6 +1196,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
                 * If there is a scsi_cmnd associated with this io_req, then
                 * free the corresponding state
                 */
+               start_time = io_req->start_time;
                fnic_release_ioreq_buf(fnic, io_req, sc);
                mempool_free(io_req, fnic->io_req_pool);
 
@@ -984,8 +1206,18 @@ cleanup_scsi_cmd:
                              " DID_TRANSPORT_DISRUPTED\n");
 
                /* Complete the command to SCSI */
-               if (sc->scsi_done)
+               if (sc->scsi_done) {
+                       FNIC_TRACE(fnic_cleanup_io,
+                                 sc->device->host->host_no, i, sc,
+                                 jiffies_to_msecs(jiffies - start_time),
+                                 0, ((u64)sc->cmnd[0] << 32 |
+                                 (u64)sc->cmnd[2] << 24 |
+                                 (u64)sc->cmnd[3] << 16 |
+                                 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
                        sc->scsi_done(sc);
+               }
        }
 }
 
@@ -998,6 +1230,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
        struct scsi_cmnd *sc;
        unsigned long flags;
        spinlock_t *io_lock;
+       unsigned long start_time = 0;
 
        /* get the tag reference */
        fcpio_tag_id_dec(&desc->hdr.tag, &id);
@@ -1027,6 +1260,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 
        spin_unlock_irqrestore(io_lock, flags);
 
+       start_time = io_req->start_time;
        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);
 
@@ -1035,8 +1269,17 @@ wq_copy_cleanup_scsi_cmd:
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
                      " DID_NO_CONNECT\n");
 
-       if (sc->scsi_done)
+       if (sc->scsi_done) {
+               FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+                         sc->device->host->host_no, id, sc,
+                         jiffies_to_msecs(jiffies - start_time),
+                         0, ((u64)sc->cmnd[0] << 32 |
+                         (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+                         (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                         (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
                sc->scsi_done(sc);
+       }
 }
 
 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@@ -1044,8 +1287,18 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                                          struct fnic_io_req *io_req)
 {
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+       struct Scsi_Host *host = fnic->lport->host;
        unsigned long flags;
 
+       spin_lock_irqsave(host->host_lock, flags);
+       if (unlikely(fnic_chk_state_flags_locked(fnic,
+                                               FNIC_FLAGS_IO_BLOCKED))) {
+               spin_unlock_irqrestore(host->host_lock, flags);
+               return 1;
+       } else
+               atomic_inc(&fnic->in_flight);
+       spin_unlock_irqrestore(host->host_lock, flags);
+
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -1053,6 +1306,9 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
 
        if (!vnic_wq_copy_desc_avail(wq)) {
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+               atomic_dec(&fnic->in_flight);
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_queue_abort_io_req: failure: no descriptors\n");
                return 1;
        }
        fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
@@ -1060,12 +1316,15 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                                     fnic->config.ra_tov, fnic->config.ed_tov);
 
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+       atomic_dec(&fnic->in_flight);
+
        return 0;
 }
 
-void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 {
        int tag;
+       int abt_tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -1075,13 +1334,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 
        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host,
-                     "fnic_rport_reset_exch called portid 0x%06x\n",
+                     "fnic_rport_exch_reset called portid 0x%06x\n",
                      port_id);
 
        if (fnic->in_remove)
                return;
 
        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+               abt_tag = tag;
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                if (!sc)
                        continue;
@@ -1096,6 +1356,15 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                        continue;
                }
 
+               if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                       (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+                       sc);
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
+
                /*
                 * Found IO that is still pending with firmware and
                 * belongs to rport that went away
@@ -1104,9 +1373,29 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }
+               if (io_req->abts_done) {
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                       "fnic_rport_exch_reset: io_req->abts_done is set "
+                       "state is %s\n",
+                       fnic_ioreq_state_to_str(CMD_STATE(sc)));
+               }
+
+               if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                                 "rport_exch_reset "
+                                 "IO not yet issued %p tag 0x%x flags "
+                                 "%x state %d\n",
+                                 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+               }
                old_ioreq_state = CMD_STATE(sc);
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+               if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       abt_tag = (tag | FNIC_TAG_DEV_RST);
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_rport_exch_reset dev rst sc 0x%p\n",
+                       sc);
+               }
 
                BUG_ON(io_req->abts_done);
 
@@ -1118,7 +1407,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);
 
-               if (fnic_queue_abort_io_req(fnic, tag,
+               if (fnic_queue_abort_io_req(fnic, abt_tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        /*
@@ -1127,12 +1416,17 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                         * aborted later by scsi_eh, or cleaned up during
                         * lun reset
                         */
-                       io_lock = fnic_io_lock_hash(fnic, sc);
-
                        spin_lock_irqsave(io_lock, flags);
                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                                CMD_STATE(sc) = old_ioreq_state;
                        spin_unlock_irqrestore(io_lock, flags);
+               } else {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+                               CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                       else
+                               CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+                       spin_unlock_irqrestore(io_lock, flags);
                }
        }
 
@@ -1141,6 +1435,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 void fnic_terminate_rport_io(struct fc_rport *rport)
 {
        int tag;
+       int abt_tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -1154,14 +1449,15 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 
        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host, "fnic_terminate_rport_io called"
-                     " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
-                     rport->port_name, rport->node_name,
+                     " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+                     rport->port_name, rport->node_name, rport,
                      rport->port_id);
 
        if (fnic->in_remove)
                return;
 
        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+               abt_tag = tag;
                sc = scsi_host_find_tag(fnic->lport->host, tag);
                if (!sc)
                        continue;
@@ -1180,6 +1476,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                        continue;
                }
 
+               if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                       (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+                       sc);
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
                /*
                 * Found IO that is still pending with firmware and
                 * belongs to rport that went away
@@ -1188,9 +1492,27 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                        spin_unlock_irqrestore(io_lock, flags);
                        continue;
                }
+               if (io_req->abts_done) {
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                       "fnic_terminate_rport_io: io_req->abts_done is set "
+                       "state is %s\n",
+                       fnic_ioreq_state_to_str(CMD_STATE(sc)));
+               }
+               if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                 "fnic_terminate_rport_io "
+                                 "IO not yet issued %p tag 0x%x flags "
+                                 "%x state %d\n",
+                                 sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+               }
                old_ioreq_state = CMD_STATE(sc);
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+               if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       abt_tag = (tag | FNIC_TAG_DEV_RST);
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+               }
 
                BUG_ON(io_req->abts_done);
 
@@ -1203,7 +1525,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);
 
-               if (fnic_queue_abort_io_req(fnic, tag,
+               if (fnic_queue_abort_io_req(fnic, abt_tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        /*
@@ -1212,12 +1534,17 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                         * aborted later by scsi_eh, or cleaned up during
                         * lun reset
                         */
-                       io_lock = fnic_io_lock_hash(fnic, sc);
-
                        spin_lock_irqsave(io_lock, flags);
                        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                                CMD_STATE(sc) = old_ioreq_state;
                        spin_unlock_irqrestore(io_lock, flags);
+               } else {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+                               CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                       else
+                               CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+                       spin_unlock_irqrestore(io_lock, flags);
                }
        }
 
@@ -1232,13 +1559,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 {
        struct fc_lport *lp;
        struct fnic *fnic;
-       struct fnic_io_req *io_req;
+       struct fnic_io_req *io_req = NULL;
        struct fc_rport *rport;
        spinlock_t *io_lock;
        unsigned long flags;
+       unsigned long start_time = 0;
        int ret = SUCCESS;
-       u32 task_req;
+       u32 task_req = 0;
        struct scsi_lun fc_lun;
+       int tag;
        DECLARE_COMPLETION_ONSTACK(tm_done);
 
        /* Wait for rport to unblock */
@@ -1249,9 +1578,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 
        fnic = lport_priv(lp);
        rport = starget_to_rport(scsi_target(sc->device));
-       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                       "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
-                       rport->port_id, sc->device->lun, sc->request->tag);
+       tag = sc->request->tag;
+       FNIC_SCSI_DBG(KERN_DEBUG,
+               fnic->lport->host,
+               "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+               rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+       CMD_FLAGS(sc) = FNIC_NO_FLAGS;
 
        if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
                ret = FAILED;
@@ -1318,6 +1651,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
+       if (task_req == FCPIO_ITMF_ABT_TASK)
+               CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+       else
+               CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
 
        /*
         * We queued an abort IO, wait for its completion.
@@ -1336,6 +1673,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
+               CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
@@ -1344,6 +1682,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        /* fw did not complete abort, timed out */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                spin_unlock_irqrestore(io_lock, flags);
+               CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
@@ -1359,12 +1698,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 
        spin_unlock_irqrestore(io_lock, flags);
 
+       start_time = io_req->start_time;
        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);
 
 fnic_abort_cmd_end:
+       FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+                 sc->request->tag, sc,
+                 jiffies_to_msecs(jiffies - start_time),
+                 0, ((u64)sc->cmnd[0] << 32 |
+                 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+                 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                     "Returning from abort cmd %s\n",
+                     "Returning from abort cmd type %x %s\n", task_req,
                      (ret == SUCCESS) ?
                      "SUCCESS" : "FAILED");
        return ret;
@@ -1375,16 +1723,28 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
                                       struct fnic_io_req *io_req)
 {
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+       struct Scsi_Host *host = fnic->lport->host;
        struct scsi_lun fc_lun;
        int ret = 0;
        unsigned long intr_flags;
 
+       spin_lock_irqsave(host->host_lock, intr_flags);
+       if (unlikely(fnic_chk_state_flags_locked(fnic,
+                                               FNIC_FLAGS_IO_BLOCKED))) {
+               spin_unlock_irqrestore(host->host_lock, intr_flags);
+               return FAILED;
+       } else
+               atomic_inc(&fnic->in_flight);
+       spin_unlock_irqrestore(host->host_lock, intr_flags);
+
        spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
 
        if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
                free_wq_copy_descs(fnic, wq);
 
        if (!vnic_wq_copy_desc_avail(wq)) {
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                         "queue_dr_io_req failure - no descriptors\n");
                ret = -EAGAIN;
                goto lr_io_req_end;
        }
@@ -1399,6 +1759,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
 
 lr_io_req_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+       atomic_dec(&fnic->in_flight);
 
        return ret;
 }
@@ -1412,7 +1773,7 @@ lr_io_req_end:
 static int fnic_clean_pending_aborts(struct fnic *fnic,
                                     struct scsi_cmnd *lr_sc)
 {
-       int tag;
+       int tag, abt_tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -1421,6 +1782,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
        struct scsi_lun fc_lun;
        struct scsi_device *lun_dev = lr_sc->device;
        DECLARE_COMPLETION_ONSTACK(tm_done);
+       enum fnic_ioreq_state old_ioreq_state;
 
        for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
                sc = scsi_host_find_tag(fnic->lport->host, tag);
@@ -1449,7 +1811,41 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                              "Found IO in %s on lun\n",
                              fnic_ioreq_state_to_str(CMD_STATE(sc)));
 
-               BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+               if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
+               if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+                       (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                               "%s dev rst not pending sc 0x%p\n", __func__,
+                               sc);
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
+               old_ioreq_state = CMD_STATE(sc);
+               /*
+                * Any pending IO issued prior to reset is expected to be
+                * in abts pending state, if not we need to set
+                * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
+                * When IO is completed, the IO will be handed over and
+                * handled in this function.
+                */
+               CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+
+               if (io_req->abts_done)
+                       shost_printk(KERN_ERR, fnic->lport->host,
+                         "%s: io_req->abts_done is set state is %s\n",
+                         __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+               BUG_ON(io_req->abts_done);
+
+               abt_tag = tag;
+               if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       abt_tag |= FNIC_TAG_DEV_RST;
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                                 "%s: dev rst sc 0x%p\n", __func__, sc);
+               }
 
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
                io_req->abts_done = &tm_done;
@@ -1458,17 +1854,25 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                /* Now queue the abort command to firmware */
                int_to_scsilun(sc->device->lun, &fc_lun);
 
-               if (fnic_queue_abort_io_req(fnic, tag,
+               if (fnic_queue_abort_io_req(fnic, abt_tag,
                                            FCPIO_ITMF_ABT_TASK_TERM,
                                            fc_lun.scsi_lun, io_req)) {
                        spin_lock_irqsave(io_lock, flags);
                        io_req = (struct fnic_io_req *)CMD_SP(sc);
                        if (io_req)
                                io_req->abts_done = NULL;
+                       if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+                               CMD_STATE(sc) = old_ioreq_state;
                        spin_unlock_irqrestore(io_lock, flags);
                        ret = 1;
                        goto clean_pending_aborts_end;
+               } else {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+                               CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                       spin_unlock_irqrestore(io_lock, flags);
                }
+               CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
 
                wait_for_completion_timeout(&tm_done,
                                            msecs_to_jiffies
@@ -1479,8 +1883,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                if (!io_req) {
                        spin_unlock_irqrestore(io_lock, flags);
-                       ret = 1;
-                       goto clean_pending_aborts_end;
+                       CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+                       continue;
                }
 
                io_req->abts_done = NULL;
@@ -1488,6 +1892,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                /* if abort is still pending with fw, fail */
                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
                        spin_unlock_irqrestore(io_lock, flags);
+                       CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
                        ret = 1;
                        goto clean_pending_aborts_end;
                }
@@ -1498,10 +1903,75 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
                mempool_free(io_req, fnic->io_req_pool);
        }
 
+       schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+
+       /* walk again to check, if IOs are still pending in fw */
+       if (fnic_is_abts_pending(fnic, lr_sc))
+               ret = FAILED;
+
 clean_pending_aborts_end:
        return ret;
 }
 
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates tagid from host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+       struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+       int tag, ret = SCSI_NO_TAG;
+
+       BUG_ON(!bqt);
+       if (!bqt) {
+               pr_err("Tags are not supported\n");
+               goto end;
+       }
+
+       do {
+               tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+               if (tag >= bqt->max_depth) {
+                       pr_err("Tag allocation failure\n");
+                       goto end;
+               }
+       } while (test_and_set_bit(tag, bqt->tag_map));
+
+       bqt->tag_index[tag] = sc->request;
+       sc->request->tag = tag;
+       sc->tag = tag;
+       if (!sc->request->special)
+               sc->request->special = sc;
+
+       ret = tag;
+
+end:
+       return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * frees tag allocated by fnic_scsi_host_start_tag.
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+       struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+       int tag = sc->request->tag;
+
+       if (tag == SCSI_NO_TAG)
+               return;
+
+       BUG_ON(!bqt || !bqt->tag_index[tag]);
+       if (!bqt)
+               return;
+
+       bqt->tag_index[tag] = NULL;
+       clear_bit(tag, bqt->tag_map);
+
+       return;
+}
+
 /*
  * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
  * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
@@ -1511,13 +1981,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 {
        struct fc_lport *lp;
        struct fnic *fnic;
-       struct fnic_io_req *io_req;
+       struct fnic_io_req *io_req = NULL;
        struct fc_rport *rport;
        int status;
        int ret = FAILED;
        spinlock_t *io_lock;
        unsigned long flags;
+       unsigned long start_time = 0;
+       struct scsi_lun fc_lun;
+       int tag = 0;
        DECLARE_COMPLETION_ONSTACK(tm_done);
+       int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/
 
        /* Wait for rport to unblock */
        fc_block_scsi_eh(sc);
@@ -1529,8 +2003,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 
        rport = starget_to_rport(scsi_target(sc->device));
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-                       "Device reset called FCID 0x%x, LUN 0x%x\n",
-                       rport->port_id, sc->device->lun);
+                     "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+                     rport->port_id, sc->device->lun, sc);
 
        if (lp->state != LPORT_ST_READY || !(lp->link_up))
                goto fnic_device_reset_end;
@@ -1539,6 +2013,16 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        if (fc_remote_port_chkready(rport))
                goto fnic_device_reset_end;
 
+       CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+       /* Allocate tag if not present */
+
+       tag = sc->request->tag;
+       if (unlikely(tag < 0)) {
+               tag = fnic_scsi_host_start_tag(fnic, sc);
+               if (unlikely(tag == SCSI_NO_TAG))
+                       goto fnic_device_reset_end;
+               tag_gen_flag = 1;
+       }
        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1562,8 +2046,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
        spin_unlock_irqrestore(io_lock, flags);
 
-       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
-                     sc->request->tag);
+       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
 
        /*
         * issue the device reset, if enqueue failed, clean up the ioreq
@@ -1576,6 +2059,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
                        io_req->dr_done = NULL;
                goto fnic_device_reset_clean;
        }
+       spin_lock_irqsave(io_lock, flags);
+       CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+       spin_unlock_irqrestore(io_lock, flags);
 
        /*
         * Wait on the local completion for LUN reset.  The io_req may be
@@ -1588,12 +2074,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
                spin_unlock_irqrestore(io_lock, flags);
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
                goto fnic_device_reset_end;
        }
        io_req->dr_done = NULL;
 
        status = CMD_LR_STATUS(sc);
-       spin_unlock_irqrestore(io_lock, flags);
 
        /*
         * If lun reset not completed, bail out with failed. io_req
@@ -1602,7 +2089,53 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        if (status == FCPIO_INVALID_CODE) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Device reset timed out\n");
-               goto fnic_device_reset_end;
+               CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+               spin_unlock_irqrestore(io_lock, flags);
+               int_to_scsilun(sc->device->lun, &fc_lun);
+               /*
+                * Issue abort and terminate on the device reset request.
+                * If q'ing of the abort fails, retry issue it after a delay.
+                */
+               while (1) {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+                               spin_unlock_irqrestore(io_lock, flags);
+                               break;
+                       }
+                       spin_unlock_irqrestore(io_lock, flags);
+                       if (fnic_queue_abort_io_req(fnic,
+                               tag | FNIC_TAG_DEV_RST,
+                               FCPIO_ITMF_ABT_TASK_TERM,
+                               fc_lun.scsi_lun, io_req)) {
+                               wait_for_completion_timeout(&tm_done,
+                               msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+                       } else {
+                               spin_lock_irqsave(io_lock, flags);
+                               CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+                               CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+                               io_req->abts_done = &tm_done;
+                               spin_unlock_irqrestore(io_lock, flags);
+                               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "Abort and terminate issued on Device reset "
+                               "tag 0x%x sc 0x%p\n", tag, sc);
+                               break;
+                       }
+               }
+               while (1) {
+                       spin_lock_irqsave(io_lock, flags);
+                       if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+                               spin_unlock_irqrestore(io_lock, flags);
+                               wait_for_completion_timeout(&tm_done,
+                               msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+                               break;
+                       } else {
+                               io_req = (struct fnic_io_req *)CMD_SP(sc);
+                               io_req->abts_done = NULL;
+                               goto fnic_device_reset_clean;
+                       }
+               }
+       } else {
+               spin_unlock_irqrestore(io_lock, flags);
        }
 
        /* Completed, but not successful, clean up the io_req, return fail */
@@ -1645,11 +2178,24 @@ fnic_device_reset_clean:
        spin_unlock_irqrestore(io_lock, flags);
 
        if (io_req) {
+               start_time = io_req->start_time;
                fnic_release_ioreq_buf(fnic, io_req, sc);
                mempool_free(io_req, fnic->io_req_pool);
        }
 
 fnic_device_reset_end:
+       FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
+                 sc->request->tag, sc,
+                 jiffies_to_msecs(jiffies - start_time),
+                 0, ((u64)sc->cmnd[0] << 32 |
+                 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+                 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+                 (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+       /* free tag if it is allocated */
+       if (unlikely(tag_gen_flag))
+               fnic_scsi_host_end_tag(fnic, sc);
+
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Returning from device reset %s\n",
                      (ret == SUCCESS) ?
@@ -1735,7 +2281,15 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
        DECLARE_COMPLETION_ONSTACK(remove_wait);
 
        /* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
        spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+               /* fw reset is in progress, poll for its completion */
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               schedule_timeout(msecs_to_jiffies(100));
+               goto retry_fw_reset;
+       }
+
        fnic->remove_wait = &remove_wait;
        old_state = fnic->state;
        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
@@ -1776,7 +2330,14 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
        struct fnic *fnic = lport_priv(lp);
 
        /* issue fw reset */
+retry_fw_reset:
        spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+               /* fw reset is in progress, poll for its completion */
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               schedule_timeout(msecs_to_jiffies(100));
+               goto retry_fw_reset;
+       }
        old_state = fnic->state;
        fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
        fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
@@ -1822,3 +2383,61 @@ call_fc_exch_mgr_reset:
        fc_exch_mgr_reset(lp, sid, did);
 
 }
+
+/*
+ * fnic_is_abts_pending() is a helper function that
+ * walks through tag map to check if there is any IOs pending,if there is one,
+ * then it returns 1 (true), otherwise 0 (false)
+ * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
+ * otherwise, it checks for all IOs.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+       int tag;
+       struct fnic_io_req *io_req;
+       spinlock_t *io_lock;
+       unsigned long flags;
+       int ret = 0;
+       struct scsi_cmnd *sc;
+       struct scsi_device *lun_dev = NULL;
+
+       if (lr_sc)
+               lun_dev = lr_sc->device;
+
+       /* walk again to check, if IOs are still pending in fw */
+       for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+               sc = scsi_host_find_tag(fnic->lport->host, tag);
+               /*
+                * ignore this lun reset cmd or cmds that do not belong to
+                * this lun
+                */
+               if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
+                       continue;
+
+               io_lock = fnic_io_lock_hash(fnic, sc);
+               spin_lock_irqsave(io_lock, flags);
+
+               io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+               if (!io_req || sc->device != lun_dev) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       continue;
+               }
+
+               /*
+                * Found IO that is still pending with firmware and
+                * belongs to the LUN that we are resetting
+                */
+               FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                             "Found IO in %s on lun\n",
+                             fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+               if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+                       spin_unlock_irqrestore(io_lock, flags);
+                       ret = 1;
+                       continue;
+               }
+       }
+
+       return ret;
+}
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
new file mode 100644 (file)
index 0000000..23a60e3
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+unsigned int trace_max_pages;
+static int fnic_max_trace_entries;
+
+static unsigned long fnic_trace_buf_p;
+static DEFINE_SPINLOCK(fnic_trace_lock);
+
+static fnic_trace_dbg_t fnic_trace_entries;
+int fnic_tracing_enabled = 1;
+
+/*
+ * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
+ *
+ * Description:
+ * This routine gets next available trace buffer entry location @wr_idx
+ * from allocated trace buffer pages and give that memory location
+ * to user to store the trace information.
+ *
+ * Return Value:
+ * This routine returns pointer to next available trace entry
+ * @fnic_buf_head for user to fill trace information.
+ */
+fnic_trace_data_t *fnic_trace_get_buf(void)
+{
+       unsigned long fnic_buf_head;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic_trace_lock, flags);
+
+       /*
+        * Get next available memory location for writing trace information
+        * at @wr_idx and increment @wr_idx
+        */
+       fnic_buf_head =
+               fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
+       fnic_trace_entries.wr_idx++;
+
+       /*
+        * Verify if trace buffer is full then change wd_idx to
+        * start from zero
+        */
+       if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
+               fnic_trace_entries.wr_idx = 0;
+
+       /*
+        * Verify if write index @wr_idx and read index @rd_idx are same then
+        * increment @rd_idx to move to next entry in trace buffer
+        */
+       if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
+               fnic_trace_entries.rd_idx++;
+               if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
+                       fnic_trace_entries.rd_idx = 0;
+       }
+       spin_unlock_irqrestore(&fnic_trace_lock, flags);
+       return (fnic_trace_data_t *)fnic_buf_head;
+}
+
+/*
+ * fnic_get_trace_data - Copy trace buffer to a memory file
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ *
+ * Description:
+ * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
+ * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
+ * the log and process the log until the end of the buffer. Then it will gather
+ * from the beginning of the log and process until the current entry @wr_idx.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into fnic_dbgfs_t
+ */
+int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
+{
+       int rd_idx;
+       int wr_idx;
+       int len = 0;
+       unsigned long flags;
+       char str[KSYM_SYMBOL_LEN];
+       struct timespec val;
+       fnic_trace_data_t *tbp;
+
+       spin_lock_irqsave(&fnic_trace_lock, flags);
+       rd_idx = fnic_trace_entries.rd_idx;
+       wr_idx = fnic_trace_entries.wr_idx;
+       if (wr_idx < rd_idx) {
+               while (1) {
+                       /* Start from read index @rd_idx */
+                       tbp = (fnic_trace_data_t *)
+                                 fnic_trace_entries.page_offset[rd_idx];
+                       if (!tbp) {
+                               spin_unlock_irqrestore(&fnic_trace_lock, flags);
+                               return 0;
+                       }
+                       /* Convert function pointer to function name */
+                       if (sizeof(unsigned long) < 8) {
+                               sprint_symbol(str, tbp->fnaddr.low);
+                               jiffies_to_timespec(tbp->timestamp.low, &val);
+                       } else {
+                               sprint_symbol(str, tbp->fnaddr.val);
+                               jiffies_to_timespec(tbp->timestamp.val, &val);
+                       }
+                       /*
+                        * Dump trace buffer entry to memory file
+                        * and increment read index @rd_idx
+                        */
+                       len += snprintf(fnic_dbgfs_prt->buffer + len,
+                                 (trace_max_pages * PAGE_SIZE * 3) - len,
+                                 "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+                                 "%16llx %16llx %16llx\n", val.tv_sec,
+                                 val.tv_nsec, str, tbp->host_no, tbp->tag,
+                                 tbp->data[0], tbp->data[1], tbp->data[2],
+                                 tbp->data[3], tbp->data[4]);
+                       rd_idx++;
+                       /*
+                        * If rd_idx is reached to maximum trace entries
+                        * then move rd_idx to zero
+                        */
+                       if (rd_idx > (fnic_max_trace_entries-1))
+                               rd_idx = 0;
+                       /*
+                        * Continure dumpping trace buffer entries into
+                        * memory file till rd_idx reaches write index
+                        */
+                       if (rd_idx == wr_idx)
+                               break;
+               }
+       } else if (wr_idx > rd_idx) {
+               while (1) {
+                       /* Start from read index @rd_idx */
+                       tbp = (fnic_trace_data_t *)
+                                 fnic_trace_entries.page_offset[rd_idx];
+                       if (!tbp) {
+                               spin_unlock_irqrestore(&fnic_trace_lock, flags);
+                               return 0;
+                       }
+                       /* Convert function pointer to function name */
+                       if (sizeof(unsigned long) < 8) {
+                               sprint_symbol(str, tbp->fnaddr.low);
+                               jiffies_to_timespec(tbp->timestamp.low, &val);
+                       } else {
+                               sprint_symbol(str, tbp->fnaddr.val);
+                               jiffies_to_timespec(tbp->timestamp.val, &val);
+                       }
+                       /*
+                        * Dump trace buffer entry to memory file
+                        * and increment read index @rd_idx
+                        */
+                       len += snprintf(fnic_dbgfs_prt->buffer + len,
+                                 (trace_max_pages * PAGE_SIZE * 3) - len,
+                                 "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+                                 "%16llx %16llx %16llx\n", val.tv_sec,
+                                 val.tv_nsec, str, tbp->host_no, tbp->tag,
+                                 tbp->data[0], tbp->data[1], tbp->data[2],
+                                 tbp->data[3], tbp->data[4]);
+                       rd_idx++;
+                       /*
+                        * Continue dumpping trace buffer entries into
+                        * memory file till rd_idx reaches write index
+                        */
+                       if (rd_idx == wr_idx)
+                               break;
+               }
+       }
+       spin_unlock_irqrestore(&fnic_trace_lock, flags);
+       return len;
+}
+
+/*
+ * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
+ *
+ * Description:
+ * Initialize trace buffer data structure by allocating required memory and
+ * setting page_offset information for every trace entry by adding trace entry
+ * length to previous page_offset value.
+ */
+int fnic_trace_buf_init(void)
+{
+       unsigned long fnic_buf_head;
+       int i;
+       int err = 0;
+
+       trace_max_pages = fnic_trace_max_pages;
+       fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
+                                         FNIC_ENTRY_SIZE_BYTES;
+
+       fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+       if (!fnic_trace_buf_p) {
+               printk(KERN_ERR PFX "Failed to allocate memory "
+                                 "for fnic_trace_buf_p\n");
+               err = -ENOMEM;
+               goto err_fnic_trace_buf_init;
+       }
+       memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
+
+       fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
+                                                 sizeof(unsigned long));
+       if (!fnic_trace_entries.page_offset) {
+               printk(KERN_ERR PFX "Failed to allocate memory for"
+                                 " page_offset\n");
+               if (fnic_trace_buf_p) {
+                       vfree((void *)fnic_trace_buf_p);
+                       fnic_trace_buf_p = 0;
+               }
+               err = -ENOMEM;
+               goto err_fnic_trace_buf_init;
+       }
+       memset((void *)fnic_trace_entries.page_offset, 0,
+                 (fnic_max_trace_entries * sizeof(unsigned long)));
+       fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
+       fnic_buf_head = fnic_trace_buf_p;
+
+       /*
+        * Set page_offset field of fnic_trace_entries struct by
+        * calculating memory location for every trace entry using
+        * length of each trace entry
+        */
+       for (i = 0; i < fnic_max_trace_entries; i++) {
+               fnic_trace_entries.page_offset[i] = fnic_buf_head;
+               fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
+       }
+       err = fnic_trace_debugfs_init();
+       if (err < 0) {
+               printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+               goto err_fnic_trace_debugfs_init;
+       }
+       printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+       return err;
+err_fnic_trace_debugfs_init:
+       fnic_trace_free();
+err_fnic_trace_buf_init:
+       return err;
+}
+
+/*
+ * fnic_trace_free - Free memory of fnic trace data structures.
+ */
+void fnic_trace_free(void)
+{
+       fnic_tracing_enabled = 0;
+       fnic_trace_debugfs_terminate();
+       if (fnic_trace_entries.page_offset) {
+               vfree((void *)fnic_trace_entries.page_offset);
+               fnic_trace_entries.page_offset = NULL;
+       }
+       if (fnic_trace_buf_p) {
+               vfree((void *)fnic_trace_buf_p);
+               fnic_trace_buf_p = 0;
+       }
+       printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
+}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
new file mode 100644 (file)
index 0000000..cef42b4
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __FNIC_TRACE_H__
+#define __FNIC_TRACE_H__
+
+#define FNIC_ENTRY_SIZE_BYTES 64
+
+extern ssize_t simple_read_from_buffer(void __user *to,
+                                         size_t count,
+                                         loff_t *ppos,
+                                         const void *from,
+                                         size_t available);
+
+extern unsigned int fnic_trace_max_pages;
+extern int fnic_tracing_enabled;
+extern unsigned int trace_max_pages;
+
+typedef struct fnic_trace_dbg {
+       int wr_idx;
+       int rd_idx;
+       unsigned long *page_offset;
+} fnic_trace_dbg_t;
+
+typedef struct fnic_dbgfs {
+       int buffer_len;
+       char *buffer;
+} fnic_dbgfs_t;
+
+struct fnic_trace_data {
+       union {
+               struct {
+                       u32 low;
+                       u32 high;
+               };
+               u64 val;
+       } timestamp, fnaddr;
+       u32 host_no;
+       u32 tag;
+       u64 data[5];
+} __attribute__((__packed__));
+
+typedef struct fnic_trace_data fnic_trace_data_t;
+
+#define FNIC_TRACE_ENTRY_SIZE \
+                 (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
+
+#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e)           \
+       if (unlikely(fnic_tracing_enabled)) {                   \
+               fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \
+               if (trace_buf) { \
+                       if (sizeof(unsigned long) < 8) { \
+                               trace_buf->timestamp.low = jiffies; \
+                               trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \
+                       } else { \
+                               trace_buf->timestamp.val = jiffies; \
+                               trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \
+                       } \
+                       trace_buf->host_no = _hn; \
+                       trace_buf->tag = _t; \
+                       trace_buf->data[0] = (u64)(unsigned long)_a; \
+                       trace_buf->data[1] = (u64)(unsigned long)_b; \
+                       trace_buf->data[2] = (u64)(unsigned long)_c; \
+                       trace_buf->data[3] = (u64)(unsigned long)_d; \
+                       trace_buf->data[4] = (u64)(unsigned long)_e; \
+               } \
+       }
+
+fnic_trace_data_t *fnic_trace_get_buf(void);
+int fnic_get_trace_data(fnic_dbgfs_t *);
+int fnic_trace_buf_init(void);
+void fnic_trace_free(void);
+int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_terminate(void);
+
+#endif
index 599790e..59bceac 100644 (file)
@@ -1107,14 +1107,8 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
        pci_read_config_word(pdev, PCI_COMMAND, &command);
         command |= 6;
        pci_write_config_word(pdev, PCI_COMMAND, command);
-       if (pci_resource_start(pdev, 8) == 1UL)
-           pci_resource_start(pdev, 8) = 0UL;
-        i = 0xFEFF0001UL;
-       pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i);
-        gdth_delay(1);
-       pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
-                              pci_resource_start(pdev, 8));
-        
+       gdth_delay(1);
+
         dp6m_ptr = ha->brd;
 
         /* Ensure that it is safe to access the non HW portions of DPMEM.
index 4f33806..7f4f790 100644 (file)
@@ -165,7 +165,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
        int cmd_type);
 
@@ -1131,7 +1131,7 @@ clean:
        return -ENOMEM;
 }
 
-static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+static int hpsa_map_sg_chain_block(struct ctlr_info *h,
        struct CommandList *c)
 {
        struct SGDescriptor *chain_sg, *chain_block;
@@ -1144,8 +1144,15 @@ static void hpsa_map_sg_chain_block(struct ctlr_info *h,
                (c->Header.SGTotal - h->max_cmd_sg_entries);
        temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
                                PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&h->pdev->dev, temp64)) {
+               /* prevent subsequent unmapping */
+               chain_sg->Addr.lower = 0;
+               chain_sg->Addr.upper = 0;
+               return -1;
+       }
        chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
        chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+       return 0;
 }
 
 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
@@ -1390,7 +1397,7 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
        }
 }
 
-static void hpsa_map_one(struct pci_dev *pdev,
+static int hpsa_map_one(struct pci_dev *pdev,
                struct CommandList *cp,
                unsigned char *buf,
                size_t buflen,
@@ -1401,10 +1408,16 @@ static void hpsa_map_one(struct pci_dev *pdev,
        if (buflen == 0 || data_direction == PCI_DMA_NONE) {
                cp->Header.SGList = 0;
                cp->Header.SGTotal = 0;
-               return;
+               return 0;
        }
 
        addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+       if (dma_mapping_error(&pdev->dev, addr64)) {
+               /* Prevent subsequent unmap of something never mapped */
+               cp->Header.SGList = 0;
+               cp->Header.SGTotal = 0;
+               return -1;
+       }
        cp->SG[0].Addr.lower =
          (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
        cp->SG[0].Addr.upper =
@@ -1412,6 +1425,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
        cp->SG[0].Len = buflen;
        cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
        cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+       return 0;
 }
 
 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
@@ -1540,13 +1554,18 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
                return -ENOMEM;
        }
 
-       fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+       if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
+                       page, scsi3addr, TYPE_CMD)) {
+               rc = -1;
+               goto out;
+       }
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
        ei = c->err_info;
        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
+out:
        cmd_special_free(h, c);
        return rc;
 }
@@ -1564,7 +1583,9 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
                return -ENOMEM;
        }
 
-       fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+       /* fill_cmd can't fail here, no data buffer to map. */
+       (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
+                       NULL, 0, 0, scsi3addr, TYPE_MSG);
        hpsa_scsi_do_simple_cmd_core(h, c);
        /* no unmap needed here because no data xfer. */
 
@@ -1631,8 +1652,11 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
        }
        /* address the controller */
        memset(scsi3addr, 0, sizeof(scsi3addr));
-       fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
-               buf, bufsize, 0, scsi3addr, TYPE_CMD);
+       if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+               buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
+               rc = -1;
+               goto out;
+       }
        if (extended_response)
                c->Request.CDB[1] = extended_response;
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
@@ -1642,6 +1666,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
+out:
        cmd_special_free(h, c);
        return rc;
 }
@@ -2105,7 +2130,10 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
        if (chained) {
                cp->Header.SGList = h->max_cmd_sg_entries;
                cp->Header.SGTotal = (u16) (use_sg + 1);
-               hpsa_map_sg_chain_block(h, cp);
+               if (hpsa_map_sg_chain_block(h, cp)) {
+                       scsi_dma_unmap(cmd);
+                       return -1;
+               }
                return 0;
        }
 
@@ -2353,8 +2381,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
                if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
                        waittime = waittime * 2;
 
-               /* Send the Test Unit Ready */
-               fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+               /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
+               (void) fill_cmd(c, TEST_UNIT_READY, h,
+                               NULL, 0, 0, lunaddr, TYPE_CMD);
                hpsa_scsi_do_simple_cmd_core(h, c);
                /* no unmap needed here because no data xfer. */
 
@@ -2439,7 +2468,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
                return -ENOMEM;
        }
 
-       fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+       /* fill_cmd can't fail here, no buffer to map */
+       (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+               0, 0, scsi3addr, TYPE_MSG);
        if (swizzle)
                swizzle_abort_tag(&c->Request.CDB[4]);
        hpsa_scsi_do_simple_cmd_core(h, c);
@@ -2928,6 +2959,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        struct CommandList *c;
        char *buff = NULL;
        union u64bit temp64;
+       int rc = 0;
 
        if (!argp)
                return -EINVAL;
@@ -2947,8 +2979,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                        /* Copy the data into the buffer we created */
                        if (copy_from_user(buff, iocommand.buf,
                                iocommand.buf_size)) {
-                               kfree(buff);
-                               return -EFAULT;
+                               rc = -EFAULT;
+                               goto out_kfree;
                        }
                } else {
                        memset(buff, 0, iocommand.buf_size);
@@ -2956,8 +2988,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        }
        c = cmd_special_alloc(h);
        if (c == NULL) {
-               kfree(buff);
-               return -ENOMEM;
+               rc = -ENOMEM;
+               goto out_kfree;
        }
        /* Fill in the command type */
        c->cmd_type = CMD_IOCTL_PEND;
@@ -2982,6 +3014,13 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        if (iocommand.buf_size > 0) {
                temp64.val = pci_map_single(h->pdev, buff,
                        iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+                       c->SG[0].Addr.lower = 0;
+                       c->SG[0].Addr.upper = 0;
+                       c->SG[0].Len = 0;
+                       rc = -ENOMEM;
+                       goto out;
+               }
                c->SG[0].Addr.lower = temp64.val32.lower;
                c->SG[0].Addr.upper = temp64.val32.upper;
                c->SG[0].Len = iocommand.buf_size;
@@ -2996,22 +3035,22 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        memcpy(&iocommand.error_info, c->err_info,
                sizeof(iocommand.error_info));
        if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
-               kfree(buff);
-               cmd_special_free(h, c);
-               return -EFAULT;
+               rc = -EFAULT;
+               goto out;
        }
        if (iocommand.Request.Type.Direction == XFER_READ &&
                iocommand.buf_size > 0) {
                /* Copy the data out of the buffer we created */
                if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
-                       kfree(buff);
-                       cmd_special_free(h, c);
-                       return -EFAULT;
+                       rc = -EFAULT;
+                       goto out;
                }
        }
-       kfree(buff);
+out:
        cmd_special_free(h, c);
-       return 0;
+out_kfree:
+       kfree(buff);
+       return rc;
 }
 
 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
@@ -3103,6 +3142,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                for (i = 0; i < sg_used; i++) {
                        temp64.val = pci_map_single(h->pdev, buff[i],
                                    buff_size[i], PCI_DMA_BIDIRECTIONAL);
+                       if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+                               c->SG[i].Addr.lower = 0;
+                               c->SG[i].Addr.upper = 0;
+                               c->SG[i].Len = 0;
+                               hpsa_pci_unmap(h->pdev, c, i,
+                                       PCI_DMA_BIDIRECTIONAL);
+                               status = -ENOMEM;
+                               goto cleanup1;
+                       }
                        c->SG[i].Addr.lower = temp64.val32.lower;
                        c->SG[i].Addr.upper = temp64.val32.upper;
                        c->SG[i].Len = buff_size[i];
@@ -3190,7 +3238,8 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
        c = cmd_alloc(h);
        if (!c)
                return -ENOMEM;
-       fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+       /* fill_cmd can't fail here, no data buffer to map */
+       (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
                RAID_CTLR_LUNID, TYPE_MSG);
        c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
        c->waiting = NULL;
@@ -3202,7 +3251,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
        return 0;
 }
 
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
        int cmd_type)
 {
@@ -3271,7 +3320,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
                default:
                        dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
                        BUG();
-                       return;
+                       return -1;
                }
        } else if (cmd_type == TYPE_MSG) {
                switch (cmd) {
@@ -3343,10 +3392,9 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        default:
                pci_dir = PCI_DMA_BIDIRECTIONAL;
        }
-
-       hpsa_map_one(h->pdev, c, buff, size, pci_dir);
-
-       return;
+       if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+               return -1;
+       return 0;
 }
 
 /*
@@ -4882,10 +4930,13 @@ static void hpsa_flush_cache(struct ctlr_info *h)
                dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                goto out_of_memory;
        }
-       fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
-               RAID_CTLR_LUNID, TYPE_CMD);
+       if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+               RAID_CTLR_LUNID, TYPE_CMD)) {
+               goto out;
+       }
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
        if (c->err_info->CommandStatus != 0)
+out:
                dev_warn(&h->pdev->dev,
                        "error flushing cache on controller\n");
        cmd_special_free(h, c);
index 1d7da3f..f328089 100644 (file)
@@ -98,6 +98,7 @@ static unsigned int ipr_transop_timeout = 0;
 static unsigned int ipr_debug = 0;
 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
 static unsigned int ipr_dual_ioa_raid = 1;
+static unsigned int ipr_number_of_msix = 2;
 static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
@@ -107,6 +108,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
+               .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
@@ -131,6 +133,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
+               .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
@@ -155,6 +158,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
+               .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
@@ -215,6 +219,8 @@ MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to e
 module_param_named(max_devs, ipr_max_devs, int, 0);
 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
+module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5).  (default:2)");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
@@ -549,7 +555,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
-       trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
+       trace_entry = &ioa_cfg->trace[atomic_add_return
+                       (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
@@ -560,6 +567,7 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
+       wmb();
 }
 #else
 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
@@ -595,8 +603,11 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
+       int hrrq_id;
 
+       hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+       ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
@@ -646,12 +657,16 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
  *     pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
 {
-       struct ipr_cmnd *ipr_cmd;
+       struct ipr_cmnd *ipr_cmd = NULL;
+
+       if (likely(!list_empty(&hrrq->hrrq_free_q))) {
+               ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
+                       struct ipr_cmnd, queue);
+               list_del(&ipr_cmd->queue);
+       }
 
-       ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
-       list_del(&ipr_cmd->queue);
 
        return ipr_cmd;
 }
@@ -666,7 +681,8 @@ struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 static
 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
-       struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+       struct ipr_cmnd *ipr_cmd =
+               __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
 }
@@ -686,9 +702,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
 {
        volatile u32 int_reg;
+       int i;
 
        /* Stop new interrupts */
-       ioa_cfg->allow_interrupts = 0;
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].allow_interrupts = 0;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
 
        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
@@ -761,13 +783,12 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
  **/
 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
 {
-       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;
 
        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
 }
 
@@ -783,14 +804,13 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
  **/
 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 {
-       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 
        scsi_cmd->result |= (DID_ERROR << 16);
 
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 }
 
 /**
@@ -805,24 +825,32 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
 {
        struct ipr_cmnd *ipr_cmd, *temp;
+       struct ipr_hrr_queue *hrrq;
 
        ENTER;
-       list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
-               list_del(&ipr_cmd->queue);
+       for_each_hrrq(hrrq, ioa_cfg) {
+               spin_lock(&hrrq->_lock);
+               list_for_each_entry_safe(ipr_cmd,
+                                       temp, &hrrq->hrrq_pending_q, queue) {
+                       list_del(&ipr_cmd->queue);
 
-               ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
-               ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+                       ipr_cmd->s.ioasa.hdr.ioasc =
+                               cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+                       ipr_cmd->s.ioasa.hdr.ilid =
+                               cpu_to_be32(IPR_DRIVER_ILID);
 
-               if (ipr_cmd->scsi_cmd)
-                       ipr_cmd->done = ipr_scsi_eh_done;
-               else if (ipr_cmd->qc)
-                       ipr_cmd->done = ipr_sata_eh_done;
+                       if (ipr_cmd->scsi_cmd)
+                               ipr_cmd->done = ipr_scsi_eh_done;
+                       else if (ipr_cmd->qc)
+                               ipr_cmd->done = ipr_sata_eh_done;
 
-               ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
-               del_timer(&ipr_cmd->timer);
-               ipr_cmd->done(ipr_cmd);
+                       ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
+                                    IPR_IOASC_IOA_WAS_RESET);
+                       del_timer(&ipr_cmd->timer);
+                       ipr_cmd->done(ipr_cmd);
+               }
+               spin_unlock(&hrrq->_lock);
        }
-
        LEAVE;
 }
 
@@ -872,9 +900,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
 {
-       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 
        ipr_cmd->done = done;
 
@@ -975,6 +1001,14 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
        spin_lock_irq(ioa_cfg->host->host_lock);
 }
 
+static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+{
+       if (ioa_cfg->hrrq_num == 1)
+               return 0;
+       else
+               return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+}
+
 /**
  * ipr_send_hcam - Send an HCAM to the adapter.
  * @ioa_cfg:   ioa config struct
@@ -994,9 +1028,9 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;
 
-       if (ioa_cfg->allow_cmds) {
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
-               list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+               list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
 
                ipr_cmd->u.hostrcb = hostrcb;
@@ -1166,14 +1200,15 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
 }
 
 /**
- * ipr_format_res_path - Format the resource path for printing.
+ * __ipr_format_res_path - Format the resource path for printing.
  * @res_path:  resource path
  * @buf:       buffer
+ * @len:       length of buffer provided
  *
  * Return value:
  *     pointer to buffer
  **/
-static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
+static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
 {
        int i;
        char *p = buffer;
@@ -1186,6 +1221,27 @@ static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
        return buffer;
 }
 
+/**
+ * ipr_format_res_path - Format the resource path for printing.
+ * @ioa_cfg:   ioa config struct
+ * @res_path:  resource path
+ * @buf:       buffer
+ * @len:       length of buffer provided
+ *
+ * Return value:
+ *     pointer to buffer
+ **/
+static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
+                                u8 *res_path, char *buffer, int len)
+{
+       char *p = buffer;
+
+       *p = '\0';
+       p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+       __ipr_format_res_path(res_path, p, len - (buffer - p));
+       return buffer;
+}
+
 /**
  * ipr_update_res_entry - Update the resource entry.
  * @res:       resource entry struct
@@ -1226,8 +1282,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
 
                if (res->sdev && new_path)
                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
-                                   ipr_format_res_path(res->res_path, buffer,
-                                                       sizeof(buffer)));
+                                   ipr_format_res_path(res->ioa_cfg,
+                                       res->res_path, buffer, sizeof(buffer)));
        } else {
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1363,7 +1419,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
        list_del(&hostrcb->queue);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
        if (ioasc) {
                if (ioasc != IPR_IOASC_IOA_WAS_RESET)
@@ -1613,8 +1669,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
                ipr_err_separator;
 
                ipr_err("Device %d : %s", i + 1,
-                        ipr_format_res_path(dev_entry->res_path, buffer,
-                                            sizeof(buffer)));
+                       __ipr_format_res_path(dev_entry->res_path,
+                                             buffer, sizeof(buffer)));
                ipr_log_ext_vpd(&dev_entry->vpd);
 
                ipr_err("-----New Device Information-----\n");
@@ -1960,14 +2016,16 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
 
                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
                                     path_active_desc[i].desc, path_state_desc[j].desc,
-                                    ipr_format_res_path(fabric->res_path, buffer,
-                                                        sizeof(buffer)));
+                                    ipr_format_res_path(hostrcb->ioa_cfg,
+                                               fabric->res_path,
+                                               buffer, sizeof(buffer)));
                        return;
                }
        }
 
        ipr_err("Path state=%02X Resource Path=%s\n", path_state,
-               ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
+               ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
+                                   buffer, sizeof(buffer)));
 }
 
 static const struct {
@@ -2108,18 +2166,20 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
 
                        ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
                                     path_status_desc[j].desc, path_type_desc[i].desc,
-                                    ipr_format_res_path(cfg->res_path, buffer,
-                                                        sizeof(buffer)),
-                                    link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
-                                    be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+                                    ipr_format_res_path(hostrcb->ioa_cfg,
+                                       cfg->res_path, buffer, sizeof(buffer)),
+                                       link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+                                       be32_to_cpu(cfg->wwid[0]),
+                                       be32_to_cpu(cfg->wwid[1]));
                        return;
                }
        }
        ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
                     "WWN=%08X%08X\n", cfg->type_status,
-                    ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
-                    link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
-                    be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+                    ipr_format_res_path(hostrcb->ioa_cfg,
+                       cfg->res_path, buffer, sizeof(buffer)),
+                       link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+                       be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
 }
 
 /**
@@ -2182,7 +2242,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
 
        ipr_err("RAID %s Array Configuration: %s\n",
                error->protection_level,
-               ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
+               ipr_format_res_path(ioa_cfg, error->last_res_path,
+                       buffer, sizeof(buffer)));
 
        ipr_err_separator;
 
@@ -2203,11 +2264,12 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
                ipr_err("Array Member %d:\n", i);
                ipr_log_ext_vpd(&array_entry->vpd);
                ipr_err("Current Location: %s\n",
-                        ipr_format_res_path(array_entry->res_path, buffer,
-                                            sizeof(buffer)));
+                        ipr_format_res_path(ioa_cfg, array_entry->res_path,
+                               buffer, sizeof(buffer)));
                ipr_err("Expected Location: %s\n",
-                        ipr_format_res_path(array_entry->expected_res_path,
-                                            buffer, sizeof(buffer)));
+                        ipr_format_res_path(ioa_cfg,
+                               array_entry->expected_res_path,
+                               buffer, sizeof(buffer)));
 
                ipr_err_separator;
        }
@@ -2409,7 +2471,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
                fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
        list_del(&hostrcb->queue);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
        if (!ioasc) {
                ipr_handle_log_data(ioa_cfg, hostrcb);
@@ -2490,36 +2552,6 @@ static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
        LEAVE;
 }
 
-/**
- * ipr_reset_reload - Reset/Reload the IOA
- * @ioa_cfg:           ioa config struct
- * @shutdown_type:     shutdown type
- *
- * This function resets the adapter and re-initializes it.
- * This function assumes that all new host commands have been stopped.
- * Return value:
- *     SUCCESS / FAILED
- **/
-static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
-                           enum ipr_shutdown_type shutdown_type)
-{
-       if (!ioa_cfg->in_reset_reload)
-               ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
-
-       spin_unlock_irq(ioa_cfg->host->host_lock);
-       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-       spin_lock_irq(ioa_cfg->host->host_lock);
-
-       /* If we got hit with a host reset while we were already resetting
-        the adapter for some reason, and the reset failed. */
-       if (ioa_cfg->ioa_is_dead) {
-               ipr_trace;
-               return FAILED;
-       }
-
-       return SUCCESS;
-}
-
 /**
  * ipr_find_ses_entry - Find matching SES in SES table
  * @res:       resource entry struct of SES
@@ -3153,7 +3185,8 @@ static void ipr_worker_thread(struct work_struct *work)
 restart:
        do {
                did_work = 0;
-               if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
+               if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+                   !ioa_cfg->allow_ml_add_del) {
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                        return;
                }
@@ -3401,7 +3434,7 @@ static ssize_t ipr_show_adapter_state(struct device *dev,
        int len;
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-       if (ioa_cfg->ioa_is_dead)
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
                len = snprintf(buf, PAGE_SIZE, "offline\n");
        else
                len = snprintf(buf, PAGE_SIZE, "online\n");
@@ -3427,14 +3460,20 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
        unsigned long lock_flags;
-       int result = count;
+       int result = count, i;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-       if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
-               ioa_cfg->ioa_is_dead = 0;
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
+           !strncmp(buf, "online", 6)) {
+               for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+                       spin_lock(&ioa_cfg->hrrq[i]._lock);
+                       ioa_cfg->hrrq[i].ioa_is_dead = 0;
+                       spin_unlock(&ioa_cfg->hrrq[i]._lock);
+               }
+               wmb();
                ioa_cfg->reset_retries = 0;
                ioa_cfg->in_ioa_bringdown = 0;
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -3494,6 +3533,95 @@ static struct device_attribute ipr_ioa_reset_attr = {
        .store = ipr_store_reset_adapter
 };
 
+static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+ /**
+ * ipr_show_iopoll_weight - Show ipr polling mode
+ * @dev:       class device struct
+ * @buf:       buffer
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_iopoll_weight(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       unsigned long lock_flags = 0;
+       int len;
+
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+       return len;
+}
+
+/**
+ * ipr_store_iopoll_weight - Change the adapter's polling mode
+ * @dev:       class device struct
+ * @buf:       buffer
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_store_iopoll_weight(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       unsigned long user_iopoll_weight;
+       unsigned long lock_flags = 0;
+       int i;
+
+       if (!ioa_cfg->sis64) {
+               dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+               return -EINVAL;
+       }
+       if (kstrtoul(buf, 10, &user_iopoll_weight))
+               return -EINVAL;
+
+       if (user_iopoll_weight > 256) {
+               dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
+               return -EINVAL;
+       }
+
+       if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
+               dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
+               return strlen(buf);
+       }
+
+       if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+                       ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+               for (i = 1; i < ioa_cfg->hrrq_num; i++)
+                       blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+       }
+
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       ioa_cfg->iopoll_weight = user_iopoll_weight;
+       if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+                       ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+               for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+                       blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+                                       ioa_cfg->iopoll_weight, ipr_iopoll);
+                       blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+               }
+       }
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+       return strlen(buf);
+}
+
+static struct device_attribute ipr_iopoll_weight_attr = {
+       .attr = {
+               .name =         "iopoll_weight",
+               .mode =         S_IRUGO | S_IWUSR,
+       },
+       .show = ipr_show_iopoll_weight,
+       .store = ipr_store_iopoll_weight
+};
+
 /**
  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
  * @buf_len:           buffer length
@@ -3862,6 +3990,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
        &ipr_ioa_reset_attr,
        &ipr_update_fw_attr,
        &ipr_ioa_fw_type_attr,
+       &ipr_iopoll_weight_attr,
        NULL,
 };
 
@@ -4014,7 +4143,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
 
        ioa_cfg->dump = dump;
        ioa_cfg->sdt_state = WAIT_FOR_DUMP;
-       if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
                ioa_cfg->dump_taken = 1;
                schedule_work(&ioa_cfg->work_q);
        }
@@ -4227,8 +4356,8 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
        res = (struct ipr_resource_entry *)sdev->hostdata;
        if (res && ioa_cfg->sis64)
                len = snprintf(buf, PAGE_SIZE, "%s\n",
-                              ipr_format_res_path(res->res_path, buffer,
-                                                  sizeof(buffer)));
+                              __ipr_format_res_path(res->res_path, buffer,
+                                                    sizeof(buffer)));
        else if (res)
                len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
                               res->bus, res->target, res->lun);
@@ -4556,8 +4685,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
                        scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
                if (ioa_cfg->sis64)
                        sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
-                                   ipr_format_res_path(res->res_path, buffer,
-                                                       sizeof(buffer)));
+                                   ipr_format_res_path(ioa_cfg,
+                               res->res_path, buffer, sizeof(buffer)));
                return 0;
        }
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4638,22 +4767,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
        return rc;
 }
 
-/**
- * ipr_eh_host_reset - Reset the host adapter
- * @scsi_cmd:  scsi command struct
- *
- * Return value:
- *     SUCCESS / FAILED
- **/
-static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg;
-       int rc;
+       unsigned long lock_flags = 0;
+       int rc = SUCCESS;
 
        ENTER;
-       ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+       ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
        if (!ioa_cfg->in_reset_reload) {
+               ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
                dev_err(&ioa_cfg->pdev->dev,
                        "Adapter being reset as a result of error recovery.\n");
 
@@ -4661,20 +4786,19 @@ static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
                        ioa_cfg->sdt_state = GET_DUMP;
        }
 
-       rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
-
-       LEAVE;
-       return rc;
-}
-
-static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
-{
-       int rc;
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-       spin_lock_irq(cmd->device->host->host_lock);
-       rc = __ipr_eh_host_reset(cmd);
-       spin_unlock_irq(cmd->device->host->host_lock);
+       /* If we got hit with a host reset while we were already resetting
+        the adapter for some reason, and the reset failed. */
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
+               ipr_trace;
+               rc = FAILED;
+       }
 
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       LEAVE;
        return rc;
 }
 
@@ -4723,7 +4847,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
 
        ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
        ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
                if (ipr_cmd->ioa_cfg->sis64)
                        memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
@@ -4793,6 +4917,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
        struct ipr_resource_entry *res;
        struct ata_port *ap;
        int rc = 0;
+       struct ipr_hrr_queue *hrrq;
 
        ENTER;
        ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -4808,22 +4933,26 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
         */
        if (ioa_cfg->in_reset_reload)
                return FAILED;
-       if (ioa_cfg->ioa_is_dead)
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
                return FAILED;
 
-       list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-               if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
-                       if (ipr_cmd->scsi_cmd)
-                               ipr_cmd->done = ipr_scsi_eh_done;
-                       if (ipr_cmd->qc)
-                               ipr_cmd->done = ipr_sata_eh_done;
-                       if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
-                               ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
-                               ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+       for_each_hrrq(hrrq, ioa_cfg) {
+               spin_lock(&hrrq->_lock);
+               list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                       if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
+                               if (ipr_cmd->scsi_cmd)
+                                       ipr_cmd->done = ipr_scsi_eh_done;
+                               if (ipr_cmd->qc)
+                                       ipr_cmd->done = ipr_sata_eh_done;
+                               if (ipr_cmd->qc &&
+                                   !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+                                       ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+                                       ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+                               }
                        }
                }
+               spin_unlock(&hrrq->_lock);
        }
-
        res->resetting_device = 1;
        scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
 
@@ -4833,11 +4962,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
                ata_std_error_handler(ap);
                spin_lock_irq(scsi_cmd->device->host->host_lock);
 
-               list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-                       if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
-                               rc = -EIO;
-                               break;
+               for_each_hrrq(hrrq, ioa_cfg) {
+                       spin_lock(&hrrq->_lock);
+                       list_for_each_entry(ipr_cmd,
+                                           &hrrq->hrrq_pending_q, queue) {
+                               if (ipr_cmd->ioarcb.res_handle ==
+                                   res->res_handle) {
+                                       rc = -EIO;
+                                       break;
+                               }
                        }
+                       spin_unlock(&hrrq->_lock);
                }
        } else
                rc = ipr_device_reset(ioa_cfg, res);
@@ -4890,7 +5025,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
        else
                ipr_cmd->sibling->done(ipr_cmd->sibling);
 
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        LEAVE;
 }
 
@@ -4951,6 +5086,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
        struct ipr_cmd_pkt *cmd_pkt;
        u32 ioasc, int_reg;
        int op_found = 0;
+       struct ipr_hrr_queue *hrrq;
 
        ENTER;
        ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
@@ -4960,7 +5096,8 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
         * This will force the mid-layer to call ipr_eh_host_reset,
         * which will then go to sleep and wait for the reset to complete
         */
-       if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
+       if (ioa_cfg->in_reset_reload ||
+           ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
                return FAILED;
        if (!res)
                return FAILED;
@@ -4975,12 +5112,16 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
        if (!ipr_is_gscsi(res))
                return FAILED;
 
-       list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-               if (ipr_cmd->scsi_cmd == scsi_cmd) {
-                       ipr_cmd->done = ipr_scsi_eh_done;
-                       op_found = 1;
-                       break;
+       for_each_hrrq(hrrq, ioa_cfg) {
+               spin_lock(&hrrq->_lock);
+               list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                       if (ipr_cmd->scsi_cmd == scsi_cmd) {
+                               ipr_cmd->done = ipr_scsi_eh_done;
+                               op_found = 1;
+                               break;
+                       }
                }
+               spin_unlock(&hrrq->_lock);
        }
 
        if (!op_found)
@@ -5007,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
                ipr_trace;
        }
 
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
        if (!ipr_is_naca_model(res))
                res->needs_sync_complete = 1;
 
@@ -5099,6 +5240,9 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
        } else {
                if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
                        ioa_cfg->ioa_unit_checked = 1;
+               else if (int_reg & IPR_PCII_NO_HOST_RRQ)
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "No Host RRQ. 0x%08X\n", int_reg);
                else
                        dev_err(&ioa_cfg->pdev->dev,
                                "Permanent IOA failure. 0x%08X\n", int_reg);
@@ -5121,10 +5265,10 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
  * Return value:
  *     none
  **/
-static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
 {
        ioa_cfg->errors_logged++;
-       dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+       dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
 
        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
                ioa_cfg->sdt_state = GET_DUMP;
@@ -5132,6 +5276,83 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 }
 
+static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
+                                               struct list_head *doneq)
+{
+       u32 ioasc;
+       u16 cmd_index;
+       struct ipr_cmnd *ipr_cmd;
+       struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
+       int num_hrrq = 0;
+
+       /* If interrupts are disabled, ignore the interrupt */
+       if (!hrr_queue->allow_interrupts)
+               return 0;
+
+       while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+              hrr_queue->toggle_bit) {
+
+               cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
+                            IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
+                            IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+
+               if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
+                            cmd_index < hrr_queue->min_cmd_id)) {
+                       ipr_isr_eh(ioa_cfg,
+                               "Invalid response handle from IOA: ",
+                               cmd_index);
+                       break;
+               }
+
+               ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
+               ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+               ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+
+               list_move_tail(&ipr_cmd->queue, doneq);
+
+               if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
+                       hrr_queue->hrrq_curr++;
+               } else {
+                       hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
+                       hrr_queue->toggle_bit ^= 1u;
+               }
+               num_hrrq++;
+               if (budget > 0 && num_hrrq >= budget)
+                       break;
+       }
+
+       return num_hrrq;
+}
+
+static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+{
+       struct ipr_ioa_cfg *ioa_cfg;
+       struct ipr_hrr_queue *hrrq;
+       struct ipr_cmnd *ipr_cmd, *temp;
+       unsigned long hrrq_flags;
+       int completed_ops;
+       LIST_HEAD(doneq);
+
+       hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
+       ioa_cfg = hrrq->ioa_cfg;
+
+       spin_lock_irqsave(hrrq->lock, hrrq_flags);
+       completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
+
+       if (completed_ops < budget)
+               blk_iopoll_complete(iop);
+       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+       list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+               list_del(&ipr_cmd->queue);
+               del_timer(&ipr_cmd->timer);
+               ipr_cmd->fast_done(ipr_cmd);
+       }
+
+       return completed_ops;
+}
+
 /**
  * ipr_isr - Interrupt service routine
  * @irq:       irq number
@@ -5142,78 +5363,48 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
  **/
 static irqreturn_t ipr_isr(int irq, void *devp)
 {
-       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
-       unsigned long lock_flags = 0;
+       struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+       struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+       unsigned long hrrq_flags = 0;
        u32 int_reg = 0;
-       u32 ioasc;
-       u16 cmd_index;
        int num_hrrq = 0;
        int irq_none = 0;
        struct ipr_cmnd *ipr_cmd, *temp;
        irqreturn_t rc = IRQ_NONE;
        LIST_HEAD(doneq);
 
-       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-
+       spin_lock_irqsave(hrrq->lock, hrrq_flags);
        /* If interrupts are disabled, ignore the interrupt */
-       if (!ioa_cfg->allow_interrupts) {
-               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       if (!hrrq->allow_interrupts) {
+               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                return IRQ_NONE;
        }
 
        while (1) {
-               ipr_cmd = NULL;
-
-               while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
-                      ioa_cfg->toggle_bit) {
-
-                       cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
-                                    IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
-
-                       if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
-                               ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
-                               rc = IRQ_HANDLED;
-                               goto unlock_out;
-                       }
-
-                       ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
-
-                       ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-
-                       ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
-
-                       list_move_tail(&ipr_cmd->queue, &doneq);
-
-                       rc = IRQ_HANDLED;
-
-                       if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
-                               ioa_cfg->hrrq_curr++;
-                       } else {
-                               ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
-                               ioa_cfg->toggle_bit ^= 1u;
-                       }
-               }
+               if (ipr_process_hrrq(hrrq, -1, &doneq)) {
+                       rc =  IRQ_HANDLED;
 
-               if (ipr_cmd && !ioa_cfg->clear_isr)
-                       break;
+                       if (!ioa_cfg->clear_isr)
+                               break;
 
-               if (ipr_cmd != NULL) {
                        /* Clear the PCI interrupt */
                        num_hrrq = 0;
                        do {
-                               writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+                               writel(IPR_PCII_HRRQ_UPDATED,
+                                    ioa_cfg->regs.clr_interrupt_reg32);
                                int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
                        } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
-                                       num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+                               num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
 
                } else if (rc == IRQ_NONE && irq_none == 0) {
                        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
                        irq_none++;
                } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
                           int_reg & IPR_PCII_HRRQ_UPDATED) {
-                       ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+                       ipr_isr_eh(ioa_cfg,
+                               "Error clearing HRRQ: ", num_hrrq);
                        rc = IRQ_HANDLED;
-                       goto unlock_out;
+                       break;
                } else
                        break;
        }
@@ -5221,14 +5412,64 @@ static irqreturn_t ipr_isr(int irq, void *devp)
        if (unlikely(rc == IRQ_NONE))
                rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
-unlock_out:
-       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
                list_del(&ipr_cmd->queue);
                del_timer(&ipr_cmd->timer);
                ipr_cmd->fast_done(ipr_cmd);
        }
+       return rc;
+}
+
+/**
+ * ipr_isr_mhrrq - Interrupt service routine
+ * @irq:       irq number
+ * @devp:      pointer to ioa config struct
+ *
+ * Return value:
+ *     IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
+{
+       struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+       struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+       unsigned long hrrq_flags = 0;
+       struct ipr_cmnd *ipr_cmd, *temp;
+       irqreturn_t rc = IRQ_NONE;
+       LIST_HEAD(doneq);
+
+       spin_lock_irqsave(hrrq->lock, hrrq_flags);
+
+       /* If interrupts are disabled, ignore the interrupt */
+       if (!hrrq->allow_interrupts) {
+               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+               return IRQ_NONE;
+       }
+
+       if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+                       ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+               if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+                      hrrq->toggle_bit) {
+                       if (!blk_iopoll_sched_prep(&hrrq->iopoll))
+                               blk_iopoll_sched(&hrrq->iopoll);
+                       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+                       return IRQ_HANDLED;
+               }
+       } else {
+               if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+                       hrrq->toggle_bit)
+
+                       if (ipr_process_hrrq(hrrq, -1, &doneq))
+                               rc =  IRQ_HANDLED;
+       }
+
+       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 
+       list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+               list_del(&ipr_cmd->queue);
+               del_timer(&ipr_cmd->timer);
+               ipr_cmd->fast_done(ipr_cmd);
+       }
        return rc;
 }
 
@@ -5388,7 +5629,6 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 {
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
        struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
-       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
        if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
@@ -5406,7 +5646,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
                res->in_erp = 0;
        }
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        scsi_cmd->scsi_done(scsi_cmd);
 }
 
@@ -5790,7 +6030,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
        }
 
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        scsi_cmd->scsi_done(scsi_cmd);
 }
 
@@ -5809,21 +6049,21 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-       unsigned long lock_flags;
+       unsigned long hrrq_flags;
 
        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
                scsi_dma_unmap(scsi_cmd);
 
-               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+               list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
                scsi_cmd->scsi_done(scsi_cmd);
-               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
        } else {
-               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+               spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
                ipr_erp_start(ioa_cfg, ipr_cmd);
-               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
        }
 }
 
@@ -5846,22 +6086,34 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
        struct ipr_resource_entry *res;
        struct ipr_ioarcb *ioarcb;
        struct ipr_cmnd *ipr_cmd;
-       unsigned long lock_flags;
+       unsigned long hrrq_flags, lock_flags;
        int rc;
+       struct ipr_hrr_queue *hrrq;
+       int hrrq_id;
 
        ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 
-       spin_lock_irqsave(shost->host_lock, lock_flags);
        scsi_cmd->result = (DID_OK << 16);
        res = scsi_cmd->device->hostdata;
 
+       if (ipr_is_gata(res) && res->sata_port) {
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+               rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return rc;
+       }
+
+       hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+       hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+       spin_lock_irqsave(hrrq->lock, hrrq_flags);
        /*
         * We are currently blocking all devices due to a host reset
         * We have told the host to stop giving us new requests, but
         * ERP ops don't count. FIXME
         */
-       if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
-               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
+               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
@@ -5869,19 +6121,17 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
         * FIXME - Create scsi_set_host_offline interface
         *  and the ioa_is_dead check can be removed
         */
-       if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
+               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                goto err_nodev;
        }
 
-       if (ipr_is_gata(res) && res->sata_port) {
-               rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
-               spin_unlock_irqrestore(shost->host_lock, lock_flags);
-               return rc;
+       ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+       if (ipr_cmd == NULL) {
+               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+               return SCSI_MLQUEUE_HOST_BUSY;
        }
-
-       ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
-       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 
        ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
        ioarcb = &ipr_cmd->ioarcb;
@@ -5902,26 +6152,27 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
        }
 
        if (scsi_cmd->cmnd[0] >= 0xC0 &&
-           (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
+           (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+       }
 
        if (ioa_cfg->sis64)
                rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
        else
                rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
-       spin_lock_irqsave(shost->host_lock, lock_flags);
-       if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
-               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
-               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       spin_lock_irqsave(hrrq->lock, hrrq_flags);
+       if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
+               list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                if (!rc)
                        scsi_dma_unmap(scsi_cmd);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
-       if (unlikely(ioa_cfg->ioa_is_dead)) {
-               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
-               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       if (unlikely(hrrq->ioa_is_dead)) {
+               list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+               spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                scsi_dma_unmap(scsi_cmd);
                goto err_nodev;
        }
@@ -5931,18 +6182,18 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
                res->needs_sync_complete = 0;
        }
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+       list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
        ipr_send_command(ipr_cmd);
-       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
        return 0;
 
 err_nodev:
-       spin_lock_irqsave(shost->host_lock, lock_flags);
+       spin_lock_irqsave(hrrq->lock, hrrq_flags);
        memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        scsi_cmd->result = (DID_NO_CONNECT << 16);
        scsi_cmd->scsi_done(scsi_cmd);
-       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
        return 0;
 }
 
@@ -6040,7 +6291,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        }
 
-       if (!ioa_cfg->allow_cmds)
+       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
                goto out_unlock;
 
        rc = ipr_device_reset(ioa_cfg, res);
@@ -6071,6 +6322,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
        struct ipr_sata_port *sata_port = qc->ap->private_data;
        struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
        struct ipr_cmnd *ipr_cmd;
+       struct ipr_hrr_queue *hrrq;
        unsigned long flags;
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6080,11 +6332,15 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        }
 
-       list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-               if (ipr_cmd->qc == qc) {
-                       ipr_device_reset(ioa_cfg, sata_port->res);
-                       break;
+       for_each_hrrq(hrrq, ioa_cfg) {
+               spin_lock(&hrrq->_lock);
+               list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                       if (ipr_cmd->qc == qc) {
+                               ipr_device_reset(ioa_cfg, sata_port->res);
+                               break;
+                       }
                }
+               spin_unlock(&hrrq->_lock);
        }
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
@@ -6133,6 +6389,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_resource_entry *res = sata_port->res;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
+       spin_lock(&ipr_cmd->hrrq->_lock);
        if (ipr_cmd->ioa_cfg->sis64)
                memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
                       sizeof(struct ipr_ioasa_gata));
@@ -6148,7 +6405,8 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
                qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
        else
                qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+       spin_unlock(&ipr_cmd->hrrq->_lock);
        ata_qc_complete(qc);
 }
 
@@ -6243,6 +6501,48 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
                last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
 }
 
+/**
+ * ipr_qc_defer - Get a free ipr_cmd
+ * @qc:        queued command
+ *
+ * Return value:
+ *     0 if success
+ **/
+static int ipr_qc_defer(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       struct ipr_sata_port *sata_port = ap->private_data;
+       struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+       struct ipr_cmnd *ipr_cmd;
+       struct ipr_hrr_queue *hrrq;
+       int hrrq_id;
+
+       hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+       hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+       qc->lldd_task = NULL;
+       spin_lock(&hrrq->_lock);
+       if (unlikely(hrrq->ioa_is_dead)) {
+               spin_unlock(&hrrq->_lock);
+               return 0;
+       }
+
+       if (unlikely(!hrrq->allow_cmds)) {
+               spin_unlock(&hrrq->_lock);
+               return ATA_DEFER_LINK;
+       }
+
+       ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+       if (ipr_cmd == NULL) {
+               spin_unlock(&hrrq->_lock);
+               return ATA_DEFER_LINK;
+       }
+
+       qc->lldd_task = ipr_cmd;
+       spin_unlock(&hrrq->_lock);
+       return 0;
+}
+
 /**
  * ipr_qc_issue - Issue a SATA qc to a device
  * @qc:        queued command
@@ -6260,10 +6560,23 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
        struct ipr_ioarcb *ioarcb;
        struct ipr_ioarcb_ata_regs *regs;
 
-       if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
+       if (qc->lldd_task == NULL)
+               ipr_qc_defer(qc);
+
+       ipr_cmd = qc->lldd_task;
+       if (ipr_cmd == NULL)
                return AC_ERR_SYSTEM;
 
-       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       qc->lldd_task = NULL;
+       spin_lock(&ipr_cmd->hrrq->_lock);
+       if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
+                       ipr_cmd->hrrq->ioa_is_dead)) {
+               list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+               spin_unlock(&ipr_cmd->hrrq->_lock);
+               return AC_ERR_SYSTEM;
+       }
+
+       ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        ioarcb = &ipr_cmd->ioarcb;
 
        if (ioa_cfg->sis64) {
@@ -6275,7 +6588,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
        memset(regs, 0, sizeof(*regs));
        ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
 
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
        ipr_cmd->qc = qc;
        ipr_cmd->done = ipr_sata_done;
        ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -6315,10 +6628,12 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
 
        default:
                WARN_ON(1);
+               spin_unlock(&ipr_cmd->hrrq->_lock);
                return AC_ERR_INVALID;
        }
 
        ipr_send_command(ipr_cmd);
+       spin_unlock(&ipr_cmd->hrrq->_lock);
 
        return 0;
 }
@@ -6357,6 +6672,7 @@ static struct ata_port_operations ipr_sata_ops = {
        .hardreset = ipr_sata_reset,
        .post_internal_cmd = ipr_ata_post_internal,
        .qc_prep = ata_noop_qc_prep,
+       .qc_defer = ipr_qc_defer,
        .qc_issue = ipr_qc_issue,
        .qc_fill_rtf = ipr_qc_fill_rtf,
        .port_start = ata_sas_port_start,
@@ -6425,14 +6741,17 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
        ENTER;
+       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+               ipr_trace;
+               spin_unlock_irq(ioa_cfg->host->host_lock);
+               scsi_unblock_requests(ioa_cfg->host);
+               spin_lock_irq(ioa_cfg->host->host_lock);
+       }
+
        ioa_cfg->in_reset_reload = 0;
        ioa_cfg->reset_retries = 0;
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        wake_up_all(&ioa_cfg->reset_wait_q);
-
-       spin_unlock_irq(ioa_cfg->host->host_lock);
-       scsi_unblock_requests(ioa_cfg->host);
-       spin_lock_irq(ioa_cfg->host->host_lock);
        LEAVE;
 
        return IPR_RC_JOB_RETURN;
@@ -6454,11 +6773,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct ipr_resource_entry *res;
        struct ipr_hostrcb *hostrcb, *temp;
-       int i = 0;
+       int i = 0, j;
 
        ENTER;
        ioa_cfg->in_reset_reload = 0;
-       ioa_cfg->allow_cmds = 1;
+       for (j = 0; j < ioa_cfg->hrrq_num; j++) {
+               spin_lock(&ioa_cfg->hrrq[j]._lock);
+               ioa_cfg->hrrq[j].allow_cmds = 1;
+               spin_unlock(&ioa_cfg->hrrq[j]._lock);
+       }
+       wmb();
        ioa_cfg->reset_cmd = NULL;
        ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
 
@@ -6482,14 +6806,14 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
        dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
 
        ioa_cfg->reset_retries = 0;
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        wake_up_all(&ioa_cfg->reset_wait_q);
 
        spin_unlock(ioa_cfg->host->host_lock);
        scsi_unblock_requests(ioa_cfg->host);
        spin_lock(ioa_cfg->host->host_lock);
 
-       if (!ioa_cfg->allow_cmds)
+       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
                scsi_block_requests(ioa_cfg->host);
 
        LEAVE;
@@ -6560,9 +6884,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
 
                if (!ioa_cfg->sis64)
                        ipr_cmd->job_step = ipr_set_supported_devs;
+               LEAVE;
                return IPR_RC_JOB_RETURN;
        }
 
+       LEAVE;
        return IPR_RC_JOB_CONTINUE;
 }
 
@@ -6820,7 +7146,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
                ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
 
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        return IPR_RC_JOB_RETURN;
 }
 
@@ -7278,46 +7604,71 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+       struct ipr_hrr_queue *hrrq;
 
        ENTER;
+       ipr_cmd->job_step = ipr_ioafp_std_inquiry;
        dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
 
-       ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
-       ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+       if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
+               hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
 
-       ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
-       if (ioa_cfg->sis64)
-               ioarcb->cmd_pkt.cdb[1] = 0x1;
-       ioarcb->cmd_pkt.cdb[2] =
-               ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
-       ioarcb->cmd_pkt.cdb[3] =
-               ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
-       ioarcb->cmd_pkt.cdb[4] =
-               ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
-       ioarcb->cmd_pkt.cdb[5] =
-               ((u64) ioa_cfg->host_rrq_dma) & 0xff;
-       ioarcb->cmd_pkt.cdb[7] =
-               ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
-       ioarcb->cmd_pkt.cdb[8] =
-               (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
+               ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
+               ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 
-       if (ioa_cfg->sis64) {
-               ioarcb->cmd_pkt.cdb[10] =
-                       ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
-               ioarcb->cmd_pkt.cdb[11] =
-                       ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
-               ioarcb->cmd_pkt.cdb[12] =
-                       ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
-               ioarcb->cmd_pkt.cdb[13] =
-                       ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
-       }
+               ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+               if (ioa_cfg->sis64)
+                       ioarcb->cmd_pkt.cdb[1] = 0x1;
 
-       ipr_cmd->job_step = ipr_ioafp_std_inquiry;
+               if (ioa_cfg->nvectors == 1)
+                       ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
+               else
+                       ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
+
+               ioarcb->cmd_pkt.cdb[2] =
+                       ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
+               ioarcb->cmd_pkt.cdb[3] =
+                       ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
+               ioarcb->cmd_pkt.cdb[4] =
+                       ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
+               ioarcb->cmd_pkt.cdb[5] =
+                       ((u64) hrrq->host_rrq_dma) & 0xff;
+               ioarcb->cmd_pkt.cdb[7] =
+                       ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
+               ioarcb->cmd_pkt.cdb[8] =
+                       (sizeof(u32) * hrrq->size) & 0xff;
+
+               if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+                       ioarcb->cmd_pkt.cdb[9] =
+                                       ioa_cfg->identify_hrrq_index;
 
-       ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+               if (ioa_cfg->sis64) {
+                       ioarcb->cmd_pkt.cdb[10] =
+                               ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
+                       ioarcb->cmd_pkt.cdb[11] =
+                               ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
+                       ioarcb->cmd_pkt.cdb[12] =
+                               ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
+                       ioarcb->cmd_pkt.cdb[13] =
+                               ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
+               }
+
+               if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+                       ioarcb->cmd_pkt.cdb[14] =
+                                       ioa_cfg->identify_hrrq_index;
+
+               ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+                          IPR_INTERNAL_TIMEOUT);
+
+               if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
+                       ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+
+               LEAVE;
+               return IPR_RC_JOB_RETURN;
+       }
 
        LEAVE;
-       return IPR_RC_JOB_RETURN;
+       return IPR_RC_JOB_CONTINUE;
 }
 
 /**
@@ -7365,7 +7716,9 @@ static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
                                  unsigned long timeout)
 {
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+
+       ENTER;
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
        ipr_cmd->done = ipr_reset_ioa_job;
 
        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -7383,13 +7736,26 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
  **/
 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
 {
-       memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
+       struct ipr_hrr_queue *hrrq;
 
-       /* Initialize Host RRQ pointers */
-       ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
-       ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
-       ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
-       ioa_cfg->toggle_bit = 1;
+       for_each_hrrq(hrrq, ioa_cfg) {
+               spin_lock(&hrrq->_lock);
+               memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
+
+               /* Initialize Host RRQ pointers */
+               hrrq->hrrq_start = hrrq->host_rrq;
+               hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
+               hrrq->hrrq_curr = hrrq->hrrq_start;
+               hrrq->toggle_bit = 1;
+               spin_unlock(&hrrq->_lock);
+       }
+       wmb();
+
+       ioa_cfg->identify_hrrq_index = 0;
+       if (ioa_cfg->hrrq_num == 1)
+               atomic_set(&ioa_cfg->hrrq_index, 0);
+       else
+               atomic_set(&ioa_cfg->hrrq_index, 1);
 
        /* Zero out config table */
        memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
@@ -7446,7 +7812,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
        ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
        ipr_cmd->done = ipr_reset_ioa_job;
        add_timer(&ipr_cmd->timer);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 
        return IPR_RC_JOB_RETURN;
 }
@@ -7466,12 +7833,18 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        volatile u32 int_reg;
        volatile u64 maskval;
+       int i;
 
        ENTER;
        ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
        ipr_init_ioa_mem(ioa_cfg);
 
-       ioa_cfg->allow_interrupts = 1;
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].allow_interrupts = 1;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
        if (ioa_cfg->sis64) {
                /* Set the adapter to the correct endian mode. */
                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
@@ -7511,7 +7884,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
        ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
        ipr_cmd->done = ipr_reset_ioa_job;
        add_timer(&ipr_cmd->timer);
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
 
        LEAVE;
        return IPR_RC_JOB_RETURN;
@@ -8030,7 +8403,8 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
        int rc = IPR_RC_JOB_CONTINUE;
 
        ENTER;
-       if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
+       if (shutdown_type != IPR_SHUTDOWN_NONE &&
+                       !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
@@ -8078,7 +8452,8 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
                         * We are doing nested adapter resets and this is
                         * not the current reset job.
                         */
-                       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+                       list_add_tail(&ipr_cmd->queue,
+                                       &ipr_cmd->hrrq->hrrq_free_q);
                        return;
                }
 
@@ -8113,10 +8488,17 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                                    enum ipr_shutdown_type shutdown_type)
 {
        struct ipr_cmnd *ipr_cmd;
+       int i;
 
        ioa_cfg->in_reset_reload = 1;
-       ioa_cfg->allow_cmds = 0;
-       scsi_block_requests(ioa_cfg->host);
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].allow_cmds = 0;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
+       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+               scsi_block_requests(ioa_cfg->host);
 
        ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
        ioa_cfg->reset_cmd = ipr_cmd;
@@ -8141,7 +8523,9 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                                   enum ipr_shutdown_type shutdown_type)
 {
-       if (ioa_cfg->ioa_is_dead)
+       int i;
+
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
                return;
 
        if (ioa_cfg->in_reset_reload) {
@@ -8156,7 +8540,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                        "IOA taken offline - error recovery failed\n");
 
                ioa_cfg->reset_retries = 0;
-               ioa_cfg->ioa_is_dead = 1;
+               for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+                       spin_lock(&ioa_cfg->hrrq[i]._lock);
+                       ioa_cfg->hrrq[i].ioa_is_dead = 1;
+                       spin_unlock(&ioa_cfg->hrrq[i]._lock);
+               }
+               wmb();
 
                if (ioa_cfg->in_ioa_bringdown) {
                        ioa_cfg->reset_cmd = NULL;
@@ -8164,9 +8553,11 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
                        ipr_fail_all_ops(ioa_cfg);
                        wake_up_all(&ioa_cfg->reset_wait_q);
 
-                       spin_unlock_irq(ioa_cfg->host->host_lock);
-                       scsi_unblock_requests(ioa_cfg->host);
-                       spin_lock_irq(ioa_cfg->host->host_lock);
+                       if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+                               spin_unlock_irq(ioa_cfg->host->host_lock);
+                               scsi_unblock_requests(ioa_cfg->host);
+                               spin_lock_irq(ioa_cfg->host->host_lock);
+                       }
                        return;
                } else {
                        ioa_cfg->in_ioa_bringdown = 1;
@@ -8188,9 +8579,17 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
  */
 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
 {
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       int i;
+
        /* Disallow new interrupts, avoid loop */
-       ipr_cmd->ioa_cfg->allow_interrupts = 0;
-       list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].allow_interrupts = 0;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
        ipr_cmd->done = ipr_reset_ioa_job;
        return IPR_RC_JOB_RETURN;
 }
@@ -8247,13 +8646,19 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
 {
        unsigned long flags = 0;
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+       int i;
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
                ioa_cfg->sdt_state = ABORT_DUMP;
        ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
        ioa_cfg->in_ioa_bringdown = 1;
-       ioa_cfg->allow_cmds = 0;
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].allow_cmds = 0;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
@@ -8310,12 +8715,11 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
        } else
                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
                                        IPR_SHUTDOWN_NONE);
-
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
-       if (ioa_cfg->ioa_is_dead) {
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
                rc = -EIO;
        } else if (ipr_invalid_adapter(ioa_cfg)) {
                if (!ipr_testmode)
@@ -8376,8 +8780,13 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
        pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
                            ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
        ipr_free_cmd_blks(ioa_cfg);
-       pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
-                           ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+
+       for (i = 0; i < ioa_cfg->hrrq_num; i++)
+               pci_free_consistent(ioa_cfg->pdev,
+                                       sizeof(u32) * ioa_cfg->hrrq[i].size,
+                                       ioa_cfg->hrrq[i].host_rrq,
+                                       ioa_cfg->hrrq[i].host_rrq_dma);
+
        pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
                            ioa_cfg->u.cfg_table,
                            ioa_cfg->cfg_table_dma);
@@ -8408,8 +8817,23 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
        struct pci_dev *pdev = ioa_cfg->pdev;
 
        ENTER;
-       free_irq(pdev->irq, ioa_cfg);
-       pci_disable_msi(pdev);
+       if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+           ioa_cfg->intr_flag == IPR_USE_MSIX) {
+               int i;
+               for (i = 0; i < ioa_cfg->nvectors; i++)
+                       free_irq(ioa_cfg->vectors_info[i].vec,
+                               &ioa_cfg->hrrq[i]);
+       } else
+               free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
+
+       if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+               pci_disable_msi(pdev);
+               ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+       } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+               pci_disable_msix(pdev);
+               ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+       }
+
        iounmap(ioa_cfg->hdw_dma_regs);
        pci_release_regions(pdev);
        ipr_free_mem(ioa_cfg);
@@ -8430,7 +8854,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;
        dma_addr_t dma_addr;
-       int i;
+       int i, entries_each_hrrq, hrrq_id = 0;
 
        ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
                                                sizeof(struct ipr_cmnd), 512, 0);
@@ -8446,6 +8870,41 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
                return -ENOMEM;
        }
 
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               if (ioa_cfg->hrrq_num > 1) {
+                       if (i == 0) {
+                               entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
+                               ioa_cfg->hrrq[i].min_cmd_id = 0;
+                                       ioa_cfg->hrrq[i].max_cmd_id =
+                                               (entries_each_hrrq - 1);
+                       } else {
+                               entries_each_hrrq =
+                                       IPR_NUM_BASE_CMD_BLKS/
+                                       (ioa_cfg->hrrq_num - 1);
+                               ioa_cfg->hrrq[i].min_cmd_id =
+                                       IPR_NUM_INTERNAL_CMD_BLKS +
+                                       (i - 1) * entries_each_hrrq;
+                               ioa_cfg->hrrq[i].max_cmd_id =
+                                       (IPR_NUM_INTERNAL_CMD_BLKS +
+                                       i * entries_each_hrrq - 1);
+                       }
+               } else {
+                       entries_each_hrrq = IPR_NUM_CMD_BLKS;
+                       ioa_cfg->hrrq[i].min_cmd_id = 0;
+                       ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
+               }
+               ioa_cfg->hrrq[i].size = entries_each_hrrq;
+       }
+
+       BUG_ON(ioa_cfg->hrrq_num == 0);
+
+       i = IPR_NUM_CMD_BLKS -
+               ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
+       if (i > 0) {
+               ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
+               ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
+       }
+
        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
                ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
@@ -8484,7 +8943,11 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
                ipr_cmd->sense_buffer_dma = dma_addr +
                        offsetof(struct ipr_cmnd, sense_buffer);
 
-               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
+               ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
+               list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+               if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
+                       hrrq_id++;
        }
 
        return 0;
@@ -8516,6 +8979,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
                                             BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
                ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
                                            BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+
+               if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
+                       || !ioa_cfg->vset_ids)
+                       goto out_free_res_entries;
        }
 
        for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
@@ -8530,15 +8997,34 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
        if (!ioa_cfg->vpd_cbs)
                goto out_free_res_entries;
 
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+               INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+               spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+               if (i == 0)
+                       ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+               else
+                       ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+       }
+
        if (ipr_alloc_cmd_blks(ioa_cfg))
                goto out_free_vpd_cbs;
 
-       ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
-                                                sizeof(u32) * IPR_NUM_CMD_BLKS,
-                                                &ioa_cfg->host_rrq_dma);
-
-       if (!ioa_cfg->host_rrq)
-               goto out_ipr_free_cmd_blocks;
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
+                                       sizeof(u32) * ioa_cfg->hrrq[i].size,
+                                       &ioa_cfg->hrrq[i].host_rrq_dma);
+
+               if (!ioa_cfg->hrrq[i].host_rrq)  {
+                               while (--i >= 0)
+                               pci_free_consistent(pdev,
+                                       sizeof(u32) * ioa_cfg->hrrq[i].size,
+                                       ioa_cfg->hrrq[i].host_rrq,
+                                       ioa_cfg->hrrq[i].host_rrq_dma);
+                       goto out_ipr_free_cmd_blocks;
+               }
+               ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
+       }
 
        ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
                                                    ioa_cfg->cfg_table_size,
@@ -8582,8 +9068,12 @@ out_free_hostrcb_dma:
                            ioa_cfg->u.cfg_table,
                            ioa_cfg->cfg_table_dma);
 out_free_host_rrq:
-       pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
-                           ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               pci_free_consistent(pdev,
+                               sizeof(u32) * ioa_cfg->hrrq[i].size,
+                               ioa_cfg->hrrq[i].host_rrq,
+                               ioa_cfg->hrrq[i].host_rrq_dma);
+       }
 out_ipr_free_cmd_blocks:
        ipr_free_cmd_blks(ioa_cfg);
 out_free_vpd_cbs:
@@ -8591,6 +9081,9 @@ out_free_vpd_cbs:
                            ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
 out_free_res_entries:
        kfree(ioa_cfg->res_entries);
+       kfree(ioa_cfg->target_ids);
+       kfree(ioa_cfg->array_ids);
+       kfree(ioa_cfg->vset_ids);
        goto out;
 }
 
@@ -8638,15 +9131,11 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
        ioa_cfg->doorbell = IPR_DOORBELL;
        sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
        sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
-       sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
-       sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
        sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
        sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
        sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
        sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
 
-       INIT_LIST_HEAD(&ioa_cfg->free_q);
-       INIT_LIST_HEAD(&ioa_cfg->pending_q);
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
@@ -8724,6 +9213,88 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
        return NULL;
 }
 
+static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
+{
+       struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
+       int i, err, vectors;
+
+       for (i = 0; i < ARRAY_SIZE(entries); ++i)
+               entries[i].entry = i;
+
+       vectors = ipr_number_of_msix;
+
+       while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
+                       vectors = err;
+
+       if (err < 0) {
+               pci_disable_msix(ioa_cfg->pdev);
+               return err;
+       }
+
+       if (!err) {
+               for (i = 0; i < vectors; i++)
+                       ioa_cfg->vectors_info[i].vec = entries[i].vector;
+               ioa_cfg->nvectors = vectors;
+       }
+
+       return err;
+}
+
+static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int i, err, vectors;
+
+       vectors = ipr_number_of_msix;
+
+       while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
+                       vectors = err;
+
+       if (err < 0) {
+               pci_disable_msi(ioa_cfg->pdev);
+               return err;
+       }
+
+       if (!err) {
+               for (i = 0; i < vectors; i++)
+                       ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+               ioa_cfg->nvectors = vectors;
+       }
+
+       return err;
+}
+
+static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
+
+       for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
+               snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
+                        "host%d-%d", ioa_cfg->host->host_no, vec_idx);
+               ioa_cfg->vectors_info[vec_idx].
+                       desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
+       }
+}
+
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int i, rc;
+
+       for (i = 1; i < ioa_cfg->nvectors; i++) {
+               rc = request_irq(ioa_cfg->vectors_info[i].vec,
+                       ipr_isr_mhrrq,
+                       0,
+                       ioa_cfg->vectors_info[i].desc,
+                       &ioa_cfg->hrrq[i]);
+               if (rc) {
+                       while (--i >= 0)
+                               free_irq(ioa_cfg->vectors_info[i].vec,
+                                       &ioa_cfg->hrrq[i]);
+                       return rc;
+               }
+       }
+       return 0;
+}
+
 /**
  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
  * @pdev:              PCI device struct
@@ -8740,6 +9311,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
        unsigned long lock_flags = 0;
        irqreturn_t rc = IRQ_HANDLED;
 
+       dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
        ioa_cfg->msi_received = 1;
@@ -8787,9 +9359,9 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
        writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
        wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
 
-       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (!ioa_cfg->msi_received) {
                /* MSI test failed */
                dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
@@ -8806,8 +9378,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
        return rc;
 }
 
-/**
- * ipr_probe_ioa - Allocates memory and does first stage of initialization
+/** ipr_probe_ioa - Allocates memory and does first stage of initialization
  * @pdev:              PCI device struct
  * @dev_id:            PCI device id struct
  *
@@ -8823,6 +9394,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
        void __iomem *ipr_regs;
        int rc = PCIBIOS_SUCCESSFUL;
        volatile u32 mask, uproc, interrupts;
+       unsigned long lock_flags;
 
        ENTER;
 
@@ -8918,17 +9490,56 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
                goto cleanup_nomem;
        }
 
-       /* Enable MSI style interrupts if they are supported. */
-       if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+       if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
+               dev_err(&pdev->dev, "The max number of MSIX is %d\n",
+                       IPR_MAX_MSIX_VECTORS);
+               ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
+       }
+
+       if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+                       ipr_enable_msix(ioa_cfg) == 0)
+               ioa_cfg->intr_flag = IPR_USE_MSIX;
+       else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+                       ipr_enable_msi(ioa_cfg) == 0)
+               ioa_cfg->intr_flag = IPR_USE_MSI;
+       else {
+               ioa_cfg->intr_flag = IPR_USE_LSI;
+               ioa_cfg->nvectors = 1;
+               dev_info(&pdev->dev, "Cannot enable MSI.\n");
+       }
+
+       if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+           ioa_cfg->intr_flag == IPR_USE_MSIX) {
                rc = ipr_test_msi(ioa_cfg, pdev);
-               if (rc == -EOPNOTSUPP)
-                       pci_disable_msi(pdev);
+               if (rc == -EOPNOTSUPP) {
+                       if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+                               ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+                               pci_disable_msi(pdev);
+                       } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+                               ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+                               pci_disable_msix(pdev);
+                       }
+
+                       ioa_cfg->intr_flag = IPR_USE_LSI;
+                       ioa_cfg->nvectors = 1;
+               }
                else if (rc)
                        goto out_msi_disable;
-               else
-                       dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
-       } else if (ipr_debug)
-               dev_info(&pdev->dev, "Cannot enable MSI.\n");
+               else {
+                       if (ioa_cfg->intr_flag == IPR_USE_MSI)
+                               dev_info(&pdev->dev,
+                                       "Request for %d MSIs succeeded with starting IRQ: %d\n",
+                                       ioa_cfg->nvectors, pdev->irq);
+                       else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+                               dev_info(&pdev->dev,
+                                       "Request for %d MSIXs succeeded.",
+                                       ioa_cfg->nvectors);
+               }
+       }
+
+       ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
+                               (unsigned int)num_online_cpus(),
+                               (unsigned int)IPR_MAX_HRRQ_NUM);
 
        /* Save away PCI config space for use following IOA reset */
        rc = pci_save_state(pdev);
@@ -8975,11 +9586,24 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
        if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
                ioa_cfg->ioa_unit_checked = 1;
 
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
-       rc = request_irq(pdev->irq, ipr_isr,
-                        ioa_cfg->msi_received ? 0 : IRQF_SHARED,
-                        IPR_NAME, ioa_cfg);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
+       if (ioa_cfg->intr_flag == IPR_USE_MSI
+                       || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+               name_msi_vectors(ioa_cfg);
+               rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
+                       0,
+                       ioa_cfg->vectors_info[0].desc,
+                       &ioa_cfg->hrrq[0]);
+               if (!rc)
+                       rc = ipr_request_other_msi_irqs(ioa_cfg);
+       } else {
+               rc = request_irq(pdev->irq, ipr_isr,
+                        IRQF_SHARED,
+                        IPR_NAME, &ioa_cfg->hrrq[0]);
+       }
        if (rc) {
                dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
                        pdev->irq, rc);
@@ -9004,7 +9628,10 @@ out:
 cleanup_nolog:
        ipr_free_mem(ioa_cfg);
 out_msi_disable:
-       pci_disable_msi(pdev);
+       if (ioa_cfg->intr_flag == IPR_USE_MSI)
+               pci_disable_msi(pdev);
+       else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+               pci_disable_msix(pdev);
 cleanup_nomem:
        iounmap(ipr_regs);
 out_release_regions:
@@ -9074,6 +9701,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 {
        unsigned long host_lock_flags = 0;
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+       int i;
        ENTER;
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9083,6 +9711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
                spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
        }
 
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].removing_ioa = 1;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
        ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9138,7 +9772,7 @@ static void ipr_remove(struct pci_dev *pdev)
 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 {
        struct ipr_ioa_cfg *ioa_cfg;
-       int rc;
+       int rc, i;
 
        rc = ipr_probe_ioa(pdev, dev_id);
 
@@ -9185,6 +9819,17 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
        scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
        ioa_cfg->allow_ml_add_del = 1;
        ioa_cfg->host->max_channel = IPR_VSET_BUS;
+       ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+       if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+                       ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+               for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+                       blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+                                       ioa_cfg->iopoll_weight, ipr_iopoll);
+                       blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+               }
+       }
+
        schedule_work(&ioa_cfg->work_q);
        return 0;
 }
@@ -9203,8 +9848,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
 {
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
        unsigned long lock_flags = 0;
+       int i;
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+                       ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+               ioa_cfg->iopoll_weight = 0;
+               for (i = 1; i < ioa_cfg->hrrq_num; i++)
+                       blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+       }
+
        while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -9276,6 +9929,8 @@ static struct pci_device_id ipr_pci_table[] = {
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
@@ -9290,6 +9945,14 @@ static struct pci_device_id ipr_pci_table[] = {
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
        { }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -9316,9 +9979,7 @@ static struct pci_driver ipr_driver = {
  **/
 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
 {
-       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 }
 
 /**
@@ -9340,7 +10001,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
 
        list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-               if (!ioa_cfg->allow_cmds) {
+               if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
                        continue;
                }
index c8a137f..21a6ff1 100644 (file)
 #include <linux/libata.h>
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/blk-iopoll.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.5.4"
-#define IPR_DRIVER_DATE "(July 11, 2012)"
+#define IPR_DRIVER_VERSION "2.6.0"
+#define IPR_DRIVER_DATE "(November 16, 2012)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -82,6 +83,7 @@
 
 #define IPR_SUBS_DEV_ID_57B4    0x033B
 #define IPR_SUBS_DEV_ID_57B2    0x035F
+#define IPR_SUBS_DEV_ID_57C0    0x0352
 #define IPR_SUBS_DEV_ID_57C3    0x0353
 #define IPR_SUBS_DEV_ID_57C4    0x0354
 #define IPR_SUBS_DEV_ID_57C6    0x0357
 #define IPR_SUBS_DEV_ID_574D    0x0356
 #define IPR_SUBS_DEV_ID_57C8    0x035D
 
+#define IPR_SUBS_DEV_ID_57D5    0x03FB
+#define IPR_SUBS_DEV_ID_57D6    0x03FC
+#define IPR_SUBS_DEV_ID_57D7    0x03FF
+#define IPR_SUBS_DEV_ID_57D8    0x03FE
 #define IPR_NAME                               "ipr"
 
 /*
@@ -298,6 +304,9 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
  * Misc literals
  */
 #define IPR_NUM_IOADL_ENTRIES                  IPR_MAX_SGLIST
+#define IPR_MAX_MSIX_VECTORS           0x5
+#define IPR_MAX_HRRQ_NUM               0x10
+#define IPR_INIT_HRRQ                  0x0
 
 /*
  * Adapter interface types
@@ -404,7 +413,7 @@ struct ipr_config_table_entry64 {
        __be64 dev_id;
        __be64 lun;
        __be64 lun_wwn[2];
-#define IPR_MAX_RES_PATH_LENGTH                24
+#define IPR_MAX_RES_PATH_LENGTH                48
        __be64 res_path;
        struct ipr_std_inq_data std_inq_data;
        u8 reserved2[4];
@@ -459,9 +468,40 @@ struct ipr_supported_device {
        u8 reserved2[16];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hrr_queue {
+       struct ipr_ioa_cfg *ioa_cfg;
+       __be32 *host_rrq;
+       dma_addr_t host_rrq_dma;
+#define IPR_HRRQ_REQ_RESP_HANDLE_MASK  0xfffffffc
+#define IPR_HRRQ_RESP_BIT_SET          0x00000002
+#define IPR_HRRQ_TOGGLE_BIT            0x00000001
+#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
+#define IPR_ID_HRRQ_SELE_ENABLE                0x02
+       volatile __be32 *hrrq_start;
+       volatile __be32 *hrrq_end;
+       volatile __be32 *hrrq_curr;
+
+       struct list_head hrrq_free_q;
+       struct list_head hrrq_pending_q;
+       spinlock_t _lock;
+       spinlock_t *lock;
+
+       volatile u32 toggle_bit;
+       u32 size;
+       u32 min_cmd_id;
+       u32 max_cmd_id;
+       u8 allow_interrupts:1;
+       u8 ioa_is_dead:1;
+       u8 allow_cmds:1;
+       u8 removing_ioa:1;
+
+       struct blk_iopoll iopoll;
+};
+
 /* Command packet structure */
 struct ipr_cmd_pkt {
-       __be16 reserved;                /* Reserved by IOA */
+       u8 reserved;            /* Reserved by IOA */
+       u8 hrrq_id;
        u8 request_type;
 #define IPR_RQTYPE_SCSICDB             0x00
 #define IPR_RQTYPE_IOACMD              0x01
@@ -1022,6 +1062,10 @@ struct ipr_hostrcb64_fabric_desc {
        struct ipr_hostrcb64_config_element elem[1];
 }__attribute__((packed, aligned (8)));
 
+#define for_each_hrrq(hrrq, ioa_cfg) \
+               for (hrrq = (ioa_cfg)->hrrq; \
+                       hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
+
 #define for_each_fabric_cfg(fabric, cfg) \
                for (cfg = (fabric)->elem; \
                        cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
@@ -1308,6 +1352,7 @@ struct ipr_chip_cfg_t {
        u16 max_cmds;
        u8 cache_line_size;
        u8 clear_isr;
+       u32 iopoll_weight;
        struct ipr_interrupt_offsets regs;
 };
 
@@ -1317,6 +1362,7 @@ struct ipr_chip_t {
        u16 intr_type;
 #define IPR_USE_LSI                    0x00
 #define IPR_USE_MSI                    0x01
+#define IPR_USE_MSIX                   0x02
        u16 sis_type;
 #define IPR_SIS32                      0x00
 #define IPR_SIS64                      0x01
@@ -1375,13 +1421,10 @@ struct ipr_ioa_cfg {
 
        struct list_head queue;
 
-       u8 allow_interrupts:1;
        u8 in_reset_reload:1;
        u8 in_ioa_bringdown:1;
        u8 ioa_unit_checked:1;
-       u8 ioa_is_dead:1;
        u8 dump_taken:1;
-       u8 allow_cmds:1;
        u8 allow_ml_add_del:1;
        u8 needs_hard_reset:1;
        u8 dual_raid:1;
@@ -1413,21 +1456,7 @@ struct ipr_ioa_cfg {
        char trace_start[8];
 #define IPR_TRACE_START_LABEL                  "trace"
        struct ipr_trace_entry *trace;
-       u32 trace_index:IPR_NUM_TRACE_INDEX_BITS;
-
-       /*
-        * Queue for free command blocks
-        */
-       char ipr_free_label[8];
-#define IPR_FREEQ_LABEL                        "free-q"
-       struct list_head free_q;
-
-       /*
-        * Queue for command blocks outstanding to the adapter
-        */
-       char ipr_pending_label[8];
-#define IPR_PENDQ_LABEL                        "pend-q"
-       struct list_head pending_q;
+       atomic_t trace_index;
 
        char cfg_table_start[8];
 #define IPR_CFG_TBL_START              "cfg"
@@ -1452,16 +1481,10 @@ struct ipr_ioa_cfg {
        struct list_head hostrcb_free_q;
        struct list_head hostrcb_pending_q;
 
-       __be32 *host_rrq;
-       dma_addr_t host_rrq_dma;
-#define IPR_HRRQ_REQ_RESP_HANDLE_MASK  0xfffffffc
-#define IPR_HRRQ_RESP_BIT_SET                  0x00000002
-#define IPR_HRRQ_TOGGLE_BIT                            0x00000001
-#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
-       volatile __be32 *hrrq_start;
-       volatile __be32 *hrrq_end;
-       volatile __be32 *hrrq_curr;
-       volatile u32 toggle_bit;
+       struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
+       u32 hrrq_num;
+       atomic_t  hrrq_index;
+       u16 identify_hrrq_index;
 
        struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
 
@@ -1507,6 +1530,17 @@ struct ipr_ioa_cfg {
        u32 max_cmds;
        struct ipr_cmnd **ipr_cmnd_list;
        dma_addr_t *ipr_cmnd_list_dma;
+
+       u16 intr_flag;
+       unsigned int nvectors;
+
+       struct {
+               unsigned short vec;
+               char desc[22];
+       } vectors_info[IPR_MAX_MSIX_VECTORS];
+
+       u32 iopoll_weight;
+
 }; /* struct ipr_ioa_cfg */
 
 struct ipr_cmnd {
@@ -1544,6 +1578,7 @@ struct ipr_cmnd {
                struct scsi_device *sdev;
        } u;
 
+       struct ipr_hrr_queue *hrrq;
        struct ipr_ioa_cfg *ioa_cfg;
 };
 
@@ -1717,7 +1752,8 @@ struct ipr_ucode_image_header {
        if (ipr_is_device(hostrcb)) {                                   \
                if ((hostrcb)->ioa_cfg->sis64) {                        \
                        printk(KERN_ERR IPR_NAME ": %s: " fmt,          \
-                               ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \
+                               ipr_format_res_path(hostrcb->ioa_cfg,   \
+                                       hostrcb->hcam.u.error64.fd_res_path, \
                                        hostrcb->rp_buffer,             \
                                        sizeof(hostrcb->rp_buffer)),    \
                                __VA_ARGS__);                           \
index fcb9d0b..09c81b2 100644 (file)
@@ -1381,10 +1381,10 @@ static void fc_fcp_timeout(unsigned long data)
 
        fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
 
-       if (fsp->state & FC_SRB_RCV_STATUS)
-               fc_fcp_complete_locked(fsp);
-       else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+       if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
                fc_fcp_rec(fsp);
+       else if (fsp->state & FC_SRB_RCV_STATUS)
+               fc_fcp_complete_locked(fsp);
        else
                fc_fcp_recovery(fsp, FC_TIMED_OUT);
        fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
index c2830cc..b74189d 100644 (file)
@@ -41,25 +41,25 @@ extern unsigned int fc_debug_logging;
 
 #define FC_LIBFC_DBG(fmt, args...)                                     \
        FC_CHECK_LOGGING(FC_LIBFC_LOGGING,                              \
-                        printk(KERN_INFO "libfc: " fmt, ##args))
+                        pr_info("libfc: " fmt, ##args))
 
 #define FC_LPORT_DBG(lport, fmt, args...)                              \
        FC_CHECK_LOGGING(FC_LPORT_LOGGING,                              \
-                        printk(KERN_INFO "host%u: lport %6.6x: " fmt,  \
-                               (lport)->host->host_no,                 \
-                               (lport)->port_id, ##args))
+                        pr_info("host%u: lport %6.6x: " fmt,           \
+                                (lport)->host->host_no,                \
+                                (lport)->port_id, ##args))
 
-#define FC_DISC_DBG(disc, fmt, args...)                                \
-       FC_CHECK_LOGGING(FC_DISC_LOGGING,                       \
-                        printk(KERN_INFO "host%u: disc: " fmt, \
-                               fc_disc_lport(disc)->host->host_no,     \
-                               ##args))
+#define FC_DISC_DBG(disc, fmt, args...)                                        \
+       FC_CHECK_LOGGING(FC_DISC_LOGGING,                               \
+                        pr_info("host%u: disc: " fmt,                  \
+                                fc_disc_lport(disc)->host->host_no,    \
+                                ##args))
 
 #define FC_RPORT_ID_DBG(lport, port_id, fmt, args...)                  \
        FC_CHECK_LOGGING(FC_RPORT_LOGGING,                              \
-                        printk(KERN_INFO "host%u: rport %6.6x: " fmt,  \
-                               (lport)->host->host_no,                 \
-                               (port_id), ##args))
+                        pr_info("host%u: rport %6.6x: " fmt,           \
+                                (lport)->host->host_no,                \
+                                (port_id), ##args))
 
 #define FC_RPORT_DBG(rdata, fmt, args...)                              \
        FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
@@ -70,13 +70,13 @@ extern unsigned int fc_debug_logging;
                if ((pkt)->seq_ptr) {                                   \
                        struct fc_exch *_ep = NULL;                     \
                        _ep = fc_seq_exch((pkt)->seq_ptr);              \
-                       printk(KERN_INFO "host%u: fcp: %6.6x: "         \
+                       pr_info("host%u: fcp: %6.6x: "                  \
                                "xid %04x-%04x: " fmt,                  \
                                (pkt)->lp->host->host_no,               \
                                (pkt)->rport->port_id,                  \
                                (_ep)->oxid, (_ep)->rxid, ##args);      \
                } else {                                                \
-                       printk(KERN_INFO "host%u: fcp: %6.6x: " fmt,    \
+                       pr_info("host%u: fcp: %6.6x: " fmt,             \
                                (pkt)->lp->host->host_no,               \
                                (pkt)->rport->port_id, ##args);         \
                }                                                       \
@@ -84,14 +84,14 @@ extern unsigned int fc_debug_logging;
 
 #define FC_EXCH_DBG(exch, fmt, args...)                                        \
        FC_CHECK_LOGGING(FC_EXCH_LOGGING,                               \
-                        printk(KERN_INFO "host%u: xid %4x: " fmt,      \
-                               (exch)->lp->host->host_no,              \
-                               exch->xid, ##args))
+                        pr_info("host%u: xid %4x: " fmt,               \
+                                (exch)->lp->host->host_no,             \
+                                exch->xid, ##args))
 
 #define FC_SCSI_DBG(lport, fmt, args...)                               \
        FC_CHECK_LOGGING(FC_SCSI_LOGGING,                               \
-                        printk(KERN_INFO "host%u: scsi: " fmt,         \
-                               (lport)->host->host_no, ##args))
+                        pr_info("host%u: scsi: " fmt,                  \
+                                (lport)->host->host_no, ##args))
 
 /*
  * FC-4 Providers.
index 83aa1ef..d518d17 100644 (file)
@@ -582,7 +582,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
                                 struct fc_frame *fp)
 {
-       unsigned long delay = FC_DEF_E_D_TOV;
+       unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
 
        /* make sure this isn't an FC_EX_CLOSED error, never retry those */
        if (PTR_ERR(fp) == -FC_EX_CLOSED)
index df4c13a..7706c99 100644 (file)
@@ -466,11 +466,13 @@ enum intr_type_t {
        MSIX,
 };
 
+#define LPFC_CT_CTX_MAX                64
 struct unsol_rcv_ct_ctx {
        uint32_t ctxt_id;
        uint32_t SID;
-       uint32_t flags;
-#define UNSOL_VALID    0x00000001
+       uint32_t valid;
+#define UNSOL_INVALID          0
+#define UNSOL_VALID            1
        uint16_t oxid;
        uint16_t rxid;
 };
@@ -750,6 +752,15 @@ struct lpfc_hba {
        void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
                                            PCI BAR2 */
 
+       void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
+                                           PCI BAR0 with dual-ULP support */
+       void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
+                                           PCI BAR2 with dual-ULP support */
+       void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
+                                           PCI BAR4 with dual-ULP support */
+#define PCI_64BIT_BAR0 0
+#define PCI_64BIT_BAR2 2
+#define PCI_64BIT_BAR4 4
        void __iomem *MBslimaddr;       /* virtual address for mbox cmds */
        void __iomem *HAregaddr;        /* virtual address for host attn reg */
        void __iomem *CAregaddr;        /* virtual address for chip attn reg */
@@ -938,7 +949,7 @@ struct lpfc_hba {
 
        spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
        struct list_head ct_ev_waiters;
-       struct unsol_rcv_ct_ctx ct_ctx[64];
+       struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
        uint32_t ctx_idx;
 
        uint8_t menlo_flag;     /* menlo generic flags */
index f7368eb..32d5683 100644 (file)
@@ -955,9 +955,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                spin_lock_irqsave(&phba->ct_ev_lock, flags);
                if (phba->sli_rev == LPFC_SLI_REV4) {
                        evt_dat->immed_dat = phba->ctx_idx;
-                       phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+                       phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
                        /* Provide warning for over-run of the ct_ctx array */
-                       if (phba->ct_ctx[evt_dat->immed_dat].flags &
+                       if (phba->ct_ctx[evt_dat->immed_dat].valid ==
                            UNSOL_VALID)
                                lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
                                                "2717 CT context array entry "
@@ -973,7 +973,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                                piocbq->iocb.unsli3.rcvsli3.ox_id;
                        phba->ct_ctx[evt_dat->immed_dat].SID =
                                piocbq->iocb.un.rcvels.remoteID;
-                       phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
+                       phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
                } else
                        evt_dat->immed_dat = piocbq->iocb.ulpContext;
 
@@ -1012,6 +1012,47 @@ error_ct_unsol_exit:
        return 1;
 }
 
+/**
+ * lpfc_bsg_ct_unsol_abort - handle ct abort to management plane
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an abort of a CT command toward the management
+ * plane for an SLI4 port.
+ *
+ * If a pending context for a CT command to the management plane is present,
+ * this clears that context and returns 1 (handled); otherwise it returns 0,
+ * indicating that no such context exists.
+ **/
+int
+lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+       struct fc_frame_header fc_hdr;
+       struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
+       int ctx_idx, handled = 0;
+       uint16_t oxid, rxid;
+       uint32_t sid;
+
+       memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+       sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
+       oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
+       rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
+
+       for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
+               if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
+                       continue;
+               if (phba->ct_ctx[ctx_idx].rxid != rxid)
+                       continue;
+               if (phba->ct_ctx[ctx_idx].oxid != oxid)
+                       continue;
+               if (phba->ct_ctx[ctx_idx].SID != sid)
+                       continue;
+               phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
+               handled = 1;
+       }
+       return handled;
+}
+
 /**
  * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
  * @job: SET_EVENT fc_bsg_job
@@ -1318,7 +1359,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
        icmd->ulpClass = CLASS3;
        if (phba->sli_rev == LPFC_SLI_REV4) {
                /* Do not issue unsol response if oxid not marked as valid */
-               if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
+               if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
                        rc = IOCB_ERROR;
                        goto issue_ct_rsp_exit;
                }
@@ -1352,7 +1393,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
                                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
 
                /* The exchange is done, mark the entry as invalid */
-               phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
+               phba->ct_ctx[tag].valid = UNSOL_INVALID;
        } else
                icmd->ulpContext = (ushort) tag;
 
index 69d66e3..76ca65d 100644 (file)
@@ -164,8 +164,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
 
 void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
                         struct lpfc_iocbq *);
-void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
-                                   struct lpfc_iocbq *);
+int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
 int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
 int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
 void lpfc_fdmi_tmo(unsigned long);
@@ -427,6 +426,7 @@ int lpfc_bsg_request(struct fc_bsg_job *);
 int lpfc_bsg_timeout(struct fc_bsg_job *);
 int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
                             struct lpfc_iocbq *);
+int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
 void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
        struct lpfc_iocbq *);
 struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
index 65f9fb6..7bff3a1 100644 (file)
@@ -164,37 +164,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 }
 
 /**
- * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
+ * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
  * @phba: Pointer to HBA context object.
- * @pring: Pointer to the driver internal I/O ring.
- * @piocbq: Pointer to the IOCBQ.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
  *
- * This function serves as the default handler for the sli4 unsolicited
- * abort event. It shall be invoked when there is no application interface
- * registered unsolicited abort handler. This handler does nothing but
- * just simply releases the dma buffer used by the unsol abort event.
+ * This function serves as the upper level protocol abort handler for CT
+ * protocol.
+ *
+ * Return 1 if abort has been handled, 0 otherwise.
  **/
-void
-lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
-                              struct lpfc_sli_ring *pring,
-                              struct lpfc_iocbq *piocbq)
+int
+lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
 {
-       IOCB_t *icmd = &piocbq->iocb;
-       struct lpfc_dmabuf *bdeBuf;
-       uint32_t size;
+       int handled;
 
-       /* Forward abort event to any process registered to receive ct event */
-       if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
-               return;
+       /* CT upper level goes through BSG */
+       handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
 
-       /* If there is no BDE associated with IOCB, there is nothing to do */
-       if (icmd->ulpBdeCount == 0)
-               return;
-       bdeBuf = piocbq->context2;
-       piocbq->context2 = NULL;
-       size  = icmd->un.cont64[0].tus.f.bdeSize;
-       lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
-       lpfc_in_buf_free(phba, bdeBuf);
+       return handled;
 }
 
 static void
index b9440de..08d156a 100644 (file)
@@ -3122,6 +3122,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
                case IOERR_SEQUENCE_TIMEOUT:
                case IOERR_INVALID_RPI:
+                       if (cmd == ELS_CMD_PLOGI &&
+                           did == NameServer_DID) {
+                               /* Continue forever if plogi to */
+                               /* the nameserver fails */
+                               maxretry = 0;
+                               delay = 100;
+                       }
                        retry = 1;
                        break;
                }
@@ -6517,7 +6524,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        struct lpfc_nodelist *ndlp;
        struct ls_rjt stat;
        uint32_t *payload;
-       uint32_t cmd, did, newnode, rjt_err = 0;
+       uint32_t cmd, did, newnode;
+       uint8_t rjt_exp, rjt_err = 0;
        IOCB_t *icmd = &elsiocb->iocb;
 
        if (!vport || !(elsiocb->context2))
@@ -6606,12 +6614,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                /* If Nport discovery is delayed, reject PLOGIs */
                if (vport->fc_flag & FC_DISC_DELAYED) {
                        rjt_err = LSRJT_UNABLE_TPC;
+                       rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
                if (vport->port_state < LPFC_DISC_AUTH) {
                        if (!(phba->pport->fc_flag & FC_PT2PT) ||
                                (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
                                rjt_err = LSRJT_UNABLE_TPC;
+                               rjt_exp = LSEXP_NOTHING_MORE;
                                break;
                        }
                        /* We get here, and drop thru, if we are PT2PT with
@@ -6648,6 +6658,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                lpfc_send_els_event(vport, ndlp, payload);
                if (vport->port_state < LPFC_DISC_AUTH) {
                        rjt_err = LSRJT_UNABLE_TPC;
+                       rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
                lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
@@ -6661,6 +6672,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                lpfc_send_els_event(vport, ndlp, payload);
                if (vport->port_state < LPFC_DISC_AUTH) {
                        rjt_err = LSRJT_UNABLE_TPC;
+                       rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
                lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
@@ -6680,6 +6692,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                phba->fc_stat.elsRcvADISC++;
                if (vport->port_state < LPFC_DISC_AUTH) {
                        rjt_err = LSRJT_UNABLE_TPC;
+                       rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
                lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6693,6 +6706,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                phba->fc_stat.elsRcvPDISC++;
                if (vport->port_state < LPFC_DISC_AUTH) {
                        rjt_err = LSRJT_UNABLE_TPC;
+                       rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
                lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6730,6 +6744,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                phba->fc_stat.elsRcvPRLI++;
                if (vport->port_state < LPFC_DISC_AUTH) {
                        rjt_err = LSRJT_UNABLE_TPC;
+                       rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
                lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
@@ -6813,6 +6828,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                if (newnode)
                        lpfc_nlp_put(ndlp);
                break;
+       case ELS_CMD_REC:
+                       /* receive this due to exchange closed */
+                       rjt_err = LSRJT_UNABLE_TPC;
+                       rjt_exp = LSEXP_INVALID_OX_RX;
+               break;
        default:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
                        "RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
@@ -6820,6 +6840,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
                /* Unsupported ELS command, reject */
                rjt_err = LSRJT_CMD_UNSUPPORTED;
+               rjt_exp = LSEXP_NOTHING_MORE;
 
                /* Unknown ELS command <elsCmd> received from NPORT <did> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6834,7 +6855,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        if (rjt_err) {
                memset(&stat, 0, sizeof(stat));
                stat.un.b.lsRjtRsnCode = rjt_err;
-               stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+               stat.un.b.lsRjtRsnCodeExp = rjt_exp;
                lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
                        NULL);
        }
index 7398ca8..e8c4760 100644 (file)
@@ -538,6 +538,7 @@ struct fc_vft_header {
 #define ELS_CMD_ECHO      0x10000000
 #define ELS_CMD_TEST      0x11000000
 #define ELS_CMD_RRQ       0x12000000
+#define ELS_CMD_REC       0x13000000
 #define ELS_CMD_PRLI      0x20100014
 #define ELS_CMD_PRLO      0x21100014
 #define ELS_CMD_PRLO_ACC  0x02100014
@@ -574,6 +575,7 @@ struct fc_vft_header {
 #define ELS_CMD_ECHO      0x10
 #define ELS_CMD_TEST      0x11
 #define ELS_CMD_RRQ       0x12
+#define ELS_CMD_REC       0x13
 #define ELS_CMD_PRLI      0x14001020
 #define ELS_CMD_PRLO      0x14001021
 #define ELS_CMD_PRLO_ACC  0x14001002
index a47cfbd..6e93b88 100644 (file)
@@ -106,6 +106,7 @@ struct lpfc_sli_intf {
 
 #define LPFC_SLI4_MB_WORD_COUNT                64
 #define LPFC_MAX_MQ_PAGE               8
+#define LPFC_MAX_WQ_PAGE_V0            4
 #define LPFC_MAX_WQ_PAGE               8
 #define LPFC_MAX_CQ_PAGE               4
 #define LPFC_MAX_EQ_PAGE               8
@@ -703,24 +704,41 @@ struct lpfc_register {
  * BAR0.  The offsets are the same so the driver must account for
  * any base address difference.
  */
-#define LPFC_RQ_DOORBELL               0x00A0
-#define lpfc_rq_doorbell_num_posted_SHIFT      16
-#define lpfc_rq_doorbell_num_posted_MASK       0x3FFF
-#define lpfc_rq_doorbell_num_posted_WORD       word0
-#define lpfc_rq_doorbell_id_SHIFT              0
-#define lpfc_rq_doorbell_id_MASK               0xFFFF
-#define lpfc_rq_doorbell_id_WORD               word0
-
-#define LPFC_WQ_DOORBELL               0x0040
-#define lpfc_wq_doorbell_num_posted_SHIFT      24
-#define lpfc_wq_doorbell_num_posted_MASK       0x00FF
-#define lpfc_wq_doorbell_num_posted_WORD       word0
-#define lpfc_wq_doorbell_index_SHIFT           16
-#define lpfc_wq_doorbell_index_MASK            0x00FF
-#define lpfc_wq_doorbell_index_WORD            word0
-#define lpfc_wq_doorbell_id_SHIFT              0
-#define lpfc_wq_doorbell_id_MASK               0xFFFF
-#define lpfc_wq_doorbell_id_WORD               word0
+#define LPFC_ULP0_RQ_DOORBELL          0x00A0
+#define LPFC_ULP1_RQ_DOORBELL          0x00C0
+#define lpfc_rq_db_list_fm_num_posted_SHIFT    24
+#define lpfc_rq_db_list_fm_num_posted_MASK     0x00FF
+#define lpfc_rq_db_list_fm_num_posted_WORD     word0
+#define lpfc_rq_db_list_fm_index_SHIFT         16
+#define lpfc_rq_db_list_fm_index_MASK          0x00FF
+#define lpfc_rq_db_list_fm_index_WORD          word0
+#define lpfc_rq_db_list_fm_id_SHIFT            0
+#define lpfc_rq_db_list_fm_id_MASK             0xFFFF
+#define lpfc_rq_db_list_fm_id_WORD             word0
+#define lpfc_rq_db_ring_fm_num_posted_SHIFT    16
+#define lpfc_rq_db_ring_fm_num_posted_MASK     0x3FFF
+#define lpfc_rq_db_ring_fm_num_posted_WORD     word0
+#define lpfc_rq_db_ring_fm_id_SHIFT            0
+#define lpfc_rq_db_ring_fm_id_MASK             0xFFFF
+#define lpfc_rq_db_ring_fm_id_WORD             word0
+
+#define LPFC_ULP0_WQ_DOORBELL          0x0040
+#define LPFC_ULP1_WQ_DOORBELL          0x0060
+#define lpfc_wq_db_list_fm_num_posted_SHIFT    24
+#define lpfc_wq_db_list_fm_num_posted_MASK     0x00FF
+#define lpfc_wq_db_list_fm_num_posted_WORD     word0
+#define lpfc_wq_db_list_fm_index_SHIFT         16
+#define lpfc_wq_db_list_fm_index_MASK          0x00FF
+#define lpfc_wq_db_list_fm_index_WORD          word0
+#define lpfc_wq_db_list_fm_id_SHIFT            0
+#define lpfc_wq_db_list_fm_id_MASK             0xFFFF
+#define lpfc_wq_db_list_fm_id_WORD             word0
+#define lpfc_wq_db_ring_fm_num_posted_SHIFT     16
+#define lpfc_wq_db_ring_fm_num_posted_MASK      0x3FFF
+#define lpfc_wq_db_ring_fm_num_posted_WORD      word0
+#define lpfc_wq_db_ring_fm_id_SHIFT             0
+#define lpfc_wq_db_ring_fm_id_MASK              0xFFFF
+#define lpfc_wq_db_ring_fm_id_WORD              word0
 
 #define LPFC_EQCQ_DOORBELL             0x0120
 #define lpfc_eqcq_doorbell_se_SHIFT            31
@@ -1131,12 +1149,22 @@ struct lpfc_mbx_wq_create {
                struct {        /* Version 0 Request */
                        uint32_t word0;
 #define lpfc_mbx_wq_create_num_pages_SHIFT     0
-#define lpfc_mbx_wq_create_num_pages_MASK      0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_MASK      0x000000FF
 #define lpfc_mbx_wq_create_num_pages_WORD      word0
+#define lpfc_mbx_wq_create_dua_SHIFT           8
+#define lpfc_mbx_wq_create_dua_MASK            0x00000001
+#define lpfc_mbx_wq_create_dua_WORD            word0
 #define lpfc_mbx_wq_create_cq_id_SHIFT         16
 #define lpfc_mbx_wq_create_cq_id_MASK          0x0000FFFF
 #define lpfc_mbx_wq_create_cq_id_WORD          word0
-                       struct dma_address page[LPFC_MAX_WQ_PAGE];
+                       struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
+                       uint32_t word9;
+#define lpfc_mbx_wq_create_bua_SHIFT           0
+#define lpfc_mbx_wq_create_bua_MASK            0x00000001
+#define lpfc_mbx_wq_create_bua_WORD            word9
+#define lpfc_mbx_wq_create_ulp_num_SHIFT       8
+#define lpfc_mbx_wq_create_ulp_num_MASK                0x000000FF
+#define lpfc_mbx_wq_create_ulp_num_WORD                word9
                } request;
                struct {        /* Version 1 Request */
                        uint32_t word0; /* Word 0 is the same as in v0 */
@@ -1160,6 +1188,17 @@ struct lpfc_mbx_wq_create {
 #define lpfc_mbx_wq_create_q_id_SHIFT  0
 #define lpfc_mbx_wq_create_q_id_MASK   0x0000FFFF
 #define lpfc_mbx_wq_create_q_id_WORD   word0
+                       uint32_t doorbell_offset;
+                       uint32_t word2;
+#define lpfc_mbx_wq_create_bar_set_SHIFT       0
+#define lpfc_mbx_wq_create_bar_set_MASK                0x0000FFFF
+#define lpfc_mbx_wq_create_bar_set_WORD                word2
+#define WQ_PCI_BAR_0_AND_1     0x00
+#define WQ_PCI_BAR_2_AND_3     0x01
+#define WQ_PCI_BAR_4_AND_5     0x02
+#define lpfc_mbx_wq_create_db_format_SHIFT     16
+#define lpfc_mbx_wq_create_db_format_MASK      0x0000FFFF
+#define lpfc_mbx_wq_create_db_format_WORD      word2
                } response;
        } u;
 };
@@ -1223,14 +1262,31 @@ struct lpfc_mbx_rq_create {
 #define lpfc_mbx_rq_create_num_pages_SHIFT     0
 #define lpfc_mbx_rq_create_num_pages_MASK      0x0000FFFF
 #define lpfc_mbx_rq_create_num_pages_WORD      word0
+#define lpfc_mbx_rq_create_dua_SHIFT           16
+#define lpfc_mbx_rq_create_dua_MASK            0x00000001
+#define lpfc_mbx_rq_create_dua_WORD            word0
+#define lpfc_mbx_rq_create_bqu_SHIFT           17
+#define lpfc_mbx_rq_create_bqu_MASK            0x00000001
+#define lpfc_mbx_rq_create_bqu_WORD            word0
+#define lpfc_mbx_rq_create_ulp_num_SHIFT       24
+#define lpfc_mbx_rq_create_ulp_num_MASK                0x000000FF
+#define lpfc_mbx_rq_create_ulp_num_WORD                word0
                        struct rq_context context;
                        struct dma_address page[LPFC_MAX_WQ_PAGE];
                } request;
                struct {
                        uint32_t word0;
-#define lpfc_mbx_rq_create_q_id_SHIFT  0
-#define lpfc_mbx_rq_create_q_id_MASK   0x0000FFFF
-#define lpfc_mbx_rq_create_q_id_WORD   word0
+#define lpfc_mbx_rq_create_q_id_SHIFT          0
+#define lpfc_mbx_rq_create_q_id_MASK           0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD           word0
+                       uint32_t doorbell_offset;
+                       uint32_t word2;
+#define lpfc_mbx_rq_create_bar_set_SHIFT       0
+#define lpfc_mbx_rq_create_bar_set_MASK                0x0000FFFF
+#define lpfc_mbx_rq_create_bar_set_WORD                word2
+#define lpfc_mbx_rq_create_db_format_SHIFT     16
+#define lpfc_mbx_rq_create_db_format_MASK      0x0000FFFF
+#define lpfc_mbx_rq_create_db_format_WORD      word2
                } response;
        } u;
 };
@@ -1388,6 +1444,33 @@ struct lpfc_mbx_get_rsrc_extent_info {
        } u;
 };
 
+struct lpfc_mbx_query_fw_config {
+       struct mbox_header header;
+       struct {
+               uint32_t config_number;
+#define        LPFC_FC_FCOE            0x00000007
+               uint32_t asic_revision;
+               uint32_t physical_port;
+               uint32_t function_mode;
+#define LPFC_FCOE_INI_MODE     0x00000040
+#define LPFC_FCOE_TGT_MODE     0x00000080
+#define LPFC_DUA_MODE          0x00000800
+               uint32_t ulp0_mode;
+#define LPFC_ULP_FCOE_INIT_MODE        0x00000040
+#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
+               uint32_t ulp0_nap_words[12];
+               uint32_t ulp1_mode;
+               uint32_t ulp1_nap_words[12];
+               uint32_t function_capabilities;
+               uint32_t cqid_base;
+               uint32_t cqid_tot;
+               uint32_t eqid_base;
+               uint32_t eqid_tot;
+               uint32_t ulp0_nap2_words[2];
+               uint32_t ulp1_nap2_words[2];
+       } rsp;
+};
+
 struct lpfc_id_range {
        uint32_t word5;
 #define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
@@ -1803,51 +1886,6 @@ struct lpfc_mbx_redisc_fcf_tbl {
 #define lpfc_mbx_redisc_fcf_index_WORD         word12
 };
 
-struct lpfc_mbx_query_fw_cfg {
-       struct mbox_header header;
-       uint32_t config_number;
-       uint32_t asic_rev;
-       uint32_t phys_port;
-       uint32_t function_mode;
-/* firmware Function Mode */
-#define lpfc_function_mode_toe_SHIFT           0
-#define lpfc_function_mode_toe_MASK            0x00000001
-#define lpfc_function_mode_toe_WORD            function_mode
-#define lpfc_function_mode_nic_SHIFT           1
-#define lpfc_function_mode_nic_MASK            0x00000001
-#define lpfc_function_mode_nic_WORD            function_mode
-#define lpfc_function_mode_rdma_SHIFT          2
-#define lpfc_function_mode_rdma_MASK           0x00000001
-#define lpfc_function_mode_rdma_WORD           function_mode
-#define lpfc_function_mode_vm_SHIFT            3
-#define lpfc_function_mode_vm_MASK             0x00000001
-#define lpfc_function_mode_vm_WORD             function_mode
-#define lpfc_function_mode_iscsi_i_SHIFT       4
-#define lpfc_function_mode_iscsi_i_MASK                0x00000001
-#define lpfc_function_mode_iscsi_i_WORD                function_mode
-#define lpfc_function_mode_iscsi_t_SHIFT       5
-#define lpfc_function_mode_iscsi_t_MASK                0x00000001
-#define lpfc_function_mode_iscsi_t_WORD                function_mode
-#define lpfc_function_mode_fcoe_i_SHIFT                6
-#define lpfc_function_mode_fcoe_i_MASK         0x00000001
-#define lpfc_function_mode_fcoe_i_WORD         function_mode
-#define lpfc_function_mode_fcoe_t_SHIFT                7
-#define lpfc_function_mode_fcoe_t_MASK         0x00000001
-#define lpfc_function_mode_fcoe_t_WORD         function_mode
-#define lpfc_function_mode_dal_SHIFT           8
-#define lpfc_function_mode_dal_MASK            0x00000001
-#define lpfc_function_mode_dal_WORD            function_mode
-#define lpfc_function_mode_lro_SHIFT           9
-#define lpfc_function_mode_lro_MASK            0x00000001
-#define lpfc_function_mode_lro_WORD            function_mode
-#define lpfc_function_mode_flex10_SHIFT                10
-#define lpfc_function_mode_flex10_MASK         0x00000001
-#define lpfc_function_mode_flex10_WORD         function_mode
-#define lpfc_function_mode_ncsi_SHIFT          11
-#define lpfc_function_mode_ncsi_MASK           0x00000001
-#define lpfc_function_mode_ncsi_WORD           function_mode
-};
-
 /* Status field for embedded SLI_CONFIG mailbox command */
 #define STATUS_SUCCESS                                 0x0
 #define STATUS_FAILED                                  0x1
@@ -2965,7 +3003,7 @@ struct lpfc_mqe {
                struct lpfc_mbx_read_config rd_config;
                struct lpfc_mbx_request_features req_ftrs;
                struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
-               struct lpfc_mbx_query_fw_cfg query_fw_cfg;
+               struct lpfc_mbx_query_fw_config query_fw_cfg;
                struct lpfc_mbx_supp_pages supp_pages;
                struct lpfc_mbx_pc_sli4_params sli4_params;
                struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
index 89ad558..314b4f6 100644 (file)
@@ -3165,14 +3165,10 @@ destroy_port(struct lpfc_vport *vport)
 int
 lpfc_get_instance(void)
 {
-       int instance = 0;
+       int ret;
 
-       /* Assign an unused number */
-       if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
-               return -1;
-       if (idr_get_new(&lpfc_hba_index, NULL, &instance))
-               return -1;
-       return instance;
+       ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
+       return ret < 0 ? -1 : ret;
 }
 
 /**
@@ -6233,9 +6229,11 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
                        phba->sli4_hba.conf_regs_memmap_p +
                                                LPFC_CTL_PORT_SEM_OFFSET;
                phba->sli4_hba.RQDBregaddr =
-                       phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
+                       phba->sli4_hba.conf_regs_memmap_p +
+                                               LPFC_ULP0_RQ_DOORBELL;
                phba->sli4_hba.WQDBregaddr =
-                       phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
+                       phba->sli4_hba.conf_regs_memmap_p +
+                                               LPFC_ULP0_WQ_DOORBELL;
                phba->sli4_hba.EQCQDBregaddr =
                        phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
                phba->sli4_hba.MQDBregaddr =
@@ -6289,9 +6287,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
                return -ENODEV;
 
        phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
-                               vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
+                               vf * LPFC_VFR_PAGE_SIZE +
+                                       LPFC_ULP0_RQ_DOORBELL);
        phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
-                               vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
+                               vf * LPFC_VFR_PAGE_SIZE +
+                                       LPFC_ULP0_WQ_DOORBELL);
        phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
                                vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
        phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
@@ -6987,6 +6987,19 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
                phba->sli4_hba.fcp_wq = NULL;
        }
 
+       if (phba->pci_bar0_memmap_p) {
+               iounmap(phba->pci_bar0_memmap_p);
+               phba->pci_bar0_memmap_p = NULL;
+       }
+       if (phba->pci_bar2_memmap_p) {
+               iounmap(phba->pci_bar2_memmap_p);
+               phba->pci_bar2_memmap_p = NULL;
+       }
+       if (phba->pci_bar4_memmap_p) {
+               iounmap(phba->pci_bar4_memmap_p);
+               phba->pci_bar4_memmap_p = NULL;
+       }
+
        /* Release FCP CQ mapping array */
        if (phba->sli4_hba.fcp_cq_map != NULL) {
                kfree(phba->sli4_hba.fcp_cq_map);
@@ -7050,6 +7063,53 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        int rc = -ENOMEM;
        int fcp_eqidx, fcp_cqidx, fcp_wqidx;
        int fcp_cq_index = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+       LPFC_MBOXQ_t *mboxq;
+       uint32_t length;
+
+       /* Check for dual-ULP support */
+       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3249 Unable to allocate memory for "
+                               "QUERY_FW_CFG mailbox command\n");
+               return -ENOMEM;
+       }
+       length = (sizeof(struct lpfc_mbx_query_fw_config) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+                        length, LPFC_SLI4_MBX_EMBED);
+
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+       shdr = (union lpfc_sli4_cfg_shdr *)
+                       &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3250 QUERY_FW_CFG mailbox failed with status "
+                               "x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               if (rc != MBX_TIMEOUT)
+                       mempool_free(mboxq, phba->mbox_mem_pool);
+               rc = -ENXIO;
+               goto out_error;
+       }
+
+       phba->sli4_hba.fw_func_mode =
+                       mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
+       phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
+       phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
+                       "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
+                       phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mboxq, phba->mbox_mem_pool);
 
        /*
         * Set up HBA Event Queues (EQs)
@@ -7663,78 +7723,6 @@ out:
        return rc;
 }
 
-/**
- * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
- * @phba: pointer to lpfc hba data structure.
- * @cnt: number of nop mailbox commands to send.
- *
- * This routine is invoked to send a number @cnt of NOP mailbox command and
- * wait for each command to complete.
- *
- * Return: the number of NOP mailbox command completed.
- **/
-static int
-lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
-{
-       LPFC_MBOXQ_t *mboxq;
-       int length, cmdsent;
-       uint32_t mbox_tmo;
-       uint32_t rc = 0;
-       uint32_t shdr_status, shdr_add_status;
-       union lpfc_sli4_cfg_shdr *shdr;
-
-       if (cnt == 0) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                               "2518 Requested to send 0 NOP mailbox cmd\n");
-               return cnt;
-       }
-
-       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-       if (!mboxq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "2519 Unable to allocate memory for issuing "
-                               "NOP mailbox command\n");
-               return 0;
-       }
-
-       /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
-       length = (sizeof(struct lpfc_mbx_nop) -
-                 sizeof(struct lpfc_sli4_cfg_mhdr));
-
-       for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
-               lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
-                                LPFC_MBOX_OPCODE_NOP, length,
-                                LPFC_SLI4_MBX_EMBED);
-               if (!phba->sli4_hba.intr_enable)
-                       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-               else {
-                       mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
-                       rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
-               }
-               if (rc == MBX_TIMEOUT)
-                       break;
-               /* Check return status */
-               shdr = (union lpfc_sli4_cfg_shdr *)
-                       &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
-               shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-               shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-                                        &shdr->response);
-               if (shdr_status || shdr_add_status || rc) {
-                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                                       "2520 NOP mailbox command failed "
-                                       "status x%x add_status x%x mbx "
-                                       "status x%x\n", shdr_status,
-                                       shdr_add_status, rc);
-                       break;
-               }
-       }
-
-       if (rc != MBX_TIMEOUT)
-               mempool_free(mboxq, phba->mbox_mem_pool);
-
-       return cmdsent;
-}
-
 /**
  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
  * @phba: pointer to lpfc hba data structure.
@@ -8502,37 +8490,6 @@ lpfc_unset_hba(struct lpfc_hba *phba)
        return;
 }
 
-/**
- * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to unset the HBA device initialization steps to
- * a device with SLI-4 interface spec.
- **/
-static void
-lpfc_sli4_unset_hba(struct lpfc_hba *phba)
-{
-       struct lpfc_vport *vport = phba->pport;
-       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-
-       spin_lock_irq(shost->host_lock);
-       vport->load_flag |= FC_UNLOADING;
-       spin_unlock_irq(shost->host_lock);
-
-       phba->pport->work_port_events = 0;
-
-       /* Stop the SLI4 device port */
-       lpfc_stop_port(phba);
-
-       lpfc_sli4_disable_intr(phba);
-
-       /* Reset SLI4 HBA FCoE function */
-       lpfc_pci_function_reset(phba);
-       lpfc_sli4_queue_destroy(phba);
-
-       return;
-}
-
 /**
  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
  * @phba: Pointer to HBA context object.
@@ -9595,7 +9552,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        struct Scsi_Host  *shost = NULL;
        int error, ret;
        uint32_t cfg_mode, intr_mode;
-       int mcnt;
        int adjusted_fcp_io_channel;
 
        /* Allocate memory for HBA structure */
@@ -9684,58 +9640,35 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
        /* Now, trying to enable interrupt and bring up the device */
        cfg_mode = phba->cfg_use_msi;
-       while (true) {
-               /* Put device to a known state before enabling interrupt */
-               lpfc_stop_port(phba);
-               /* Configure and enable interrupt */
-               intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
-               if (intr_mode == LPFC_INTR_ERROR) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0426 Failed to enable interrupt.\n");
-                       error = -ENODEV;
-                       goto out_free_sysfs_attr;
-               }
-               /* Default to single EQ for non-MSI-X */
-               if (phba->intr_type != MSIX)
-                       adjusted_fcp_io_channel = 1;
-               else
-                       adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
-               phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
-               /* Set up SLI-4 HBA */
-               if (lpfc_sli4_hba_setup(phba)) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "1421 Failed to set up hba\n");
-                       error = -ENODEV;
-                       goto out_disable_intr;
-               }
 
-               /* Send NOP mbx cmds for non-INTx mode active interrupt test */
-               if (intr_mode != 0)
-                       mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
-                                                           LPFC_ACT_INTR_CNT);
-
-               /* Check active interrupts received only for MSI/MSI-X */
-               if (intr_mode == 0 ||
-                   phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
-                       /* Log the current active interrupt mode */
-                       phba->intr_mode = intr_mode;
-                       lpfc_log_intr_mode(phba, intr_mode);
-                       break;
-               }
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "0451 Configure interrupt mode (%d) "
-                               "failed active interrupt test.\n",
-                               intr_mode);
-               /* Unset the previous SLI-4 HBA setup. */
-               /*
-                * TODO:  Is this operation compatible with IF TYPE 2
-                * devices?  All port state is deleted and cleared.
-                */
-               lpfc_sli4_unset_hba(phba);
-               /* Try next level of interrupt mode */
-               cfg_mode = --intr_mode;
+       /* Put device to a known state before enabling interrupt */
+       lpfc_stop_port(phba);
+       /* Configure and enable interrupt */
+       intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+       if (intr_mode == LPFC_INTR_ERROR) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0426 Failed to enable interrupt.\n");
+               error = -ENODEV;
+               goto out_free_sysfs_attr;
+       }
+       /* Default to single EQ for non-MSI-X */
+       if (phba->intr_type != MSIX)
+               adjusted_fcp_io_channel = 1;
+       else
+               adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+       phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
+       /* Set up SLI-4 HBA */
+       if (lpfc_sli4_hba_setup(phba)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1421 Failed to set up hba\n");
+               error = -ENODEV;
+               goto out_disable_intr;
        }
 
+       /* Log the current active interrupt mode */
+       phba->intr_mode = intr_mode;
+       lpfc_log_intr_mode(phba, intr_mode);
+
        /* Perform post initialization setup */
        lpfc_post_init_setup(phba);
 
index d8fadcb..46128c6 100644 (file)
@@ -1115,6 +1115,13 @@ out:
                                 "0261 Cannot Register NameServer login\n");
        }
 
+       /*
+       ** In case the node reference counter does not go to zero, ensure that
+       ** the stale state for the node is not processed.
+       */
+
+       ndlp->nlp_prev_state = ndlp->nlp_state;
+       lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DEFER_RM;
        spin_unlock_irq(shost->host_lock);
@@ -2159,13 +2166,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
        struct lpfc_iocbq *cmdiocb, *rspiocb;
        IOCB_t *irsp;
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
        cmdiocb = (struct lpfc_iocbq *) arg;
        rspiocb = cmdiocb->context_un.rsp_iocb;
 
        irsp = &rspiocb->iocb;
        if (irsp->ulpStatus) {
+               spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DEFER_RM;
+               spin_unlock_irq(shost->host_lock);
                return NLP_STE_FREED_NODE;
        }
        return ndlp->nlp_state;
index 60e5a17..98af07c 100644 (file)
@@ -287,6 +287,26 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
        return sdev->queue_depth;
 }
 
+/**
+ * lpfc_change_queue_type() - Change a device's scsi tag queuing type
+ * @sdev: Pointer the scsi device whose queue depth is to change
+ * @tag_type: Identifier for queue tag type
+ */
+static int
+lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+       if (sdev->tagged_supported) {
+               scsi_set_tag_type(sdev, tag_type);
+               if (tag_type)
+                       scsi_activate_tcq(sdev, sdev->queue_depth);
+               else
+                       scsi_deactivate_tcq(sdev, sdev->queue_depth);
+       } else
+               tag_type = 0;
+
+       return tag_type;
+}
+
 /**
  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
  * @phba: The Hba for which this call is being executed.
@@ -3972,7 +3992,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                        break;
                }
        } else
-               fcp_cmnd->fcpCntl1 = 0;
+               fcp_cmnd->fcpCntl1 = SIMPLE_Q;
 
        sli4 = (phba->sli_rev == LPFC_SLI_REV4);
 
@@ -5150,6 +5170,7 @@ struct scsi_host_template lpfc_template = {
        .max_sectors            = 0xFFFF,
        .vendor_id              = LPFC_NL_VENDOR_ID,
        .change_queue_depth     = lpfc_change_queue_depth,
+       .change_queue_type      = lpfc_change_queue_type,
 };
 
 struct scsi_host_template lpfc_vport_template = {
@@ -5172,4 +5193,5 @@ struct scsi_host_template lpfc_vport_template = {
        .shost_attrs            = lpfc_vport_attrs,
        .max_sectors            = 0xFFFF,
        .change_queue_depth     = lpfc_change_queue_depth,
+       .change_queue_type      = lpfc_change_queue_type,
 };
index 624eab3..74b67d9 100644 (file)
@@ -124,10 +124,17 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 
        /* Ring Doorbell */
        doorbell.word0 = 0;
-       bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
-       bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
-       bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
-       writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+       if (q->db_format == LPFC_DB_LIST_FORMAT) {
+               bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
+               bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
+               bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+       } else if (q->db_format == LPFC_DB_RING_FORMAT) {
+               bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
+               bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
+       } else {
+               return -EINVAL;
+       }
+       writel(doorbell.word0, q->db_regaddr);
 
        return 0;
 }
@@ -456,10 +463,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->entry_repost)) {
                doorbell.word0 = 0;
-               bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
-                      hq->entry_repost);
-               bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
-               writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+               if (hq->db_format == LPFC_DB_RING_FORMAT) {
+                       bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
+                              hq->entry_repost);
+                       bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
+               } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
+                       bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
+                              hq->entry_repost);
+                       bf_set(lpfc_rq_db_list_fm_index, &doorbell,
+                              hq->host_index);
+                       bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
+               } else {
+                       return -EINVAL;
+               }
+               writel(doorbell.word0, hq->db_regaddr);
        }
        return put_index;
 }
@@ -4939,7 +4956,7 @@ out_free_mboxq:
 static void
 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 {
-       uint8_t fcp_eqidx;
+       int fcp_eqidx;
 
        lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
        lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
@@ -5622,6 +5639,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
                }
                /* RPIs. */
                count = phba->sli4_hba.max_cfg_param.max_rpi;
+               if (count <= 0) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "3279 Invalid provisioning of "
+                                       "rpi:%d\n", count);
+                       rc = -EINVAL;
+                       goto err_exit;
+               }
                base = phba->sli4_hba.max_cfg_param.rpi_base;
                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
                phba->sli4_hba.rpi_bmask = kzalloc(longs *
@@ -5644,6 +5668,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 
                /* VPIs. */
                count = phba->sli4_hba.max_cfg_param.max_vpi;
+               if (count <= 0) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "3280 Invalid provisioning of "
+                                       "vpi:%d\n", count);
+                       rc = -EINVAL;
+                       goto free_rpi_ids;
+               }
                base = phba->sli4_hba.max_cfg_param.vpi_base;
                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
                phba->vpi_bmask = kzalloc(longs *
@@ -5666,6 +5697,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 
                /* XRIs. */
                count = phba->sli4_hba.max_cfg_param.max_xri;
+               if (count <= 0) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "3281 Invalid provisioning of "
+                                       "xri:%d\n", count);
+                       rc = -EINVAL;
+                       goto free_vpi_ids;
+               }
                base = phba->sli4_hba.max_cfg_param.xri_base;
                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
                phba->sli4_hba.xri_bmask = kzalloc(longs *
@@ -5689,6 +5727,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 
                /* VFIs. */
                count = phba->sli4_hba.max_cfg_param.max_vfi;
+               if (count <= 0) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "3282 Invalid provisioning of "
+                                       "vfi:%d\n", count);
+                       rc = -EINVAL;
+                       goto free_xri_ids;
+               }
                base = phba->sli4_hba.max_cfg_param.vfi_base;
                longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
                phba->sli4_hba.vfi_bmask = kzalloc(longs *
@@ -6599,7 +6644,7 @@ static int
 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                       uint32_t flag)
 {
-       MAILBOX_t *mb;
+       MAILBOX_t *mbx;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, evtctr;
        uint32_t ha_copy, hc_copy;
@@ -6653,7 +6698,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 
        psli = &phba->sli;
 
-       mb = &pmbox->u.mb;
+       mbx = &pmbox->u.mb;
        status = MBX_SUCCESS;
 
        if (phba->link_state == LPFC_HBA_ERROR) {
@@ -6668,7 +6713,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                goto out_not_finished;
        }
 
-       if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+       if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
                if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
                        !(hc_copy & HC_MBINT_ENA)) {
                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -6722,7 +6767,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                                "(%d):0308 Mbox cmd issue - BUSY Data: "
                                "x%x x%x x%x x%x\n",
                                pmbox->vport ? pmbox->vport->vpi : 0xffffff,
-                               mb->mbxCommand, phba->pport->port_state,
+                               mbx->mbxCommand, phba->pport->port_state,
                                psli->sli_flag, flag);
 
                psli->slistat.mbox_busy++;
@@ -6732,15 +6777,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        lpfc_debugfs_disc_trc(pmbox->vport,
                                LPFC_DISC_TRC_MBOX_VPORT,
                                "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
-                               (uint32_t)mb->mbxCommand,
-                               mb->un.varWords[0], mb->un.varWords[1]);
+                               (uint32_t)mbx->mbxCommand,
+                               mbx->un.varWords[0], mbx->un.varWords[1]);
                }
                else {
                        lpfc_debugfs_disc_trc(phba->pport,
                                LPFC_DISC_TRC_MBOX,
                                "MBOX Bsy:        cmd:x%x mb:x%x x%x",
-                               (uint32_t)mb->mbxCommand,
-                               mb->un.varWords[0], mb->un.varWords[1]);
+                               (uint32_t)mbx->mbxCommand,
+                               mbx->un.varWords[0], mbx->un.varWords[1]);
                }
 
                return MBX_BUSY;
@@ -6751,7 +6796,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
        /* If we are not polling, we MUST be in SLI2 mode */
        if (flag != MBX_POLL) {
                if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
-                   (mb->mbxCommand != MBX_KILL_BOARD)) {
+                   (mbx->mbxCommand != MBX_KILL_BOARD)) {
                        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
                        /* Mbox command <mbxCommand> cannot issue */
@@ -6773,23 +6818,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
                        "x%x\n",
                        pmbox->vport ? pmbox->vport->vpi : 0,
-                       mb->mbxCommand, phba->pport->port_state,
+                       mbx->mbxCommand, phba->pport->port_state,
                        psli->sli_flag, flag);
 
-       if (mb->mbxCommand != MBX_HEARTBEAT) {
+       if (mbx->mbxCommand != MBX_HEARTBEAT) {
                if (pmbox->vport) {
                        lpfc_debugfs_disc_trc(pmbox->vport,
                                LPFC_DISC_TRC_MBOX_VPORT,
                                "MBOX Send vport: cmd:x%x mb:x%x x%x",
-                               (uint32_t)mb->mbxCommand,
-                               mb->un.varWords[0], mb->un.varWords[1]);
+                               (uint32_t)mbx->mbxCommand,
+                               mbx->un.varWords[0], mbx->un.varWords[1]);
                }
                else {
                        lpfc_debugfs_disc_trc(phba->pport,
                                LPFC_DISC_TRC_MBOX,
                                "MBOX Send:       cmd:x%x mb:x%x x%x",
-                               (uint32_t)mb->mbxCommand,
-                               mb->un.varWords[0], mb->un.varWords[1]);
+                               (uint32_t)mbx->mbxCommand,
+                               mbx->un.varWords[0], mbx->un.varWords[1]);
                }
        }
 
@@ -6797,12 +6842,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
        evtctr = psli->slistat.mbox_event;
 
        /* next set own bit for the adapter and copy over command word */
-       mb->mbxOwner = OWN_CHIP;
+       mbx->mbxOwner = OWN_CHIP;
 
        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                /* Populate mbox extension offset word. */
                if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
-                       *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+                       *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
                                = (uint8_t *)phba->mbox_ext
                                  - (uint8_t *)phba->mbox;
                }
@@ -6814,11 +6859,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                                pmbox->in_ext_byte_len);
                }
                /* Copy command data to host SLIM area */
-               lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+               lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
        } else {
                /* Populate mbox extension offset word. */
                if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
-                       *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+                       *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
                                = MAILBOX_HBA_EXT_OFFSET;
 
                /* Copy the mailbox extension data */
@@ -6828,24 +6873,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                                pmbox->context2, pmbox->in_ext_byte_len);
 
                }
-               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+               if (mbx->mbxCommand == MBX_CONFIG_PORT) {
                        /* copy command data into host mbox for cmpl */
-                       lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+                       lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
                }
 
                /* First copy mbox command data to HBA SLIM, skip past first
                   word */
                to_slim = phba->MBslimaddr + sizeof (uint32_t);
-               lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+               lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
                            MAILBOX_CMD_SIZE - sizeof (uint32_t));
 
                /* Next copy over first word, with mbxOwner set */
-               ldata = *((uint32_t *)mb);
+               ldata = *((uint32_t *)mbx);
                to_slim = phba->MBslimaddr;
                writel(ldata, to_slim);
                readl(to_slim); /* flush */
 
-               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+               if (mbx->mbxCommand == MBX_CONFIG_PORT) {
                        /* switch over to host mailbox */
                        psli->sli_flag |= LPFC_SLI_ACTIVE;
                }
@@ -6920,7 +6965,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                                /* First copy command data */
                                word0 = *((uint32_t *)phba->mbox);
                                word0 = le32_to_cpu(word0);
-                               if (mb->mbxCommand == MBX_CONFIG_PORT) {
+                               if (mbx->mbxCommand == MBX_CONFIG_PORT) {
                                        MAILBOX_t *slimmb;
                                        uint32_t slimword0;
                                        /* Check real SLIM for any errors */
@@ -6947,7 +6992,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 
                if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                        /* copy results back to user */
-                       lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+                       lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
                        /* Copy the mailbox extension data */
                        if (pmbox->out_ext_byte_len && pmbox->context2) {
                                lpfc_sli_pcimem_bcopy(phba->mbox_ext,
@@ -6956,7 +7001,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        }
                } else {
                        /* First copy command data */
-                       lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+                       lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
                                                        MAILBOX_CMD_SIZE);
                        /* Copy the mailbox extension data */
                        if (pmbox->out_ext_byte_len && pmbox->context2) {
@@ -6971,7 +7016,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                readl(phba->HAregaddr); /* flush */
 
                psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-               status = mb->mbxStatus;
+               status = mbx->mbxStatus;
        }
 
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -8370,7 +8415,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
                 * This is a continuation of a commandi,(CX) so this
                 * sglq is on the active list
                 */
-               sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+               sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
                if (!sglq)
                        return IOCB_ERROR;
        }
@@ -8855,12 +8900,6 @@ lpfc_sli_setup(struct lpfc_hba *phba)
                        pring->prt[3].type = FC_TYPE_CT;
                        pring->prt[3].lpfc_sli_rcv_unsol_event =
                            lpfc_ct_unsol_event;
-                       /* abort unsolicited sequence */
-                       pring->prt[4].profile = 0;      /* Mask 4 */
-                       pring->prt[4].rctl = FC_RCTL_BA_ABTS;
-                       pring->prt[4].type = FC_TYPE_BLS;
-                       pring->prt[4].lpfc_sli_rcv_unsol_event =
-                           lpfc_sli4_ct_abort_unsol_event;
                        break;
                }
                totiocbsize += (pring->sli.sli3.numCiocb *
@@ -11873,7 +11912,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
        struct lpfc_eqe *eqe;
        unsigned long iflag;
        int ecount = 0;
-       uint32_t fcp_eqidx;
+       int fcp_eqidx;
 
        /* Get the driver's phba structure from the dev_id */
        fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
@@ -11975,7 +12014,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
        struct lpfc_hba  *phba;
        irqreturn_t hba_irq_rc;
        bool hba_handled = false;
-       uint32_t fcp_eqidx;
+       int fcp_eqidx;
 
        /* Get the driver's phba structure from the dev_id */
        phba = (struct lpfc_hba *)dev_id;
@@ -12096,6 +12135,54 @@ out_fail:
        return NULL;
 }
 
+/**
+ * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @pci_barset: PCI BAR set flag.
+ *
+ * This function shall perform iomap of the specified PCI BAR address to host
+ * memory address if not already done so and return it. The returned host
+ * memory address can be NULL.
+ */
+static void __iomem *
+lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+{
+       struct pci_dev *pdev;
+       unsigned long bar_map, bar_map_len;
+
+       if (!phba->pcidev)
+               return NULL;
+       else
+               pdev = phba->pcidev;
+
+       switch (pci_barset) {
+       case WQ_PCI_BAR_0_AND_1:
+               if (!phba->pci_bar0_memmap_p) {
+                       bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+                       bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
+                       phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
+               }
+               return phba->pci_bar0_memmap_p;
+       case WQ_PCI_BAR_2_AND_3:
+               if (!phba->pci_bar2_memmap_p) {
+                       bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+                       bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+                       phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
+               }
+               return phba->pci_bar2_memmap_p;
+       case WQ_PCI_BAR_4_AND_5:
+               if (!phba->pci_bar4_memmap_p) {
+                       bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+                       bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+                       phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
+               }
+               return phba->pci_bar4_memmap_p;
+       default:
+               break;
+       }
+       return NULL;
+}
+
 /**
  * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
  * @phba: HBA structure that indicates port to create a queue on.
@@ -12673,6 +12760,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
        union lpfc_sli4_cfg_shdr *shdr;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
        struct dma_address *page;
+       void __iomem *bar_memmap_p;
+       uint32_t db_offset;
+       uint16_t pci_barset;
 
        /* sanity check on queue memory */
        if (!wq || !cq)
@@ -12696,6 +12786,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
                    cq->queue_id);
        bf_set(lpfc_mbox_hdr_version, &shdr->request,
               phba->sli4_hba.pc_sli4_params.wqv);
+
        if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
                bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
                       wq->entry_count);
@@ -12723,6 +12814,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
                page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
                page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
        }
+
+       if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+               bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
+
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        /* The IOCTL status is embedded in the mailbox subheader. */
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12740,6 +12835,47 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
                status = -ENXIO;
                goto out;
        }
+       if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+               wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
+                                      &wq_create->u.response);
+               if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
+                   (wq->db_format != LPFC_DB_RING_FORMAT)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3265 WQ[%d] doorbell format not "
+                                       "supported: x%x\n", wq->queue_id,
+                                       wq->db_format);
+                       status = -EINVAL;
+                       goto out;
+               }
+               pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
+                                   &wq_create->u.response);
+               bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+               if (!bar_memmap_p) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3263 WQ[%d] failed to memmap pci "
+                                       "barset:x%x\n", wq->queue_id,
+                                       pci_barset);
+                       status = -ENOMEM;
+                       goto out;
+               }
+               db_offset = wq_create->u.response.doorbell_offset;
+               if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
+                   (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3252 WQ[%d] doorbell offset not "
+                                       "supported: x%x\n", wq->queue_id,
+                                       db_offset);
+                       status = -EINVAL;
+                       goto out;
+               }
+               wq->db_regaddr = bar_memmap_p + db_offset;
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3264 WQ[%d]: barset:x%x, offset:x%x\n",
+                               wq->queue_id, pci_barset, db_offset);
+       } else {
+               wq->db_format = LPFC_DB_LIST_FORMAT;
+               wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
+       }
        wq->type = LPFC_WQ;
        wq->assoc_qid = cq->queue_id;
        wq->subtype = subtype;
@@ -12816,6 +12952,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+       void __iomem *bar_memmap_p;
+       uint32_t db_offset;
+       uint16_t pci_barset;
 
        /* sanity check on queue memory */
        if (!hrq || !drq || !cq)
@@ -12894,6 +13033,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
                                        putPaddrHigh(dmabuf->phys);
        }
+       if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+               bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        /* The IOCTL status is embedded in the mailbox subheader. */
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12911,6 +13053,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                status = -ENXIO;
                goto out;
        }
+
+       if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+               hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
+                                       &rq_create->u.response);
+               if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
+                   (hrq->db_format != LPFC_DB_RING_FORMAT)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3262 RQ [%d] doorbell format not "
+                                       "supported: x%x\n", hrq->queue_id,
+                                       hrq->db_format);
+                       status = -EINVAL;
+                       goto out;
+               }
+
+               pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
+                                   &rq_create->u.response);
+               bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+               if (!bar_memmap_p) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3269 RQ[%d] failed to memmap pci "
+                                       "barset:x%x\n", hrq->queue_id,
+                                       pci_barset);
+                       status = -ENOMEM;
+                       goto out;
+               }
+
+               db_offset = rq_create->u.response.doorbell_offset;
+               if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
+                   (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3270 RQ[%d] doorbell offset not "
+                                       "supported: x%x\n", hrq->queue_id,
+                                       db_offset);
+                       status = -EINVAL;
+                       goto out;
+               }
+               hrq->db_regaddr = bar_memmap_p + db_offset;
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
+                               hrq->queue_id, pci_barset, db_offset);
+       } else {
+               hrq->db_format = LPFC_DB_RING_FORMAT;
+               hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+       }
        hrq->type = LPFC_HRQ;
        hrq->assoc_qid = cq->queue_id;
        hrq->subtype = subtype;
@@ -12976,6 +13162,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
                                        putPaddrHigh(dmabuf->phys);
        }
+       if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+               bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        /* The IOCTL status is embedded in the mailbox subheader. */
        shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
@@ -14062,6 +14250,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
        return false;
 }
 
+/**
+ * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the assembled sequence from the upper level
+ * protocol, described by the information from the basic abort @dmabuf. It
+ * checks to see whether such pending context exists at upper level protocol.
+ * If so, it shall clean up the pending context.
+ *
+ * Return
+ * true  -- if there is matching pending context of the sequence cleaned
+ *          at ulp;
+ * false -- if there is no matching pending context of the sequence present
+ *          at ulp.
+ **/
+static bool
+lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+       struct lpfc_hba *phba = vport->phba;
+       int handled;
+
+       /* Accepting abort at ulp with SLI4 only */
+       if (phba->sli_rev < LPFC_SLI_REV4)
+               return false;
+
+       /* Register all caring upper level protocols to attend abort */
+       handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
+       if (handled)
+               return true;
+
+       return false;
+}
+
 /**
  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
  * @phba: Pointer to HBA context object.
@@ -14077,8 +14299,14 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
                             struct lpfc_iocbq *cmd_iocbq,
                             struct lpfc_iocbq *rsp_iocbq)
 {
-       if (cmd_iocbq)
+       struct lpfc_nodelist *ndlp;
+
+       if (cmd_iocbq) {
+               ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+               lpfc_nlp_put(ndlp);
+               lpfc_nlp_not_used(ndlp);
                lpfc_sli_release_iocbq(phba, cmd_iocbq);
+       }
 
        /* Failure means BLS ABORT RSP did not get delivered to remote node*/
        if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
@@ -14118,9 +14346,10 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
  * event after aborting the sequence handling.
  **/
 static void
-lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
-                       struct fc_frame_header *fc_hdr)
+lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+                       struct fc_frame_header *fc_hdr, bool aborted)
 {
+       struct lpfc_hba *phba = vport->phba;
        struct lpfc_iocbq *ctiocb = NULL;
        struct lpfc_nodelist *ndlp;
        uint16_t oxid, rxid, xri, lxri;
@@ -14135,12 +14364,27 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
        rxid = be16_to_cpu(fc_hdr->fh_rx_id);
 
-       ndlp = lpfc_findnode_did(phba->pport, sid);
+       ndlp = lpfc_findnode_did(vport, sid);
        if (!ndlp) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
-                               "1268 Find ndlp returned NULL for oxid:x%x "
-                               "SID:x%x\n", oxid, sid);
-               return;
+               ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+               if (!ndlp) {
+                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+                                        "1268 Failed to allocate ndlp for "
+                                        "oxid:x%x SID:x%x\n", oxid, sid);
+                       return;
+               }
+               lpfc_nlp_init(vport, ndlp, sid);
+               /* Put ndlp onto pport node list */
+               lpfc_enqueue_node(vport, ndlp);
+       } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+               /* re-setup ndlp without removing from node list */
+               ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+               if (!ndlp) {
+                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+                                        "3275 Failed to active ndlp found "
+                                        "for oxid:x%x SID:x%x\n", oxid, sid);
+                       return;
+               }
        }
 
        /* Allocate buffer for rsp iocb */
@@ -14164,7 +14408,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
        icmd->ulpLe = 1;
        icmd->ulpClass = CLASS3;
        icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
-       ctiocb->context1 = ndlp;
+       ctiocb->context1 = lpfc_nlp_get(ndlp);
 
        ctiocb->iocb_cmpl = NULL;
        ctiocb->vport = phba->pport;
@@ -14183,14 +14427,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
        if (lxri != NO_XRI)
                lpfc_set_rrq_active(phba, ndlp, lxri,
                        (xri == oxid) ? rxid : oxid, 0);
-       /* If the oxid maps to the FCP XRI range or if it is out of range,
-        * send a BLS_RJT.  The driver no longer has that exchange.
-        * Override the IOCB for a BA_RJT.
+       /* For BA_ABTS from exchange responder, if the logical xri with
+        * the oxid maps to the FCP XRI range, the port no longer has
+        * that exchange context, send a BLS_RJT. Override the IOCB for
+        * a BA_RJT.
         */
-       if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
-                   phba->sli4_hba.max_cfg_param.xri_base) ||
-           xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
-                   phba->sli4_hba.max_cfg_param.xri_base)) {
+       if ((fctl & FC_FC_EX_CTX) &&
+           (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
+               icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+               bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+               bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+               bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+       }
+
+       /* If BA_ABTS failed to abort a partially assembled receive sequence,
+        * the driver no longer has that exchange, send a BLS_RJT. Override
+        * the IOCB for a BA_RJT.
+        */
+       if (aborted == false) {
                icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
                bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
                bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
@@ -14214,17 +14468,19 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
        bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
 
        /* Xmit CT abts response on exchange <xid> */
-       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
-                       icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                        "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+                        icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
        if (rc == IOCB_ERROR) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-                               "2925 Failed to issue CT ABTS RSP x%x on "
-                               "xri x%x, Data x%x\n",
-                               icmd->un.xseq64.w5.hcsw.Rctl, oxid,
-                               phba->link_state);
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+                                "2925 Failed to issue CT ABTS RSP x%x on "
+                                "xri x%x, Data x%x\n",
+                                icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+                                phba->link_state);
+               lpfc_nlp_put(ndlp);
+               ctiocb->context1 = NULL;
                lpfc_sli_release_iocbq(phba, ctiocb);
        }
 }
@@ -14249,32 +14505,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
        struct lpfc_hba *phba = vport->phba;
        struct fc_frame_header fc_hdr;
        uint32_t fctl;
-       bool abts_par;
+       bool aborted;
 
        /* Make a copy of fc_hdr before the dmabuf being released */
        memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
        fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
 
        if (fctl & FC_FC_EX_CTX) {
-               /*
-                * ABTS sent by responder to exchange, just free the buffer
-                */
-               lpfc_in_buf_free(phba, &dmabuf->dbuf);
+               /* ABTS by responder to exchange, no cleanup needed */
+               aborted = true;
        } else {
-               /*
-                * ABTS sent by initiator to exchange, need to do cleanup
-                */
-               /* Try to abort partially assembled seq */
-               abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
-
-               /* Send abort to ULP if partially seq abort failed */
-               if (abts_par == false)
-                       lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
-               else
-                       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+               /* ABTS by initiator to exchange, need to do cleanup */
+               aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+               if (aborted == false)
+                       aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
        }
-       /* Send basic accept (BA_ACC) to the abort requester */
-       lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
+       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+
+       /* Respond with BA_ACC or BA_RJT accordingly */
+       lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
 }
 
 /**
@@ -15307,10 +15556,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
 {
        uint16_t next_fcf_index;
 
+initial_priority:
        /* Search start from next bit of currently registered FCF index */
+       next_fcf_index = phba->fcf.current_rec.fcf_indx;
+
 next_priority:
-       next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
-                                       LPFC_SLI4_FCF_TBL_INDX_MAX;
+       /* Determine the next fcf index to check */
+       next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
        next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
                                       LPFC_SLI4_FCF_TBL_INDX_MAX,
                                       next_fcf_index);
@@ -15337,7 +15589,7 @@ next_priority:
                 * at that level and continue the selection process.
                 */
                if (lpfc_check_next_fcf_pri_level(phba))
-                       goto next_priority;
+                       goto initial_priority;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
                                "2844 No roundrobin failover FCF available\n");
                if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
index 44c427a..be02b59 100644 (file)
@@ -139,6 +139,10 @@ struct lpfc_queue {
 
        struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
 
+       uint16_t db_format;
+#define LPFC_DB_RING_FORMAT    0x01
+#define LPFC_DB_LIST_FORMAT    0x02
+       void __iomem *db_regaddr;
        /* For q stats */
        uint32_t q_cnt_1;
        uint32_t q_cnt_2;
@@ -508,6 +512,10 @@ struct lpfc_sli4_hba {
        struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
        struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
 
+       uint8_t fw_func_mode;   /* FW function protocol mode */
+       uint32_t ulp0_mode;     /* ULP0 protocol mode */
+       uint32_t ulp1_mode;     /* ULP1 protocol mode */
+
        /* Setup information for various queue parameters */
        int eq_esize;
        int eq_ecount;
index ba596e8..f3b7795 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.36"
+#define LPFC_DRIVER_VERSION "8.3.37"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 3b2365c..408d254 100644 (file)
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "06.504.01.00-rc1"
-#define MEGASAS_RELDATE                                "Oct. 1, 2012"
-#define MEGASAS_EXT_VERSION                    "Mon. Oct. 1 17:00:00 PDT 2012"
+#define MEGASAS_VERSION                                "06.506.00.00-rc1"
+#define MEGASAS_RELDATE                                "Feb. 9, 2013"
+#define MEGASAS_EXT_VERSION                    "Sat. Feb. 9 17:00:00 PDT 2013"
 
 /*
  * Device IDs
index 66a0fec..9d53540 100644 (file)
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v06.504.01.00-rc1
+ *  Version : v06.506.00.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
index 74030af..a7d5668 100644 (file)
@@ -1206,7 +1206,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                                MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
                }
                io_request->Control |= (0x4 << 26);
-               io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
+               io_request->EEDPBlockSize = scp->device->sector_size;
        } else {
                /* Some drives don't support 16/12 byte CDB's, convert to 10 */
                if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1511,7 +1511,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
        if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
            instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
                io_request->Function = 0;
-               io_request->DevHandle =
+               if (fusion->fast_path_io)
+                       io_request->DevHandle =
                        local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
                io_request->RaidContext.timeoutValue =
                        local_map_ptr->raidMap.fpPdIoTimeoutSec;
index a7c64f0..f68a3cd 100644 (file)
@@ -61,7 +61,6 @@
 #define MEGASAS_SCSI_ADDL_CDB_LEN                   0x18
 #define MEGASAS_RD_WR_PROTECT_CHECK_ALL                    0x20
 #define MEGASAS_RD_WR_PROTECT_CHECK_NONE           0x60
-#define MEGASAS_EEDPBLOCKSIZE                      512
 
 /*
  * Raid context flags
index ffd85c5..bcb23d2 100644 (file)
@@ -155,7 +155,7 @@ _base_fault_reset_work(struct work_struct *work)
        struct task_struct *p;
 
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-       if (ioc->shost_recovery)
+       if (ioc->shost_recovery || ioc->pci_error_recovery)
                goto rearm_timer;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
@@ -164,6 +164,20 @@ _base_fault_reset_work(struct work_struct *work)
                printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
                        ioc->name, __func__);
 
+               /* It may be possible that EEH recovery can resolve some of
+                * the pci bus failure issues rather than removing the dead
+                * ioc function by considering the controller to be in a
+                * non-operational state. So here priority is given to the
+                * EEH recovery. If it doesn't resolve this issue, the
+                * mpt2sas driver will consider this controller to be
+                * non-operational and remove the dead ioc function.
+                */
+               if (ioc->non_operational_loop++ < 5) {
+                       spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
+                                                        flags);
+                       goto rearm_timer;
+               }
+
                /*
                 * Call _scsih_flush_pending_cmds callback so that we flush all
                 * pending commands back to OS. This call is required to aovid
@@ -193,6 +207,8 @@ _base_fault_reset_work(struct work_struct *work)
                return; /* don't rearm timer */
        }
 
+       ioc->non_operational_loop = 0;
+
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
                    FORCE_BIG_HAMMER);
@@ -2007,6 +2023,14 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
                            MPT2SAS_INTEL_RMS25KB040_BRANDING);
                        break;
+               case MPT2SAS_INTEL_RMS25LB040_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RMS25LB040_BRANDING);
+                       break;
+               case MPT2SAS_INTEL_RMS25LB080_SSDID:
+                       printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+                           MPT2SAS_INTEL_RMS25LB080_BRANDING);
+                       break;
                default:
                        break;
                }
@@ -4386,6 +4410,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
        if (missing_delay[0] != -1 && missing_delay[1] != -1)
                _base_update_missing_delay(ioc, missing_delay[0],
                    missing_delay[1]);
+       ioc->non_operational_loop = 0;
 
        return 0;
 
index 543d8d6..4caaac1 100644 (file)
                                "Intel(R) Integrated RAID Module RMS25KB080"
 #define MPT2SAS_INTEL_RMS25KB040_BRANDING    \
                                "Intel(R) Integrated RAID Module RMS25KB040"
+#define MPT2SAS_INTEL_RMS25LB040_BRANDING      \
+                               "Intel(R) Integrated RAID Module RMS25LB040"
+#define MPT2SAS_INTEL_RMS25LB080_BRANDING      \
+                               "Intel(R) Integrated RAID Module RMS25LB080"
 #define MPT2SAS_INTEL_RMS2LL080_BRANDING       \
                                "Intel Integrated RAID Module RMS2LL080"
 #define MPT2SAS_INTEL_RMS2LL040_BRANDING       \
 #define MPT2SAS_INTEL_RMS25JB040_SSDID         0x3517
 #define MPT2SAS_INTEL_RMS25KB080_SSDID         0x3518
 #define MPT2SAS_INTEL_RMS25KB040_SSDID         0x3519
+#define MPT2SAS_INTEL_RMS25LB040_SSDID         0x351A
+#define MPT2SAS_INTEL_RMS25LB080_SSDID         0x351B
 #define MPT2SAS_INTEL_RMS2LL080_SSDID          0x350E
 #define MPT2SAS_INTEL_RMS2LL040_SSDID          0x350F
 #define MPT2SAS_INTEL_RS25GB008_SSDID          0x3000
@@ -835,6 +841,7 @@ struct MPT2SAS_ADAPTER {
        u16             cpu_msix_table_sz;
        u32             ioc_reset_count;
        MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+       u32             non_operational_loop;
 
        /* internal commands, callback index */
        u8              scsi_io_cb_idx;
index 04f8010..1836003 100644 (file)
@@ -42,7 +42,6 @@
  * USA.
  */
 
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/errno.h>
@@ -1310,7 +1309,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
        void *sg_local, *chain;
        u32 chain_offset;
        u32 chain_length;
-       u32 chain_flags;
        int sges_left;
        u32 sges_in_segment;
        u8 simple_sgl_flags;
@@ -1356,8 +1354,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
                sges_in_segment--;
        }
 
-       /* initializing the chain flags and pointers */
-       chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
+       /* initializing the pointers */
        chain_req = _base_get_chain_buffer_tracker(ioc, smid);
        if (!chain_req)
                return -1;
index ce7e59b..1df9ed4 100644 (file)
@@ -41,7 +41,6 @@
  * USA.
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
index 8af944d..054d523 100644 (file)
@@ -42,7 +42,6 @@
  * USA.
  */
 
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/errno.h>
@@ -3136,7 +3135,7 @@ _ctl_diag_trigger_mpi_store(struct device *cdev,
        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
        sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
        memset(&ioc->diag_trigger_mpi, 0,
-           sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+           sizeof(ioc->diag_trigger_mpi));
        memcpy(&ioc->diag_trigger_mpi, buf, sz);
        if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
                ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
index 6421a06..dcbf7c8 100644 (file)
@@ -41,7 +41,6 @@
  * USA.
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -2755,13 +2754,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
        int i;
        u16 handle;
        u16 reason_code;
-       u8 phy_number;
 
        for (i = 0; i < event_data->NumEntries; i++) {
                handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
                if (!handle)
                        continue;
-               phy_number = event_data->StartPhyNum + i;
                reason_code = event_data->PHY[i].PhyStatus &
                    MPI2_EVENT_SAS_TOPO_RC_MASK;
                if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
index da6c5f2..6f8d621 100644 (file)
@@ -42,7 +42,6 @@
  * USA.
  */
 
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/errno.h>
index 078c639..532110f 100644 (file)
@@ -316,10 +316,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
                             struct mvs_task_exec_info *tei)
 {
        int elem, rc, i;
+       struct sas_ha_struct *sha = mvi->sas;
        struct sas_task *task = tei->task;
        struct mvs_cmd_hdr *hdr = tei->hdr;
        struct domain_device *dev = task->dev;
        struct asd_sas_port *sas_port = dev->port;
+       struct sas_phy *sphy = dev->phy;
+       struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
        struct scatterlist *sg_req, *sg_resp;
        u32 req_len, resp_len, tag = tei->tag;
        void *buf_tmp;
@@ -392,7 +395,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
        slot->tx = mvi->tx_prod;
        mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
                                        TXQ_MODE_I | tag |
-                                       (sas_port->phy_mask << TXQ_PHY_SHIFT));
+                                       (MVS_PHY_ID << TXQ_PHY_SHIFT));
 
        hdr->flags |= flags;
        hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
@@ -438,11 +441,14 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
 static int mvs_task_prep_ata(struct mvs_info *mvi,
                             struct mvs_task_exec_info *tei)
 {
+       struct sas_ha_struct *sha = mvi->sas;
        struct sas_task *task = tei->task;
        struct domain_device *dev = task->dev;
        struct mvs_device *mvi_dev = dev->lldd_dev;
        struct mvs_cmd_hdr *hdr = tei->hdr;
        struct asd_sas_port *sas_port = dev->port;
+       struct sas_phy *sphy = dev->phy;
+       struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
        struct mvs_slot_info *slot;
        void *buf_prd;
        u32 tag = tei->tag, hdr_tag;
@@ -462,7 +468,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
        slot->tx = mvi->tx_prod;
        del_q = TXQ_MODE_I | tag |
                (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
-               (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+               (MVS_PHY_ID << TXQ_PHY_SHIFT) |
                (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
        mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
 
index 2ae77a0..9f3cc13 100644 (file)
@@ -76,6 +76,7 @@ extern struct kmem_cache *mvs_task_list_cache;
                                        (__mc) != 0 ;           \
                                        (++__lseq), (__mc) >>= 1)
 
+#define MVS_PHY_ID (1U << sas_phy->id)
 #define MV_INIT_DELAYED_WORK(w, f, d)  INIT_DELAYED_WORK(w, f)
 #define UNASSOC_D2H_FIS(id)            \
        ((void *) mvi->rx_fis + 0x100 * id)
index c06b8e5..d8293f2 100644 (file)
@@ -144,6 +144,10 @@ static int _osd_get_print_system_info(struct osd_dev *od,
        odi->osdname_len = get_attrs[a].len;
        /* Avoid NULL for memcmp optimization 0-length is good enough */
        odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+       if (!odi->osdname) {
+               ret = -ENOMEM;
+               goto out;
+       }
        if (odi->osdname_len)
                memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
        OSD_INFO("OSD_NAME               [%s]\n", odi->osdname);
index 4c9fe73..3d5e522 100644 (file)
@@ -140,7 +140,8 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
                        pci_free_consistent(pm8001_ha->pdev,
-                               pm8001_ha->memoryMap.region[i].element_size,
+                               (pm8001_ha->memoryMap.region[i].total_len +
+                               pm8001_ha->memoryMap.region[i].alignment),
                                pm8001_ha->memoryMap.region[i].virt_ptr,
                                pm8001_ha->memoryMap.region[i].phys_addr);
                        }
index 83d7984..1d82eef 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1272,22 +1272,29 @@ qla2x00_thermal_temp_show(struct device *dev,
        struct device_attribute *attr, char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
-       int rval = QLA_FUNCTION_FAILED;
-       uint16_t temp, frac;
+       uint16_t temp = 0;
 
-       if (!vha->hw->flags.thermal_supported)
-               return snprintf(buf, PAGE_SIZE, "\n");
+       if (!vha->hw->thermal_support) {
+               ql_log(ql_log_warn, vha, 0x70db,
+                   "Thermal not supported by this card.\n");
+               goto done;
+       }
 
-       temp = frac = 0;
-       if (qla2x00_reset_active(vha))
-               ql_log(ql_log_warn, vha, 0x707b,
-                   "ISP reset active.\n");
-       else if (!vha->hw->flags.eeh_busy)
-               rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
-       if (rval != QLA_SUCCESS)
-               return snprintf(buf, PAGE_SIZE, "\n");
+       if (qla2x00_reset_active(vha)) {
+               ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+               goto done;
+       }
+
+       if (vha->hw->flags.eeh_busy) {
+               ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+               goto done;
+       }
+
+       if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+               return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 
-       return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
+done:
+       return snprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t
index 9f34ded..ad54099 100644 (file)
@@ -27,7 +27,7 @@ void
 qla2x00_bsg_sp_free(void *data, void *ptr)
 {
        srb_t *sp = (srb_t *)ptr;
-       struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+       struct scsi_qla_host *vha = sp->fcport->vha;
        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
        struct qla_hw_data *ha = vha->hw;
 
@@ -40,7 +40,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
        if (sp->type == SRB_CT_CMD ||
            sp->type == SRB_ELS_CMD_HST)
                kfree(sp->fcport);
-       mempool_free(sp, vha->hw->srb_mempool);
+       qla2x00_rel_sp(vha, sp);
 }
 
 int
@@ -368,7 +368,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
-               mempool_free(sp, ha->srb_mempool);
+               qla2x00_rel_sp(vha, sp);
                rval = -EIO;
                goto done_unmap_sg;
        }
@@ -515,7 +515,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7017,
                    "qla2x00_start_sp failed=%d.\n", rval);
-               mempool_free(sp, ha->srb_mempool);
+               qla2x00_rel_sp(vha, sp);
                rval = -EIO;
                goto done_free_fcport;
        }
@@ -531,6 +531,75 @@ done_unmap_sg:
 done:
        return rval;
 }
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+                           int wait, int wait2)
+{
+       int ret = 0;
+       int rval = 0;
+       uint16_t new_config[4];
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+               goto done_reset_internal;
+
+       memset(new_config, 0 , sizeof(new_config));
+       if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+           ENABLE_INTERNAL_LOOPBACK ||
+           (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+           ENABLE_EXTERNAL_LOOPBACK) {
+               new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+               ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+                   (new_config[0] & INTERNAL_LOOPBACK_MASK));
+               memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
+
+               ha->notify_dcbx_comp = wait;
+               ha->notify_lb_portup_comp = wait2;
+
+               ret = qla81xx_set_port_config(vha, new_config);
+               if (ret != QLA_SUCCESS) {
+                       ql_log(ql_log_warn, vha, 0x7025,
+                           "Set port config failed.\n");
+                       ha->notify_dcbx_comp = 0;
+                       ha->notify_lb_portup_comp = 0;
+                       rval = -EINVAL;
+                       goto done_reset_internal;
+               }
+
+               /* Wait for DCBX complete event */
+               if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+                       (DCBX_COMP_TIMEOUT * HZ))) {
+                       ql_dbg(ql_dbg_user, vha, 0x7026,
+                           "DCBX completion not received.\n");
+                       ha->notify_dcbx_comp = 0;
+                       ha->notify_lb_portup_comp = 0;
+                       rval = -EINVAL;
+                       goto done_reset_internal;
+               } else
+                       ql_dbg(ql_dbg_user, vha, 0x7027,
+                           "DCBX completion received.\n");
+
+               if (wait2 &&
+                   !wait_for_completion_timeout(&ha->lb_portup_comp,
+                   (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+                       ql_dbg(ql_dbg_user, vha, 0x70c5,
+                           "Port up completion not received.\n");
+                       ha->notify_lb_portup_comp = 0;
+                       rval = -EINVAL;
+                       goto done_reset_internal;
+               } else
+                       ql_dbg(ql_dbg_user, vha, 0x70c6,
+                           "Port up completion received.\n");
+
+               ha->notify_dcbx_comp = 0;
+               ha->notify_lb_portup_comp = 0;
+       }
+done_reset_internal:
+       return rval;
+}
+
 /*
  * Set the port configuration to enable the internal or external loopback
  * depending on the loopback mode.
@@ -566,9 +635,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
        }
 
        /* Wait for DCBX complete event */
-       if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
+       if (!wait_for_completion_timeout(&ha->dcbx_comp,
+           (DCBX_COMP_TIMEOUT * HZ))) {
                ql_dbg(ql_dbg_user, vha, 0x7022,
-                   "State change notification not received.\n");
+                   "DCBX completion not received.\n");
+               ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+               /*
+                * If the reset of the loopback mode doesn't work take a FCoE
+                * dump and reset the chip.
+                */
+               if (ret) {
+                       ha->isp_ops->fw_dump(vha, 0);
+                       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               }
                rval = -EINVAL;
        } else {
                if (ha->flags.idc_compl_status) {
@@ -578,7 +657,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
                        ha->flags.idc_compl_status = 0;
                } else
                        ql_dbg(ql_dbg_user, vha, 0x7023,
-                           "State change received.\n");
+                           "DCBX completion received.\n");
        }
 
        ha->notify_dcbx_comp = 0;
@@ -587,57 +666,6 @@ done_set_internal:
        return rval;
 }
 
-/* Disable loopback mode */
-static inline int
-qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
-    int wait)
-{
-       int ret = 0;
-       int rval = 0;
-       uint16_t new_config[4];
-       struct qla_hw_data *ha = vha->hw;
-
-       if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
-               goto done_reset_internal;
-
-       memset(new_config, 0 , sizeof(new_config));
-       if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
-           ENABLE_INTERNAL_LOOPBACK ||
-           (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
-           ENABLE_EXTERNAL_LOOPBACK) {
-               new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
-               ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
-                   (new_config[0] & INTERNAL_LOOPBACK_MASK));
-               memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
-
-               ha->notify_dcbx_comp = wait;
-               ret = qla81xx_set_port_config(vha, new_config);
-               if (ret != QLA_SUCCESS) {
-                       ql_log(ql_log_warn, vha, 0x7025,
-                           "Set port config failed.\n");
-                       ha->notify_dcbx_comp = 0;
-                       rval = -EINVAL;
-                       goto done_reset_internal;
-               }
-
-               /* Wait for DCBX complete event */
-               if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
-                       (20 * HZ))) {
-                       ql_dbg(ql_dbg_user, vha, 0x7026,
-                           "State change notification not received.\n");
-                       ha->notify_dcbx_comp = 0;
-                       rval = -EINVAL;
-                       goto done_reset_internal;
-               } else
-                       ql_dbg(ql_dbg_user, vha, 0x7027,
-                           "State change received.\n");
-
-               ha->notify_dcbx_comp = 0;
-       }
-done_reset_internal:
-       return rval;
-}
-
 static int
 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 {
@@ -739,6 +767,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
                        memset(config, 0, sizeof(config));
                        memset(new_config, 0, sizeof(new_config));
+
                        if (qla81xx_get_port_config(vha, config)) {
                                ql_log(ql_log_warn, vha, 0x701f,
                                    "Get port config failed.\n");
@@ -746,6 +775,14 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                                goto done_free_dma_rsp;
                        }
 
+                       if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+                               ql_dbg(ql_dbg_user, vha, 0x70c4,
+                                   "Loopback operation already in "
+                                   "progress.\n");
+                               rval = -EAGAIN;
+                               goto done_free_dma_rsp;
+                       }
+
                        ql_dbg(ql_dbg_user, vha, 0x70c0,
                            "elreq.options=%04x\n", elreq.options);
 
@@ -755,7 +792,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                                            config, new_config, elreq.options);
                                else
                                        rval = qla81xx_reset_loopback_mode(vha,
-                                           config, 1);
+                                           config, 1, 0);
                        else
                                rval = qla81xx_set_loopback_mode(vha, config,
                                    new_config, elreq.options);
@@ -772,14 +809,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
                        rval = qla2x00_loopback_test(vha, &elreq, response);
 
-                       if (new_config[0]) {
-                               /* Revert back to original port config
-                                * Also clear internal loopback
-                                */
-                               qla81xx_reset_loopback_mode(vha,
-                                   new_config, 0);
-                       }
-
                        if (response[0] == MBS_COMMAND_ERROR &&
                                        response[1] == MBS_LB_RESET) {
                                ql_log(ql_log_warn, vha, 0x7029,
@@ -788,15 +817,39 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                                qla2xxx_wake_dpc(vha);
                                qla2x00_wait_for_chip_reset(vha);
                                /* Also reset the MPI */
-                               if (qla81xx_restart_mpi_firmware(vha) !=
-                                   QLA_SUCCESS) {
-                                       ql_log(ql_log_warn, vha, 0x702a,
-                                           "MPI reset failed.\n");
+                               if (IS_QLA81XX(ha)) {
+                                       if (qla81xx_restart_mpi_firmware(vha) !=
+                                           QLA_SUCCESS) {
+                                               ql_log(ql_log_warn, vha, 0x702a,
+                                                   "MPI reset failed.\n");
+                                       }
                                }
 
                                rval = -EIO;
                                goto done_free_dma_rsp;
                        }
+
+                       if (new_config[0]) {
+                               int ret;
+
+                               /* Revert back to original port config
+                                * Also clear internal loopback
+                                */
+                               ret = qla81xx_reset_loopback_mode(vha,
+                                   new_config, 0, 1);
+                               if (ret) {
+                                       /*
+                                        * If the reset of the loopback mode
+                                        * doesn't work take FCoE dump and then
+                                        * reset the chip.
+                                        */
+                                       ha->isp_ops->fw_dump(vha, 0);
+                                       set_bit(ISP_ABORT_NEEDED,
+                                           &vha->dpc_flags);
+                               }
+
+                       }
+
                } else {
                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
                        ql_dbg(ql_dbg_user, vha, 0x702b,
@@ -1950,7 +2003,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
                if (!req)
                        continue;
 
-               for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+               for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp) {
                                if (((sp->type == SRB_CT_CMD) ||
@@ -1985,6 +2038,6 @@ done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        if (bsg_job->request->msgcode == FC_BSG_HST_CT)
                kfree(sp->fcport);
-       mempool_free(sp, ha->srb_mempool);
+       qla2x00_rel_sp(vha, sp);
        return 0;
 }
index 37b8b7b..e9f6b9b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 53f9e49..1626de5 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0125       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x114f       | 0x111a-0x111b  |
+ * | Module Init and Probe        |       0x0126       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x115b       | 0x111a-0x111b  |
  * |                              |                    | 0x112c-0x112e  |
  * |                              |                    | 0x113a         |
  * | Device Discovery             |       0x2087       | 0x2020-0x2022, |
  * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3030       | 0x3006-0x300b  |
+ * | Queue Command and IO tracing |       0x3031       | 0x3006-0x300b  |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x302d-0x302e  |
  * | DPC Thread                   |       0x401d       | 0x4002,0x4013  |
  * | Async Events                 |       0x5071       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
  * | Timer Routines               |       0x6011       |                |
- * | User Space Interactions      |       0x70c3       | 0x7018,0x702e, |
+ * | User Space Interactions      |       0x70c4       | 0x7018,0x702e, |
+ * |                              |                    | 0x7020,0x7024, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
  * |                              |                    | 0x708c,        |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x9011       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb084       | 0xb002,0xb024  |
+ * | ISP82XX Specific             |       0xb086       | 0xb002,0xb024  |
  * | MultiQ                       |       0xc00c       |               |
  * | Misc                         |       0xd010       |               |
- * | Target Mode                 |       0xe06f       |                |
- * | Target Mode Management      |       0xf071       |                |
+ * | Target Mode                 |       0xe070       |                |
+ * | Target Mode Management      |       0xf072       |                |
  * | Target Mode Task Management  |      0x1000b      |                |
  * ----------------------------------------------------------------------
  */
index 8f911c0..35e20b4 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 6e7727f..c650991 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -37,6 +37,7 @@
 #include "qla_nx.h"
 #define QLA2XXX_DRIVER_NAME    "qla2xxx"
 #define QLA2XXX_APIDEV         "ql2xapidev"
+#define QLA2XXX_MANUFACTURER   "QLogic Corporation"
 
 /*
  * We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
 #define LOOP_DOWN_TIME                 255     /* 240 */
 #define        LOOP_DOWN_RESET                 (LOOP_DOWN_TIME - 30)
 
-/* Maximum outstanding commands in ISP queues (1-65535) */
-#define MAX_OUTSTANDING_COMMANDS       1024
+#define DEFAULT_OUTSTANDING_COMMANDS   1024
+#define MIN_OUTSTANDING_COMMANDS       128
 
 /* ISP request and response entry counts (37-65535) */
 #define REQUEST_ENTRY_CNT_2100         128     /* Number of request entries. */
@@ -537,6 +538,8 @@ struct device_reg_25xxmq {
        uint32_t req_q_out;
        uint32_t rsp_q_in;
        uint32_t rsp_q_out;
+       uint32_t atio_q_in;
+       uint32_t atio_q_out;
 };
 
 typedef union {
@@ -563,6 +566,9 @@ typedef union {
         &(reg)->u.isp2100.mailbox5 : \
         &(reg)->u.isp2300.rsp_q_out)
 
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
 #define MAILBOX_REG(ha, reg, num) \
        (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
         (num < 8 ? \
@@ -762,8 +768,8 @@ typedef struct {
 #define MBC_PORT_LOGOUT                        0x56    /* Port Logout request */
 #define MBC_SEND_RNID_ELS              0x57    /* Send RNID ELS request */
 #define MBC_SET_RNID_PARAMS            0x59    /* Set RNID parameters */
-#define MBC_GET_RNID_PARAMS            0x5a    /* Data Rate */
-#define MBC_DATA_RATE                  0x5d    /* Get RNID parameters */
+#define MBC_GET_RNID_PARAMS            0x5a    /* Get RNID parameters */
+#define MBC_DATA_RATE                  0x5d    /* Data Rate */
 #define MBC_INITIALIZE_FIRMWARE                0x60    /* Initialize firmware */
 #define MBC_INITIATE_LIP               0x62    /* Initiate Loop */
                                                /* Initialization Procedure */
@@ -809,6 +815,7 @@ typedef struct {
 #define MBC_HOST_MEMORY_COPY           0x53    /* Host Memory Copy. */
 #define MBC_SEND_RNFT_ELS              0x5e    /* Send RNFT ELS request */
 #define MBC_GET_LINK_PRIV_STATS                0x6d    /* Get link & private data. */
+#define MBC_LINK_INITIALIZATION                0x72    /* Do link initialization. */
 #define MBC_SET_VENDOR_ID              0x76    /* Set Vendor ID. */
 #define MBC_PORT_RESET                 0x120   /* Port Reset */
 #define MBC_SET_PORT_CONFIG            0x122   /* Set port configuration */
@@ -856,6 +863,9 @@ typedef struct {
 #define        MBX_1           BIT_1
 #define        MBX_0           BIT_0
 
+#define RNID_TYPE_SET_VERSION  0x9
+#define RNID_TYPE_ASIC_TEMP    0xC
+
 /*
  * Firmware state codes from get firmware state mailbox command
  */
@@ -1841,9 +1851,6 @@ typedef struct fc_port {
        uint8_t scan_state;
 } fc_port_t;
 
-#define QLA_FCPORT_SCAN_NONE   0
-#define QLA_FCPORT_SCAN_FOUND  1
-
 /*
  * Fibre channel port/lun states.
  */
@@ -2533,8 +2540,10 @@ struct req_que {
        uint16_t  qos;
        uint16_t  vp_idx;
        struct rsp_que *rsp;
-       srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+       srb_t **outstanding_cmds;
        uint32_t current_outstanding_cmd;
+       uint16_t num_outstanding_cmds;
+#define        MAX_Q_DEPTH             32
        int max_q_depth;
 };
 
@@ -2557,11 +2566,13 @@ struct qlt_hw_data {
        struct atio *atio_ring_ptr;     /* Current address. */
        uint16_t atio_ring_index; /* Current index. */
        uint16_t atio_q_length;
+       uint32_t __iomem *atio_q_in;
+       uint32_t __iomem *atio_q_out;
 
        void *target_lport_ptr;
        struct qla_tgt_func_tmpl *tgt_ops;
        struct qla_tgt *qla_tgt;
-       struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+       struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
        uint16_t current_handle;
 
        struct qla_tgt_vp_map *tgt_vp_map;
@@ -2618,7 +2629,6 @@ struct qla_hw_data {
                uint32_t        nic_core_hung:1;
 
                uint32_t        quiesce_owner:1;
-               uint32_t        thermal_supported:1;
                uint32_t        nic_core_reset_hdlr_active:1;
                uint32_t        nic_core_reset_owner:1;
                uint32_t        isp82xx_no_md_cap:1;
@@ -2788,6 +2798,8 @@ struct qla_hw_data {
 #define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha))
 #define IS_PI_SPLIT_DET_CAPABLE(ha)    (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
     (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha)        (ha->tgt.atio_q_length)
 
        /* HBA serial number */
        uint8_t         serial0;
@@ -2870,7 +2882,13 @@ struct qla_hw_data {
        struct completion mbx_cmd_comp; /* Serialize mbx access */
        struct completion mbx_intr_comp;  /* Used for completion notification */
        struct completion dcbx_comp;    /* For set port config notification */
+       struct completion lb_portup_comp; /* Used to wait for link up during
+                                          * loopback */
+#define DCBX_COMP_TIMEOUT      20
+#define LB_PORTUP_COMP_TIMEOUT 10
+
        int notify_dcbx_comp;
+       int notify_lb_portup_comp;
        struct mutex selflogin_lock;
 
        /* Basic firmware related information. */
@@ -2887,6 +2905,7 @@ struct qla_hw_data {
 #define RISC_START_ADDRESS_2300 0x800
 #define RISC_START_ADDRESS_2400 0x100000
        uint16_t        fw_xcb_count;
+       uint16_t        fw_iocb_count;
 
        uint16_t        fw_options[16];         /* slots: 1,2,3,10,11 */
        uint8_t         fw_seriallink_options[4];
@@ -3056,7 +3075,16 @@ struct qla_hw_data {
        struct work_struct idc_state_handler;
        struct work_struct nic_core_unrecoverable;
 
+#define HOST_QUEUE_RAMPDOWN_INTERVAL           (60 * HZ)
+#define HOST_QUEUE_RAMPUP_INTERVAL             (30 * HZ)
+       unsigned long   host_last_rampdown_time;
+       unsigned long   host_last_rampup_time;
+       int             cfg_lun_q_depth;
+
        struct qlt_hw_data tgt;
+       uint16_t        thermal_support;
+#define THERMAL_SUPPORT_I2C BIT_0
+#define THERMAL_SUPPORT_ISP BIT_1
 };
 
 /*
@@ -3115,6 +3143,8 @@ typedef struct scsi_qla_host {
 #define MPI_RESET_NEEDED       19      /* Initiate MPI FW reset */
 #define ISP_QUIESCE_NEEDED     20      /* Driver need some quiescence */
 #define SCR_PENDING            21      /* SCR in target mode */
+#define HOST_RAMP_DOWN_QUEUE_DEPTH     22
+#define HOST_RAMP_UP_QUEUE_DEPTH       23
 
        uint32_t        device_flags;
 #define SWITCH_FOUND           BIT_0
@@ -3248,8 +3278,6 @@ struct qla_tgt_vp_map {
 
 #define NVRAM_DELAY()          udelay(10)
 
-#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS+1)
-
 /*
  * Flash support definitions
  */
index 706c4f7..792a292 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index be6d61a..1ac2b0e 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -300,7 +300,8 @@ struct init_cb_24xx {
        uint32_t prio_request_q_address[2];
 
        uint16_t msix;
-       uint8_t reserved_2[6];
+       uint16_t msix_atio;
+       uint8_t reserved_2[4];
 
        uint16_t atio_q_inpointer;
        uint16_t atio_q_length;
@@ -1387,9 +1388,7 @@ struct qla_flt_header {
 #define FLT_REG_FCP_PRIO_0     0x87
 #define FLT_REG_FCP_PRIO_1     0x88
 #define FLT_REG_FCOE_FW                0xA4
-#define FLT_REG_FCOE_VPD_0     0xA9
 #define FLT_REG_FCOE_NVRAM_0   0xAA
-#define FLT_REG_FCOE_VPD_1     0xAB
 #define FLT_REG_FCOE_NVRAM_1   0xAC
 
 struct qla_flt_region {
index 2411d1a..eb3ca21 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -55,7 +55,7 @@ extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
 extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
 
-extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
 
 extern void qla84xx_put_chip(struct scsi_qla_host *);
 
@@ -84,6 +84,9 @@ extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
 extern void qla83xx_reset_ownership(scsi_qla_host_t *);
 extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
 
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -94,6 +97,7 @@ extern int qlport_down_retry;
 extern int ql2xplogiabsentdevice;
 extern int ql2xloginretrycount;
 extern int ql2xfdmienable;
+extern int ql2xmaxqdepth;
 extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
 extern int ql2xiidmaenable;
@@ -277,6 +281,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *, uint16_t *);
 extern int
 qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
 
+extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
 extern int
 qla2x00_lip_reset(scsi_qla_host_t *);
 
@@ -350,6 +357,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
 extern int
 qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
 
+extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
 extern int
 qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
        uint16_t, uint16_t, uint16_t, uint16_t);
@@ -436,6 +446,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
     uint32_t);
 extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
     uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
 
 extern int qla2x00_beacon_on(struct scsi_qla_host *);
 extern int qla2x00_beacon_off(struct scsi_qla_host *);
index 01efc0e..9b45525 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1328,8 +1328,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
        /* Manufacturer. */
        eiter = (struct ct_fdmi_hba_attr *) (entries + size);
        eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
-       strcpy(eiter->a.manufacturer, "QLogic Corporation");
-       alen = strlen(eiter->a.manufacturer);
+       alen = strlen(QLA2XXX_MANUFACTURER);
+       strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
        alen += (alen & 3) ? (4 - (alen & 3)) : 4;
        eiter->len = cpu_to_be16(4 + alen);
        size += 4 + alen;
@@ -1649,8 +1649,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
        /* OS device name. */
        eiter = (struct ct_fdmi_port_attr *) (entries + size);
        eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
-       strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME);
-       alen = strlen(eiter->a.os_dev_name);
+       alen = strlen(QLA2XXX_DRIVER_NAME);
+       strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
        alen += (alen & 3) ? (4 - (alen & 3)) : 4;
        eiter->len = cpu_to_be16(4 + alen);
        size += 4 + alen;
index 563eee3..edf4d14 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -70,9 +70,7 @@ qla2x00_sp_free(void *data, void *ptr)
        struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
 
        del_timer(&iocb->timer);
-       mempool_free(sp, vha->hw->srb_mempool);
-
-       QLA_VHA_MARK_NOT_BUSY(vha);
+       qla2x00_rel_sp(vha, sp);
 }
 
 /* Asynchronous Login/Logout Routines -------------------------------------- */
@@ -525,7 +523,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
        vha->flags.reset_active = 0;
        ha->flags.pci_channel_io_perm_failure = 0;
        ha->flags.eeh_busy = 0;
-       ha->flags.thermal_supported = 1;
+       ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        vha->device_flags = DFLG_NO_CABLE;
@@ -621,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
        if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
                qla24xx_read_fcp_prio_cfg(vha);
 
+       qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
        return (rval);
 }
 
@@ -1559,6 +1559,47 @@ done:
        return rval;
 }
 
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+       /* Don't try to reallocate the array */
+       if (req->outstanding_cmds)
+               return QLA_SUCCESS;
+
+       if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
+           (ql2xmultique_tag || ql2xmaxqueues > 1)))
+               req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+       else {
+               if (ha->fw_xcb_count <= ha->fw_iocb_count)
+                       req->num_outstanding_cmds = ha->fw_xcb_count;
+               else
+                       req->num_outstanding_cmds = ha->fw_iocb_count;
+       }
+
+       req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+           req->num_outstanding_cmds, GFP_KERNEL);
+
+       if (!req->outstanding_cmds) {
+               /*
+                * Try to allocate a minimal size just so we can get through
+                * initialization.
+                */
+               req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+               req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+                   req->num_outstanding_cmds, GFP_KERNEL);
+
+               if (!req->outstanding_cmds) {
+                       ql_log(ql_log_fatal, NULL, 0x0126,
+                           "Failed to allocate memory for "
+                           "outstanding_cmds for req_que %p.\n", req);
+                       req->num_outstanding_cmds = 0;
+                       return QLA_FUNCTION_FAILED;
+               }
+       }
+
+       return QLA_SUCCESS;
+}
+
 /**
  * qla2x00_setup_chip() - Load and start RISC firmware.
  * @ha: HA context
@@ -1628,9 +1669,18 @@ enable_82xx_npiv:
                                                    MIN_MULTI_ID_FABRIC - 1;
                                }
                                qla2x00_get_resource_cnts(vha, NULL,
-                                   &ha->fw_xcb_count, NULL, NULL,
+                                   &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
                                    &ha->max_npiv_vports, NULL);
 
+                               /*
+                                * Allocate the array of outstanding commands
+                                * now that we know the firmware resources.
+                                */
+                               rval = qla2x00_alloc_outstanding_cmds(ha,
+                                   vha->req);
+                               if (rval != QLA_SUCCESS)
+                                       goto failed;
+
                                if (!fw_major_version && ql2xallocfwdump
                                    && !IS_QLA82XX(ha))
                                        qla2x00_alloc_fw_dump(vha);
@@ -1914,7 +1964,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
                WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
                WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
        }
-       qlt_24xx_config_rings(vha, reg);
+       qlt_24xx_config_rings(vha);
 
        /* PCI posting */
        RD_REG_DWORD(&ioreg->hccr);
@@ -1948,7 +1998,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                req = ha->req_q_map[que];
                if (!req)
                        continue;
-               for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+               for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                        req->outstanding_cmds[cnt] = NULL;
 
                req->current_outstanding_cmd = 1;
@@ -2157,6 +2207,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        char            connect_type[22];
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
        /* Get host addresses. */
        rval = qla2x00_get_adapter_id(vha,
@@ -2170,6 +2221,13 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
                } else {
                        ql_log(ql_log_warn, vha, 0x2009,
                            "Unable to get host loop ID.\n");
+                       if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+                           (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+                               ql_log(ql_log_warn, vha, 0x1151,
+                                   "Doing link init.\n");
+                               if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+                                       return rval;
+                       }
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                }
                return (rval);
@@ -2690,7 +2748,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
        fcport->loop_id = FC_NO_LOOP_ID;
        qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        fcport->supported_classes = FC_COS_UNSPECIFIED;
-       fcport->scan_state = QLA_FCPORT_SCAN_NONE;
 
        return fcport;
 }
@@ -3103,7 +3160,7 @@ static int
 qla2x00_configure_fabric(scsi_qla_host_t *vha)
 {
        int     rval;
-       fc_port_t       *fcport;
+       fc_port_t       *fcport, *fcptemp;
        uint16_t        next_loopid;
        uint16_t        mb[MAILBOX_REGISTER_COUNT];
        uint16_t        loop_id;
@@ -3141,7 +3198,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                    0xfc, mb, BIT_1|BIT_0);
                if (rval != QLA_SUCCESS) {
                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                       break;
+                       return rval;
                }
                if (mb[0] != MBS_COMMAND_COMPLETE) {
                        ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -3173,16 +3230,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        }
                }
 
+#define QLA_FCPORT_SCAN                1
+#define QLA_FCPORT_FOUND       2
+
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                       fcport->scan_state = QLA_FCPORT_SCAN;
+               }
+
                rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
                if (rval != QLA_SUCCESS)
                        break;
 
-               /* Add new ports to existing port list */
-               list_splice_tail_init(&new_fcports, &vha->vp_fcports);
-
-               /* Starting free loop ID. */
-               next_loopid = ha->min_external_loopid;
-
+               /*
+                * Logout all previous fabric devices marked lost, except
+                * FCP2 devices.
+                */
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                                break;
@@ -3190,8 +3252,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
                                continue;
 
-                       /* Logout lost/gone fabric devices (non-FCP2) */
-                       if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
+                       if (fcport->scan_state == QLA_FCPORT_SCAN &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
                                qla2x00_mark_device_lost(vha, fcport,
                                    ql2xplogiabsentdevice, 0);
@@ -3204,30 +3265,74 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                                            fcport->d_id.b.domain,
                                            fcport->d_id.b.area,
                                            fcport->d_id.b.al_pa);
+                                       fcport->loop_id = FC_NO_LOOP_ID;
                                }
-                               continue;
                        }
-                       fcport->scan_state = QLA_FCPORT_SCAN_NONE;
-
-                       /* Login fabric devices that need a login */
-                       if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
-                           atomic_read(&vha->loop_down_timer) == 0) {
-                               if (fcport->loop_id == FC_NO_LOOP_ID) {
-                                       fcport->loop_id = next_loopid;
-                                       rval = qla2x00_find_new_loop_id(
-                                           base_vha, fcport);
-                                       if (rval != QLA_SUCCESS) {
-                                               /* Ran out of IDs to use */
-                                               continue;
-                                       }
+               }
+
+               /* Starting free loop ID. */
+               next_loopid = ha->min_external_loopid;
+
+               /*
+                * Scan through our port list and login entries that need to be
+                * logged in.
+                */
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                       if (atomic_read(&vha->loop_down_timer) ||
+                           test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+                               break;
+
+                       if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+                           (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+                               continue;
+
+                       if (fcport->loop_id == FC_NO_LOOP_ID) {
+                               fcport->loop_id = next_loopid;
+                               rval = qla2x00_find_new_loop_id(
+                                   base_vha, fcport);
+                               if (rval != QLA_SUCCESS) {
+                                       /* Ran out of IDs to use */
+                                       break;
                                }
                        }
+                       /* Login and update database */
+                       qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+               }
+
+               /* Exit if out of loop IDs. */
+               if (rval != QLA_SUCCESS) {
+                       break;
+               }
+
+               /*
+                * Login and add the new devices to our port list.
+                */
+               list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+                       if (atomic_read(&vha->loop_down_timer) ||
+                           test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+                               break;
+
+                       /* Find a new loop ID to use. */
+                       fcport->loop_id = next_loopid;
+                       rval = qla2x00_find_new_loop_id(base_vha, fcport);
+                       if (rval != QLA_SUCCESS) {
+                               /* Ran out of IDs to use */
+                               break;
+                       }
 
                        /* Login and update database */
                        qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+                       list_move_tail(&fcport->list, &vha->vp_fcports);
                }
        } while (0);
 
+       /* Free all new device structures not processed. */
+       list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+               list_del(&fcport->list);
+               kfree(fcport);
+       }
+
        if (rval) {
                ql_dbg(ql_dbg_disc, vha, 0x2068,
                    "Configure fabric error exit rval=%d.\n", rval);
@@ -3263,8 +3368,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
        int             first_dev, last_dev;
        port_id_t       wrap = {}, nxt_d_id;
        struct qla_hw_data *ha = vha->hw;
-       struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
-       struct scsi_qla_host *tvp;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
        rval = QLA_SUCCESS;
 
@@ -3377,22 +3481,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                        continue;
 
                /* Bypass virtual ports of the same host. */
-               found = 0;
-               if (ha->num_vhosts) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&ha->vport_slock, flags);
-                       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                               if (new_fcport->d_id.b24 == vp->d_id.b24) {
-                                       found = 1;
-                                       break;
-                               }
-                       }
-                       spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-                       if (found)
-                               continue;
-               }
+               if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
+                       continue;
 
                /* Bypass if same domain and area of adapter. */
                if (((new_fcport->d_id.b24 & 0xffff00) ==
@@ -3417,7 +3507,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                            WWN_SIZE))
                                continue;
 
-                       fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
+                       fcport->scan_state = QLA_FCPORT_FOUND;
 
                        found++;
 
@@ -5004,7 +5094,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
        return rval;
 }
 
-#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
+#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
 
 int
 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
@@ -5529,6 +5619,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        if (IS_T10_PI_CAPABLE(ha))
                nv->frame_payload_size &= ~7;
 
+       qlt_81xx_config_nvram_stage1(vha, nv);
+
        /* Reset Initialization control block */
        memset(icb, 0, ha->init_cb_size);
 
@@ -5569,6 +5661,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
            "QLE8XXX");
 
+       qlt_81xx_config_nvram_stage2(vha, icb);
+
        /* Use alternate WWN? */
        if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
                memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
index c0462c0..68e2c4a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -197,6 +197,13 @@ done:
        return sp;
 }
 
+static inline void
+qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+{
+       mempool_free(sp, vha->hw->srb_mempool);
+       QLA_VHA_MARK_NOT_BUSY(vha);
+}
+
 static inline void
 qla2x00_init_timer(srb_t *sp, unsigned long tmo)
 {
@@ -213,3 +220,22 @@ qla2x00_gid_list_size(struct qla_hw_data *ha)
 {
        return sizeof(struct gid_list_info) * ha->max_fibre_devices;
 }
+
+static inline void
+qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
+{
+       if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
+               return;
+
+       /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
+       if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
+           HOST_QUEUE_RAMPDOWN_INTERVAL)))
+               return;
+
+       /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
+       if (time_before(jiffies, (vha->hw->host_last_rampup_time +
+           HOST_QUEUE_RAMPUP_INTERVAL)))
+               return;
+
+       set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
+}
index a481684..d263031 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -349,14 +349,14 @@ qla2x00_start_scsi(srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
-       if (index == MAX_OUTSTANDING_COMMANDS)
+       if (index == req->num_outstanding_cmds)
                goto queuing_error;
 
        /* Map the sg table so we have an accurate count of sg entries needed */
@@ -1467,16 +1467,15 @@ qla24xx_start_scsi(srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
-       if (index == MAX_OUTSTANDING_COMMANDS) {
+       if (index == req->num_outstanding_cmds)
                goto queuing_error;
-       }
 
        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
@@ -1641,15 +1640,15 @@ qla24xx_dif_start_scsi(srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
 
-       if (index == MAX_OUTSTANDING_COMMANDS)
+       if (index == req->num_outstanding_cmds)
                goto queuing_error;
 
        /* Compute number of required data segments */
@@ -1822,14 +1821,14 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
-       if (index == MAX_OUTSTANDING_COMMANDS) {
+       if (index == req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x700b,
                    "No room on outstanding cmd array.\n");
                goto queuing_error;
@@ -2263,14 +2262,14 @@ qla82xx_start_scsi(srb_t *sp)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-               if (handle == MAX_OUTSTANDING_COMMANDS)
+               if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
-       if (index == MAX_OUTSTANDING_COMMANDS)
+       if (index == req->num_outstanding_cmds)
                goto queuing_error;
 
        /* Map the sg table so we have an accurate count of sg entries needed */
@@ -2767,15 +2766,15 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
 
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
-       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
-       if (handle == MAX_OUTSTANDING_COMMANDS)
+       if (handle == req->num_outstanding_cmds)
                handle = 1;
        if (!req->outstanding_cmds[handle])
                break;
        }
 
-       if (index == MAX_OUTSTANDING_COMMANDS) {
+       if (index == req->num_outstanding_cmds) {
                rval = EXT_STATUS_BUSY;
                goto queuing_error;
        }
index 873c820..e9dbd74 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -13,6 +13,8 @@
 #include <scsi/scsi_bsg_fc.h>
 #include <scsi/scsi_eh.h>
 
+#include "qla_target.h"
+
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_process_completed_request(struct scsi_qla_host *,
        struct req_que *, uint32_t);
@@ -489,10 +491,37 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
        if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
                ql_log(ql_log_info, vha, 0x506a,
                    "IDC Device-State changed = 0x%x.\n", mb[4]);
+               if (ha->flags.nic_core_reset_owner)
+                       return;
                qla83xx_schedule_work(vha, MBA_IDC_AEN);
        }
 }
 
+int
+qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
+{
+       struct qla_hw_data *ha = vha->hw;
+       scsi_qla_host_t *vp;
+       uint32_t vp_did;
+       unsigned long flags;
+       int ret = 0;
+
+       if (!ha->num_vhosts)
+               return ret;
+
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               vp_did = vp->d_id.b24;
+               if (vp_did == rscn_entry) {
+                       ret = 1;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+       return ret;
+}
+
 /**
  * qla2x00_async_event() - Process aynchronous events.
  * @ha: SCSI driver HA context
@@ -899,6 +928,10 @@ skip_rio:
                /* Ignore reserved bits from RSCN-payload. */
                rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
 
+               /* Skip RSCNs for virtual ports on the same physical port */
+               if (qla2x00_is_a_vp_did(vha, rscn_entry))
+                       break;
+
                atomic_set(&vha->loop_down_timer, 0);
                vha->flags.management_server_logged_in = 0;
 
@@ -983,14 +1016,25 @@ skip_rio:
                    mb[1], mb[2], mb[3]);
                break;
        case MBA_IDC_NOTIFY:
-               /* See if we need to quiesce any I/O */
-               if (IS_QLA8031(vha->hw))
-                       if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
-                           (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
+               if (IS_QLA8031(vha->hw)) {
+                       mb[4] = RD_REG_WORD(&reg24->mailbox4);
+                       if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+                           (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
+                           (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
                                set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+                               /*
+                                * Extend loop down timer since port is active.
+                                */
+                               if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+                                       atomic_set(&vha->loop_down_timer,
+                                           LOOP_DOWN_TIME);
                                qla2xxx_wake_dpc(vha);
                        }
+               }
        case MBA_IDC_COMPLETE:
+               if (ha->notify_lb_portup_comp)
+                       complete(&ha->lb_portup_comp);
+               /* Fallthru */
        case MBA_IDC_TIME_EXT:
                if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
                        qla81xx_idc_event(vha, mb[0], mb[1]);
@@ -1029,7 +1073,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
        struct qla_hw_data *ha = vha->hw;
 
        /* Validate handle. */
-       if (index >= MAX_OUTSTANDING_COMMANDS) {
+       if (index >= req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x3014,
                    "Invalid SCSI command index (%x).\n", index);
 
@@ -1067,7 +1111,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
        uint16_t index;
 
        index = LSW(pkt->handle);
-       if (index >= MAX_OUTSTANDING_COMMANDS) {
+       if (index >= req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x5031,
                    "Invalid command index (%x).\n", index);
                if (IS_QLA82XX(ha))
@@ -1740,7 +1784,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
        sts24 = (struct sts_entry_24xx *) pkt;
 
        /* Validate handle. */
-       if (index >= MAX_OUTSTANDING_COMMANDS) {
+       if (index >= req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x70af,
                    "Invalid SCSI completion handle 0x%x.\n", index);
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1910,9 +1954,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        req = ha->req_q_map[que];
 
        /* Validate handle. */
-       if (handle < MAX_OUTSTANDING_COMMANDS) {
+       if (handle < req->num_outstanding_cmds)
                sp = req->outstanding_cmds[handle];
-       } else
+       else
                sp = NULL;
 
        if (sp == NULL) {
@@ -1934,6 +1978,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 
        /* Fast path completion. */
        if (comp_status == CS_COMPLETE && scsi_status == 0) {
+               qla2x00_do_host_ramp_up(vha);
                qla2x00_process_completed_request(vha, req, handle);
 
                return;
@@ -2193,6 +2238,9 @@ out:
                    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
                    resid_len, fw_resid_len);
 
+       if (!res)
+               qla2x00_do_host_ramp_up(vha);
+
        if (rsp->status_srb == NULL)
                sp->done(ha, sp, res);
 }
@@ -2747,6 +2795,12 @@ static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
        { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
 };
 
+static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
+       { "qla2xxx (default)", qla24xx_msix_default },
+       { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+       { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+};
+
 static void
 qla24xx_disable_msix(struct qla_hw_data *ha)
 {
@@ -2827,9 +2881,13 @@ msix_failed:
        }
 
        /* Enable MSI-X vectors for the base queue */
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < ha->msix_count; i++) {
                qentry = &ha->msix_entries[i];
-               if (IS_QLA82XX(ha)) {
+               if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+                       ret = request_irq(qentry->vector,
+                               qla83xx_msix_entries[i].handler,
+                               0, qla83xx_msix_entries[i].name, rsp);
+               } else if (IS_QLA82XX(ha)) {
                        ret = request_irq(qentry->vector,
                                qla82xx_msix_entries[i].handler,
                                0, qla82xx_msix_entries[i].name, rsp);
index 68c55ea..186dd59 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -900,13 +900,13 @@ qla2x00_abort_command(srb_t *sp)
            "Entered %s.\n", __func__);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+       for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
                if (req->outstanding_cmds[handle] == sp)
                        break;
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (handle == MAX_OUTSTANDING_COMMANDS) {
+       if (handle == req->num_outstanding_cmds) {
                /* command not found */
                return QLA_FUNCTION_FAILED;
        }
@@ -1632,6 +1632,54 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
        return rval;
 }
 
+/*
+ * qla24xx_link_initialization
+ *     Issue link initialization mailbox command.
+ *
+ * Input:
+ *     ha = adapter block pointer.
+ *     TARGET_QUEUE_LOCK must be released.
+ *     ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *     qla2x00 local function return status code.
+ *
+ * Context:
+ *     Kernel context.
+ */
+int
+qla24xx_link_initialize(scsi_qla_host_t *vha)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
+           "Entered %s.\n", __func__);
+
+       if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       mcp->mb[0] = MBC_LINK_INITIALIZATION;
+       mcp->mb[1] = BIT_6|BIT_4;
+       mcp->mb[2] = 0;
+       mcp->mb[3] = 0;
+       mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
 /*
  * qla2x00_lip_reset
  *     Issue LIP reset mailbox command.
@@ -2535,12 +2583,12 @@ qla24xx_abort_command(srb_t *sp)
            "Entered %s.\n", __func__);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+       for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
                if (req->outstanding_cmds[handle] == sp)
                        break;
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       if (handle == MAX_OUTSTANDING_COMMANDS) {
+       if (handle == req->num_outstanding_cmds) {
                /* Command not found. */
                return QLA_FUNCTION_FAILED;
        }
@@ -3093,6 +3141,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long   flags;
+       int found;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
            "Entered %s.\n", __func__);
@@ -3128,13 +3177,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        return;
                }
 
+               found = 0;
                spin_lock_irqsave(&ha->vport_slock, flags);
-               list_for_each_entry(vp, &ha->vp_list, list)
-                       if (vp_idx == vp->vp_idx)
+               list_for_each_entry(vp, &ha->vp_list, list) {
+                       if (vp_idx == vp->vp_idx) {
+                               found = 1;
                                break;
+                       }
+               }
                spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-               if (!vp)
+               if (!found)
                        return;
 
                vp->d_id.b.domain = rptid_entry->port_id[2];
@@ -3813,6 +3866,97 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
        return rval;
 }
 
+int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+       int len;
+       uint16_t dwlen;
+       uint8_t *str;
+       dma_addr_t str_dma;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+           "Entered %s.\n", __func__);
+
+       str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+       if (!str) {
+               ql_log(ql_log_warn, vha, 0x1156,
+                   "Failed to allocate driver version param.\n");
+               return QLA_MEMORY_ALLOC_FAILED;
+       }
+
+       memcpy(str, "\x7\x3\x11\x0", 4);
+       dwlen = str[0];
+       len = dwlen * sizeof(uint32_t) - 4;
+       memset(str + 4, 0, len);
+       if (len > strlen(version))
+               len = strlen(version);
+       memcpy(str + 4, version, len);
+
+       mcp->mb[0] = MBC_SET_RNID_PARAMS;
+       mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+       mcp->mb[2] = MSW(LSD(str_dma));
+       mcp->mb[3] = LSW(LSD(str_dma));
+       mcp->mb[6] = MSW(MSD(str_dma));
+       mcp->mb[7] = LSW(MSD(str_dma));
+       mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1157,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+                   "Done %s.\n", __func__);
+       }
+
+       dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+       return rval;
+}
+
+static int
+qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_FWI2_CAPABLE(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_GET_RNID_PARAMS;
+       mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
+       mcp->out_mb = MBX_1|MBX_0;
+       mcp->in_mb = MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+       *temp = mcp->mb[1];
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x115a,
+                   "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
 int
 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
        uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
@@ -4415,38 +4559,45 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
 }
 
 int
-qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
 {
-       int rval;
-       uint8_t byte;
+       int rval = QLA_FUNCTION_FAILED;
        struct qla_hw_data *ha = vha->hw;
+       uint8_t byte;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
            "Entered %s.\n", __func__);
 
-       /* Integer part */
-       rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
-               BIT_13|BIT_12|BIT_0);
-       if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
-               ha->flags.thermal_supported = 0;
-               goto fail;
+       if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
+               rval = qla2x00_read_sfp(vha, 0, &byte,
+                   0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
+               *temp = byte;
+               if (rval == QLA_SUCCESS)
+                       goto done;
+
+               ql_log(ql_log_warn, vha, 0x10c9,
+                   "Thermal not supported by I2C.\n");
+               ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
        }
-       *temp = byte;
 
-       /* Fraction part */
-       rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
-               BIT_13|BIT_12|BIT_0);
-       if (rval != QLA_SUCCESS) {
-               ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
-               ha->flags.thermal_supported = 0;
-               goto fail;
+       if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
+               rval = qla2x00_read_asic_temperature(vha, temp);
+               if (rval == QLA_SUCCESS)
+                       goto done;
+
+               ql_log(ql_log_warn, vha, 0x1019,
+                   "Thermal not supported by ISP.\n");
+               ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
        }
-       *frac = (byte >> 6) * 25;
 
+       ql_log(ql_log_warn, vha, 0x1150,
+           "Thermal not supported by this card "
+           "(ignoring further requests).\n");
+       return  rval;
+
+done:
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
            "Done %s.\n", __func__);
-fail:
        return rval;
 }
 
index 20fd974..f868a9f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -523,6 +523,7 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
+       kfree(req->outstanding_cmds);
        kfree(req);
        req = NULL;
 }
@@ -649,6 +650,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
                goto que_failed;
        }
 
+       ret = qla2x00_alloc_outstanding_cmds(ha, req);
+       if (ret != QLA_SUCCESS)
+               goto que_failed;
+
        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
@@ -685,7 +690,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);
-       for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+       for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;
 
index 3e3f593..10754f5 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -847,14 +847,21 @@ static int
 qla82xx_rom_lock(struct qla_hw_data *ha)
 {
        int done = 0, timeout = 0;
+       uint32_t lock_owner = 0;
+       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
        while (!done) {
                /* acquire semaphore2 from PCI HW block */
                done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
                if (done == 1)
                        break;
-               if (timeout >= qla82xx_rom_lock_timeout)
+               if (timeout >= qla82xx_rom_lock_timeout) {
+                       lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+                       ql_dbg(ql_dbg_p3p, vha, 0xb085,
+                           "Failed to acquire rom lock, acquired by %d.\n",
+                           lock_owner);
                        return -1;
+               }
                timeout++;
        }
        qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
@@ -3629,7 +3636,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
                        req = ha->req_q_map[que];
                        if (!req)
                                continue;
-                       for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+                       for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                                sp = req->outstanding_cmds[cnt];
                                if (sp) {
                                        if (!sp->u.scmd.ctx ||
index 6c953e8..d268e84 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -897,7 +897,7 @@ struct ct6_dsd {
 #define FLT_REG_BOOT_CODE_82XX 0x78
 #define FLT_REG_FW_82XX                0x74
 #define FLT_REG_GOLD_FW_82XX   0x75
-#define FLT_REG_VPD_82XX       0x81
+#define FLT_REG_VPD_8XXX       0x81
 
 #define        FA_VPD_SIZE_82XX        0x400
 
index 10d23f8..2c6dd3d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -111,8 +111,7 @@ MODULE_PARM_DESC(ql2xfdmienable,
                "Enables FDMI registrations. "
                "0 - no FDMI. Default is 1 - perform FDMI.");
 
-#define MAX_Q_DEPTH    32
-static int ql2xmaxqdepth = MAX_Q_DEPTH;
+int ql2xmaxqdepth = MAX_Q_DEPTH;
 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
                "Maximum queue depth to set for each LUN. "
@@ -360,6 +359,9 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
                (req->length + 1) * sizeof(request_t),
                req->ring, req->dma);
 
+       if (req)
+               kfree(req->outstanding_cmds);
+
        kfree(req);
        req = NULL;
 }
@@ -628,7 +630,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        }
 
        CMD_SP(cmd) = NULL;
-       mempool_free(sp, ha->srb_mempool);
+       qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
 static void
@@ -716,9 +718,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                goto qc24_target_busy;
        }
 
-       sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
-       if (!sp)
+       sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+       if (!sp) {
+               set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
                goto qc24_host_busy;
+       }
 
        sp->u.scmd.cmd = cmd;
        sp->type = SRB_SCSI_CMD;
@@ -731,6 +735,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
                    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+               set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
                goto qc24_host_busy_free_sp;
        }
 
@@ -1010,7 +1015,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
        spin_lock_irqsave(&ha->hardware_lock, flags);
        req = vha->req;
        for (cnt = 1; status == QLA_SUCCESS &&
-               cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+               cnt < req->num_outstanding_cmds; cnt++) {
                sp = req->outstanding_cmds[cnt];
                if (!sp)
                        continue;
@@ -1300,14 +1305,14 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
        }
 
        if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
+               atomic_set(&vha->loop_state, LOOP_DOWN);
+               atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+               qla2x00_mark_all_devices_lost(vha, 0);
                ret = qla2x00_full_login_lip(vha);
                if (ret != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_taskm, vha, 0x802d,
                            "full_login_lip=%d.\n", ret);
                }
-               atomic_set(&vha->loop_state, LOOP_DOWN);
-               atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-               qla2x00_mark_all_devices_lost(vha, 0);
        }
 
        if (ha->flags.enable_lip_reset) {
@@ -1337,7 +1342,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                req = ha->req_q_map[que];
                if (!req)
                        continue;
-               for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+               if (!req->outstanding_cmds)
+                       continue;
+               for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp) {
                                req->outstanding_cmds[cnt] = NULL;
@@ -1453,6 +1460,81 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
        return tag_type;
 }
 
+static void
+qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
+{
+       scsi_qla_host_t *vp;
+       struct Scsi_Host *shost;
+       struct scsi_device *sdev;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+
+       ha->host_last_rampdown_time = jiffies;
+
+       if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun)
+               return;
+
+       if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun)
+               ha->cfg_lun_q_depth = vha->host->cmd_per_lun;
+       else
+               ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2;
+
+       /*
+        * Geometrically ramp down the queue depth for all devices on this
+        * adapter
+        */
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               shost = vp->host;
+               shost_for_each_device(sdev, shost) {
+                       if (sdev->queue_depth > shost->cmd_per_lun) {
+                               if (sdev->queue_depth < ha->cfg_lun_q_depth)
+                                       continue;
+                               ql_log(ql_log_warn, vp, 0x3031,
+                                   "%ld:%d:%d: Ramping down queue depth to %d",
+                                   vp->host_no, sdev->id, sdev->lun,
+                                   ha->cfg_lun_q_depth);
+                               qla2x00_change_queue_depth(sdev,
+                                   ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT);
+                       }
+               }
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+       return;
+}
+
+static void
+qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha)
+{
+       scsi_qla_host_t *vp;
+       struct Scsi_Host *shost;
+       struct scsi_device *sdev;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+
+       ha->host_last_rampup_time = jiffies;
+       ha->cfg_lun_q_depth++;
+
+       /*
+        * Linearly ramp up the queue depth for all devices on this
+        * adapter
+        */
+       spin_lock_irqsave(&ha->vport_slock, flags);
+       list_for_each_entry(vp, &ha->vp_list, list) {
+               shost = vp->host;
+               shost_for_each_device(sdev, shost) {
+                       if (sdev->queue_depth > ha->cfg_lun_q_depth)
+                               continue;
+                       qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth,
+                           SCSI_QDEPTH_RAMP_UP);
+               }
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+       return;
+}
+
 /**
  * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
  * @ha: HA context
@@ -1730,6 +1812,9 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
 
 mqiobase_exit:
        ha->msix_count = ha->max_rsp_queues + 1;
+
+       qlt_83xx_iospace_config(ha);
+
        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
            "MSIX Count:%d.\n", ha->msix_count);
        return 0;
@@ -2230,6 +2315,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ha->init_cb_size = sizeof(init_cb_t);
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        ha->optrom_size = OPTROM_SIZE_2300;
+       ha->cfg_lun_q_depth = ql2xmaxqdepth;
 
        /* Assign ISP specific operations. */
        if (IS_QLA2100(ha)) {
@@ -2307,6 +2393,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
                ha->gid_list_info_size = 8;
@@ -2338,6 +2425,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
                ha->gid_list_info_size = 8;
@@ -2377,6 +2465,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        complete(&ha->mbx_cmd_comp);
        init_completion(&ha->mbx_intr_comp);
        init_completion(&ha->dcbx_comp);
+       init_completion(&ha->lb_portup_comp);
 
        set_bit(0, (unsigned long *) ha->vp_idx_map);
 
@@ -2720,6 +2809,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
        scsi_qla_host_t *vha;
        struct qla_hw_data  *ha;
 
+       if (!atomic_read(&pdev->enable_cnt))
+               return;
+
        vha = pci_get_drvdata(pdev);
        ha = vha->hw;
 
@@ -3974,6 +4066,8 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
        uint32_t idc_lck_rcvry_stage_mask = 0x3;
        uint32_t idc_lck_rcvry_owner_mask = 0x3c;
        struct qla_hw_data *ha = base_vha->hw;
+       ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
+           "Trying force recovery of the IDC lock.\n");
 
        rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
        if (rval)
@@ -4065,6 +4159,7 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
 {
        uint16_t options = (requester_id << 15) | BIT_6;
        uint32_t data;
+       uint32_t lock_owner;
        struct qla_hw_data *ha = base_vha->hw;
 
        /* IDC-lock implementation using driver-lock/lock-id remote registers */
@@ -4076,8 +4171,11 @@ retry_lock:
                        qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
                            ha->portnum);
                } else {
+                       qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+                           &lock_owner);
                        ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
-                           "Failed to acquire IDC lock. retrying...\n");
+                           "Failed to acquire IDC lock, acquired by %d, "
+                           "retrying...\n", lock_owner);
 
                        /* Retry/Perform IDC-Lock recovery */
                        if (qla83xx_idc_lock_recovery(base_vha)
@@ -4605,6 +4703,18 @@ qla2x00_do_dpc(void *data)
                        qla2xxx_flash_npiv_conf(base_vha);
                }
 
+               if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH,
+                   &base_vha->dpc_flags)) {
+                       /* Prevents simultaneous ramp up and down */
+                       clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+                           &base_vha->dpc_flags);
+                       qla2x00_host_ramp_down_queuedepth(base_vha);
+               }
+
+               if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+                   &base_vha->dpc_flags))
+                       qla2x00_host_ramp_up_queuedepth(base_vha);
+
                if (!ha->interrupts_on)
                        ha->isp_ops->enable_intrs(ha);
 
@@ -4733,7 +4843,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
                                    cpu_flags);
                                req = ha->req_q_map[0];
                                for (index = 1;
-                                   index < MAX_OUTSTANDING_COMMANDS;
+                                   index < req->num_outstanding_cmds;
                                    index++) {
                                        fc_port_t *sfcp;
 
@@ -4802,7 +4912,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
            test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
            test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
            test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
-           test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+           test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+           test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) ||
+           test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) {
                ql_dbg(ql_dbg_timer, vha, 0x600b,
                    "isp_abort_needed=%d loop_resync_needed=%d "
                    "fcport_update_needed=%d start_dpc=%d "
@@ -4815,12 +4927,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_timer, vha, 0x600c,
                    "beacon_blink_needed=%d isp_unrecoverable=%d "
                    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
-                   "relogin_needed=%d.\n",
+                   "relogin_needed=%d, host_ramp_down_needed=%d "
+                   "host_ramp_up_needed=%d.\n",
                    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
                    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
                    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
                    test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
-                   test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+                   test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+                   test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags),
+                   test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags));
                qla2xxx_wake_dpc(vha);
        }
 
index 892a81e..46ef0ac 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 32fdc2a..3bef673 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -798,20 +798,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                case FLT_REG_BOOTLOAD_82XX:
                        ha->flt_region_bootload = start;
                        break;
-               case FLT_REG_VPD_82XX:
-                       ha->flt_region_vpd = start;
-                       break;
-               case FLT_REG_FCOE_VPD_0:
-                       if (!IS_QLA8031(ha))
-                               break;
-                       ha->flt_region_vpd_nvram = start;
-                       if (ha->flags.port0)
-                               ha->flt_region_vpd = start;
-                       break;
-               case FLT_REG_FCOE_VPD_1:
-                       if (!IS_QLA8031(ha))
-                               break;
-                       if (!ha->flags.port0)
+               case FLT_REG_VPD_8XXX:
+                       if (IS_CNA_CAPABLE(ha))
                                ha->flt_region_vpd = start;
                        break;
                case FLT_REG_FCOE_NVRAM_0:
index 80f4b84..61b5d8c 100644 (file)
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(qlini_mode,
        "\"disabled\" - initiator mode will never be enabled; "
        "\"enabled\" (default) - initiator mode will always stay enabled.");
 
-static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
 
 /*
  * From scsi/fc/fc_fcp.h
@@ -1119,6 +1119,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
        nack->u.isp24.status = ntfy->u.isp24.status;
        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+       nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
@@ -1570,7 +1571,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
        /* always increment cmd handle */
        do {
                ++h;
-               if (h > MAX_OUTSTANDING_COMMANDS)
+               if (h > DEFAULT_OUTSTANDING_COMMANDS)
                        h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
                if (h == ha->tgt.current_handle) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe04e,
@@ -2441,7 +2442,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
                        return NULL;
                }
                /* handle-1 is actually used */
-               if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+               if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe052,
                            "qla_target(%d): Wrong handle %x received\n",
                            vha->vp_idx, handle);
@@ -4305,6 +4306,12 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
        if (!QLA_TGT_MODE_ENABLED())
                return 0;
 
+       if (!IS_TGT_MODE_CAPABLE(ha)) {
+               ql_log(ql_log_warn, base_vha, 0xe070,
+                   "This adapter does not support target mode.\n");
+               return 0;
+       }
+
        ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
            "Registering target for host %ld(%p)", base_vha->host_no, ha);
 
@@ -4666,7 +4673,6 @@ void
 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
        struct atio_from_isp *pkt;
        int cnt, i;
 
@@ -4694,26 +4700,28 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
        }
 
        /* Adjust ring index */
-       WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+       WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
 }
 
 void
-qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+qlt_24xx_config_rings(struct scsi_qla_host *vha)
 {
        struct qla_hw_data *ha = vha->hw;
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
 
-/* FIXME: atio_q in/out for ha->mqenable=1..? */
-       if (ha->mqenable) {
-#if 0
-               WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
-               WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
-               RD_REG_DWORD(&reg->isp25mq.atio_q_out);
-#endif
-       } else {
-               /* Setup APTIO registers for target mode */
-               WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
-               WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
-               RD_REG_DWORD(&reg->isp24.atio_q_out);
+       WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
+       WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
+       RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+       if (IS_ATIO_MSIX_CAPABLE(ha)) {
+               struct qla_msix_entry *msix = &ha->msix_entries[2];
+               struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
+               icb->msix_atio = cpu_to_le16(msix->entry);
+               ql_dbg(ql_dbg_init, vha, 0xf072,
+                   "Registering ICB vector 0x%x for atio que.\n",
+                   msix->entry);
        }
 }
 
@@ -4796,6 +4804,101 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
        }
 }
 
+void
+qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       if (qla_tgt_mode_enabled(vha)) {
+               if (!ha->tgt.saved_set) {
+                       /* We save only once */
+                       ha->tgt.saved_exchange_count = nv->exchange_count;
+                       ha->tgt.saved_firmware_options_1 =
+                           nv->firmware_options_1;
+                       ha->tgt.saved_firmware_options_2 =
+                           nv->firmware_options_2;
+                       ha->tgt.saved_firmware_options_3 =
+                           nv->firmware_options_3;
+                       ha->tgt.saved_set = 1;
+               }
+
+               nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+               /* Enable target mode */
+               nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+               /* Disable ini mode, if requested */
+               if (!qla_ini_mode_enabled(vha))
+                       nv->firmware_options_1 |=
+                           __constant_cpu_to_le32(BIT_5);
+
+               /* Disable Full Login after LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+               /* Enable initial LIP */
+               nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+               /* Enable FC tapes support */
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+               /* Disable Full Login after LIP */
+               nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+               /* Enable target PRLI control */
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+       } else {
+               if (ha->tgt.saved_set) {
+                       nv->exchange_count = ha->tgt.saved_exchange_count;
+                       nv->firmware_options_1 =
+                           ha->tgt.saved_firmware_options_1;
+                       nv->firmware_options_2 =
+                           ha->tgt.saved_firmware_options_2;
+                       nv->firmware_options_3 =
+                           ha->tgt.saved_firmware_options_3;
+               }
+               return;
+       }
+
+       /* out-of-order frames reassembly */
+       nv->firmware_options_3 |= BIT_6|BIT_9;
+
+       if (ha->tgt.enable_class_2) {
+               if (vha->flags.init_done)
+                       fc_host_supported_classes(vha->host) =
+                               FC_COS_CLASS2 | FC_COS_CLASS3;
+
+               nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+       } else {
+               if (vha->flags.init_done)
+                       fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+               nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+       }
+}
+
+void
+qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
+       struct init_cb_81xx *icb)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       if (ha->tgt.node_name_set) {
+               memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+               icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+       }
+}
+
+void
+qlt_83xx_iospace_config(struct qla_hw_data *ha)
+{
+       if (!QLA_TGT_MODE_ENABLED())
+               return;
+
+       ha->msix_count += 1; /* For ATIO Q */
+}
+
 int
 qlt_24xx_process_response_error(struct scsi_qla_host *vha,
        struct sts_entry_24xx *pkt)
@@ -4828,11 +4931,41 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
+       if  (ha->mqenable || IS_QLA83XX(ha)) {
+               ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+               ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+       } else {
+               ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
+               ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
+       }
+
        mutex_init(&ha->tgt.tgt_mutex);
        mutex_init(&ha->tgt.tgt_host_action_mutex);
        qlt_clear_mode(base_vha);
 }
 
+irqreturn_t
+qla83xx_msix_atio_q(int irq, void *dev_id)
+{
+       struct rsp_que *rsp;
+       scsi_qla_host_t *vha;
+       struct qla_hw_data *ha;
+       unsigned long flags;
+
+       rsp = (struct rsp_que *) dev_id;
+       ha = rsp->hw;
+       vha = pci_get_drvdata(ha->pdev);
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       qlt_24xx_process_atio_queue(vha);
+       qla24xx_process_response_queue(vha, rsp);
+
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
 int
 qlt_mem_alloc(struct qla_hw_data *ha)
 {
index bad7495..ff9ccb9 100644 (file)
@@ -60,8 +60,9 @@
  * multi-complete should come to the tgt driver or be handled there by qla2xxx
  */
 #define CTIO_COMPLETION_HANDLE_MARK    BIT_29
-#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
-#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than "
+       "DEFAULT_OUTSTANDING_COMMANDS"
 #endif
 #define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
 
@@ -161,7 +162,7 @@ struct imm_ntfy_from_isp {
                        uint16_t srr_rx_id;
                        uint16_t status;
                        uint8_t  status_subcode;
-                       uint8_t  reserved_3;
+                       uint8_t  fw_handle;
                        uint32_t exchange_address;
                        uint32_t srr_rel_offs;
                        uint16_t srr_ui;
@@ -217,7 +218,7 @@ struct nack_to_isp {
                        uint16_t srr_rx_id;
                        uint16_t status;
                        uint8_t  status_subcode;
-                       uint8_t  reserved_3;
+                       uint8_t  fw_handle;
                        uint32_t exchange_address;
                        uint32_t srr_rel_offs;
                        uint16_t srr_ui;
@@ -948,6 +949,7 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
  * is not set. Right now, ha value is ignored.
  */
 #define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+extern int ql2x_ini_mode;
 
 static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
 {
@@ -985,12 +987,15 @@ extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
 extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
 extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
 extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
-extern void qlt_24xx_config_rings(struct scsi_qla_host *,
-       device_reg_t __iomem *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *);
 extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
        struct nvram_24xx *);
 extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
        struct init_cb_24xx *);
+extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
+       struct init_cb_81xx *);
+extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
+       struct nvram_81xx *);
 extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
        struct sts_entry_24xx *);
 extern void qlt_modify_vp_config(struct scsi_qla_host *,
@@ -1000,5 +1005,7 @@ extern int qlt_mem_alloc(struct qla_hw_data *);
 extern void qlt_mem_free(struct qla_hw_data *);
 extern void qlt_stop_phase1(struct qla_tgt *);
 extern void qlt_stop_phase2(struct qla_tgt *);
+extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+extern void qlt_83xx_iospace_config(struct qla_hw_data *);
 
 #endif /* __QLA_TARGET_H */
index 49697ca..2b6e478 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2013 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 6e9af20..5d8fe4f 100644 (file)
@@ -538,7 +538,7 @@ struct device_info {
        int port_num;
 };
 
-static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
 {
        uint32_t drv_active;
        uint32_t dev_part, dev_part1, dev_part2;
@@ -1351,31 +1351,58 @@ exit_start_fw:
 
 /*----------------------Interrupt Related functions ---------------------*/
 
-void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
+{
+       if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
+               qla4_8xxx_intr_disable(ha);
+}
+
+static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
 {
        uint32_t mb_int, ret;
 
-       if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
-               qla4_8xxx_mbx_intr_disable(ha);
+       if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+               ret = readl(&ha->qla4_83xx_reg->mbox_int);
+               mb_int = ret & ~INT_ENABLE_FW_MB;
+               writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+               writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+       }
+}
 
-       ret = readl(&ha->qla4_83xx_reg->mbox_int);
-       mb_int = ret & ~INT_ENABLE_FW_MB;
-       writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
-       writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+{
+       qla4_83xx_disable_mbox_intrs(ha);
+       qla4_83xx_disable_iocb_intrs(ha);
 }
 
-void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
+{
+       if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
+               qla4_8xxx_intr_enable(ha);
+               set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
+       }
+}
+
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
 {
        uint32_t mb_int;
 
-       qla4_8xxx_mbx_intr_enable(ha);
-       mb_int = INT_ENABLE_FW_MB;
-       writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
-       writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+       if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+               mb_int = INT_ENABLE_FW_MB;
+               writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+               writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+               set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
+       }
+}
 
-       set_bit(AF_INTERRUPTS_ON, &ha->flags);
+
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+{
+       qla4_83xx_enable_mbox_intrs(ha);
+       qla4_83xx_enable_iocb_intrs(ha);
 }
 
+
 void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
                              int incount)
 {
index 76819b7..19ee55a 100644 (file)
@@ -74,16 +74,22 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
                }
                break;
        case 2:
-               /* Reset HBA */
+               /* Reset HBA and collect FW dump */
                ha->isp_ops->idc_lock(ha);
                dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
                if (dev_state == QLA8XXX_DEV_READY) {
-                       ql4_printk(KERN_INFO, ha,
-                                  "%s: Setting Need reset, reset_owner is 0x%x.\n",
-                                  __func__, ha->func_num);
+                       ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
+                                  __func__);
                        qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
                                            QLA8XXX_DEV_NEED_RESET);
-                       set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+                       if (is_qla8022(ha) ||
+                           (is_qla8032(ha) &&
+                            qla4_83xx_can_perform_reset(ha))) {
+                               set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+                               set_bit(AF_FW_RECOVERY, &ha->flags);
+                               ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
+                                          __func__, ha->func_num);
+                       }
                } else
                        ql4_printk(KERN_INFO, ha,
                                   "%s: Reset not performed as device state is 0x%x\n",
index 329d553..129f5dd 100644 (file)
 #define RESPONSE_QUEUE_DEPTH           64
 #define QUEUE_SIZE                     64
 #define DMA_BUFFER_SIZE                        512
+#define IOCB_HIWAT_CUSHION             4
 
 /*
  * Misc
 #define DISABLE_ACB_TOV                        30
 #define IP_CONFIG_TOV                  30
 #define LOGIN_TOV                      12
+#define BOOT_LOGIN_RESP_TOV            60
 
 #define MAX_RESET_HA_RETRIES           2
 #define FW_ALIVE_WAIT_TOV              3
@@ -314,6 +316,7 @@ struct ql4_tuple_ddb {
  * DDB flags.
  */
 #define DF_RELOGIN             0       /* Relogin to device */
+#define DF_BOOT_TGT            1       /* Boot target entry */
 #define DF_ISNS_DISCOVERED     2       /* Device was discovered via iSNS */
 #define DF_FO_MASKED           3
 
@@ -501,6 +504,7 @@ struct scsi_qla_host {
 #define AF_INTERRUPTS_ON               6 /* 0x00000040 */
 #define AF_GET_CRASH_RECORD            7 /* 0x00000080 */
 #define AF_LINK_UP                     8 /* 0x00000100 */
+#define AF_LOOPBACK                    9 /* 0x00000200 */
 #define AF_IRQ_ATTACHED                        10 /* 0x00000400 */
 #define AF_DISABLE_ACB_COMPLETE                11 /* 0x00000800 */
 #define AF_HA_REMOVAL                  12 /* 0x00001000 */
@@ -516,6 +520,8 @@ struct scsi_qla_host {
 #define AF_8XXX_RST_OWNER              25 /* 0x02000000 */
 #define AF_82XX_DUMP_READING           26 /* 0x04000000 */
 #define AF_83XX_NO_FW_DUMP             27 /* 0x08000000 */
+#define AF_83XX_IOCB_INTR_ON           28 /* 0x10000000 */
+#define AF_83XX_MBOX_INTR_ON           29 /* 0x20000000 */
 
        unsigned long dpc_flags;
 
@@ -537,6 +543,7 @@ struct scsi_qla_host {
        uint32_t tot_ddbs;
 
        uint16_t iocb_cnt;
+       uint16_t iocb_hiwat;
 
        /* SRB cache. */
 #define SRB_MIN_REQ    128
@@ -838,7 +845,8 @@ static inline int is_aer_supported(struct scsi_qla_host *ha)
 static inline int adapter_up(struct scsi_qla_host *ha)
 {
        return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
-               (test_bit(AF_LINK_UP, &ha->flags) != 0);
+              (test_bit(AF_LINK_UP, &ha->flags) != 0) &&
+              (!test_bit(AF_LOOPBACK, &ha->flags));
 }
 
 static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
index 1c47950..ad9d2e2 100644 (file)
@@ -495,7 +495,7 @@ struct qla_flt_region {
 #define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED      0x802D
 #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD                0x802E
 #define MBOX_ASTS_IDC_COMPLETE                 0x8100
-#define MBOX_ASTS_IDC_NOTIFY                   0x8101
+#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION     0x8101
 #define MBOX_ASTS_TXSCVR_INSERTED              0x8130
 #define MBOX_ASTS_TXSCVR_REMOVED               0x8131
 
@@ -522,6 +522,10 @@ struct qla_flt_region {
 #define FLASH_OPT_COMMIT       2
 #define FLASH_OPT_RMW_COMMIT   3
 
+/* Loopback type */
+#define ENABLE_INTERNAL_LOOPBACK       0x04
+#define ENABLE_EXTERNAL_LOOPBACK       0x08
+
 /*************************************************************************/
 
 /* Host Adapter Initialization Control Block (from host) */
index 57a5a3c..982293e 100644 (file)
@@ -253,12 +253,14 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
 void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
 int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
 void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
-int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
-int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
 int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
 int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
 int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
 void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
index 1aca1b4..8fc8548 100644 (file)
@@ -195,12 +195,10 @@ exit_get_sys_info_no_free:
  * @ha: pointer to host adapter structure.
  *
  **/
-static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
+static void qla4xxx_init_local_data(struct scsi_qla_host *ha)
 {
        /* Initialize aen queue */
        ha->aen_q_count = MAX_AEN_ENTRIES;
-
-       return qla4xxx_get_firmware_status(ha);
 }
 
 static uint8_t
@@ -935,14 +933,23 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
        if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
                goto exit_init_hba;
 
+       /*
+        * For ISP83XX, mailbox and IOCB interrupts are enabled separately.
+        * Mailbox interrupts must be enabled prior to issuing any mailbox
+        * command in order to prevent the possibility of losing interrupts
+        * while switching from polling to interrupt mode. IOCB interrupts are
+        * enabled via isp_ops->enable_intrs.
+        */
+       if (is_qla8032(ha))
+               qla4_83xx_enable_mbox_intrs(ha);
+
        if (qla4xxx_about_firmware(ha) == QLA_ERROR)
                goto exit_init_hba;
 
        if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
                goto exit_init_hba;
 
-       if (qla4xxx_init_local_data(ha) == QLA_ERROR)
-               goto exit_init_hba;
+       qla4xxx_init_local_data(ha);
 
        status = qla4xxx_init_firmware(ha);
        if (status == QLA_ERROR)
index f48f37a..14fec97 100644 (file)
@@ -316,7 +316,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
                goto queuing_error;
 
        /* total iocbs active */
-       if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
+       if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
                goto queuing_error;
 
        /* Build command packet */
index 15ea814..1b83dc2 100644 (file)
@@ -581,6 +581,33 @@ exit_prq_error:
        set_bit(DPC_RESET_HA, &ha->dpc_flags);
 }
 
+/**
+ * qla4_83xx_loopback_in_progress: Is loopback in progress?
+ * @ha: Pointer to host adapter structure.
+ * @ret: 1 = loopback in progress, 0 = loopback not in progress
+ **/
+static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
+{
+       int rval = 1;
+
+       if (is_qla8032(ha)) {
+               if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
+                   (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "%s: Loopback diagnostics in progress\n",
+                                         __func__));
+                       rval = 1;
+               } else {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "%s: Loopback diagnostics not in progress\n",
+                                         __func__));
+                       rval = 0;
+               }
+       }
+
+       return rval;
+}
+
 /**
  * qla4xxx_isr_decode_mailbox - decodes mailbox status
  * @ha: Pointer to host adapter structure.
@@ -676,8 +703,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
 
                case MBOX_ASTS_LINK_DOWN:
                        clear_bit(AF_LINK_UP, &ha->flags);
-                       if (test_bit(AF_INIT_DONE, &ha->flags))
+                       if (test_bit(AF_INIT_DONE, &ha->flags)) {
                                set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+                               qla4xxx_wake_dpc(ha);
+                       }
 
                        ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
                        qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
@@ -806,7 +835,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                            " removed\n",  ha->host_no, mbox_sts[0]));
                        break;
 
-               case MBOX_ASTS_IDC_NOTIFY:
+               case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
                {
                        uint32_t opcode;
                        if (is_qla8032(ha)) {
@@ -840,6 +869,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                                DEBUG2(ql4_printk(KERN_INFO, ha,
                                                  "scsi:%ld: AEN %04x IDC Complete notification\n",
                                                  ha->host_no, mbox_sts[0]));
+
+                               if (qla4_83xx_loopback_in_progress(ha))
+                                       set_bit(AF_LOOPBACK, &ha->flags);
+                               else
+                                       clear_bit(AF_LOOPBACK, &ha->flags);
                        }
                        break;
 
@@ -1124,17 +1158,18 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
 
        /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
        if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
-               ql4_printk(KERN_ERR, ha,
-                          "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
-                          __func__);
+               DEBUG2(ql4_printk(KERN_ERR, ha,
+                                 "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
+                                 __func__));
                return IRQ_NONE;
        }
 
        /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
        if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
-               ql4_printk(KERN_ERR, ha,
-                          "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
-                          __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
+               DEBUG2(ql4_printk(KERN_ERR, ha,
+                                 "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
+                                 __func__, (leg_int_ptr & PF_BITS_MASK),
+                                 ha->pf_bit));
                return IRQ_NONE;
        }
 
@@ -1437,11 +1472,14 @@ irq_not_attached:
 
 void qla4xxx_free_irqs(struct scsi_qla_host *ha)
 {
-       if (test_bit(AF_MSIX_ENABLED, &ha->flags))
-               qla4_8xxx_disable_msix(ha);
-       else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
-               free_irq(ha->pdev->irq, ha);
-               pci_disable_msi(ha->pdev);
-       } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
-               free_irq(ha->pdev->irq, ha);
+       if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+               if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
+                       qla4_8xxx_disable_msix(ha);
+               } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
+                       free_irq(ha->pdev->irq, ha);
+                       pci_disable_msi(ha->pdev);
+               } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
+                       free_irq(ha->pdev->irq, ha);
+               }
+       }
 }
index 3d41034..160d336 100644 (file)
@@ -43,6 +43,30 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
        }
 }
 
+/**
+ * qla4xxx_is_intr_poll_mode â€“ Are we allowed to poll for interrupts?
+ * @ha: Pointer to host adapter structure.
+ * @ret: 1=polling mode, 0=non-polling mode
+ **/
+static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
+{
+       int rval = 1;
+
+       if (is_qla8032(ha)) {
+               if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+                   test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
+                       rval = 0;
+       } else {
+               if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+                   test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+                   test_bit(AF_ONLINE, &ha->flags) &&
+                   !test_bit(AF_HA_REMOVAL, &ha->flags))
+                       rval = 0;
+       }
+
+       return rval;
+}
+
 /**
  * qla4xxx_mailbox_command - issues mailbox commands
  * @ha: Pointer to host adapter structure.
@@ -153,33 +177,28 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
        /*
         * Wait for completion: Poll or completion queue
         */
-       if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
-           test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
-           test_bit(AF_ONLINE, &ha->flags) &&
-           !test_bit(AF_HA_REMOVAL, &ha->flags)) {
-               /* Do not poll for completion. Use completion queue */
-               set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
-               wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
-               clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
-       } else {
+       if (qla4xxx_is_intr_poll_mode(ha)) {
                /* Poll for command to complete */
                wait_count = jiffies + MBOX_TOV * HZ;
                while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
                        if (time_after_eq(jiffies, wait_count))
                                break;
-
                        /*
                         * Service the interrupt.
                         * The ISR will save the mailbox status registers
                         * to a temporary storage location in the adapter
                         * structure.
                         */
-
                        spin_lock_irqsave(&ha->hardware_lock, flags);
                        ha->isp_ops->process_mailbox_interrupt(ha, outCount);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                        msleep(10);
                }
+       } else {
+               /* Do not poll for completion. Use completion queue */
+               set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
+               wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
+               clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
        }
 
        /* Check for mailbox timeout. */
@@ -678,8 +697,24 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
                return QLA_ERROR;
        }
 
-       ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
-           ha->host_no, mbox_sts[2]);
+       /* High-water mark of IOCBs */
+       ha->iocb_hiwat = mbox_sts[2];
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "%s: firmware IOCBs available = %d\n", __func__,
+                         ha->iocb_hiwat));
+
+       if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
+               ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
+
+       /* Ideally, we should not enter this code, as the # of firmware
+        * IOCBs is hard-coded in the firmware. We set a default
+        * iocb_hiwat here just in case */
+       if (ha->iocb_hiwat == 0) {
+               ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "%s: Setting IOCB's to = %d\n", __func__,
+                                 ha->iocb_hiwat));
+       }
 
        return QLA_SUCCESS;
 }
@@ -1385,10 +1420,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
        dma_addr_t chap_dma;
 
        chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
-       if (chap_table == NULL) {
-               ret = -ENOMEM;
-               goto exit_get_chap;
-       }
+       if (chap_table == NULL)
+               return -ENOMEM;
 
        chap_size = sizeof(struct ql4_chap_table);
        memset(chap_table, 0, chap_size);
index 499a92d..71d3d23 100644 (file)
@@ -2986,7 +2986,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
 
        retval = qla4_8xxx_device_state_handler(ha);
 
-       if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
+       if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
                retval = qla4xxx_request_irqs(ha);
 
        return retval;
@@ -3427,11 +3427,11 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
        }
 
        /* Make sure we receive the minimum required data to cache internally */
-       if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) {
+       if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) <
+           offsetof(struct mbx_sys_info, reserved)) {
                DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
                    " error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
                goto exit_validate_mac82;
-
        }
 
        /* Save M.A.C. address & serial_number */
@@ -3463,7 +3463,7 @@ exit_validate_mac82:
 
 /* Interrupt handling helpers. */
 
-int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
 {
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3484,7 +3484,7 @@ int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
        return QLA_SUCCESS;
 }
 
-int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
 {
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3509,7 +3509,7 @@ int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
 void
 qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
 {
-       qla4_8xxx_mbx_intr_enable(ha);
+       qla4_8xxx_intr_enable(ha);
 
        spin_lock_irq(&ha->hardware_lock);
        /* BIT 10 - reset */
@@ -3522,7 +3522,7 @@ void
 qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
 {
        if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
-               qla4_8xxx_mbx_intr_disable(ha);
+               qla4_8xxx_intr_disable(ha);
 
        spin_lock_irq(&ha->hardware_lock);
        /* BIT 10 - set */
index 4cec123..6142729 100644 (file)
@@ -1337,18 +1337,18 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
                                              sess->password_in, BIDI_CHAP,
                                              &idx);
                if (rval)
-                       return -EINVAL;
-
-               len = sprintf(buf, "%hu\n", idx);
+                       len = sprintf(buf, "\n");
+               else
+                       len = sprintf(buf, "%hu\n", idx);
                break;
        case ISCSI_PARAM_CHAP_OUT_IDX:
                rval = qla4xxx_get_chap_index(ha, sess->username,
                                              sess->password, LOCAL_CHAP,
                                              &idx);
                if (rval)
-                       return -EINVAL;
-
-               len = sprintf(buf, "%hu\n", idx);
+                       len = sprintf(buf, "\n");
+               else
+                       len = sprintf(buf, "%hu\n", idx);
                break;
        default:
                return iscsi_session_get_param(cls_sess, param, buf);
@@ -2242,6 +2242,7 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
            test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
            !test_bit(AF_ONLINE, &ha->flags) ||
            !test_bit(AF_LINK_UP, &ha->flags) ||
+           test_bit(AF_LOOPBACK, &ha->flags) ||
            test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
                goto qc_host_busy;
 
@@ -2978,6 +2979,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
                if (status == QLA_SUCCESS) {
                        if (!test_bit(AF_FW_RECOVERY, &ha->flags))
                                qla4xxx_cmd_wait(ha);
+
                        ha->isp_ops->disable_intrs(ha);
                        qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
                        qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -3479,7 +3481,8 @@ dpc_post_reset_ha:
        }
 
        /* ---- link change? --- */
-       if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+       if (!test_bit(AF_LOOPBACK, &ha->flags) &&
+           test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
                if (!test_bit(AF_LINK_UP, &ha->flags)) {
                        /* ---- link down? --- */
                        qla4xxx_mark_all_devices_missing(ha);
@@ -3508,10 +3511,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
 {
        qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
 
-       if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
-               /* Turn-off interrupts on the card. */
-               ha->isp_ops->disable_intrs(ha);
-       }
+       /* Turn-off interrupts on the card. */
+       ha->isp_ops->disable_intrs(ha);
 
        if (is_qla40XX(ha)) {
                writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
@@ -3547,8 +3548,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
        }
 
        /* Detach interrupts */
-       if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
-               qla4xxx_free_irqs(ha);
+       qla4xxx_free_irqs(ha);
 
        /* free extra memory */
        qla4xxx_mem_free(ha);
@@ -4687,7 +4687,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
        struct iscsi_endpoint *ep;
        struct sockaddr_in *addr;
        struct sockaddr_in6 *addr6;
-       struct sockaddr *dst_addr;
+       struct sockaddr *t_addr;
+       struct sockaddr_storage *dst_addr;
        char *ip;
 
        /* TODO: need to destroy on unload iscsi_endpoint*/
@@ -4696,21 +4697,23 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
                return NULL;
 
        if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
-               dst_addr->sa_family = AF_INET6;
+               t_addr = (struct sockaddr *)dst_addr;
+               t_addr->sa_family = AF_INET6;
                addr6 = (struct sockaddr_in6 *)dst_addr;
                ip = (char *)&addr6->sin6_addr;
                memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
                addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
 
        } else {
-               dst_addr->sa_family = AF_INET;
+               t_addr = (struct sockaddr *)dst_addr;
+               t_addr->sa_family = AF_INET;
                addr = (struct sockaddr_in *)dst_addr;
                ip = (char *)&addr->sin_addr;
                memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
                addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
        }
 
-       ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+       ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
        vfree(dst_addr);
        return ep;
 }
@@ -4725,7 +4728,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
 }
 
 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
-                                         struct ddb_entry *ddb_entry)
+                                         struct ddb_entry *ddb_entry,
+                                         uint16_t idx)
 {
        uint16_t def_timeout;
 
@@ -4745,6 +4749,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
                def_timeout : LOGIN_TOV;
        ddb_entry->default_time2wait =
                le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+
+       if (ql4xdisablesysfsboot &&
+           (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
+               set_bit(DF_BOOT_TGT, &ddb_entry->flags);
 }
 
 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
@@ -4881,7 +4889,7 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
 
 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
                                   struct dev_db_entry *fw_ddb_entry,
-                                  int is_reset)
+                                  int is_reset, uint16_t idx)
 {
        struct iscsi_cls_session *cls_sess;
        struct iscsi_session *sess;
@@ -4919,7 +4927,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
        memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
               sizeof(struct dev_db_entry));
 
-       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
 
        cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
 
@@ -5036,7 +5044,7 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
                                goto continue_next_nt;
                }
 
-               ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+               ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
                if (ret == QLA_ERROR)
                        goto exit_nt_list;
 
@@ -5115,6 +5123,78 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
        qla4xxx_free_ddb_index(ha);
 }
 
+/**
+ * qla4xxx_wait_login_resp_boot_tgt -  Wait for iSCSI boot target login
+ * response.
+ * @ha: pointer to adapter structure
+ *
+ * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be
+ * set in DDB and we will wait for login response of boot targets during
+ * probe.
+ **/
+static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
+{
+       struct ddb_entry *ddb_entry;
+       struct dev_db_entry *fw_ddb_entry = NULL;
+       dma_addr_t fw_ddb_entry_dma;
+       unsigned long wtime;
+       uint32_t ddb_state;
+       int max_ddbs, idx, ret;
+
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                         &fw_ddb_entry_dma, GFP_KERNEL);
+       if (!fw_ddb_entry) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Unable to allocate dma buffer\n", __func__);
+               goto exit_login_resp;
+       }
+
+       wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
+
+       for (idx = 0; idx < max_ddbs; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "%s: DDB index [%d]\n", __func__,
+                                         ddb_entry->fw_ddb_index));
+                       do {
+                               ret = qla4xxx_get_fwddb_entry(ha,
+                                               ddb_entry->fw_ddb_index,
+                                               fw_ddb_entry, fw_ddb_entry_dma,
+                                               NULL, NULL, &ddb_state, NULL,
+                                               NULL, NULL);
+                               if (ret == QLA_ERROR)
+                                       goto exit_login_resp;
+
+                               if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
+                                   (ddb_state == DDB_DS_SESSION_FAILED))
+                                       break;
+
+                               schedule_timeout_uninterruptible(HZ);
+
+                       } while ((time_after(wtime, jiffies)));
+
+                       if (!time_after(wtime, jiffies)) {
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                                 "%s: Login response wait timer expired\n",
+                                                 __func__));
+                                goto exit_login_resp;
+                       }
+               }
+       }
+
+exit_login_resp:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
+}
+
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
@@ -5270,7 +5350,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
                if (is_qla80XX(ha)) {
                        ha->isp_ops->idc_lock(ha);
                        dev_state = qla4_8xxx_rd_direct(ha,
-                                                       QLA82XX_CRB_DEV_STATE);
+                                                       QLA8XXX_CRB_DEV_STATE);
                        ha->isp_ops->idc_unlock(ha);
                        if (dev_state == QLA8XXX_DEV_FAILED) {
                                ql4_printk(KERN_WARNING, ha, "%s: don't retry "
@@ -5368,6 +5448,7 @@ skip_retry_init:
                /* Perform the build ddb list and login to each */
        qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
        iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+       qla4xxx_wait_login_resp_boot_tgt(ha);
 
        qla4xxx_create_chap_list(ha);
 
@@ -6008,14 +6089,6 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
                goto exit_host_reset;
        }
 
-       rval = qla4xxx_wait_for_hba_online(ha);
-       if (rval != QLA_SUCCESS) {
-               DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
-                                 "adapter\n", __func__));
-               rval = -EIO;
-               goto exit_host_reset;
-       }
-
        if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
                goto recover_adapter;
 
@@ -6115,7 +6188,6 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 {
        uint32_t rval = QLA_ERROR;
-       uint32_t ret = 0;
        int fn;
        struct pci_dev *other_pdev = NULL;
 
@@ -6201,16 +6273,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                        qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
                        qla4_8xxx_set_drv_active(ha);
                        ha->isp_ops->idc_unlock(ha);
-                       ret = qla4xxx_request_irqs(ha);
-                       if (ret) {
-                               ql4_printk(KERN_WARNING, ha, "Failed to "
-                                   "reserve interrupt %d already in use.\n",
-                                   ha->pdev->irq);
-                               rval = QLA_ERROR;
-                       } else {
-                               ha->isp_ops->enable_intrs(ha);
-                               rval = QLA_SUCCESS;
-                       }
+                       ha->isp_ops->enable_intrs(ha);
                }
        } else {
                ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
@@ -6220,18 +6283,9 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                     QLA8XXX_DEV_READY)) {
                        clear_bit(AF_FW_RECOVERY, &ha->flags);
                        rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
-                       if (rval == QLA_SUCCESS) {
-                               ret = qla4xxx_request_irqs(ha);
-                               if (ret) {
-                                       ql4_printk(KERN_WARNING, ha, "Failed to"
-                                           " reserve interrupt %d already in"
-                                           " use.\n", ha->pdev->irq);
-                                       rval = QLA_ERROR;
-                               } else {
-                                       ha->isp_ops->enable_intrs(ha);
-                                       rval = QLA_SUCCESS;
-                               }
-                       }
+                       if (rval == QLA_SUCCESS)
+                               ha->isp_ops->enable_intrs(ha);
+
                        ha->isp_ops->idc_lock(ha);
                        qla4_8xxx_set_drv_active(ha);
                        ha->isp_ops->idc_unlock(ha);
index f6df2ea..6775a45 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k4"
index 59d427b..0a74b97 100644 (file)
@@ -2503,6 +2503,15 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
 }
 static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
                        NULL);
+static ssize_t
+show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+       return sprintf(buf, "%d\n", session->target_id);
+}
+static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
+                       show_priv_session_target_id, NULL);
 
 #define iscsi_priv_session_attr_show(field, format)                    \
 static ssize_t                                                         \
@@ -2575,6 +2584,7 @@ static struct attribute *iscsi_session_attrs[] = {
        &dev_attr_priv_sess_creator.attr,
        &dev_attr_sess_chap_out_idx.attr,
        &dev_attr_sess_chap_in_idx.attr,
+       &dev_attr_priv_sess_target_id.attr,
        NULL,
 };
 
@@ -2638,6 +2648,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
                return S_IRUGO;
        else if (attr == &dev_attr_priv_sess_creator.attr)
                return S_IRUGO;
+       else if (attr == &dev_attr_priv_sess_target_id.attr)
+               return S_IRUGO;
        else {
                WARN_ONCE(1, "Invalid session attr");
                return 0;
index be2c9a6..9f0c465 100644 (file)
@@ -1391,24 +1391,23 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
                return ERR_PTR(-ENOMEM);
        }
 
-       if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
-               printk(KERN_WARNING "idr expansion Sg_device failure\n");
-               error = -ENOMEM;
-               goto out;
-       }
-
+       idr_preload(GFP_KERNEL);
        write_lock_irqsave(&sg_index_lock, iflags);
 
-       error = idr_get_new(&sg_index_idr, sdp, &k);
-       if (error) {
-               write_unlock_irqrestore(&sg_index_lock, iflags);
-               printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
-                      error);
-               goto out;
+       error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
+       if (error < 0) {
+               if (error == -ENOSPC) {
+                       sdev_printk(KERN_WARNING, scsidp,
+                                   "Unable to attach sg device type=%d, minor number exceeds %d\n",
+                                   scsidp->type, SG_MAX_DEVS - 1);
+                       error = -ENODEV;
+               } else {
+                       printk(KERN_WARNING
+                              "idr allocation Sg_device failure: %d\n", error);
+               }
+               goto out_unlock;
        }
-
-       if (unlikely(k >= SG_MAX_DEVS))
-               goto overflow;
+       k = error;
 
        SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
        sprintf(disk->disk_name, "sg%d", k);
@@ -1420,25 +1419,17 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
        sdp->sg_tablesize = queue_max_segments(q);
        sdp->index = k;
        kref_init(&sdp->d_ref);
+       error = 0;
 
+out_unlock:
        write_unlock_irqrestore(&sg_index_lock, iflags);
+       idr_preload_end();
 
-       error = 0;
- out:
        if (error) {
                kfree(sdp);
                return ERR_PTR(error);
        }
        return sdp;
-
- overflow:
-       idr_remove(&sg_index_idr, k);
-       write_unlock_irqrestore(&sg_index_lock, iflags);
-       sdev_printk(KERN_WARNING, scsidp,
-                   "Unable to attach sg device type=%d, minor "
-                   "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
-       error = -ENODEV;
-       goto out;
 }
 
 static int
index 98156a9..8697447 100644 (file)
@@ -977,7 +977,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
        struct st_modedef *STm;
        struct st_partstat *STps;
        char *name = tape_name(STp);
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int mode = TAPE_MODE(inode);
 
        STp->ready = ST_READY;
@@ -4076,7 +4076,7 @@ static int st_probe(struct device *dev)
        struct st_modedef *STm;
        struct st_partstat *STps;
        struct st_buffer *buffer;
-       int i, dev_num, error;
+       int i, error;
        char *stp;
 
        if (SDp->type != TYPE_TAPE)
@@ -4178,27 +4178,17 @@ static int st_probe(struct device *dev)
            tpnt->blksize_changed = 0;
        mutex_init(&tpnt->lock);
 
-       if (!idr_pre_get(&st_index_idr, GFP_KERNEL)) {
-               pr_warn("st: idr expansion failed\n");
-               error = -ENOMEM;
-               goto out_put_disk;
-       }
-
+       idr_preload(GFP_KERNEL);
        spin_lock(&st_index_lock);
-       error = idr_get_new(&st_index_idr, tpnt, &dev_num);
+       error = idr_alloc(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_NOWAIT);
        spin_unlock(&st_index_lock);
-       if (error) {
+       idr_preload_end();
+       if (error < 0) {
                pr_warn("st: idr allocation failed: %d\n", error);
                goto out_put_disk;
        }
-
-       if (dev_num > ST_MAX_TAPES) {
-               pr_err("st: Too many tape devices (max. %d).\n", ST_MAX_TAPES);
-               goto out_put_index;
-       }
-
-       tpnt->index = dev_num;
-       sprintf(disk->disk_name, "st%d", dev_num);
+       tpnt->index = error;
+       sprintf(disk->disk_name, "st%d", tpnt->index);
 
        dev_set_drvdata(dev, tpnt);
 
@@ -4218,9 +4208,8 @@ static int st_probe(struct device *dev)
 
 out_remove_devs:
        remove_cdevs(tpnt);
-out_put_index:
        spin_lock(&st_index_lock);
-       idr_remove(&st_index_idr, dev_num);
+       idr_remove(&st_index_idr, tpnt->index);
        spin_unlock(&st_index_lock);
 out_put_disk:
        put_disk(disk);
index 270b3cf..16a3a0c 100644 (file)
@@ -201,6 +201,7 @@ enum storvsc_request_type {
 #define SRB_STATUS_AUTOSENSE_VALID     0x80
 #define SRB_STATUS_INVALID_LUN 0x20
 #define SRB_STATUS_SUCCESS     0x01
+#define SRB_STATUS_ABORTED     0x02
 #define SRB_STATUS_ERROR       0x04
 
 /*
@@ -295,6 +296,25 @@ struct storvsc_scan_work {
        uint lun;
 };
 
+static void storvsc_device_scan(struct work_struct *work)
+{
+       struct storvsc_scan_work *wrk;
+       uint lun;
+       struct scsi_device *sdev;
+
+       wrk = container_of(work, struct storvsc_scan_work, work);
+       lun = wrk->lun;
+
+       sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+       if (!sdev)
+               goto done;
+       scsi_rescan_device(&sdev->sdev_gendev);
+       scsi_device_put(sdev);
+
+done:
+       kfree(wrk);
+}
+
 static void storvsc_bus_scan(struct work_struct *work)
 {
        struct storvsc_scan_work *wrk;
@@ -467,6 +487,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
        if (!bounce_sgl)
                return NULL;
 
+       sg_init_table(bounce_sgl, num_pages);
        for (i = 0; i < num_pages; i++) {
                page_buf = alloc_page(GFP_ATOMIC);
                if (!page_buf)
@@ -760,6 +781,66 @@ cleanup:
        return ret;
 }
 
+static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+                               struct scsi_cmnd *scmnd,
+                               struct Scsi_Host *host,
+                               u8 asc, u8 ascq)
+{
+       struct storvsc_scan_work *wrk;
+       void (*process_err_fn)(struct work_struct *work);
+       bool do_work = false;
+
+       switch (vm_srb->srb_status) {
+       case SRB_STATUS_ERROR:
+               /*
+                * If there is an error; offline the device since all
+                * error recovery strategies would have already been
+                * deployed on the host side. However, if the command
+                * were a pass-through command deal with it appropriately.
+                */
+               switch (scmnd->cmnd[0]) {
+               case ATA_16:
+               case ATA_12:
+                       set_host_byte(scmnd, DID_PASSTHROUGH);
+                       break;
+               default:
+                       set_host_byte(scmnd, DID_TARGET_FAILURE);
+               }
+               break;
+       case SRB_STATUS_INVALID_LUN:
+               do_work = true;
+               process_err_fn = storvsc_remove_lun;
+               break;
+       case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
+               if ((asc == 0x2a) && (ascq == 0x9)) {
+                       do_work = true;
+                       process_err_fn = storvsc_device_scan;
+                       /*
+                        * Retry the I/O that trigerred this.
+                        */
+                       set_host_byte(scmnd, DID_REQUEUE);
+               }
+               break;
+       }
+
+       if (!do_work)
+               return;
+
+       /*
+        * We need to schedule work to process this error; schedule it.
+        */
+       wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+       if (!wrk) {
+               set_host_byte(scmnd, DID_TARGET_FAILURE);
+               return;
+       }
+
+       wrk->host = host;
+       wrk->lun = vm_srb->lun;
+       INIT_WORK(&wrk->work, process_err_fn);
+       schedule_work(&wrk->work);
+}
+
 
 static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
 {
@@ -768,8 +849,13 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
        void (*scsi_done_fn)(struct scsi_cmnd *);
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;
-       struct storvsc_scan_work *wrk;
        struct stor_mem_pools *memp = scmnd->device->hostdata;
+       struct Scsi_Host *host;
+       struct storvsc_device *stor_dev;
+       struct hv_device *dev = host_dev->dev;
+
+       stor_dev = get_in_stor_device(dev);
+       host = stor_dev->host;
 
        vm_srb = &cmd_request->vstor_packet.vm_srb;
        if (cmd_request->bounce_sgl_count) {
@@ -782,55 +868,18 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
                                        cmd_request->bounce_sgl_count);
        }
 
-       /*
-        * If there is an error; offline the device since all
-        * error recovery strategies would have already been
-        * deployed on the host side. However, if the command
-        * were a pass-through command deal with it appropriately.
-        */
        scmnd->result = vm_srb->scsi_status;
 
-       if (vm_srb->srb_status == SRB_STATUS_ERROR) {
-               switch (scmnd->cmnd[0]) {
-               case ATA_16:
-               case ATA_12:
-                       set_host_byte(scmnd, DID_PASSTHROUGH);
-                       break;
-               default:
-                       set_host_byte(scmnd, DID_TARGET_FAILURE);
-               }
-       }
-
-
-       /*
-        * If the LUN is invalid; remove the device.
-        */
-       if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
-               struct storvsc_device *stor_dev;
-               struct hv_device *dev = host_dev->dev;
-               struct Scsi_Host *host;
-
-               stor_dev = get_in_stor_device(dev);
-               host = stor_dev->host;
-
-               wrk = kmalloc(sizeof(struct storvsc_scan_work),
-                               GFP_ATOMIC);
-               if (!wrk) {
-                       scmnd->result = DID_TARGET_FAILURE << 16;
-               } else {
-                       wrk->host = host;
-                       wrk->lun = vm_srb->lun;
-                       INIT_WORK(&wrk->work, storvsc_remove_lun);
-                       schedule_work(&wrk->work);
-               }
-       }
-
        if (scmnd->result) {
                if (scsi_normalize_sense(scmnd->sense_buffer,
                                SCSI_SENSE_BUFFERSIZE, &sense_hdr))
                        scsi_print_sense_hdr("storvsc", &sense_hdr);
        }
 
+       if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+               storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
+                                        sense_hdr.ascq);
+
        scsi_set_resid(scmnd,
                cmd_request->data_buffer.len -
                vm_srb->data_transfer_length);
@@ -1155,6 +1204,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
 
        blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
 
+       sdevice->no_write_same = 1;
+
        return 0;
 }
 
@@ -1237,6 +1288,8 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
        u8 scsi_op = scmnd->cmnd[0];
 
        switch (scsi_op) {
+       /* the host does not handle WRITE_SAME, log accident usage */
+       case WRITE_SAME:
        /*
         * smartd sends this command and the host does not handle
         * this. So, don't send it.
index 8f27f9d..0371047 100644 (file)
@@ -2,48 +2,58 @@
 # Kernel configuration file for the UFS Host Controller
 #
 # This code is based on drivers/scsi/ufs/Kconfig
-# Copyright (C) 2011  Samsung Samsung India Software Operations
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+#      Santosh Yaraganavi <santosh.sy@samsung.com>
+#      Vinayak Holikatti <h.vinayak@samsung.com>
 #
-# Santosh Yaraganavi <santosh.sy@samsung.com>
-# Vinayak Holikatti <h.vinayak@samsung.com>
-
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
-
+# See the COPYING file in the top-level directory or visit
+# <http://www.gnu.org/licenses/gpl-2.0.html>
+#
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
+#
+# This program is provided "AS IS" and "WITH ALL FAULTS" and
+# without warranty of any kind. You are solely responsible for
+# determining the appropriateness of using and distributing
+# the program and assume all risks associated with your exercise
+# of rights with respect to the program, including but not limited
+# to infringement of third party rights, the risks and costs of
+# program errors, damage to or loss of data, programs or equipment,
+# and unavailability or interruption of operations. Under no
+# circumstances will the contributor of this Program be liable for
+# any damages of any kind arising from your use or distribution of
+# this program.
 
-# NO WARRANTY
-# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
-# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
-# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
-# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
-# solely responsible for determining the appropriateness of using and
-# distributing the Program and assumes all risks associated with its
-# exercise of rights under this Agreement, including but not limited to
-# the risks and costs of program errors, damage to or loss of data,
-# programs or equipment, and unavailability or interruption of operations.
-
-# DISCLAIMER OF LIABILITY
-# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
-# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+config SCSI_UFSHCD
+       tristate "Universal Flash Storage Controller Driver Core"
+       depends on SCSI
+       ---help---
+       This selects the support for UFS devices in Linux, say Y and make
+         sure that you know the name of your UFS host adapter (the card
+         inside your computer that "speaks" the UFS protocol, also
+         called UFS Host Controller), because you will be asked for it.
+         The module will be called ufshcd.
 
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
-# USA.
+         To compile this driver as a module, choose M here and read
+         <file:Documentation/scsi/ufs.txt>.
+         However, do not compile this as a module if your root file system
+         (the one containing the directory /) is located on a UFS device.
 
-config SCSI_UFSHCD
-       tristate "Universal Flash Storage host controller driver"
-       depends on PCI && SCSI
+config SCSI_UFSHCD_PCI
+       tristate "PCI bus based UFS Controller support"
+       depends on SCSI_UFSHCD && PCI
        ---help---
-       This is a generic driver which supports PCIe UFS Host controllers.
+       This selects the PCI UFS Host Controller Interface. Select this if
+       you have UFS Host Controller with PCI Interface.
+
+         If you have a controller with this interface, say Y or M here.
+
+         If unsure, say N.
index adf7895..9eda0df 100644 (file)
@@ -1,2 +1,3 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
index b207529..139bc06 100644 (file)
@@ -2,45 +2,35 @@
  * Universal Flash Storage Host controller driver
  *
  * This code is based on drivers/scsi/ufs/ufs.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
  *
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
  */
 
 #ifndef _UFS_H
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
new file mode 100644 (file)
index 0000000..5cb1d75
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Universal Flash Storage Host controller PCI glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pci.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/pci.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pci_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       /*
+        * TODO:
+        * 1. Call ufshcd_suspend
+        * 2. Do bus specific power management
+        */
+
+       return -ENOSYS;
+}
+
+/**
+ * ufshcd_pci_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_resume(struct pci_dev *pdev)
+{
+       /*
+        * TODO:
+        * 1. Call ufshcd_resume.
+        * 2. Do bus specific wake up
+        */
+
+       return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+       ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
+ *             data structure memory
+ * @pdev - pointer to PCI handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+       struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+       disable_irq(pdev->irq);
+       free_irq(pdev->irq, hba);
+       ufshcd_remove(hba);
+       pci_release_regions(pdev);
+       pci_set_drvdata(pdev, NULL);
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ *                      addressing capability
+ * @pdev: PCI device structure
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct pci_dev *pdev)
+{
+       int err;
+
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+               && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+               return 0;
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       return err;
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct ufs_hba *hba;
+       void __iomem *mmio_base;
+       int err;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "pci_enable_device failed\n");
+               goto out_error;
+       }
+
+       pci_set_master(pdev);
+
+
+       err = pci_request_regions(pdev, UFSHCD);
+       if (err < 0) {
+               dev_err(&pdev->dev, "request regions failed\n");
+               goto out_disable;
+       }
+
+       mmio_base = pci_ioremap_bar(pdev, 0);
+       if (!mmio_base) {
+               dev_err(&pdev->dev, "memory map failed\n");
+               err = -ENOMEM;
+               goto out_release_regions;
+       }
+
+       err = ufshcd_set_dma_mask(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "set dma mask failed\n");
+               goto out_iounmap;
+       }
+
+       err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
+       if (err) {
+               dev_err(&pdev->dev, "Initialization failed\n");
+               goto out_iounmap;
+       }
+
+       pci_set_drvdata(pdev, hba);
+
+       return 0;
+
+out_iounmap:
+       iounmap(mmio_base);
+out_release_regions:
+       pci_release_regions(pdev);
+out_disable:
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+out_error:
+       return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+       { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+       { }     /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+       .name = UFSHCD,
+       .id_table = ufshcd_pci_tbl,
+       .probe = ufshcd_pci_probe,
+       .remove = ufshcd_pci_remove,
+       .shutdown = ufshcd_pci_shutdown,
+#ifdef CONFIG_PM
+       .suspend = ufshcd_pci_suspend,
+       .resume = ufshcd_pci_resume,
+#endif
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
index 91a4046..60fd40c 100644 (file)
@@ -1,77 +1,39 @@
 /*
- * Universal Flash Storage Host controller driver
+ * Universal Flash Storage Host controller driver Core
  *
  * This code is based on drivers/scsi/ufs/ufshcd.c
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
  *
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
-#include "ufs.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.1"
+#include "ufshcd.h"
 
 enum {
        UFSHCD_MAX_CHANNEL      = 0,
@@ -101,121 +63,6 @@ enum {
        INT_AGGR_CONFIG,
 };
 
-/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @result: UIC command result
- */
-struct uic_command {
-       u32 command;
-       u32 argument1;
-       u32 argument2;
-       u32 argument3;
-       int cmd_active;
-       int result;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @pdev: PCI device handle
- * @lrb: local reference block
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @active_uic_cmd: handle of active UIC command
- * @ufshcd_tm_wait_queue: wait queue for task management
- * @tm_condition: condition variable for task management
- * @ufshcd_state: UFSHCD states
- * @int_enable_mask: Interrupt Mask Bits
- * @uic_workq: Work queue for UIC completion handling
- * @feh_workq: Work queue for fatal controller error handling
- * @errors: HBA errors
- */
-struct ufs_hba {
-       void __iomem *mmio_base;
-
-       /* Virtual memory reference */
-       struct utp_transfer_cmd_desc *ucdl_base_addr;
-       struct utp_transfer_req_desc *utrdl_base_addr;
-       struct utp_task_req_desc *utmrdl_base_addr;
-
-       /* DMA memory reference */
-       dma_addr_t ucdl_dma_addr;
-       dma_addr_t utrdl_dma_addr;
-       dma_addr_t utmrdl_dma_addr;
-
-       struct Scsi_Host *host;
-       struct pci_dev *pdev;
-
-       struct ufshcd_lrb *lrb;
-
-       unsigned long outstanding_tasks;
-       unsigned long outstanding_reqs;
-
-       u32 capabilities;
-       int nutrs;
-       int nutmrs;
-       u32 ufs_version;
-
-       struct uic_command active_uic_cmd;
-       wait_queue_head_t ufshcd_tm_wait_queue;
-       unsigned long tm_condition;
-
-       u32 ufshcd_state;
-       u32 int_enable_mask;
-
-       /* Work Queues */
-       struct work_struct uic_workq;
-       struct work_struct feh_workq;
-
-       /* HBA Errors */
-       u32 errors;
-};
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- */
-struct ufshcd_lrb {
-       struct utp_transfer_req_desc *utr_descriptor_ptr;
-       struct utp_upiu_cmd *ucd_cmd_ptr;
-       struct utp_upiu_rsp *ucd_rsp_ptr;
-       struct ufshcd_sg_entry *ucd_prdt_ptr;
-
-       struct scsi_cmnd *cmd;
-       u8 *sense_buffer;
-       unsigned int sense_bufflen;
-       int scsi_status;
-
-       int command_type;
-       int task_tag;
-       unsigned int lun;
-};
-
 /**
  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
  * @hba - Pointer to adapter instance
@@ -335,21 +182,21 @@ static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
 
        if (hba->utmrdl_base_addr) {
                utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
-               dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+               dma_free_coherent(hba->dev, utmrdl_size,
                                  hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
        }
 
        if (hba->utrdl_base_addr) {
                utrdl_size =
                (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
-               dma_free_coherent(&hba->pdev->dev, utrdl_size,
+               dma_free_coherent(hba->dev, utrdl_size,
                                  hba->utrdl_base_addr, hba->utrdl_dma_addr);
        }
 
        if (hba->ucdl_base_addr) {
                ucdl_size =
                (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
-               dma_free_coherent(&hba->pdev->dev, ucdl_size,
+               dma_free_coherent(hba->dev, ucdl_size,
                                  hba->ucdl_base_addr, hba->ucdl_dma_addr);
        }
 }
@@ -428,15 +275,6 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
                REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
 }
 
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
-       writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
-}
-
 /**
  * ufshcd_hba_start - Start controller initialization sequence
  * @hba: per adapter instance
@@ -724,7 +562,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 
        /* Allocate memory for UTP command descriptors */
        ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
-       hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+       hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
                                                 ucdl_size,
                                                 &hba->ucdl_dma_addr,
                                                 GFP_KERNEL);
@@ -737,7 +575,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
         */
        if (!hba->ucdl_base_addr ||
            WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Command Descriptor Memory allocation failed\n");
                goto out;
        }
@@ -747,13 +585,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
         * UFSHCI requires 1024 byte alignment of UTRD
         */
        utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
-       hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+       hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
                                                  utrdl_size,
                                                  &hba->utrdl_dma_addr,
                                                  GFP_KERNEL);
        if (!hba->utrdl_base_addr ||
            WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Transfer Descriptor Memory allocation failed\n");
                goto out;
        }
@@ -763,13 +601,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
         * UFSHCI requires 1024 byte alignment of UTMRD
         */
        utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
-       hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+       hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
                                                   utmrdl_size,
                                                   &hba->utmrdl_dma_addr,
                                                   GFP_KERNEL);
        if (!hba->utmrdl_base_addr ||
            WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                "Task Management Descriptor Memory allocation failed\n");
                goto out;
        }
@@ -777,7 +615,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
        /* Allocate memory for local reference block */
        hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
        if (!hba->lrb) {
-               dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+               dev_err(hba->dev, "LRB Memory allocation failed\n");
                goto out;
        }
        return 0;
@@ -867,7 +705,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
        /* check if controller is ready to accept UIC commands */
        if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
            UIC_COMMAND_READY) == 0x0) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Controller not ready"
                        " to accept UIC commands\n");
                return -EIO;
@@ -912,7 +750,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
        /* check if device present */
        reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
        if (!ufshcd_is_device_present(reg)) {
-               dev_err(&hba->pdev->dev, "cc: Device not present\n");
+               dev_err(hba->dev, "cc: Device not present\n");
                err = -ENXIO;
                goto out;
        }
@@ -924,7 +762,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
        if (!(ufshcd_get_lists_status(reg))) {
                ufshcd_enable_run_stop_reg(hba);
        } else {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Host controller not ready to process requests");
                err = -EIO;
                goto out;
@@ -1005,7 +843,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
                if (retry) {
                        retry--;
                } else {
-                       dev_err(&hba->pdev->dev,
+                       dev_err(hba->dev,
                                "Controller enable failed\n");
                        return -EIO;
                }
@@ -1084,7 +922,7 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
 
        /* start the initialization process */
        if (ufshcd_initialize_hba(hba)) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Reset: Controller initialization failed\n");
                return FAILED;
        }
@@ -1167,7 +1005,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
                        task_result = SUCCESS;
        } else {
                task_result = FAILED;
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "trc: Invalid ocs = %x\n", ocs_value);
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1281,7 +1119,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                /* check if the returned transfer response is valid */
                result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
                if (result) {
-                       dev_err(&hba->pdev->dev,
+                       dev_err(hba->dev,
                                "Invalid response = %x\n", result);
                        break;
                }
@@ -1310,7 +1148,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        case OCS_FATAL_ERROR:
        default:
                result |= DID_ERROR << 16;
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                "OCS error from controller = %x\n", ocs);
                break;
        } /* end of switch */
@@ -1374,7 +1212,7 @@ static void ufshcd_uic_cc_handler (struct work_struct *work)
            !(ufshcd_get_uic_cmd_result(hba))) {
 
                if (ufshcd_make_hba_operational(hba))
-                       dev_err(&hba->pdev->dev,
+                       dev_err(hba->dev,
                                "cc: hba not operational state\n");
                return;
        }
@@ -1509,7 +1347,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        free_slot = ufshcd_get_tm_free_slot(hba);
        if (free_slot >= hba->nutmrs) {
                spin_unlock_irqrestore(host->host_lock, flags);
-               dev_err(&hba->pdev->dev, "Task management queue full\n");
+               dev_err(hba->dev, "Task management queue full\n");
                err = FAILED;
                goto out;
        }
@@ -1552,7 +1390,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
                                         &hba->tm_condition) != 0),
                                         60 * HZ);
        if (!err) {
-               dev_err(&hba->pdev->dev,
+               dev_err(hba->dev,
                        "Task management command timed-out\n");
                err = FAILED;
                goto out;
@@ -1687,24 +1525,14 @@ static struct scsi_host_template ufshcd_driver_template = {
        .can_queue              = UFSHCD_CAN_QUEUE,
 };
 
-/**
- * ufshcd_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void ufshcd_shutdown(struct pci_dev *pdev)
-{
-       ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-#ifdef CONFIG_PM
 /**
  * ufshcd_suspend - suspend power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
  * @state: power state
  *
  * Returns -ENOSYS
  */
-static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
 {
        /*
         * TODO:
@@ -1717,14 +1545,15 @@ static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
 
        return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(ufshcd_suspend);
 
 /**
  * ufshcd_resume - resume power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
  *
  * Returns -ENOSYS
  */
-static int ufshcd_resume(struct pci_dev *pdev)
+int ufshcd_resume(struct ufs_hba *hba)
 {
        /*
         * TODO:
@@ -1737,7 +1566,7 @@ static int ufshcd_resume(struct pci_dev *pdev)
 
        return -ENOSYS;
 }
-#endif /* CONFIG_PM */
+EXPORT_SYMBOL_GPL(ufshcd_resume);
 
 /**
  * ufshcd_hba_free - free allocated memory for
@@ -1748,107 +1577,67 @@ static void ufshcd_hba_free(struct ufs_hba *hba)
 {
        iounmap(hba->mmio_base);
        ufshcd_free_hba_memory(hba);
-       pci_release_regions(hba->pdev);
 }
 
 /**
- * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ * ufshcd_remove - de-allocate SCSI host and host memory space
  *             data structure memory
- * @pdev - pointer to PCI handle
+ * @hba - per adapter instance
  */
-static void ufshcd_remove(struct pci_dev *pdev)
+void ufshcd_remove(struct ufs_hba *hba)
 {
-       struct ufs_hba *hba = pci_get_drvdata(pdev);
-
        /* disable interrupts */
        ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
-       free_irq(pdev->irq, hba);
 
        ufshcd_hba_stop(hba);
        ufshcd_hba_free(hba);
 
        scsi_remove_host(hba->host);
        scsi_host_put(hba->host);
-       pci_set_drvdata(pdev, NULL);
-       pci_clear_master(pdev);
-       pci_disable_device(pdev);
-}
-
-/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- *                      addressing capability
- * @pdev: PCI device structure
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct ufs_hba *hba)
-{
-       int err;
-       u64 dma_mask;
-
-       /*
-        * If controller supports 64 bit addressing mode, then set the DMA
-        * mask to 64-bit, else set the DMA mask to 32-bit
-        */
-       if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
-               dma_mask = DMA_BIT_MASK(64);
-       else
-               dma_mask = DMA_BIT_MASK(32);
-
-       err = pci_set_dma_mask(hba->pdev, dma_mask);
-       if (err)
-               return err;
-
-       err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
-
-       return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_remove);
 
 /**
- * ufshcd_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
+ * ufshcd_init - Driver initialization routine
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
+                void __iomem *mmio_base, unsigned int irq)
 {
        struct Scsi_Host *host;
        struct ufs_hba *hba;
        int err;
 
-       err = pci_enable_device(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_device failed\n");
+       if (!dev) {
+               dev_err(dev,
+               "Invalid memory reference for dev is NULL\n");
+               err = -ENODEV;
                goto out_error;
        }
 
-       pci_set_master(pdev);
+       if (!mmio_base) {
+               dev_err(dev,
+               "Invalid memory reference for mmio_base is NULL\n");
+               err = -ENODEV;
+               goto out_error;
+       }
 
        host = scsi_host_alloc(&ufshcd_driver_template,
                                sizeof(struct ufs_hba));
        if (!host) {
-               dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+               dev_err(dev, "scsi_host_alloc failed\n");
                err = -ENOMEM;
-               goto out_disable;
+               goto out_error;
        }
        hba = shost_priv(host);
-
-       err = pci_request_regions(pdev, UFSHCD);
-       if (err < 0) {
-               dev_err(&pdev->dev, "request regions failed\n");
-               goto out_host_put;
-       }
-
-       hba->mmio_base = pci_ioremap_bar(pdev, 0);
-       if (!hba->mmio_base) {
-               dev_err(&pdev->dev, "memory map failed\n");
-               err = -ENOMEM;
-               goto out_release_regions;
-       }
-
        hba->host = host;
-       hba->pdev = pdev;
+       hba->dev = dev;
+       hba->mmio_base = mmio_base;
+       hba->irq = irq;
 
        /* Read capabilities registers */
        ufshcd_hba_capabilities(hba);
@@ -1856,17 +1645,11 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
 
-       err = ufshcd_set_dma_mask(hba);
-       if (err) {
-               dev_err(&pdev->dev, "set dma mask failed\n");
-               goto out_iounmap;
-       }
-
        /* Allocate memory for host memory space */
        err = ufshcd_memory_alloc(hba);
        if (err) {
-               dev_err(&pdev->dev, "Memory allocation failed\n");
-               goto out_iounmap;
+               dev_err(hba->dev, "Memory allocation failed\n");
+               goto out_disable;
        }
 
        /* Configure LRB */
@@ -1888,76 +1671,50 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
 
        /* IRQ registration */
-       err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+       err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
-               dev_err(&pdev->dev, "request irq failed\n");
+               dev_err(hba->dev, "request irq failed\n");
                goto out_lrb_free;
        }
 
        /* Enable SCSI tag mapping */
        err = scsi_init_shared_tag_map(host, host->can_queue);
        if (err) {
-               dev_err(&pdev->dev, "init shared queue failed\n");
+               dev_err(hba->dev, "init shared queue failed\n");
                goto out_free_irq;
        }
 
-       pci_set_drvdata(pdev, hba);
-
-       err = scsi_add_host(host, &pdev->dev);
+       err = scsi_add_host(host, hba->dev);
        if (err) {
-               dev_err(&pdev->dev, "scsi_add_host failed\n");
+               dev_err(hba->dev, "scsi_add_host failed\n");
                goto out_free_irq;
        }
 
        /* Initialization routine */
        err = ufshcd_initialize_hba(hba);
        if (err) {
-               dev_err(&pdev->dev, "Initialization failed\n");
-               goto out_free_irq;
+               dev_err(hba->dev, "Initialization failed\n");
+               goto out_remove_scsi_host;
        }
+       *hba_handle = hba;
 
        return 0;
 
+out_remove_scsi_host:
+       scsi_remove_host(hba->host);
 out_free_irq:
-       free_irq(pdev->irq, hba);
+       free_irq(irq, hba);
 out_lrb_free:
        ufshcd_free_hba_memory(hba);
-out_iounmap:
-       iounmap(hba->mmio_base);
-out_release_regions:
-       pci_release_regions(pdev);
-out_host_put:
-       scsi_host_put(host);
 out_disable:
-       pci_clear_master(pdev);
-       pci_disable_device(pdev);
+       scsi_host_put(host);
 out_error:
        return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_init);
 
-static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
-       { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-       { }     /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
-
-static struct pci_driver ufshcd_pci_driver = {
-       .name = UFSHCD,
-       .id_table = ufshcd_pci_tbl,
-       .probe = ufshcd_probe,
-       .remove = ufshcd_remove,
-       .shutdown = ufshcd_shutdown,
-#ifdef CONFIG_PM
-       .suspend = ufshcd_suspend,
-       .resume = ufshcd_resume,
-#endif
-};
-
-module_pci_driver(ufshcd_pci_driver);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
-             "Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
new file mode 100644 (file)
index 0000000..6b99a42
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.2"
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+       u32 command;
+       u32 argument1;
+       u32 argument2;
+       u32 argument3;
+       int cmd_active;
+       int result;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+       struct utp_transfer_req_desc *utr_descriptor_ptr;
+       struct utp_upiu_cmd *ucd_cmd_ptr;
+       struct utp_upiu_rsp *ucd_rsp_ptr;
+       struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+       struct scsi_cmnd *cmd;
+       u8 *sense_buffer;
+       unsigned int sense_bufflen;
+       int scsi_status;
+
+       int command_type;
+       int task_tag;
+       unsigned int lun;
+};
+
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @ufs_version: UFS Version to which controller complies
+ * @irq: Irq number of the controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+       void __iomem *mmio_base;
+
+       /* Virtual memory reference */
+       struct utp_transfer_cmd_desc *ucdl_base_addr;
+       struct utp_transfer_req_desc *utrdl_base_addr;
+       struct utp_task_req_desc *utmrdl_base_addr;
+
+       /* DMA memory reference */
+       dma_addr_t ucdl_dma_addr;
+       dma_addr_t utrdl_dma_addr;
+       dma_addr_t utmrdl_dma_addr;
+
+       struct Scsi_Host *host;
+       struct device *dev;
+
+       struct ufshcd_lrb *lrb;
+
+       unsigned long outstanding_tasks;
+       unsigned long outstanding_reqs;
+
+       u32 capabilities;
+       int nutrs;
+       int nutmrs;
+       u32 ufs_version;
+       unsigned int irq;
+
+       struct uic_command active_uic_cmd;
+       wait_queue_head_t ufshcd_tm_wait_queue;
+       unsigned long tm_condition;
+
+       u32 ufshcd_state;
+       u32 int_enable_mask;
+
+       /* Work Queues */
+       struct work_struct uic_workq;
+       struct work_struct feh_workq;
+
+       /* HBA Errors */
+       u32 errors;
+};
+
+int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * ,
+                       unsigned int);
+void ufshcd_remove(struct ufs_hba *);
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+       writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+#endif /* End of Header */
index 6e3510f..0c16484 100644 (file)
@@ -2,45 +2,35 @@
  * Universal Flash Storage Host controller driver
  *
  * This code is based on drivers/scsi/ufs/ufshci.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
  *
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
  */
 
 #ifndef _UFSHCI_H
index a43415a..4c0f6d8 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/delay.h>
 #include <linux/export.h>
 #ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
 #endif
 
 #include "ssb_private.h"
@@ -322,7 +322,7 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
        if (bus->bustype == SSB_BUSTYPE_SSB) {
 #ifdef CONFIG_BCM47XX
                char buf[20];
-               if (nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
+               if (bcm47xx_nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
                        crystalfreq = simple_strtoul(buf, NULL, 0);
 #endif
        }
index 538ebe2..24456a0 100644 (file)
@@ -2880,7 +2880,6 @@ static int binder_release(struct inode *nodp, struct file *filp)
 
 static void binder_deferred_release(struct binder_proc *proc)
 {
-       struct hlist_node *pos;
        struct binder_transaction *t;
        struct rb_node *n;
        int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
@@ -2924,7 +2923,7 @@ static void binder_deferred_release(struct binder_proc *proc)
                        node->local_weak_refs = 0;
                        hlist_add_head(&node->dead_node, &binder_dead_nodes);
 
-                       hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+                       hlist_for_each_entry(ref, &node->refs, node_entry) {
                                incoming_refs++;
                                if (ref->death) {
                                        death++;
@@ -3156,12 +3155,11 @@ static void print_binder_thread(struct seq_file *m,
 static void print_binder_node(struct seq_file *m, struct binder_node *node)
 {
        struct binder_ref *ref;
-       struct hlist_node *pos;
        struct binder_work *w;
        int count;
 
        count = 0;
-       hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+       hlist_for_each_entry(ref, &node->refs, node_entry)
                count++;
 
        seq_printf(m, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
@@ -3171,7 +3169,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
                   node->internal_strong_refs, count);
        if (count) {
                seq_puts(m, " proc");
-               hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+               hlist_for_each_entry(ref, &node->refs, node_entry)
                        seq_printf(m, " %d", ref->proc->pid);
        }
        seq_puts(m, "\n");
@@ -3369,7 +3367,6 @@ static void print_binder_proc_stats(struct seq_file *m,
 static int binder_state_show(struct seq_file *m, void *unused)
 {
        struct binder_proc *proc;
-       struct hlist_node *pos;
        struct binder_node *node;
        int do_lock = !binder_debug_no_lock;
 
@@ -3380,10 +3377,10 @@ static int binder_state_show(struct seq_file *m, void *unused)
 
        if (!hlist_empty(&binder_dead_nodes))
                seq_puts(m, "dead nodes:\n");
-       hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+       hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
                print_binder_node(m, node);
 
-       hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+       hlist_for_each_entry(proc, &binder_procs, proc_node)
                print_binder_proc(m, proc, 1);
        if (do_lock)
                binder_unlock(__func__);
@@ -3393,7 +3390,6 @@ static int binder_state_show(struct seq_file *m, void *unused)
 static int binder_stats_show(struct seq_file *m, void *unused)
 {
        struct binder_proc *proc;
-       struct hlist_node *pos;
        int do_lock = !binder_debug_no_lock;
 
        if (do_lock)
@@ -3403,7 +3399,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
 
        print_binder_stats(m, "", &binder_stats);
 
-       hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+       hlist_for_each_entry(proc, &binder_procs, proc_node)
                print_binder_proc_stats(m, proc);
        if (do_lock)
                binder_unlock(__func__);
@@ -3413,14 +3409,13 @@ static int binder_stats_show(struct seq_file *m, void *unused)
 static int binder_transactions_show(struct seq_file *m, void *unused)
 {
        struct binder_proc *proc;
-       struct hlist_node *pos;
        int do_lock = !binder_debug_no_lock;
 
        if (do_lock)
                binder_lock(__func__);
 
        seq_puts(m, "binder transactions:\n");
-       hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+       hlist_for_each_entry(proc, &binder_procs, proc_node)
                print_binder_proc(m, proc, 0);
        if (do_lock)
                binder_unlock(__func__);
index b5c2c4c..d23eeeb 100644 (file)
@@ -185,7 +185,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u
                BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path);
                return -ENOENT;
        }
-       BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc);
+       BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)file_inode(flp)->i_size, loc);
        do_gettimeofday(&tv);
 
        BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "download start %lx", ((tv.tv_sec * 1000) + (tv.tv_usec / 1000)));
index 4f1142e..20bc2b4 100644 (file)
@@ -998,7 +998,7 @@ static int do_synchronize_cache(struct fsg_common *common)
 static void invalidate_sub(struct fsg_lun *curlun)
 {
        struct file     *filp = curlun->filp;
-       struct inode    *inode = filp->f_path.dentry->d_inode;
+       struct inode    *inode = file_inode(filp);
        unsigned long   rc;
 
        rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
index e4192b8..d9297ee 100644 (file)
@@ -1065,7 +1065,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
 static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
                                size_t count, loff_t *ppos)
 {
-       rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
+       rndis_params *p = PDE(file_inode(file))->data;
        u32 speed = 0;
        int i, fl_speed = 0;
 
index 8d9bcd8..abb01ac 100644 (file)
@@ -656,7 +656,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
        if (!(filp->f_mode & FMODE_WRITE))
                ro = 1;
 
-       inode = filp->f_path.dentry->d_inode;
+       inode = file_inode(filp);
        if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
                LINFO(curlun, "invalid file type: %s\n", filename);
                goto out;
index 195d56d..e336b28 100644 (file)
@@ -580,7 +580,7 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
                            struct comedi_devinfo __user *arg,
                            struct file *file)
 {
-       const unsigned minor = iminor(file->f_dentry->d_inode);
+       const unsigned minor = iminor(file_inode(file));
        struct comedi_file_info *info = comedi_file_info_from_minor(minor);
        struct comedi_subdevice *s;
        struct comedi_devinfo devinfo;
@@ -1615,7 +1615,7 @@ static int do_poll_ioctl(struct comedi_device *dev, unsigned int arg,
 static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
 {
-       const unsigned minor = iminor(file->f_dentry->d_inode);
+       const unsigned minor = iminor(file_inode(file));
        struct comedi_file_info *info = comedi_file_info_from_minor(minor);
        struct comedi_device *dev = comedi_dev_from_file_info(info);
        int rc;
@@ -1743,7 +1743,7 @@ static struct vm_operations_struct comedi_vm_ops = {
 
 static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       const unsigned minor = iminor(file->f_dentry->d_inode);
+       const unsigned minor = iminor(file_inode(file));
        struct comedi_file_info *info = comedi_file_info_from_minor(minor);
        struct comedi_device *dev = comedi_dev_from_file_info(info);
        struct comedi_subdevice *s;
@@ -1823,7 +1823,7 @@ done:
 static unsigned int comedi_poll(struct file *file, poll_table *wait)
 {
        unsigned int mask = 0;
-       const unsigned minor = iminor(file->f_dentry->d_inode);
+       const unsigned minor = iminor(file_inode(file));
        struct comedi_file_info *info = comedi_file_info_from_minor(minor);
        struct comedi_device *dev = comedi_dev_from_file_info(info);
        struct comedi_subdevice *s;
@@ -1869,7 +1869,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
        struct comedi_async *async;
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
-       const unsigned minor = iminor(file->f_dentry->d_inode);
+       const unsigned minor = iminor(file_inode(file));
        struct comedi_file_info *info = comedi_file_info_from_minor(minor);
        struct comedi_device *dev = comedi_dev_from_file_info(info);
 
@@ -1964,7 +1964,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
        struct comedi_async *async;
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
-       const unsigned minor = iminor(file->f_dentry->d_inode);
+       const unsigned minor = iminor(file_inode(file));
        struct comedi_file_info *info = comedi_file_info_from_minor(minor);
        struct comedi_device *dev = comedi_dev_from_file_info(info);
 
@@ -2133,7 +2133,7 @@ ok:
 
 static int comedi_fasync(int fd, struct file *file, int on)
 {
-       const unsigned minor = iminor(file->f_dentry->d_inode);
+       const unsigned minor = iminor(file_inode(file));
        struct comedi_device *dev = comedi_dev_from_minor(minor);
 
        if (!dev)
index 13c7ccf..73f287f 100644 (file)
@@ -357,7 +357,7 @@ static int dgrp_gen_proc_open(struct inode *inode, struct file *file)
        struct dgrp_proc_entry *entry;
        int ret = 0;
 
-       de = (struct proc_dir_entry *) PDE(file->f_dentry->d_inode);
+       de = (struct proc_dir_entry *) PDE(file_inode(file));
        if (!de || !de->data) {
                ret = -ENXIO;
                goto done;
@@ -387,7 +387,7 @@ static int dgrp_gen_proc_close(struct inode *inode, struct file *file)
        struct proc_dir_entry *de;
        struct dgrp_proc_entry *entry;
 
-       de = (struct proc_dir_entry *) PDE(file->f_dentry->d_inode);
+       de = (struct proc_dir_entry *) PDE(file_inode(file));
        if (!de || !de->data)
                goto done;
 
index 75aa5bf..539fa57 100644 (file)
@@ -411,7 +411,7 @@ struct socket *sockfd_to_socket(unsigned int sockfd)
                return NULL;
        }
 
-       inode = file->f_dentry->d_inode;
+       inode = file_inode(file);
 
        if (!inode || !S_ISSOCK(inode->i_mode)) {
                fput(file);
index 57474cf..d074b1e 100644 (file)
@@ -318,7 +318,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
 static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
                        loff_t *ppos)
 {
-       unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+       unsigned int minor = MINOR(file_inode(file)->i_rdev);
        ssize_t retval;
        size_t image_size;
        size_t okcount;
@@ -364,7 +364,7 @@ static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
 static ssize_t vme_user_write(struct file *file, const char __user *buf,
                        size_t count, loff_t *ppos)
 {
-       unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+       unsigned int minor = MINOR(file_inode(file)->i_rdev);
        ssize_t retval;
        size_t image_size;
        size_t okcount;
@@ -410,7 +410,7 @@ static ssize_t vme_user_write(struct file *file, const char __user *buf,
 static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
 {
        loff_t absolute = -1;
-       unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
+       unsigned int minor = MINOR(file_inode(file)->i_rdev);
        size_t image_size;
 
        if (minor == CONTROL_MINOR)
@@ -583,7 +583,7 @@ vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        int ret;
 
        mutex_lock(&vme_user_mutex);
-       ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
+       ret = vme_user_ioctl(file_inode(file), file, cmd, arg);
        mutex_unlock(&vme_user_mutex);
 
        return ret;
index 23a98e6..7ea246a 100644 (file)
@@ -144,23 +144,24 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
        spin_lock_init(&tiqn->login_stats.lock);
        spin_lock_init(&tiqn->logout_stats.lock);
 
-       if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
-               pr_err("idr_pre_get() for tiqn_idr failed\n");
-               kfree(tiqn);
-               return ERR_PTR(-ENOMEM);
-       }
        tiqn->tiqn_state = TIQN_STATE_ACTIVE;
 
+       idr_preload(GFP_KERNEL);
        spin_lock(&tiqn_lock);
-       ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+
+       ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
        if (ret < 0) {
-               pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+               pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
                spin_unlock(&tiqn_lock);
+               idr_preload_end();
                kfree(tiqn);
                return ERR_PTR(ret);
        }
+       tiqn->tiqn_index = ret;
        list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+
        spin_unlock(&tiqn_lock);
+       idr_preload_end();
 
        pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
 
@@ -3583,6 +3584,10 @@ check_rsp_state:
                                spin_lock_bh(&cmd->istate_lock);
                                cmd->i_state = ISTATE_SENT_STATUS;
                                spin_unlock_bh(&cmd->istate_lock);
+
+                               if (atomic_read(&conn->check_immediate_queue))
+                                       return 1;
+
                                continue;
                        } else if (ret == 2) {
                                /* Still must send status,
@@ -3672,7 +3677,7 @@ check_rsp_state:
                }
 
                if (atomic_read(&conn->check_immediate_queue))
-                       break;
+                       return 1;
        }
 
        return 0;
@@ -3716,12 +3721,15 @@ restart:
                     signal_pending(current))
                        goto transport_err;
 
+get_immediate:
                ret = handle_immediate_queue(conn);
                if (ret < 0)
                        goto transport_err;
 
                ret = handle_response_queue(conn);
-               if (ret == -EAGAIN)
+               if (ret == 1)
+                       goto get_immediate;
+               else if (ret == -EAGAIN)
                        goto restart;
                else if (ret < 0)
                        goto transport_err;
index fdb632f..2535d4d 100644 (file)
@@ -247,19 +247,16 @@ static int iscsi_login_zero_tsih_s1(
        spin_lock_init(&sess->session_usage_lock);
        spin_lock_init(&sess->ttt_lock);
 
-       if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
-               pr_err("idr_pre_get() for sess_idr failed\n");
-               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-                               ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               kfree(sess);
-               return -ENOMEM;
-       }
+       idr_preload(GFP_KERNEL);
        spin_lock_bh(&sess_idr_lock);
-       ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
+       ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
+       if (ret >= 0)
+               sess->session_index = ret;
        spin_unlock_bh(&sess_idr_lock);
+       idr_preload_end();
 
        if (ret < 0) {
-               pr_err("idr_get_new() for sess_idr failed\n");
+               pr_err("idr_alloc() for sess_idr failed\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                kfree(sess);
index 6917a9e..d3536f5 100644 (file)
@@ -2598,7 +2598,7 @@ static int __init sbp_init(void)
        return 0;
 };
 
-static void sbp_exit(void)
+static void __exit sbp_exit(void)
 {
        sbp_deregister_configfs();
 };
index 7d4ec02..ff1c5ee 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/spinlock.h>
 #include <linux/configfs.h>
 #include <linux/export.h>
+#include <linux/file.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <asm/unaligned.h>
@@ -715,36 +716,18 @@ static int core_alua_write_tpg_metadata(
        unsigned char *md_buf,
        u32 md_buf_len)
 {
-       mm_segment_t old_fs;
-       struct file *file;
-       struct iovec iov[1];
-       int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
-
-       memset(iov, 0, sizeof(struct iovec));
+       struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
+       int ret;
 
-       file = filp_open(path, flags, 0600);
-       if (IS_ERR(file) || !file || !file->f_dentry) {
-               pr_err("filp_open(%s) for ALUA metadata failed\n",
-                       path);
+       if (IS_ERR(file)) {
+               pr_err("filp_open(%s) for ALUA metadata failed\n", path);
                return -ENODEV;
        }
-
-       iov[0].iov_base = &md_buf[0];
-       iov[0].iov_len = md_buf_len;
-
-       old_fs = get_fs();
-       set_fs(get_ds());
-       ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
-       set_fs(old_fs);
-
-       if (ret < 0) {
+       ret = kernel_write(file, md_buf, md_buf_len, 0);
+       if (ret < 0)
                pr_err("Error writing ALUA metadata file: %s\n", path);
-               filp_close(file, NULL);
-               return -EIO;
-       }
-       filp_close(file, NULL);
-
-       return 0;
+       fput(file);
+       return ret ? -EIO : 0;
 }
 
 /*
index ca36a38..17a6acb 100644 (file)
@@ -270,7 +270,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
                 * the expected virt_size for struct file w/o a backing struct
                 * block_device.
                 */
-               if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+               if (S_ISBLK(file_inode(fd)->i_mode)) {
                        if (ret < 0 || ret != cmd->data_length) {
                                pr_err("%s() returned %d, expecting %u for "
                                                "S_ISBLK\n", __func__, ret,
@@ -631,7 +631,7 @@ static int __init fileio_module_init(void)
        return transport_subsystem_register(&fileio_template);
 }
 
-static void fileio_module_exit(void)
+static void __exit fileio_module_exit(void)
 {
        transport_subsystem_release(&fileio_template);
 }
index c73f4a9..8bcc514 100644 (file)
@@ -821,7 +821,7 @@ static int __init iblock_module_init(void)
        return transport_subsystem_register(&iblock_template);
 }
 
-static void iblock_module_exit(void)
+static void __exit iblock_module_exit(void)
 {
        transport_subsystem_release(&iblock_template);
 }
index 8e0290b..3240f2c 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/file.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <asm/unaligned.h>
@@ -1957,13 +1958,10 @@ static int __core_scsi3_write_aptpl_to_file(
 {
        struct t10_wwn *wwn = &dev->t10_wwn;
        struct file *file;
-       struct iovec iov[1];
-       mm_segment_t old_fs;
        int flags = O_RDWR | O_CREAT | O_TRUNC;
        char path[512];
        int ret;
 
-       memset(iov, 0, sizeof(struct iovec));
        memset(path, 0, 512);
 
        if (strlen(&wwn->unit_serial[0]) >= 512) {
@@ -1974,31 +1972,22 @@ static int __core_scsi3_write_aptpl_to_file(
 
        snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
        file = filp_open(path, flags, 0600);
-       if (IS_ERR(file) || !file || !file->f_dentry) {
+       if (IS_ERR(file)) {
                pr_err("filp_open(%s) for APTPL metadata"
                        " failed\n", path);
-               return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
+               return PTR_ERR(file);
        }
 
-       iov[0].iov_base = &buf[0];
        if (!pr_aptpl_buf_len)
-               iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
-       else
-               iov[0].iov_len = pr_aptpl_buf_len;
+               pr_aptpl_buf_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
 
-       old_fs = get_fs();
-       set_fs(get_ds());
-       ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
-       set_fs(old_fs);
+       ret = kernel_write(file, buf, pr_aptpl_buf_len, 0);
 
-       if (ret < 0) {
+       if (ret < 0)
                pr_debug("Error writing APTPL metadata file: %s\n", path);
-               filp_close(file, NULL);
-               return -EIO;
-       }
-       filp_close(file, NULL);
+       fput(file);
 
-       return 0;
+       return ret ? -EIO : 0;
 }
 
 static int
index 2bcfd79..82e78d7 100644 (file)
@@ -840,14 +840,14 @@ static void pscsi_bi_endio(struct bio *bio, int error)
        bio_put(bio);
 }
 
-static inline struct bio *pscsi_get_bio(int sg_num)
+static inline struct bio *pscsi_get_bio(int nr_vecs)
 {
        struct bio *bio;
        /*
         * Use bio_malloc() following the comment in for bio -> struct request
         * in block/blk-core.c:blk_make_request()
         */
-       bio = bio_kmalloc(GFP_KERNEL, sg_num);
+       bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
        if (!bio) {
                pr_err("PSCSI: bio_kmalloc() failed\n");
                return NULL;
@@ -940,7 +940,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                bio = NULL;
                        }
 
-                       page++;
                        len -= bytes;
                        data_len -= bytes;
                        off = 0;
@@ -952,7 +951,6 @@ fail:
        while (*hbio) {
                bio = *hbio;
                *hbio = (*hbio)->bi_next;
-               bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1092,7 +1090,6 @@ fail_free_bio:
        while (hbio) {
                struct bio *bio = hbio;
                hbio = hbio->bi_next;
-               bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1178,7 +1175,7 @@ static int __init pscsi_module_init(void)
        return transport_subsystem_register(&pscsi_template);
 }
 
-static void pscsi_module_exit(void)
+static void __exit pscsi_module_exit(void)
 {
        transport_subsystem_release(&pscsi_template);
 }
index 6659dd3..113f335 100644 (file)
@@ -169,7 +169,6 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
 {
        struct ft_tport *tport;
        struct hlist_head *head;
-       struct hlist_node *pos;
        struct ft_sess *sess;
 
        rcu_read_lock();
@@ -178,7 +177,7 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
                goto out;
 
        head = &tport->hash[ft_sess_hash(port_id)];
-       hlist_for_each_entry_rcu(sess, pos, head, hash) {
+       hlist_for_each_entry_rcu(sess, head, hash) {
                if (sess->port_id == port_id) {
                        kref_get(&sess->kref);
                        rcu_read_unlock();
@@ -201,10 +200,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 {
        struct ft_sess *sess;
        struct hlist_head *head;
-       struct hlist_node *pos;
 
        head = &tport->hash[ft_sess_hash(port_id)];
-       hlist_for_each_entry_rcu(sess, pos, head, hash)
+       hlist_for_each_entry_rcu(sess, head, hash)
                if (sess->port_id == port_id)
                        return sess;
 
@@ -253,11 +251,10 @@ static void ft_sess_unhash(struct ft_sess *sess)
 static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
 {
        struct hlist_head *head;
-       struct hlist_node *pos;
        struct ft_sess *sess;
 
        head = &tport->hash[ft_sess_hash(port_id)];
-       hlist_for_each_entry_rcu(sess, pos, head, hash) {
+       hlist_for_each_entry_rcu(sess, head, hash) {
                if (sess->port_id == port_id) {
                        ft_sess_unhash(sess);
                        return sess;
@@ -273,12 +270,11 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
 static void ft_sess_delete_all(struct ft_tport *tport)
 {
        struct hlist_head *head;
-       struct hlist_node *pos;
        struct ft_sess *sess;
 
        for (head = tport->hash;
             head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
-               hlist_for_each_entry_rcu(sess, pos, head, hash) {
+               hlist_for_each_entry_rcu(sess, head, hash) {
                        ft_sess_unhash(sess);
                        transport_deregister_session_configfs(sess->se_sess);
                        ft_sess_put(sess);      /* release from table */
index c2c77d1..a764f16 100644 (file)
@@ -29,14 +29,14 @@ choice
 
 config THERMAL_DEFAULT_GOV_STEP_WISE
        bool "step_wise"
-       select STEP_WISE
+       select THERMAL_GOV_STEP_WISE
        help
          Use the step_wise governor as default. This throttles the
          devices one step at a time.
 
 config THERMAL_DEFAULT_GOV_FAIR_SHARE
        bool "fair_share"
-       select FAIR_SHARE
+       select THERMAL_GOV_FAIR_SHARE
        help
          Use the fair_share governor as default. This throttles the
          devices based on their 'contribution' to a zone. The
@@ -44,24 +44,24 @@ config THERMAL_DEFAULT_GOV_FAIR_SHARE
 
 config THERMAL_DEFAULT_GOV_USER_SPACE
        bool "user_space"
-       select USER_SPACE
+       select THERMAL_GOV_USER_SPACE
        help
          Select this if you want to let the user space manage the
          lpatform thermals.
 
 endchoice
 
-config FAIR_SHARE
+config THERMAL_GOV_FAIR_SHARE
        bool "Fair-share thermal governor"
        help
          Enable this to manage platform thermals using fair-share governor.
 
-config STEP_WISE
+config THERMAL_GOV_STEP_WISE
        bool "Step_wise thermal governor"
        help
          Enable this to manage platform thermals using a simple linear
 
-config USER_SPACE
+config THERMAL_GOV_USER_SPACE
        bool "User_space thermal governor"
        help
          Enable this to let the user space manage the platform thermals.
@@ -78,6 +78,14 @@ config CPU_THERMAL
          and not the ACPI interface.
          If you want this support, you should say Y here.
 
+config THERMAL_EMULATION
+       bool "Thermal emulation mode support"
+       help
+         Enable this option to make a emul_temp sysfs node in thermal zone
+         directory to support temperature emulation. With emulation sysfs node,
+         user can manually input temperature and test the different trip
+         threshold behaviour for simulation purpose.
+
 config SPEAR_THERMAL
        bool "SPEAr thermal sensor driver"
        depends on PLAT_SPEAR
@@ -93,6 +101,14 @@ config RCAR_THERMAL
          Enable this to plug the R-Car thermal sensor driver into the Linux
          thermal framework
 
+config KIRKWOOD_THERMAL
+       tristate "Temperature sensor on Marvell Kirkwood SoCs"
+       depends on ARCH_KIRKWOOD
+       depends on OF
+       help
+         Support for the Kirkwood thermal sensor driver into the Linux thermal
+         framework. Only kirkwood 88F6282 and 88F6283 have this sensor.
+
 config EXYNOS_THERMAL
        tristate "Temperature sensor on Samsung EXYNOS"
        depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5)
@@ -101,6 +117,23 @@ config EXYNOS_THERMAL
          If you say yes here you get support for TMU (Thermal Management
          Unit) on SAMSUNG EXYNOS series of SoC.
 
+config EXYNOS_THERMAL_EMUL
+       bool "EXYNOS TMU emulation mode support"
+       depends on EXYNOS_THERMAL
+       help
+         Exynos 4412 and 4414 and 5 series has emulation mode on TMU.
+         Enable this option will be make sysfs node in exynos thermal platform
+         device directory to support emulation mode. With emulation mode sysfs
+         node, you can manually input temperature to TMU for simulation purpose.
+
+config DOVE_THERMAL
+       tristate "Temperature sensor on Marvell Dove SoCs"
+       depends on ARCH_DOVE
+       depends on OF
+       help
+         Support for the Dove thermal sensor driver in the Linux thermal
+         framework.
+
 config DB8500_THERMAL
        bool "DB8500 thermal management"
        depends on ARCH_U8500
@@ -122,4 +155,14 @@ config DB8500_CPUFREQ_COOLING
          bound cpufreq cooling device turns active to set CPU frequency low to
          cool down the CPU.
 
+config INTEL_POWERCLAMP
+       tristate "Intel PowerClamp idle injection driver"
+       depends on THERMAL
+       depends on X86
+       depends on CPU_SUP_INTEL
+       help
+         Enable this to use the Intel PowerClamp idle injection driver. This
+         enforces idle time which results in more package C-state residency. The
+         user interface is exposed via generic thermal framework.
+
 endif
index d8da683..d3a2b38 100644 (file)
@@ -5,9 +5,9 @@
 obj-$(CONFIG_THERMAL)          += thermal_sys.o
 
 # governors
-obj-$(CONFIG_FAIR_SHARE)       += fair_share.o
-obj-$(CONFIG_STEP_WISE)                += step_wise.o
-obj-$(CONFIG_USER_SPACE)       += user_space.o
+obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE)   += fair_share.o
+obj-$(CONFIG_THERMAL_GOV_STEP_WISE)    += step_wise.o
+obj-$(CONFIG_THERMAL_GOV_USER_SPACE)   += user_space.o
 
 # cpufreq cooling
 obj-$(CONFIG_CPU_THERMAL)      += cpu_cooling.o
@@ -15,6 +15,10 @@ obj-$(CONFIG_CPU_THERMAL)    += cpu_cooling.o
 # platform thermal drivers
 obj-$(CONFIG_SPEAR_THERMAL)    += spear_thermal.o
 obj-$(CONFIG_RCAR_THERMAL)     += rcar_thermal.o
+obj-$(CONFIG_KIRKWOOD_THERMAL)  += kirkwood_thermal.o
 obj-$(CONFIG_EXYNOS_THERMAL)   += exynos_thermal.o
+obj-$(CONFIG_DOVE_THERMAL)     += dove_thermal.o
 obj-$(CONFIG_DB8500_THERMAL)   += db8500_thermal.o
 obj-$(CONFIG_DB8500_CPUFREQ_COOLING)   += db8500_cpufreq_cooling.o
+obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
+
index 836828e..8dc44cb 100644 (file)
@@ -73,21 +73,14 @@ static struct cpufreq_cooling_device *notify_device;
  */
 static int get_idr(struct idr *idr, int *id)
 {
-       int err;
-again:
-       if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
-               return -ENOMEM;
+       int ret;
 
        mutex_lock(&cooling_cpufreq_lock);
-       err = idr_get_new(idr, NULL, id);
+       ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
        mutex_unlock(&cooling_cpufreq_lock);
-
-       if (unlikely(err == -EAGAIN))
-               goto again;
-       else if (unlikely(err))
-               return err;
-
-       *id = *id & MAX_IDR_MASK;
+       if (unlikely(ret < 0))
+               return ret;
+       *id = ret;
        return 0;
 }
 
@@ -118,8 +111,8 @@ static int is_cpufreq_valid(int cpu)
 /**
  * get_cpu_frequency - get the absolute value of frequency from level.
  * @cpu: cpu for which frequency is fetched.
- * @level: level of frequency of the CPU
- *     e.g level=1 --> 1st MAX FREQ, LEVEL=2 ---> 2nd MAX FREQ, .... etc
+ * @level: level of frequency, equals cooling state of cpu cooling device
+ *     e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
  */
 static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
 {
index 4cf8e72..2141985 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -73,15 +74,13 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
        { .compatible = "stericsson,db8500-cpufreq-cooling" },
        {},
 };
-#else
-#define db8500_cpufreq_cooling_match NULL
 #endif
 
 static struct platform_driver db8500_cpufreq_cooling_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "db8500-cpufreq-cooling",
-               .of_match_table = db8500_cpufreq_cooling_match,
+               .of_match_table = of_match_ptr(db8500_cpufreq_cooling_match),
        },
        .probe = db8500_cpufreq_cooling_probe,
        .suspend = db8500_cpufreq_cooling_suspend,
index ec71ade..61ce60a 100644 (file)
@@ -508,15 +508,13 @@ static const struct of_device_id db8500_thermal_match[] = {
        { .compatible = "stericsson,db8500-thermal" },
        {},
 };
-#else
-#define db8500_thermal_match NULL
 #endif
 
 static struct platform_driver db8500_thermal_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "db8500-thermal",
-               .of_match_table = db8500_thermal_match,
+               .of_match_table = of_match_ptr(db8500_thermal_match),
        },
        .probe = db8500_thermal_probe,
        .suspend = db8500_thermal_suspend,
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
new file mode 100644 (file)
index 0000000..7b0bfa0
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * Dove thermal sensor driver
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#define DOVE_THERMAL_TEMP_OFFSET       1
+#define DOVE_THERMAL_TEMP_MASK         0x1FF
+
+/* Dove Thermal Manager Control and Status Register */
+#define PMU_TM_DISABLE_OFFS            0
+#define PMU_TM_DISABLE_MASK            (0x1 << PMU_TM_DISABLE_OFFS)
+
+/* Dove Thermal Diode Control 0 Register */
+#define PMU_TDC0_SW_RST_MASK           (0x1 << 1)
+#define PMU_TDC0_SEL_VCAL_OFFS         5
+#define PMU_TDC0_SEL_VCAL_MASK         (0x3 << PMU_TDC0_SEL_VCAL_OFFS)
+#define PMU_TDC0_REF_CAL_CNT_OFFS      11
+#define PMU_TDC0_REF_CAL_CNT_MASK      (0x1FF << PMU_TDC0_REF_CAL_CNT_OFFS)
+#define PMU_TDC0_AVG_NUM_OFFS          25
+#define PMU_TDC0_AVG_NUM_MASK          (0x7 << PMU_TDC0_AVG_NUM_OFFS)
+
+/* Dove Thermal Diode Control 1 Register */
+#define PMU_TEMP_DIOD_CTRL1_REG                0x04
+#define PMU_TDC1_TEMP_VALID_MASK       (0x1 << 10)
+
+/* Dove Thermal Sensor Dev Structure */
+struct dove_thermal_priv {
+       void __iomem *sensor;
+       void __iomem *control;
+};
+
+static int dove_init_sensor(const struct dove_thermal_priv *priv)
+{
+       u32 reg;
+       u32 i;
+
+       /* Configure the Diode Control Register #0 */
+       reg = readl_relaxed(priv->control);
+
+       /* Use average of 2 */
+       reg &= ~PMU_TDC0_AVG_NUM_MASK;
+       reg |= (0x1 << PMU_TDC0_AVG_NUM_OFFS);
+
+       /* Reference calibration value */
+       reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+       reg |= (0x0F1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+
+       /* Set the high level reference for calibration */
+       reg &= ~PMU_TDC0_SEL_VCAL_MASK;
+       reg |= (0x2 << PMU_TDC0_SEL_VCAL_OFFS);
+       writel(reg, priv->control);
+
+       /* Reset the sensor */
+       reg = readl_relaxed(priv->control);
+       writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
+       writel(reg, priv->control);
+
+       /* Enable the sensor */
+       reg = readl_relaxed(priv->sensor);
+       reg &= ~PMU_TM_DISABLE_MASK;
+       writel(reg, priv->sensor);
+
+       /* Poll the sensor for the first reading */
+       for (i = 0; i < 1000000; i++) {
+               reg = readl_relaxed(priv->sensor);
+               if (reg & DOVE_THERMAL_TEMP_MASK)
+                       break;
+       }
+
+       if (i == 1000000)
+               return -EIO;
+
+       return 0;
+}
+
+static int dove_get_temp(struct thermal_zone_device *thermal,
+                         unsigned long *temp)
+{
+       unsigned long reg;
+       struct dove_thermal_priv *priv = thermal->devdata;
+
+       /* Valid check */
+       reg = readl_relaxed(priv->control + PMU_TEMP_DIOD_CTRL1_REG);
+       if ((reg & PMU_TDC1_TEMP_VALID_MASK) == 0x0) {
+               dev_err(&thermal->device,
+                       "Temperature sensor reading not valid\n");
+               return -EIO;
+       }
+
+       /*
+        * Calculate temperature. See Section 8.10.1 of 88AP510,
+        * Documentation/arm/Marvell/README
+        */
+       reg = readl_relaxed(priv->sensor);
+       reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK;
+       *temp = ((2281638UL - (7298*reg)) / 10);
+
+       return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+       .get_temp = dove_get_temp,
+};
+
+static const struct of_device_id dove_thermal_id_table[] = {
+       { .compatible = "marvell,dove-thermal" },
+       {}
+};
+
+static int dove_thermal_probe(struct platform_device *pdev)
+{
+       struct thermal_zone_device *thermal = NULL;
+       struct dove_thermal_priv *priv;
+       struct resource *res;
+       int ret;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get platform resource\n");
+               return -ENODEV;
+       }
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
+       if (!priv->sensor) {
+               dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+               return -EADDRNOTAVAIL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get platform resource\n");
+               return -ENODEV;
+       }
+       priv->control = devm_request_and_ioremap(&pdev->dev, res);
+       if (!priv->control) {
+               dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+               return -EADDRNOTAVAIL;
+       }
+
+       ret = dove_init_sensor(priv);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to initialize sensor\n");
+               return ret;
+       }
+
+       thermal = thermal_zone_device_register("dove_thermal", 0, 0,
+                                              priv, &ops, NULL, 0, 0);
+       if (IS_ERR(thermal)) {
+               dev_err(&pdev->dev,
+                       "Failed to register thermal zone device\n");
+               return PTR_ERR(thermal);
+       }
+
+       platform_set_drvdata(pdev, thermal);
+
+       return 0;
+}
+
+static int dove_thermal_exit(struct platform_device *pdev)
+{
+       struct thermal_zone_device *dove_thermal =
+               platform_get_drvdata(pdev);
+
+       thermal_zone_device_unregister(dove_thermal);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+MODULE_DEVICE_TABLE(of, dove_thermal_id_table);
+
+static struct platform_driver dove_thermal_driver = {
+       .probe = dove_thermal_probe,
+       .remove = dove_thermal_exit,
+       .driver = {
+               .name = "dove_thermal",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(dove_thermal_id_table),
+       },
+};
+
+module_platform_driver(dove_thermal_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Dove thermal driver");
+MODULE_LICENSE("GPL");
index bada130..e04ebd8 100644 (file)
@@ -82,7 +82,7 @@
 
 #define EXYNOS_TRIMINFO_RELOAD         0x1
 #define EXYNOS_TMU_CLEAR_RISE_INT      0x111
-#define EXYNOS_TMU_CLEAR_FALL_INT      (0x111 << 16)
+#define EXYNOS_TMU_CLEAR_FALL_INT      (0x111 << 12)
 #define EXYNOS_MUX_ADDR_VALUE          6
 #define EXYNOS_MUX_ADDR_SHIFT          20
 #define EXYNOS_TMU_TRIP_MODE_SHIFT     13
 #define SENSOR_NAME_LEN        16
 #define MAX_TRIP_COUNT 8
 #define MAX_COOLING_DEVICE 4
+#define MAX_THRESHOLD_LEVS 4
 
 #define ACTIVE_INTERVAL 500
 #define IDLE_INTERVAL 10000
 #define MCELSIUS       1000
 
+#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+#define EXYNOS_EMUL_TIME       0x57F0
+#define EXYNOS_EMUL_TIME_SHIFT 16
+#define EXYNOS_EMUL_DATA_SHIFT 8
+#define EXYNOS_EMUL_DATA_MASK  0xFF
+#define EXYNOS_EMUL_ENABLE     0x1
+#endif /* CONFIG_EXYNOS_THERMAL_EMUL */
+
 /* CPU Zone information */
 #define PANIC_ZONE      4
 #define WARN_ZONE       3
@@ -125,6 +134,7 @@ struct exynos_tmu_data {
 struct thermal_trip_point_conf {
        int trip_val[MAX_TRIP_COUNT];
        int trip_count;
+       u8 trigger_falling;
 };
 
 struct thermal_cooling_conf {
@@ -174,7 +184,8 @@ static int exynos_set_mode(struct thermal_zone_device *thermal,
 
        mutex_lock(&th_zone->therm_dev->lock);
 
-       if (mode == THERMAL_DEVICE_ENABLED)
+       if (mode == THERMAL_DEVICE_ENABLED &&
+               !th_zone->sensor_conf->trip_data.trigger_falling)
                th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
        else
                th_zone->therm_dev->polling_delay = 0;
@@ -284,7 +295,7 @@ static int exynos_bind(struct thermal_zone_device *thermal,
                case MONITOR_ZONE:
                case WARN_ZONE:
                        if (thermal_zone_bind_cooling_device(thermal, i, cdev,
-                                                               level, level)) {
+                                                               level, 0)) {
                                pr_err("error binding cdev inst %d\n", i);
                                ret = -EINVAL;
                        }
@@ -362,10 +373,17 @@ static int exynos_get_temp(struct thermal_zone_device *thermal,
 static int exynos_get_trend(struct thermal_zone_device *thermal,
                        int trip, enum thermal_trend *trend)
 {
-       if (thermal->temperature >= trip)
-               *trend = THERMAL_TREND_RAISING;
+       int ret;
+       unsigned long trip_temp;
+
+       ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
+       if (ret < 0)
+               return ret;
+
+       if (thermal->temperature >= trip_temp)
+               *trend = THERMAL_TREND_RAISE_FULL;
        else
-               *trend = THERMAL_TREND_DROPPING;
+               *trend = THERMAL_TREND_DROP_FULL;
 
        return 0;
 }
@@ -413,7 +431,8 @@ static void exynos_report_trigger(void)
                        break;
        }
 
-       if (th_zone->mode == THERMAL_DEVICE_ENABLED) {
+       if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
+               !th_zone->sensor_conf->trip_data.trigger_falling) {
                if (i > 0)
                        th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
                else
@@ -452,7 +471,8 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
 
        th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
                        EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0,
-                       IDLE_INTERVAL);
+                       sensor_conf->trip_data.trigger_falling ?
+                       0 : IDLE_INTERVAL);
 
        if (IS_ERR(th_zone->therm_dev)) {
                pr_err("Failed to register thermal zone device\n");
@@ -559,8 +579,9 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
        struct exynos_tmu_platform_data *pdata = data->pdata;
-       unsigned int status, trim_info, rising_threshold;
-       int ret = 0, threshold_code;
+       unsigned int status, trim_info;
+       unsigned int rising_threshold = 0, falling_threshold = 0;
+       int ret = 0, threshold_code, i, trigger_levs = 0;
 
        mutex_lock(&data->lock);
        clk_enable(data->clk);
@@ -585,6 +606,11 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
                        (data->temp_error2 != 0))
                data->temp_error1 = pdata->efuse_value;
 
+       /* Count trigger levels to be enabled */
+       for (i = 0; i < MAX_THRESHOLD_LEVS; i++)
+               if (pdata->trigger_levels[i])
+                       trigger_levs++;
+
        if (data->soc == SOC_ARCH_EXYNOS4210) {
                /* Write temperature code for threshold */
                threshold_code = temp_to_code(data, pdata->threshold);
@@ -594,44 +620,38 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
                }
                writeb(threshold_code,
                        data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
-
-               writeb(pdata->trigger_levels[0],
-                       data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0);
-               writeb(pdata->trigger_levels[1],
-                       data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL1);
-               writeb(pdata->trigger_levels[2],
-                       data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL2);
-               writeb(pdata->trigger_levels[3],
-                       data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL3);
+               for (i = 0; i < trigger_levs; i++)
+                       writeb(pdata->trigger_levels[i],
+                       data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
 
                writel(EXYNOS4210_TMU_INTCLEAR_VAL,
                        data->base + EXYNOS_TMU_REG_INTCLEAR);
        } else if (data->soc == SOC_ARCH_EXYNOS) {
-               /* Write temperature code for threshold */
-               threshold_code = temp_to_code(data, pdata->trigger_levels[0]);
-               if (threshold_code < 0) {
-                       ret = threshold_code;
-                       goto out;
-               }
-               rising_threshold = threshold_code;
-               threshold_code = temp_to_code(data, pdata->trigger_levels[1]);
-               if (threshold_code < 0) {
-                       ret = threshold_code;
-                       goto out;
-               }
-               rising_threshold |= (threshold_code << 8);
-               threshold_code = temp_to_code(data, pdata->trigger_levels[2]);
-               if (threshold_code < 0) {
-                       ret = threshold_code;
-                       goto out;
+               /* Write temperature code for rising and falling threshold */
+               for (i = 0; i < trigger_levs; i++) {
+                       threshold_code = temp_to_code(data,
+                                               pdata->trigger_levels[i]);
+                       if (threshold_code < 0) {
+                               ret = threshold_code;
+                               goto out;
+                       }
+                       rising_threshold |= threshold_code << 8 * i;
+                       if (pdata->threshold_falling) {
+                               threshold_code = temp_to_code(data,
+                                               pdata->trigger_levels[i] -
+                                               pdata->threshold_falling);
+                               if (threshold_code > 0)
+                                       falling_threshold |=
+                                               threshold_code << 8 * i;
+                       }
                }
-               rising_threshold |= (threshold_code << 16);
 
                writel(rising_threshold,
                                data->base + EXYNOS_THD_TEMP_RISE);
-               writel(0, data->base + EXYNOS_THD_TEMP_FALL);
+               writel(falling_threshold,
+                               data->base + EXYNOS_THD_TEMP_FALL);
 
-               writel(EXYNOS_TMU_CLEAR_RISE_INT|EXYNOS_TMU_CLEAR_FALL_INT,
+               writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
                                data->base + EXYNOS_TMU_REG_INTCLEAR);
        }
 out:
@@ -664,6 +684,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
                        pdata->trigger_level2_en << 8 |
                        pdata->trigger_level1_en << 4 |
                        pdata->trigger_level0_en;
+               if (pdata->threshold_falling)
+                       interrupt_en |= interrupt_en << 16;
        } else {
                con |= EXYNOS_TMU_CORE_OFF;
                interrupt_en = 0; /* Disable all interrupts */
@@ -697,20 +719,19 @@ static void exynos_tmu_work(struct work_struct *work)
        struct exynos_tmu_data *data = container_of(work,
                        struct exynos_tmu_data, irq_work);
 
+       exynos_report_trigger();
        mutex_lock(&data->lock);
        clk_enable(data->clk);
-
-
        if (data->soc == SOC_ARCH_EXYNOS)
-               writel(EXYNOS_TMU_CLEAR_RISE_INT,
+               writel(EXYNOS_TMU_CLEAR_RISE_INT |
+                               EXYNOS_TMU_CLEAR_FALL_INT,
                                data->base + EXYNOS_TMU_REG_INTCLEAR);
        else
                writel(EXYNOS4210_TMU_INTCLEAR_VAL,
                                data->base + EXYNOS_TMU_REG_INTCLEAR);
-
        clk_disable(data->clk);
        mutex_unlock(&data->lock);
-       exynos_report_trigger();
+
        enable_irq(data->irq);
 }
 
@@ -759,6 +780,7 @@ static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
 
 #if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
 static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
+       .threshold_falling = 10,
        .trigger_levels[0] = 85,
        .trigger_levels[1] = 103,
        .trigger_levels[2] = 110,
@@ -800,8 +822,6 @@ static const struct of_device_id exynos_tmu_match[] = {
        {},
 };
 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
-#else
-#define  exynos_tmu_match NULL
 #endif
 
 static struct platform_device_id exynos_tmu_driver_ids[] = {
@@ -832,6 +852,94 @@ static inline struct  exynos_tmu_platform_data *exynos_get_driver_data(
        return (struct exynos_tmu_platform_data *)
                        platform_get_device_id(pdev)->driver_data;
 }
+
+#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+static ssize_t exynos_tmu_emulation_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct platform_device *pdev = container_of(dev,
+                                       struct platform_device, dev);
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       unsigned int reg;
+       u8 temp_code;
+       int temp = 0;
+
+       if (data->soc == SOC_ARCH_EXYNOS4210)
+               goto out;
+
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+       reg = readl(data->base + EXYNOS_EMUL_CON);
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+
+       if (reg & EXYNOS_EMUL_ENABLE) {
+               reg >>= EXYNOS_EMUL_DATA_SHIFT;
+               temp_code = reg & EXYNOS_EMUL_DATA_MASK;
+               temp = code_to_temp(data, temp_code);
+       }
+out:
+       return sprintf(buf, "%d\n", temp * MCELSIUS);
+}
+
+static ssize_t exynos_tmu_emulation_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct platform_device *pdev = container_of(dev,
+                                       struct platform_device, dev);
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       unsigned int reg;
+       int temp;
+
+       if (data->soc == SOC_ARCH_EXYNOS4210)
+               goto out;
+
+       if (!sscanf(buf, "%d\n", &temp) || temp < 0)
+               return -EINVAL;
+
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+
+       reg = readl(data->base + EXYNOS_EMUL_CON);
+
+       if (temp) {
+               /* Both CELSIUS and MCELSIUS type are available for input */
+               if (temp > MCELSIUS)
+                       temp /= MCELSIUS;
+
+               reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
+                       (temp_to_code(data, (temp / MCELSIUS))
+                        << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
+       } else {
+               reg &= ~EXYNOS_EMUL_ENABLE;
+       }
+
+       writel(reg, data->base + EXYNOS_EMUL_CON);
+
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+
+out:
+       return count;
+}
+
+static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show,
+                                       exynos_tmu_emulation_store);
+static int create_emulation_sysfs(struct device *dev)
+{
+       return device_create_file(dev, &dev_attr_emulation);
+}
+static void remove_emulation_sysfs(struct device *dev)
+{
+       device_remove_file(dev, &dev_attr_emulation);
+}
+#else
+static inline int create_emulation_sysfs(struct device *dev) { return 0; }
+static inline void remove_emulation_sysfs(struct device *dev) {}
+#endif
+
 static int exynos_tmu_probe(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data;
@@ -914,6 +1022,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
                exynos_sensor_conf.trip_data.trip_val[i] =
                        pdata->threshold + pdata->trigger_levels[i];
 
+       exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling;
+
        exynos_sensor_conf.cooling_data.freq_clip_count =
                                                pdata->freq_tab_count;
        for (i = 0; i < pdata->freq_tab_count; i++) {
@@ -928,6 +1038,11 @@ static int exynos_tmu_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "Failed to register thermal interface\n");
                goto err_clk;
        }
+
+       ret = create_emulation_sysfs(&pdev->dev);
+       if (ret)
+               dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n");
+
        return 0;
 err_clk:
        platform_set_drvdata(pdev, NULL);
@@ -939,6 +1054,8 @@ static int exynos_tmu_remove(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
 
+       remove_emulation_sysfs(&pdev->dev);
+
        exynos_tmu_control(pdev, false);
 
        exynos_unregister_thermal();
@@ -980,7 +1097,7 @@ static struct platform_driver exynos_tmu_driver = {
                .name   = "exynos-tmu",
                .owner  = THIS_MODULE,
                .pm     = EXYNOS_TMU_PM,
-               .of_match_table = exynos_tmu_match,
+               .of_match_table = of_match_ptr(exynos_tmu_match),
        },
        .probe = exynos_tmu_probe,
        .remove = exynos_tmu_remove,
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
new file mode 100644 (file)
index 0000000..b40b37c
--- /dev/null
@@ -0,0 +1,795 @@
+/*
+ * intel_powerclamp.c - package c-state idle injection
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Authors:
+ *     Arjan van de Ven <arjan@linux.intel.com>
+ *     Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ *
+ *     TODO:
+ *           1. better handle wakeup from external interrupts, currently a fixed
+ *              compensation is added to clamping duration when excessive amount
+ *              of wakeups are observed during idle time. the reason is that in
+ *              case of external interrupts without need for ack, clamping down
+ *              cpu in non-irq context does not reduce irq. for majority of the
+ *              cases, clamping down cpu does help reduce irq as well, we should
+ *              be able to differentiate the two cases and give a quantitative
+ *              solution for the irqs that we can control. perhaps based on
+ *              get_cpu_iowait_time_us()
+ *
+ *          2. synchronization with other hw blocks
+ *
+ *
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched/rt.h>
+
+#include <asm/nmi.h>
+#include <asm/msr.h>
+#include <asm/mwait.h>
+#include <asm/cpu_device_id.h>
+#include <asm/idle.h>
+#include <asm/hardirq.h>
+
+/* Highest idle ratio (percent) userspace may request. */
+#define MAX_TARGET_RATIO (50U)
+/* For each undisturbed clamping period (no extra wake ups during idle time),
+ * we increment the confidence counter for the given target ratio.
+ * CONFIDENCE_OK defines the level where runtime calibration results are
+ * valid.
+ */
+#define CONFIDENCE_OK (3)
+/* Default idle injection duration; the driver adjusts sleep time to meet the
+ * target idle ratio. Similar to frequency modulation.
+ */
+#define DEFAULT_DURATION_JIFFIES (6)
+
+/* deepest MWAIT hint found by find_target_mwait() */
+static unsigned int target_mwait;
+/* debugfs directory for powerclamp_calib */
+static struct dentry *debug_dir;
+
+/* user selected target */
+static unsigned int set_target_ratio;
+/* achieved pkg cstate ratio, updated by the control cpu each window */
+static unsigned int current_ratio;
+/* true when the achieved ratio already exceeds target + guard */
+static bool should_skip;
+/* true when excessive wakeups force the double-injection penalty */
+static bool reduce_irq;
+/* wakeups seen during injected idle in the current window */
+static atomic_t idle_wakeup_counter;
+static unsigned int control_cpu; /* The cpu assigned to collect stat and update
+                                 * control parameters. default to BSP but BSP
+                                 * can be offlined.
+                                 */
+/* global on/off switch read by every clamping thread */
+static bool clamping;
+
+
+static struct task_struct * __percpu *powerclamp_thread;
+static struct thermal_cooling_device *cooling_dev;
+static unsigned long *cpu_clamping_mask;  /* bit map for tracking per cpu
+                                          * clamping thread
+                                          */
+
+/* forced idle time per attempt, msec; module parameter "duration" */
+static unsigned int duration;
+/* last measured pkg cstate ratio, exported via get_cur_state */
+static unsigned int pkg_cstate_ratio_cur;
+/* control-loop window length in clamping cycles; module parameter */
+static unsigned int window_size;
+
+/*
+ * Module parameter setter for "duration" (forced idle time per attempt,
+ * in msec).  Only the recommended 6-25 ms range is accepted; out-of-range
+ * input now returns -EINVAL and leaves the current value untouched.
+ * (The original clamped and stored the value even while reporting the
+ * error, so a failed write still changed behavior.)
+ */
+static int duration_set(const char *arg, const struct kernel_param *kp)
+{
+       int ret = 0;
+       unsigned long new_duration;
+
+       ret = kstrtoul(arg, 10, &new_duration);
+       if (ret)
+               goto exit;
+       if (new_duration > 25 || new_duration < 6) {
+               pr_err("Out of recommended range %lu, between 6-25ms\n",
+                       new_duration);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       duration = new_duration;
+       /* publish the new value to the clamping threads */
+       smp_mb();
+
+exit:
+
+       return ret;
+}
+
+static struct kernel_param_ops duration_ops = {
+       .set = duration_set,
+       /* duration is unsigned int; param_get_int would read it as signed */
+       .get = param_get_uint,
+};
+
+
+module_param_cb(duration, &duration_ops, &duration, 0644);
+MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");
+
+/* Per-target-ratio runtime calibration record; see get_compensation(). */
+struct powerclamp_calibration_data {
+       unsigned long confidence;  /* used for calibration, basically a counter
+                                   * gets incremented each time a clamping
+                                   * period is completed without extra wakeups
+                                   * once that counter is reached given level,
+                                   * compensation is deemed usable.
+                                   */
+       unsigned long steady_comp; /* steady state compensation used when
+                                   * no extra wakeups occurred.
+                                   */
+       unsigned long dynamic_comp; /* compensate excessive wakeup from idle
+                                    * mostly from external interrupts.
+                                    */
+};
+
+/* one calibration entry per requestable idle ratio */
+static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO];
+
+/*
+ * Module parameter setter for "window_size" (control window in clamping
+ * cycles).  Only the recommended 2-10 range is accepted; out-of-range
+ * input now returns -EINVAL and leaves the current value untouched.
+ * (The original clamped and stored the value even while reporting the
+ * error, mirroring the defect in duration_set().)
+ */
+static int window_size_set(const char *arg, const struct kernel_param *kp)
+{
+       int ret = 0;
+       unsigned long new_window_size;
+
+       ret = kstrtoul(arg, 10, &new_window_size);
+       if (ret)
+               goto exit_win;
+       if (new_window_size > 10 || new_window_size < 2) {
+               pr_err("Out of recommended window size %lu, between 2-10\n",
+                       new_window_size);
+               ret = -EINVAL;
+               goto exit_win;
+       }
+
+       window_size = new_window_size;
+       /* publish the new value to the clamping threads */
+       smp_mb();
+
+exit_win:
+
+       return ret;
+}
+
+static struct kernel_param_ops window_size_ops = {
+       .set = window_size_set,
+       /* window_size is unsigned int; param_get_int would read it as signed */
+       .get = param_get_uint,
+};
+
+module_param_cb(window_size, &window_size_ops, &window_size, 0644);
+MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
+       "\tpowerclamp controls idle ratio within this window. larger\n"
+       "\twindow size results in slower response time but more smooth\n"
+       "\tclamping results. default to 2.");
+
+/*
+ * Pick the deepest supported MWAIT C-state hint.
+ *
+ * Walks CPUID leaf 5 (MONITOR/MWAIT) sub-state counts and stores the
+ * deepest C-state that reports at least one sub-state into target_mwait,
+ * encoded as (cstate << MWAIT_SUBSTATE_SIZE) | substate for __mwait().
+ * Leaves target_mwait at 0 if the leaf or the required interrupt-break
+ * extension is unavailable.
+ */
+static void find_target_mwait(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int highest_cstate = 0;
+       unsigned int highest_subcstate = 0;
+       int i;
+
+       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+               return;
+
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+       /* need enumeration plus "break on interrupt even if masked" (ecx=1) */
+       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+           !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+               return;
+
+       /* edx holds a 4-bit sub-state count per C-state; C0 field skipped */
+       edx >>= MWAIT_SUBSTATE_SIZE;
+       for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+               if (edx & MWAIT_SUBSTATE_MASK) {
+                       highest_cstate = i;
+                       highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+               }
+       }
+       /*
+        * NOTE(review): the hint appears to encode the sub-state as
+        * (count - 1); assumes at least one sub-state was found — confirm.
+        */
+       target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+               (highest_subcstate - 1);
+
+}
+
+/*
+ * Sum the package C-state residency counters (C2/C3/C6/C7).
+ *
+ * An MSR whose read faults is remembered as unsupported and never probed
+ * again, so each missing counter costs exactly one failed rdmsr.
+ * Returns the accumulated residency of all readable package C-states.
+ */
+static u64 pkg_state_counter(void)
+{
+       static const u32 pkg_cstate_msrs[] = {
+               MSR_PKG_C2_RESIDENCY,
+               MSR_PKG_C3_RESIDENCY,
+               MSR_PKG_C6_RESIDENCY,
+               MSR_PKG_C7_RESIDENCY,
+       };
+       static bool unsupported[ARRAY_SIZE(pkg_cstate_msrs)];
+       u64 residency;
+       u64 total = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(pkg_cstate_msrs); i++) {
+               if (unsupported[i])
+                       continue;
+               if (rdmsrl_safe(pkg_cstate_msrs[i], &residency))
+                       unsupported[i] = true;
+               else
+                       total += residency;
+       }
+
+       return total;
+}
+
+/* Timer callback used purely as a wakeup source out of mwait idle. */
+static void noop_timer(unsigned long foo)
+{
+       /* empty... just the fact that we get the interrupt wakes us up */
+}
+
+/*
+ * Look up the calibrated compensation (extra idle percent) to add to a
+ * requested ratio.  An entry is only trusted once it and both usable
+ * neighbours have reached CONFIDENCE_OK; the three steady_comp values are
+ * then averaged.  The edge ratios (1 and MAX_TARGET_RATIO - 1) use the
+ * two neighbours on their inner side instead.
+ */
+static unsigned int get_compensation(int ratio)
+{
+       unsigned int comp = 0;
+
+       /* we only use compensation if all adjacent ones are good */
+       if (ratio == 1 &&
+               cal_data[ratio].confidence >= CONFIDENCE_OK &&
+               cal_data[ratio + 1].confidence >= CONFIDENCE_OK &&
+               cal_data[ratio + 2].confidence >= CONFIDENCE_OK) {
+               comp = (cal_data[ratio].steady_comp +
+                       cal_data[ratio + 1].steady_comp +
+                       cal_data[ratio + 2].steady_comp) / 3;
+       } else if (ratio == MAX_TARGET_RATIO - 1 &&
+               cal_data[ratio].confidence >= CONFIDENCE_OK &&
+               cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+               cal_data[ratio - 2].confidence >= CONFIDENCE_OK) {
+               comp = (cal_data[ratio].steady_comp +
+                       cal_data[ratio - 1].steady_comp +
+                       cal_data[ratio - 2].steady_comp) / 3;
+       } else if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
+               cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+               cal_data[ratio + 1].confidence >= CONFIDENCE_OK) {
+               comp = (cal_data[ratio].steady_comp +
+                       cal_data[ratio - 1].steady_comp +
+                       cal_data[ratio + 1].steady_comp) / 3;
+       }
+
+       /* REVISIT: simple penalty of double idle injection */
+       if (reduce_irq)
+               comp = ratio;
+       /* do not exceed limit */
+       if (comp + ratio >= MAX_TARGET_RATIO)
+               comp = MAX_TARGET_RATIO - ratio - 1;
+
+       return comp;
+}
+
+/*
+ * Feed one completed clamping window into the calibration entry for
+ * target_ratio.  Learning happens only while the entry is not yet trusted
+ * (confidence < CONFIDENCE_OK) and the window saw few enough wakeups for
+ * the measurement to be meaningful.
+ */
+static void adjust_compensation(int target_ratio, unsigned int win)
+{
+       int delta;
+       struct powerclamp_calibration_data *d = &cal_data[target_ratio];
+
+       /*
+        * adjust compensations if confidence level has not been reached or
+        * there are too many wakeups during the last idle injection period, we
+        * cannot trust the data for compensation.
+        */
+       if (d->confidence >= CONFIDENCE_OK ||
+               atomic_read(&idle_wakeup_counter) >
+               win * num_online_cpus())
+               return;
+
+       delta = set_target_ratio - current_ratio;
+       /* filter out bad data */
+       if (delta >= 0 && delta <= (1+target_ratio/10)) {
+               /* blend the new shortfall into the running compensation */
+               if (d->steady_comp)
+                       d->steady_comp =
+                               roundup(delta+d->steady_comp, 2)/2;
+               else
+                       d->steady_comp = delta;
+               d->confidence++;
+       }
+}
+
+/*
+ * Close the control loop for one window (runs on the control cpu only):
+ * compute the achieved package C-state ratio from the residency and TSC
+ * deltas since the previous call, update calibration data, and decide
+ * whether excessive interrupt wakeups warrant the double-injection
+ * penalty.  Returns true when the achieved ratio already exceeds
+ * target + guard, i.e. the next injection period should be skipped.
+ */
+static bool powerclamp_adjust_controls(unsigned int target_ratio,
+                               unsigned int guard, unsigned int win)
+{
+       static u64 msr_last, tsc_last;
+       u64 msr_now, tsc_now;
+       u64 val64;
+
+       /* check result for the last window */
+       msr_now = pkg_state_counter();
+       rdtscll(tsc_now);
+
+       /* calculate pkg cstate vs tsc ratio */
+       if (!msr_last || !tsc_last)
+               current_ratio = 1;      /* first call: no deltas yet */
+       else if (tsc_now-tsc_last) {
+               val64 = 100*(msr_now-msr_last);
+               do_div(val64, (tsc_now-tsc_last));
+               current_ratio = val64;
+       }
+
+       /* update record */
+       msr_last = msr_now;
+       tsc_last = tsc_now;
+
+       adjust_compensation(target_ratio, win);
+       /*
+        * too many external interrupts, set flag such
+        * that we can take measure later.
+        */
+       reduce_irq = atomic_read(&idle_wakeup_counter) >=
+               2 * win * num_online_cpus();
+
+       atomic_set(&idle_wakeup_counter, 0);
+       /* if we are above target+guard, skip */
+       return set_target_ratio + guard <= current_ratio;
+}
+
+/*
+ * Per-cpu clamping kthread, run as SCHED_FIFO at MAX_USER_RT_PRIO/2 so it
+ * preempts normal work when an injection period starts.
+ *
+ * Loop: sleep until the next interval boundary (intervals derived from
+ * the requested ratio plus calibrated compensation), then inject
+ * duration_jiffies of mwait idle with the tick stopped, using an on-stack
+ * timer as the guaranteed wakeup.  The elected control cpu additionally
+ * closes the feedback loop once per window.  Exits when clamping is
+ * switched off, kthread_stop() is called, or this cpu goes offline.
+ */
+static int clamp_thread(void *arg)
+{
+       int cpunr = (unsigned long)arg;
+       DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
+       static const struct sched_param param = {
+               .sched_priority = MAX_USER_RT_PRIO/2,
+       };
+       unsigned int count = 0;
+       unsigned int target_ratio;
+
+       set_bit(cpunr, cpu_clamping_mask);
+       set_freezable();
+       init_timer_on_stack(&wakeup_timer);
+       sched_setscheduler(current, SCHED_FIFO, &param);
+
+       while (true == clamping && !kthread_should_stop() &&
+               cpu_online(cpunr)) {
+               int sleeptime;
+               unsigned long target_jiffies;
+               unsigned int guard;
+               unsigned int compensation = 0;
+               int interval; /* jiffies to sleep for each attempt */
+               unsigned int duration_jiffies = msecs_to_jiffies(duration);
+               unsigned int window_size_now;
+
+               try_to_freeze();
+               /*
+                * make sure user selected ratio does not take effect until
+                * the next round. adjust target_ratio if user has changed
+                * target such that we can converge quickly.
+                */
+               target_ratio = set_target_ratio;
+               guard = 1 + target_ratio/20;
+               window_size_now = window_size;
+               count++;
+
+               /*
+                * systems may have different ability to enter package level
+                * c-states, thus we need to compensate the injected idle ratio
+                * to achieve the actual target reported by the HW.
+                */
+               compensation = get_compensation(target_ratio);
+               interval = duration_jiffies*100/(target_ratio+compensation);
+
+               /* align idle time */
+               target_jiffies = roundup(jiffies, interval);
+               sleeptime = target_jiffies - jiffies;
+               if (sleeptime <= 0)
+                       sleeptime = 1;
+               schedule_timeout_interruptible(sleeptime);
+               /*
+                * only elected controlling cpu can collect stats and update
+                * control parameters.
+                */
+               if (cpunr == control_cpu && !(count%window_size_now)) {
+                       should_skip =
+                               powerclamp_adjust_controls(target_ratio,
+                                                       guard, window_size_now);
+                       smp_mb();
+               }
+
+               if (should_skip)
+                       continue;
+
+               target_jiffies = jiffies + duration_jiffies;
+               mod_timer(&wakeup_timer, target_jiffies);
+               /* pending softirqs would be starved by mwait; back off */
+               if (unlikely(local_softirq_pending()))
+                       continue;
+               /*
+                * stop tick sched during idle time, interrupts are still
+                * allowed. thus jiffies are updated properly.
+                */
+               preempt_disable();
+               tick_nohz_idle_enter();
+               /* mwait until target jiffies is reached */
+               while (time_before(jiffies, target_jiffies)) {
+                       unsigned long ecx = 1;
+                       unsigned long eax = target_mwait;
+
+                       /*
+                        * REVISIT: may call enter_idle() to notify drivers who
+                        * can save power during cpu idle. same for exit_idle()
+                        */
+                       local_touch_nmi();
+                       stop_critical_timings();
+                       __monitor((void *)&current_thread_info()->flags, 0, 0);
+                       cpu_relax(); /* allow HT sibling to run */
+                       __mwait(eax, ecx);
+                       start_critical_timings();
+                       atomic_inc(&idle_wakeup_counter);
+               }
+               tick_nohz_idle_exit();
+               preempt_enable_no_resched();
+       }
+       del_timer_sync(&wakeup_timer);
+       clear_bit(cpunr, cpu_clamping_mask);
+
+       return 0;
+}
+
+/*
+ * 1 HZ polling while clamping is active, useful for userspace
+ * to monitor actual idle ratio.
+ */
+static void poll_pkg_cstate(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
+/*
+ * Delayed work: sample residency and TSC, publish the achieved ratio in
+ * pkg_cstate_ratio_cur, and re-arm itself every HZ while clamping is on.
+ */
+static void poll_pkg_cstate(struct work_struct *dummy)
+{
+       static u64 msr_last;
+       static u64 tsc_last;
+       static unsigned long jiffies_last;
+
+       u64 msr_now;
+       unsigned long jiffies_now;
+       u64 tsc_now;
+       u64 val64;
+
+       msr_now = pkg_state_counter();
+       rdtscll(tsc_now);
+       jiffies_now = jiffies;
+
+       /* calculate pkg cstate vs tsc ratio */
+       if (!msr_last || !tsc_last)
+               pkg_cstate_ratio_cur = 1;       /* first sample: no deltas */
+       else {
+               if (tsc_now - tsc_last) {
+                       val64 = 100 * (msr_now - msr_last);
+                       do_div(val64, (tsc_now - tsc_last));
+                       pkg_cstate_ratio_cur = val64;
+               }
+       }
+
+       /* update record */
+       msr_last = msr_now;
+       jiffies_last = jiffies_now;
+       tsc_last = tsc_now;
+
+       /* self-rescheduling stops once clamping is turned off */
+       if (true == clamping)
+               schedule_delayed_work(&poll_pkg_cstate_work, HZ);
+}
+
+/*
+ * Begin idle injection: verify the package C-state counters actually
+ * count (the control loop is useless otherwise), elect a control cpu,
+ * kick off the 1 Hz status poll, and spawn one clamping kthread bound to
+ * each online cpu.  Hotplug is held off while the threads are created.
+ *
+ * Returns 0 on success, -EINVAL if the residency counters read as zero.
+ */
+static int start_power_clamp(void)
+{
+       unsigned long cpu;
+       struct task_struct *thread;
+
+       /* check if pkg cstate counter is completely 0, abort in this case */
+       if (!pkg_state_counter()) {
+               pr_err("pkg cstate counter not functional, abort\n");
+               return -EINVAL;
+       }
+
+       set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
+       /* prevent cpu hotplug */
+       get_online_cpus();
+
+       /* prefer BSP */
+       control_cpu = 0;
+       if (!cpu_online(control_cpu))
+               control_cpu = smp_processor_id();
+
+       clamping = true;
+       schedule_delayed_work(&poll_pkg_cstate_work, 0);
+
+       /* start one thread per online cpu */
+       for_each_online_cpu(cpu) {
+               struct task_struct **p =
+                       per_cpu_ptr(powerclamp_thread, cpu);
+
+               /* "%lu": cpu is unsigned long (the hotplug path already
+                * used %lu; "%ld" here was a signedness mismatch). */
+               thread = kthread_create_on_node(clamp_thread,
+                                               (void *) cpu,
+                                               cpu_to_node(cpu),
+                                               "kidle_inject/%lu", cpu);
+               /* bind to cpu here */
+               if (likely(!IS_ERR(thread))) {
+                       kthread_bind(thread, cpu);
+                       wake_up_process(thread);
+                       *p = thread;
+               }
+
+       }
+       put_online_cpus();
+
+       return 0;
+}
+
+/*
+ * Stop idle injection: clear the clamping flag, give the per-cpu threads
+ * a moment to notice it and exit on their own, then kthread_stop() any
+ * still marked alive in cpu_clamping_mask.
+ */
+static void end_power_clamp(void)
+{
+       int i;
+       struct task_struct *thread;
+
+       clamping = false;
+       /*
+        * make clamping visible to other cpus and give per cpu clamping threads
+        * sometime to exit, or gets killed later.
+        */
+       smp_mb();
+       msleep(20);
+       if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
+               for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
+                       pr_debug("clamping thread for cpu %d alive, kill\n", i);
+                       thread = *per_cpu_ptr(powerclamp_thread, i);
+                       kthread_stop(thread);
+               }
+       }
+}
+
+/*
+ * CPU hotplug notifier: while clamping is active, spawn a clamping thread
+ * on a newly-onlined cpu and reap the thread of a dead one.  CPU 0 is
+ * re-elected as controlling cpu whenever it comes back online; if the
+ * controlling cpu dies, the cpu running this callback takes over.
+ */
+static int powerclamp_cpu_callback(struct notifier_block *nfb,
+                               unsigned long action, void *hcpu)
+{
+       unsigned long cpu = (unsigned long)hcpu;
+       struct task_struct *thread;
+       struct task_struct **percpu_thread =
+               per_cpu_ptr(powerclamp_thread, cpu);
+
+       if (false == clamping)
+               goto exit_ok;
+
+       switch (action) {
+       case CPU_ONLINE:
+               thread = kthread_create_on_node(clamp_thread,
+                                               (void *) cpu,
+                                               cpu_to_node(cpu),
+                                               "kidle_inject/%lu", cpu);
+               if (likely(!IS_ERR(thread))) {
+                       kthread_bind(thread, cpu);
+                       wake_up_process(thread);
+                       *percpu_thread = thread;
+               }
+               /* prefer BSP as controlling CPU */
+               if (cpu == 0) {
+                       control_cpu = 0;
+                       smp_mb();
+               }
+               break;
+       case CPU_DEAD:
+               if (test_bit(cpu, cpu_clamping_mask)) {
+                       pr_err("cpu %lu dead but powerclamping thread is not\n",
+                               cpu);
+                       kthread_stop(*percpu_thread);
+               }
+               if (cpu == control_cpu) {
+                       control_cpu = smp_processor_id();
+                       smp_mb();
+               }
+               /* last case: falls out of the switch */
+       }
+
+exit_ok:
+       return NOTIFY_OK;
+}
+
+static struct notifier_block powerclamp_cpu_notifier = {
+       .notifier_call = powerclamp_cpu_callback,
+};
+
+/* Cooling op: the max state is the highest requestable idle ratio. */
+static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
+                                unsigned long *state)
+{
+       *state = MAX_TARGET_RATIO;
+
+       return 0;
+}
+
+/*
+ * Cooling op: report the achieved package C-state ratio while clamping;
+ * otherwise an "invalid" sentinel (-1 wraps to ULONG_MAX in *state).
+ */
+static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
+                                unsigned long *state)
+{
+       if (true == clamping)
+               *state = pkg_cstate_ratio_cur;
+       else
+               /* to save power, do not poll idle ratio while not clamping */
+               *state = -1; /* indicates invalid state */
+
+       return 0;
+}
+
+/*
+ * Cooling op: 0 -> stop injection, nonzero -> start injection or retune a
+ * running one.  The requested ratio is clamped to MAX_TARGET_RATIO - 1.
+ * Returns start_power_clamp()'s result on the 0 -> nonzero transition,
+ * 0 otherwise.
+ */
+static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
+                                unsigned long new_target_ratio)
+{
+       int ret = 0;
+
+       new_target_ratio = clamp(new_target_ratio, 0UL,
+                               (unsigned long) (MAX_TARGET_RATIO-1));
+       if (set_target_ratio == 0 && new_target_ratio > 0) {
+               pr_info("Start idle injection to reduce power\n");
+               set_target_ratio = new_target_ratio;
+               ret = start_power_clamp();
+               goto exit_set;
+       } else  if (set_target_ratio > 0 && new_target_ratio == 0) {
+               pr_info("Stop forced idle injection\n");
+               set_target_ratio = 0;
+               end_power_clamp();
+       } else  /* adjust currently running */ {
+               set_target_ratio = new_target_ratio;
+               /* make new set_target_ratio visible to other cpus */
+               smp_mb();
+       }
+
+exit_set:
+       return ret;
+}
+
+/* bind to generic thermal layer as cooling device*/
+static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
+       .get_max_state = powerclamp_get_max_state,
+       .get_cur_state = powerclamp_get_cur_state,
+       .set_cur_state = powerclamp_set_cur_state,
+};
+
+/* runs on Nehalem and later */
+static const struct x86_cpu_id intel_powerclamp_ids[] = {
+       { X86_VENDOR_INTEL, 6, 0x1a},
+       { X86_VENDOR_INTEL, 6, 0x1c},
+       { X86_VENDOR_INTEL, 6, 0x1e},
+       { X86_VENDOR_INTEL, 6, 0x1f},
+       { X86_VENDOR_INTEL, 6, 0x25},
+       { X86_VENDOR_INTEL, 6, 0x26},
+       { X86_VENDOR_INTEL, 6, 0x2a},
+       { X86_VENDOR_INTEL, 6, 0x2c},
+       { X86_VENDOR_INTEL, 6, 0x2d},
+       { X86_VENDOR_INTEL, 6, 0x2e},
+       { X86_VENDOR_INTEL, 6, 0x2f},
+       { X86_VENDOR_INTEL, 6, 0x3a},
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
+/*
+ * Check that this CPU can support forced idle injection: a listed model
+ * plus nonstop/constant TSC (jiffies and the ratio math must stay valid
+ * across deep idle), MWAIT, and ARAT.  Also caches the deepest mwait
+ * hint.  Returns 0 if usable, -ENODEV otherwise.
+ */
+static int powerclamp_probe(void)
+{
+       if (!x86_match_cpu(intel_powerclamp_ids)) {
+               pr_err("Intel powerclamp does not run on family %d model %d\n",
+                               boot_cpu_data.x86, boot_cpu_data.x86_model);
+               return -ENODEV;
+       }
+       if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
+               !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
+               !boot_cpu_has(X86_FEATURE_MWAIT) ||
+               !boot_cpu_has(X86_FEATURE_ARAT))
+               return -ENODEV;
+
+       /* find the deepest mwait value */
+       find_target_mwait();
+
+       return 0;
+}
+
+/*
+ * debugfs "powerclamp_calib" dump: the control cpu followed by one line
+ * per target ratio with its calibration counters.
+ */
+static int powerclamp_debug_show(struct seq_file *m, void *unused)
+{
+       int i = 0;
+
+       /* control_cpu is unsigned int -> %u (was %d) */
+       seq_printf(m, "controlling cpu: %u\n", control_cpu);
+       /* constant string: seq_puts is the idiomatic (checkpatch) form */
+       seq_puts(m, "pct confidence steady dynamic (compensation)\n");
+       for (i = 0; i < MAX_TARGET_RATIO; i++) {
+               seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
+                       i,
+                       cal_data[i].confidence,
+                       cal_data[i].steady_comp,
+                       cal_data[i].dynamic_comp);
+       }
+
+       return 0;
+}
+
+/* debugfs open: standard seq_file boilerplate for powerclamp_debug_show. */
+static int powerclamp_debug_open(struct inode *inode,
+                       struct file *file)
+{
+       return single_open(file, powerclamp_debug_show, inode->i_private);
+}
+
+static const struct file_operations powerclamp_debug_fops = {
+       .open           = powerclamp_debug_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+       .owner          = THIS_MODULE,
+};
+
+/*
+ * Best-effort creation of the debugfs directory and the read-only
+ * "powerclamp_calib" file.  On any failure everything created so far is
+ * removed; the driver works fine without debugfs.
+ */
+static void powerclamp_create_debug_files(void)
+{
+       struct dentry *entry;
+
+       debug_dir = debugfs_create_dir("intel_powerclamp", NULL);
+       if (!debug_dir)
+               return;
+
+       entry = debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir,
+                                   cal_data, &powerclamp_debug_fops);
+       if (!entry)
+               debugfs_remove_recursive(debug_dir);
+}
+
+/*
+ * Module init: allocate the clamping-thread bitmap and per-cpu pointers,
+ * probe CPU capabilities, register the hotplug notifier and the thermal
+ * cooling device, then create the debugfs view.
+ *
+ * All error paths now unwind fully; the original leaked cpu_clamping_mask
+ * when probing failed, never checked alloc_percpu(), and left the hotplug
+ * notifier and per-cpu data behind when cooling-device registration
+ * failed.
+ */
+static int powerclamp_init(void)
+{
+       int retval;
+       int bitmap_size;
+
+       bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
+       cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!cpu_clamping_mask)
+               return -ENOMEM;
+
+       /* probe cpu features and ids here */
+       retval = powerclamp_probe();
+       if (retval)
+               goto exit_free;
+
+       /* set default limit, maybe adjusted during runtime based on feedback */
+       window_size = 2;
+       register_hotcpu_notifier(&powerclamp_cpu_notifier);
+
+       powerclamp_thread = alloc_percpu(struct task_struct *);
+       if (!powerclamp_thread) {
+               retval = -ENOMEM;
+               goto exit_unregister;
+       }
+
+       cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
+                                               &powerclamp_cooling_ops);
+       if (IS_ERR(cooling_dev)) {
+               retval = -ENODEV;
+               goto exit_free_thread;
+       }
+
+       if (!duration)
+               duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+       powerclamp_create_debug_files();
+
+       return 0;
+
+exit_free_thread:
+       free_percpu(powerclamp_thread);
+exit_unregister:
+       unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+exit_free:
+       kfree(cpu_clamping_mask);
+       return retval;
+}
+module_init(powerclamp_init);
+
+/*
+ * Module exit: tear down roughly in reverse order of powerclamp_init().
+ * The clamping threads are stopped first; the cooling device is now
+ * unregistered *before* powerclamp_thread is freed, since the thermal
+ * core could still invoke the cooling ops (which reach the per-cpu
+ * pointers via start/end_power_clamp) until unregistration completes.
+ */
+static void powerclamp_exit(void)
+{
+       unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+       end_power_clamp();
+       /* clamping is now false, so the poll work will not re-arm itself */
+       cancel_delayed_work_sync(&poll_pkg_cstate_work);
+       thermal_cooling_device_unregister(cooling_dev);
+       free_percpu(powerclamp_thread);
+       kfree(cpu_clamping_mask);
+       debugfs_remove_recursive(debug_dir);
+}
+module_exit(powerclamp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
new file mode 100644 (file)
index 0000000..65cb4f0
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Kirkwood thermal sensor driver
+ *
+ * Copyright (C) 2012 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#define KIRKWOOD_THERMAL_VALID_OFFSET  9
+#define KIRKWOOD_THERMAL_VALID_MASK    0x1
+#define KIRKWOOD_THERMAL_TEMP_OFFSET   10
+#define KIRKWOOD_THERMAL_TEMP_MASK     0x1FF
+
+/* Kirkwood Thermal Sensor Dev Structure */
+struct kirkwood_thermal_priv {
+       void __iomem *sensor;
+};
+
+static int kirkwood_get_temp(struct thermal_zone_device *thermal,
+                         unsigned long *temp)
+{
+       unsigned long reg;
+       struct kirkwood_thermal_priv *priv = thermal->devdata;
+
+       reg = readl_relaxed(priv->sensor);
+
+       /* Valid check: `!` binds tighter than `&`, so mask before negating */
+       if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
+             KIRKWOOD_THERMAL_VALID_MASK)) {
+               dev_err(&thermal->device,
+                       "Temperature sensor reading not valid\n");
+               return -EIO;
+       }
+
+       /*
+        * Calculate temperature. See Section 8.10.1 of the 88AP510
+        * datasheet, which has the same sensor.
+        * Documentation/arm/Marvell/README
+        */
+       reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
+               KIRKWOOD_THERMAL_TEMP_MASK;
+       *temp = ((2281638UL - (7298*reg)) / 10);
+
+       return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+       .get_temp = kirkwood_get_temp,
+};
+
+static const struct of_device_id kirkwood_thermal_id_table[] = {
+       { .compatible = "marvell,kirkwood-thermal" },
+       {}
+};
+
+static int kirkwood_thermal_probe(struct platform_device *pdev)
+{
+       struct thermal_zone_device *thermal = NULL;
+       struct kirkwood_thermal_priv *priv;
+       struct resource *res;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get platform resource\n");
+               return -ENODEV;
+       }
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
+       if (!priv->sensor) {
+               dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+               return -EADDRNOTAVAIL;
+       }
+
+       thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
+                                              priv, &ops, NULL, 0, 0);
+       if (IS_ERR(thermal)) {
+               dev_err(&pdev->dev,
+                       "Failed to register thermal zone device\n");
+               return PTR_ERR(thermal);
+       }
+
+       platform_set_drvdata(pdev, thermal);
+
+       return 0;
+}
+
+static int kirkwood_thermal_exit(struct platform_device *pdev)
+{
+       struct thermal_zone_device *kirkwood_thermal =
+               platform_get_drvdata(pdev);
+
+       thermal_zone_device_unregister(kirkwood_thermal);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+MODULE_DEVICE_TABLE(of, kirkwood_thermal_id_table);
+
+static struct platform_driver kirkwood_thermal_driver = {
+       .probe = kirkwood_thermal_probe,
+       .remove = kirkwood_thermal_exit,
+       .driver = {
+               .name = "kirkwood_thermal",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(kirkwood_thermal_id_table),
+       },
+};
+
+module_platform_driver(kirkwood_thermal_driver);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu@nigauri.org>");
+MODULE_DESCRIPTION("kirkwood thermal driver");
+MODULE_LICENSE("GPL");
index 90db951..28f0919 100644 (file)
  */
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/thermal.h>
 
-#define THSCR  0x2c
-#define THSSR  0x30
+#define IDLE_INTERVAL  5000
+
+#define COMMON_STR     0x00
+#define COMMON_ENR     0x04
+#define COMMON_INTMSK  0x0c
+
+#define REG_POSNEG     0x20
+#define REG_FILONOFF   0x28
+#define REG_THSCR      0x2c
+#define REG_THSSR      0x30
+#define REG_INTCTRL    0x34
 
 /* THSCR */
-#define CPTAP  0xf
+#define CPCTL  (1 << 12)
 
 /* THSSR */
 #define CTEMP  0x3f
 
-
-struct rcar_thermal_priv {
+struct rcar_thermal_common {
        void __iomem *base;
        struct device *dev;
+       struct list_head head;
        spinlock_t lock;
-       u32 comp;
 };
 
+struct rcar_thermal_priv {
+       void __iomem *base;
+       struct rcar_thermal_common *common;
+       struct thermal_zone_device *zone;
+       struct delayed_work work;
+       struct mutex lock;
+       struct list_head list;
+       int id;
+       int ctemp;
+};
+
+#define rcar_thermal_for_each_priv(pos, common)        \
+       list_for_each_entry(pos, &common->head, list)
+
 #define MCELSIUS(temp)                 ((temp) * 1000)
-#define rcar_zone_to_priv(zone)                (zone->devdata)
+#define rcar_zone_to_priv(zone)                ((zone)->devdata)
+#define rcar_priv_to_dev(priv)         ((priv)->common->dev)
+#define rcar_has_irq_support(priv)     ((priv)->common->base)
+#define rcar_id_to_shift(priv)         ((priv)->id * 8)
+
+#ifdef DEBUG
+# define rcar_force_update_temp(priv)  1
+#else
+# define rcar_force_update_temp(priv)  0
+#endif
 
 /*
  *             basic functions
  */
-static u32 rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
+#define rcar_thermal_common_read(c, r) \
+       _rcar_thermal_common_read(c, COMMON_ ##r)
+static u32 _rcar_thermal_common_read(struct rcar_thermal_common *common,
+                                    u32 reg)
 {
-       unsigned long flags;
-       u32 ret;
-
-       spin_lock_irqsave(&priv->lock, flags);
+       return ioread32(common->base + reg);
+}
 
-       ret = ioread32(priv->base + reg);
+#define rcar_thermal_common_write(c, r, d) \
+       _rcar_thermal_common_write(c, COMMON_ ##r, d)
+static void _rcar_thermal_common_write(struct rcar_thermal_common *common,
+                                      u32 reg, u32 data)
+{
+       iowrite32(data, common->base + reg);
+}
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+#define rcar_thermal_common_bset(c, r, m, d) \
+       _rcar_thermal_common_bset(c, COMMON_ ##r, m, d)
+static void _rcar_thermal_common_bset(struct rcar_thermal_common *common,
+                                     u32 reg, u32 mask, u32 data)
+{
+       u32 val;
 
-       return ret;
+       val = ioread32(common->base + reg);
+       val &= ~mask;
+       val |= (data & mask);
+       iowrite32(val, common->base + reg);
 }
 
-#if 0 /* no user at this point */
-static void rcar_thermal_write(struct rcar_thermal_priv *priv,
-                              u32 reg, u32 data)
+#define rcar_thermal_read(p, r) _rcar_thermal_read(p, REG_ ##r)
+static u32 _rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->lock, flags);
+       return ioread32(priv->base + reg);
+}
 
+#define rcar_thermal_write(p, r, d) _rcar_thermal_write(p, REG_ ##r, d)
+static void _rcar_thermal_write(struct rcar_thermal_priv *priv,
+                               u32 reg, u32 data)
+{
        iowrite32(data, priv->base + reg);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
 }
-#endif
 
-static void rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
-                             u32 mask, u32 data)
+#define rcar_thermal_bset(p, r, m, d) _rcar_thermal_bset(p, REG_ ##r, m, d)
+static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
+                              u32 mask, u32 data)
 {
-       unsigned long flags;
        u32 val;
 
-       spin_lock_irqsave(&priv->lock, flags);
-
        val = ioread32(priv->base + reg);
        val &= ~mask;
        val |= (data & mask);
        iowrite32(val, priv->base + reg);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 /*
  *             zone device functions
  */
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
-                          unsigned long *temp)
+static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
 {
-       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
-       int val, min, max, tmp;
-
-       tmp = -200; /* default */
-       while (1) {
-               if (priv->comp < 1 || priv->comp > 12) {
-                       dev_err(priv->dev,
-                               "THSSR invalid data (%d)\n", priv->comp);
-                       priv->comp = 4; /* for next thermal */
-                       return -EINVAL;
-               }
+       struct device *dev = rcar_priv_to_dev(priv);
+       int i;
+       int ctemp, old, new;
 
-               /*
-                * THS comparator offset and the reference temperature
-                *
-                * Comparator   | reference     | Temperature field
-                * offset       | temperature   | measurement
-                *              | (degrees C)   | (degrees C)
-                * -------------+---------------+-------------------
-                *  1           |  -45          |  -45 to  -30
-                *  2           |  -30          |  -30 to  -15
-                *  3           |  -15          |  -15 to    0
-                *  4           |    0          |    0 to  +15
-                *  5           |  +15          |  +15 to  +30
-                *  6           |  +30          |  +30 to  +45
-                *  7           |  +45          |  +45 to  +60
-                *  8           |  +60          |  +60 to  +75
-                *  9           |  +75          |  +75 to  +90
-                * 10           |  +90          |  +90 to +105
-                * 11           | +105          | +105 to +120
-                * 12           | +120          | +120 to +135
-                */
+       mutex_lock(&priv->lock);
 
-               /* calculate thermal limitation */
-               min = (priv->comp * 15) - 60;
-               max = min + 15;
+       /*
+        * TSC decides a value of CPTAP automatically,
+        * and these are the conditions that validate the interrupt.
+        */
+       rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL);
 
+       ctemp = 0;
+       old = ~0;
+       for (i = 0; i < 128; i++) {
                /*
                 * we need to wait 300us after changing comparator offset
                 * to get stable temperature.
                 * see "Usage Notes" on datasheet
                 */
-               rcar_thermal_bset(priv, THSCR, CPTAP, priv->comp);
                udelay(300);
 
-               /* calculate current temperature */
-               val = rcar_thermal_read(priv, THSSR) & CTEMP;
-               val = (val * 5) - 65;
+               new = rcar_thermal_read(priv, THSSR) & CTEMP;
+               if (new == old) {
+                       ctemp = new;
+                       break;
+               }
+               old = new;
+       }
 
-               dev_dbg(priv->dev, "comp/min/max/val = %d/%d/%d/%d\n",
-                       priv->comp, min, max, val);
+       if (!ctemp) {
+               dev_err(dev, "thermal sensor was broken\n");
+               /* priv->lock is held here; don't leak it on the error path */
+               mutex_unlock(&priv->lock);
+               return -EINVAL;
+       }
 
-               /*
-                * If val is same as min/max, then,
-                * it should try again on next comparator.
-                * But the val might be correct temperature.
-                * Keep it on "tmp" and compare with next val.
-                */
-               if (tmp == val)
-                       break;
+       /*
+        * enable IRQ
+        */
+       if (rcar_has_irq_support(priv)) {
+               rcar_thermal_write(priv, FILONOFF, 0);
 
-               if (val <= min) {
-                       tmp = min;
-                       priv->comp--; /* try again */
-               } else if (val >= max) {
-                       tmp = max;
-                       priv->comp++; /* try again */
-               } else {
-                       tmp = val;
-                       break;
-               }
+               /* enable Rising/Falling edge interrupt */
+               rcar_thermal_write(priv, POSNEG,  0x1);
+               rcar_thermal_write(priv, INTCTRL, (((ctemp - 0) << 8) |
+                                                  ((ctemp - 1) << 0)));
+       }
+
+       dev_dbg(dev, "thermal%d  %d -> %d\n", priv->id, priv->ctemp, ctemp);
+
+       priv->ctemp = ctemp;
+
+       mutex_unlock(&priv->lock);
+
+       return 0;
+}
+
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
+                                unsigned long *temp)
+{
+       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+
+       if (!rcar_has_irq_support(priv) || rcar_force_update_temp(priv))
+               rcar_thermal_update_temp(priv);
+
+       mutex_lock(&priv->lock);
+       *temp =  MCELSIUS((priv->ctemp * 5) - 65);
+       mutex_unlock(&priv->lock);
+
+       return 0;
+}
+
+static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
+                                     int trip, enum thermal_trip_type *type)
+{
+       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+       struct device *dev = rcar_priv_to_dev(priv);
+
+       /* see rcar_thermal_get_temp() */
+       switch (trip) {
+       case 0: /* +90 <= temp */
+               *type = THERMAL_TRIP_CRITICAL;
+               break;
+       default:
+               dev_err(dev, "rcar driver trip error\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
+                                     int trip, unsigned long *temp)
+{
+       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+       struct device *dev = rcar_priv_to_dev(priv);
+
+       /* see rcar_thermal_get_temp() */
+       switch (trip) {
+       case 0: /* +90 <= temp */
+               *temp = MCELSIUS(90);
+               break;
+       default:
+               dev_err(dev, "rcar driver trip error\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int rcar_thermal_notify(struct thermal_zone_device *zone,
+                              int trip, enum thermal_trip_type type)
+{
+       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+       struct device *dev = rcar_priv_to_dev(priv);
+
+       switch (type) {
+       case THERMAL_TRIP_CRITICAL:
+               /* FIXME */
+               dev_warn(dev, "Thermal reached to critical temperature\n");
+               break;
+       default:
+               break;
        }
 
-       *temp = MCELSIUS(tmp);
        return 0;
 }
 
 static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
-       .get_temp = rcar_thermal_get_temp,
+       .get_temp       = rcar_thermal_get_temp,
+       .get_trip_type  = rcar_thermal_get_trip_type,
+       .get_trip_temp  = rcar_thermal_get_trip_temp,
+       .notify         = rcar_thermal_notify,
 };
 
 /*
- *             platform functions
+ *             interrupt
  */
-static int rcar_thermal_probe(struct platform_device *pdev)
+#define rcar_thermal_irq_enable(p)     _rcar_thermal_irq_ctrl(p, 1)
+#define rcar_thermal_irq_disable(p)    _rcar_thermal_irq_ctrl(p, 0)
+static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
+{
+       struct rcar_thermal_common *common = priv->common;
+       unsigned long flags;
+       u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */
+
+       spin_lock_irqsave(&common->lock, flags);
+
+       rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask);
+
+       spin_unlock_irqrestore(&common->lock, flags);
+}
+
+static void rcar_thermal_work(struct work_struct *work)
 {
-       struct thermal_zone_device *zone;
        struct rcar_thermal_priv *priv;
-       struct resource *res;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Could not get platform resource\n");
-               return -ENODEV;
+       priv = container_of(work, struct rcar_thermal_priv, work.work);
+
+       rcar_thermal_update_temp(priv);
+       rcar_thermal_irq_enable(priv);
+       thermal_zone_device_update(priv->zone);
+}
+
+static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
+{
+       struct device *dev = rcar_priv_to_dev(priv);
+
+       status = (status >> rcar_id_to_shift(priv)) & 0x3;
+
+       if (status & 0x3) {
+               dev_dbg(dev, "thermal%d %s%s\n",
+                       priv->id,
+                       (status & 0x2) ? "Rising " : "",
+                       (status & 0x1) ? "Falling" : "");
        }
 
-       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               dev_err(&pdev->dev, "Could not allocate priv\n");
-               return -ENOMEM;
+       return status;
+}
+
+static irqreturn_t rcar_thermal_irq(int irq, void *data)
+{
+       struct rcar_thermal_common *common = data;
+       struct rcar_thermal_priv *priv;
+       unsigned long flags;
+       u32 status, mask;
+
+       spin_lock_irqsave(&common->lock, flags);
+
+       mask    = rcar_thermal_common_read(common, INTMSK);
+       status  = rcar_thermal_common_read(common, STR);
+       rcar_thermal_common_write(common, STR, 0x000F0F0F & mask);
+
+       spin_unlock_irqrestore(&common->lock, flags);
+
+       status = status & ~mask;
+
+       /*
+        * check the status
+        */
+       rcar_thermal_for_each_priv(priv, common) {
+               if (rcar_thermal_had_changed(priv, status)) {
+                       rcar_thermal_irq_disable(priv);
+                       schedule_delayed_work(&priv->work,
+                                             msecs_to_jiffies(300));
+               }
        }
 
-       priv->comp = 4; /* basic setup */
-       priv->dev = &pdev->dev;
-       spin_lock_init(&priv->lock);
-       priv->base = devm_ioremap_nocache(&pdev->dev,
-                                         res->start, resource_size(res));
-       if (!priv->base) {
-               dev_err(&pdev->dev, "Unable to ioremap thermal register\n");
+       return IRQ_HANDLED;
+}
+
+/*
+ *             platform functions
+ */
+static int rcar_thermal_probe(struct platform_device *pdev)
+{
+       struct rcar_thermal_common *common;
+       struct rcar_thermal_priv *priv;
+       struct device *dev = &pdev->dev;
+       struct resource *res, *irq;
+       int mres = 0;
+       int i;
+       int idle = IDLE_INTERVAL;
+
+       common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
+       if (!common) {
+               dev_err(dev, "Could not allocate common\n");
                return -ENOMEM;
        }
 
-       zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv,
-                                   &rcar_thermal_zone_ops, NULL, 0, 0);
-       if (IS_ERR(zone)) {
-               dev_err(&pdev->dev, "thermal zone device is NULL\n");
-               return PTR_ERR(zone);
+       INIT_LIST_HEAD(&common->head);
+       spin_lock_init(&common->lock);
+       common->dev = dev;
+
+       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (irq) {
+               int ret;
+
+               /*
+                * platform has IRQ support.
+                * Then, the driver uses the common registers
+                */
+               res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
+               if (!res) {
+                       dev_err(dev, "Could not get platform resource\n");
+                       return -ENODEV;
+               }
+
+               ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+                                      dev_name(dev), common);
+               if (ret) {
+                       dev_err(dev, "irq request failed\n ");
+                       return ret;
+               }
+
+               /*
+                * rcar_has_irq_support() will be enabled
+                */
+               common->base = devm_request_and_ioremap(dev, res);
+               if (!common->base) {
+                       dev_err(dev, "Unable to ioremap thermal register\n");
+                       return -ENOMEM;
+               }
+
+               /* enable temperature comparison */
+               rcar_thermal_common_write(common, ENR, 0x00030303);
+
+               idle = 0; /* polling delay is not needed */
        }
 
-       platform_set_drvdata(pdev, zone);
+       for (i = 0;; i++) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
+               if (!res)
+                       break;
+
+               priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+               if (!priv) {
+                       dev_err(dev, "Could not allocate priv\n");
+                       return -ENOMEM;
+               }
+
+               priv->base = devm_request_and_ioremap(dev, res);
+               if (!priv->base) {
+                       dev_err(dev, "Unable to ioremap priv register\n");
+                       return -ENOMEM;
+               }
 
-       dev_info(&pdev->dev, "proved\n");
+               priv->common = common;
+               priv->id = i;
+               mutex_init(&priv->lock);
+               INIT_LIST_HEAD(&priv->list);
+               INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
+               rcar_thermal_update_temp(priv);
+
+               priv->zone = thermal_zone_device_register("rcar_thermal",
+                                               1, 0, priv,
+                                               &rcar_thermal_zone_ops, NULL, 0,
+                                               idle);
+               if (IS_ERR(priv->zone)) {
+                       dev_err(dev, "can't register thermal zone\n");
+                       goto error_unregister;
+               }
+
+               list_move_tail(&priv->list, &common->head);
+
+               if (rcar_has_irq_support(priv))
+                       rcar_thermal_irq_enable(priv);
+       }
+
+       platform_set_drvdata(pdev, common);
+
+       dev_info(dev, "%d sensor proved\n", i);
 
        return 0;
+
+error_unregister:
+       rcar_thermal_for_each_priv(priv, common)
+               thermal_zone_device_unregister(priv->zone);
+
+       return -ENODEV;
 }
 
 static int rcar_thermal_remove(struct platform_device *pdev)
 {
-       struct thermal_zone_device *zone = platform_get_drvdata(pdev);
+       struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+       struct rcar_thermal_priv *priv;
+
+       rcar_thermal_for_each_priv(priv, common)
+               thermal_zone_device_unregister(priv->zone);
 
-       thermal_zone_device_unregister(zone);
        platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
 
+static const struct of_device_id rcar_thermal_dt_ids[] = {
+       { .compatible = "renesas,rcar-thermal", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
+
 static struct platform_driver rcar_thermal_driver = {
        .driver = {
                .name   = "rcar_thermal",
+               .of_match_table = rcar_thermal_dt_ids,
        },
        .probe          = rcar_thermal_probe,
        .remove         = rcar_thermal_remove,
index 6b2d8b2..3c5ee56 100644 (file)
@@ -131,7 +131,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       stdev->clk = clk_get(&pdev->dev, NULL);
+       stdev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(stdev->clk)) {
                dev_err(&pdev->dev, "Can't get clock\n");
                return PTR_ERR(stdev->clk);
@@ -140,7 +140,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
        ret = clk_enable(stdev->clk);
        if (ret) {
                dev_err(&pdev->dev, "Can't enable clock\n");
-               goto put_clk;
+               return ret;
        }
 
        stdev->flags = val;
@@ -163,8 +163,6 @@ static int spear_thermal_probe(struct platform_device *pdev)
 
 disable_clk:
        clk_disable(stdev->clk);
-put_clk:
-       clk_put(stdev->clk);
 
        return ret;
 }
@@ -183,7 +181,6 @@ static int spear_thermal_exit(struct platform_device *pdev)
        writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
 
        clk_disable(stdev->clk);
-       clk_put(stdev->clk);
 
        return 0;
 }
index 0cd5e9f..407cde3 100644 (file)
  *       state for this trip point
  *    b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
  *       state for this trip point
+ *    c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
+ *       for this trip point
+ *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
+ *       for this trip point
+ * If the temperature is lower than a trip point,
+ *    a. if the trend is THERMAL_TREND_RAISING, do nothing
+ *    b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
+ *       state for this trip point, if the cooling state already
+ *       equals lower limit, deactivate the thermal instance
+ *    c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
+ *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
+ *       if the cooling state already equals lower limit,
+ *       deactivate the thermal instance
  */
 static unsigned long get_target_state(struct thermal_instance *instance,
-                                       enum thermal_trend trend)
+                               enum thermal_trend trend, bool throttle)
 {
        struct thermal_cooling_device *cdev = instance->cdev;
        unsigned long cur_state;
 
        cdev->ops->get_cur_state(cdev, &cur_state);
 
-       if (trend == THERMAL_TREND_RAISING) {
-               cur_state = cur_state < instance->upper ?
-                           (cur_state + 1) : instance->upper;
-       } else if (trend == THERMAL_TREND_DROPPING) {
-               cur_state = cur_state > instance->lower ?
-                           (cur_state - 1) : instance->lower;
+       switch (trend) {
+       case THERMAL_TREND_RAISING:
+               if (throttle)
+                       cur_state = cur_state < instance->upper ?
+                                   (cur_state + 1) : instance->upper;
+               break;
+       case THERMAL_TREND_RAISE_FULL:
+               if (throttle)
+                       cur_state = instance->upper;
+               break;
+       case THERMAL_TREND_DROPPING:
+               if (cur_state == instance->lower) {
+                       if (!throttle)
+                               cur_state = -1;
+               } else
+                       cur_state -= 1;
+               break;
+       case THERMAL_TREND_DROP_FULL:
+               if (cur_state == instance->lower) {
+                       if (!throttle)
+                               cur_state = -1;
+               } else
+                       cur_state = instance->lower;
+               break;
+       default:
+               break;
        }
 
        return cur_state;
@@ -66,57 +99,14 @@ static void update_passive_instance(struct thermal_zone_device *tz,
                tz->passive += value;
 }
 
-static void update_instance_for_throttle(struct thermal_zone_device *tz,
-                               int trip, enum thermal_trip_type trip_type,
-                               enum thermal_trend trend)
-{
-       struct thermal_instance *instance;
-
-       list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
-               if (instance->trip != trip)
-                       continue;
-
-               instance->target = get_target_state(instance, trend);
-
-               /* Activate a passive thermal instance */
-               if (instance->target == THERMAL_NO_TARGET)
-                       update_passive_instance(tz, trip_type, 1);
-
-               instance->cdev->updated = false; /* cdev needs update */
-       }
-}
-
-static void update_instance_for_dethrottle(struct thermal_zone_device *tz,
-                               int trip, enum thermal_trip_type trip_type)
-{
-       struct thermal_instance *instance;
-       struct thermal_cooling_device *cdev;
-       unsigned long cur_state;
-
-       list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
-               if (instance->trip != trip ||
-                       instance->target == THERMAL_NO_TARGET)
-                       continue;
-
-               cdev = instance->cdev;
-               cdev->ops->get_cur_state(cdev, &cur_state);
-
-               instance->target = cur_state > instance->lower ?
-                           (cur_state - 1) : THERMAL_NO_TARGET;
-
-               /* Deactivate a passive thermal instance */
-               if (instance->target == THERMAL_NO_TARGET)
-                       update_passive_instance(tz, trip_type, -1);
-
-               cdev->updated = false; /* cdev needs update */
-       }
-}
-
 static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 {
        long trip_temp;
        enum thermal_trip_type trip_type;
        enum thermal_trend trend;
+       struct thermal_instance *instance;
+       bool throttle = false;
+       int old_target;
 
        if (trip == THERMAL_TRIPS_NONE) {
                trip_temp = tz->forced_passive;
@@ -128,12 +118,30 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 
        trend = get_tz_trend(tz, trip);
 
+       if (tz->temperature >= trip_temp)
+               throttle = true;
+
        mutex_lock(&tz->lock);
 
-       if (tz->temperature >= trip_temp)
-               update_instance_for_throttle(tz, trip, trip_type, trend);
-       else
-               update_instance_for_dethrottle(tz, trip, trip_type);
+       list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+               if (instance->trip != trip)
+                       continue;
+
+               old_target = instance->target;
+               instance->target = get_target_state(instance, trend, throttle);
+
+               /* Activate a passive thermal instance */
+               if (old_target == THERMAL_NO_TARGET &&
+                       instance->target != THERMAL_NO_TARGET)
+                       update_passive_instance(tz, trip_type, 1);
+               /* Deactivate a passive thermal instance */
+               else if (old_target != THERMAL_NO_TARGET &&
+                       instance->target == THERMAL_NO_TARGET)
+                       update_passive_instance(tz, trip_type, -1);
+
+
+               instance->cdev->updated = false; /* cdev needs update */
+       }
 
        mutex_unlock(&tz->lock);
 }
index 8c8ce80..5b7863a 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/kdev_t.h>
 #include <linux/idr.h>
 #include <linux/thermal.h>
-#include <linux/spinlock.h>
 #include <linux/reboot.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
@@ -132,23 +131,16 @@ EXPORT_SYMBOL_GPL(thermal_unregister_governor);
 
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
-       int err;
-
-again:
-       if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
-               return -ENOMEM;
+       int ret;
 
        if (lock)
                mutex_lock(lock);
-       err = idr_get_new(idr, NULL, id);
+       ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
        if (lock)
                mutex_unlock(lock);
-       if (unlikely(err == -EAGAIN))
-               goto again;
-       else if (unlikely(err))
-               return err;
-
-       *id = *id & MAX_IDR_MASK;
+       if (unlikely(ret < 0))
+               return ret;
+       *id = ret;
        return 0;
 }
 
@@ -355,8 +347,9 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
                tz->ops->notify(tz, trip, trip_type);
 
        if (trip_type == THERMAL_TRIP_CRITICAL) {
-               pr_emerg("Critical temperature reached(%d C),shutting down\n",
-                        tz->temperature / 1000);
+               dev_emerg(&tz->device,
+                         "critical temperature reached(%d C),shutting down\n",
+                         tz->temperature / 1000);
                orderly_poweroff(true);
        }
 }
@@ -378,23 +371,57 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
        monitor_thermal_zone(tz);
 }
 
+static int thermal_zone_get_temp(struct thermal_zone_device *tz,
+                               unsigned long *temp)
+{
+       int ret = 0;
+#ifdef CONFIG_THERMAL_EMULATION
+       int count;
+       unsigned long crit_temp = -1UL;
+       enum thermal_trip_type type;
+#endif
+
+       mutex_lock(&tz->lock);
+
+       ret = tz->ops->get_temp(tz, temp);
+#ifdef CONFIG_THERMAL_EMULATION
+       if (!tz->emul_temperature)
+               goto skip_emul;
+
+       for (count = 0; count < tz->trips; count++) {
+               ret = tz->ops->get_trip_type(tz, count, &type);
+               if (!ret && type == THERMAL_TRIP_CRITICAL) {
+                       ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
+                       break;
+               }
+       }
+
+       if (ret)
+               goto skip_emul;
+
+       if (*temp < crit_temp)
+               *temp = tz->emul_temperature;
+skip_emul:
+#endif
+       mutex_unlock(&tz->lock);
+       return ret;
+}
+
 static void update_temperature(struct thermal_zone_device *tz)
 {
        long temp;
        int ret;
 
-       mutex_lock(&tz->lock);
-
-       ret = tz->ops->get_temp(tz, &temp);
+       ret = thermal_zone_get_temp(tz, &temp);
        if (ret) {
-               pr_warn("failed to read out thermal zone %d\n", tz->id);
-               goto exit;
+               dev_warn(&tz->device, "failed to read out thermal zone %d\n",
+                        tz->id);
+               return;
        }
 
+       mutex_lock(&tz->lock);
        tz->last_temperature = tz->temperature;
        tz->temperature = temp;
-
-exit:
        mutex_unlock(&tz->lock);
 }
 
@@ -437,10 +464,7 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf)
        long temperature;
        int ret;
 
-       if (!tz->ops->get_temp)
-               return -EPERM;
-
-       ret = tz->ops->get_temp(tz, &temperature);
+       ret = thermal_zone_get_temp(tz, &temperature);
 
        if (ret)
                return ret;
@@ -700,6 +724,31 @@ policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
        return sprintf(buf, "%s\n", tz->governor->name);
 }
 
+#ifdef CONFIG_THERMAL_EMULATION
+static ssize_t
+emul_temp_store(struct device *dev, struct device_attribute *attr,
+                    const char *buf, size_t count)
+{
+       struct thermal_zone_device *tz = to_thermal_zone(dev);
+       int ret = 0;
+       unsigned long temperature;
+
+       if (kstrtoul(buf, 10, &temperature))
+               return -EINVAL;
+
+       if (!tz->ops->set_emul_temp) {
+               mutex_lock(&tz->lock);
+               tz->emul_temperature = temperature;
+               mutex_unlock(&tz->lock);
+       } else {
+               ret = tz->ops->set_emul_temp(tz, temperature);
+       }
+
+       return ret ? ret : count;
+}
+static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
+#endif/*CONFIG_THERMAL_EMULATION*/
+
 static DEVICE_ATTR(type, 0444, type_show, NULL);
 static DEVICE_ATTR(temp, 0444, temp_show, NULL);
 static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
@@ -842,7 +891,7 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
                                       temp_input);
        struct thermal_zone_device *tz = temp->tz;
 
-       ret = tz->ops->get_temp(tz, &temperature);
+       ret = thermal_zone_get_temp(tz, &temperature);
 
        if (ret)
                return ret;
@@ -1529,6 +1578,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
        if (!ops || !ops->get_temp)
                return ERR_PTR(-EINVAL);
 
+       if (trips > 0 && !ops->get_trip_type)
+               return ERR_PTR(-EINVAL);
+
        tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
        if (!tz)
                return ERR_PTR(-ENOMEM);
@@ -1592,6 +1644,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
                        goto unregister;
        }
 
+#ifdef CONFIG_THERMAL_EMULATION
+       result = device_create_file(&tz->device, &dev_attr_emul_temp);
+       if (result)
+               goto unregister;
+#endif
        /* Create policy attribute */
        result = device_create_file(&tz->device, &dev_attr_policy);
        if (result)
@@ -1711,7 +1768,8 @@ static struct genl_multicast_group thermal_event_mcgrp = {
        .name = THERMAL_GENL_MCAST_GROUP_NAME,
 };
 
-int thermal_generate_netlink_event(u32 orig, enum events event)
+int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+                                       enum events event)
 {
        struct sk_buff *skb;
        struct nlattr *attr;
@@ -1721,6 +1779,9 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
        int result;
        static unsigned int thermal_event_seqnum;
 
+       if (!tz)
+               return -EINVAL;
+
        /* allocate memory */
        size = nla_total_size(sizeof(struct thermal_genl_event)) +
               nla_total_size(0);
@@ -1755,7 +1816,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
 
        memset(thermal_event, 0, sizeof(struct thermal_genl_event));
 
-       thermal_event->orig = orig;
+       thermal_event->orig = tz->id;
        thermal_event->event = event;
 
        /* send multicast genetlink message */
@@ -1767,7 +1828,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
 
        result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
        if (result)
-               pr_info("failed to send netlink event:%d\n", result);
+               dev_err(&tz->device, "Failed to send netlink event:%d", result);
 
        return result;
 }
@@ -1807,6 +1868,7 @@ static int __init thermal_init(void)
                idr_destroy(&thermal_cdev_idr);
                mutex_destroy(&thermal_idr_lock);
                mutex_destroy(&thermal_list_lock);
+               return result;
        }
        result = genetlink_init();
        return result;
index 1956593..81e939e 100644 (file)
@@ -881,17 +881,12 @@ static struct vio_driver hvcs_vio_driver = {
 /* Only called from hvcs_get_pi please */
 static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
 {
-       int clclength;
-
        hvcsd->p_unit_address = pi->unit_address;
        hvcsd->p_partition_ID  = pi->partition_ID;
-       clclength = strlen(&pi->location_code[0]);
-       if (clclength > HVCS_CLC_LENGTH)
-               clclength = HVCS_CLC_LENGTH;
 
        /* copy the null-term char too */
-       strncpy(&hvcsd->p_location_code[0],
-                       &pi->location_code[0], clclength + 1);
+       strlcpy(&hvcsd->p_location_code[0],
+                       &pi->location_code[0], sizeof(hvcsd->p_location_code));
 }
 
 /*
index a0162cb..cf9210d 100644 (file)
@@ -729,19 +729,19 @@ config SERIAL_SH_SCI_DMA
 
 config SERIAL_PNX8XXX
        bool "Enable PNX8XXX SoCs' UART Support"
-       depends on SOC_PNX8550 || SOC_PNX833X
+       depends on SOC_PNX833X
        select SERIAL_CORE
        help
-         If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
-         and you want to use serial ports, say Y.  Otherwise, say N.
+         If you have a MIPS-based Philips SoC such as PNX8330 and you want
+         to use serial ports, say Y.  Otherwise, say N.
 
 config SERIAL_PNX8XXX_CONSOLE
        bool "Enable PNX8XX0 serial console"
        depends on SERIAL_PNX8XXX
        select SERIAL_CORE_CONSOLE
        help
-         If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
-         and you want to use serial console, say Y. Otherwise, say N.
+         If you have a MIPS-based Philips SoC such as PNX8330 and you want
+         to use serial console, say Y. Otherwise, say N.
 
 config SERIAL_HS_LPC32XX
        tristate "LPC32XX high speed serial port support"
index 814655e..3687f0c 100644 (file)
@@ -870,21 +870,20 @@ static struct input_handler sysrq_handler = {
 
 static bool sysrq_handler_registered;
 
+unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
+
 static inline void sysrq_register_handler(void)
 {
-       extern unsigned short platform_sysrq_reset_seq[] __weak;
        unsigned short key;
        int error;
        int i;
 
-       if (platform_sysrq_reset_seq) {
-               for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
-                       key = platform_sysrq_reset_seq[i];
-                       if (key == KEY_RESERVED || key > KEY_MAX)
-                               break;
+       for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
+               key = platform_sysrq_reset_seq[i];
+               if (key == KEY_RESERVED || key > KEY_MAX)
+                       break;
 
-                       sysrq_reset_seq[sysrq_reset_seq_len++] = key;
-               }
+               sysrq_reset_seq[sysrq_reset_seq_len++] = key;
        }
 
        error = input_register_handler(&sysrq_handler);
index fd47363..05400ac 100644 (file)
@@ -960,11 +960,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
                        loff_t *ppos)
 {
        int i;
-       struct inode *inode = file->f_path.dentry->d_inode;
        struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
 
-       if (tty_paranoia_check(tty, inode, "tty_read"))
+       if (tty_paranoia_check(tty, file_inode(file), "tty_read"))
                return -EIO;
        if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
                return -EIO;
@@ -1132,12 +1131,11 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 static ssize_t tty_write(struct file *file, const char __user *buf,
                                                size_t count, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
        struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
        ssize_t ret;
 
-       if (tty_paranoia_check(tty, inode, "tty_write"))
+       if (tty_paranoia_check(tty, file_inode(file), "tty_write"))
                return -EIO;
        if (!tty || !tty->ops->write ||
                (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -2047,7 +2045,7 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
        struct tty_ldisc *ld;
        int ret = 0;
 
-       if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
+       if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
                return 0;
 
        ld = tty_ldisc_ref_wait(tty);
@@ -2063,7 +2061,7 @@ static int __tty_fasync(int fd, struct file *filp, int on)
        unsigned long flags;
        int retval = 0;
 
-       if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
+       if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync"))
                goto out;
 
        retval = fasync_helper(fd, filp, on, &tty->fasync);
@@ -2637,9 +2635,8 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        void __user *p = (void __user *)arg;
        int retval;
        struct tty_ldisc *ld;
-       struct inode *inode = file->f_dentry->d_inode;
 
-       if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+       if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
                return -EINVAL;
 
        real_tty = tty_pair_get_tty(tty);
@@ -2780,12 +2777,11 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 static long tty_compat_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
 {
-       struct inode *inode = file->f_dentry->d_inode;
        struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
        int retval = -ENOIOCTLCMD;
 
-       if (tty_paranoia_check(tty, inode, "tty_ioctl"))
+       if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
                return -EINVAL;
 
        if (tty->ops->compat_ioctl) {
index fa7268a..e4ca345 100644 (file)
@@ -101,7 +101,7 @@ vcs_poll_data_get(struct file *file)
        poll = kzalloc(sizeof(*poll), GFP_KERNEL);
        if (!poll)
                return NULL;
-       poll->cons_num = iminor(file->f_path.dentry->d_inode) & 127;
+       poll->cons_num = iminor(file_inode(file)) & 127;
        init_waitqueue_head(&poll->waitq);
        poll->notifier.notifier_call = vcs_notifier;
        if (register_vt_notifier(&poll->notifier) != 0) {
@@ -182,7 +182,7 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
        int size;
 
        console_lock();
-       size = vcs_size(file->f_path.dentry->d_inode);
+       size = vcs_size(file_inode(file));
        console_unlock();
        if (size < 0)
                return size;
@@ -208,7 +208,7 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
 static ssize_t
 vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        unsigned int currcons = iminor(inode);
        struct vc_data *vc;
        struct vcs_poll_data *poll;
@@ -386,7 +386,7 @@ unlock_out:
 static ssize_t
 vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        unsigned int currcons = iminor(inode);
        struct vc_data *vc;
        long pos;
index 5110f36..c8b9262 100644 (file)
@@ -369,26 +369,15 @@ static void uio_dev_del_attributes(struct uio_device *idev)
 static int uio_get_minor(struct uio_device *idev)
 {
        int retval = -ENOMEM;
-       int id;
 
        mutex_lock(&minor_lock);
-       if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
-               goto exit;
-
-       retval = idr_get_new(&uio_idr, idev, &id);
-       if (retval < 0) {
-               if (retval == -EAGAIN)
-                       retval = -ENOMEM;
-               goto exit;
-       }
-       if (id < UIO_MAX_DEVICES) {
-               idev->minor = id;
-       } else {
+       retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
+       if (retval >= 0) {
+               idev->minor = retval;
+       } else if (retval == -ENOSPC) {
                dev_err(idev->dev, "too many uio devices\n");
                retval = -EINVAL;
-               idr_remove(&uio_idr, id);
        }
-exit:
        mutex_unlock(&minor_lock);
        return retval;
 }
index e33224e..2a3bbdf 100644 (file)
@@ -665,7 +665,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
 {
        loff_t ret;
 
-       mutex_lock(&file->f_dentry->d_inode->i_mutex);
+       mutex_lock(&file_inode(file)->i_mutex);
 
        switch (orig) {
        case 0:
@@ -681,7 +681,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
                ret = -EINVAL;
        }
 
-       mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+       mutex_unlock(&file_inode(file)->i_mutex);
        return ret;
 }
 
index 4a863fd..8823e98 100644 (file)
@@ -161,7 +161,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
 {
        loff_t ret;
 
-       mutex_lock(&file->f_dentry->d_inode->i_mutex);
+       mutex_lock(&file_inode(file)->i_mutex);
 
        switch (orig) {
        case 0:
@@ -177,7 +177,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
                ret = -EINVAL;
        }
 
-       mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+       mutex_unlock(&file_inode(file)->i_mutex);
        return ret;
 }
 
@@ -1971,7 +1971,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
                                void __user *p)
 {
        struct dev_state *ps = file->private_data;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct usb_device *dev = ps->dev;
        int ret = -ENOTTY;
 
index bc19496..b66130c 100644 (file)
@@ -93,7 +93,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
        if (!access_ok(VERIFY_WRITE, buf, nbytes))
                return -EFAULT;
 
-       mutex_lock(&file->f_dentry->d_inode->i_mutex);
+       mutex_lock(&file_inode(file)->i_mutex);
        list_for_each_entry_safe(req, tmp_req, queue, queue) {
                len = snprintf(tmpbuf, sizeof(tmpbuf),
                                "%8p %08x %c%c%c %5d %c%c%c\n",
@@ -120,7 +120,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
                nbytes -= len;
                buf += len;
        }
-       mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+       mutex_unlock(&file_inode(file)->i_mutex);
 
        return actual;
 }
@@ -168,13 +168,13 @@ out:
 static ssize_t regs_dbg_read(struct file *file, char __user *buf,
                size_t nbytes, loff_t *ppos)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int ret;
 
        mutex_lock(&inode->i_mutex);
        ret = simple_read_from_buffer(buf, nbytes, ppos,
                        file->private_data,
-                       file->f_dentry->d_inode->i_size);
+                       file_inode(file)->i_size);
        mutex_unlock(&inode->i_mutex);
 
        return ret;
index fc5c16c..97666e8 100644 (file)
@@ -978,7 +978,7 @@ static int do_synchronize_cache(struct fsg_common *common)
 static void invalidate_sub(struct fsg_lun *curlun)
 {
        struct file     *filp = curlun->filp;
-       struct inode    *inode = filp->f_path.dentry->d_inode;
+       struct inode    *inode = file_inode(filp);
        unsigned long   rc;
 
        rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
index 35bcc83..bf7a56b 100644 (file)
@@ -688,7 +688,7 @@ static int
 printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
 {
        struct printer_dev      *dev = fd->private_data;
-       struct inode *inode = fd->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(fd);
        unsigned long           flags;
        int                     tx_list_empty;
 
index e4192b8..d9297ee 100644 (file)
@@ -1065,7 +1065,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
 static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
                                size_t count, loff_t *ppos)
 {
-       rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
+       rndis_params *p = PDE(file_inode(file))->data;
        u32 speed = 0;
        int i, fl_speed = 0;
 
index 4ecbf84..dbce3a9 100644 (file)
@@ -440,7 +440,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
        if (!(filp->f_mode & FMODE_WRITE))
                ro = 1;
 
-       inode = filp->f_path.dentry->d_inode;
+       inode = file_inode(filp);
        if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
                LINFO(curlun, "invalid file type: %s\n", filename);
                goto out;
index f904071..20dbdcb 100644 (file)
@@ -113,15 +113,14 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci)
 
        if (want != actual) {
 
-               /* Poll again later */
-               ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
-               ++ehci->ASS_poll_count;
-               return;
+               /* Poll again later, but give up after about 20 ms */
+               if (ehci->ASS_poll_count++ < 20) {
+                       ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
+                       return;
+               }
+               ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
+                               want, actual);
        }
-
-       if (ehci->ASS_poll_count > 20)
-               ehci_dbg(ehci, "ASS poll count reached %d\n",
-                               ehci->ASS_poll_count);
        ehci->ASS_poll_count = 0;
 
        /* The status is up-to-date; restart or stop the schedule as needed */
@@ -160,14 +159,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci)
 
        if (want != actual) {
 
-               /* Poll again later */
-               ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
-               return;
+               /* Poll again later, but give up after about 20 ms */
+               if (ehci->PSS_poll_count++ < 20) {
+                       ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
+                       return;
+               }
+               ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+                               want, actual);
        }
-
-       if (ehci->PSS_poll_count > 20)
-               ehci_dbg(ehci, "PSS poll count reached %d\n",
-                               ehci->PSS_poll_count);
        ehci->PSS_poll_count = 0;
 
        /* The status is up-to-date; restart or stop the schedule as needed */
index 5980758..c41b01e 100644 (file)
@@ -6,3 +6,13 @@ config VFIO_PCI
          use of PCI drivers using the VFIO framework.
 
          If you don't know what to do here, say N.
+
+config VFIO_PCI_VGA
+       bool "VFIO PCI support for VGA devices"
+       depends on VFIO_PCI && X86 && VGA_ARB
+       help
+         Support for VGA extension to VFIO PCI.  This exposes an additional
+         region on VGA devices for accessing legacy VGA addresses used by
+         BIOS and generic video drivers.
+
+         If you don't know what to do here, say N.
index b28e66c..8189cb6 100644 (file)
@@ -84,6 +84,11 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
        } else
                vdev->msix_bar = 0xFF;
 
+#ifdef CONFIG_VFIO_PCI_VGA
+       if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+               vdev->has_vga = true;
+#endif
+
        return 0;
 }
 
@@ -285,6 +290,16 @@ static long vfio_pci_ioctl(void *device_data,
                        info.flags = VFIO_REGION_INFO_FLAG_READ;
                        break;
                }
+               case VFIO_PCI_VGA_REGION_INDEX:
+                       if (!vdev->has_vga)
+                               return -EINVAL;
+
+                       info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+                       info.size = 0xc0000;
+                       info.flags = VFIO_REGION_INFO_FLAG_READ |
+                                    VFIO_REGION_INFO_FLAG_WRITE;
+
+                       break;
                default:
                        return -EINVAL;
                }
@@ -366,52 +381,50 @@ static long vfio_pci_ioctl(void *device_data,
        return -ENOTTY;
 }
 
-static ssize_t vfio_pci_read(void *device_data, char __user *buf,
-                            size_t count, loff_t *ppos)
+static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
+                          size_t count, loff_t *ppos, bool iswrite)
 {
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        struct vfio_pci_device *vdev = device_data;
-       struct pci_dev *pdev = vdev->pdev;
 
        if (index >= VFIO_PCI_NUM_REGIONS)
                return -EINVAL;
 
-       if (index == VFIO_PCI_CONFIG_REGION_INDEX)
-               return vfio_pci_config_readwrite(vdev, buf, count, ppos, false);
-       else if (index == VFIO_PCI_ROM_REGION_INDEX)
-               return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
-       else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
-               return vfio_pci_io_readwrite(vdev, buf, count, ppos, false);
-       else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM)
-               return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
+       switch (index) {
+       case VFIO_PCI_CONFIG_REGION_INDEX:
+               return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);
+
+       case VFIO_PCI_ROM_REGION_INDEX:
+               if (iswrite)
+                       return -EINVAL;
+               return vfio_pci_bar_rw(vdev, buf, count, ppos, false);
+
+       case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+               return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);
+
+       case VFIO_PCI_VGA_REGION_INDEX:
+               return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+       }
 
        return -EINVAL;
 }
 
-static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
-                             size_t count, loff_t *ppos)
+static ssize_t vfio_pci_read(void *device_data, char __user *buf,
+                            size_t count, loff_t *ppos)
 {
-       unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
-       struct vfio_pci_device *vdev = device_data;
-       struct pci_dev *pdev = vdev->pdev;
+       if (!count)
+               return 0;
 
-       if (index >= VFIO_PCI_NUM_REGIONS)
-               return -EINVAL;
+       return vfio_pci_rw(device_data, buf, count, ppos, false);
+}
 
-       if (index == VFIO_PCI_CONFIG_REGION_INDEX)
-               return vfio_pci_config_readwrite(vdev, (char __user *)buf,
-                                                count, ppos, true);
-       else if (index == VFIO_PCI_ROM_REGION_INDEX)
-               return -EINVAL;
-       else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
-               return vfio_pci_io_readwrite(vdev, (char __user *)buf,
-                                            count, ppos, true);
-       else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM) {
-               return vfio_pci_mem_readwrite(vdev, (char __user *)buf,
-                                             count, ppos, true);
-       }
+static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+                             size_t count, loff_t *ppos)
+{
+       if (!count)
+               return 0;
 
-       return -EINVAL;
+       return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
 }
 
 static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
index 8b8f7d1..964ff22 100644 (file)
@@ -587,12 +587,46 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
        return 0;
 }
 
+static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
+                               int count, struct perm_bits *perm,
+                               int offset, __le32 val)
+{
+       count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+       if (count < 0)
+               return count;
+
+       if (offset == PCI_PM_CTRL) {
+               pci_power_t state;
+
+               switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
+               case 0:
+                       state = PCI_D0;
+                       break;
+               case 1:
+                       state = PCI_D1;
+                       break;
+               case 2:
+                       state = PCI_D2;
+                       break;
+               case 3:
+                       state = PCI_D3hot;
+                       break;
+               }
+
+               pci_set_power_state(vdev->pdev, state);
+       }
+
+       return count;
+}
+
 /* Permissions for the Power Management capability */
 static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
 {
        if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
                return -ENOMEM;
 
+       perm->writefn = vfio_pm_config_write;
+
        /*
         * We always virtualize the next field so we can remove
         * capabilities from the chain if we want to.
@@ -600,10 +634,11 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
        p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
 
        /*
-        * Power management is defined *per function*,
-        * so we let the user write this
+        * Power management is defined *per function*, so we can let
+        * the user change power state, but we trap and initiate the
+        * change ourselves, so the state bits are read-only.
         */
-       p_setd(perm, PCI_PM_CTRL, NO_VIRT, ALL_WRITE);
+       p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK);
        return 0;
 }
 
@@ -985,12 +1020,12 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
                if (ret)
                        return pcibios_err_to_errno(ret);
 
+               vdev->extended_caps = true;
+
                if ((word & PCI_EXP_FLAGS_VERS) == 1)
                        return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
-               else {
-                       vdev->extended_caps = true;
+               else
                        return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
-               }
        case PCI_CAP_ID_HT:
                ret = pci_read_config_byte(pdev, pos + 3, &byte);
                if (ret)
@@ -1501,9 +1536,8 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
        return ret;
 }
 
-ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
-                                 char __user *buf, size_t count,
-                                 loff_t *ppos, bool iswrite)
+ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
+                          size_t count, loff_t *ppos, bool iswrite)
 {
        size_t done = 0;
        int ret = 0;
index 611827c..d7e55d0 100644 (file)
@@ -53,6 +53,7 @@ struct vfio_pci_device {
        bool                    reset_works;
        bool                    extended_caps;
        bool                    bardirty;
+       bool                    has_vga;
        struct pci_saved_state  *pci_saved_state;
        atomic_t                refcnt;
 };
@@ -70,15 +71,15 @@ extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev,
                                   uint32_t flags, unsigned index,
                                   unsigned start, unsigned count, void *data);
 
-extern ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
-                                        char __user *buf, size_t count,
-                                        loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev,
-                                     char __user *buf, size_t count,
-                                     loff_t *ppos, bool iswrite);
-extern ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev,
-                                    char __user *buf, size_t count,
-                                    loff_t *ppos, bool iswrite);
+extern ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev,
+                                 char __user *buf, size_t count,
+                                 loff_t *ppos, bool iswrite);
+
+extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+                              size_t count, loff_t *ppos, bool iswrite);
+
+extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+                              size_t count, loff_t *ppos, bool iswrite);
 
 extern int vfio_pci_init_perm_bits(void);
 extern void vfio_pci_uninit_perm_bits(void);
index f72323e..210db24 100644 (file)
 #include <linux/pci.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/vgaarb.h>
 
 #include "vfio_pci_private.h"
 
-/* I/O Port BAR access */
-ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev, char __user *buf,
-                             size_t count, loff_t *ppos, bool iswrite)
+/*
+ * Read or write from an __iomem region (MMIO or I/O port) with an excluded
+ * range which is inaccessible.  The excluded range drops writes and fills
+ * reads with -1.  This is intended for handling MSI-X vector tables and
+ * leftover space for ROM BARs.
+ */
+static ssize_t do_io_rw(void __iomem *io, char __user *buf,
+                       loff_t off, size_t count, size_t x_start,
+                       size_t x_end, bool iswrite)
 {
-       struct pci_dev *pdev = vdev->pdev;
-       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
-       int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
-       void __iomem *io;
-       size_t done = 0;
-
-       if (!pci_resource_start(pdev, bar))
-               return -EINVAL;
-
-       if (pos + count > pci_resource_len(pdev, bar))
-               return -EINVAL;
-
-       if (!vdev->barmap[bar]) {
-               int ret;
-
-               ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
-               if (ret)
-                       return ret;
-
-               vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
-
-               if (!vdev->barmap[bar]) {
-                       pci_release_selected_regions(pdev, 1 << bar);
-                       return -EINVAL;
-               }
-       }
-
-       io = vdev->barmap[bar];
+       ssize_t done = 0;
 
        while (count) {
-               int filled;
+               size_t fillable, filled;
+
+               if (off < x_start)
+                       fillable = min(count, (size_t)(x_start - off));
+               else if (off >= x_end)
+                       fillable = count;
+               else
+                       fillable = 0;
 
-               if (count >= 3 && !(pos % 4)) {
+               if (fillable >= 4 && !(off % 4)) {
                        __le32 val;
 
                        if (iswrite) {
                                if (copy_from_user(&val, buf, 4))
                                        return -EFAULT;
 
-                               iowrite32(le32_to_cpu(val), io + pos);
+                               iowrite32(le32_to_cpu(val), io + off);
                        } else {
-                               val = cpu_to_le32(ioread32(io + pos));
+                               val = cpu_to_le32(ioread32(io + off));
 
                                if (copy_to_user(buf, &val, 4))
                                        return -EFAULT;
                        }
 
                        filled = 4;
-
-               } else if ((pos % 2) == 0 && count >= 2) {
+               } else if (fillable >= 2 && !(off % 2)) {
                        __le16 val;
 
                        if (iswrite) {
                                if (copy_from_user(&val, buf, 2))
                                        return -EFAULT;
 
-                               iowrite16(le16_to_cpu(val), io + pos);
+                               iowrite16(le16_to_cpu(val), io + off);
                        } else {
-                               val = cpu_to_le16(ioread16(io + pos));
+                               val = cpu_to_le16(ioread16(io + off));
 
                                if (copy_to_user(buf, &val, 2))
                                        return -EFAULT;
                        }
 
                        filled = 2;
-               } else {
+               } else if (fillable) {
                        u8 val;
 
                        if (iswrite) {
                                if (copy_from_user(&val, buf, 1))
                                        return -EFAULT;
 
-                               iowrite8(val, io + pos);
+                               iowrite8(val, io + off);
                        } else {
-                               val = ioread8(io + pos);
+                               val = ioread8(io + off);
 
                                if (copy_to_user(buf, &val, 1))
                                        return -EFAULT;
                        }
 
                        filled = 1;
+               } else {
+                       /* Fill reads with -1, drop writes */
+                       filled = min(count, (size_t)(x_end - off));
+                       if (!iswrite) {
+                               u8 val = 0xFF;
+                               size_t i;
+
+                               for (i = 0; i < filled; i++)
+                                       if (copy_to_user(buf + i, &val, 1))
+                                               return -EFAULT;
+                       }
                }
 
                count -= filled;
                done += filled;
+               off += filled;
                buf += filled;
-               pos += filled;
        }
 
-       *ppos += done;
-
        return done;
 }
 
-/*
- * MMIO BAR access
- * We handle two excluded ranges here as well, if the user tries to read
- * the ROM beyond what PCI tells us is available or the MSI-X table region,
- * we return 0xFF and writes are dropped.
- */
-ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
-                              size_t count, loff_t *ppos, bool iswrite)
+ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
+                       size_t count, loff_t *ppos, bool iswrite)
 {
        struct pci_dev *pdev = vdev->pdev;
        loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
        int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
-       void __iomem *io;
+       size_t x_start = 0, x_end = 0;
        resource_size_t end;
-       size_t done = 0;
-       size_t x_start = 0, x_end = 0; /* excluded range */
+       void __iomem *io;
+       ssize_t done;
 
        if (!pci_resource_start(pdev, bar))
                return -EINVAL;
 
        end = pci_resource_len(pdev, bar);
 
-       if (pos > end)
+       if (pos >= end)
                return -EINVAL;
 
-       if (pos == end)
-               return 0;
-
-       if (pos + count > end)
-               count = end - pos;
+       count = min(count, (size_t)(end - pos));
 
        if (bar == PCI_ROM_RESOURCE) {
+               /*
+                * The ROM can fill less space than the BAR, so we start the
+                * excluded range at the end of the actual ROM.  This makes
+                * filling large ROM BARs much faster.
+                */
                io = pci_map_rom(pdev, &x_start);
+               if (!io)
+                       return -ENOMEM;
                x_end = end;
-       } else {
-               if (!vdev->barmap[bar]) {
-                       int ret;
-
-                       ret = pci_request_selected_regions(pdev, 1 << bar,
-                                                          "vfio");
-                       if (ret)
-                               return ret;
+       } else if (!vdev->barmap[bar]) {
+               int ret;
 
-                       vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
+               ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+               if (ret)
+                       return ret;
 
-                       if (!vdev->barmap[bar]) {
-                               pci_release_selected_regions(pdev, 1 << bar);
-                               return -EINVAL;
-                       }
+               io = pci_iomap(pdev, bar, 0);
+               if (!io) {
+                       pci_release_selected_regions(pdev, 1 << bar);
+                       return -ENOMEM;
                }
 
+               vdev->barmap[bar] = io;
+       } else
                io = vdev->barmap[bar];
 
-               if (bar == vdev->msix_bar) {
-                       x_start = vdev->msix_offset;
-                       x_end = vdev->msix_offset + vdev->msix_size;
-               }
+       if (bar == vdev->msix_bar) {
+               x_start = vdev->msix_offset;
+               x_end = vdev->msix_offset + vdev->msix_size;
        }
 
-       if (!io)
-               return -EINVAL;
-
-       while (count) {
-               size_t fillable, filled;
-
-               if (pos < x_start)
-                       fillable = x_start - pos;
-               else if (pos >= x_end)
-                       fillable = end - pos;
-               else
-                       fillable = 0;
-
-               if (fillable >= 4 && !(pos % 4) && (count >= 4)) {
-                       __le32 val;
-
-                       if (iswrite) {
-                               if (copy_from_user(&val, buf, 4))
-                                       goto out;
-
-                               iowrite32(le32_to_cpu(val), io + pos);
-                       } else {
-                               val = cpu_to_le32(ioread32(io + pos));
+       done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
 
-                               if (copy_to_user(buf, &val, 4))
-                                       goto out;
-                       }
+       if (done >= 0)
+               *ppos += done;
 
-                       filled = 4;
-               } else if (fillable >= 2 && !(pos % 2) && (count >= 2)) {
-                       __le16 val;
-
-                       if (iswrite) {
-                               if (copy_from_user(&val, buf, 2))
-                                       goto out;
-
-                               iowrite16(le16_to_cpu(val), io + pos);
-                       } else {
-                               val = cpu_to_le16(ioread16(io + pos));
-
-                               if (copy_to_user(buf, &val, 2))
-                                       goto out;
-                       }
-
-                       filled = 2;
-               } else if (fillable) {
-                       u8 val;
+       if (bar == PCI_ROM_RESOURCE)
+               pci_unmap_rom(pdev, io);
 
-                       if (iswrite) {
-                               if (copy_from_user(&val, buf, 1))
-                                       goto out;
+       return done;
+}
 
-                               iowrite8(val, io + pos);
-                       } else {
-                               val = ioread8(io + pos);
+ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+                              size_t count, loff_t *ppos, bool iswrite)
+{
+       int ret;
+       loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
+       void __iomem *iomem = NULL;
+       unsigned int rsrc;
+       bool is_ioport;
+       ssize_t done;
+
+       if (!vdev->has_vga)
+               return -EINVAL;
 
-                               if (copy_to_user(buf, &val, 1))
-                                       goto out;
-                       }
+       switch (pos) {
+       case 0xa0000 ... 0xbffff:
+               count = min(count, (size_t)(0xc0000 - pos));
+               iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
+               off = pos - 0xa0000;
+               rsrc = VGA_RSRC_LEGACY_MEM;
+               is_ioport = false;
+               break;
+       case 0x3b0 ... 0x3bb:
+               count = min(count, (size_t)(0x3bc - pos));
+               iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
+               off = pos - 0x3b0;
+               rsrc = VGA_RSRC_LEGACY_IO;
+               is_ioport = true;
+               break;
+       case 0x3c0 ... 0x3df:
+               count = min(count, (size_t)(0x3e0 - pos));
+               iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
+               off = pos - 0x3c0;
+               rsrc = VGA_RSRC_LEGACY_IO;
+               is_ioport = true;
+               break;
+       default:
+               return -EINVAL;
+       }
 
-                       filled = 1;
-               } else {
-                       /* Drop writes, fill reads with FF */
-                       filled = min((size_t)(x_end - pos), count);
-                       if (!iswrite) {
-                               char val = 0xFF;
-                               size_t i;
+       if (!iomem)
+               return -ENOMEM;
 
-                               for (i = 0; i < filled; i++) {
-                                       if (put_user(val, buf + i))
-                                               goto out;
-                               }
-                       }
+       ret = vga_get_interruptible(vdev->pdev, rsrc);
+       if (ret) {
+               is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
+               return ret;
+       }
 
-               }
+       done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
 
-               count -= filled;
-               done += filled;
-               buf += filled;
-               pos += filled;
-       }
+       vga_put(vdev->pdev, rsrc);
 
-       *ppos += done;
+       is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
 
-out:
-       if (bar == PCI_ROM_RESOURCE)
-               pci_unmap_rom(pdev, io);
+       if (done >= 0)
+               *ppos += done;
 
-       return count ? -EFAULT : done;
+       return done;
 }
index 12c264d..fcc12f3 100644 (file)
@@ -139,23 +139,8 @@ EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
  */
 static int vfio_alloc_group_minor(struct vfio_group *group)
 {
-       int ret, minor;
-
-again:
-       if (unlikely(idr_pre_get(&vfio.group_idr, GFP_KERNEL) == 0))
-               return -ENOMEM;
-
        /* index 0 is used by /dev/vfio/vfio */
-       ret = idr_get_new_above(&vfio.group_idr, group, 1, &minor);
-       if (ret == -EAGAIN)
-               goto again;
-       if (ret || minor > MINORMASK) {
-               if (minor > MINORMASK)
-                       idr_remove(&vfio.group_idr, minor);
-               return -ENOSPC;
-       }
-
-       return minor;
+       return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL);
 }
 
 static void vfio_free_group_minor(int minor)
@@ -442,7 +427,7 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
  * a device.  It's not always practical to leave a device within a group
  * driverless as it could get re-bound to something unsafe.
  */
-static const char * const vfio_driver_whitelist[] = { "pci-stub" };
+static const char * const vfio_driver_whitelist[] = { "pci-stub", "pcieport" };
 
 static bool vfio_whitelisted_driver(struct device_driver *drv)
 {
@@ -642,33 +627,16 @@ int vfio_add_group_dev(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(vfio_add_group_dev);
 
-/* Test whether a struct device is present in our tracking */
-static bool vfio_dev_present(struct device *dev)
+/* Given a referenced group, check if it contains the device */
+static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
 {
-       struct iommu_group *iommu_group;
-       struct vfio_group *group;
        struct vfio_device *device;
 
-       iommu_group = iommu_group_get(dev);
-       if (!iommu_group)
-               return false;
-
-       group = vfio_group_get_from_iommu(iommu_group);
-       if (!group) {
-               iommu_group_put(iommu_group);
-               return false;
-       }
-
        device = vfio_group_get_device(group, dev);
-       if (!device) {
-               vfio_group_put(group);
-               iommu_group_put(iommu_group);
+       if (!device)
                return false;
-       }
 
        vfio_device_put(device);
-       vfio_group_put(group);
-       iommu_group_put(iommu_group);
        return true;
 }
 
@@ -682,10 +650,18 @@ void *vfio_del_group_dev(struct device *dev)
        struct iommu_group *iommu_group = group->iommu_group;
        void *device_data = device->device_data;
 
+       /*
+        * The group exists so long as we have a device reference.  Get
+        * a group reference and use it to scan for the device going away.
+        */
+       vfio_group_get(group);
+
        vfio_device_put(device);
 
        /* TODO send a signal to encourage this to be released */
-       wait_event(vfio.release_q, !vfio_dev_present(dev));
+       wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+
+       vfio_group_put(group);
 
        iommu_group_put(iommu_group);
 
index be27b55..db10d01 100644 (file)
@@ -384,6 +384,12 @@ config BACKLIGHT_LP855X
          This supports TI LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
          backlight driver.
 
+config BACKLIGHT_LP8788
+       tristate "Backlight driver for TI LP8788 MFD"
+       depends on BACKLIGHT_CLASS_DEVICE && MFD_LP8788
+       help
+         This supports TI LP8788 backlight driver.
+
 config BACKLIGHT_OT200
        tristate "Backlight driver for ot200 visualisation device"
        depends on BACKLIGHT_CLASS_DEVICE && CS5535_MFGPT && GPIO_CS5535
index 4606c21..96c4d62 100644 (file)
@@ -38,6 +38,7 @@ obj-$(CONFIG_BACKLIGHT_LM3630)                += lm3630_bl.o
 obj-$(CONFIG_BACKLIGHT_LM3639)         += lm3639_bl.o
 obj-$(CONFIG_BACKLIGHT_LOCOMO)         += locomolcd.o
 obj-$(CONFIG_BACKLIGHT_LP855X)         += lp855x_bl.o
+obj-$(CONFIG_BACKLIGHT_LP8788)         += lp8788_bl.o
 obj-$(CONFIG_BACKLIGHT_MAX8925)                += max8925_bl.o
 obj-$(CONFIG_BACKLIGHT_OMAP1)          += omap1_bl.o
 obj-$(CONFIG_BACKLIGHT_OT200)          += ot200_bl.o
index d29e494..c02aa2c 100644 (file)
@@ -317,10 +317,7 @@ static int ams369fg06_power_on(struct ams369fg06 *lcd)
        pd = lcd->lcd_pd;
        bd = lcd->bd;
 
-       if (!pd->power_on) {
-               dev_err(lcd->dev, "power_on is NULL.\n");
-               return -EINVAL;
-       } else {
+       if (pd->power_on) {
                pd->power_on(lcd->ld, 1);
                msleep(pd->power_on_delay);
        }
@@ -370,7 +367,8 @@ static int ams369fg06_power_off(struct ams369fg06 *lcd)
 
        msleep(pd->power_off_delay);
 
-       pd->power_on(lcd->ld, 0);
+       if (pd->power_on)
+               pd->power_on(lcd->ld, 0);
 
        return 0;
 }
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
new file mode 100644 (file)
index 0000000..4bb8b4f
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * TI LP8788 MFD - backlight driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+/* Register address */
+#define LP8788_BL_CONFIG               0x96
+#define LP8788_BL_EN                   BIT(0)
+#define LP8788_BL_PWM_INPUT_EN         BIT(5)
+#define LP8788_BL_FULLSCALE_SHIFT      2
+#define LP8788_BL_DIM_MODE_SHIFT       1
+#define LP8788_BL_PWM_POLARITY_SHIFT   6
+
+#define LP8788_BL_BRIGHTNESS           0x97
+
+#define LP8788_BL_RAMP                 0x98
+#define LP8788_BL_RAMP_RISE_SHIFT      4
+
+#define MAX_BRIGHTNESS                 127
+#define DEFAULT_BL_NAME                        "lcd-backlight"
+
+struct lp8788_bl_config {
+       enum lp8788_bl_ctrl_mode bl_mode;
+       enum lp8788_bl_dim_mode dim_mode;
+       enum lp8788_bl_full_scale_current full_scale;
+       enum lp8788_bl_ramp_step rise_time;
+       enum lp8788_bl_ramp_step fall_time;
+       enum pwm_polarity pwm_pol;
+};
+
+struct lp8788_bl {
+       struct lp8788 *lp;
+       struct backlight_device *bl_dev;
+       struct lp8788_backlight_platform_data *pdata;
+       enum lp8788_bl_ctrl_mode mode;
+       struct pwm_device *pwm;
+};
+
+struct lp8788_bl_config default_bl_config = {
+       .bl_mode    = LP8788_BL_REGISTER_ONLY,
+       .dim_mode   = LP8788_DIM_EXPONENTIAL,
+       .full_scale = LP8788_FULLSCALE_1900uA,
+       .rise_time  = LP8788_RAMP_8192us,
+       .fall_time  = LP8788_RAMP_8192us,
+       .pwm_pol    = PWM_POLARITY_NORMAL,
+};
+
+static inline bool is_brightness_ctrl_by_pwm(enum lp8788_bl_ctrl_mode mode)
+{
+       return (mode == LP8788_BL_COMB_PWM_BASED);
+}
+
+static inline bool is_brightness_ctrl_by_register(enum lp8788_bl_ctrl_mode mode)
+{
+       return (mode == LP8788_BL_REGISTER_ONLY ||
+               mode == LP8788_BL_COMB_REGISTER_BASED);
+}
+
+static int lp8788_backlight_configure(struct lp8788_bl *bl)
+{
+       struct lp8788_backlight_platform_data *pdata = bl->pdata;
+       struct lp8788_bl_config *cfg = &default_bl_config;
+       int ret;
+       u8 val;
+
+       /*
+        * Update chip configuration if platform data exists,
+        * otherwise use the default settings.
+        */
+       if (pdata) {
+               cfg->bl_mode    = pdata->bl_mode;
+               cfg->dim_mode   = pdata->dim_mode;
+               cfg->full_scale = pdata->full_scale;
+               cfg->rise_time  = pdata->rise_time;
+               cfg->fall_time  = pdata->fall_time;
+               cfg->pwm_pol    = pdata->pwm_pol;
+       }
+
+       /* Brightness ramp up/down */
+       val = (cfg->rise_time << LP8788_BL_RAMP_RISE_SHIFT) | cfg->fall_time;
+       ret = lp8788_write_byte(bl->lp, LP8788_BL_RAMP, val);
+       if (ret)
+               return ret;
+
+       /* Fullscale current setting */
+       val = (cfg->full_scale << LP8788_BL_FULLSCALE_SHIFT) |
+               (cfg->dim_mode << LP8788_BL_DIM_MODE_SHIFT);
+
+       /* Brightness control mode */
+       switch (cfg->bl_mode) {
+       case LP8788_BL_REGISTER_ONLY:
+               val |= LP8788_BL_EN;
+               break;
+       case LP8788_BL_COMB_PWM_BASED:
+       case LP8788_BL_COMB_REGISTER_BASED:
+               val |= LP8788_BL_EN | LP8788_BL_PWM_INPUT_EN |
+                       (cfg->pwm_pol << LP8788_BL_PWM_POLARITY_SHIFT);
+               break;
+       default:
+               dev_err(bl->lp->dev, "invalid mode: %d\n", cfg->bl_mode);
+               return -EINVAL;
+       }
+
+       bl->mode = cfg->bl_mode;
+
+       return lp8788_write_byte(bl->lp, LP8788_BL_CONFIG, val);
+}
+
+static void lp8788_pwm_ctrl(struct lp8788_bl *bl, int br, int max_br)
+{
+       unsigned int period;
+       unsigned int duty;
+       struct device *dev;
+       struct pwm_device *pwm;
+
+       if (!bl->pdata)
+               return;
+
+       period = bl->pdata->period_ns;
+       duty = br * period / max_br;
+       dev = bl->lp->dev;
+
+       /* request PWM device with the consumer name */
+       if (!bl->pwm) {
+               pwm = devm_pwm_get(dev, LP8788_DEV_BACKLIGHT);
+               if (IS_ERR(pwm)) {
+                       dev_err(dev, "can not get PWM device\n");
+                       return;
+               }
+
+               bl->pwm = pwm;
+       }
+
+       pwm_config(bl->pwm, duty, period);
+       if (duty)
+               pwm_enable(bl->pwm);
+       else
+               pwm_disable(bl->pwm);
+}
+
+static int lp8788_bl_update_status(struct backlight_device *bl_dev)
+{
+       struct lp8788_bl *bl = bl_get_data(bl_dev);
+       enum lp8788_bl_ctrl_mode mode = bl->mode;
+
+       if (bl_dev->props.state & BL_CORE_SUSPENDED)
+               bl_dev->props.brightness = 0;
+
+       if (is_brightness_ctrl_by_pwm(mode)) {
+               int brt = bl_dev->props.brightness;
+               int max = bl_dev->props.max_brightness;
+
+               lp8788_pwm_ctrl(bl, brt, max);
+       } else if (is_brightness_ctrl_by_register(mode)) {
+               u8 brt = bl_dev->props.brightness;
+
+               lp8788_write_byte(bl->lp, LP8788_BL_BRIGHTNESS, brt);
+       }
+
+       return 0;
+}
+
+static int lp8788_bl_get_brightness(struct backlight_device *bl_dev)
+{
+       return bl_dev->props.brightness;
+}
+
+static const struct backlight_ops lp8788_bl_ops = {
+       .options = BL_CORE_SUSPENDRESUME,
+       .update_status = lp8788_bl_update_status,
+       .get_brightness = lp8788_bl_get_brightness,
+};
+
+static int lp8788_backlight_register(struct lp8788_bl *bl)
+{
+       struct backlight_device *bl_dev;
+       struct backlight_properties props;
+       struct lp8788_backlight_platform_data *pdata = bl->pdata;
+       int init_brt;
+       char *name;
+
+       props.type = BACKLIGHT_PLATFORM;
+       props.max_brightness = MAX_BRIGHTNESS;
+
+       /* Initial brightness */
+       if (pdata)
+               init_brt = min_t(int, pdata->initial_brightness,
+                               props.max_brightness);
+       else
+               init_brt = 0;
+
+       props.brightness = init_brt;
+
+       /* Backlight device name */
+       if (!pdata || !pdata->name)
+               name = DEFAULT_BL_NAME;
+       else
+               name = pdata->name;
+
+       bl_dev = backlight_device_register(name, bl->lp->dev, bl,
+                                      &lp8788_bl_ops, &props);
+       if (IS_ERR(bl_dev))
+               return PTR_ERR(bl_dev);
+
+       bl->bl_dev = bl_dev;
+
+       return 0;
+}
+
+static void lp8788_backlight_unregister(struct lp8788_bl *bl)
+{
+       struct backlight_device *bl_dev = bl->bl_dev;
+
+       if (bl_dev)
+               backlight_device_unregister(bl_dev);
+}
+
+static ssize_t lp8788_get_bl_ctl_mode(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct lp8788_bl *bl = dev_get_drvdata(dev);
+       enum lp8788_bl_ctrl_mode mode = bl->mode;
+       char *strmode;
+
+       if (is_brightness_ctrl_by_pwm(mode))
+               strmode = "PWM based";
+       else if (is_brightness_ctrl_by_register(mode))
+               strmode = "Register based";
+       else
+               strmode = "Invalid mode";
+
+       return scnprintf(buf, PAGE_SIZE, "%s\n", strmode);
+}
+
+static DEVICE_ATTR(bl_ctl_mode, S_IRUGO, lp8788_get_bl_ctl_mode, NULL);
+
+static struct attribute *lp8788_attributes[] = {
+       &dev_attr_bl_ctl_mode.attr,
+       NULL,
+};
+
+static const struct attribute_group lp8788_attr_group = {
+       .attrs = lp8788_attributes,
+};
+
+static int lp8788_backlight_probe(struct platform_device *pdev)
+{
+       struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+       struct lp8788_bl *bl;
+       int ret;
+
+       bl = devm_kzalloc(lp->dev, sizeof(struct lp8788_bl), GFP_KERNEL);
+       if (!bl)
+               return -ENOMEM;
+
+       bl->lp = lp;
+       if (lp->pdata)
+               bl->pdata = lp->pdata->bl_pdata;
+
+       platform_set_drvdata(pdev, bl);
+
+       ret = lp8788_backlight_configure(bl);
+       if (ret) {
+               dev_err(lp->dev, "backlight config err: %d\n", ret);
+               goto err_dev;
+       }
+
+       ret = lp8788_backlight_register(bl);
+       if (ret) {
+               dev_err(lp->dev, "register backlight err: %d\n", ret);
+               goto err_dev;
+       }
+
+       ret = sysfs_create_group(&pdev->dev.kobj, &lp8788_attr_group);
+       if (ret) {
+               dev_err(lp->dev, "register sysfs err: %d\n", ret);
+               goto err_sysfs;
+       }
+
+       backlight_update_status(bl->bl_dev);
+
+       return 0;
+
+err_sysfs:
+       lp8788_backlight_unregister(bl);
+err_dev:
+       return ret;
+}
+
+static int lp8788_backlight_remove(struct platform_device *pdev)
+{
+       struct lp8788_bl *bl = platform_get_drvdata(pdev);
+       struct backlight_device *bl_dev = bl->bl_dev;
+
+       bl_dev->props.brightness = 0;
+       backlight_update_status(bl_dev);
+       sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group);
+       lp8788_backlight_unregister(bl);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static struct platform_driver lp8788_bl_driver = {
+       .probe = lp8788_backlight_probe,
+       .remove = lp8788_backlight_remove,
+       .driver = {
+               .name = LP8788_DEV_BACKLIGHT,
+               .owner = THIS_MODULE,
+       },
+};
+module_platform_driver(lp8788_bl_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8788 Backlight Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-backlight");
index 88cad6b..900aa4e 100644 (file)
@@ -69,7 +69,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
 int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct fb_info *info = file->private_data;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;
index dc61c12..7c25408 100644 (file)
@@ -727,7 +727,7 @@ static const struct file_operations fb_proc_fops = {
  */
 static struct fb_info *file_fb_info(struct file *file)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int fbidx = iminor(inode);
        struct fb_info *info = registered_fb[fbidx];
 
index f2566c1..113c787 100644 (file)
@@ -261,7 +261,7 @@ int get_img(struct mdp_img *img, struct fb_info *info,
        if (f.file == NULL)
                return -1;
 
-       if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+       if (MAJOR(file_inode(f.file)->i_rdev) == FB_MAJOR) {
                *start = info->fix.smem_start;
                *len = info->fix.smem_len;
        } else
index 797e1c7..8dab163 100644 (file)
@@ -560,18 +560,7 @@ static struct virtio_driver virtio_balloon_driver = {
 #endif
 };
 
-static int __init init(void)
-{
-       return register_virtio_driver(&virtio_balloon_driver);
-}
-
-static void __exit fini(void)
-{
-       unregister_virtio_driver(&virtio_balloon_driver);
-}
-module_init(init);
-module_exit(fini);
-
+module_virtio_driver(virtio_balloon_driver);
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio balloon driver");
 MODULE_LICENSE("GPL");
index 31f966f..1ba0d68 100644 (file)
@@ -75,7 +75,7 @@
  *
  * 0x050  W  QueueNotify      Queue notifier
  * 0x060  R  InterruptStatus  Interrupt status register
- * 0x060  W  InterruptACK     Interrupt acknowledge register
+ * 0x064  W  InterruptACK     Interrupt acknowledge register
  * 0x070  RW Status           Device status register
  *
  * 0x100+ RW                  Device-specific configuration space
@@ -423,7 +423,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
        return vm_dev->pdev->name;
 }
 
-static struct virtio_config_ops virtio_mmio_config_ops = {
+static const struct virtio_config_ops virtio_mmio_config_ops = {
        .get            = vm_get,
        .set            = vm_set,
        .get_status     = vm_get_status,
index 0c14289..a7ce730 100644 (file)
@@ -91,9 +91,9 @@ struct virtio_pci_vq_info
 };
 
 /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
-static struct pci_device_id virtio_pci_id_table[] = {
-       { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-       { 0 },
+static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = {
+       { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
+       { 0 }
 };
 
 MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
@@ -652,7 +652,7 @@ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
        return 0;
 }
 
-static struct virtio_config_ops virtio_pci_config_ops = {
+static const struct virtio_config_ops virtio_pci_config_ops = {
        .get            = vp_get,
        .set            = vp_set,
        .get_status     = vp_get_status,
index 372c8c0..950d354 100644 (file)
@@ -157,9 +157,16 @@ static int mxc_w1_remove(struct platform_device *pdev)
        return 0;
 }
 
+static struct of_device_id mxc_w1_dt_ids[] = {
+       { .compatible = "fsl,imx21-owire" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxc_w1_dt_ids);
+
 static struct platform_driver mxc_w1_driver = {
        .driver = {
-                  .name = "mxc_w1",
+               .name = "mxc_w1",
+               .of_match_table = mxc_w1_dt_ids,
        },
        .probe = mxc_w1_probe,
        .remove = mxc_w1_remove,
index 6752669..762561f 100644 (file)
@@ -17,11 +17,16 @@ config W1_SLAVE_SMEM
          simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
 
 config W1_SLAVE_DS2408
-        tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
-        help
-          Say Y here if you want to use a 1-wire
+       tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
+       help
+         Say Y here if you want to use a 1-wire
+         DS2408 8-Channel Addressable Switch device support
 
-                 DS2408 8-Channel Addressable Switch device support
+config W1_SLAVE_DS2413
+       tristate "Dual Channel Addressable Switch 0x3a family support (DS2413)"
+       help
+         Say Y here if you want to use a 1-wire
+         DS2413 Dual Channel Addressable Switch device support
 
 config W1_SLAVE_DS2423
        tristate "Counter 1-wire device (DS2423)"
index 05188f6..06529f3 100644 (file)
@@ -4,7 +4,8 @@
 
 obj-$(CONFIG_W1_SLAVE_THERM)   += w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)    += w1_smem.o
-obj-$(CONFIG_W1_SLAVE_DS2408)   += w1_ds2408.o
+obj-$(CONFIG_W1_SLAVE_DS2408)  += w1_ds2408.o
+obj-$(CONFIG_W1_SLAVE_DS2413)  += w1_ds2413.o
 obj-$(CONFIG_W1_SLAVE_DS2423)  += w1_ds2423.o
 obj-$(CONFIG_W1_SLAVE_DS2431)  += w1_ds2431.o
 obj-$(CONFIG_W1_SLAVE_DS2433)  += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
new file mode 100644 (file)
index 0000000..8297862
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * w1_ds2413.c - w1 family 3a (DS2413) driver
+ * based on w1_ds2408.c by Jean-Francois Dagenais <dagenaisj@sonatest.com>
+ *
+ * Copyright (c) 2013 Mariusz Bialonczyk <manio@skyboo.net>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
+MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
+
+#define W1_F3A_RETRIES                     3
+#define W1_F3A_FUNC_PIO_ACCESS_READ        0xF5
+#define W1_F3A_FUNC_PIO_ACCESS_WRITE       0x5A
+#define W1_F3A_SUCCESS_CONFIRM_BYTE        0xAA
+
+static ssize_t w1_f3a_read_state(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       struct w1_slave *sl = kobj_to_w1_slave(kobj);
+       dev_dbg(&sl->dev,
+               "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
+               bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
+
+       if (off != 0)
+               return 0;
+       if (!buf)
+               return -EINVAL;
+
+       mutex_lock(&sl->master->bus_mutex);
+       dev_dbg(&sl->dev, "mutex locked");
+
+       if (w1_reset_select_slave(sl)) {
+               mutex_unlock(&sl->master->bus_mutex);
+               return -EIO;
+       }
+
+       w1_write_8(sl->master, W1_F3A_FUNC_PIO_ACCESS_READ);
+       *buf = w1_read_8(sl->master);
+
+       mutex_unlock(&sl->master->bus_mutex);
+       dev_dbg(&sl->dev, "mutex unlocked");
+
+       /* check for correct complement */
+       if ((*buf & 0x0F) != ((~*buf >> 4) & 0x0F))
+               return -EIO;
+       else
+               return 1;
+}
+
+static ssize_t w1_f3a_write_output(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       struct w1_slave *sl = kobj_to_w1_slave(kobj);
+       u8 w1_buf[3];
+       unsigned int retries = W1_F3A_RETRIES;
+
+       if (count != 1 || off != 0)
+               return -EFAULT;
+
+       dev_dbg(&sl->dev, "locking mutex for write_output");
+       mutex_lock(&sl->master->bus_mutex);
+       dev_dbg(&sl->dev, "mutex locked");
+
+       if (w1_reset_select_slave(sl))
+               goto error;
+
+       /* according to the DS2413 datasheet the most significant 6 bits
+          should be set to "1"s, so do it now */
+       *buf = *buf | 0xFC;
+
+       while (retries--) {
+               w1_buf[0] = W1_F3A_FUNC_PIO_ACCESS_WRITE;
+               w1_buf[1] = *buf;
+               w1_buf[2] = ~(*buf);
+               w1_write_block(sl->master, w1_buf, 3);
+
+               if (w1_read_8(sl->master) == W1_F3A_SUCCESS_CONFIRM_BYTE) {
+                       mutex_unlock(&sl->master->bus_mutex);
+                       dev_dbg(&sl->dev, "mutex unlocked, retries:%d", retries);
+                       return 1;
+               }
+               if (w1_reset_resume_command(sl->master))
+                       goto error;
+       }
+
+error:
+       mutex_unlock(&sl->master->bus_mutex);
+       dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
+       return -EIO;
+}
+
+#define NB_SYSFS_BIN_FILES 2
+static struct bin_attribute w1_f3a_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
+       {
+               .attr = {
+                       .name = "state",
+                       .mode = S_IRUGO,
+               },
+               .size = 1,
+               .read = w1_f3a_read_state,
+       },
+       {
+               .attr = {
+                       .name = "output",
+                       .mode = S_IRUGO | S_IWUSR | S_IWGRP,
+               },
+               .size = 1,
+               .write = w1_f3a_write_output,
+       }
+};
+
+static int w1_f3a_add_slave(struct w1_slave *sl)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
+               err = sysfs_create_bin_file(
+                       &sl->dev.kobj,
+                       &(w1_f3a_sysfs_bin_files[i]));
+       if (err)
+               while (--i >= 0)
+                       sysfs_remove_bin_file(&sl->dev.kobj,
+                               &(w1_f3a_sysfs_bin_files[i]));
+       return err;
+}
+
+static void w1_f3a_remove_slave(struct w1_slave *sl)
+{
+       int i;
+       for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
+               sysfs_remove_bin_file(&sl->dev.kobj,
+                       &(w1_f3a_sysfs_bin_files[i]));
+}
+
+static struct w1_family_ops w1_f3a_fops = {
+       .add_slave      = w1_f3a_add_slave,
+       .remove_slave   = w1_f3a_remove_slave,
+};
+
+static struct w1_family w1_family_3a = {
+       .fid = W1_FAMILY_DS2413,
+       .fops = &w1_f3a_fops,
+};
+
+static int __init w1_f3a_init(void)
+{
+       return w1_register_family(&w1_family_3a);
+}
+
+static void __exit w1_f3a_exit(void)
+{
+       w1_unregister_family(&w1_family_3a);
+}
+
+module_init(w1_f3a_init);
+module_exit(w1_f3a_exit);
index a1f0ce1..625dd08 100644 (file)
@@ -39,6 +39,7 @@
 #define W1_EEPROM_DS2431       0x2D
 #define W1_FAMILY_DS2760       0x30
 #define W1_FAMILY_DS2780       0x32
+#define W1_FAMILY_DS2413       0x3A
 #define W1_THERM_DS1825                0x3B
 #define W1_FAMILY_DS2781       0x3D
 #define W1_THERM_DS28EA00      0x42
index 26e1fdb..9fcc70c 100644 (file)
@@ -79,6 +79,7 @@ config DA9052_WATCHDOG
 config DA9055_WATCHDOG
        tristate "Dialog Semiconductor DA9055 Watchdog"
        depends on MFD_DA9055
+       select WATCHDOG_CORE
        help
          If you say yes here you get support for watchdog on the Dialog
          Semiconductor DA9055 PMIC.
@@ -108,7 +109,7 @@ config WM8350_WATCHDOG
 
 config ARM_SP805_WATCHDOG
        tristate "ARM SP805 Watchdog"
-       depends on ARM_AMBA
+       depends on ARM && ARM_AMBA
        select WATCHDOG_CORE
        help
          ARM Primecell SP805 Watchdog timer. This will reboot your system when
@@ -116,7 +117,7 @@ config ARM_SP805_WATCHDOG
 
 config AT91RM9200_WATCHDOG
        tristate "AT91RM9200 watchdog"
-       depends on ARCH_AT91RM9200
+       depends on ARCH_AT91
        help
          Watchdog timer embedded into AT91RM9200 chips. This will reboot your
          system when the timeout is reached.
@@ -124,6 +125,7 @@ config AT91RM9200_WATCHDOG
 config AT91SAM9X_WATCHDOG
        tristate "AT91SAM9X / AT91CAP9 watchdog"
        depends on ARCH_AT91 && !ARCH_AT91RM9200
+       select WATCHDOG_CORE
        help
          Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
          reboot your system when the timeout is reached.
@@ -316,14 +318,15 @@ config TWL4030_WATCHDOG
          Support for TI TWL4030 watchdog.  Say 'Y' here to enable the
          watchdog timer support for TWL4030 chips.
 
-config STMP3XXX_WATCHDOG
-       tristate "Freescale STMP3XXX watchdog"
-       depends on ARCH_STMP3XXX
+config STMP3XXX_RTC_WATCHDOG
+       tristate "Freescale STMP3XXX & i.MX23/28 watchdog"
+       depends on RTC_DRV_STMP
+       select WATCHDOG_CORE
        help
-         Say Y here if to include support for the watchdog timer
-         for the Sigmatel STMP37XX/378X SoC.
+         Say Y here to include support for the watchdog timer inside
+         the RTC for the STMP37XX/378X or i.MX23/28 SoC.
          To compile this driver as a module, choose M here: the
-         module will be called stmp3xxx_wdt.
+         module will be called stmp3xxx_rtc_wdt.
 
 config NUC900_WATCHDOG
        tristate "Nuvoton NUC900 watchdog"
@@ -376,6 +379,18 @@ config UX500_WATCHDOG
          To compile this driver as a module, choose M here: the
          module will be called ux500_wdt.
 
+config RETU_WATCHDOG
+       tristate "Retu watchdog"
+       depends on MFD_RETU
+       select WATCHDOG_CORE
+       help
+         Retu watchdog driver for Nokia Internet Tablets (770, N800,
+         N810). At least on N800 the watchdog cannot be disabled, so
+         this driver is essential and you should enable it.
+
+         To compile this driver as a module, choose M here: the
+         module will be called retu_wdt.
+
 # AVR32 Architecture
 
 config AT32AP700X_WDT
@@ -593,7 +608,7 @@ config IE6XX_WDT
 
 config INTEL_SCU_WATCHDOG
        bool "Intel SCU Watchdog for Mobile Platforms"
-       depends on X86_MRST
+       depends on X86_INTEL_MID
        ---help---
          Hardware driver for the watchdog time built into the Intel SCU
          for Intel Mobile Platforms.
@@ -983,6 +998,7 @@ config ATH79_WDT
 config BCM47XX_WDT
        tristate "Broadcom BCM47xx Watchdog Timer"
        depends on BCM47XX
+       select WATCHDOG_CORE
        help
          Hardware driver for the Broadcom BCM47xx Watchdog Timer.
 
@@ -1131,6 +1147,7 @@ config PIKA_WDT
 config BOOKE_WDT
        tristate "PowerPC Book-E Watchdog Timer"
        depends on BOOKE || 4xx
+       select WATCHDOG_CORE
        ---help---
          Watchdog driver for PowerPC Book-E chips, such as the Freescale
          MPC85xx SOCs and the IBM PowerPC 440.
index bec86ee..a300b94 100644 (file)
@@ -48,11 +48,12 @@ obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
 obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
 obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
 obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
-obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
+obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
 obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
 obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
 obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
 obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o
+obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
index 89831ed..1c75260 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/types.h>
 #include <linux/watchdog.h>
 #include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include <mach/at91_st.h>
 
 #define WDT_DEFAULT_TIME       5       /* seconds */
@@ -252,6 +254,12 @@ static int at91wdt_resume(struct platform_device *pdev)
 #define at91wdt_resume NULL
 #endif
 
+static const struct of_device_id at91_wdt_dt_ids[] = {
+       { .compatible = "atmel,at91rm9200-wdt" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, at91_wdt_dt_ids);
+
 static struct platform_driver at91wdt_driver = {
        .probe          = at91wdt_probe,
        .remove         = at91wdt_remove,
@@ -261,6 +269,7 @@ static struct platform_driver at91wdt_driver = {
        .driver         = {
                .name   = "at91_wdt",
                .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(at91_wdt_dt_ids),
        },
 };
 
index c08933c..be37dde 100644 (file)
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/errno.h>
-#include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
@@ -58,7 +56,7 @@
 
 /* User land timeout */
 #define WDT_HEARTBEAT 15
-static int heartbeat = WDT_HEARTBEAT;
+static int heartbeat;
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
        "(default = " __MODULE_STRING(WDT_HEARTBEAT) ")");
@@ -68,19 +66,17 @@ module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
        "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
+static struct watchdog_device at91_wdt_dev;
 static void at91_ping(unsigned long data);
 
 static struct {
        void __iomem *base;
        unsigned long next_heartbeat;   /* the next_heartbeat for the timer */
-       unsigned long open;
-       char expect_close;
        struct timer_list timer;        /* The timer that pings the watchdog */
 } at91wdt_private;
 
 /* ......................................................................... */
 
-
 /*
  * Reload the watchdog timer.  (ie, pat the watchdog)
  */
@@ -95,39 +91,37 @@ static inline void at91_wdt_reset(void)
 static void at91_ping(unsigned long data)
 {
        if (time_before(jiffies, at91wdt_private.next_heartbeat) ||
-                       (!nowayout && !at91wdt_private.open)) {
+           (!watchdog_active(&at91_wdt_dev))) {
                at91_wdt_reset();
                mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
        } else
                pr_crit("I will reset your machine !\n");
 }
 
-/*
- * Watchdog device is opened, and watchdog starts running.
- */
-static int at91_wdt_open(struct inode *inode, struct file *file)
+static int at91_wdt_ping(struct watchdog_device *wdd)
 {
-       if (test_and_set_bit(0, &at91wdt_private.open))
-               return -EBUSY;
+       /* calculate when the next userspace timeout will be */
+       at91wdt_private.next_heartbeat = jiffies + wdd->timeout * HZ;
+       return 0;
+}
 
-       at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
+static int at91_wdt_start(struct watchdog_device *wdd)
+{
+       /* calculate the next userspace timeout and modify the timer */
+       at91_wdt_ping(wdd);
        mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
-
-       return nonseekable_open(inode, file);
+       return 0;
 }
 
-/*
- * Close the watchdog device.
- */
-static int at91_wdt_close(struct inode *inode, struct file *file)
+static int at91_wdt_stop(struct watchdog_device *wdd)
 {
-       clear_bit(0, &at91wdt_private.open);
-
-       /* stop internal ping */
-       if (!at91wdt_private.expect_close)
-               del_timer(&at91wdt_private.timer);
+       /* The watchdog timer hardware can not be stopped... */
+       return 0;
+}
 
-       at91wdt_private.expect_close = 0;
+static int at91_wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout)
+{
+       wdd->timeout = new_timeout;
        return 0;
 }
 
@@ -163,96 +157,28 @@ static int at91_wdt_settimeout(unsigned int timeout)
        return 0;
 }
 
+/* ......................................................................... */
+
 static const struct watchdog_info at91_wdt_info = {
        .identity       = DRV_NAME,
        .options        = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
                                                WDIOF_MAGICCLOSE,
 };
 
-/*
- * Handle commands from user-space.
- */
-static long at91_wdt_ioctl(struct file *file,
-               unsigned int cmd, unsigned long arg)
-{
-       void __user *argp = (void __user *)arg;
-       int __user *p = argp;
-       int new_value;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               return copy_to_user(argp, &at91_wdt_info,
-                                   sizeof(at91_wdt_info)) ? -EFAULT : 0;
-
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               return put_user(0, p);
-
-       case WDIOC_KEEPALIVE:
-               at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
-               return 0;
-
-       case WDIOC_SETTIMEOUT:
-               if (get_user(new_value, p))
-                       return -EFAULT;
-
-               heartbeat = new_value;
-               at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
-
-               return put_user(new_value, p);  /* return current value */
-
-       case WDIOC_GETTIMEOUT:
-               return put_user(heartbeat, p);
-       }
-       return -ENOTTY;
-}
-
-/*
- * Pat the watchdog whenever device is written to.
- */
-static ssize_t at91_wdt_write(struct file *file, const char *data, size_t len,
-                                                               loff_t *ppos)
-{
-       if (!len)
-               return 0;
-
-       /* Scan for magic character */
-       if (!nowayout) {
-               size_t i;
-
-               at91wdt_private.expect_close = 0;
-
-               for (i = 0; i < len; i++) {
-                       char c;
-                       if (get_user(c, data + i))
-                               return -EFAULT;
-                       if (c == 'V') {
-                               at91wdt_private.expect_close = 42;
-                               break;
-                       }
-               }
-       }
-
-       at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
-
-       return len;
-}
-
-/* ......................................................................... */
-
-static const struct file_operations at91wdt_fops = {
-       .owner                  = THIS_MODULE,
-       .llseek                 = no_llseek,
-       .unlocked_ioctl = at91_wdt_ioctl,
-       .open                   = at91_wdt_open,
-       .release                = at91_wdt_close,
-       .write                  = at91_wdt_write,
+static const struct watchdog_ops at91_wdt_ops = {
+       .owner =        THIS_MODULE,
+       .start =        at91_wdt_start,
+       .stop =         at91_wdt_stop,
+       .ping =         at91_wdt_ping,
+       .set_timeout =  at91_wdt_set_timeout,
 };
 
-static struct miscdevice at91wdt_miscdev = {
-       .minor          = WATCHDOG_MINOR,
-       .name           = "watchdog",
-       .fops           = &at91wdt_fops,
+static struct watchdog_device at91_wdt_dev = {
+       .info =         &at91_wdt_info,
+       .ops =          &at91_wdt_ops,
+       .timeout =      WDT_HEARTBEAT,
+       .min_timeout =  1,
+       .max_timeout =  0xFFFF,
 };
 
 static int __init at91wdt_probe(struct platform_device *pdev)
@@ -260,10 +186,6 @@ static int __init at91wdt_probe(struct platform_device *pdev)
        struct resource *r;
        int res;
 
-       if (at91wdt_miscdev.parent)
-               return -EBUSY;
-       at91wdt_miscdev.parent = &pdev->dev;
-
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r)
                return -ENODEV;
@@ -273,38 +195,41 @@ static int __init at91wdt_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       at91_wdt_dev.parent = &pdev->dev;
+       watchdog_init_timeout(&at91_wdt_dev, heartbeat, &pdev->dev);
+       watchdog_set_nowayout(&at91_wdt_dev, nowayout);
+
        /* Set watchdog */
        res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
        if (res)
                return res;
 
-       res = misc_register(&at91wdt_miscdev);
+       res = watchdog_register_device(&at91_wdt_dev);
        if (res)
                return res;
 
-       at91wdt_private.next_heartbeat = jiffies + heartbeat * HZ;
+       at91wdt_private.next_heartbeat = jiffies + at91_wdt_dev.timeout * HZ;
        setup_timer(&at91wdt_private.timer, at91_ping, 0);
        mod_timer(&at91wdt_private.timer, jiffies + WDT_TIMEOUT);
 
        pr_info("enabled (heartbeat=%d sec, nowayout=%d)\n",
-               heartbeat, nowayout);
+               at91_wdt_dev.timeout, nowayout);
 
        return 0;
 }
 
 static int __exit at91wdt_remove(struct platform_device *pdev)
 {
-       int res;
+       watchdog_unregister_device(&at91_wdt_dev);
 
-       res = misc_deregister(&at91wdt_miscdev);
-       if (!res)
-               at91wdt_miscdev.parent = NULL;
+       pr_warn("I quit now, hardware will probably reboot!\n");
+       del_timer(&at91wdt_private.timer);
 
-       return res;
+       return 0;
 }
 
 #if defined(CONFIG_OF)
-static const struct of_device_id at91_wdt_dt_ids[] __initconst = {
+static const struct of_device_id at91_wdt_dt_ids[] = {
        { .compatible = "atmel,at91sam9260-wdt" },
        { /* sentinel */ }
 };
@@ -326,4 +251,3 @@ module_platform_driver_probe(at91wdt_driver, at91wdt_probe);
 MODULE_AUTHOR("Renaud CERRATO <r.cerrato@til-technologies.fr>");
 MODULE_DESCRIPTION("Watchdog driver for Atmel AT91SAM9x processors");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
index 38a999e..8987990 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/watchdog.h>
 #include <linux/clk.h>
 #include <linux/err.h>
-
-#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/ar71xx_regs.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
 
 #define DRIVER_NAME    "ath79-wdt"
 
 #define WDT_TIMEOUT    15      /* seconds */
 
+#define WDOG_REG_CTRL          0x00
+#define WDOG_REG_TIMER         0x04
+
 #define WDOG_CTRL_LAST_RESET   BIT(31)
 #define WDOG_CTRL_ACTION_MASK  3
 #define WDOG_CTRL_ACTION_NONE  0       /* no action */
@@ -66,27 +69,38 @@ static struct clk *wdt_clk;
 static unsigned long wdt_freq;
 static int boot_status;
 static int max_timeout;
+static void __iomem *wdt_base;
+
+static inline void ath79_wdt_wr(unsigned reg, u32 val)
+{
+       iowrite32(val, wdt_base + reg);
+}
+
+static inline u32 ath79_wdt_rr(unsigned reg)
+{
+       return ioread32(wdt_base + reg);
+}
 
 static inline void ath79_wdt_keepalive(void)
 {
-       ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+       ath79_wdt_wr(WDOG_REG_TIMER, wdt_freq * timeout);
        /* flush write */
-       ath79_reset_rr(AR71XX_RESET_REG_WDOG);
+       ath79_wdt_rr(WDOG_REG_TIMER);
 }
 
 static inline void ath79_wdt_enable(void)
 {
        ath79_wdt_keepalive();
-       ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+       ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_FCR);
        /* flush write */
-       ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+       ath79_wdt_rr(WDOG_REG_CTRL);
 }
 
 static inline void ath79_wdt_disable(void)
 {
-       ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+       ath79_wdt_wr(WDOG_REG_CTRL, WDOG_CTRL_ACTION_NONE);
        /* flush write */
-       ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+       ath79_wdt_rr(WDOG_REG_CTRL);
 }
 
 static int ath79_wdt_set_timeout(int val)
@@ -226,16 +240,32 @@ static struct miscdevice ath79_wdt_miscdev = {
 
 static int ath79_wdt_probe(struct platform_device *pdev)
 {
+       struct resource *res;
        u32 ctrl;
        int err;
 
-       wdt_clk = clk_get(&pdev->dev, "wdt");
+       if (wdt_base)
+               return -EBUSY;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "no memory resource found\n");
+               return -EINVAL;
+       }
+
+       wdt_base = devm_request_and_ioremap(&pdev->dev, res);
+       if (!wdt_base) {
+               dev_err(&pdev->dev, "unable to remap memory region\n");
+               return -ENOMEM;
+       }
+
+       wdt_clk = devm_clk_get(&pdev->dev, "wdt");
        if (IS_ERR(wdt_clk))
                return PTR_ERR(wdt_clk);
 
        err = clk_enable(wdt_clk);
        if (err)
-               goto err_clk_put;
+               return err;
 
        wdt_freq = clk_get_rate(wdt_clk);
        if (!wdt_freq) {
@@ -251,7 +281,7 @@ static int ath79_wdt_probe(struct platform_device *pdev)
                        max_timeout, timeout);
        }
 
-       ctrl = ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+       ctrl = ath79_wdt_rr(WDOG_REG_CTRL);
        boot_status = (ctrl & WDOG_CTRL_LAST_RESET) ? WDIOF_CARDRESET : 0;
 
        err = misc_register(&ath79_wdt_miscdev);
@@ -265,8 +295,6 @@ static int ath79_wdt_probe(struct platform_device *pdev)
 
 err_clk_disable:
        clk_disable(wdt_clk);
-err_clk_put:
-       clk_put(wdt_clk);
        return err;
 }
 
@@ -274,7 +302,6 @@ static int ath79_wdt_remove(struct platform_device *pdev)
 {
        misc_deregister(&ath79_wdt_miscdev);
        clk_disable(wdt_clk);
-       clk_put(wdt_clk);
        return 0;
 }
 
@@ -283,6 +310,14 @@ static void ath97_wdt_shutdown(struct platform_device *pdev)
        ath79_wdt_disable();
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id ath79_wdt_match[] = {
+       { .compatible = "qca,ar7130-wdt" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ath79_wdt_match);
+#endif
+
 static struct platform_driver ath79_wdt_driver = {
        .probe          = ath79_wdt_probe,
        .remove         = ath79_wdt_remove,
@@ -290,6 +325,7 @@ static struct platform_driver ath79_wdt_driver = {
        .driver         = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(ath79_wdt_match),
        },
 };
 
index bc0e91e..b4021a2 100644 (file)
@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs>
  *  Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
+ *  Copyright (C) 2012-2013 Hauke Mehrtens <hauke@hauke-m.de>
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/bcm47xx_wdt.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/platform_device.h>
 #include <linux/reboot.h>
 #include <linux/types.h>
-#include <linux/uaccess.h>
 #include <linux/watchdog.h>
 #include <linux/timer.h>
 #include <linux/jiffies.h>
-#include <linux/ssb/ssb_embedded.h>
-#include <asm/mach-bcm47xx/bcm47xx.h>
 
 #define DRV_NAME               "bcm47xx_wdt"
 
 #define WDT_DEFAULT_TIME       30      /* seconds */
-#define WDT_MAX_TIME           255     /* seconds */
+#define WDT_SOFTTIMER_MAX      255     /* seconds */
+#define WDT_SOFTTIMER_THRESHOLD        60      /* seconds */
 
-static int wdt_time = WDT_DEFAULT_TIME;
+static int timeout = WDT_DEFAULT_TIME;
 static bool nowayout = WATCHDOG_NOWAYOUT;
 
-module_param(wdt_time, int, 0);
-MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog time in seconds. (default="
                                __MODULE_STRING(WDT_DEFAULT_TIME) ")");
 
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
 module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout,
                "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#endif
 
-static unsigned long bcm47xx_wdt_busy;
-static char expect_release;
-static struct timer_list wdt_timer;
-static atomic_t ticks;
-
-static inline void bcm47xx_wdt_hw_start(void)
+static inline struct bcm47xx_wdt *bcm47xx_wdt_get(struct watchdog_device *wdd)
 {
-       /* this is 2,5s on 100Mhz clock  and 2s on 133 Mhz */
-       switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
-       case BCM47XX_BUS_TYPE_SSB:
-               ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 0xfffffff);
-               break;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
-       case BCM47XX_BUS_TYPE_BCMA:
-               bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc,
-                                              0xfffffff);
-               break;
-#endif
-       }
+       return container_of(wdd, struct bcm47xx_wdt, wdd);
 }
 
-static inline int bcm47xx_wdt_hw_stop(void)
+static int bcm47xx_wdt_hard_keepalive(struct watchdog_device *wdd)
 {
-       switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
-       case BCM47XX_BUS_TYPE_SSB:
-               return ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 0);
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
-       case BCM47XX_BUS_TYPE_BCMA:
-               bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 0);
-               return 0;
-#endif
-       }
-       return -EINVAL;
-}
+       struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
 
-static void bcm47xx_timer_tick(unsigned long unused)
-{
-       if (!atomic_dec_and_test(&ticks)) {
-               bcm47xx_wdt_hw_start();
-               mod_timer(&wdt_timer, jiffies + HZ);
-       } else {
-               pr_crit("Watchdog will fire soon!!!\n");
-       }
+       wdt->timer_set_ms(wdt, wdd->timeout * 1000);
+
+       return 0;
 }
 
-static inline void bcm47xx_wdt_pet(void)
+static int bcm47xx_wdt_hard_start(struct watchdog_device *wdd)
 {
-       atomic_set(&ticks, wdt_time);
+       return 0;
 }
 
-static void bcm47xx_wdt_start(void)
+static int bcm47xx_wdt_hard_stop(struct watchdog_device *wdd)
 {
-       bcm47xx_wdt_pet();
-       bcm47xx_timer_tick(0);
+       struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+
+       wdt->timer_set(wdt, 0);
+
+       return 0;
 }
 
-static void bcm47xx_wdt_pause(void)
+static int bcm47xx_wdt_hard_set_timeout(struct watchdog_device *wdd,
+                                       unsigned int new_time)
 {
-       del_timer_sync(&wdt_timer);
-       bcm47xx_wdt_hw_stop();
+       struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+       u32 max_timer = wdt->max_timer_ms;
+
+       if (new_time < 1 || new_time > max_timer / 1000) {
+               pr_warn("timeout value must be 1<=x<=%d, using %d\n",
+                       max_timer / 1000, new_time);
+               return -EINVAL;
+       }
+
+       wdd->timeout = new_time;
+       return 0;
 }
 
-static void bcm47xx_wdt_stop(void)
+static struct watchdog_ops bcm47xx_wdt_hard_ops = {
+       .owner          = THIS_MODULE,
+       .start          = bcm47xx_wdt_hard_start,
+       .stop           = bcm47xx_wdt_hard_stop,
+       .ping           = bcm47xx_wdt_hard_keepalive,
+       .set_timeout    = bcm47xx_wdt_hard_set_timeout,
+};
+
+static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
 {
-       bcm47xx_wdt_pause();
+       struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
+       u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
+
+       if (!atomic_dec_and_test(&wdt->soft_ticks)) {
+               wdt->timer_set_ms(wdt, next_tick);
+               mod_timer(&wdt->soft_timer, jiffies + HZ);
+       } else {
+               pr_crit("Watchdog will fire soon!!!\n");
+       }
 }
 
-static int bcm47xx_wdt_settimeout(int new_time)
+static int bcm47xx_wdt_soft_keepalive(struct watchdog_device *wdd)
 {
-       if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
-               return -EINVAL;
+       struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+
+       atomic_set(&wdt->soft_ticks, wdd->timeout);
 
-       wdt_time = new_time;
        return 0;
 }
 
-static int bcm47xx_wdt_open(struct inode *inode, struct file *file)
+static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd)
 {
-       if (test_and_set_bit(0, &bcm47xx_wdt_busy))
-               return -EBUSY;
+       struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+
+       bcm47xx_wdt_soft_keepalive(wdd);
+       bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);
 
-       bcm47xx_wdt_start();
-       return nonseekable_open(inode, file);
+       return 0;
 }
 
-static int bcm47xx_wdt_release(struct inode *inode, struct file *file)
+static int bcm47xx_wdt_soft_stop(struct watchdog_device *wdd)
 {
-       if (expect_release == 42) {
-               bcm47xx_wdt_stop();
-       } else {
-               pr_crit("Unexpected close, not stopping watchdog!\n");
-               bcm47xx_wdt_start();
-       }
+       struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
+
+       del_timer_sync(&wdt->soft_timer);
+       wdt->timer_set(wdt, 0);
 
-       clear_bit(0, &bcm47xx_wdt_busy);
-       expect_release = 0;
        return 0;
 }
 
-static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
-                               size_t len, loff_t *ppos)
+static int bcm47xx_wdt_soft_set_timeout(struct watchdog_device *wdd,
+                                       unsigned int new_time)
 {
-       if (len) {
-               if (!nowayout) {
-                       size_t i;
-
-                       expect_release = 0;
-
-                       for (i = 0; i != len; i++) {
-                               char c;
-                               if (get_user(c, data + i))
-                                       return -EFAULT;
-                               if (c == 'V')
-                                       expect_release = 42;
-                       }
-               }
-               bcm47xx_wdt_pet();
+       if (new_time < 1 || new_time > WDT_SOFTTIMER_MAX) {
+               pr_warn("timeout value must be 1<=x<=%d, using %d\n",
+                       WDT_SOFTTIMER_MAX, new_time);
+               return -EINVAL;
        }
-       return len;
+
+       wdd->timeout = new_time;
+       return 0;
 }
 
 static const struct watchdog_info bcm47xx_wdt_info = {
@@ -180,130 +159,100 @@ static const struct watchdog_info bcm47xx_wdt_info = {
                                WDIOF_MAGICCLOSE,
 };
 
-static long bcm47xx_wdt_ioctl(struct file *file,
-                                       unsigned int cmd, unsigned long arg)
-{
-       void __user *argp = (void __user *)arg;
-       int __user *p = argp;
-       int new_value, retval = -EINVAL;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               return copy_to_user(argp, &bcm47xx_wdt_info,
-                               sizeof(bcm47xx_wdt_info)) ? -EFAULT : 0;
-
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               return put_user(0, p);
-
-       case WDIOC_SETOPTIONS:
-               if (get_user(new_value, p))
-                       return -EFAULT;
-
-               if (new_value & WDIOS_DISABLECARD) {
-                       bcm47xx_wdt_stop();
-                       retval = 0;
-               }
-
-               if (new_value & WDIOS_ENABLECARD) {
-                       bcm47xx_wdt_start();
-                       retval = 0;
-               }
-
-               return retval;
-
-       case WDIOC_KEEPALIVE:
-               bcm47xx_wdt_pet();
-               return 0;
-
-       case WDIOC_SETTIMEOUT:
-               if (get_user(new_value, p))
-                       return -EFAULT;
-
-               if (bcm47xx_wdt_settimeout(new_value))
-                       return -EINVAL;
-
-               bcm47xx_wdt_pet();
-
-       case WDIOC_GETTIMEOUT:
-               return put_user(wdt_time, p);
-
-       default:
-               return -ENOTTY;
-       }
-}
-
 static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
-       unsigned long code, void *unused)
+                                 unsigned long code, void *unused)
 {
+       struct bcm47xx_wdt *wdt;
+
+       wdt = container_of(this, struct bcm47xx_wdt, notifier);
        if (code == SYS_DOWN || code == SYS_HALT)
-               bcm47xx_wdt_stop();
+               wdt->wdd.ops->stop(&wdt->wdd);
        return NOTIFY_DONE;
 }
 
-static const struct file_operations bcm47xx_wdt_fops = {
+static struct watchdog_ops bcm47xx_wdt_soft_ops = {
        .owner          = THIS_MODULE,
-       .llseek         = no_llseek,
-       .unlocked_ioctl = bcm47xx_wdt_ioctl,
-       .open           = bcm47xx_wdt_open,
-       .release        = bcm47xx_wdt_release,
-       .write          = bcm47xx_wdt_write,
-};
-
-static struct miscdevice bcm47xx_wdt_miscdev = {
-       .minor          = WATCHDOG_MINOR,
-       .name           = "watchdog",
-       .fops           = &bcm47xx_wdt_fops,
-};
-
-static struct notifier_block bcm47xx_wdt_notifier = {
-       .notifier_call = bcm47xx_wdt_notify_sys,
+       .start          = bcm47xx_wdt_soft_start,
+       .stop           = bcm47xx_wdt_soft_stop,
+       .ping           = bcm47xx_wdt_soft_keepalive,
+       .set_timeout    = bcm47xx_wdt_soft_set_timeout,
 };
 
-static int __init bcm47xx_wdt_init(void)
+static int bcm47xx_wdt_probe(struct platform_device *pdev)
 {
        int ret;
+       bool soft;
+       struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev);
 
-       if (bcm47xx_wdt_hw_stop() < 0)
-               return -ENODEV;
+       if (!wdt)
+               return -ENXIO;
 
-       setup_timer(&wdt_timer, bcm47xx_timer_tick, 0L);
+       soft = wdt->max_timer_ms < WDT_SOFTTIMER_THRESHOLD * 1000;
 
-       if (bcm47xx_wdt_settimeout(wdt_time)) {
-               bcm47xx_wdt_settimeout(WDT_DEFAULT_TIME);
-               pr_info("wdt_time value must be 0 < wdt_time < %d, using %d\n",
-                       (WDT_MAX_TIME + 1), wdt_time);
+       if (soft) {
+               wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
+               setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
+                           (long unsigned int)wdt);
+       } else {
+               wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
        }
 
-       ret = register_reboot_notifier(&bcm47xx_wdt_notifier);
+       wdt->wdd.info = &bcm47xx_wdt_info;
+       wdt->wdd.timeout = WDT_DEFAULT_TIME;
+       ret = wdt->wdd.ops->set_timeout(&wdt->wdd, timeout);
        if (ret)
-               return ret;
+               goto err_timer;
+       watchdog_set_nowayout(&wdt->wdd, nowayout);
 
-       ret = misc_register(&bcm47xx_wdt_miscdev);
-       if (ret) {
-               unregister_reboot_notifier(&bcm47xx_wdt_notifier);
-               return ret;
-       }
+       wdt->notifier.notifier_call = &bcm47xx_wdt_notify_sys;
+
+       ret = register_reboot_notifier(&wdt->notifier);
+       if (ret)
+               goto err_timer;
 
-       pr_info("BCM47xx Watchdog Timer enabled (%d seconds%s)\n",
-               wdt_time, nowayout ? ", nowayout" : "");
+       ret = watchdog_register_device(&wdt->wdd);
+       if (ret)
+               goto err_notifier;
+
+       dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n",
+               timeout, nowayout ? ", nowayout" : "",
+               soft ? ", Software Timer" : "");
        return 0;
+
+err_notifier:
+       unregister_reboot_notifier(&wdt->notifier);
+err_timer:
+       if (soft)
+               del_timer_sync(&wdt->soft_timer);
+
+       return ret;
 }
 
-static void __exit bcm47xx_wdt_exit(void)
+static int bcm47xx_wdt_remove(struct platform_device *pdev)
 {
-       if (!nowayout)
-               bcm47xx_wdt_stop();
+       struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev);
 
-       misc_deregister(&bcm47xx_wdt_miscdev);
+       if (!wdt)
+               return -ENXIO;
 
-       unregister_reboot_notifier(&bcm47xx_wdt_notifier);
+       watchdog_unregister_device(&wdt->wdd);
+       unregister_reboot_notifier(&wdt->notifier);
+
+       return 0;
 }
 
-module_init(bcm47xx_wdt_init);
-module_exit(bcm47xx_wdt_exit);
+static struct platform_driver bcm47xx_wdt_driver = {
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .name   = "bcm47xx-wdt",
+       },
+       .probe          = bcm47xx_wdt_probe,
+       .remove         = bcm47xx_wdt_remove,
+};
+
+module_platform_driver(bcm47xx_wdt_driver);
 
 MODULE_AUTHOR("Aleksandar Radovanovic");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
 MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
index c0bc92d..a8dbceb 100644 (file)
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
-#include <linux/fs.h>
 #include <linux/smp.h>
-#include <linux/miscdevice.h>
-#include <linux/notifier.h>
 #include <linux/watchdog.h>
-#include <linux/uaccess.h>
 
 #include <asm/reg_booke.h>
 #include <asm/time.h>
@@ -45,7 +41,7 @@ u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
 #define WDTP_MASK      (TCR_WP_MASK)
 #endif
 
-static DEFINE_SPINLOCK(booke_wdt_lock);
+#ifdef CONFIG_PPC_FSL_BOOK3E
 
 /* For the specified period, determine the number of seconds
  * corresponding to the reset time.  There will be a watchdog
@@ -86,6 +82,24 @@ static unsigned int sec_to_period(unsigned int secs)
        return 0;
 }
 
+#define MAX_WDT_TIMEOUT                period_to_sec(1)
+
+#else /* CONFIG_PPC_FSL_BOOK3E */
+
+static unsigned long long period_to_sec(unsigned int period)
+{
+       return period;
+}
+
+static unsigned int sec_to_period(unsigned int secs)
+{
+       return secs;
+}
+
+#define MAX_WDT_TIMEOUT                3       /* from Kconfig */
+
+#endif /* !CONFIG_PPC_FSL_BOOK3E */
+
 static void __booke_wdt_set(void *data)
 {
        u32 val;
@@ -107,9 +121,11 @@ static void __booke_wdt_ping(void *data)
        mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
 }
 
-static void booke_wdt_ping(void)
+static int booke_wdt_ping(struct watchdog_device *wdog)
 {
        on_each_cpu(__booke_wdt_ping, NULL, 0);
+
+       return 0;
 }
 
 static void __booke_wdt_enable(void *data)
@@ -146,152 +162,81 @@ static void __booke_wdt_disable(void *data)
 
 }
 
-static ssize_t booke_wdt_write(struct file *file, const char __user *buf,
-                               size_t count, loff_t *ppos)
+static void __booke_wdt_start(struct watchdog_device *wdog)
 {
-       booke_wdt_ping();
-       return count;
+       on_each_cpu(__booke_wdt_enable, NULL, 0);
+       pr_debug("watchdog enabled (timeout = %u sec)\n", wdog->timeout);
 }
 
-static struct watchdog_info ident = {
-       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-       .identity = "PowerPC Book-E Watchdog",
-};
-
-static long booke_wdt_ioctl(struct file *file,
-                               unsigned int cmd, unsigned long arg)
+static int booke_wdt_start(struct watchdog_device *wdog)
 {
-       u32 tmp = 0;
-       u32 __user *p = (u32 __user *)arg;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               return copy_to_user(p, &ident, sizeof(ident)) ? -EFAULT : 0;
-       case WDIOC_GETSTATUS:
-               return put_user(0, p);
-       case WDIOC_GETBOOTSTATUS:
-               /* XXX: something is clearing TSR */
-               tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
-               /* returns CARDRESET if last reset was caused by the WDT */
-               return put_user((tmp ? WDIOF_CARDRESET : 0), p);
-       case WDIOC_SETOPTIONS:
-               if (get_user(tmp, p))
-                       return -EFAULT;
-               if (tmp == WDIOS_ENABLECARD) {
-                       booke_wdt_ping();
-                       break;
-               } else
-                       return -EINVAL;
-               return 0;
-       case WDIOC_KEEPALIVE:
-               booke_wdt_ping();
-               return 0;
-       case WDIOC_SETTIMEOUT:
-               if (get_user(tmp, p))
-                       return -EFAULT;
-#ifdef CONFIG_PPC_FSL_BOOK3E
-               /* period of 1 gives the largest possible timeout */
-               if (tmp > period_to_sec(1))
-                       return -EINVAL;
-               booke_wdt_period = sec_to_period(tmp);
-#else
-               booke_wdt_period = tmp;
-#endif
-               booke_wdt_set();
-               /* Fall */
-       case WDIOC_GETTIMEOUT:
-#ifdef CONFIG_FSL_BOOKE
-               return put_user(period_to_sec(booke_wdt_period), p);
-#else
-               return put_user(booke_wdt_period, p);
-#endif
-       default:
-               return -ENOTTY;
-       }
-
-       return 0;
-}
-
-/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
-static unsigned long wdt_is_active;
-
-static int booke_wdt_open(struct inode *inode, struct file *file)
-{
-       /* /dev/watchdog can only be opened once */
-       if (test_and_set_bit(0, &wdt_is_active))
-               return -EBUSY;
-
-       spin_lock(&booke_wdt_lock);
        if (booke_wdt_enabled == 0) {
                booke_wdt_enabled = 1;
-               on_each_cpu(__booke_wdt_enable, NULL, 0);
-               pr_debug("watchdog enabled (timeout = %llu sec)\n",
-                        period_to_sec(booke_wdt_period));
+               __booke_wdt_start(wdog);
        }
-       spin_unlock(&booke_wdt_lock);
-
-       return nonseekable_open(inode, file);
+       return 0;
 }
 
-static int booke_wdt_release(struct inode *inode, struct file *file)
+static int booke_wdt_stop(struct watchdog_device *wdog)
 {
-#ifndef CONFIG_WATCHDOG_NOWAYOUT
-       /* Normally, the watchdog is disabled when /dev/watchdog is closed, but
-        * if CONFIG_WATCHDOG_NOWAYOUT is defined, then it means that the
-        * watchdog should remain enabled.  So we disable it only if
-        * CONFIG_WATCHDOG_NOWAYOUT is not defined.
-        */
        on_each_cpu(__booke_wdt_disable, NULL, 0);
        booke_wdt_enabled = 0;
        pr_debug("watchdog disabled\n");
-#endif
 
-       clear_bit(0, &wdt_is_active);
+       return 0;
+}
+
+static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev,
+                                unsigned int timeout)
+{
+       if (timeout > MAX_WDT_TIMEOUT)
+               return -EINVAL;
+       booke_wdt_period = sec_to_period(timeout);
+       wdt_dev->timeout = timeout;
+       booke_wdt_set();
 
        return 0;
 }
 
-static const struct file_operations booke_wdt_fops = {
+static struct watchdog_info booke_wdt_info = {
+       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity = "PowerPC Book-E Watchdog",
+};
+
+static struct watchdog_ops booke_wdt_ops = {
        .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .write = booke_wdt_write,
-       .unlocked_ioctl = booke_wdt_ioctl,
-       .open = booke_wdt_open,
-       .release = booke_wdt_release,
+       .start = booke_wdt_start,
+       .stop = booke_wdt_stop,
+       .ping = booke_wdt_ping,
+       .set_timeout = booke_wdt_set_timeout,
 };
 
-static struct miscdevice booke_wdt_miscdev = {
-       .minor = WATCHDOG_MINOR,
-       .name = "watchdog",
-       .fops = &booke_wdt_fops,
+static struct watchdog_device booke_wdt_dev = {
+       .info = &booke_wdt_info,
+       .ops = &booke_wdt_ops,
+       .min_timeout = 1,
+       .max_timeout = 0xFFFF
 };
 
 static void __exit booke_wdt_exit(void)
 {
-       misc_deregister(&booke_wdt_miscdev);
+       watchdog_unregister_device(&booke_wdt_dev);
 }
 
 static int __init booke_wdt_init(void)
 {
        int ret = 0;
+       bool nowayout = WATCHDOG_NOWAYOUT;
 
        pr_info("powerpc book-e watchdog driver loaded\n");
-       ident.firmware_version = cur_cpu_spec->pvr_value;
-
-       ret = misc_register(&booke_wdt_miscdev);
-       if (ret) {
-               pr_err("cannot register device (minor=%u, ret=%i)\n",
-                      WATCHDOG_MINOR, ret);
-               return ret;
-       }
-
-       spin_lock(&booke_wdt_lock);
-       if (booke_wdt_enabled == 1) {
-               pr_info("watchdog enabled (timeout = %llu sec)\n",
-                       period_to_sec(booke_wdt_period));
-               on_each_cpu(__booke_wdt_enable, NULL, 0);
-       }
-       spin_unlock(&booke_wdt_lock);
+       booke_wdt_info.firmware_version = cur_cpu_spec->pvr_value;
+       booke_wdt_set_timeout(&booke_wdt_dev,
+                             period_to_sec(CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT));
+       watchdog_set_nowayout(&booke_wdt_dev, nowayout);
+       if (booke_wdt_enabled)
+               __booke_wdt_start(&booke_wdt_dev);
+
+       ret = watchdog_register_device(&booke_wdt_dev);
 
        return ret;
 }
index 11d55ce..7038758 100644 (file)
@@ -411,7 +411,7 @@ static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                .identity               = DRIVER_NAME,
        };
        void __user *argp = (void __user *)arg;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int index = iminor(inode) - WD0_MINOR;
        struct cpwd *p = cpwd_device;
        int setopt = 0;
@@ -499,7 +499,7 @@ static long cpwd_compat_ioctl(struct file *file, unsigned int cmd,
 static ssize_t cpwd_write(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct cpwd *p = cpwd_device;
        int index = iminor(inode);
 
index e8e8724..7df1fdc 100644 (file)
@@ -69,7 +69,6 @@ static unsigned long wdt_status;
 #define WDT_REGION_INITED 2
 #define WDT_DEVICE_INITED 3
 
-static struct resource *wdt_mem;
 static void __iomem    *wdt_base;
 struct clk             *wdt_clk;
 
@@ -201,10 +200,11 @@ static struct miscdevice davinci_wdt_miscdev = {
 
 static int davinci_wdt_probe(struct platform_device *pdev)
 {
-       int ret = 0, size;
+       int ret = 0;
        struct device *dev = &pdev->dev;
+       struct resource  *wdt_mem;
 
-       wdt_clk = clk_get(dev, NULL);
+       wdt_clk = devm_clk_get(dev, NULL);
        if (WARN_ON(IS_ERR(wdt_clk)))
                return PTR_ERR(wdt_clk);
 
@@ -221,43 +221,26 @@ static int davinci_wdt_probe(struct platform_device *pdev)
                return -ENOENT;
        }
 
-       size = resource_size(wdt_mem);
-       if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
-               dev_err(dev, "failed to get memory region\n");
-               return -ENOENT;
-       }
-
-       wdt_base = ioremap(wdt_mem->start, size);
+       wdt_base = devm_request_and_ioremap(dev, wdt_mem);
        if (!wdt_base) {
-               dev_err(dev, "failed to map memory region\n");
-               release_mem_region(wdt_mem->start, size);
-               wdt_mem = NULL;
-               return -ENOMEM;
+               dev_err(dev, "ioremap failed\n");
+               return -EADDRNOTAVAIL;
        }
 
        ret = misc_register(&davinci_wdt_miscdev);
        if (ret < 0) {
                dev_err(dev, "cannot register misc device\n");
-               release_mem_region(wdt_mem->start, size);
-               wdt_mem = NULL;
        } else {
                set_bit(WDT_DEVICE_INITED, &wdt_status);
        }
 
-       iounmap(wdt_base);
        return ret;
 }
 
 static int davinci_wdt_remove(struct platform_device *pdev)
 {
        misc_deregister(&davinci_wdt_miscdev);
-       if (wdt_mem) {
-               release_mem_region(wdt_mem->start, resource_size(wdt_mem));
-               wdt_mem = NULL;
-       }
-
        clk_disable_unprepare(wdt_clk);
-       clk_put(wdt_clk);
 
        return 0;
 }
index b9c5b58..257cfba 100644 (file)
@@ -310,6 +310,7 @@ static struct platform_driver gef_wdt_driver = {
                .of_match_table = gef_wdt_ids,
        },
        .probe          = gef_wdt_probe,
+       .remove         = gef_wdt_remove,
 };
 
 static int __init gef_wdt_init(void)
index b0e541d..af88ffd 100644 (file)
 
 #include "omap_wdt.h"
 
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+       "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
 static unsigned timer_margin;
 module_param(timer_margin, uint, 0);
 MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
@@ -201,7 +206,6 @@ static const struct watchdog_ops omap_wdt_ops = {
 static int omap_wdt_probe(struct platform_device *pdev)
 {
        struct omap_wd_timer_platform_data *pdata = pdev->dev.platform_data;
-       bool nowayout = WATCHDOG_NOWAYOUT;
        struct watchdog_device *omap_wdt;
        struct resource *res, *mem;
        struct omap_wdt_dev *wdev;
index 7c18b3b..da57798 100644 (file)
@@ -140,6 +140,7 @@ static const struct watchdog_ops orion_wdt_ops = {
 static struct watchdog_device orion_wdt = {
        .info = &orion_wdt_info,
        .ops = &orion_wdt_ops,
+       .min_timeout = 1,
 };
 
 static int orion_wdt_probe(struct platform_device *pdev)
@@ -164,12 +165,9 @@ static int orion_wdt_probe(struct platform_device *pdev)
 
        wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;
 
-       if ((heartbeat < 1) || (heartbeat > wdt_max_duration))
-               heartbeat = wdt_max_duration;
-
-       orion_wdt.timeout = heartbeat;
-       orion_wdt.min_timeout = 1;
+       orion_wdt.timeout = wdt_max_duration;
        orion_wdt.max_timeout = wdt_max_duration;
+       watchdog_init_timeout(&orion_wdt, heartbeat, &pdev->dev);
 
        watchdog_set_nowayout(&orion_wdt, nowayout);
        ret = watchdog_register_device(&orion_wdt);
@@ -179,7 +177,7 @@ static int orion_wdt_probe(struct platform_device *pdev)
        }
 
        pr_info("Initial timeout %d sec%s\n",
-               heartbeat, nowayout ? ", nowayout" : "");
+               orion_wdt.timeout, nowayout ? ", nowayout" : "");
        return 0;
 }
 
@@ -225,4 +223,5 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:orion_wdt");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
index de1f3fa..a3684a3 100644 (file)
@@ -142,6 +142,7 @@ static const struct watchdog_ops pnx4008_wdt_ops = {
 static struct watchdog_device pnx4008_wdd = {
        .info = &pnx4008_wdt_ident,
        .ops = &pnx4008_wdt_ops,
+       .timeout = DEFAULT_HEARTBEAT,
        .min_timeout = 1,
        .max_timeout = MAX_HEARTBEAT,
 };
@@ -151,8 +152,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
        struct resource *r;
        int ret = 0;
 
-       if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
-               heartbeat = DEFAULT_HEARTBEAT;
+       watchdog_init_timeout(&pnx4008_wdd, heartbeat, &pdev->dev);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        wdt_base = devm_ioremap_resource(&pdev->dev, r);
@@ -167,7 +167,6 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
        if (ret)
                goto out;
 
-       pnx4008_wdd.timeout = heartbeat;
        pnx4008_wdd.bootstatus = (readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
                        WDIOF_CARDRESET : 0;
        watchdog_set_nowayout(&pnx4008_wdd, nowayout);
@@ -181,7 +180,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
        }
 
        dev_info(&pdev->dev, "PNX4008 Watchdog Timer: heartbeat %d sec\n",
-                       heartbeat);
+                pnx4008_wdd.timeout);
 
        return 0;
 
diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c
new file mode 100644 (file)
index 0000000..f53615d
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Retu watchdog driver
+ *
+ * Copyright (C) 2004, 2005 Nokia Corporation
+ *
+ * Based on code written by Amit Kucheria and Michael Buesch.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/retu.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+
+/* Watchdog timer values in seconds */
+#define RETU_WDT_MAX_TIMER     63
+
+struct retu_wdt_dev {
+       struct retu_dev         *rdev;
+       struct device           *dev;
+       struct delayed_work     ping_work;
+};
+
+/*
+ * Since Retu watchdog cannot be disabled in hardware, we must kick it
+ * with a timer until userspace watchdog software takes over. If
+ * CONFIG_WATCHDOG_NOWAYOUT is set, we never start the feeding.
+ */
+static void retu_wdt_ping_enable(struct retu_wdt_dev *wdev)
+{
+       retu_write(wdev->rdev, RETU_REG_WATCHDOG, RETU_WDT_MAX_TIMER);
+       schedule_delayed_work(&wdev->ping_work,
+                       round_jiffies_relative(RETU_WDT_MAX_TIMER * HZ / 2));
+}
+
+static void retu_wdt_ping_disable(struct retu_wdt_dev *wdev)
+{
+       retu_write(wdev->rdev, RETU_REG_WATCHDOG, RETU_WDT_MAX_TIMER);
+       cancel_delayed_work_sync(&wdev->ping_work);
+}
+
+static void retu_wdt_ping_work(struct work_struct *work)
+{
+       struct retu_wdt_dev *wdev = container_of(to_delayed_work(work),
+                                               struct retu_wdt_dev, ping_work);
+       retu_wdt_ping_enable(wdev);
+}
+
+static int retu_wdt_start(struct watchdog_device *wdog)
+{
+       struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+       retu_wdt_ping_disable(wdev);
+
+       return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
+}
+
+static int retu_wdt_stop(struct watchdog_device *wdog)
+{
+       struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+       retu_wdt_ping_enable(wdev);
+
+       return 0;
+}
+
+static int retu_wdt_ping(struct watchdog_device *wdog)
+{
+       struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+       return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
+}
+
+static int retu_wdt_set_timeout(struct watchdog_device *wdog,
+                               unsigned int timeout)
+{
+       struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+       wdog->timeout = timeout;
+       return retu_write(wdev->rdev, RETU_REG_WATCHDOG, wdog->timeout);
+}
+
+static const struct watchdog_info retu_wdt_info = {
+       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity = "Retu watchdog",
+};
+
+static const struct watchdog_ops retu_wdt_ops = {
+       .owner          = THIS_MODULE,
+       .start          = retu_wdt_start,
+       .stop           = retu_wdt_stop,
+       .ping           = retu_wdt_ping,
+       .set_timeout    = retu_wdt_set_timeout,
+};
+
+static int retu_wdt_probe(struct platform_device *pdev)
+{
+       struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
+       bool nowayout = WATCHDOG_NOWAYOUT;
+       struct watchdog_device *retu_wdt;
+       struct retu_wdt_dev *wdev;
+       int ret;
+
+       retu_wdt = devm_kzalloc(&pdev->dev, sizeof(*retu_wdt), GFP_KERNEL);
+       if (!retu_wdt)
+               return -ENOMEM;
+
+       wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
+       if (!wdev)
+               return -ENOMEM;
+
+       retu_wdt->info          = &retu_wdt_info;
+       retu_wdt->ops           = &retu_wdt_ops;
+       retu_wdt->timeout       = RETU_WDT_MAX_TIMER;
+       retu_wdt->min_timeout   = 0;
+       retu_wdt->max_timeout   = RETU_WDT_MAX_TIMER;
+
+       watchdog_set_drvdata(retu_wdt, wdev);
+       watchdog_set_nowayout(retu_wdt, nowayout);
+
+       wdev->rdev              = rdev;
+       wdev->dev               = &pdev->dev;
+
+       INIT_DELAYED_WORK(&wdev->ping_work, retu_wdt_ping_work);
+
+       ret = watchdog_register_device(retu_wdt);
+       if (ret < 0)
+               return ret;
+
+       if (nowayout)
+               retu_wdt_ping(retu_wdt);
+       else
+               retu_wdt_ping_enable(wdev);
+
+       platform_set_drvdata(pdev, retu_wdt);
+
+       return 0;
+}
+
+static int retu_wdt_remove(struct platform_device *pdev)
+{
+       struct watchdog_device *wdog = platform_get_drvdata(pdev);
+       struct retu_wdt_dev *wdev = watchdog_get_drvdata(wdog);
+
+       watchdog_unregister_device(wdog);
+       cancel_delayed_work_sync(&wdev->ping_work);
+
+       return 0;
+}
+
+static struct platform_driver retu_wdt_driver = {
+       .probe          = retu_wdt_probe,
+       .remove         = retu_wdt_remove,
+       .driver         = {
+               .name   = "retu-wdt",
+       },
+};
+module_platform_driver(retu_wdt_driver);
+
+MODULE_ALIAS("platform:retu-wdt");
+MODULE_DESCRIPTION("Retu watchdog");
+MODULE_AUTHOR("Amit Kucheria");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
index 27bcd4e..c1a221c 100644 (file)
@@ -53,7 +53,7 @@
 #define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME   (15)
 
 static bool nowayout   = WATCHDOG_NOWAYOUT;
-static int tmr_margin  = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME;
+static int tmr_margin;
 static int tmr_atboot  = CONFIG_S3C2410_WATCHDOG_ATBOOT;
 static int soft_noboot;
 static int debug;
@@ -226,6 +226,7 @@ static struct watchdog_ops s3c2410wdt_ops = {
 static struct watchdog_device s3c2410_wdd = {
        .info = &s3c2410_wdt_ident,
        .ops = &s3c2410wdt_ops,
+       .timeout = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME,
 };
 
 /* interrupt handler code */
@@ -309,7 +310,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
        unsigned int wtcon;
        int started = 0;
        int ret;
-       int size;
 
        DBG("%s: probe=%p\n", __func__, pdev);
 
@@ -330,28 +330,20 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
        }
 
        /* get the memory region for the watchdog timer */
-
-       size = resource_size(wdt_mem);
-       if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
-               dev_err(dev, "failed to get memory region\n");
-               ret = -EBUSY;
-               goto err;
-       }
-
-       wdt_base = ioremap(wdt_mem->start, size);
+       wdt_base = devm_request_and_ioremap(dev, wdt_mem);
        if (wdt_base == NULL) {
-               dev_err(dev, "failed to ioremap() region\n");
-               ret = -EINVAL;
-               goto err_req;
+               dev_err(dev, "failed to devm_request_and_ioremap() region\n");
+               ret = -ENOMEM;
+               goto err;
        }
 
        DBG("probe: mapped wdt_base=%p\n", wdt_base);
 
-       wdt_clock = clk_get(&pdev->dev, "watchdog");
+       wdt_clock = devm_clk_get(dev, "watchdog");
        if (IS_ERR(wdt_clock)) {
                dev_err(dev, "failed to find watchdog clock source\n");
                ret = PTR_ERR(wdt_clock);
-               goto err_map;
+               goto err;
        }
 
        clk_prepare_enable(wdt_clock);
@@ -365,7 +357,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
        /* see if we can actually set the requested timer margin, and if
         * not, try the default value */
 
-       if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, tmr_margin)) {
+       watchdog_init_timeout(&s3c2410_wdd, tmr_margin,  &pdev->dev);
+       if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout)) {
                started = s3c2410wdt_set_heartbeat(&s3c2410_wdd,
                                        CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
 
@@ -378,7 +371,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
                                                        "cannot start\n");
        }
 
-       ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
+       ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0,
+                               pdev->name, pdev);
        if (ret != 0) {
                dev_err(dev, "failed to install irq (%d)\n", ret);
                goto err_cpufreq;
@@ -389,7 +383,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
        ret = watchdog_register_device(&s3c2410_wdd);
        if (ret) {
                dev_err(dev, "cannot register watchdog (%d)\n", ret);
-               goto err_irq;
+               goto err_cpufreq;
        }
 
        if (tmr_atboot && started == 0) {
@@ -414,23 +408,13 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
 
        return 0;
 
- err_irq:
-       free_irq(wdt_irq->start, pdev);
-
  err_cpufreq:
        s3c2410wdt_cpufreq_deregister();
 
  err_clk:
        clk_disable_unprepare(wdt_clock);
-       clk_put(wdt_clock);
        wdt_clock = NULL;
 
- err_map:
-       iounmap(wdt_base);
-
- err_req:
-       release_mem_region(wdt_mem->start, size);
-
  err:
        wdt_irq = NULL;
        wdt_mem = NULL;
@@ -441,17 +425,11 @@ static int s3c2410wdt_remove(struct platform_device *dev)
 {
        watchdog_unregister_device(&s3c2410_wdd);
 
-       free_irq(wdt_irq->start, dev);
-
        s3c2410wdt_cpufreq_deregister();
 
        clk_disable_unprepare(wdt_clock);
-       clk_put(wdt_clock);
        wdt_clock = NULL;
 
-       iounmap(wdt_base);
-
-       release_mem_region(wdt_mem->start, resource_size(wdt_mem));
        wdt_irq = NULL;
        wdt_mem = NULL;
        return 0;
index 2b0e000..e3b8f75 100644 (file)
@@ -361,7 +361,7 @@ static unsigned char sp5100_tco_setupdevice(void)
 {
        struct pci_dev *dev = NULL;
        const char *dev_name = NULL;
-       u32 val;
+       u32 val, tmp_val;
        u32 index_reg, data_reg, base_addr;
 
        /* Match the PCI device */
@@ -497,30 +497,19 @@ static unsigned char sp5100_tco_setupdevice(void)
                pr_debug("Got 0x%04x from resource tree\n", val);
        }
 
-       /* Restore to the low three bits, if chipset is SB8x0(or later) */
-       if (sp5100_tco_pci->revision >= 0x40) {
-               u8 reserved_bit;
-               reserved_bit = inb(base_addr) & 0x7;
-               val |= (u32)reserved_bit;
-       }
+       /* Restore to the low three bits */
+       outb(base_addr+0, index_reg);
+       tmp_val = val | (inb(data_reg) & 0x7);
 
        /* Re-programming the watchdog timer base address */
        outb(base_addr+0, index_reg);
-       /* Low three bits of BASE are reserved */
-       outb((val >>  0) & 0xf8, data_reg);
+       outb((tmp_val >>  0) & 0xff, data_reg);
        outb(base_addr+1, index_reg);
-       outb((val >>  8) & 0xff, data_reg);
+       outb((tmp_val >>  8) & 0xff, data_reg);
        outb(base_addr+2, index_reg);
-       outb((val >> 16) & 0xff, data_reg);
+       outb((tmp_val >> 16) & 0xff, data_reg);
        outb(base_addr+3, index_reg);
-       outb((val >> 24) & 0xff, data_reg);
-
-       /*
-        * Clear unnecessary the low three bits,
-        * if chipset is SB8x0(or later)
-        */
-       if (sp5100_tco_pci->revision >= 0x40)
-               val &= ~0x7;
+       outb((tmp_val >> 24) & 0xff, data_reg);
 
        if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
                                                                   dev_name)) {
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
new file mode 100644 (file)
index 0000000..c97e98d
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Watchdog driver for the RTC based watchdog in STMP3xxx and i.MX23/28
+ *
+ * Author: Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * Copyright (C) 2011-12 Wolfram Sang, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/stmp3xxx_rtc_wdt.h>
+
+#define WDOG_TICK_RATE 1000 /* 1 kHz clock */
+#define STMP3XXX_DEFAULT_TIMEOUT 19
+#define STMP3XXX_MAX_TIMEOUT (UINT_MAX / WDOG_TICK_RATE)
+
+static int heartbeat = STMP3XXX_DEFAULT_TIMEOUT;
+module_param(heartbeat, uint, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat period in seconds from 1 to "
+                __MODULE_STRING(STMP3XXX_MAX_TIMEOUT) ", default "
+                __MODULE_STRING(STMP3XXX_DEFAULT_TIMEOUT));
+
+static int wdt_start(struct watchdog_device *wdd)
+{
+       struct device *dev = watchdog_get_drvdata(wdd);
+       struct stmp3xxx_wdt_pdata *pdata = dev->platform_data;
+
+       pdata->wdt_set_timeout(dev->parent, wdd->timeout * WDOG_TICK_RATE);
+       return 0;
+}
+
+static int wdt_stop(struct watchdog_device *wdd)
+{
+       struct device *dev = watchdog_get_drvdata(wdd);
+       struct stmp3xxx_wdt_pdata *pdata = dev->platform_data;
+
+       pdata->wdt_set_timeout(dev->parent, 0);
+       return 0;
+}
+
+static int wdt_set_timeout(struct watchdog_device *wdd, unsigned new_timeout)
+{
+       wdd->timeout = new_timeout;
+       return wdt_start(wdd);
+}
+
+static const struct watchdog_info stmp3xxx_wdt_ident = {
+       .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity = "STMP3XXX RTC Watchdog",
+};
+
+static const struct watchdog_ops stmp3xxx_wdt_ops = {
+       .owner = THIS_MODULE,
+       .start = wdt_start,
+       .stop = wdt_stop,
+       .set_timeout = wdt_set_timeout,
+};
+
+static struct watchdog_device stmp3xxx_wdd = {
+       .info = &stmp3xxx_wdt_ident,
+       .ops = &stmp3xxx_wdt_ops,
+       .min_timeout = 1,
+       .max_timeout = STMP3XXX_MAX_TIMEOUT,
+       .status = WATCHDOG_NOWAYOUT_INIT_STATUS,
+};
+
+static int stmp3xxx_wdt_probe(struct platform_device *pdev)
+{
+       int ret;
+
+       watchdog_set_drvdata(&stmp3xxx_wdd, &pdev->dev);
+
+       stmp3xxx_wdd.timeout = clamp_t(unsigned, heartbeat, 1, STMP3XXX_MAX_TIMEOUT);
+
+       ret = watchdog_register_device(&stmp3xxx_wdd);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot register watchdog device\n");
+               return ret;
+       }
+
+       dev_info(&pdev->dev, "initialized watchdog with heartbeat %ds\n",
+                       stmp3xxx_wdd.timeout);
+       return 0;
+}
+
+static int stmp3xxx_wdt_remove(struct platform_device *pdev)
+{
+       watchdog_unregister_device(&stmp3xxx_wdd);
+       return 0;
+}
+
+static struct platform_driver stmp3xxx_wdt_driver = {
+       .driver = {
+               .name = "stmp3xxx_rtc_wdt",
+       },
+       .probe = stmp3xxx_wdt_probe,
+       .remove = stmp3xxx_wdt_remove,
+};
+module_platform_driver(stmp3xxx_wdt_driver);
+
+MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c
deleted file mode 100644 (file)
index 1f4f697..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * Watchdog driver for Freescale STMP37XX/STMP378X
- *
- * Author: Vitaly Wool <vital@embeddedalley.com>
- *
- * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-
-#include <mach/platform.h>
-#include <mach/regs-rtc.h>
-
-#define DEFAULT_HEARTBEAT      19
-#define MAX_HEARTBEAT          (0x10000000 >> 6)
-
-/* missing bitmask in headers */
-#define BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER     0x80000000
-
-#define WDT_IN_USE             0
-#define WDT_OK_TO_CLOSE                1
-
-#define WDOG_COUNTER_RATE      1000 /* 1 kHz clock */
-
-static DEFINE_SPINLOCK(stmp3xxx_wdt_io_lock);
-static unsigned long wdt_status;
-static const bool nowayout = WATCHDOG_NOWAYOUT;
-static int heartbeat = DEFAULT_HEARTBEAT;
-static unsigned long boot_status;
-
-static void wdt_enable(u32 value)
-{
-       spin_lock(&stmp3xxx_wdt_io_lock);
-       __raw_writel(value, REGS_RTC_BASE + HW_RTC_WATCHDOG);
-       stmp3xxx_setl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL);
-       stmp3xxx_setl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
-                       REGS_RTC_BASE + HW_RTC_PERSISTENT1);
-       spin_unlock(&stmp3xxx_wdt_io_lock);
-}
-
-static void wdt_disable(void)
-{
-       spin_lock(&stmp3xxx_wdt_io_lock);
-       stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
-                       REGS_RTC_BASE + HW_RTC_PERSISTENT1);
-       stmp3xxx_clearl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL);
-       spin_unlock(&stmp3xxx_wdt_io_lock);
-}
-
-static void wdt_ping(void)
-{
-       wdt_enable(heartbeat * WDOG_COUNTER_RATE);
-}
-
-static int stmp3xxx_wdt_open(struct inode *inode, struct file *file)
-{
-       if (test_and_set_bit(WDT_IN_USE, &wdt_status))
-               return -EBUSY;
-
-       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-       wdt_ping();
-
-       return nonseekable_open(inode, file);
-}
-
-static ssize_t stmp3xxx_wdt_write(struct file *file, const char __user *data,
-       size_t len, loff_t *ppos)
-{
-       if (len) {
-               if (!nowayout) {
-                       size_t i;
-
-                       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
-                       for (i = 0; i != len; i++) {
-                               char c;
-
-                               if (get_user(c, data + i))
-                                       return -EFAULT;
-                               if (c == 'V')
-                                       set_bit(WDT_OK_TO_CLOSE, &wdt_status);
-                       }
-               }
-               wdt_ping();
-       }
-
-       return len;
-}
-
-static const struct watchdog_info ident = {
-       .options        = WDIOF_CARDRESET |
-                         WDIOF_MAGICCLOSE |
-                         WDIOF_SETTIMEOUT |
-                         WDIOF_KEEPALIVEPING,
-       .identity       = "STMP3XXX Watchdog",
-};
-
-static long stmp3xxx_wdt_ioctl(struct file *file, unsigned int cmd,
-       unsigned long arg)
-{
-       void __user *argp = (void __user *)arg;
-       int __user *p = argp;
-       int new_heartbeat, opts;
-       int ret = -ENOTTY;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
-               break;
-
-       case WDIOC_GETSTATUS:
-               ret = put_user(0, p);
-               break;
-
-       case WDIOC_GETBOOTSTATUS:
-               ret = put_user(boot_status, p);
-               break;
-
-       case WDIOC_SETOPTIONS:
-               if (get_user(opts, p)) {
-                       ret = -EFAULT;
-                       break;
-               }
-               if (opts & WDIOS_DISABLECARD)
-                       wdt_disable();
-               else if (opts & WDIOS_ENABLECARD)
-                       wdt_ping();
-               else {
-                       pr_debug("%s: unknown option 0x%x\n", __func__, opts);
-                       ret = -EINVAL;
-                       break;
-               }
-               ret = 0;
-               break;
-
-       case WDIOC_KEEPALIVE:
-               wdt_ping();
-               ret = 0;
-               break;
-
-       case WDIOC_SETTIMEOUT:
-               if (get_user(new_heartbeat, p)) {
-                       ret = -EFAULT;
-                       break;
-               }
-               if (new_heartbeat <= 0 || new_heartbeat > MAX_HEARTBEAT) {
-                       ret = -EINVAL;
-                       break;
-               }
-
-               heartbeat = new_heartbeat;
-               wdt_ping();
-               /* Fall through */
-
-       case WDIOC_GETTIMEOUT:
-               ret = put_user(heartbeat, p);
-               break;
-       }
-       return ret;
-}
-
-static int stmp3xxx_wdt_release(struct inode *inode, struct file *file)
-{
-       int ret = 0;
-
-       if (!nowayout) {
-               if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
-                       wdt_ping();
-                       pr_debug("%s: Device closed unexpectedly\n", __func__);
-                       ret = -EINVAL;
-               } else {
-                       wdt_disable();
-                       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-               }
-       }
-       clear_bit(WDT_IN_USE, &wdt_status);
-
-       return ret;
-}
-
-static const struct file_operations stmp3xxx_wdt_fops = {
-       .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .write = stmp3xxx_wdt_write,
-       .unlocked_ioctl = stmp3xxx_wdt_ioctl,
-       .open = stmp3xxx_wdt_open,
-       .release = stmp3xxx_wdt_release,
-};
-
-static struct miscdevice stmp3xxx_wdt_miscdev = {
-       .minor = WATCHDOG_MINOR,
-       .name = "watchdog",
-       .fops = &stmp3xxx_wdt_fops,
-};
-
-static int stmp3xxx_wdt_probe(struct platform_device *pdev)
-{
-       int ret = 0;
-
-       if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
-               heartbeat = DEFAULT_HEARTBEAT;
-
-       boot_status = __raw_readl(REGS_RTC_BASE + HW_RTC_PERSISTENT1) &
-                       BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER;
-       boot_status = !!boot_status;
-       stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
-                       REGS_RTC_BASE + HW_RTC_PERSISTENT1);
-       wdt_disable();          /* disable for now */
-
-       ret = misc_register(&stmp3xxx_wdt_miscdev);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "cannot register misc device\n");
-               return ret;
-       }
-
-       pr_info("initialized, heartbeat %d sec\n", heartbeat);
-
-       return ret;
-}
-
-static int stmp3xxx_wdt_remove(struct platform_device *pdev)
-{
-       misc_deregister(&stmp3xxx_wdt_miscdev);
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int wdt_suspended;
-static u32 wdt_saved_time;
-
-static int stmp3xxx_wdt_suspend(struct platform_device *pdev,
-                               pm_message_t state)
-{
-       if (__raw_readl(REGS_RTC_BASE + HW_RTC_CTRL) &
-               BM_RTC_CTRL_WATCHDOGEN) {
-               wdt_suspended = 1;
-               wdt_saved_time = __raw_readl(REGS_RTC_BASE + HW_RTC_WATCHDOG);
-               wdt_disable();
-       }
-       return 0;
-}
-
-static int stmp3xxx_wdt_resume(struct platform_device *pdev)
-{
-       if (wdt_suspended) {
-               wdt_enable(wdt_saved_time);
-               wdt_suspended = 0;
-       }
-       return 0;
-}
-#else
-#define stmp3xxx_wdt_suspend   NULL
-#define stmp3xxx_wdt_resume    NULL
-#endif
-
-static struct platform_driver platform_wdt_driver = {
-       .driver = {
-               .name = "stmp3xxx_wdt",
-       },
-       .probe = stmp3xxx_wdt_probe,
-       .remove = stmp3xxx_wdt_remove,
-       .suspend = stmp3xxx_wdt_suspend,
-       .resume = stmp3xxx_wdt_resume,
-};
-
-module_platform_driver(platform_wdt_driver);
-
-MODULE_DESCRIPTION("STMP3XXX Watchdog Driver");
-MODULE_LICENSE("GPL");
-
-module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat,
-                "Watchdog heartbeat period in seconds from 1 to "
-                __MODULE_STRING(MAX_HEARTBEAT) ", default "
-                __MODULE_STRING(DEFAULT_HEARTBEAT));
-
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
index 3796434..05d18b4 100644 (file)
 #include <linux/init.h>                /* For __init/__exit/... */
 #include <linux/idr.h>         /* For ida_* macros */
 #include <linux/err.h>         /* For IS_ERR macros */
+#include <linux/of.h>          /* For of_property_read_u32 */
 
 #include "watchdog_core.h"     /* For watchdog_dev_register/... */
 
 static DEFINE_IDA(watchdog_ida);
 static struct class *watchdog_class;
 
+static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
+{
+       /*
+        * Check that we have valid min and max timeout values, if
+        * not reset them both to 0 (=not used or unknown)
+        */
+       if (wdd->min_timeout > wdd->max_timeout) {
+               pr_info("Invalid min and max timeout values, resetting to 0!\n");
+               wdd->min_timeout = 0;
+               wdd->max_timeout = 0;
+       }
+}
+
+/**
+ * watchdog_init_timeout() - initialize the timeout field
+ * @timeout_parm: timeout module parameter
+ * @dev: Device that stores the timeout-sec property
+ *
+ * Initialize the timeout field of the watchdog_device struct with either the
+ * timeout module parameter (if it is a valid value) or the timeout-sec property
+ * (only if it is a valid value and the timeout_parm is out of bounds).
+ * If none of them are valid then we keep the old value (which should normally
+ * be the default timeout value).
+ *
+ * A zero is returned on success and -EINVAL for failure.
+ */
+int watchdog_init_timeout(struct watchdog_device *wdd,
+                               unsigned int timeout_parm, struct device *dev)
+{
+       unsigned int t = 0;
+       int ret = 0;
+
+       watchdog_check_min_max_timeout(wdd);
+
+       /* try to get the timeout module parameter first */
+       if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
+               wdd->timeout = timeout_parm;
+               return ret;
+       }
+       if (timeout_parm)
+               ret = -EINVAL;
+
+       /* try to get the timeout-sec property */
+       if (dev == NULL || dev->of_node == NULL)
+               return ret;
+       of_property_read_u32(dev->of_node, "timeout-sec", &t);
+       if (!watchdog_timeout_invalid(wdd, t))
+               wdd->timeout = t;
+       else
+               ret = -EINVAL;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(watchdog_init_timeout);
+
 /**
  * watchdog_register_device() - register a watchdog device
  * @wdd: watchdog device
@@ -63,15 +119,7 @@ int watchdog_register_device(struct watchdog_device *wdd)
        if (wdd->ops->start == NULL || wdd->ops->stop == NULL)
                return -EINVAL;
 
-       /*
-        * Check that we have valid min and max timeout values, if
-        * not reset them both to 0 (=not used or unknown)
-        */
-       if (wdd->min_timeout > wdd->max_timeout) {
-               pr_info("Invalid min and max timeout values, resetting to 0!\n");
-               wdd->min_timeout = 0;
-               wdd->max_timeout = 0;
-       }
+       watchdog_check_min_max_timeout(wdd);
 
        /*
         * Note: now that all watchdog_device data has been verified, we
index ef8edec..08b48bb 100644 (file)
@@ -200,8 +200,7 @@ static int watchdog_set_timeout(struct watchdog_device *wddev,
            !(wddev->info->options & WDIOF_SETTIMEOUT))
                return -EOPNOTSUPP;
 
-       if ((wddev->max_timeout != 0) &&
-           (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+       if (watchdog_timeout_invalid(wddev, timeout))
                return -EINVAL;
 
        mutex_lock(&wddev->lock);
index 7578279..18c742b 100644 (file)
@@ -239,24 +239,6 @@ static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr)
        return AE_OK;
 }
 
-static
-int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
-{
-       acpi_handle phandle;
-       struct acpi_device *pdev;
-
-       if (acpi_get_parent(handle, &phandle))
-               return -ENODEV;
-
-       if (acpi_bus_get_device(phandle, &pdev))
-               return -ENODEV;
-
-       if (acpi_bus_scan(handle))
-               return -ENODEV;
-
-       return 0;
-}
-
 static int acpi_processor_device_remove(struct acpi_device *device)
 {
        pr_debug(PREFIX "Xen does not support CPU hotremove\n");
@@ -272,6 +254,8 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
        u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
        int result;
 
+       acpi_scan_lock_acquire();
+
        switch (event) {
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
@@ -286,12 +270,16 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
                if (!acpi_bus_get_device(handle, &device))
                        break;
 
-               result = acpi_processor_device_add(handle, &device);
+               result = acpi_bus_scan(handle);
                if (result) {
                        pr_err(PREFIX "Unable to add the device\n");
                        break;
                }
-
+               result = acpi_bus_get_device(handle, &device);
+               if (result) {
+                       pr_err(PREFIX "Missing device object\n");
+                       break;
+               }
                ost_code = ACPI_OST_SC_SUCCESS;
                break;
 
@@ -321,11 +309,13 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
                                  "Unsupported event [0x%x]\n", event));
 
                /* non-hotplug event; possibly handled by other handler */
-               return;
+               goto out;
        }
 
        (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
-       return;
+
+out:
+       acpi_scan_lock_release();
 }
 
 static acpi_status is_processor_device(acpi_handle handle)
index 853b12d..faef5b3 100644 (file)
@@ -158,31 +158,17 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
        return 0;
 }
 
-static int
-acpi_memory_get_device(acpi_handle handle,
-                      struct acpi_memory_device **mem_device)
+static int acpi_memory_get_device(acpi_handle handle,
+                                 struct acpi_memory_device **mem_device)
 {
-       acpi_status status;
-       acpi_handle phandle;
        struct acpi_device *device = NULL;
-       struct acpi_device *pdevice = NULL;
-       int result;
+       int result = 0;
 
-       if (!acpi_bus_get_device(handle, &device) && device)
-               goto end;
+       acpi_scan_lock_acquire();
 
-       status = acpi_get_parent(handle, &phandle);
-       if (ACPI_FAILURE(status)) {
-               pr_warn(PREFIX "Cannot find acpi parent\n");
-               return -EINVAL;
-       }
-
-       /* Get the parent device */
-       result = acpi_bus_get_device(phandle, &pdevice);
-       if (result) {
-               pr_warn(PREFIX "Cannot get acpi bus device\n");
-               return -EINVAL;
-       }
+       acpi_bus_get_device(handle, &device);
+       if (device)
+               goto end;
 
        /*
         * Now add the notified device.  This creates the acpi_device
@@ -190,18 +176,28 @@ acpi_memory_get_device(acpi_handle handle,
         */
        result = acpi_bus_scan(handle);
        if (result) {
-               pr_warn(PREFIX "Cannot add acpi bus\n");
-               return -EINVAL;
+               pr_warn(PREFIX "ACPI namespace scan failed\n");
+               result = -EINVAL;
+               goto out;
+       }
+       result = acpi_bus_get_device(handle, &device);
+       if (result) {
+               pr_warn(PREFIX "Missing device object\n");
+               result = -EINVAL;
+               goto out;
        }
 
 end:
        *mem_device = acpi_driver_data(device);
        if (!(*mem_device)) {
-               pr_err(PREFIX "Driver data not found\n");
-               return -ENODEV;
+               pr_err(PREFIX "driver data not found\n");
+               result = -ENODEV;
+               goto out;
        }
 
-       return 0;
+out:
+       acpi_scan_lock_release();
+       return result;
 }
 
 static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
@@ -259,12 +255,15 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "\nReceived EJECT REQUEST notification for device\n"));
 
+               acpi_scan_lock_acquire();
                if (acpi_bus_get_device(handle, &device)) {
+                       acpi_scan_lock_release();
                        pr_err(PREFIX "Device doesn't exist\n");
                        break;
                }
                mem_device = acpi_driver_data(device);
                if (!mem_device) {
+                       acpi_scan_lock_release();
                        pr_err(PREFIX "Driver Data is NULL\n");
                        break;
                }
@@ -274,6 +273,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
                 * acpi_bus_remove if Xen support hotremove in the future
                 */
                acpi_memory_disable_device(mem_device);
+               acpi_scan_lock_release();
                break;
 
        default:
index bcf3ba4..61786be 100644 (file)
@@ -30,6 +30,7 @@
  * IN THE SOFTWARE.
  */
 
+#include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/spinlock.h>
index 459b9ac..ec0abb6 100644 (file)
 MODULE_DESCRIPTION("Xen filesystem");
 MODULE_LICENSE("GPL");
 
-static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
-{
-       struct inode *ret = new_inode(sb);
-
-       if (ret) {
-               ret->i_mode = mode;
-               ret->i_uid = GLOBAL_ROOT_UID;
-               ret->i_gid = GLOBAL_ROOT_GID;
-               ret->i_blocks = 0;
-               ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
-       }
-       return ret;
-}
-
-static struct dentry *xenfs_create_file(struct super_block *sb,
-                                       struct dentry *parent,
-                                       const char *name,
-                                       const struct file_operations *fops,
-                                       void *data,
-                                       int mode)
-{
-       struct dentry *dentry;
-       struct inode *inode;
-
-       dentry = d_alloc_name(parent, name);
-       if (!dentry)
-               return NULL;
-
-       inode = xenfs_make_inode(sb, S_IFREG | mode);
-       if (!inode) {
-               dput(dentry);
-               return NULL;
-       }
-
-       inode->i_fop = fops;
-       inode->i_private = data;
-
-       d_add(dentry, inode);
-       return dentry;
-}
-
 static ssize_t capabilities_read(struct file *file, char __user *buf,
                                 size_t size, loff_t *off)
 {
@@ -84,26 +43,23 @@ static const struct file_operations capabilities_file_ops = {
 static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
 {
        static struct tree_descr xenfs_files[] = {
-               [1] = {},
-               { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
+               [2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
                { "capabilities", &capabilities_file_ops, S_IRUGO },
                { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
                {""},
        };
-       int rc;
 
-       rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
-       if (rc < 0)
-               return rc;
-
-       if (xen_initial_domain()) {
-               xenfs_create_file(sb, sb->s_root, "xsd_kva",
-                                 &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR);
-               xenfs_create_file(sb, sb->s_root, "xsd_port",
-                                 &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR);
-       }
+       static struct tree_descr xenfs_init_files[] = {
+               [2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
+               { "capabilities", &capabilities_file_ops, S_IRUGO },
+               { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
+               { "xsd_kva", &xsd_kva_file_ops, S_IRUSR|S_IWUSR},
+               { "xsd_port", &xsd_port_file_ops, S_IRUSR|S_IWUSR},
+               {""},
+       };
 
-       return rc;
+       return simple_fill_super(sb, XENFS_SUPER_MAGIC,
+                       xen_initial_domain() ? xenfs_init_files : xenfs_files);
 }
 
 static struct dentry *xenfs_mount(struct file_system_type *fs_type,
index 988880d..73b3383 100644 (file)
@@ -22,7 +22,7 @@ static loff_t
 proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
 {
        loff_t new = -1;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        mutex_lock(&inode->i_mutex);
        switch (whence) {
@@ -47,7 +47,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
 static ssize_t
 proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
-       struct inode *ino = file->f_path.dentry->d_inode;
+       struct inode *ino = file_inode(file);
        struct proc_dir_entry *dp = PDE(ino);
        struct zorro_dev *z = dp->data;
        struct ConfigDev cd;
index 15b6791..7af425f 100644 (file)
@@ -23,6 +23,7 @@
 #include "acl.h"
 #include "v9fs.h"
 #include "v9fs_vfs.h"
+#include "fid.h"
 
 static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
 {
@@ -113,16 +114,12 @@ struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type)
 
 }
 
-static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl)
+static int v9fs_set_acl(struct p9_fid *fid, int type, struct posix_acl *acl)
 {
        int retval;
        char *name;
        size_t size;
        void *buffer;
-       struct inode *inode = dentry->d_inode;
-
-       set_cached_acl(inode, type, acl);
-
        if (!acl)
                return 0;
 
@@ -144,17 +141,16 @@ static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl)
        default:
                BUG();
        }
-       retval = v9fs_xattr_set(dentry, name, buffer, size, 0);
+       retval = v9fs_fid_xattr_set(fid, name, buffer, size, 0);
 err_free_out:
        kfree(buffer);
        return retval;
 }
 
-int v9fs_acl_chmod(struct dentry *dentry)
+int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid)
 {
        int retval = 0;
        struct posix_acl *acl;
-       struct inode *inode = dentry->d_inode;
 
        if (S_ISLNK(inode->i_mode))
                return -EOPNOTSUPP;
@@ -163,25 +159,30 @@ int v9fs_acl_chmod(struct dentry *dentry)
                retval = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
                if (retval)
                        return retval;
-               retval = v9fs_set_acl(dentry, ACL_TYPE_ACCESS, acl);
+               set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
+               retval = v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl);
                posix_acl_release(acl);
        }
        return retval;
 }
 
-int v9fs_set_create_acl(struct dentry *dentry,
-                       struct posix_acl **dpacl, struct posix_acl **pacl)
+int v9fs_set_create_acl(struct inode *inode, struct p9_fid *fid,
+                       struct posix_acl *dacl, struct posix_acl *acl)
 {
-       if (dentry) {
-               v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, *dpacl);
-               v9fs_set_acl(dentry, ACL_TYPE_ACCESS, *pacl);
-       }
-       posix_acl_release(*dpacl);
-       posix_acl_release(*pacl);
-       *dpacl = *pacl = NULL;
+       set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl);
+       set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
+       v9fs_set_acl(fid, ACL_TYPE_DEFAULT, dacl);
+       v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl);
        return 0;
 }
 
+void v9fs_put_acl(struct posix_acl *dacl,
+                 struct posix_acl *acl)
+{
+       posix_acl_release(dacl);
+       posix_acl_release(acl);
+}
+
 int v9fs_acl_mode(struct inode *dir, umode_t *modep,
                  struct posix_acl **dpacl, struct posix_acl **pacl)
 {
index 5595564..e4f7e88 100644 (file)
 #ifdef CONFIG_9P_FS_POSIX_ACL
 extern int v9fs_get_acl(struct inode *, struct p9_fid *);
 extern struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type);
-extern int v9fs_acl_chmod(struct dentry *);
-extern int v9fs_set_create_acl(struct dentry *,
-                              struct posix_acl **, struct posix_acl **);
+extern int v9fs_acl_chmod(struct inode *, struct p9_fid *);
+extern int v9fs_set_create_acl(struct inode *, struct p9_fid *,
+                              struct posix_acl *, struct posix_acl *);
 extern int v9fs_acl_mode(struct inode *dir, umode_t *modep,
                         struct posix_acl **dpacl, struct posix_acl **pacl);
+extern void v9fs_put_acl(struct posix_acl *dacl, struct posix_acl *acl);
 #else
 #define v9fs_iop_get_acl NULL
 static inline int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
 {
        return 0;
 }
-static inline int v9fs_acl_chmod(struct dentry *dentry)
+static inline int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid)
 {
        return 0;
 }
-static inline int v9fs_set_create_acl(struct dentry *dentry,
-                                     struct posix_acl **dpacl,
-                                     struct posix_acl **pacl)
+static inline int v9fs_set_create_acl(struct inode *inode,
+                                     struct p9_fid *fid,
+                                     struct posix_acl *dacl,
+                                     struct posix_acl *acl)
 {
        return 0;
 }
+static inline void v9fs_put_acl(struct posix_acl *dacl,
+                               struct posix_acl *acl)
+{
+}
 static inline int v9fs_acl_mode(struct inode *dir, umode_t *modep,
                                struct posix_acl **dpacl,
                                struct posix_acl **pacl)
index afd4724..d51ec9f 100644 (file)
  *
  */
 
-int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
+static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid)
 {
-       struct v9fs_dentry *dent;
-
-       p9_debug(P9_DEBUG_VFS, "fid %d dentry %s\n",
-                fid->fid, dentry->d_name.name);
-
-       dent = dentry->d_fsdata;
-       if (!dent) {
-               dent = kmalloc(sizeof(struct v9fs_dentry), GFP_KERNEL);
-               if (!dent)
-                       return -ENOMEM;
-
-               spin_lock_init(&dent->lock);
-               INIT_LIST_HEAD(&dent->fidlist);
-               dentry->d_fsdata = dent;
-       }
-
-       spin_lock(&dent->lock);
-       list_add(&fid->dlist, &dent->fidlist);
-       spin_unlock(&dent->lock);
+       hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata);
+}
 
-       return 0;
+void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
+{
+       spin_lock(&dentry->d_lock);
+       __add_fid(dentry, fid);
+       spin_unlock(&dentry->d_lock);
 }
 
 /**
@@ -76,23 +63,23 @@ int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid)
 
 static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
 {
-       struct v9fs_dentry *dent;
        struct p9_fid *fid, *ret;
 
        p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
                 dentry->d_name.name, dentry, from_kuid(&init_user_ns, uid),
                 any);
-       dent = (struct v9fs_dentry *) dentry->d_fsdata;
        ret = NULL;
-       if (dent) {
-               spin_lock(&dent->lock);
-               list_for_each_entry(fid, &dent->fidlist, dlist) {
+       /* we'll recheck under lock if there's anything to look in */
+       if (dentry->d_fsdata) {
+               struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata;
+               spin_lock(&dentry->d_lock);
+               hlist_for_each_entry(fid, h, dlist) {
                        if (any || uid_eq(fid->uid, uid)) {
                                ret = fid;
                                break;
                        }
                }
-               spin_unlock(&dent->lock);
+               spin_unlock(&dentry->d_lock);
        }
 
        return ret;
@@ -215,8 +202,17 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
        }
        kfree(wnames);
 fid_out:
-       if (!IS_ERR(fid))
-               v9fs_fid_add(dentry, fid);
+       if (!IS_ERR(fid)) {
+               spin_lock(&dentry->d_lock);
+               if (d_unhashed(dentry)) {
+                       spin_unlock(&dentry->d_lock);
+                       p9_client_clunk(fid);
+                       fid = ERR_PTR(-ENOENT);
+               } else {
+                       __add_fid(dentry, fid);
+                       spin_unlock(&dentry->d_lock);
+               }
+       }
 err_out:
        up_read(&v9ses->rename_sem);
        return fid;
index bb0b6e7..2b6787f 100644 (file)
 #define FS_9P_FID_H
 #include <linux/list.h>
 
-/**
- * struct v9fs_dentry - 9p private data stored in dentry d_fsdata
- * @lock: protects the fidlist
- * @fidlist: list of FIDs currently associated with this dentry
- *
- * This structure defines the 9p private data associated with
- * a particular dentry.  In particular, this private data is used
- * to lookup which 9P FID handle should be used for a particular VFS
- * operation.  FID handles are associated with dentries instead of
- * inodes in order to more closely map functionality to the Plan 9
- * expected behavior for FID reclaimation and tracking.
- *
- * See Also: Mapping FIDs to Linux VFS model in
- * Design and Implementation of the Linux 9P File System documentation
- */
-struct v9fs_dentry {
-       spinlock_t lock; /* protect fidlist */
-       struct list_head fidlist;
-};
-
 struct p9_fid *v9fs_fid_lookup(struct dentry *dentry);
 struct p9_fid *v9fs_fid_clone(struct dentry *dentry);
-int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
+void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
 struct p9_fid *v9fs_writeback_fid(struct dentry *dentry);
 #endif
index 64600b5..f039b10 100644 (file)
@@ -83,21 +83,12 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
 
 static void v9fs_dentry_release(struct dentry *dentry)
 {
-       struct v9fs_dentry *dent;
-       struct p9_fid *temp, *current_fid;
-
+       struct hlist_node *p, *n;
        p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
                 dentry->d_name.name, dentry);
-       dent = dentry->d_fsdata;
-       if (dent) {
-               list_for_each_entry_safe(current_fid, temp, &dent->fidlist,
-                                                                       dlist) {
-                       p9_client_clunk(current_fid);
-               }
-
-               kfree(dent);
-               dentry->d_fsdata = NULL;
-       }
+       hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
+               p9_client_clunk(hlist_entry(p, struct p9_fid, dlist));
+       dentry->d_fsdata = NULL;
 }
 
 static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
@@ -137,6 +128,7 @@ out_valid:
 
 const struct dentry_operations v9fs_cached_dentry_operations = {
        .d_revalidate = v9fs_lookup_revalidate,
+       .d_weak_revalidate = v9fs_lookup_revalidate,
        .d_delete = v9fs_cached_dentry_delete,
        .d_release = v9fs_dentry_release,
 };
index c921ac9..d384a8b 100644 (file)
@@ -129,7 +129,7 @@ out_error:
 static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
 {
        int res = 0;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
 
@@ -298,7 +298,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
 
 static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int ret = -ENOLCK;
 
        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
@@ -334,7 +334,7 @@ out_err:
 static int v9fs_file_flock_dotl(struct file *filp, int cmd,
        struct file_lock *fl)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int ret = -ENOLCK;
 
        p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
@@ -525,7 +525,7 @@ v9fs_file_write(struct file *filp, const char __user * data,
        if (!count)
                goto out;
 
-       retval = v9fs_file_write_internal(filp->f_path.dentry->d_inode,
+       retval = v9fs_file_write_internal(file_inode(filp),
                                        filp->private_data,
                                        data, count, &origin, 1);
        /* update offset on successful write */
@@ -600,7 +600,7 @@ v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct v9fs_inode *v9inode;
        struct page *page = vmf->page;
        struct file *filp = vma->vm_file;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
 
        p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
index b5340c8..d86edc8 100644 (file)
@@ -692,9 +692,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
                                   "inode creation failed %d\n", err);
                        goto error;
                }
-               err = v9fs_fid_add(dentry, fid);
-               if (err < 0)
-                       goto error;
+               v9fs_fid_add(dentry, fid);
                d_instantiate(dentry, inode);
        }
        return ofid;
@@ -790,7 +788,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
        struct p9_fid *dfid, *fid;
        struct inode *inode;
        char *name;
-       int result = 0;
 
        p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p flags: %x\n",
                 dir, dentry->d_name.name, dentry, flags);
@@ -808,13 +805,11 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
        name = (char *) dentry->d_name.name;
        fid = p9_client_walk(dfid, 1, &name, 1);
        if (IS_ERR(fid)) {
-               result = PTR_ERR(fid);
-               if (result == -ENOENT) {
-                       inode = NULL;
-                       goto inst_out;
+               if (fid == ERR_PTR(-ENOENT)) {
+                       d_add(dentry, NULL);
+                       return NULL;
                }
-
-               return ERR_PTR(result);
+               return ERR_CAST(fid);
        }
        /*
         * Make sure we don't use a wrong inode due to parallel
@@ -826,14 +821,9 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
        else
                inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
        if (IS_ERR(inode)) {
-               result = PTR_ERR(inode);
-               inode = NULL;
-               goto error;
+               p9_client_clunk(fid);
+               return ERR_CAST(inode);
        }
-       result = v9fs_fid_add(dentry, fid);
-       if (result < 0)
-               goto error_iput;
-inst_out:
        /*
         * If we had a rename on the server and a parallel lookup
         * for the new name, then make sure we instantiate with
@@ -842,15 +832,13 @@ inst_out:
         * k/b.
         */
        res = d_materialise_unique(dentry, inode);
-       if (!IS_ERR(res))
-               return res;
-       result = PTR_ERR(res);
-error_iput:
-       iput(inode);
-error:
-       p9_client_clunk(fid);
-
-       return ERR_PTR(result);
+       if (!res)
+               v9fs_fid_add(dentry, fid);
+       else if (!IS_ERR(res))
+               v9fs_fid_add(res, fid);
+       else
+               p9_client_clunk(fid);
+       return res;
 }
 
 static int
index 07f4092..53687bb 100644 (file)
@@ -330,13 +330,11 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
                p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       err = v9fs_fid_add(dentry, fid);
-       if (err < 0)
-               goto error;
-       d_instantiate(dentry, inode);
-
        /* Now set the ACL based on the default value */
-       v9fs_set_create_acl(dentry, &dacl, &pacl);
+       v9fs_set_create_acl(inode, fid, dacl, pacl);
+
+       v9fs_fid_add(dentry, fid);
+       d_instantiate(dentry, inode);
 
        v9inode = V9FS_I(inode);
        mutex_lock(&v9inode->v_mutex);
@@ -369,6 +367,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
 #endif
        *opened |= FILE_CREATED;
 out:
+       v9fs_put_acl(dacl, pacl);
        dput(res);
        return err;
 
@@ -378,7 +377,6 @@ error:
 err_clunk_old_fid:
        if (ofid)
                p9_client_clunk(ofid);
-       v9fs_set_create_acl(NULL, &dacl, &pacl);
        goto out;
 }
 
@@ -435,17 +433,17 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
        if (err < 0)
                goto error;
 
+       fid = p9_client_walk(dfid, 1, &name, 1);
+       if (IS_ERR(fid)) {
+               err = PTR_ERR(fid);
+               p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+                        err);
+               fid = NULL;
+               goto error;
+       }
+
        /* instantiate inode and assign the unopened fid to the dentry */
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-               fid = p9_client_walk(dfid, 1, &name, 1);
-               if (IS_ERR(fid)) {
-                       err = PTR_ERR(fid);
-                       p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-                                err);
-                       fid = NULL;
-                       goto error;
-               }
-
                inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
@@ -453,11 +451,11 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                                 err);
                        goto error;
                }
-               err = v9fs_fid_add(dentry, fid);
-               if (err < 0)
-                       goto error;
+               v9fs_fid_add(dentry, fid);
+               v9fs_set_create_acl(inode, fid, dacl, pacl);
                d_instantiate(dentry, inode);
                fid = NULL;
+               err = 0;
        } else {
                /*
                 * Not in cached mode. No need to populate
@@ -469,16 +467,15 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                        err = PTR_ERR(inode);
                        goto error;
                }
+               v9fs_set_create_acl(inode, fid, dacl, pacl);
                d_instantiate(dentry, inode);
        }
-       /* Now set the ACL based on the default value */
-       v9fs_set_create_acl(dentry, &dacl, &pacl);
        inc_nlink(dir);
        v9fs_invalidate_inode_attr(dir);
 error:
        if (fid)
                p9_client_clunk(fid);
-       v9fs_set_create_acl(NULL, &dacl, &pacl);
+       v9fs_put_acl(dacl, pacl);
        return err;
 }
 
@@ -572,10 +569,11 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
        struct v9fs_session_info *v9ses;
        struct p9_fid *fid;
        struct p9_iattr_dotl p9attr;
+       struct inode *inode = dentry->d_inode;
 
        p9_debug(P9_DEBUG_VFS, "\n");
 
-       retval = inode_change_ok(dentry->d_inode, iattr);
+       retval = inode_change_ok(inode, iattr);
        if (retval)
                return retval;
 
@@ -596,23 +594,23 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
                return PTR_ERR(fid);
 
        /* Write all dirty data */
-       if (S_ISREG(dentry->d_inode->i_mode))
-               filemap_write_and_wait(dentry->d_inode->i_mapping);
+       if (S_ISREG(inode->i_mode))
+               filemap_write_and_wait(inode->i_mapping);
 
        retval = p9_client_setattr(fid, &p9attr);
        if (retval < 0)
                return retval;
 
        if ((iattr->ia_valid & ATTR_SIZE) &&
-           iattr->ia_size != i_size_read(dentry->d_inode))
-               truncate_setsize(dentry->d_inode, iattr->ia_size);
+           iattr->ia_size != i_size_read(inode))
+               truncate_setsize(inode, iattr->ia_size);
 
-       v9fs_invalidate_inode_attr(dentry->d_inode);
-       setattr_copy(dentry->d_inode, iattr);
-       mark_inode_dirty(dentry->d_inode);
+       v9fs_invalidate_inode_attr(inode);
+       setattr_copy(inode, iattr);
+       mark_inode_dirty(inode);
        if (iattr->ia_valid & ATTR_MODE) {
                /* We also want to update ACL when we update mode bits */
-               retval = v9fs_acl_chmod(dentry);
+               retval = v9fs_acl_chmod(inode, fid);
                if (retval < 0)
                        return retval;
        }
@@ -746,11 +744,10 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
                                 err);
                        goto error;
                }
-               err = v9fs_fid_add(dentry, fid);
-               if (err < 0)
-                       goto error;
+               v9fs_fid_add(dentry, fid);
                d_instantiate(dentry, inode);
                fid = NULL;
+               err = 0;
        } else {
                /* Not in cached mode. No need to populate inode with stat */
                inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
@@ -880,17 +877,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
                goto error;
 
        v9fs_invalidate_inode_attr(dir);
+       fid = p9_client_walk(dfid, 1, &name, 1);
+       if (IS_ERR(fid)) {
+               err = PTR_ERR(fid);
+               p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+                        err);
+               fid = NULL;
+               goto error;
+       }
+
        /* instantiate inode and assign the unopened fid to the dentry */
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-               fid = p9_client_walk(dfid, 1, &name, 1);
-               if (IS_ERR(fid)) {
-                       err = PTR_ERR(fid);
-                       p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-                                err);
-                       fid = NULL;
-                       goto error;
-               }
-
                inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
@@ -898,11 +895,11 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
                                 err);
                        goto error;
                }
-               err = v9fs_fid_add(dentry, fid);
-               if (err < 0)
-                       goto error;
+               v9fs_set_create_acl(inode, fid, dacl, pacl);
+               v9fs_fid_add(dentry, fid);
                d_instantiate(dentry, inode);
                fid = NULL;
+               err = 0;
        } else {
                /*
                 * Not in cached mode. No need to populate inode with stat.
@@ -913,14 +910,13 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
                        err = PTR_ERR(inode);
                        goto error;
                }
+               v9fs_set_create_acl(inode, fid, dacl, pacl);
                d_instantiate(dentry, inode);
        }
-       /* Now set the ACL based on the default value */
-       v9fs_set_create_acl(dentry, &dacl, &pacl);
 error:
        if (fid)
                p9_client_clunk(fid);
-       v9fs_set_create_acl(NULL, &dacl, &pacl);
+       v9fs_put_acl(dacl, pacl);
        return err;
 }
 
index 137d503..91dad63 100644 (file)
@@ -363,5 +363,5 @@ struct file_system_type v9fs_fs_type = {
        .mount = v9fs_mount,
        .kill_sb = v9fs_kill_super,
        .owner = THIS_MODULE,
-       .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT,
+       .fs_flags = FS_RENAME_DOES_D_MOVE,
 };
index 29653b7..c45e016 100644 (file)
@@ -110,20 +110,27 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
  */
 int v9fs_xattr_set(struct dentry *dentry, const char *name,
                   const void *value, size_t value_len, int flags)
+{
+       struct p9_fid *fid = v9fs_fid_lookup(dentry);
+       if (IS_ERR(fid))
+               return PTR_ERR(fid);
+       return v9fs_fid_xattr_set(fid, name, value, value_len, flags);
+}
+
+int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
+                  const void *value, size_t value_len, int flags)
 {
        u64 offset = 0;
        int retval, msize, write_count;
-       struct p9_fid *fid = NULL;
 
        p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
                 name, value_len, flags);
 
-       fid = v9fs_fid_clone(dentry);
-       if (IS_ERR(fid)) {
-               retval = PTR_ERR(fid);
-               fid = NULL;
-               goto error;
-       }
+       /* Clone it */
+       fid = p9_client_walk(fid, 0, NULL, 1);
+       if (IS_ERR(fid))
+               return PTR_ERR(fid);
+
        /*
         * On success fid points to xattr
         */
@@ -131,7 +138,8 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
        if (retval < 0) {
                p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
                         retval);
-               goto error;
+               p9_client_clunk(fid);
+               return retval;
        }
        msize = fid->clnt->msize;
        while (value_len) {
@@ -144,17 +152,12 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
                if (write_count < 0) {
                        /* error in xattr write */
                        retval = write_count;
-                       goto error;
+                       break;
                }
                offset += write_count;
                value_len -= write_count;
        }
-       /* Total read xattr bytes */
-       retval = offset;
-error:
-       if (fid)
-               retval = p9_client_clunk(fid);
-       return retval;
+       return p9_client_clunk(fid);
 }
 
 ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
index eaa837c..eec348a 100644 (file)
@@ -27,6 +27,8 @@ extern ssize_t v9fs_fid_xattr_get(struct p9_fid *, const char *,
                                  void *, size_t);
 extern ssize_t v9fs_xattr_get(struct dentry *, const char *,
                              void *, size_t);
+extern int v9fs_fid_xattr_set(struct p9_fid *, const char *,
+                         const void *, size_t, int);
 extern int v9fs_xattr_set(struct dentry *, const char *,
                          const void *, size_t, int);
 extern ssize_t v9fs_listxattr(struct dentry *, char *, size_t);
index b3be2e7..9cf874c 100644 (file)
@@ -19,7 +19,7 @@ static DEFINE_RWLOCK(adfs_dir_lock);
 static int
 adfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
        struct object_info obj;
index eb82ee5..d9a4367 100644 (file)
@@ -125,9 +125,8 @@ static void
 affs_fix_dcache(struct inode *inode, u32 entry_ino)
 {
        struct dentry *dentry;
-       struct hlist_node *p;
        spin_lock(&inode->i_lock);
-       hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                if (entry_ino == (u32)(long)dentry->d_fsdata) {
                        dentry->d_fsdata = (void *)inode->i_ino;
                        break;
index 8ca8f3a..fd11a6d 100644 (file)
@@ -42,7 +42,7 @@ const struct inode_operations affs_dir_inode_operations = {
 static int
 affs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode            *inode = filp->f_path.dentry->d_inode;
+       struct inode            *inode = file_inode(filp);
        struct super_block      *sb = inode->i_sb;
        struct buffer_head      *dir_bh;
        struct buffer_head      *fh_bh;
index db47790..7a465ed 100644 (file)
@@ -393,12 +393,12 @@ static int afs_readdir(struct file *file, void *cookie, filldir_t filldir)
        int ret;
 
        _enter("{%Ld,{%lu}}",
-              file->f_pos, file->f_path.dentry->d_inode->i_ino);
+              file->f_pos, file_inode(file)->i_ino);
 
        ASSERT(file->private_data != NULL);
 
        fpos = file->f_pos;
-       ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos,
+       ret = afs_dir_iterate(file_inode(file), &fpos,
                              cookie, filldir, file->private_data);
        file->f_pos = fpos;
 
index 757d664..2497bf3 100644 (file)
@@ -514,7 +514,7 @@ error:
  */
 int afs_lock(struct file *file, int cmd, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
 
        _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
               vnode->fid.vid, vnode->fid.vnode, cmd,
@@ -537,7 +537,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
  */
 int afs_flock(struct file *file, int cmd, struct file_lock *fl)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
 
        _enter("{%x:%u},%d,{t=%x,fl=%x}",
               vnode->fid.vid, vnode->fid.vnode, cmd,
index 9aa52d9..7e03ead 100644 (file)
@@ -120,7 +120,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
                    struct page **pagep, void **fsdata)
 {
        struct afs_writeback *candidate, *wb;
-       struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = file->private_data;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
@@ -245,7 +245,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        loff_t i_size, maybe_i_size;
 
        _enter("{%x:%u},{%lx}",
@@ -627,8 +627,7 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
 ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
                       unsigned long nr_segs, loff_t pos)
 {
-       struct dentry *dentry = iocb->ki_filp->f_path.dentry;
-       struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
        size_t count = iov_length(iov, nr_segs);
 
index 064bfbe..3f941f2 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -591,11 +591,10 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
        struct mm_struct *mm = current->mm;
        struct kioctx *ctx, *ret = NULL;
-       struct hlist_node *n;
 
        rcu_read_lock();
 
-       hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
+       hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
                /*
                 * RCU protects us against accessing freed memory but
                 * we have to be careful not to get a reference when the
index 28d39fb..47a65df 100644 (file)
@@ -131,7 +131,6 @@ struct file *anon_inode_getfile(const char *name,
        struct qstr this;
        struct path path;
        struct file *file;
-       int error;
 
        if (IS_ERR(anon_inode_inode))
                return ERR_PTR(-ENODEV);
@@ -143,7 +142,7 @@ struct file *anon_inode_getfile(const char *name,
         * Link the inode to a directory entry by creating a unique name
         * using the inode sequence number.
         */
-       error = -ENOMEM;
+       file = ERR_PTR(-ENOMEM);
        this.name = name;
        this.len = strlen(name);
        this.hash = 0;
@@ -160,15 +159,12 @@ struct file *anon_inode_getfile(const char *name,
 
        d_instantiate(path.dentry, anon_inode_inode);
 
-       error = -ENFILE;
        file = alloc_file(&path, OPEN_FMODE(flags), fops);
-       if (!file)
+       if (IS_ERR(file))
                goto err_dput;
        file->f_mapping = anon_inode_inode->i_mapping;
 
-       file->f_pos = 0;
        file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
-       file->f_version = 0;
        file->private_data = priv;
 
        return file;
@@ -177,7 +173,7 @@ err_dput:
        path_put(&path);
 err_module:
        module_put(fops->owner);
-       return ERR_PTR(error);
+       return file;
 }
 EXPORT_SYMBOL_GPL(anon_inode_getfile);
 
index b785e77..3f1128b 100644 (file)
@@ -273,7 +273,7 @@ static inline int autofs_prepare_pipe(struct file *pipe)
 {
        if (!pipe->f_op || !pipe->f_op->write)
                return -EINVAL;
-       if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode))
+       if (!S_ISFIFO(file_inode(pipe)->i_mode))
                return -EINVAL;
        /* We want a packet pipe */
        pipe->f_flags |= O_DIRECT;
index 9f68a37..743c7c2 100644 (file)
@@ -159,7 +159,7 @@ static struct autofs_sb_info *autofs_dev_ioctl_sbi(struct file *f)
        struct inode *inode;
 
        if (f) {
-               inode = f->f_path.dentry->d_inode;
+               inode = file_inode(f);
                sbi = autofs4_sbi(inode->i_sb);
        }
        return sbi;
index c934476..9bd1625 100644 (file)
@@ -383,8 +383,10 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
                                goto done;
                        }
                } else {
-                       if (!simple_empty(dentry))
+                       if (!simple_empty(dentry)) {
+                               spin_unlock(&sbi->fs_lock);
                                goto done;
+                       }
                }
                ino->flags |= AUTOFS_INF_PENDING;
                spin_unlock(&sbi->fs_lock);
@@ -587,7 +589,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
        
        /* This allows root to remove symlinks */
        if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        if (atomic_dec_and_test(&ino->count)) {
                p_ino = autofs4_dentry_ino(dentry->d_parent);
@@ -874,7 +876,7 @@ static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
 static long autofs4_root_ioctl(struct file *filp,
                               unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        return autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
 }
 
@@ -882,7 +884,7 @@ static long autofs4_root_ioctl(struct file *filp,
 static long autofs4_root_compat_ioctl(struct file *filp,
                             unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int ret;
 
        if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
index 03bc1d3..3db70da 100644 (file)
@@ -42,10 +42,8 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
        while (wq) {
                nwq = wq->next;
                wq->status = -ENOENT; /* Magic is gone - report failure */
-               if (wq->name.name) {
-                       kfree(wq->name.name);
-                       wq->name.name = NULL;
-               }
+               kfree(wq->name.name);
+               wq->name.name = NULL;
                wq->wait_ctr--;
                wake_up_interruptible(&wq->queue);
                wq = nwq;
index 2b3bda8..c8f4e25 100644 (file)
@@ -213,7 +213,7 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
 static int
 befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
        befs_off_t value;
index 2785ef9..3f422f6 100644 (file)
@@ -28,7 +28,7 @@ static struct buffer_head *bfs_find_entry(struct inode *dir,
 
 static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir)
 {
-       struct inode *dir = f->f_path.dentry->d_inode;
+       struct inode *dir = file_inode(f);
        struct buffer_head *bh;
        struct bfs_dirent *de;
        struct bfs_sb_info *info = BFS_SB(dir->i_sb);
index 6043567..bbc8f88 100644 (file)
@@ -214,7 +214,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
        if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
             N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
            N_TRSIZE(ex) || N_DRSIZE(ex) ||
-           i_size_read(bprm->file->f_path.dentry->d_inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+           i_size_read(file_inode(bprm->file)) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
                return -ENOEXEC;
        }
 
@@ -367,7 +367,7 @@ static int load_aout_library(struct file *file)
        int retval;
        struct exec ex;
 
-       inode = file->f_path.dentry->d_inode;
+       inode = file_inode(file);
 
        retval = -ENOEXEC;
        error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
index ff9dbc6..3939829 100644 (file)
@@ -322,6 +322,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        return 0;
 }
 
+#ifndef elf_map
+
 static unsigned long elf_map(struct file *filep, unsigned long addr,
                struct elf_phdr *eppnt, int prot, int type,
                unsigned long total_size)
@@ -356,6 +358,8 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
        return(map_addr);
 }
 
+#endif /* !elf_map */
+
 static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
 {
        int i, first_idx = -1, last_idx = -1;
@@ -1141,7 +1145,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
 
        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
-               if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
+               if (file_inode(vma->vm_file)->i_nlink == 0 ?
                    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
                        goto whole;
                return 0;
index cb240dd..9c13e02 100644 (file)
@@ -909,7 +909,7 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
 
 dynamic_error:
        printk("ELF FDPIC %s with invalid DYNAMIC section (inode=%lu)\n",
-              what, file->f_path.dentry->d_inode->i_ino);
+              what, file_inode(file)->i_ino);
        return -ELIBBAD;
 }
 
@@ -1219,7 +1219,7 @@ static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
 
        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
-               if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0) {
+               if (file_inode(vma->vm_file)->i_nlink == 0) {
                        dump_ok = test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
                        kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
                               vma->vm_flags, dump_ok ? "yes" : "no");
index b563719..2036d21 100644 (file)
@@ -438,7 +438,7 @@ static int load_flat_file(struct linux_binprm * bprm,
        int ret;
 
        hdr = ((struct flat_hdr *) bprm->buf);          /* exec-header */
-       inode = bprm->file->f_path.dentry->d_inode;
+       inode = file_inode(bprm->file);
 
        text_len  = ntohl(hdr->data_start);
        data_len  = ntohl(hdr->data_end) - ntohl(hdr->data_start);
index 0c8869f..fecbbf3 100644 (file)
@@ -531,7 +531,7 @@ static void kill_node(Node *e)
 static ssize_t
 bm_entry_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
 {
-       Node *e = file->f_path.dentry->d_inode->i_private;
+       Node *e = file_inode(file)->i_private;
        ssize_t res;
        char *page;
 
@@ -550,7 +550,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
                                size_t count, loff_t *ppos)
 {
        struct dentry *root;
-       Node *e = file->f_path.dentry->d_inode->i_private;
+       Node *e = file_inode(file)->i_private;
        int res = parse_command(buffer, count);
 
        switch (res) {
index b96fc6c..bb5768f 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1428,6 +1428,8 @@ void bio_endio(struct bio *bio, int error)
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;
 
+       trace_block_bio_complete(bio, error);
+
        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
 }
index 78333a3..aea605c 100644 (file)
@@ -318,7 +318,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping,
 
 /*
  * private llseek:
- * for a block special file file->f_path.dentry->d_inode->i_size is zero
+ * for a block special file file_inode(file)->i_size is zero
  * so we compute the size by hand (just as in block_read/write above)
  */
 static loff_t block_llseek(struct file *file, loff_t offset, int whence)
@@ -1033,7 +1033,9 @@ void bd_set_size(struct block_device *bdev, loff_t size)
 {
        unsigned bsize = bdev_logical_block_size(bdev);
 
-       bdev->bd_inode->i_size = size;
+       mutex_lock(&bdev->bd_inode->i_mutex);
+       i_size_write(bdev->bd_inode, size);
+       mutex_unlock(&bdev->bd_inode->i_mutex);
        while (bsize < PAGE_CACHE_SIZE) {
                if (size & bsize)
                        break;
@@ -1118,7 +1120,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                }
                        }
 
-                       if (!ret && !bdev->bd_openers) {
+                       if (!ret) {
                                bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
                                bdi = blk_get_backing_dev_info(bdev);
                                if (bdi == NULL)
index ccd25ba..9a8622a 100644 (file)
@@ -5,6 +5,9 @@ config BTRFS_FS
        select ZLIB_DEFLATE
        select LZO_COMPRESS
        select LZO_DECOMPRESS
+       select RAID6_PQ
+       select XOR_BLOCKS
+
        help
          Btrfs is a new filesystem with extents, writable snapshotting,
          support for multiple devices and many more features.
index 7df3e0f..3932224 100644 (file)
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
           extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
           export.o tree-log.o free-space-cache.o zlib.o lzo.o \
           compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-          reada.o backref.o ulist.o qgroup.o send.o dev-replace.o
+          reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
index 04edf69..bd605c8 100644 (file)
@@ -352,11 +352,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                err = __resolve_indirect_ref(fs_info, search_commit_root,
                                             time_seq, ref, parents,
                                             extent_item_pos);
-               if (err) {
-                       if (ret == 0)
-                               ret = err;
+               if (err)
                        continue;
-               }
 
                /* we put the first parent into the ref at hand */
                ULIST_ITER_INIT(&uiter);
index d61feca..310a7f6 100644 (file)
@@ -19,7 +19,7 @@
 #ifndef __BTRFS_BACKREF__
 #define __BTRFS_BACKREF__
 
-#include "ioctl.h"
+#include <linux/btrfs.h>
 #include "ulist.h"
 #include "extent_io.h"
 
index 2a8c242..d9b97d4 100644 (file)
@@ -40,6 +40,8 @@
 #define BTRFS_INODE_HAS_ASYNC_EXTENT           6
 #define BTRFS_INODE_NEEDS_FULL_SYNC            7
 #define BTRFS_INODE_COPY_EVERYTHING            8
+#define BTRFS_INODE_IN_DELALLOC_LIST           9
+#define BTRFS_INODE_READDIO_NEED_LOCK          10
 
 /* in memory btrfs inode */
 struct btrfs_inode {
@@ -216,4 +218,22 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
        return 0;
 }
 
+/*
+ * Disable DIO read nolock optimization, so new dio readers will be forced
+ * to grab i_mutex. It is used to avoid the endless truncate due to
+ * nonlocked dio read.
+ */
+static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
+{
+       set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags);
+       smp_mb();
+}
+
+static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
+{
+       smp_mb__before_clear_bit();
+       clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+                 &BTRFS_I(inode)->runtime_flags);
+}
+
 #endif
index 11d47bf..18af6f4 100644 (file)
@@ -813,8 +813,7 @@ static int btrfsic_process_superblock_dev_mirror(
            (bh->b_data + (dev_bytenr & 4095));
 
        if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
-           strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
-                   sizeof(super_tmp->magic)) ||
+           super_tmp->magic != cpu_to_le64(BTRFS_MAGIC) ||
            memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
            btrfs_super_nodesize(super_tmp) != state->metablock_size ||
            btrfs_super_leafsize(super_tmp) != state->metablock_size ||
index 94ab2f8..15b9408 100644 (file)
@@ -372,7 +372,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_size)
-                       ret = io_tree->ops->merge_bio_hook(page, 0,
+                       ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
                                                           PAGE_CACHE_SIZE,
                                                           bio, 0);
                else
@@ -655,7 +655,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                page->index = em_start >> PAGE_CACHE_SHIFT;
 
                if (comp_bio->bi_size)
-                       ret = tree->ops->merge_bio_hook(page, 0,
+                       ret = tree->ops->merge_bio_hook(READ, page, 0,
                                                        PAGE_CACHE_SIZE,
                                                        comp_bio, 0);
                else
index eea5da7..ecd25a1 100644 (file)
@@ -1138,6 +1138,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
                switch (tm->op) {
                case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
                        BUG_ON(tm->slot < n);
+                       /* Fallthrough */
                case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
                case MOD_LOG_KEY_REMOVE:
                        btrfs_set_node_key(eb, &tm->key, tm->slot);
@@ -1222,7 +1223,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 
        __tree_mod_log_rewind(eb_rewin, time_seq, tm);
        WARN_ON(btrfs_header_nritems(eb_rewin) >
-               BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
+               BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
 
        return eb_rewin;
 }
@@ -1441,7 +1442,7 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
  */
 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct extent_buffer *parent,
-                      int start_slot, int cache_only, u64 *last_ret,
+                      int start_slot, u64 *last_ret,
                       struct btrfs_key *progress)
 {
        struct extent_buffer *cur;
@@ -1461,8 +1462,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
        struct btrfs_disk_key disk_key;
 
        parent_level = btrfs_header_level(parent);
-       if (cache_only && parent_level != 1)
-               return 0;
 
        WARN_ON(trans->transaction != root->fs_info->running_transaction);
        WARN_ON(trans->transid != root->fs_info->generation);
@@ -1508,10 +1507,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                else
                        uptodate = 0;
                if (!cur || !uptodate) {
-                       if (cache_only) {
-                               free_extent_buffer(cur);
-                               continue;
-                       }
                        if (!cur) {
                                cur = read_tree_block(root, blocknr,
                                                         blocksize, gen);
@@ -4825,8 +4820,8 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
 
 /*
  * A helper function to walk down the tree starting at min_key, and looking
- * for nodes or leaves that are either in cache or have a minimum
- * transaction id.  This is used by the btree defrag code, and tree logging
+ * for nodes or leaves that have a minimum transaction id.
+ * This is used by the btree defrag code, and tree logging
  *
  * This does not cow, but it does stuff the starting key it finds back
  * into min_key, so you can call btrfs_search_slot with cow=1 on the
@@ -4847,7 +4842,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
  */
 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
                         struct btrfs_key *max_key,
-                        struct btrfs_path *path, int cache_only,
+                        struct btrfs_path *path,
                         u64 min_trans)
 {
        struct extent_buffer *cur;
@@ -4887,15 +4882,12 @@ again:
                if (sret && slot > 0)
                        slot--;
                /*
-                * check this node pointer against the cache_only and
-                * min_trans parameters.  If it isn't in cache or is too
-                * old, skip to the next one.
+                * check this node pointer against the min_trans parameters.
+                * If it is too old, skip to the next one.
                 */
                while (slot < nritems) {
                        u64 blockptr;
                        u64 gen;
-                       struct extent_buffer *tmp;
-                       struct btrfs_disk_key disk_key;
 
                        blockptr = btrfs_node_blockptr(cur, slot);
                        gen = btrfs_node_ptr_generation(cur, slot);
@@ -4903,27 +4895,7 @@ again:
                                slot++;
                                continue;
                        }
-                       if (!cache_only)
-                               break;
-
-                       if (max_key) {
-                               btrfs_node_key(cur, &disk_key, slot);
-                               if (comp_keys(&disk_key, max_key) >= 0) {
-                                       ret = 1;
-                                       goto out;
-                               }
-                       }
-
-                       tmp = btrfs_find_tree_block(root, blockptr,
-                                           btrfs_level_size(root, level - 1));
-
-                       if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
-                               free_extent_buffer(tmp);
-                               break;
-                       }
-                       if (tmp)
-                               free_extent_buffer(tmp);
-                       slot++;
+                       break;
                }
 find_next_key:
                /*
@@ -4934,7 +4906,7 @@ find_next_key:
                        path->slots[level] = slot;
                        btrfs_set_path_blocking(path);
                        sret = btrfs_find_next_key(root, path, min_key, level,
-                                                 cache_only, min_trans);
+                                                 min_trans);
                        if (sret == 0) {
                                btrfs_release_path(path);
                                goto again;
@@ -5399,8 +5371,7 @@ out:
 /*
  * this is similar to btrfs_next_leaf, but does not try to preserve
  * and fixup the path.  It looks for and returns the next key in the
- * tree based on the current path and the cache_only and min_trans
- * parameters.
+ * tree based on the current path and the min_trans parameters.
  *
  * 0 is returned if another key is found, < 0 if there are any errors
  * and 1 is returned if there are no higher keys in the tree
@@ -5409,8 +5380,7 @@ out:
  * calling this function.
  */
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
-                       struct btrfs_key *key, int level,
-                       int cache_only, u64 min_trans)
+                       struct btrfs_key *key, int level, u64 min_trans)
 {
        int slot;
        struct extent_buffer *c;
@@ -5461,22 +5431,8 @@ next:
                if (level == 0)
                        btrfs_item_key_to_cpu(c, key, slot);
                else {
-                       u64 blockptr = btrfs_node_blockptr(c, slot);
                        u64 gen = btrfs_node_ptr_generation(c, slot);
 
-                       if (cache_only) {
-                               struct extent_buffer *cur;
-                               cur = btrfs_find_tree_block(root, blockptr,
-                                           btrfs_level_size(root, level - 1));
-                               if (!cur ||
-                                   btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
-                                       slot++;
-                                       if (cur)
-                                               free_extent_buffer(cur);
-                                       goto next;
-                               }
-                               free_extent_buffer(cur);
-                       }
                        if (gen < min_trans) {
                                slot++;
                                goto next;
index 547b7b0..0d82922 100644 (file)
 #include <trace/events/btrfs.h>
 #include <asm/kmap_types.h>
 #include <linux/pagemap.h>
+#include <linux/btrfs.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
-#include "ioctl.h"
 
 struct btrfs_trans_handle;
 struct btrfs_transaction;
@@ -46,7 +46,7 @@ extern struct kmem_cache *btrfs_path_cachep;
 extern struct kmem_cache *btrfs_free_space_cachep;
 struct btrfs_ordered_sum;
 
-#define BTRFS_MAGIC "_BHRfS_M"
+#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
 
 #define BTRFS_MAX_MIRRORS 3
 
@@ -191,6 +191,8 @@ static int btrfs_csum_sizes[] = { 4, 0 };
 /* ioprio of readahead is set to idle */
 #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
 
+#define BTRFS_DIRTY_METADATA_THRESH    (32 * 1024 * 1024)
+
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
  * block layout.
@@ -336,7 +338,10 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
 /*
  * File system states
  */
+#define BTRFS_FS_STATE_ERROR           0
+#define BTRFS_FS_STATE_REMOUNTING      1
 
+/* Super block flags */
 /* Errors detected */
 #define BTRFS_SUPER_FLAG_ERROR         (1ULL << 2)
 
@@ -502,6 +507,7 @@ struct btrfs_super_block {
 #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA    (1ULL << 5)
 
 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF   (1ULL << 6)
+#define BTRFS_FEATURE_INCOMPAT_RAID56          (1ULL << 7)
 
 #define BTRFS_FEATURE_COMPAT_SUPP              0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP           0ULL
@@ -511,6 +517,7 @@ struct btrfs_super_block {
         BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |          \
         BTRFS_FEATURE_INCOMPAT_BIG_METADATA |          \
         BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |          \
+        BTRFS_FEATURE_INCOMPAT_RAID56 |                \
         BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
 
 /*
@@ -952,8 +959,20 @@ struct btrfs_dev_replace_item {
 #define BTRFS_BLOCK_GROUP_RAID1                (1ULL << 4)
 #define BTRFS_BLOCK_GROUP_DUP          (1ULL << 5)
 #define BTRFS_BLOCK_GROUP_RAID10       (1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RAID5    (1 << 7)
+#define BTRFS_BLOCK_GROUP_RAID6    (1 << 8)
 #define BTRFS_BLOCK_GROUP_RESERVED     BTRFS_AVAIL_ALLOC_BIT_SINGLE
-#define BTRFS_NR_RAID_TYPES            5
+
+enum btrfs_raid_types {
+       BTRFS_RAID_RAID10,
+       BTRFS_RAID_RAID1,
+       BTRFS_RAID_DUP,
+       BTRFS_RAID_RAID0,
+       BTRFS_RAID_SINGLE,
+       BTRFS_RAID_RAID5,
+       BTRFS_RAID_RAID6,
+       BTRFS_NR_RAID_TYPES
+};
 
 #define BTRFS_BLOCK_GROUP_TYPE_MASK    (BTRFS_BLOCK_GROUP_DATA |    \
                                         BTRFS_BLOCK_GROUP_SYSTEM |  \
@@ -961,6 +980,8 @@ struct btrfs_dev_replace_item {
 
 #define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 |   \
                                         BTRFS_BLOCK_GROUP_RAID1 |   \
+                                        BTRFS_BLOCK_GROUP_RAID5 |   \
+                                        BTRFS_BLOCK_GROUP_RAID6 |   \
                                         BTRFS_BLOCK_GROUP_DUP |     \
                                         BTRFS_BLOCK_GROUP_RAID10)
 /*
@@ -1185,6 +1206,10 @@ struct btrfs_block_group_cache {
        u64 flags;
        u64 sectorsize;
        u64 cache_generation;
+
+       /* for raid56, this is a full stripe, without parity */
+       unsigned long full_stripe_len;
+
        unsigned int ro:1;
        unsigned int dirty:1;
        unsigned int iref:1;
@@ -1225,6 +1250,28 @@ struct seq_list {
        u64 seq;
 };
 
+enum btrfs_orphan_cleanup_state {
+       ORPHAN_CLEANUP_STARTED  = 1,
+       ORPHAN_CLEANUP_DONE     = 2,
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash {
+       struct list_head hash_list;
+       wait_queue_head_t wait;
+       spinlock_t lock;
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash_table {
+       struct list_head stripe_cache;
+       spinlock_t cache_lock;
+       int cache_size;
+       struct btrfs_stripe_hash table[];
+};
+
+#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+
 /* fs_info */
 struct reloc_control;
 struct btrfs_device;
@@ -1250,6 +1297,7 @@ struct btrfs_fs_info {
 
        /* block group cache stuff */
        spinlock_t block_group_cache_lock;
+       u64 first_logical_byte;
        struct rb_root block_group_cache_tree;
 
        /* keep track of unallocated space */
@@ -1288,7 +1336,23 @@ struct btrfs_fs_info {
        u64 last_trans_log_full_commit;
        unsigned long mount_opt;
        unsigned long compress_type:4;
+       /*
+        * It is a suggestive number, the read side is safe even it gets a
+        * wrong number because we will write out the data into a regular
+        * extent. The write side(mount/remount) is under ->s_umount lock,
+        * so it is also safe.
+        */
        u64 max_inline;
+       /*
+        * Protected by ->chunk_mutex and sb->s_umount.
+        *
+        * The reason that we use two lock to protect it is because only
+        * remount and mount operations can change it and these two operations
+        * are under sb->s_umount, but the read side (chunk allocation) can not
+        * acquire sb->s_umount or the deadlock would happen. So we use two
+        * locks to protect it. On the write side, we must acquire two locks,
+        * and on the read side, we just need acquire one of them.
+        */
        u64 alloc_start;
        struct btrfs_transaction *running_transaction;
        wait_queue_head_t transaction_throttle;
@@ -1307,6 +1371,13 @@ struct btrfs_fs_info {
        struct mutex cleaner_mutex;
        struct mutex chunk_mutex;
        struct mutex volume_mutex;
+
+       /* this is used during read/modify/write to make sure
+        * no two ios are trying to mod the same stripe at the same
+        * time
+        */
+       struct btrfs_stripe_hash_table *stripe_hash_table;
+
        /*
         * this protects the ordered operations list only while we are
         * processing all of the entries on it.  This way we make
@@ -1365,6 +1436,7 @@ struct btrfs_fs_info {
         */
        struct list_head ordered_extents;
 
+       spinlock_t delalloc_lock;
        /*
         * all of the inodes that have delalloc bytes.  It is possible for
         * this list to be empty even when there is still dirty data=ordered
@@ -1372,13 +1444,6 @@ struct btrfs_fs_info {
         */
        struct list_head delalloc_inodes;
 
-       /*
-        * special rename and truncate targets that must be on disk before
-        * we're allowed to commit.  This is basically the ext3 style
-        * data=ordered list.
-        */
-       struct list_head ordered_operations;
-
        /*
         * there is a pool of worker threads for checksumming during writes
         * and a pool for checksumming after reads.  This is because readers
@@ -1395,6 +1460,8 @@ struct btrfs_fs_info {
        struct btrfs_workers flush_workers;
        struct btrfs_workers endio_workers;
        struct btrfs_workers endio_meta_workers;
+       struct btrfs_workers endio_raid56_workers;
+       struct btrfs_workers rmw_workers;
        struct btrfs_workers endio_meta_write_workers;
        struct btrfs_workers endio_write_workers;
        struct btrfs_workers endio_freespace_worker;
@@ -1423,10 +1490,12 @@ struct btrfs_fs_info {
 
        u64 total_pinned;
 
-       /* protected by the delalloc lock, used to keep from writing
-        * metadata until there is a nice batch
-        */
-       u64 dirty_metadata_bytes;
+       /* used to keep from writing metadata until there is a nice batch */
+       struct percpu_counter dirty_metadata_bytes;
+       struct percpu_counter delalloc_bytes;
+       s32 dirty_metadata_batch;
+       s32 delalloc_batch;
+
        struct list_head dirty_cowonly_roots;
 
        struct btrfs_fs_devices *fs_devices;
@@ -1442,9 +1511,6 @@ struct btrfs_fs_info {
 
        struct reloc_control *reloc_ctl;
 
-       spinlock_t delalloc_lock;
-       u64 delalloc_bytes;
-
        /* data_alloc_cluster is only used in ssd mode */
        struct btrfs_free_cluster data_alloc_cluster;
 
@@ -1456,6 +1522,8 @@ struct btrfs_fs_info {
        struct rb_root defrag_inodes;
        atomic_t defrag_running;
 
+       /* Used to protect avail_{data, metadata, system}_alloc_bits */
+       seqlock_t profiles_lock;
        /*
         * these three are in extended format (availability of single
         * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
@@ -1520,7 +1588,7 @@ struct btrfs_fs_info {
        u64 qgroup_seq;
 
        /* filesystem state */
-       u64 fs_state;
+       unsigned long fs_state;
 
        struct btrfs_delayed_root *delayed_root;
 
@@ -1623,6 +1691,9 @@ struct btrfs_root {
 
        struct list_head root_list;
 
+       spinlock_t log_extents_lock[2];
+       struct list_head logged_list[2];
+
        spinlock_t orphan_lock;
        atomic_t orphan_inodes;
        struct btrfs_block_rsv *orphan_block_rsv;
@@ -1832,6 +1903,7 @@ struct btrfs_ioctl_defrag_range_args {
 
 #define btrfs_clear_opt(o, opt)                ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
+#define btrfs_raw_test_opt(o, opt)     ((o) & BTRFS_MOUNT_##opt)
 #define btrfs_test_opt(root, opt)      ((root)->fs_info->mount_opt & \
                                         BTRFS_MOUNT_##opt)
 /*
@@ -2936,8 +3008,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             u64 num_bytes, u64 *refs, u64 *flags);
 int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num, int reserved);
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
-                                   struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
                                    u64 bytenr, u64 num_bytes);
 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
@@ -3035,8 +3106,13 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
                                  struct inode *inode);
 void btrfs_orphan_release_metadata(struct inode *inode);
-int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
-                               struct btrfs_pending_snapshot *pending);
+int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+                                    struct btrfs_block_rsv *rsv,
+                                    int nitems,
+                                    u64 *qgroup_reserved);
+void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+                                     struct btrfs_block_rsv *rsv,
+                                     u64 qgroup_reserved);
 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
@@ -3092,10 +3168,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
                        struct btrfs_key *key, int lowest_level,
-                       int cache_only, u64 min_trans);
+                       u64 min_trans);
 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
                         struct btrfs_key *max_key,
-                        struct btrfs_path *path, int cache_only,
+                        struct btrfs_path *path,
                         u64 min_trans);
 enum btrfs_compare_tree_result {
        BTRFS_COMPARE_TREE_NEW,
@@ -3148,7 +3224,7 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
                               int find_higher, int return_any);
 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct extent_buffer *parent,
-                      int start_slot, int cache_only, u64 *last_ret,
+                      int start_slot, u64 *last_ret,
                       struct btrfs_key *progress);
 void btrfs_release_path(struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
@@ -3459,9 +3535,9 @@ int btrfs_writepages(struct address_space *mapping,
                     struct writeback_control *wbc);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
                             struct btrfs_root *new_root, u64 new_dirid);
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
-                        size_t size, struct bio *bio, unsigned long bio_flags);
-
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+                        size_t size, struct bio *bio,
+                        unsigned long bio_flags);
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
@@ -3543,7 +3619,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 
 /* tree-defrag.c */
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root, int cache_only);
+                       struct btrfs_root *root);
 
 /* sysfs.c */
 int btrfs_init_sysfs(void);
@@ -3620,11 +3696,14 @@ __printf(5, 6)
 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
                   unsigned int line, int errno, const char *fmt, ...);
 
+/*
+ * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
+ * will panic().  Otherwise we BUG() here.
+ */
 #define btrfs_panic(fs_info, errno, fmt, args...)                      \
 do {                                                                   \
-       struct btrfs_fs_info *_i = (fs_info);                           \
-       __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args);      \
-       BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR));    \
+       __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
+       BUG();                                                          \
 } while (0)
 
 /* acl.c */
@@ -3745,4 +3824,11 @@ static inline int is_fstree(u64 rootid)
                return 1;
        return 0;
 }
+
+static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
+{
+       return signal_pending(current);
+}
+
+
 #endif
index 3483603..0b278b1 100644 (file)
@@ -875,7 +875,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_delayed_item *delayed_item)
 {
        struct extent_buffer *leaf;
-       struct btrfs_item *item;
        char *ptr;
        int ret;
 
@@ -886,7 +885,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 
        leaf = path->nodes[0];
 
-       item = btrfs_item_nr(leaf, path->slots[0]);
        ptr = btrfs_item_ptr(leaf, path->slots[0], char);
 
        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
@@ -1065,32 +1063,25 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
        }
 }
 
-static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
-                                     struct btrfs_root *root,
-                                     struct btrfs_path *path,
-                                     struct btrfs_delayed_node *node)
+static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+                                       struct btrfs_root *root,
+                                       struct btrfs_path *path,
+                                       struct btrfs_delayed_node *node)
 {
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        int ret;
 
-       mutex_lock(&node->mutex);
-       if (!node->inode_dirty) {
-               mutex_unlock(&node->mutex);
-               return 0;
-       }
-
        key.objectid = node->inode_id;
        btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
        key.offset = 0;
+
        ret = btrfs_lookup_inode(trans, root, path, &key, 1);
        if (ret > 0) {
                btrfs_release_path(path);
-               mutex_unlock(&node->mutex);
                return -ENOENT;
        } else if (ret < 0) {
-               mutex_unlock(&node->mutex);
                return ret;
        }
 
@@ -1105,11 +1096,47 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 
        btrfs_delayed_inode_release_metadata(root, node);
        btrfs_release_delayed_inode(node);
-       mutex_unlock(&node->mutex);
 
        return 0;
 }
 
+static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+                                            struct btrfs_root *root,
+                                            struct btrfs_path *path,
+                                            struct btrfs_delayed_node *node)
+{
+       int ret;
+
+       mutex_lock(&node->mutex);
+       if (!node->inode_dirty) {
+               mutex_unlock(&node->mutex);
+               return 0;
+       }
+
+       ret = __btrfs_update_delayed_inode(trans, root, path, node);
+       mutex_unlock(&node->mutex);
+       return ret;
+}
+
+static inline int
+__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+                                  struct btrfs_path *path,
+                                  struct btrfs_delayed_node *node)
+{
+       int ret;
+
+       ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+       if (ret)
+               return ret;
+
+       ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+       if (ret)
+               return ret;
+
+       ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+       return ret;
+}
+
 /*
  * Called when committing the transaction.
  * Returns 0 on success.
@@ -1119,7 +1146,6 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root, int nr)
 {
-       struct btrfs_root *curr_root = root;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
@@ -1142,15 +1168,8 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 
        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node && (!count || (count && nr--))) {
-               curr_root = curr_node->root;
-               ret = btrfs_insert_delayed_items(trans, path, curr_root,
-                                                curr_node);
-               if (!ret)
-                       ret = btrfs_delete_delayed_items(trans, path,
-                                               curr_root, curr_node);
-               if (!ret)
-                       ret = btrfs_update_delayed_inode(trans, curr_root,
-                                               path, curr_node);
+               ret = __btrfs_commit_inode_delayed_items(trans, path,
+                                                        curr_node);
                if (ret) {
                        btrfs_release_delayed_node(curr_node);
                        curr_node = NULL;
@@ -1183,51 +1202,93 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
        return __btrfs_run_delayed_items(trans, root, nr);
 }
 
-static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
-                                             struct btrfs_delayed_node *node)
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+                                    struct inode *inode)
 {
+       struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;
 
+       if (!delayed_node)
+               return 0;
+
+       mutex_lock(&delayed_node->mutex);
+       if (!delayed_node->count) {
+               mutex_unlock(&delayed_node->mutex);
+               btrfs_release_delayed_node(delayed_node);
+               return 0;
+       }
+       mutex_unlock(&delayed_node->mutex);
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;
 
        block_rsv = trans->block_rsv;
-       trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
+       trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
 
-       ret = btrfs_insert_delayed_items(trans, path, node->root, node);
-       if (!ret)
-               ret = btrfs_delete_delayed_items(trans, path, node->root, node);
-       if (!ret)
-               ret = btrfs_update_delayed_inode(trans, node->root, path, node);
-       btrfs_free_path(path);
+       ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
 
+       btrfs_release_delayed_node(delayed_node);
+       btrfs_free_path(path);
        trans->block_rsv = block_rsv;
+
        return ret;
 }
 
-int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
-                                    struct inode *inode)
+int btrfs_commit_inode_delayed_inode(struct inode *inode)
 {
+       struct btrfs_trans_handle *trans;
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+       struct btrfs_path *path;
+       struct btrfs_block_rsv *block_rsv;
        int ret;
 
        if (!delayed_node)
                return 0;
 
        mutex_lock(&delayed_node->mutex);
-       if (!delayed_node->count) {
+       if (!delayed_node->inode_dirty) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);
 
-       ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+       trans = btrfs_join_transaction(delayed_node->root);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out;
+       }
+
+       path = btrfs_alloc_path();
+       if (!path) {
+               ret = -ENOMEM;
+               goto trans_out;
+       }
+       path->leave_spinning = 1;
+
+       block_rsv = trans->block_rsv;
+       trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+
+       mutex_lock(&delayed_node->mutex);
+       if (delayed_node->inode_dirty)
+               ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
+                                                  path, delayed_node);
+       else
+               ret = 0;
+       mutex_unlock(&delayed_node->mutex);
+
+       btrfs_free_path(path);
+       trans->block_rsv = block_rsv;
+trans_out:
+       btrfs_end_transaction(trans, delayed_node->root);
+       btrfs_btree_balance_dirty(delayed_node->root);
+out:
        btrfs_release_delayed_node(delayed_node);
+
        return ret;
 }
 
@@ -1258,7 +1319,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
        struct btrfs_root *root;
        struct btrfs_block_rsv *block_rsv;
        int need_requeue = 0;
-       int ret;
 
        async_node = container_of(work, struct btrfs_async_delayed_node, work);
 
@@ -1277,14 +1337,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
        block_rsv = trans->block_rsv;
        trans->block_rsv = &root->fs_info->delayed_block_rsv;
 
-       ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
-       if (!ret)
-               ret = btrfs_delete_delayed_items(trans, path, root,
-                                                delayed_node);
-
-       if (!ret)
-               btrfs_update_delayed_inode(trans, root, path, delayed_node);
-
+       __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
        /*
         * Maybe new delayed items have been inserted, so we need requeue
         * the work. Besides that, we must dequeue the empty delayed nodes
index 4f808e1..78b6ad0 100644 (file)
@@ -117,6 +117,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 /* Used for evicting the inode. */
 void btrfs_remove_delayed_node(struct inode *inode);
 void btrfs_kill_delayed_inode_items(struct inode *inode);
+int btrfs_commit_inode_delayed_inode(struct inode *inode);
 
 
 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
index ae94117..b7a0641 100644 (file)
 #include "delayed-ref.h"
 #include "transaction.h"
 
+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking.  For subvolume trees
  * we queue up extent allocations and backref maintenance for
@@ -422,6 +426,14 @@ again:
        return 1;
 }
 
+void btrfs_release_ref_cluster(struct list_head *cluster)
+{
+       struct list_head *pos, *q;
+
+       list_for_each_safe(pos, q, cluster)
+               list_del_init(pos);
+}
+
 /*
  * helper function to update an extent delayed ref in the
  * rbtree.  existing and update must both have the same
@@ -511,7 +523,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
-                       kfree(ref->extent_op);
+                       btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
@@ -592,7 +604,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
-               kfree(head_ref);
+               kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
        } else {
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
@@ -653,7 +665,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
-               kfree(full_ref);
+               kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
@@ -714,7 +726,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
-               kfree(full_ref);
+               kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
@@ -738,13 +750,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_ref_root *delayed_refs;
 
        BUG_ON(extent_op && extent_op->is_data);
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
+       ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;
 
-       head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+       head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
-               kfree(ref);
+               kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }
 
@@ -786,13 +798,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_ref_root *delayed_refs;
 
        BUG_ON(extent_op && !extent_op->is_data);
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
+       ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;
 
-       head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+       head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
-               kfree(ref);
+               kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }
 
@@ -826,7 +838,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
 
-       head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+       head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;
 
@@ -860,3 +872,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
 }
+
+void btrfs_delayed_ref_exit(void)
+{
+       if (btrfs_delayed_ref_head_cachep)
+               kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+       if (btrfs_delayed_tree_ref_cachep)
+               kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+       if (btrfs_delayed_data_ref_cachep)
+               kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+       if (btrfs_delayed_extent_op_cachep)
+               kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int btrfs_delayed_ref_init(void)
+{
+       btrfs_delayed_ref_head_cachep = kmem_cache_create(
+                               "btrfs_delayed_ref_head",
+                               sizeof(struct btrfs_delayed_ref_head), 0,
+                               SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+       if (!btrfs_delayed_ref_head_cachep)
+               goto fail;
+
+       btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+                               "btrfs_delayed_tree_ref",
+                               sizeof(struct btrfs_delayed_tree_ref), 0,
+                               SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+       if (!btrfs_delayed_tree_ref_cachep)
+               goto fail;
+
+       btrfs_delayed_data_ref_cachep = kmem_cache_create(
+                               "btrfs_delayed_data_ref",
+                               sizeof(struct btrfs_delayed_data_ref), 0,
+                               SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+       if (!btrfs_delayed_data_ref_cachep)
+               goto fail;
+
+       btrfs_delayed_extent_op_cachep = kmem_cache_create(
+                               "btrfs_delayed_extent_op",
+                               sizeof(struct btrfs_delayed_extent_op), 0,
+                               SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+       if (!btrfs_delayed_extent_op_cachep)
+               goto fail;
+
+       return 0;
+fail:
+       btrfs_delayed_ref_exit();
+       return -ENOMEM;
+}
index c9d7036..f75fcaf 100644 (file)
@@ -131,6 +131,15 @@ struct btrfs_delayed_ref_root {
        /* total number of head nodes ready for processing */
        unsigned long num_heads_ready;
 
+       /*
+        * bumped when someone is making progress on the delayed
+        * refs, so that other procs know they are just adding to
+        * contention instead of helping
+        */
+       atomic_t procs_running_refs;
+       atomic_t ref_seq;
+       wait_queue_head_t wait;
+
        /*
         * set when the tree is flushing before a transaction commit,
         * used by the throttling code to decide if new updates need
@@ -141,12 +150,47 @@ struct btrfs_delayed_ref_root {
        u64 run_delayed_start;
 };
 
+extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
+extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
+
+int btrfs_delayed_ref_init(void);
+void btrfs_delayed_ref_exit(void);
+
+static inline struct btrfs_delayed_extent_op *
+btrfs_alloc_delayed_extent_op(void)
+{
+       return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
+}
+
+static inline void
+btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
+{
+       if (op)
+               kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
+}
+
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
 {
        WARN_ON(atomic_read(&ref->refs) == 0);
        if (atomic_dec_and_test(&ref->refs)) {
                WARN_ON(ref->in_tree);
-               kfree(ref);
+               switch (ref->type) {
+               case BTRFS_TREE_BLOCK_REF_KEY:
+               case BTRFS_SHARED_BLOCK_REF_KEY:
+                       kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+                       break;
+               case BTRFS_EXTENT_DATA_REF_KEY:
+               case BTRFS_SHARED_DATA_REF_KEY:
+                       kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+                       break;
+               case 0:
+                       kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
+                       break;
+               default:
+                       BUG();
+               }
        }
 }
 
@@ -176,8 +220,14 @@ struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
 int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head);
+static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
+{
+       mutex_unlock(&head->mutex);
+}
+
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 search_start);
+void btrfs_release_ref_cluster(struct list_head *cluster);
 
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
index 66dbc8d..7ba7b39 100644 (file)
@@ -465,7 +465,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
         * flush all outstanding I/O and inode extent mappings before the
         * copy operation is declared as being finished
         */
-       btrfs_start_delalloc_inodes(root, 0);
+       ret = btrfs_start_delalloc_inodes(root, 0);
+       if (ret) {
+               mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+               return ret;
+       }
        btrfs_wait_ordered_extents(root, 0);
 
        trans = btrfs_start_transaction(root, 0);
index a8f652d..02369a3 100644 (file)
@@ -46,6 +46,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "raid56.h"
 
 #ifdef CONFIG_X86
 #include <asm/cpufeature.h>
@@ -56,7 +57,8 @@ static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                    int read_only);
-static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+                                            struct btrfs_root *root);
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root);
@@ -420,7 +422,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 {
        struct extent_io_tree *tree;
-       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+       u64 start = page_offset(page);
        u64 found_start;
        struct extent_buffer *eb;
 
@@ -639,8 +641,15 @@ err:
                btree_readahead_hook(root, eb, eb->start, ret);
        }
 
-       if (ret)
+       if (ret) {
+               /*
+                * our io error hook is going to dec the io pages
+                * again, we have to make sure it has something
+                * to decrement
+                */
+               atomic_inc(&eb->io_pages);
                clear_extent_buffer_uptodate(eb);
+       }
        free_extent_buffer(eb);
 out:
        return ret;
@@ -654,6 +663,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
        eb = (struct extent_buffer *)page->private;
        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
        eb->read_mirror = failed_mirror;
+       atomic_dec(&eb->io_pages);
        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(root, eb, eb->start, -EIO);
        return -EIO;    /* we fixed nothing */
@@ -670,17 +680,23 @@ static void end_workqueue_bio(struct bio *bio, int err)
        end_io_wq->work.flags = 0;
 
        if (bio->bi_rw & REQ_WRITE) {
-               if (end_io_wq->metadata == 1)
+               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
-               else if (end_io_wq->metadata == 2)
+               else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
                        btrfs_queue_worker(&fs_info->endio_freespace_worker,
                                           &end_io_wq->work);
+               else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+                       btrfs_queue_worker(&fs_info->endio_raid56_workers,
+                                          &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_write_workers,
                                           &end_io_wq->work);
        } else {
-               if (end_io_wq->metadata)
+               if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+                       btrfs_queue_worker(&fs_info->endio_raid56_workers,
+                                          &end_io_wq->work);
+               else if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
                                           &end_io_wq->work);
                else
@@ -695,6 +711,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
  * 0 - if data
  * 1 - if normal metadta
  * 2 - if writing to the free space cache area
+ * 3 - raid parity work
  */
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
@@ -946,18 +963,20 @@ static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
        struct extent_io_tree *tree;
+       struct btrfs_fs_info *fs_info;
+       int ret;
+
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
-               struct btrfs_root *root = BTRFS_I(mapping->host)->root;
-               u64 num_dirty;
-               unsigned long thresh = 32 * 1024 * 1024;
 
                if (wbc->for_kupdate)
                        return 0;
 
+               fs_info = BTRFS_I(mapping->host)->root->fs_info;
                /* this is a bit racy, but that's ok */
-               num_dirty = root->fs_info->dirty_metadata_bytes;
-               if (num_dirty < thresh)
+               ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+                                            BTRFS_DIRTY_METADATA_THRESH);
+               if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
@@ -1125,24 +1144,16 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      struct extent_buffer *buf)
 {
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
        if (btrfs_header_generation(buf) ==
-           root->fs_info->running_transaction->transid) {
+           fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);
 
                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
-                       spin_lock(&root->fs_info->delalloc_lock);
-                       if (root->fs_info->dirty_metadata_bytes >= buf->len)
-                               root->fs_info->dirty_metadata_bytes -= buf->len;
-                       else {
-                               spin_unlock(&root->fs_info->delalloc_lock);
-                               btrfs_panic(root->fs_info, -EOVERFLOW,
-                                         "Can't clear %lu bytes from "
-                                         " dirty_mdatadata_bytes (%llu)",
-                                         buf->len,
-                                         root->fs_info->dirty_metadata_bytes);
-                       }
-                       spin_unlock(&root->fs_info->delalloc_lock);
-
+                       __percpu_counter_add(&fs_info->dirty_metadata_bytes,
+                                            -buf->len,
+                                            fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking(buf);
                        clear_extent_buffer_dirty(buf);
@@ -1178,9 +1189,13 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 
        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
+       INIT_LIST_HEAD(&root->logged_list[0]);
+       INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->accounting_lock);
+       spin_lock_init(&root->log_extents_lock[0]);
+       spin_lock_init(&root->log_extents_lock[1]);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        init_waitqueue_head(&root->log_writer_wait);
@@ -2004,10 +2019,24 @@ int open_ctree(struct super_block *sb,
                goto fail_srcu;
        }
 
+       ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
+       if (ret) {
+               err = ret;
+               goto fail_bdi;
+       }
+       fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
+                                       (1 + ilog2(nr_cpu_ids));
+
+       ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
+       if (ret) {
+               err = ret;
+               goto fail_dirty_metadata_bytes;
+       }
+
        fs_info->btree_inode = new_inode(sb);
        if (!fs_info->btree_inode) {
                err = -ENOMEM;
-               goto fail_bdi;
+               goto fail_delalloc_bytes;
        }
 
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
@@ -2017,7 +2046,6 @@ int open_ctree(struct super_block *sb,
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->delayed_iputs);
        INIT_LIST_HEAD(&fs_info->delalloc_inodes);
-       INIT_LIST_HEAD(&fs_info->ordered_operations);
        INIT_LIST_HEAD(&fs_info->caching_block_groups);
        spin_lock_init(&fs_info->delalloc_lock);
        spin_lock_init(&fs_info->trans_lock);
@@ -2028,6 +2056,7 @@ int open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->tree_mod_seq_lock);
        rwlock_init(&fs_info->tree_mod_log_lock);
        mutex_init(&fs_info->reloc_mutex);
+       seqlock_init(&fs_info->profiles_lock);
 
        init_completion(&fs_info->kobj_unregister);
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
@@ -2126,6 +2155,7 @@ int open_ctree(struct super_block *sb,
 
        spin_lock_init(&fs_info->block_group_cache_lock);
        fs_info->block_group_cache_tree = RB_ROOT;
+       fs_info->first_logical_byte = (u64)-1;
 
        extent_io_tree_init(&fs_info->freed_extents[0],
                             fs_info->btree_inode->i_mapping);
@@ -2165,6 +2195,12 @@ int open_ctree(struct super_block *sb,
        init_waitqueue_head(&fs_info->transaction_blocked_wait);
        init_waitqueue_head(&fs_info->async_submit_wait);
 
+       ret = btrfs_alloc_stripe_hash_table(fs_info);
+       if (ret) {
+               err = ret;
+               goto fail_alloc;
+       }
+
        __setup_root(4096, 4096, 4096, 4096, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
@@ -2187,7 +2223,8 @@ int open_ctree(struct super_block *sb,
                goto fail_alloc;
 
        /* check FS state, whether FS is broken. */
-       fs_info->fs_state |= btrfs_super_flags(disk_super);
+       if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
+               set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 
        ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
        if (ret) {
@@ -2261,6 +2298,8 @@ int open_ctree(struct super_block *sb,
        leafsize = btrfs_super_leafsize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
        stripesize = btrfs_super_stripesize(disk_super);
+       fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
+       fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
 
        /*
         * mixed block groups end up with duplicate but slightly offset
@@ -2332,6 +2371,12 @@ int open_ctree(struct super_block *sb,
        btrfs_init_workers(&fs_info->endio_meta_write_workers,
                           "endio-meta-write", fs_info->thread_pool_size,
                           &fs_info->generic_worker);
+       btrfs_init_workers(&fs_info->endio_raid56_workers,
+                          "endio-raid56", fs_info->thread_pool_size,
+                          &fs_info->generic_worker);
+       btrfs_init_workers(&fs_info->rmw_workers,
+                          "rmw", fs_info->thread_pool_size,
+                          &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
@@ -2350,6 +2395,8 @@ int open_ctree(struct super_block *sb,
         */
        fs_info->endio_workers.idle_thresh = 4;
        fs_info->endio_meta_workers.idle_thresh = 4;
+       fs_info->endio_raid56_workers.idle_thresh = 4;
+       fs_info->rmw_workers.idle_thresh = 2;
 
        fs_info->endio_write_workers.idle_thresh = 2;
        fs_info->endio_meta_write_workers.idle_thresh = 2;
@@ -2366,6 +2413,8 @@ int open_ctree(struct super_block *sb,
        ret |= btrfs_start_workers(&fs_info->fixup_workers);
        ret |= btrfs_start_workers(&fs_info->endio_workers);
        ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+       ret |= btrfs_start_workers(&fs_info->rmw_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
        ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
        ret |= btrfs_start_workers(&fs_info->endio_write_workers);
        ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
@@ -2390,8 +2439,7 @@ int open_ctree(struct super_block *sb,
        sb->s_blocksize = sectorsize;
        sb->s_blocksize_bits = blksize_bits(sectorsize);
 
-       if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
-                   sizeof(disk_super->magic))) {
+       if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
                printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
@@ -2694,13 +2742,13 @@ fail_cleaner:
         * kthreads
         */
        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
-       invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 
 fail_block_groups:
        btrfs_free_block_groups(fs_info);
 
 fail_tree_roots:
        free_root_pointers(fs_info, 1);
+       invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 
 fail_sb_buffer:
        btrfs_stop_workers(&fs_info->generic_worker);
@@ -2710,6 +2758,8 @@ fail_sb_buffer:
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
+       btrfs_stop_workers(&fs_info->endio_raid56_workers);
+       btrfs_stop_workers(&fs_info->rmw_workers);
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -2721,13 +2771,17 @@ fail_alloc:
 fail_iput:
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
-       invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);
+fail_delalloc_bytes:
+       percpu_counter_destroy(&fs_info->delalloc_bytes);
+fail_dirty_metadata_bytes:
+       percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
 fail_bdi:
        bdi_destroy(&fs_info->bdi);
 fail_srcu:
        cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
+       btrfs_free_stripe_hash_table(fs_info);
        btrfs_close_devices(fs_info->fs_devices);
        return err;
 
@@ -2795,8 +2849,7 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
 
                super = (struct btrfs_super_block *)bh->b_data;
                if (btrfs_super_bytenr(super) != bytenr ||
-                   strncmp((char *)(&super->magic), BTRFS_MAGIC,
-                           sizeof(super->magic))) {
+                   super->magic != cpu_to_le64(BTRFS_MAGIC)) {
                        brelse(bh);
                        continue;
                }
@@ -3076,11 +3129,16 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
                                     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
                                      == 0)))
                                        num_tolerated_disk_barrier_failures = 0;
-                               else if (num_tolerated_disk_barrier_failures > 1
-                                        &&
-                                        (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-                                                  BTRFS_BLOCK_GROUP_RAID10)))
-                                       num_tolerated_disk_barrier_failures = 1;
+                               else if (num_tolerated_disk_barrier_failures > 1) {
+                                       if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+                                           BTRFS_BLOCK_GROUP_RAID5 |
+                                           BTRFS_BLOCK_GROUP_RAID10)) {
+                                               num_tolerated_disk_barrier_failures = 1;
+                                       } else if (flags &
+                                                  BTRFS_BLOCK_GROUP_RAID6) {
+                                               num_tolerated_disk_barrier_failures = 2;
+                                       }
+                               }
                        }
                }
                up_read(&sinfo->groups_sem);
@@ -3195,6 +3253,11 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);
 
+       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+               btrfs_free_log(NULL, root);
+               btrfs_free_log_root_tree(NULL, fs_info);
+       }
+
        __btrfs_remove_free_space_cache(root->free_ino_pinned);
        __btrfs_remove_free_space_cache(root->free_ino_ctl);
        free_fs_root(root);
@@ -3339,7 +3402,7 @@ int close_ctree(struct btrfs_root *root)
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }
 
-       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                btrfs_error_commit_super(root);
 
        btrfs_put_block_group_cache(fs_info);
@@ -3352,9 +3415,9 @@ int close_ctree(struct btrfs_root *root)
 
        btrfs_free_qgroup_config(root->fs_info);
 
-       if (fs_info->delalloc_bytes) {
-               printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
-                      (unsigned long long)fs_info->delalloc_bytes);
+       if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
+               printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
+                      percpu_counter_sum(&fs_info->delalloc_bytes));
        }
 
        free_extent_buffer(fs_info->extent_root->node);
@@ -3384,6 +3447,8 @@ int close_ctree(struct btrfs_root *root)
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
+       btrfs_stop_workers(&fs_info->endio_raid56_workers);
+       btrfs_stop_workers(&fs_info->rmw_workers);
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -3401,9 +3466,13 @@ int close_ctree(struct btrfs_root *root)
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
+       percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
+       percpu_counter_destroy(&fs_info->delalloc_bytes);
        bdi_destroy(&fs_info->bdi);
        cleanup_srcu_struct(&fs_info->subvol_srcu);
 
+       btrfs_free_stripe_hash_table(fs_info);
+
        return 0;
 }
 
@@ -3443,11 +3512,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
                        (unsigned long long)transid,
                        (unsigned long long)root->fs_info->generation);
        was_dirty = set_extent_buffer_dirty(buf);
-       if (!was_dirty) {
-               spin_lock(&root->fs_info->delalloc_lock);
-               root->fs_info->dirty_metadata_bytes += buf->len;
-               spin_unlock(&root->fs_info->delalloc_lock);
-       }
+       if (!was_dirty)
+               __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
+                                    buf->len,
+                                    root->fs_info->dirty_metadata_batch);
 }
 
 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
@@ -3457,8 +3525,7 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
         * looks as though older kernels can get into trouble with
         * this code, they end up stuck in balance_dirty_pages forever
         */
-       u64 num_dirty;
-       unsigned long thresh = 32 * 1024 * 1024;
+       int ret;
 
        if (current->flags & PF_MEMALLOC)
                return;
@@ -3466,9 +3533,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
        if (flush_delayed)
                btrfs_balance_delayed_items(root);
 
-       num_dirty = root->fs_info->dirty_metadata_bytes;
-
-       if (num_dirty > thresh) {
+       ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+                                    BTRFS_DIRTY_METADATA_THRESH);
+       if (ret > 0) {
                balance_dirty_pages_ratelimited(
                                   root->fs_info->btree_inode->i_mapping);
        }
@@ -3518,7 +3585,8 @@ void btrfs_error_commit_super(struct btrfs_root *root)
        btrfs_cleanup_transaction(root);
 }
 
-static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+                                            struct btrfs_root *root)
 {
        struct btrfs_inode *btrfs_inode;
        struct list_head splice;
@@ -3528,7 +3596,7 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
 
-       list_splice_init(&root->fs_info->ordered_operations, &splice);
+       list_splice_init(&t->ordered_operations, &splice);
        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         ordered_operations);
@@ -3544,35 +3612,16 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
 
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
 {
-       struct list_head splice;
        struct btrfs_ordered_extent *ordered;
-       struct inode *inode;
-
-       INIT_LIST_HEAD(&splice);
 
        spin_lock(&root->fs_info->ordered_extent_lock);
-
-       list_splice_init(&root->fs_info->ordered_extents, &splice);
-       while (!list_empty(&splice)) {
-               ordered = list_entry(splice.next, struct btrfs_ordered_extent,
-                                    root_extent_list);
-
-               list_del_init(&ordered->root_extent_list);
-               atomic_inc(&ordered->refs);
-
-               /* the inode may be getting freed (in sys_unlink path). */
-               inode = igrab(ordered->inode);
-
-               spin_unlock(&root->fs_info->ordered_extent_lock);
-               if (inode)
-                       iput(inode);
-
-               atomic_set(&ordered->refs, 1);
-               btrfs_put_ordered_extent(ordered);
-
-               spin_lock(&root->fs_info->ordered_extent_lock);
-       }
-
+       /*
+        * This will just short circuit the ordered completion stuff which will
+        * make sure the ordered extent gets properly cleaned up.
+        */
+       list_for_each_entry(ordered, &root->fs_info->ordered_extents,
+                           root_extent_list)
+               set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
        spin_unlock(&root->fs_info->ordered_extent_lock);
 }
 
@@ -3594,11 +3643,11 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
        }
 
        while ((node = rb_first(&delayed_refs->root)) != NULL) {
-               ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+               struct btrfs_delayed_ref_head *head = NULL;
 
+               ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                atomic_set(&ref->refs, 1);
                if (btrfs_delayed_ref_is_head(ref)) {
-                       struct btrfs_delayed_ref_head *head;
 
                        head = btrfs_delayed_node_to_head(ref);
                        if (!mutex_trylock(&head->mutex)) {
@@ -3614,16 +3663,18 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                continue;
                        }
 
-                       kfree(head->extent_op);
+                       btrfs_free_delayed_extent_op(head->extent_op);
                        delayed_refs->num_heads--;
                        if (list_empty(&head->cluster))
                                delayed_refs->num_heads_ready--;
                        list_del_init(&head->cluster);
                }
+
                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
-
+               if (head)
+                       mutex_unlock(&head->mutex);
                spin_unlock(&delayed_refs->lock);
                btrfs_put_delayed_ref(ref);
 
@@ -3671,6 +3722,8 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
                                    delalloc_inodes);
 
                list_del_init(&btrfs_inode->delalloc_inodes);
+               clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+                         &btrfs_inode->runtime_flags);
 
                btrfs_invalidate_inodes(btrfs_inode->root);
        }
@@ -3823,10 +3876,8 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
 
        while (!list_empty(&list)) {
                t = list_entry(list.next, struct btrfs_transaction, list);
-               if (!t)
-                       break;
 
-               btrfs_destroy_ordered_operations(root);
+               btrfs_destroy_ordered_operations(t, root);
 
                btrfs_destroy_ordered_extents(root);
 
index 305c33e..034d7dc 100644 (file)
 #define BTRFS_SUPER_MIRROR_MAX  3
 #define BTRFS_SUPER_MIRROR_SHIFT 12
 
+enum {
+       BTRFS_WQ_ENDIO_DATA = 0,
+       BTRFS_WQ_ENDIO_METADATA = 1,
+       BTRFS_WQ_ENDIO_FREE_SPACE = 2,
+       BTRFS_WQ_ENDIO_RAID56 = 3,
+};
+
 static inline u64 btrfs_sb_offset(int mirror)
 {
        u64 start = 16 * 1024;
index 614f34a..81ee29e 100644 (file)
@@ -22,10 +22,10 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
 
        if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
                *max_len = BTRFS_FID_SIZE_CONNECTABLE;
-               return 255;
+               return FILEID_INVALID;
        } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
                *max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
-               return 255;
+               return FILEID_INVALID;
        }
 
        len  = BTRFS_FID_SIZE_NON_CONNECTABLE;
index 1e59ed5..3e074da 100644 (file)
@@ -31,6 +31,7 @@
 #include "print-tree.h"
 #include "transaction.h"
 #include "volumes.h"
+#include "raid56.h"
 #include "locking.h"
 #include "free-space-cache.h"
 #include "math.h"
@@ -72,8 +73,7 @@ enum {
        RESERVE_ALLOC_NO_ACCOUNT = 2,
 };
 
-static int update_block_group(struct btrfs_trans_handle *trans,
-                             struct btrfs_root *root,
+static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
@@ -103,6 +103,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
+static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
+                              u64 num_bytes);
 
 static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -162,6 +164,10 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
+
+       if (info->first_logical_byte > block_group->key.objectid)
+               info->first_logical_byte = block_group->key.objectid;
+
        spin_unlock(&info->block_group_cache_lock);
 
        return 0;
@@ -203,8 +209,11 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                        break;
                }
        }
-       if (ret)
+       if (ret) {
                btrfs_get_block_group(ret);
+               if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+                       info->first_logical_byte = ret->key.objectid;
+       }
        spin_unlock(&info->block_group_cache_lock);
 
        return ret;
@@ -468,8 +477,6 @@ out:
 }
 
 static int cache_block_group(struct btrfs_block_group_cache *cache,
-                            struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root,
                             int load_cache_only)
 {
        DEFINE_WAIT(wait);
@@ -527,12 +534,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);
 
-       /*
-        * We can't do the read from on-disk cache during a commit since we need
-        * to have the normal tree locking.  Also if we are currently trying to
-        * allocate blocks for the tree root we can't do the fast caching since
-        * we likely hold important locks.
-        */
        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);
 
@@ -1852,6 +1853,8 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                *actual_bytes = discarded_bytes;
 
 
+       if (ret == -EOPNOTSUPP)
+               ret = 0;
        return ret;
 }
 
@@ -2143,7 +2146,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
                                                      node->num_bytes);
                        }
                }
-               mutex_unlock(&head->mutex);
                return ret;
        }
 
@@ -2258,7 +2260,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                         * process of being added. Don't run this ref yet.
                         */
                        list_del_init(&locked_ref->cluster);
-                       mutex_unlock(&locked_ref->mutex);
+                       btrfs_delayed_ref_unlock(locked_ref);
                        locked_ref = NULL;
                        delayed_refs->num_heads_ready++;
                        spin_unlock(&delayed_refs->lock);
@@ -2285,7 +2287,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                        ref = &locked_ref->node;
 
                        if (extent_op && must_insert_reserved) {
-                               kfree(extent_op);
+                               btrfs_free_delayed_extent_op(extent_op);
                                extent_op = NULL;
                        }
 
@@ -2294,28 +2296,25 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 
                                ret = run_delayed_extent_op(trans, root,
                                                            ref, extent_op);
-                               kfree(extent_op);
+                               btrfs_free_delayed_extent_op(extent_op);
 
                                if (ret) {
-                                       list_del_init(&locked_ref->cluster);
-                                       mutex_unlock(&locked_ref->mutex);
-
-                                       printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+                                       printk(KERN_DEBUG
+                                              "btrfs: run_delayed_extent_op "
+                                              "returned %d\n", ret);
                                        spin_lock(&delayed_refs->lock);
+                                       btrfs_delayed_ref_unlock(locked_ref);
                                        return ret;
                                }
 
                                goto next;
                        }
-
-                       list_del_init(&locked_ref->cluster);
-                       locked_ref = NULL;
                }
 
                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
-               if (locked_ref) {
+               if (!btrfs_delayed_ref_is_head(ref)) {
                        /*
                         * when we play the delayed ref, also correct the
                         * ref_mod on head
@@ -2337,20 +2336,29 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                ret = run_one_delayed_ref(trans, root, ref, extent_op,
                                          must_insert_reserved);
 
-               btrfs_put_delayed_ref(ref);
-               kfree(extent_op);
-               count++;
-
+               btrfs_free_delayed_extent_op(extent_op);
                if (ret) {
-                       if (locked_ref) {
-                               list_del_init(&locked_ref->cluster);
-                               mutex_unlock(&locked_ref->mutex);
-                       }
-                       printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+                       btrfs_delayed_ref_unlock(locked_ref);
+                       btrfs_put_delayed_ref(ref);
+                       printk(KERN_DEBUG
+                              "btrfs: run_one_delayed_ref returned %d\n", ret);
                        spin_lock(&delayed_refs->lock);
                        return ret;
                }
 
+               /*
+                * If this node is a head, that means all the refs in this head
+                * have been dealt with, and we will pick the next head to deal
+                * with, so we must unlock the head and drop it from the cluster
+                * list before we release it.
+                */
+               if (btrfs_delayed_ref_is_head(ref)) {
+                       list_del_init(&locked_ref->cluster);
+                       btrfs_delayed_ref_unlock(locked_ref);
+                       locked_ref = NULL;
+               }
+               btrfs_put_delayed_ref(ref);
+               count++;
 next:
                cond_resched();
                spin_lock(&delayed_refs->lock);
@@ -2435,6 +2443,16 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
        return ret;
 }
 
+static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
+                     int count)
+{
+       int val = atomic_read(&delayed_refs->ref_seq);
+
+       if (val < seq || val >= seq + count)
+               return 1;
+       return 0;
+}
+
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far.  count can be
@@ -2469,6 +2487,44 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
        delayed_refs = &trans->transaction->delayed_refs;
        INIT_LIST_HEAD(&cluster);
+       if (count == 0) {
+               count = delayed_refs->num_entries * 2;
+               run_most = 1;
+       }
+
+       if (!run_all && !run_most) {
+               int old;
+               int seq = atomic_read(&delayed_refs->ref_seq);
+
+progress:
+               old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+               if (old) {
+                       DEFINE_WAIT(__wait);
+                       if (delayed_refs->num_entries < 16348)
+                               return 0;
+
+                       prepare_to_wait(&delayed_refs->wait, &__wait,
+                                       TASK_UNINTERRUPTIBLE);
+
+                       old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+                       if (old) {
+                               schedule();
+                               finish_wait(&delayed_refs->wait, &__wait);
+
+                               if (!refs_newer(delayed_refs, seq, 256))
+                                       goto progress;
+                               else
+                                       return 0;
+                       } else {
+                               finish_wait(&delayed_refs->wait, &__wait);
+                               goto again;
+                       }
+               }
+
+       } else {
+               atomic_inc(&delayed_refs->procs_running_refs);
+       }
+
 again:
        loops = 0;
        spin_lock(&delayed_refs->lock);
@@ -2477,10 +2533,6 @@ again:
        delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif
 
-       if (count == 0) {
-               count = delayed_refs->num_entries * 2;
-               run_most = 1;
-       }
        while (1) {
                if (!(run_all || run_most) &&
                    delayed_refs->num_heads_ready < 64)
@@ -2500,11 +2552,15 @@ again:
 
                ret = run_clustered_refs(trans, root, &cluster);
                if (ret < 0) {
+                       btrfs_release_ref_cluster(&cluster);
                        spin_unlock(&delayed_refs->lock);
                        btrfs_abort_transaction(trans, root, ret);
+                       atomic_dec(&delayed_refs->procs_running_refs);
                        return ret;
                }
 
+               atomic_add(ret, &delayed_refs->ref_seq);
+
                count -= min_t(unsigned long, ret, count);
 
                if (count == 0)
@@ -2573,6 +2629,11 @@ again:
                goto again;
        }
 out:
+       atomic_dec(&delayed_refs->procs_running_refs);
+       smp_mb();
+       if (waitqueue_active(&delayed_refs->wait))
+               wake_up(&delayed_refs->wait);
+
        spin_unlock(&delayed_refs->lock);
        assert_qgroups_uptodate(trans);
        return 0;
@@ -2586,7 +2647,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_extent_op *extent_op;
        int ret;
 
-       extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+       extent_op = btrfs_alloc_delayed_extent_op();
        if (!extent_op)
                return -ENOMEM;
 
@@ -2598,7 +2659,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
        ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
                                          num_bytes, extent_op);
        if (ret)
-               kfree(extent_op);
+               btrfs_free_delayed_extent_op(extent_op);
        return ret;
 }
 
@@ -3223,12 +3284,14 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
        u64 extra_flags = chunk_to_extended(flags) &
                                BTRFS_EXTENDED_PROFILE_MASK;
 
+       write_seqlock(&fs_info->profiles_lock);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits |= extra_flags;
+       write_sequnlock(&fs_info->profiles_lock);
 }
 
 /*
@@ -3276,6 +3339,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        u64 num_devices = root->fs_info->fs_devices->rw_devices +
                root->fs_info->fs_devices->missing_devices;
        u64 target;
+       u64 tmp;
 
        /*
         * see if restripe for this chunk_type is in progress, if so
@@ -3292,40 +3356,48 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        }
        spin_unlock(&root->fs_info->balance_lock);
 
+       /* First, mask out the RAID levels which aren't possible */
        if (num_devices == 1)
-               flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
+               flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
+                          BTRFS_BLOCK_GROUP_RAID5);
+       if (num_devices < 3)
+               flags &= ~BTRFS_BLOCK_GROUP_RAID6;
        if (num_devices < 4)
                flags &= ~BTRFS_BLOCK_GROUP_RAID10;
 
-       if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
-           (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-                     BTRFS_BLOCK_GROUP_RAID10))) {
-               flags &= ~BTRFS_BLOCK_GROUP_DUP;
-       }
-
-       if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
-           (flags & BTRFS_BLOCK_GROUP_RAID10)) {
-               flags &= ~BTRFS_BLOCK_GROUP_RAID1;
-       }
+       tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
+                      BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
+                      BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
+       flags &= ~tmp;
 
-       if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
-           ((flags & BTRFS_BLOCK_GROUP_RAID1) |
-            (flags & BTRFS_BLOCK_GROUP_RAID10) |
-            (flags & BTRFS_BLOCK_GROUP_DUP))) {
-               flags &= ~BTRFS_BLOCK_GROUP_RAID0;
-       }
+       if (tmp & BTRFS_BLOCK_GROUP_RAID6)
+               tmp = BTRFS_BLOCK_GROUP_RAID6;
+       else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
+               tmp = BTRFS_BLOCK_GROUP_RAID5;
+       else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
+               tmp = BTRFS_BLOCK_GROUP_RAID10;
+       else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
+               tmp = BTRFS_BLOCK_GROUP_RAID1;
+       else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
+               tmp = BTRFS_BLOCK_GROUP_RAID0;
 
-       return extended_to_chunk(flags);
+       return extended_to_chunk(flags | tmp);
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 {
-       if (flags & BTRFS_BLOCK_GROUP_DATA)
-               flags |= root->fs_info->avail_data_alloc_bits;
-       else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-               flags |= root->fs_info->avail_system_alloc_bits;
-       else if (flags & BTRFS_BLOCK_GROUP_METADATA)
-               flags |= root->fs_info->avail_metadata_alloc_bits;
+       unsigned seq;
+
+       do {
+               seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+               if (flags & BTRFS_BLOCK_GROUP_DATA)
+                       flags |= root->fs_info->avail_data_alloc_bits;
+               else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+                       flags |= root->fs_info->avail_system_alloc_bits;
+               else if (flags & BTRFS_BLOCK_GROUP_METADATA)
+                       flags |= root->fs_info->avail_metadata_alloc_bits;
+       } while (read_seqretry(&root->fs_info->profiles_lock, seq));
 
        return btrfs_reduce_alloc_profile(root, flags);
 }
@@ -3333,6 +3405,7 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 {
        u64 flags;
+       u64 ret;
 
        if (data)
                flags = BTRFS_BLOCK_GROUP_DATA;
@@ -3341,7 +3414,8 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
        else
                flags = BTRFS_BLOCK_GROUP_METADATA;
 
-       return get_alloc_profile(root, flags);
+       ret = get_alloc_profile(root, flags);
+       return ret;
 }
 
 /*
@@ -3357,7 +3431,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
        int ret = 0, committed = 0, alloc_chunk = 1;
 
        /* make sure bytes are sectorsize aligned */
-       bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+       bytes = ALIGN(bytes, root->sectorsize);
 
        if (root == root->fs_info->tree_root ||
            BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
@@ -3452,7 +3526,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
        struct btrfs_space_info *data_sinfo;
 
        /* make sure bytes are sectorsize aligned */
-       bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+       bytes = ALIGN(bytes, root->sectorsize);
 
        data_sinfo = root->fs_info->data_sinfo;
        spin_lock(&data_sinfo->lock);
@@ -3516,8 +3590,10 @@ static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
 {
        u64 num_dev;
 
-       if (type & BTRFS_BLOCK_GROUP_RAID10 ||
-           type & BTRFS_BLOCK_GROUP_RAID0)
+       if (type & (BTRFS_BLOCK_GROUP_RAID10 |
+                   BTRFS_BLOCK_GROUP_RAID0 |
+                   BTRFS_BLOCK_GROUP_RAID5 |
+                   BTRFS_BLOCK_GROUP_RAID6))
                num_dev = root->fs_info->fs_devices->rw_devices;
        else if (type & BTRFS_BLOCK_GROUP_RAID1)
                num_dev = 2;
@@ -3564,6 +3640,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        int wait_for_alloc = 0;
        int ret = 0;
 
+       /* Don't re-enter if we're already allocating a chunk */
+       if (trans->allocating_chunk)
+               return -ENOSPC;
+
        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
                ret = update_space_info(extent_root->fs_info, flags,
@@ -3606,6 +3686,8 @@ again:
                goto again;
        }
 
+       trans->allocating_chunk = true;
+
        /*
         * If we have mixed data/metadata chunks we want to make sure we keep
         * allocating mixed chunks instead of individual chunks.
@@ -3632,19 +3714,20 @@ again:
        check_system_chunk(trans, extent_root, flags);
 
        ret = btrfs_alloc_chunk(trans, extent_root, flags);
-       if (ret < 0 && ret != -ENOSPC)
-               goto out;
+       trans->allocating_chunk = false;
 
        spin_lock(&space_info->lock);
+       if (ret < 0 && ret != -ENOSPC)
+               goto out;
        if (ret)
                space_info->full = 1;
        else
                ret = 1;
 
        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+out:
        space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
-out:
        mutex_unlock(&fs_info->chunk_mutex);
        return ret;
 }
@@ -3653,13 +3736,31 @@ static int can_overcommit(struct btrfs_root *root,
                          struct btrfs_space_info *space_info, u64 bytes,
                          enum btrfs_reserve_flush_enum flush)
 {
+       struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        u64 profile = btrfs_get_alloc_profile(root, 0);
+       u64 rsv_size = 0;
        u64 avail;
        u64 used;
+       u64 to_add;
 
        used = space_info->bytes_used + space_info->bytes_reserved +
-               space_info->bytes_pinned + space_info->bytes_readonly +
-               space_info->bytes_may_use;
+               space_info->bytes_pinned + space_info->bytes_readonly;
+
+       spin_lock(&global_rsv->lock);
+       rsv_size = global_rsv->size;
+       spin_unlock(&global_rsv->lock);
+
+       /*
+        * We only want to allow over committing if we have lots of actual space
+        * free, but if we don't have enough space to handle the global reserve
+        * space then we could end up having a real enospc problem when trying
+        * to allocate a chunk or some other such important allocation.
+        */
+       rsv_size <<= 1;
+       if (used + rsv_size >= space_info->total_bytes)
+               return 0;
+
+       used += space_info->bytes_may_use;
 
        spin_lock(&root->fs_info->free_chunk_lock);
        avail = root->fs_info->free_chunk_space;
@@ -3667,40 +3768,58 @@ static int can_overcommit(struct btrfs_root *root,
 
        /*
         * If we have dup, raid1 or raid10 then only half of the free
-        * space is actually useable.
+        * space is actually useable.  For raid56, the space info used
+        * doesn't include the parity drive, so we don't have to
+        * change the math
         */
        if (profile & (BTRFS_BLOCK_GROUP_DUP |
                       BTRFS_BLOCK_GROUP_RAID1 |
                       BTRFS_BLOCK_GROUP_RAID10))
                avail >>= 1;
 
+       to_add = space_info->total_bytes;
+
        /*
         * If we aren't flushing all things, let us overcommit up to
         * 1/2th of the space. If we can flush, don't let us overcommit
         * too much, let it overcommit up to 1/8 of the space.
         */
        if (flush == BTRFS_RESERVE_FLUSH_ALL)
-               avail >>= 3;
+               to_add >>= 3;
        else
-               avail >>= 1;
+               to_add >>= 1;
+
+       /*
+        * Limit the overcommit to the amount of free space we could possibly
+        * allocate for chunks.
+        */
+       to_add = min(avail, to_add);
 
-       if (used + bytes < space_info->total_bytes + avail)
+       if (used + bytes < space_info->total_bytes + to_add)
                return 1;
        return 0;
 }
 
-static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
-                                              unsigned long nr_pages,
-                                              enum wb_reason reason)
+void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+                                 unsigned long nr_pages)
 {
-       if (!writeback_in_progress(sb->s_bdi) &&
-           down_read_trylock(&sb->s_umount)) {
-               writeback_inodes_sb_nr(sb, nr_pages, reason);
-               up_read(&sb->s_umount);
-               return 1;
-       }
+       struct super_block *sb = root->fs_info->sb;
+       int started;
 
-       return 0;
+       /* If we cannot start writeback, just sync all the delalloc files. */
+       started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
+                                                     WB_REASON_FS_FREE_SPACE);
+       if (!started) {
+               /*
+                * We needn't worry about the filesystem going from r/w to r/o
+                * even though we don't acquire the ->s_umount mutex, because
+                * the filesystem should guarantee that the delalloc inode
+                * list is empty after the filesystem becomes read-only (all
+                * dirty pages have been written to disk).
+                */
+               btrfs_start_delalloc_inodes(root, 0);
+               btrfs_wait_ordered_extents(root, 0);
+       }
 }
 
 /*
@@ -3724,7 +3843,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
        space_info = block_rsv->space_info;
 
        smp_mb();
-       delalloc_bytes = root->fs_info->delalloc_bytes;
+       delalloc_bytes = percpu_counter_sum_positive(
+                                               &root->fs_info->delalloc_bytes);
        if (delalloc_bytes == 0) {
                if (trans)
                        return;
@@ -3735,10 +3855,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
        while (delalloc_bytes && loops < 3) {
                max_reclaim = min(delalloc_bytes, to_reclaim);
                nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
-               writeback_inodes_sb_nr_if_idle_safe(root->fs_info->sb,
-                                                   nr_pages,
-                                                   WB_REASON_FS_FREE_SPACE);
-
+               btrfs_writeback_inodes_sb_nr(root, nr_pages);
                /*
                 * We need to wait for the async pages to actually start before
                 * we do anything.
@@ -3766,7 +3883,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
                                break;
                }
                smp_mb();
-               delalloc_bytes = root->fs_info->delalloc_bytes;
+               delalloc_bytes = percpu_counter_sum_positive(
+                                               &root->fs_info->delalloc_bytes);
        }
 }
 
@@ -4030,6 +4148,15 @@ again:
                goto again;
 
 out:
+       if (ret == -ENOSPC &&
+           unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
+               struct btrfs_block_rsv *global_rsv =
+                       &root->fs_info->global_block_rsv;
+
+               if (block_rsv != global_rsv &&
+                   !block_rsv_use_bytes(global_rsv, orig_bytes))
+                       ret = 0;
+       }
        if (flushing) {
                spin_lock(&space_info->lock);
                space_info->flush = 0;
@@ -4416,19 +4543,60 @@ void btrfs_orphan_release_metadata(struct inode *inode)
        btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
 }
 
-int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
-                               struct btrfs_pending_snapshot *pending)
+/*
+ * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
+ * root: the root of the parent directory
+ * rsv: block reservation
+ * items: the number of items that we need do reservation
+ * qgroup_reserved: used to return the reserved size in qgroup
+ *
+ * This function is used to reserve the space for snapshot/subvolume
+ * creation and deletion. Those operations are different from the
+ * common file/directory operations: they change two fs/file trees
+ * and the root tree, and the number of items that the qgroup reserves
+ * is different from the free space reservation. So we can not use
+ * the space reservation mechanism in start_transaction().
+ */
+int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+                                    struct btrfs_block_rsv *rsv,
+                                    int items,
+                                    u64 *qgroup_reserved)
 {
-       struct btrfs_root *root = pending->root;
-       struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
-       struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
-       /*
-        * two for root back/forward refs, two for directory entries,
-        * one for root of the snapshot and one for parent inode.
-        */
-       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
-       dst_rsv->space_info = src_rsv->space_info;
-       return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
+       u64 num_bytes;
+       int ret;
+
+       if (root->fs_info->quota_enabled) {
+               /* One for parent inode, two for dir entries */
+               num_bytes = 3 * root->leafsize;
+               ret = btrfs_qgroup_reserve(root, num_bytes);
+               if (ret)
+                       return ret;
+       } else {
+               num_bytes = 0;
+       }
+
+       *qgroup_reserved = num_bytes;
+
+       num_bytes = btrfs_calc_trans_metadata_size(root, items);
+       rsv->space_info = __find_space_info(root->fs_info,
+                                           BTRFS_BLOCK_GROUP_METADATA);
+       ret = btrfs_block_rsv_add(root, rsv, num_bytes,
+                                 BTRFS_RESERVE_FLUSH_ALL);
+       if (ret) {
+               if (*qgroup_reserved)
+                       btrfs_qgroup_free(root, *qgroup_reserved);
+       }
+
+       return ret;
+}
+
+void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+                                     struct btrfs_block_rsv *rsv,
+                                     u64 qgroup_reserved)
+{
+       btrfs_block_rsv_release(root, rsv, (u64)-1);
+       if (qgroup_reserved)
+               btrfs_qgroup_free(root, qgroup_reserved);
 }
 
 /**
@@ -4536,6 +4704,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
        int ret = 0;
        bool delalloc_lock = true;
+       u64 to_free = 0;
+       unsigned dropped;
 
        /* If we are a free space inode we need to not flush since we will be in
         * the middle of a transaction commit.  We also don't need the delalloc
@@ -4579,54 +4749,19 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        csum_bytes = BTRFS_I(inode)->csum_bytes;
        spin_unlock(&BTRFS_I(inode)->lock);
 
-       if (root->fs_info->quota_enabled)
+       if (root->fs_info->quota_enabled) {
                ret = btrfs_qgroup_reserve(root, num_bytes +
                                           nr_extents * root->leafsize);
+               if (ret)
+                       goto out_fail;
+       }
 
-       /*
-        * ret != 0 here means the qgroup reservation failed, we go straight to
-        * the shared error handling then.
-        */
-       if (ret == 0)
-               ret = reserve_metadata_bytes(root, block_rsv,
-                                            to_reserve, flush);
-
-       if (ret) {
-               u64 to_free = 0;
-               unsigned dropped;
-
-               spin_lock(&BTRFS_I(inode)->lock);
-               dropped = drop_outstanding_extent(inode);
-               /*
-                * If the inodes csum_bytes is the same as the original
-                * csum_bytes then we know we haven't raced with any free()ers
-                * so we can just reduce our inodes csum bytes and carry on.
-                * Otherwise we have to do the normal free thing to account for
-                * the case that the free side didn't free up its reserve
-                * because of this outstanding reservation.
-                */
-               if (BTRFS_I(inode)->csum_bytes == csum_bytes)
-                       calc_csum_metadata_size(inode, num_bytes, 0);
-               else
-                       to_free = calc_csum_metadata_size(inode, num_bytes, 0);
-               spin_unlock(&BTRFS_I(inode)->lock);
-               if (dropped)
-                       to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
-               if (to_free) {
-                       btrfs_block_rsv_release(root, block_rsv, to_free);
-                       trace_btrfs_space_reservation(root->fs_info,
-                                                     "delalloc",
-                                                     btrfs_ino(inode),
-                                                     to_free, 0);
-               }
-               if (root->fs_info->quota_enabled) {
+       ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
+       if (unlikely(ret)) {
+               if (root->fs_info->quota_enabled)
                        btrfs_qgroup_free(root, num_bytes +
                                                nr_extents * root->leafsize);
-               }
-               if (delalloc_lock)
-                       mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
-               return ret;
+               goto out_fail;
        }
 
        spin_lock(&BTRFS_I(inode)->lock);
@@ -4647,6 +4782,34 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
        return 0;
+
+out_fail:
+       spin_lock(&BTRFS_I(inode)->lock);
+       dropped = drop_outstanding_extent(inode);
+       /*
+        * If the inodes csum_bytes is the same as the original
+        * csum_bytes then we know we haven't raced with any free()ers
+        * so we can just reduce our inodes csum bytes and carry on.
+        * Otherwise we have to do the normal free thing to account for
+        * the case that the free side didn't free up its reserve
+        * because of this outstanding reservation.
+        */
+       if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+               calc_csum_metadata_size(inode, num_bytes, 0);
+       else
+               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+       spin_unlock(&BTRFS_I(inode)->lock);
+       if (dropped)
+               to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
+       if (to_free) {
+               btrfs_block_rsv_release(root, block_rsv, to_free);
+               trace_btrfs_space_reservation(root->fs_info, "delalloc",
+                                             btrfs_ino(inode), to_free, 0);
+       }
+       if (delalloc_lock)
+               mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+       return ret;
 }
 
 /**
@@ -4668,7 +4831,8 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
        spin_lock(&BTRFS_I(inode)->lock);
        dropped = drop_outstanding_extent(inode);
 
-       to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+       if (num_bytes)
+               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
        spin_unlock(&BTRFS_I(inode)->lock);
        if (dropped > 0)
                to_free += btrfs_calc_trans_metadata_size(root, dropped);
@@ -4735,8 +4899,7 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
        btrfs_free_reserved_data_space(inode, num_bytes);
 }
 
-static int update_block_group(struct btrfs_trans_handle *trans,
-                             struct btrfs_root *root,
+static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc)
 {
        struct btrfs_block_group_cache *cache = NULL;
@@ -4773,7 +4936,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                 * space back to the block group, otherwise we will leak space.
                 */
                if (!alloc && cache->cached == BTRFS_CACHE_NO)
-                       cache_block_group(cache, trans, NULL, 1);
+                       cache_block_group(cache, 1);
 
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
@@ -4823,6 +4986,13 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
        struct btrfs_block_group_cache *cache;
        u64 bytenr;
 
+       spin_lock(&root->fs_info->block_group_cache_lock);
+       bytenr = root->fs_info->first_logical_byte;
+       spin_unlock(&root->fs_info->block_group_cache_lock);
+
+       if (bytenr < (u64)-1)
+               return bytenr;
+
        cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
        if (!cache)
                return 0;
@@ -4873,8 +5043,7 @@ int btrfs_pin_extent(struct btrfs_root *root,
 /*
  * this function must be called within transaction
  */
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
-                                   struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
                                    u64 bytenr, u64 num_bytes)
 {
        struct btrfs_block_group_cache *cache;
@@ -4888,7 +5057,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
         * to one because the slow code to read in the free extents does check
         * the pinned extents.
         */
-       cache_block_group(cache, trans, root, 1);
+       cache_block_group(cache, 1);
 
        pin_down_extent(root, cache, bytenr, num_bytes, 0);
 
@@ -5285,7 +5454,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                        }
                }
 
-               ret = update_block_group(trans, root, bytenr, num_bytes, 0);
+               ret = update_block_group(root, bytenr, num_bytes, 0);
                if (ret) {
                        btrfs_abort_transaction(trans, extent_root, ret);
                        goto out;
@@ -5330,7 +5499,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
        if (head->extent_op) {
                if (!head->must_insert_reserved)
                        goto out;
-               kfree(head->extent_op);
+               btrfs_free_delayed_extent_op(head->extent_op);
                head->extent_op = NULL;
        }
 
@@ -5453,10 +5622,11 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
        return ret;
 }
 
-static u64 stripe_align(struct btrfs_root *root, u64 val)
+static u64 stripe_align(struct btrfs_root *root,
+                       struct btrfs_block_group_cache *cache,
+                       u64 val, u64 num_bytes)
 {
-       u64 mask = ((u64)root->stripesize - 1);
-       u64 ret = (val + mask) & ~mask;
+       u64 ret = ALIGN(val, root->stripesize);
        return ret;
 }
 
@@ -5476,7 +5646,6 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
                                u64 num_bytes)
 {
        struct btrfs_caching_control *caching_ctl;
-       DEFINE_WAIT(wait);
 
        caching_ctl = get_caching_control(cache);
        if (!caching_ctl)
@@ -5493,7 +5662,6 @@ static noinline int
 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 {
        struct btrfs_caching_control *caching_ctl;
-       DEFINE_WAIT(wait);
 
        caching_ctl = get_caching_control(cache);
        if (!caching_ctl)
@@ -5507,20 +5675,20 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 
 int __get_raid_index(u64 flags)
 {
-       int index;
-
        if (flags & BTRFS_BLOCK_GROUP_RAID10)
-               index = 0;
+               return BTRFS_RAID_RAID10;
        else if (flags & BTRFS_BLOCK_GROUP_RAID1)
-               index = 1;
+               return BTRFS_RAID_RAID1;
        else if (flags & BTRFS_BLOCK_GROUP_DUP)
-               index = 2;
+               return BTRFS_RAID_DUP;
        else if (flags & BTRFS_BLOCK_GROUP_RAID0)
-               index = 3;
-       else
-               index = 4;
+               return BTRFS_RAID_RAID0;
+       else if (flags & BTRFS_BLOCK_GROUP_RAID5)
+               return BTRFS_RAID_RAID5;
+       else if (flags & BTRFS_BLOCK_GROUP_RAID6)
+               return BTRFS_RAID_RAID6;
 
-       return index;
+       return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
 }
 
 static int get_block_group_index(struct btrfs_block_group_cache *cache)
@@ -5663,6 +5831,8 @@ search:
                if (!block_group_bits(block_group, data)) {
                    u64 extra = BTRFS_BLOCK_GROUP_DUP |
                                BTRFS_BLOCK_GROUP_RAID1 |
+                               BTRFS_BLOCK_GROUP_RAID5 |
+                               BTRFS_BLOCK_GROUP_RAID6 |
                                BTRFS_BLOCK_GROUP_RAID10;
 
                        /*
@@ -5678,8 +5848,7 @@ have_block_group:
                cached = block_group_cache_done(block_group);
                if (unlikely(!cached)) {
                        found_uncached_bg = true;
-                       ret = cache_block_group(block_group, trans,
-                                               orig_root, 0);
+                       ret = cache_block_group(block_group, 0);
                        BUG_ON(ret < 0);
                        ret = 0;
                }
@@ -5692,6 +5861,7 @@ have_block_group:
                 * lets look there
                 */
                if (last_ptr) {
+                       unsigned long aligned_cluster;
                        /*
                         * the refill lock keeps out other
                         * people trying to start a new cluster
@@ -5758,11 +5928,15 @@ refill_cluster:
                                goto unclustered_alloc;
                        }
 
+                       aligned_cluster = max_t(unsigned long,
+                                               empty_cluster + empty_size,
+                                             block_group->full_stripe_len);
+
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
                                               search_start, num_bytes,
-                                              empty_cluster + empty_size);
+                                              aligned_cluster);
                        if (ret == 0) {
                                /*
                                 * now pull our allocation out of this
@@ -5833,7 +6007,8 @@ unclustered_alloc:
                        goto loop;
                }
 checks:
-               search_start = stripe_align(root, offset);
+               search_start = stripe_align(root, used_block_group,
+                                           offset, num_bytes);
 
                /* move on to the next group */
                if (search_start + num_bytes >
@@ -5984,7 +6159,7 @@ again:
        if (ret == -ENOSPC) {
                if (!final_tried) {
                        num_bytes = num_bytes >> 1;
-                       num_bytes = num_bytes & ~(root->sectorsize - 1);
+                       num_bytes = round_down(num_bytes, root->sectorsize);
                        num_bytes = max(num_bytes, min_alloc_size);
                        if (num_bytes == min_alloc_size)
                                final_tried = true;
@@ -6108,7 +6283,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_free_path(path);
 
-       ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+       ret = update_block_group(root, ins->objectid, ins->offset, 1);
        if (ret) { /* -ENOENT, logic error */
                printk(KERN_ERR "btrfs update block group failed for %llu "
                       "%llu\n", (unsigned long long)ins->objectid,
@@ -6172,7 +6347,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);
 
-       ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+       ret = update_block_group(root, ins->objectid, ins->offset, 1);
        if (ret) { /* -ENOENT, logic error */
                printk(KERN_ERR "btrfs update block group failed for %llu "
                       "%llu\n", (unsigned long long)ins->objectid,
@@ -6215,7 +6390,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
        u64 num_bytes = ins->offset;
 
        block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-       cache_block_group(block_group, trans, NULL, 0);
+       cache_block_group(block_group, 0);
        caching_ctl = get_caching_control(block_group);
 
        if (!caching_ctl) {
@@ -6329,12 +6504,14 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        if (!ret)
                return block_rsv;
        if (ret && !block_rsv->failfast) {
-               static DEFINE_RATELIMIT_STATE(_rs,
-                               DEFAULT_RATELIMIT_INTERVAL,
-                               /*DEFAULT_RATELIMIT_BURST*/ 2);
-               if (__ratelimit(&_rs))
-                       WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
-                            ret);
+               if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+                       static DEFINE_RATELIMIT_STATE(_rs,
+                                       DEFAULT_RATELIMIT_INTERVAL * 10,
+                                       /*DEFAULT_RATELIMIT_BURST*/ 1);
+                       if (__ratelimit(&_rs))
+                               WARN(1, KERN_DEBUG
+                                       "btrfs: block rsv returned %d\n", ret);
+               }
                ret = reserve_metadata_bytes(root, block_rsv, blocksize,
                                             BTRFS_RESERVE_NO_FLUSH);
                if (!ret) {
@@ -6400,7 +6577,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 
        if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
                struct btrfs_delayed_extent_op *extent_op;
-               extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+               extent_op = btrfs_alloc_delayed_extent_op();
                BUG_ON(!extent_op); /* -ENOMEM */
                if (key)
                        memcpy(&extent_op->key, key, sizeof(extent_op->key));
@@ -7203,6 +7380,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
                root->fs_info->fs_devices->missing_devices;
 
        stripped = BTRFS_BLOCK_GROUP_RAID0 |
+               BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
 
        if (num_devices == 1) {
@@ -7481,16 +7659,16 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                index = get_block_group_index(block_group);
        }
 
-       if (index == 0) {
+       if (index == BTRFS_RAID_RAID10) {
                dev_min = 4;
                /* Divide by 2 */
                min_free >>= 1;
-       } else if (index == 1) {
+       } else if (index == BTRFS_RAID_RAID1) {
                dev_min = 2;
-       } else if (index == 2) {
+       } else if (index == BTRFS_RAID_DUP) {
                /* Multiply by 2 */
                min_free <<= 1;
-       } else if (index == 3) {
+       } else if (index == BTRFS_RAID_RAID0) {
                dev_min = fs_devices->rw_devices;
                do_div(min_free, dev_min);
        }
@@ -7651,11 +7829,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                space_info = list_entry(info->space_info.next,
                                        struct btrfs_space_info,
                                        list);
-               if (space_info->bytes_pinned > 0 ||
-                   space_info->bytes_reserved > 0 ||
-                   space_info->bytes_may_use > 0) {
-                       WARN_ON(1);
-                       dump_space_info(space_info, 0, 0);
+               if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
+                       if (space_info->bytes_pinned > 0 ||
+                           space_info->bytes_reserved > 0 ||
+                           space_info->bytes_may_use > 0) {
+                               WARN_ON(1);
+                               dump_space_info(space_info, 0, 0);
+                       }
                }
                list_del(&space_info->list);
                kfree(space_info);
@@ -7754,7 +7934,9 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                btrfs_release_path(path);
                cache->flags = btrfs_block_group_flags(&cache->item);
                cache->sectorsize = root->sectorsize;
-
+               cache->full_stripe_len = btrfs_full_stripe_len(root,
+                                              &root->fs_info->mapping_tree,
+                                              found_key.objectid);
                btrfs_init_free_space_ctl(cache);
 
                /*
@@ -7808,6 +7990,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                if (!(get_alloc_profile(root, space_info->flags) &
                      (BTRFS_BLOCK_GROUP_RAID10 |
                       BTRFS_BLOCK_GROUP_RAID1 |
+                      BTRFS_BLOCK_GROUP_RAID5 |
+                      BTRFS_BLOCK_GROUP_RAID6 |
                       BTRFS_BLOCK_GROUP_DUP)))
                        continue;
                /*
@@ -7883,6 +8067,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        cache->sectorsize = root->sectorsize;
        cache->fs_info = root->fs_info;
+       cache->full_stripe_len = btrfs_full_stripe_len(root,
+                                              &root->fs_info->mapping_tree,
+                                              chunk_offset);
 
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
@@ -7932,12 +8119,14 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
        u64 extra_flags = chunk_to_extended(flags) &
                                BTRFS_EXTENDED_PROFILE_MASK;
 
+       write_seqlock(&fs_info->profiles_lock);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits &= ~extra_flags;
+       write_sequnlock(&fs_info->profiles_lock);
 }
 
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
@@ -8036,6 +8225,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        spin_lock(&root->fs_info->block_group_cache_lock);
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);
+
+       if (root->fs_info->first_logical_byte == block_group->key.objectid)
+               root->fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&root->fs_info->block_group_cache_lock);
 
        down_write(&block_group->space_info->groups_sem);
@@ -8158,7 +8350,7 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 
                if (end - start >= range->minlen) {
                        if (!block_group_cache_done(cache)) {
-                               ret = cache_block_group(cache, NULL, root, 0);
+                               ret = cache_block_group(cache, 0);
                                if (!ret)
                                        wait_block_group_cache_done(cache);
                        }
index 1b319df..f173c5a 100644 (file)
@@ -4,7 +4,6 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/page-flags.h>
-#include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
 #include <linux/swap.h>
@@ -1834,7 +1833,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
  */
 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
-       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+       u64 start = page_offset(page);
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
                SetPageUptodate(page);
@@ -1846,7 +1845,7 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
  */
 static void check_page_locked(struct extent_io_tree *tree, struct page *page)
 {
-       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+       u64 start = page_offset(page);
        u64 end = start + PAGE_CACHE_SIZE - 1;
        if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
                unlock_page(page);
@@ -1895,13 +1894,11 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
        if (ret)
                err = ret;
 
-       if (did_repair) {
-               ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
-                                       rec->start + rec->len - 1,
-                                       EXTENT_DAMAGED, GFP_NOFS);
-               if (ret && !err)
-                       err = ret;
-       }
+       ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+                               rec->start + rec->len - 1,
+                               EXTENT_DAMAGED, GFP_NOFS);
+       if (ret && !err)
+               err = ret;
 
        kfree(rec);
        return err;
@@ -1932,10 +1929,15 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        u64 map_length = 0;
        u64 sector;
        struct btrfs_bio *bbio = NULL;
+       struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
        int ret;
 
        BUG_ON(!mirror_num);
 
+       /* we can't repair anything in raid56 yet */
+       if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
+               return 0;
+
        bio = bio_alloc(GFP_NOFS, 1);
        if (!bio)
                return -EIO;
@@ -1960,7 +1962,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
                return -EIO;
        }
        bio->bi_bdev = dev->bdev;
-       bio_add_page(bio, page, length, start-page_offset(page));
+       bio_add_page(bio, page, length, start - page_offset(page));
        btrfsic_submit_bio(WRITE_SYNC, bio);
        wait_for_completion(&compl);
 
@@ -2052,6 +2054,7 @@ static int clean_io_failure(u64 start, struct page *page)
                                                failrec->failed_mirror);
                        did_repair = !ret;
                }
+               ret = 0;
        }
 
 out:
@@ -2293,8 +2296,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-               start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-                        bvec->bv_offset;
+               start = page_offset(page) + bvec->bv_offset;
                end = start + bvec->bv_len - 1;
 
                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2353,8 +2355,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                         (long int)bio->bi_bdev);
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-               start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-                       bvec->bv_offset;
+               start = page_offset(page) + bvec->bv_offset;
                end = start + bvec->bv_len - 1;
 
                if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2471,7 +2472,7 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
        struct extent_io_tree *tree = bio->bi_private;
        u64 start;
 
-       start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
+       start = page_offset(page) + bvec->bv_offset;
 
        bio->bi_private = NULL;
 
@@ -2489,13 +2490,13 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
        return ret;
 }
 
-static int merge_bio(struct extent_io_tree *tree, struct page *page,
+static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
                     unsigned long offset, size_t size, struct bio *bio,
                     unsigned long bio_flags)
 {
        int ret = 0;
        if (tree->ops && tree->ops->merge_bio_hook)
-               ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+               ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
                                                bio_flags);
        BUG_ON(ret < 0);
        return ret;
@@ -2530,7 +2531,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                                sector;
 
                if (prev_bio_flags != bio_flags || !contig ||
-                   merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
+                   merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
                    bio_add_page(bio, page, page_size, offset) < page_size) {
                        ret = submit_one_bio(rw, bio, mirror_num,
                                             prev_bio_flags);
@@ -2595,7 +2596,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                                   unsigned long *bio_flags)
 {
        struct inode *inode = page->mapping->host;
-       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+       u64 start = page_offset(page);
        u64 page_end = start + PAGE_CACHE_SIZE - 1;
        u64 end;
        u64 cur = start;
@@ -2648,6 +2649,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                }
        }
        while (cur <= end) {
+               unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+
                if (cur >= last_byte) {
                        char *userpage;
                        struct extent_state *cached = NULL;
@@ -2682,7 +2685,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
                iosize = min(extent_map_end(em) - cur, end - cur + 1);
                cur_end = min(extent_map_end(em) - 1, end);
-               iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+               iosize = ALIGN(iosize, blocksize);
                if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
                        disk_io_size = em->block_len;
                        sector = em->block_start >> 9;
@@ -2735,26 +2738,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        continue;
                }
 
-               ret = 0;
-               if (tree->ops && tree->ops->readpage_io_hook) {
-                       ret = tree->ops->readpage_io_hook(page, cur,
-                                                         cur + iosize - 1);
-               }
-               if (!ret) {
-                       unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
-                       pnr -= page->index;
-                       ret = submit_extent_page(READ, tree, page,
+               pnr -= page->index;
+               ret = submit_extent_page(READ, tree, page,
                                         sector, disk_io_size, pg_offset,
                                         bdev, bio, pnr,
                                         end_bio_extent_readpage, mirror_num,
                                         *bio_flags,
                                         this_bio_flag);
-                       if (!ret) {
-                               nr++;
-                               *bio_flags = this_bio_flag;
-                       }
-               }
-               if (ret) {
+               if (!ret) {
+                       nr++;
+                       *bio_flags = this_bio_flag;
+               } else {
                        SetPageError(page);
                        unlock_extent(tree, cur, cur + iosize - 1);
                }
@@ -2806,7 +2800,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        struct inode *inode = page->mapping->host;
        struct extent_page_data *epd = data;
        struct extent_io_tree *tree = epd->tree;
-       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+       u64 start = page_offset(page);
        u64 delalloc_start;
        u64 page_end = start + PAGE_CACHE_SIZE - 1;
        u64 end;
@@ -2982,7 +2976,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                BUG_ON(extent_map_end(em) <= cur);
                BUG_ON(end < cur);
                iosize = min(extent_map_end(em) - cur, end - cur + 1);
-               iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+               iosize = ALIGN(iosize, blocksize);
                sector = (em->block_start + extent_offset) >> 9;
                bdev = em->bdev;
                block_start = em->block_start;
@@ -3124,12 +3118,9 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
                set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
                spin_unlock(&eb->refs_lock);
                btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-               spin_lock(&fs_info->delalloc_lock);
-               if (fs_info->dirty_metadata_bytes >= eb->len)
-                       fs_info->dirty_metadata_bytes -= eb->len;
-               else
-                       WARN_ON(1);
-               spin_unlock(&fs_info->delalloc_lock);
+               __percpu_counter_add(&fs_info->dirty_metadata_bytes,
+                                    -eb->len,
+                                    fs_info->dirty_metadata_batch);
                ret = 1;
        } else {
                spin_unlock(&eb->refs_lock);
@@ -3446,15 +3437,9 @@ retry:
                         * swizzled back from swapper_space to tmpfs file
                         * mapping
                         */
-                       if (tree->ops &&
-                           tree->ops->write_cache_pages_lock_hook) {
-                               tree->ops->write_cache_pages_lock_hook(page,
-                                                              data, flush_fn);
-                       } else {
-                               if (!trylock_page(page)) {
-                                       flush_fn(data);
-                                       lock_page(page);
-                               }
+                       if (!trylock_page(page)) {
+                               flush_fn(data);
+                               lock_page(page);
                        }
 
                        if (unlikely(page->mapping != mapping)) {
@@ -3674,11 +3659,11 @@ int extent_invalidatepage(struct extent_io_tree *tree,
                          struct page *page, unsigned long offset)
 {
        struct extent_state *cached_state = NULL;
-       u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
+       u64 start = page_offset(page);
        u64 end = start + PAGE_CACHE_SIZE - 1;
        size_t blocksize = page->mapping->host->i_sb->s_blocksize;
 
-       start += (offset + blocksize - 1) & ~(blocksize - 1);
+       start += ALIGN(offset, blocksize);
        if (start > end)
                return 0;
 
@@ -3700,7 +3685,7 @@ int try_release_extent_state(struct extent_map_tree *map,
                             struct extent_io_tree *tree, struct page *page,
                             gfp_t mask)
 {
-       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+       u64 start = page_offset(page);
        u64 end = start + PAGE_CACHE_SIZE - 1;
        int ret = 1;
 
@@ -3739,7 +3724,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                               gfp_t mask)
 {
        struct extent_map *em;
-       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+       u64 start = page_offset(page);
        u64 end = start + PAGE_CACHE_SIZE - 1;
 
        if ((mask & __GFP_WAIT) &&
@@ -3797,7 +3782,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
                len = last - offset;
                if (len == 0)
                        break;
-               len = (len + sectorsize - 1) & ~(sectorsize - 1);
+               len = ALIGN(len, sectorsize);
                em = get_extent(inode, NULL, 0, offset, len, 0);
                if (IS_ERR_OR_NULL(em))
                        return em;
@@ -3995,8 +3980,6 @@ static void __free_extent_buffer(struct extent_buffer *eb)
        list_del(&eb->leak_list);
        spin_unlock_irqrestore(&leak_lock, flags);
 #endif
-       if (eb->pages && eb->pages != eb->inline_pages)
-               kfree(eb->pages);
        kmem_cache_free(extent_buffer_cache, eb);
 }
 
@@ -4037,19 +4020,12 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        atomic_set(&eb->refs, 1);
        atomic_set(&eb->io_pages, 0);
 
-       if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
-               struct page **pages;
-               int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
-                       PAGE_CACHE_SHIFT;
-               pages = kzalloc(num_pages, mask);
-               if (!pages) {
-                       __free_extent_buffer(eb);
-                       return NULL;
-               }
-               eb->pages = pages;
-       } else {
-               eb->pages = eb->inline_pages;
-       }
+       /*
+        * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
+        */
+       BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
+               > MAX_INLINE_EXTENT_BUFFER_SIZE);
+       BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
 
        return eb;
 }
@@ -4180,6 +4156,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
+       int refs;
        /* the ref bit is tricky.  We have to make sure it is set
         * if we have the buffer dirty.   Otherwise the
         * code to free a buffer can end up dropping a dirty
@@ -4200,6 +4177,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
         * So bump the ref count first, then set the bit.  If someone
         * beat us to it, drop the ref we added.
         */
+       refs = atomic_read(&eb->refs);
+       if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+               return;
+
        spin_lock(&eb->refs_lock);
        if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
                atomic_inc(&eb->refs);
@@ -4401,9 +4382,20 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
 
 void free_extent_buffer(struct extent_buffer *eb)
 {
+       int refs;
+       int old;
        if (!eb)
                return;
 
+       while (1) {
+               refs = atomic_read(&eb->refs);
+               if (refs <= 3)
+                       break;
+               old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
+               if (old == refs)
+                       return;
+       }
+
        spin_lock(&eb->refs_lock);
        if (atomic_read(&eb->refs) == 2 &&
            test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
index 2eacfab..6068a19 100644 (file)
@@ -72,10 +72,9 @@ struct extent_io_ops {
        int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
        int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
        extent_submit_bio_hook_t *submit_bio_hook;
-       int (*merge_bio_hook)(struct page *page, unsigned long offset,
+       int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
                              size_t size, struct bio *bio,
                              unsigned long bio_flags);
-       int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
        int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
        int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
                                    struct extent_state *state, int mirror);
@@ -90,8 +89,6 @@ struct extent_io_ops {
                                  struct extent_state *other);
        void (*split_extent_hook)(struct inode *inode,
                                  struct extent_state *orig, u64 split);
-       int (*write_cache_pages_lock_hook)(struct page *page, void *data,
-                                          void (*flush_fn)(void *));
 };
 
 struct extent_io_tree {
@@ -161,8 +158,7 @@ struct extent_buffer {
         */
        wait_queue_head_t read_lock_wq;
        wait_queue_head_t lock_wq;
-       struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
-       struct page **pages;
+       struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
 };
 
 static inline void extent_set_compress_type(unsigned long *bio_flags,
index fdb7a8d..2834ca5 100644 (file)
@@ -1,6 +1,5 @@
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
 #include "ctree.h"
index 94aa53b..ec16020 100644 (file)
@@ -684,6 +684,24 @@ out:
        return ret;
 }
 
+static u64 btrfs_sector_sum_left(struct btrfs_ordered_sum *sums,
+                                struct btrfs_sector_sum *sector_sum,
+                                u64 total_bytes, u64 sectorsize)
+{
+       u64 tmp = sectorsize;
+       u64 next_sector = sector_sum->bytenr;
+       struct btrfs_sector_sum *next = sector_sum + 1;
+
+       while ((tmp + total_bytes) < sums->len) {
+               if (next_sector + sectorsize != next->bytenr)
+                       break;
+               tmp += sectorsize;
+               next_sector = next->bytenr;
+               next++;
+       }
+       return tmp;
+}
+
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
@@ -789,20 +807,32 @@ again:
                goto insert;
        }
 
-       if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
+       if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
            csum_size) {
-               u32 diff = (csum_offset + 1) * csum_size;
+               int extend_nr;
+               u64 tmp;
+               u32 diff;
+               u32 free_space;
 
-               /*
-                * is the item big enough already?  we dropped our lock
-                * before and need to recheck
-                */
-               if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
-                       goto csum;
+               if (btrfs_leaf_free_space(root, leaf) <
+                                sizeof(struct btrfs_item) + csum_size * 2)
+                       goto insert;
+
+               free_space = btrfs_leaf_free_space(root, leaf) -
+                                        sizeof(struct btrfs_item) - csum_size;
+               tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+                                           root->sectorsize);
+               tmp >>= root->fs_info->sb->s_blocksize_bits;
+               WARN_ON(tmp < 1);
+
+               extend_nr = max_t(int, 1, (int)tmp);
+               diff = (csum_offset + extend_nr) * csum_size;
+               diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);
 
                diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
-               if (diff != csum_size)
-                       goto insert;
+               diff = min(free_space, diff);
+               diff /= csum_size;
+               diff *= csum_size;
 
                btrfs_extend_item(trans, root, path, diff);
                goto csum;
@@ -812,19 +842,14 @@ insert:
        btrfs_release_path(path);
        csum_offset = 0;
        if (found_next) {
-               u64 tmp = total_bytes + root->sectorsize;
-               u64 next_sector = sector_sum->bytenr;
-               struct btrfs_sector_sum *next = sector_sum + 1;
+               u64 tmp;
 
-               while (tmp < sums->len) {
-                       if (next_sector + root->sectorsize != next->bytenr)
-                               break;
-                       tmp += root->sectorsize;
-                       next_sector = next->bytenr;
-                       next++;
-               }
-               tmp = min(tmp, next_offset - file_key.offset);
+               tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+                                           root->sectorsize);
                tmp >>= root->fs_info->sb->s_blocksize_bits;
+               tmp = min(tmp, (next_offset - file_key.offset) >>
+                                        root->fs_info->sb->s_blocksize_bits);
+
                tmp = max((u64)1, tmp);
                tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
                ins_size = csum_size * tmp;
index aeb8446..af1d060 100644 (file)
 #include <linux/statfs.h>
 #include <linux/compat.h>
 #include <linux/slab.h>
+#include <linux/btrfs.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ioctl.h"
 #include "print-tree.h"
 #include "tree-log.h"
 #include "locking.h"
@@ -374,6 +374,11 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 
        atomic_inc(&fs_info->defrag_running);
        while(1) {
+               /* Pause the auto defragger. */
+               if (test_bit(BTRFS_FS_STATE_REMOUNTING,
+                            &fs_info->fs_state))
+                       break;
+
                if (!__need_auto_defrag(fs_info->tree_root))
                        break;
 
@@ -505,8 +510,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
        loff_t isize = i_size_read(inode);
 
        start_pos = pos & ~((u64)root->sectorsize - 1);
-       num_bytes = (write_bytes + pos - start_pos +
-                   root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+       num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
 
        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1225,7 +1229,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
        struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int err = 0;
        int faili = 0;
@@ -1312,7 +1316,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                               struct iov_iter *i,
                                               loff_t pos)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        unsigned long first_index;
@@ -1500,7 +1504,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                                    unsigned long nr_segs, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        loff_t *ppos = &iocb->ki_pos;
        u64 start_pos;
@@ -1544,7 +1548,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
         * although we have opened a file as writable, we have
         * to stop this write operation to ensure FS consistency.
         */
-       if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+       if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
                mutex_unlock(&inode->i_mutex);
                err = -EROFS;
                goto out;
@@ -1627,7 +1631,20 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
         */
        if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
                               &BTRFS_I(inode)->runtime_flags)) {
-               btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
+               struct btrfs_trans_handle *trans;
+               struct btrfs_root *root = BTRFS_I(inode)->root;
+
+               /*
+                * We need to block on a committing transaction to keep us from
+                * throwing a ordered operation on to the list and causing
+                * something like sync to deadlock trying to flush out this
+                * inode.
+                */
+               trans = btrfs_start_transaction(root, 0);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
+               btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
+               btrfs_end_transaction(trans, root);
                if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
                        filemap_flush(inode->i_mapping);
        }
@@ -1654,16 +1671,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        struct btrfs_trans_handle *trans;
+       bool full_sync = 0;
 
        trace_btrfs_sync_file(file, datasync);
 
        /*
         * We write the dirty pages in the range and wait until they complete
         * out of the ->i_mutex. If so, we can flush the dirty pages by
-        * multi-task, and make the performance up.
+        * multi-task, and make the performance up.  See
+        * btrfs_wait_ordered_range for an explanation of the ASYNC check.
         */
        atomic_inc(&BTRFS_I(inode)->sync_writers);
-       ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+       if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                            &BTRFS_I(inode)->runtime_flags))
+               ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
        atomic_dec(&BTRFS_I(inode)->sync_writers);
        if (ret)
                return ret;
@@ -1675,7 +1697,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         * range being left.
         */
        atomic_inc(&root->log_batch);
-       btrfs_wait_ordered_range(inode, start, end - start + 1);
+       full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                            &BTRFS_I(inode)->runtime_flags);
+       if (full_sync)
+               btrfs_wait_ordered_range(inode, start, end - start + 1);
        atomic_inc(&root->log_batch);
 
        /*
@@ -1742,13 +1767,25 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        if (ret != BTRFS_NO_LOG_SYNC) {
                if (ret > 0) {
+                       /*
+                        * If we didn't already wait for ordered extents we need
+                        * to do that now.
+                        */
+                       if (!full_sync)
+                               btrfs_wait_ordered_range(inode, start,
+                                                        end - start + 1);
                        ret = btrfs_commit_transaction(trans, root);
                } else {
                        ret = btrfs_sync_log(trans, root);
-                       if (ret == 0)
+                       if (ret == 0) {
                                ret = btrfs_end_transaction(trans, root);
-                       else
+                       } else {
+                               if (!full_sync)
+                                       btrfs_wait_ordered_range(inode, start,
+                                                                end -
+                                                                start + 1);
                                ret = btrfs_commit_transaction(trans, root);
+                       }
                }
        } else {
                ret = btrfs_end_transaction(trans, root);
@@ -2102,7 +2139,7 @@ out:
 static long btrfs_fallocate(struct file *file, int mode,
                            loff_t offset, loff_t len)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct extent_state *cached_state = NULL;
        u64 cur_offset;
        u64 last_byte;
index 0be7a87..1f84fc0 100644 (file)
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
        u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
        int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
+       max_bitmaps = max(max_bitmaps, 1);
+
        BUG_ON(ctl->total_bitmaps > max_bitmaps);
 
        /*
@@ -1463,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 }
 
 static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+               unsigned long align)
 {
        struct btrfs_free_space *entry;
        struct rb_node *node;
+       u64 ctl_off;
+       u64 tmp;
+       u64 align_off;
        int ret;
 
        if (!ctl->free_space_offset.rb_node)
@@ -1481,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
                if (entry->bytes < *bytes)
                        continue;
 
+               /* make sure the space returned is big enough
+                * to match our requested alignment
+                */
+               if (*bytes >= align) {
+                       ctl_off = entry->offset - ctl->start;
+                       tmp = ctl_off + align - 1;;
+                       do_div(tmp, align);
+                       tmp = tmp * align + ctl->start;
+                       align_off = tmp - entry->offset;
+               } else {
+                       align_off = 0;
+                       tmp = entry->offset;
+               }
+
+               if (entry->bytes < *bytes + align_off)
+                       continue;
+
                if (entry->bitmap) {
-                       ret = search_bitmap(ctl, entry, offset, bytes);
-                       if (!ret)
+                       ret = search_bitmap(ctl, entry, &tmp, bytes);
+                       if (!ret) {
+                               *offset = tmp;
                                return entry;
+                       }
                        continue;
                }
 
-               *offset = entry->offset;
-               *bytes = entry->bytes;
+               *offset = tmp;
+               *bytes = entry->bytes - align_off;
                return entry;
        }
 
@@ -1636,10 +1661,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
        }
 
        /*
-        * some block groups are so tiny they can't be enveloped by a bitmap, so
-        * don't even bother to create a bitmap for this
+        * The original block groups from mkfs can be really small, like 8
+        * megabytes, so don't bother with a bitmap for those entries.  However
+        * some block groups can be smaller than what a bitmap would cover but
+        * are still large enough that they could overflow the 32k memory limit,
+        * so allow those block groups to still be allowed to have a bitmap
+        * entry.
         */
-       if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+       if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
                return false;
 
        return true;
@@ -2095,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
        struct btrfs_free_space *entry = NULL;
        u64 bytes_search = bytes + empty_size;
        u64 ret = 0;
+       u64 align_gap = 0;
+       u64 align_gap_len = 0;
 
        spin_lock(&ctl->tree_lock);
-       entry = find_free_space(ctl, &offset, &bytes_search);
+       entry = find_free_space(ctl, &offset, &bytes_search,
+                               block_group->full_stripe_len);
        if (!entry)
                goto out;
 
@@ -2107,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                if (!entry->bytes)
                        free_bitmap(ctl, entry);
        } else {
+
                unlink_free_space(ctl, entry);
-               entry->offset += bytes;
-               entry->bytes -= bytes;
+               align_gap_len = offset - entry->offset;
+               align_gap = entry->offset;
+
+               entry->offset = offset + bytes;
+               WARN_ON(entry->bytes < bytes + align_gap_len);
+
+               entry->bytes -= bytes + align_gap_len;
                if (!entry->bytes)
                        kmem_cache_free(btrfs_free_space_cachep, entry);
                else
@@ -2119,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 out:
        spin_unlock(&ctl->tree_lock);
 
+       if (align_gap_len)
+               __btrfs_add_free_space(ctl, align_gap, align_gap_len);
        return ret;
 }
 
index cc93b23..c226dae 100644 (file)
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
 #include <linux/mount.h>
+#include <linux/btrfs.h>
+#include <linux/blkdev.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ioctl.h"
 #include "print-tree.h"
 #include "ordered-data.h"
 #include "xattr.h"
@@ -54,6 +55,7 @@
 #include "locking.h"
 #include "free-space-cache.h"
 #include "inode-map.h"
+#include "backref.h"
 
 struct btrfs_iget_args {
        u64 ino;
@@ -231,8 +233,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
-       u64 aligned_end = (end + root->sectorsize - 1) &
-                       ~((u64)root->sectorsize - 1);
+       u64 aligned_end = ALIGN(end, root->sectorsize);
        u64 data_len = inline_len;
        int ret;
 
@@ -265,6 +266,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
                return 1;
        }
 
+       set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
        return 0;
@@ -389,7 +391,7 @@ again:
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
-       num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+       num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        total_in = 0;
        ret = 0;
@@ -488,15 +490,13 @@ cont:
                 * up to a block size boundary so the allocator does sane
                 * things
                 */
-               total_compressed = (total_compressed + blocksize - 1) &
-                       ~(blocksize - 1);
+               total_compressed = ALIGN(total_compressed, blocksize);
 
                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
-               total_in = (total_in + PAGE_CACHE_SIZE - 1) &
-                       ~(PAGE_CACHE_SIZE - 1);
+               total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
@@ -608,7 +608,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
        if (list_empty(&async_cow->extents))
                return 0;
 
-
+again:
        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
@@ -648,6 +648,8 @@ retry:
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
+                       else if (ret)
+                               unlock_page(async_cow->locked_page);
                        kfree(async_extent);
                        cond_resched();
                        continue;
@@ -672,6 +674,7 @@ retry:
 
                if (ret) {
                        int i;
+
                        for (i = 0; i < async_extent->nr_pages; i++) {
                                WARN_ON(async_extent->pages[i]->mapping);
                                page_cache_release(async_extent->pages[i]);
@@ -679,12 +682,10 @@ retry:
                        kfree(async_extent->pages);
                        async_extent->nr_pages = 0;
                        async_extent->pages = NULL;
-                       unlock_extent(io_tree, async_extent->start,
-                                     async_extent->start +
-                                     async_extent->ram_size - 1);
+
                        if (ret == -ENOSPC)
                                goto retry;
-                       goto out_free; /* JDM: Requeue? */
+                       goto out_free;
                }
 
                /*
@@ -696,10 +697,13 @@ retry:
                                        async_extent->ram_size - 1, 0);
 
                em = alloc_extent_map();
-               BUG_ON(!em); /* -ENOMEM */
+               if (!em)
+                       goto out_free_reserve;
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
+               em->mod_start = em->start;
+               em->mod_len = em->len;
 
                em->block_start = ins.objectid;
                em->block_len = ins.offset;
@@ -726,6 +730,9 @@ retry:
                                                async_extent->ram_size - 1, 0);
                }
 
+               if (ret)
+                       goto out_free_reserve;
+
                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
@@ -733,7 +740,8 @@ retry:
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
-               BUG_ON(ret); /* -ENOMEM */
+               if (ret)
+                       goto out_free_reserve;
 
                /*
                 * clear dirty, set writeback and unlock the pages.
@@ -754,18 +762,30 @@ retry:
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);
-
-               BUG_ON(ret); /* -ENOMEM */
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
+               if (ret)
+                       goto out;
                cond_resched();
        }
        ret = 0;
 out:
        return ret;
+out_free_reserve:
+       btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
 out_free:
+       extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+                                    async_extent->start,
+                                    async_extent->start +
+                                    async_extent->ram_size - 1,
+                                    NULL, EXTENT_CLEAR_UNLOCK_PAGE |
+                                    EXTENT_CLEAR_UNLOCK |
+                                    EXTENT_CLEAR_DELALLOC |
+                                    EXTENT_CLEAR_DIRTY |
+                                    EXTENT_SET_WRITEBACK |
+                                    EXTENT_END_WRITEBACK);
        kfree(async_extent);
-       goto out;
+       goto again;
 }
 
 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
@@ -834,7 +854,7 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
 
        BUG_ON(btrfs_is_free_space_inode(inode));
 
-       num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+       num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        disk_num_bytes = num_bytes;
 
@@ -892,6 +912,8 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
                em->orig_start = em->start;
                ram_size = ins.offset;
                em->len = ins.offset;
+               em->mod_start = em->start;
+               em->mod_len = em->len;
 
                em->block_start = ins.objectid;
                em->block_len = ins.offset;
@@ -1338,6 +1360,8 @@ out_check:
                        em->block_start = disk_bytenr;
                        em->orig_block_len = disk_num_bytes;
                        em->bdev = root->fs_info->fs_devices->latest_bdev;
+                       em->mod_start = em->start;
+                       em->mod_len = em->len;
                        set_bit(EXTENT_FLAG_PINNED, &em->flags);
                        set_bit(EXTENT_FLAG_FILLING, &em->flags);
                        em->generation = -1;
@@ -1508,14 +1532,22 @@ static void btrfs_set_bit_hook(struct inode *inode,
                        spin_unlock(&BTRFS_I(inode)->lock);
                }
 
-               spin_lock(&root->fs_info->delalloc_lock);
+               __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
+                                    root->fs_info->delalloc_batch);
+               spin_lock(&BTRFS_I(inode)->lock);
                BTRFS_I(inode)->delalloc_bytes += len;
-               root->fs_info->delalloc_bytes += len;
-               if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
-                       list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
-                                     &root->fs_info->delalloc_inodes);
+               if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+                                        &BTRFS_I(inode)->runtime_flags)) {
+                       spin_lock(&root->fs_info->delalloc_lock);
+                       if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
+                               list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
+                                             &root->fs_info->delalloc_inodes);
+                               set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+                                       &BTRFS_I(inode)->runtime_flags);
+                       }
+                       spin_unlock(&root->fs_info->delalloc_lock);
                }
-               spin_unlock(&root->fs_info->delalloc_lock);
+               spin_unlock(&BTRFS_I(inode)->lock);
        }
 }
 
@@ -1550,15 +1582,22 @@ static void btrfs_clear_bit_hook(struct inode *inode,
                    && do_list)
                        btrfs_free_reserved_data_space(inode, len);
 
-               spin_lock(&root->fs_info->delalloc_lock);
-               root->fs_info->delalloc_bytes -= len;
+               __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
+                                    root->fs_info->delalloc_batch);
+               spin_lock(&BTRFS_I(inode)->lock);
                BTRFS_I(inode)->delalloc_bytes -= len;
-
                if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
-                   !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
-                       list_del_init(&BTRFS_I(inode)->delalloc_inodes);
+                   test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+                            &BTRFS_I(inode)->runtime_flags)) {
+                       spin_lock(&root->fs_info->delalloc_lock);
+                       if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
+                               list_del_init(&BTRFS_I(inode)->delalloc_inodes);
+                               clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+                                         &BTRFS_I(inode)->runtime_flags);
+                       }
+                       spin_unlock(&root->fs_info->delalloc_lock);
                }
-               spin_unlock(&root->fs_info->delalloc_lock);
+               spin_unlock(&BTRFS_I(inode)->lock);
        }
 }
 
@@ -1566,7 +1605,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
  * we don't create bios that span stripes or chunks
  */
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
                         size_t size, struct bio *bio,
                         unsigned long bio_flags)
 {
@@ -1581,7 +1620,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
 
        length = bio->bi_size;
        map_length = length;
-       ret = btrfs_map_block(root->fs_info, READ, logical,
+       ret = btrfs_map_block(root->fs_info, rw, logical,
                              &map_length, NULL, 0);
        /* Will always return 0 with map_multi == NULL */
        BUG_ON(ret < 0);
@@ -1892,6 +1931,640 @@ out:
        return ret;
 }
 
+/* snapshot-aware defrag */
+struct sa_defrag_extent_backref {
+       struct rb_node node;
+       struct old_sa_defrag_extent *old;
+       u64 root_id;
+       u64 inum;
+       u64 file_pos;
+       u64 extent_offset;
+       u64 num_bytes;
+       u64 generation;
+};
+
+struct old_sa_defrag_extent {
+       struct list_head list;
+       struct new_sa_defrag_extent *new;
+
+       u64 extent_offset;
+       u64 bytenr;
+       u64 offset;
+       u64 len;
+       int count;
+};
+
+struct new_sa_defrag_extent {
+       struct rb_root root;
+       struct list_head head;
+       struct btrfs_path *path;
+       struct inode *inode;
+       u64 file_pos;
+       u64 len;
+       u64 bytenr;
+       u64 disk_len;
+       u8 compress_type;
+};
+
+static int backref_comp(struct sa_defrag_extent_backref *b1,
+                       struct sa_defrag_extent_backref *b2)
+{
+       if (b1->root_id < b2->root_id)
+               return -1;
+       else if (b1->root_id > b2->root_id)
+               return 1;
+
+       if (b1->inum < b2->inum)
+               return -1;
+       else if (b1->inum > b2->inum)
+               return 1;
+
+       if (b1->file_pos < b2->file_pos)
+               return -1;
+       else if (b1->file_pos > b2->file_pos)
+               return 1;
+
+       /*
+        * [------------------------------] ===> (a range of space)
+        *     |<--->|   |<---->| =============> (fs/file tree A)
+        * |<---------------------------->| ===> (fs/file tree B)
+        *
+        * A range of space can refer to two file extents in one tree while
+        * refer to only one file extent in another tree.
+        *
+        * So we may process a disk offset more than one time(two extents in A)
+        * and locate at the same extent(one extent in B), then insert two same
+        * backrefs(both refer to the extent in B).
+        */
+       return 0;
+}
+
+static void backref_insert(struct rb_root *root,
+                          struct sa_defrag_extent_backref *backref)
+{
+       struct rb_node **p = &root->rb_node;
+       struct rb_node *parent = NULL;
+       struct sa_defrag_extent_backref *entry;
+       int ret;
+
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
+
+               ret = backref_comp(backref, entry);
+               if (ret < 0)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+
+       rb_link_node(&backref->node, parent, p);
+       rb_insert_color(&backref->node, root);
+}
+
+/*
+ * Note the backref might has changed, and in this case we just return 0.
+ */
+static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
+                                      void *ctx)
+{
+       struct btrfs_file_extent_item *extent;
+       struct btrfs_fs_info *fs_info;
+       struct old_sa_defrag_extent *old = ctx;
+       struct new_sa_defrag_extent *new = old->new;
+       struct btrfs_path *path = new->path;
+       struct btrfs_key key;
+       struct btrfs_root *root;
+       struct sa_defrag_extent_backref *backref;
+       struct extent_buffer *leaf;
+       struct inode *inode = new->inode;
+       int slot;
+       int ret;
+       u64 extent_offset;
+       u64 num_bytes;
+
+       if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
+           inum == btrfs_ino(inode))
+               return 0;
+
+       key.objectid = root_id;
+       key.type = BTRFS_ROOT_ITEM_KEY;
+       key.offset = (u64)-1;
+
+       fs_info = BTRFS_I(inode)->root->fs_info;
+       root = btrfs_read_fs_root_no_name(fs_info, &key);
+       if (IS_ERR(root)) {
+               if (PTR_ERR(root) == -ENOENT)
+                       return 0;
+               WARN_ON(1);
+               pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
+                        inum, offset, root_id);
+               return PTR_ERR(root);
+       }
+
+       key.objectid = inum;
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       if (offset > (u64)-1 << 32)
+               key.offset = 0;
+       else
+               key.offset = offset;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0) {
+               WARN_ON(1);
+               return ret;
+       }
+
+       while (1) {
+               cond_resched();
+
+               leaf = path->nodes[0];
+               slot = path->slots[0];
+
+               if (slot >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0) {
+                               goto out;
+                       } else if (ret > 0) {
+                               ret = 0;
+                               goto out;
+                       }
+                       continue;
+               }
+
+               path->slots[0]++;
+
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+
+               if (key.objectid > inum)
+                       goto out;
+
+               if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
+                       continue;
+
+               extent = btrfs_item_ptr(leaf, slot,
+                                       struct btrfs_file_extent_item);
+
+               if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
+                       continue;
+
+               extent_offset = btrfs_file_extent_offset(leaf, extent);
+               if (key.offset - extent_offset != offset)
+                       continue;
+
+               num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
+               if (extent_offset >= old->extent_offset + old->offset +
+                   old->len || extent_offset + num_bytes <=
+                   old->extent_offset + old->offset)
+                       continue;
+
+               break;
+       }
+
+       backref = kmalloc(sizeof(*backref), GFP_NOFS);
+       if (!backref) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       backref->root_id = root_id;
+       backref->inum = inum;
+       backref->file_pos = offset + extent_offset;
+       backref->num_bytes = num_bytes;
+       backref->extent_offset = extent_offset;
+       backref->generation = btrfs_file_extent_generation(leaf, extent);
+       backref->old = old;
+       backref_insert(&new->root, backref);
+       old->count++;
+out:
+       btrfs_release_path(path);
+       WARN_ON(ret);
+       return ret;
+}
+
+static noinline bool record_extent_backrefs(struct btrfs_path *path,
+                                  struct new_sa_defrag_extent *new)
+{
+       struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
+       struct old_sa_defrag_extent *old, *tmp;
+       int ret;
+
+       new->path = path;
+
+       list_for_each_entry_safe(old, tmp, &new->head, list) {
+               ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+                                                 path, record_one_backref,
+                                                 old);
+               BUG_ON(ret < 0 && ret != -ENOENT);
+
+               /* no backref to be processed for this extent */
+               if (!old->count) {
+                       list_del(&old->list);
+                       kfree(old);
+               }
+       }
+
+       if (list_empty(&new->head))
+               return false;
+
+       return true;
+}
+
+static int relink_is_mergable(struct extent_buffer *leaf,
+                             struct btrfs_file_extent_item *fi,
+                             u64 disk_bytenr)
+{
+       if (btrfs_file_extent_disk_bytenr(leaf, fi) != disk_bytenr)
+               return 0;
+
+       if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
+               return 0;
+
+       if (btrfs_file_extent_compression(leaf, fi) ||
+           btrfs_file_extent_encryption(leaf, fi) ||
+           btrfs_file_extent_other_encoding(leaf, fi))
+               return 0;
+
+       return 1;
+}
+
+/*
+ * Note the backref might has changed, and in this case we just return 0.
+ */
+static noinline int relink_extent_backref(struct btrfs_path *path,
+                                struct sa_defrag_extent_backref *prev,
+                                struct sa_defrag_extent_backref *backref)
+{
+       struct btrfs_file_extent_item *extent;
+       struct btrfs_file_extent_item *item;
+       struct btrfs_ordered_extent *ordered;
+       struct btrfs_trans_handle *trans;
+       struct btrfs_fs_info *fs_info;
+       struct btrfs_root *root;
+       struct btrfs_key key;
+       struct extent_buffer *leaf;
+       struct old_sa_defrag_extent *old = backref->old;
+       struct new_sa_defrag_extent *new = old->new;
+       struct inode *src_inode = new->inode;
+       struct inode *inode;
+       struct extent_state *cached = NULL;
+       int ret = 0;
+       u64 start;
+       u64 len;
+       u64 lock_start;
+       u64 lock_end;
+       bool merge = false;
+       int index;
+
+       if (prev && prev->root_id == backref->root_id &&
+           prev->inum == backref->inum &&
+           prev->file_pos + prev->num_bytes == backref->file_pos)
+               merge = true;
+
+       /* step 1: get root */
+       key.objectid = backref->root_id;
+       key.type = BTRFS_ROOT_ITEM_KEY;
+       key.offset = (u64)-1;
+
+       fs_info = BTRFS_I(src_inode)->root->fs_info;
+       index = srcu_read_lock(&fs_info->subvol_srcu);
+
+       root = btrfs_read_fs_root_no_name(fs_info, &key);
+       if (IS_ERR(root)) {
+               srcu_read_unlock(&fs_info->subvol_srcu, index);
+               if (PTR_ERR(root) == -ENOENT)
+                       return 0;
+               return PTR_ERR(root);
+       }
+       if (btrfs_root_refs(&root->root_item) == 0) {
+               srcu_read_unlock(&fs_info->subvol_srcu, index);
+               /* parse ENOENT to 0 */
+               return 0;
+       }
+
+       /* step 2: get inode */
+       key.objectid = backref->inum;
+       key.type = BTRFS_INODE_ITEM_KEY;
+       key.offset = 0;
+
+       inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+       if (IS_ERR(inode)) {
+               srcu_read_unlock(&fs_info->subvol_srcu, index);
+               return 0;
+       }
+
+       srcu_read_unlock(&fs_info->subvol_srcu, index);
+
+       /* step 3: relink backref */
+       lock_start = backref->file_pos;
+       lock_end = backref->file_pos + backref->num_bytes - 1;
+       lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
+                        0, &cached);
+
+       ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
+       if (ordered) {
+               btrfs_put_ordered_extent(ordered);
+               goto out_unlock;
+       }
+
+       trans = btrfs_join_transaction(root);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out_unlock;
+       }
+
+       key.objectid = backref->inum;
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       key.offset = backref->file_pos;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0) {
+               goto out_free_path;
+       } else if (ret > 0) {
+               ret = 0;
+               goto out_free_path;
+       }
+
+       extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                               struct btrfs_file_extent_item);
+
+       if (btrfs_file_extent_generation(path->nodes[0], extent) !=
+           backref->generation)
+               goto out_free_path;
+
+       btrfs_release_path(path);
+
+       start = backref->file_pos;
+       if (backref->extent_offset < old->extent_offset + old->offset)
+               start += old->extent_offset + old->offset -
+                        backref->extent_offset;
+
+       len = min(backref->extent_offset + backref->num_bytes,
+                 old->extent_offset + old->offset + old->len);
+       len -= max(backref->extent_offset, old->extent_offset + old->offset);
+
+       ret = btrfs_drop_extents(trans, root, inode, start,
+                                start + len, 1);
+       if (ret)
+               goto out_free_path;
+again:
+       key.objectid = btrfs_ino(inode);
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       key.offset = start;
+
+       if (merge) {
+               struct btrfs_file_extent_item *fi;
+               u64 extent_len;
+               struct btrfs_key found_key;
+
+               ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
+               if (ret < 0)
+                       goto out_free_path;
+
+               path->slots[0]--;
+               leaf = path->nodes[0];
+               btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+               fi = btrfs_item_ptr(leaf, path->slots[0],
+                                   struct btrfs_file_extent_item);
+               extent_len = btrfs_file_extent_num_bytes(leaf, fi);
+
+               if (relink_is_mergable(leaf, fi, new->bytenr) &&
+                   extent_len + found_key.offset == start) {
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       extent_len + len);
+                       btrfs_mark_buffer_dirty(leaf);
+                       inode_add_bytes(inode, len);
+
+                       ret = 1;
+                       goto out_free_path;
+               } else {
+                       merge = false;
+                       btrfs_release_path(path);
+                       goto again;
+               }
+       }
+
+       ret = btrfs_insert_empty_item(trans, root, path, &key,
+                                       sizeof(*extent));
+       if (ret) {
+               btrfs_abort_transaction(trans, root, ret);
+               goto out_free_path;
+       }
+
+       leaf = path->nodes[0];
+       item = btrfs_item_ptr(leaf, path->slots[0],
+                               struct btrfs_file_extent_item);
+       btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
+       btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
+       btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
+       btrfs_set_file_extent_num_bytes(leaf, item, len);
+       btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
+       btrfs_set_file_extent_generation(leaf, item, trans->transid);
+       btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
+       btrfs_set_file_extent_compression(leaf, item, new->compress_type);
+       btrfs_set_file_extent_encryption(leaf, item, 0);
+       btrfs_set_file_extent_other_encoding(leaf, item, 0);
+
+       btrfs_mark_buffer_dirty(leaf);
+       inode_add_bytes(inode, len);
+
+       ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
+                       new->disk_len, 0,
+                       backref->root_id, backref->inum,
+                       new->file_pos, 0);      /* start - extent_offset */
+       if (ret) {
+               btrfs_abort_transaction(trans, root, ret);
+               goto out_free_path;
+       }
+
+       ret = 1;
+out_free_path:
+       btrfs_release_path(path);
+       btrfs_end_transaction(trans, root);
+out_unlock:
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
+                            &cached, GFP_NOFS);
+       iput(inode);
+       return ret;
+}
+
+static void relink_file_extents(struct new_sa_defrag_extent *new)
+{
+       struct btrfs_path *path;
+       struct old_sa_defrag_extent *old, *tmp;
+       struct sa_defrag_extent_backref *backref;
+       struct sa_defrag_extent_backref *prev = NULL;
+       struct inode *inode;
+       struct btrfs_root *root;
+       struct rb_node *node;
+       int ret;
+
+       inode = new->inode;
+       root = BTRFS_I(inode)->root;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return;
+
+       if (!record_extent_backrefs(path, new)) {
+               btrfs_free_path(path);
+               goto out;
+       }
+       btrfs_release_path(path);
+
+       while (1) {
+               node = rb_first(&new->root);
+               if (!node)
+                       break;
+               rb_erase(node, &new->root);
+
+               backref = rb_entry(node, struct sa_defrag_extent_backref, node);
+
+               ret = relink_extent_backref(path, prev, backref);
+               WARN_ON(ret < 0);
+
+               kfree(prev);
+
+               if (ret == 1)
+                       prev = backref;
+               else
+                       prev = NULL;
+               cond_resched();
+       }
+       kfree(prev);
+
+       btrfs_free_path(path);
+
+       list_for_each_entry_safe(old, tmp, &new->head, list) {
+               list_del(&old->list);
+               kfree(old);
+       }
+out:
+       atomic_dec(&root->fs_info->defrag_running);
+       wake_up(&root->fs_info->transaction_wait);
+
+       kfree(new);
+}
+
+static struct new_sa_defrag_extent *
+record_old_file_extents(struct inode *inode,
+                       struct btrfs_ordered_extent *ordered)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       struct old_sa_defrag_extent *old, *tmp;
+       struct new_sa_defrag_extent *new;
+       int ret;
+
+       new = kmalloc(sizeof(*new), GFP_NOFS);
+       if (!new)
+               return NULL;
+
+       new->inode = inode;
+       new->file_pos = ordered->file_offset;
+       new->len = ordered->len;
+       new->bytenr = ordered->start;
+       new->disk_len = ordered->disk_len;
+       new->compress_type = ordered->compress_type;
+       new->root = RB_ROOT;
+       INIT_LIST_HEAD(&new->head);
+
+       path = btrfs_alloc_path();
+       if (!path)
+               goto out_kfree;
+
+       key.objectid = btrfs_ino(inode);
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       key.offset = new->file_pos;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out_free_path;
+       if (ret > 0 && path->slots[0] > 0)
+               path->slots[0]--;
+
+       /* find out all the old extents for the file range */
+       while (1) {
+               struct btrfs_file_extent_item *extent;
+               struct extent_buffer *l;
+               int slot;
+               u64 num_bytes;
+               u64 offset;
+               u64 end;
+               u64 disk_bytenr;
+               u64 extent_offset;
+
+               l = path->nodes[0];
+               slot = path->slots[0];
+
+               if (slot >= btrfs_header_nritems(l)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto out_free_list;
+                       else if (ret > 0)
+                               break;
+                       continue;
+               }
+
+               btrfs_item_key_to_cpu(l, &key, slot);
+
+               if (key.objectid != btrfs_ino(inode))
+                       break;
+               if (key.type != BTRFS_EXTENT_DATA_KEY)
+                       break;
+               if (key.offset >= new->file_pos + new->len)
+                       break;
+
+               extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
+
+               num_bytes = btrfs_file_extent_num_bytes(l, extent);
+               if (key.offset + num_bytes < new->file_pos)
+                       goto next;
+
+               disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
+               if (!disk_bytenr)
+                       goto next;
+
+               extent_offset = btrfs_file_extent_offset(l, extent);
+
+               old = kmalloc(sizeof(*old), GFP_NOFS);
+               if (!old)
+                       goto out_free_list;
+
+               offset = max(new->file_pos, key.offset);
+               end = min(new->file_pos + new->len, key.offset + num_bytes);
+
+               old->bytenr = disk_bytenr;
+               old->extent_offset = extent_offset;
+               old->offset = offset - key.offset;
+               old->len = end - offset;
+               old->new = new;
+               old->count = 0;
+               list_add_tail(&old->list, &new->head);
+next:
+               path->slots[0]++;
+               cond_resched();
+       }
+
+       btrfs_free_path(path);
+       atomic_inc(&root->fs_info->defrag_running);
+
+       return new;
+
+out_free_list:
+       list_for_each_entry_safe(old, tmp, &new->head, list) {
+               list_del(&old->list);
+               kfree(old);
+       }
+out_free_path:
+       btrfs_free_path(path);
+out_kfree:
+       kfree(new);
+       return NULL;
+}
+
 /*
  * helper function for btrfs_finish_ordered_io, this
  * just reads in some of the csum leaves to prime them into ram
@@ -1909,6 +2582,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
        struct btrfs_trans_handle *trans = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_state *cached_state = NULL;
+       struct new_sa_defrag_extent *new = NULL;
        int compress_type = 0;
        int ret;
        bool nolock;
@@ -1943,6 +2617,20 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                         ordered_extent->file_offset + ordered_extent->len - 1,
                         0, &cached_state);
 
+       ret = test_range_bit(io_tree, ordered_extent->file_offset,
+                       ordered_extent->file_offset + ordered_extent->len - 1,
+                       EXTENT_DEFRAG, 1, cached_state);
+       if (ret) {
+               u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
+               if (last_snapshot >= BTRFS_I(inode)->generation)
+                       /* the inode is shared */
+                       new = record_old_file_extents(inode, ordered_extent);
+
+               clear_extent_bit(io_tree, ordered_extent->file_offset,
+                       ordered_extent->file_offset + ordered_extent->len - 1,
+                       EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
+       }
+
        if (nolock)
                trans = btrfs_join_transaction_nolock(root);
        else
@@ -2001,17 +2689,33 @@ out:
        if (trans)
                btrfs_end_transaction(trans, root);
 
-       if (ret)
+       if (ret) {
                clear_extent_uptodate(io_tree, ordered_extent->file_offset,
                                      ordered_extent->file_offset +
                                      ordered_extent->len - 1, NULL, GFP_NOFS);
 
+               /*
+                * If the ordered extent had an IOERR or something else went
+                * wrong we need to return the space for this ordered extent
+                * back to the allocator.
+                */
+               if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
+                   !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
+                       btrfs_free_reserved_extent(root, ordered_extent->start,
+                                                  ordered_extent->disk_len);
+       }
+
+
        /*
         * This needs to be done to make sure anybody waiting knows we are done
         * updating everything for this ordered extent.
         */
        btrfs_remove_ordered_extent(inode, ordered_extent);
 
+       /* for snapshot-aware defrag */
+       if (new)
+               relink_file_extents(new);
+
        /* once for us */
        btrfs_put_ordered_extent(ordered_extent);
        /* once for the tree */
@@ -2062,7 +2766,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state, int mirror)
 {
-       size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
+       size_t offset = start - page_offset(page);
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        char *kaddr;
@@ -2167,11 +2871,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
        }
 }
 
-enum btrfs_orphan_cleanup_state {
-       ORPHAN_CLEANUP_STARTED  = 1,
-       ORPHAN_CLEANUP_DONE     = 2,
-};
-
 /*
  * This is called in transaction commit time. If there are no orphan
  * files in the subvolume, it removes orphan item and frees block_rsv
@@ -2469,6 +3168,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                 */
                set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                        &BTRFS_I(inode)->runtime_flags);
+               atomic_inc(&root->orphan_inodes);
 
                /* if we have links, this was a truncate, lets do that */
                if (inode->i_nlink) {
@@ -2491,6 +3191,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                goto out;
 
                        ret = btrfs_truncate(inode);
+                       if (ret)
+                               btrfs_orphan_del(NULL, inode);
                } else {
                        nr_unlink++;
                }
@@ -2709,34 +3411,41 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                            struct btrfs_inode_item *item,
                            struct inode *inode)
 {
-       btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
-       btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
-       btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
-       btrfs_set_inode_mode(leaf, item, inode->i_mode);
-       btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+       struct btrfs_map_token token;
+
+       btrfs_init_map_token(&token);
+
+       btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
+       btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
+       btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
+                                  &token);
+       btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
+       btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
 
-       btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
-                              inode->i_atime.tv_sec);
-       btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
-                               inode->i_atime.tv_nsec);
+       btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+                                    inode->i_atime.tv_sec, &token);
+       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+                                     inode->i_atime.tv_nsec, &token);
 
-       btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
-                              inode->i_mtime.tv_sec);
-       btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
-                               inode->i_mtime.tv_nsec);
+       btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+                                    inode->i_mtime.tv_sec, &token);
+       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+                                     inode->i_mtime.tv_nsec, &token);
 
-       btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
-                              inode->i_ctime.tv_sec);
-       btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
-                               inode->i_ctime.tv_nsec);
+       btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+                                    inode->i_ctime.tv_sec, &token);
+       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+                                     inode->i_ctime.tv_nsec, &token);
 
-       btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
-       btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
-       btrfs_set_inode_sequence(leaf, item, inode->i_version);
-       btrfs_set_inode_transid(leaf, item, trans->transid);
-       btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
-       btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
-       btrfs_set_inode_block_group(leaf, item, 0);
+       btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
+                                    &token);
+       btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
+                                        &token);
+       btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+       btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
+       btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
+       btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
+       btrfs_set_token_inode_block_group(leaf, item, 0, &token);
 }
 
 /*
@@ -3304,7 +4013,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        u64 extent_num_bytes = 0;
        u64 extent_offset = 0;
        u64 item_end = 0;
-       u64 mask = root->sectorsize - 1;
        u32 found_type = (u8)-1;
        int found_extent;
        int del_item;
@@ -3328,7 +4036,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
         * extent just the way it is.
         */
        if (root->ref_cows || root == root->fs_info->tree_root)
-               btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0);
+               btrfs_drop_extent_cache(inode, ALIGN(new_size,
+                                       root->sectorsize), (u64)-1, 0);
 
        /*
         * This function is also used to drop the items in the log tree before
@@ -3407,10 +4116,9 @@ search_again:
                        if (!del_item) {
                                u64 orig_num_bytes =
                                        btrfs_file_extent_num_bytes(leaf, fi);
-                               extent_num_bytes = new_size -
-                                       found_key.offset + root->sectorsize - 1;
-                               extent_num_bytes = extent_num_bytes &
-                                       ~((u64)root->sectorsize - 1);
+                               extent_num_bytes = ALIGN(new_size -
+                                               found_key.offset,
+                                               root->sectorsize);
                                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                         extent_num_bytes);
                                num_dec = (orig_num_bytes -
@@ -3646,9 +4354,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
        struct extent_map *em = NULL;
        struct extent_state *cached_state = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-       u64 mask = root->sectorsize - 1;
-       u64 hole_start = (oldsize + mask) & ~mask;
-       u64 block_end = (size + mask) & ~mask;
+       u64 hole_start = ALIGN(oldsize, root->sectorsize);
+       u64 block_end = ALIGN(size, root->sectorsize);
        u64 last_byte;
        u64 cur_offset;
        u64 hole_size;
@@ -3681,7 +4388,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        break;
                }
                last_byte = min(extent_map_end(em), block_end);
-               last_byte = (last_byte + mask) & ~mask;
+               last_byte = ALIGN(last_byte , root->sectorsize);
                if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
                        struct extent_map *hole_em;
                        hole_size = last_byte - cur_offset;
@@ -3832,6 +4539,12 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 
                /* we don't support swapfiles, so vmtruncate shouldn't fail */
                truncate_setsize(inode, newsize);
+
+               /* Disable non-locked read DIO to avoid an endless truncate */
+               btrfs_inode_block_unlocked_dio(inode);
+               inode_dio_wait(inode);
+               btrfs_inode_resume_unlocked_dio(inode);
+
                ret = btrfs_truncate(inode);
                if (ret && inode->i_nlink)
                        btrfs_orphan_del(NULL, inode);
@@ -3904,6 +4617,12 @@ void btrfs_evict_inode(struct inode *inode)
                goto no_delete;
        }
 
+       ret = btrfs_commit_inode_delayed_inode(inode);
+       if (ret) {
+               btrfs_orphan_del(NULL, inode);
+               goto no_delete;
+       }
+
        rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv) {
                btrfs_orphan_del(NULL, inode);
@@ -3941,7 +4660,7 @@ void btrfs_evict_inode(struct inode *inode)
                        goto no_delete;
                }
 
-               trans = btrfs_start_transaction_lflush(root, 1);
+               trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        btrfs_orphan_del(NULL, inode);
                        btrfs_free_block_rsv(root, rsv);
@@ -3955,9 +4674,6 @@ void btrfs_evict_inode(struct inode *inode)
                        break;
 
                trans->block_rsv = &root->fs_info->trans_block_rsv;
-               ret = btrfs_update_inode(trans, root, inode);
-               BUG_ON(ret);
-
                btrfs_end_transaction(trans, root);
                trans = NULL;
                btrfs_btree_balance_dirty(root);
@@ -4391,7 +5107,7 @@ unsigned char btrfs_filetype_table[] = {
 static int btrfs_real_readdir(struct file *filp, void *dirent,
                              filldir_t filldir)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_item *item;
        struct btrfs_dir_item *di;
@@ -4854,7 +5570,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                if (btrfs_test_opt(root, NODATASUM))
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
                if (btrfs_test_opt(root, NODATACOW))
-                       BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+                       BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
+                               BTRFS_INODE_NODATASUM;
        }
 
        insert_inode_hash(inode);
@@ -5006,12 +5723,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
-       err = btrfs_update_inode(trans, root, inode);
-       if (err) {
-               drop_inode = 1;
-               goto out_unlock;
-       }
-
        /*
        * If the active LSM wants to access the inode during
        * d_instantiate it needs these. Smack checks to see
@@ -5396,8 +6107,7 @@ again:
        } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                size_t size;
                size = btrfs_file_extent_inline_len(leaf, item);
-               extent_end = (extent_start + size + root->sectorsize - 1) &
-                       ~((u64)root->sectorsize - 1);
+               extent_end = ALIGN(extent_start + size, root->sectorsize);
        }
 
        if (start >= extent_end) {
@@ -5469,8 +6179,7 @@ again:
                copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
                                size - extent_offset);
                em->start = extent_start + extent_offset;
-               em->len = (copy_size + root->sectorsize - 1) &
-                       ~((u64)root->sectorsize - 1);
+               em->len = ALIGN(copy_size, root->sectorsize);
                em->orig_block_len = em->len;
                em->orig_start = em->start;
                if (compress_type) {
@@ -5949,6 +6658,8 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
 
        em->start = start;
        em->orig_start = orig_start;
+       em->mod_start = start;
+       em->mod_len = len;
        em->len = len;
        em->block_len = block_len;
        em->block_start = block_start;
@@ -5990,16 +6701,12 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
        u64 len = bh_result->b_size;
        struct btrfs_trans_handle *trans;
        int unlock_bits = EXTENT_LOCKED;
-       int ret;
+       int ret = 0;
 
-       if (create) {
-               ret = btrfs_delalloc_reserve_space(inode, len);
-               if (ret)
-                       return ret;
+       if (create)
                unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
-       } else {
+       else
                len = min_t(u64, len, root->sectorsize);
-       }
 
        lockstart = start;
        lockend = start + len - 1;
@@ -6011,14 +6718,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
        if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
                return -ENOTBLK;
 
-       if (create) {
-               ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-                                    lockend, EXTENT_DELALLOC, NULL,
-                                    &cached_state, GFP_NOFS);
-               if (ret)
-                       goto unlock_err;
-       }
-
        em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
        if (IS_ERR(em)) {
                ret = PTR_ERR(em);
@@ -6050,7 +6749,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
        if (!create && (em->block_start == EXTENT_MAP_HOLE ||
                        test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
                free_extent_map(em);
-               ret = 0;
                goto unlock_err;
        }
 
@@ -6148,6 +6846,15 @@ unlock:
                 */
                if (start + len > i_size_read(inode))
                        i_size_write(inode, start + len);
+
+               spin_lock(&BTRFS_I(inode)->lock);
+               BTRFS_I(inode)->outstanding_extents++;
+               spin_unlock(&BTRFS_I(inode)->lock);
+
+               ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+                                    lockstart + len - 1, EXTENT_DELALLOC, NULL,
+                                    &cached_state, GFP_NOFS);
+               BUG_ON(ret);
        }
 
        /*
@@ -6156,24 +6863,9 @@ unlock:
         * aren't using if there is any left over space.
         */
        if (lockstart < lockend) {
-               if (create && len < lockend - lockstart) {
-                       clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-                                        lockstart + len - 1,
-                                        unlock_bits | EXTENT_DEFRAG, 1, 0,
-                                        &cached_state, GFP_NOFS);
-                       /*
-                        * Beside unlock, we also need to cleanup reserved space
-                        * for the left range by attaching EXTENT_DO_ACCOUNTING.
-                        */
-                       clear_extent_bit(&BTRFS_I(inode)->io_tree,
-                                        lockstart + len, lockend,
-                                        unlock_bits | EXTENT_DO_ACCOUNTING |
-                                        EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS);
-               } else {
-                       clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-                                        lockend, unlock_bits, 1, 0,
-                                        &cached_state, GFP_NOFS);
-               }
+               clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+                                lockend, unlock_bits, 1, 0,
+                                &cached_state, GFP_NOFS);
        } else {
                free_extent_state(cached_state);
        }
@@ -6183,9 +6875,6 @@ unlock:
        return 0;
 
 unlock_err:
-       if (create)
-               unlock_bits |= EXTENT_DO_ACCOUNTING;
-
        clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                         unlock_bits, 1, 0, &cached_state, GFP_NOFS);
        return ret;
@@ -6426,19 +7115,24 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        int async_submit = 0;
 
        map_length = orig_bio->bi_size;
-       ret = btrfs_map_block(root->fs_info, READ, start_sector << 9,
+       ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
                              &map_length, NULL, 0);
        if (ret) {
                bio_put(orig_bio);
                return -EIO;
        }
-
        if (map_length >= orig_bio->bi_size) {
                bio = orig_bio;
                goto submit;
        }
 
-       async_submit = 1;
+       /* async crcs make it difficult to collect full stripe writes. */
+       if (btrfs_get_alloc_profile(root, 1) &
+           (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
+               async_submit = 0;
+       else
+               async_submit = 1;
+
        bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
        if (!bio)
                return -ENOMEM;
@@ -6480,7 +7174,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        bio->bi_end_io = btrfs_end_dio_bio;
 
                        map_length = orig_bio->bi_size;
-                       ret = btrfs_map_block(root->fs_info, READ,
+                       ret = btrfs_map_block(root->fs_info, rw,
                                              start_sector << 9,
                                              &map_length, NULL, 0);
                        if (ret) {
@@ -6623,15 +7317,60 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
+       size_t count = 0;
+       int flags = 0;
+       bool wakeup = true;
+       bool relock = false;
+       ssize_t ret;
 
        if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
                            offset, nr_segs))
                return 0;
 
-       return __blockdev_direct_IO(rw, iocb, inode,
-                  BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-                  iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
-                  btrfs_submit_direct, 0);
+       atomic_inc(&inode->i_dio_count);
+       smp_mb__after_atomic_inc();
+
+       if (rw & WRITE) {
+               count = iov_length(iov, nr_segs);
+               /*
+                * If the write DIO is beyond the EOF, we need update
+                * the isize, but it is protected by i_mutex. So we can
+                * not unlock the i_mutex at this case.
+                */
+               if (offset + count <= inode->i_size) {
+                       mutex_unlock(&inode->i_mutex);
+                       relock = true;
+               }
+               ret = btrfs_delalloc_reserve_space(inode, count);
+               if (ret)
+                       goto out;
+       } else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+                                    &BTRFS_I(inode)->runtime_flags))) {
+               inode_dio_done(inode);
+               flags = DIO_LOCKING | DIO_SKIP_HOLES;
+               wakeup = false;
+       }
+
+       ret = __blockdev_direct_IO(rw, iocb, inode,
+                       BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+                       iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+                       btrfs_submit_direct, flags);
+       if (rw & WRITE) {
+               if (ret < 0 && ret != -EIOCBQUEUED)
+                       btrfs_delalloc_release_space(inode, count);
+               else if (ret >= 0 && (size_t)ret < count)
+                       btrfs_delalloc_release_space(inode,
+                                                    count - (size_t)ret);
+               else
+                       btrfs_delalloc_release_metadata(inode, 0);
+       }
+out:
+       if (wakeup)
+               inode_dio_done(inode);
+       if (relock)
+               mutex_lock(&inode->i_mutex);
+
+       return ret;
 }
 
 #define BTRFS_FIEMAP_FLAGS     (FIEMAP_FLAG_SYNC)
@@ -6735,8 +7474,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
                return;
        }
        lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
-       ordered = btrfs_lookup_ordered_extent(inode,
-                                          page_offset(page));
+       ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
        if (ordered) {
                /*
                 * IO on this page will never be started, so we need
@@ -6791,7 +7529,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       struct inode *inode = fdentry(vma->vm_file)->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_ordered_extent *ordered;
@@ -7216,8 +7954,9 @@ int btrfs_drop_inode(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
 
+       /* the snap/subvol tree is being deleted */
        if (btrfs_root_refs(&root->root_item) == 0 &&
-           !btrfs_is_free_space_inode(inode))
+           root != root->fs_info->tree_root)
                return 1;
        else
                return generic_drop_inode(inode);
@@ -7299,40 +8038,22 @@ fail:
 static int btrfs_getattr(struct vfsmount *mnt,
                         struct dentry *dentry, struct kstat *stat)
 {
+       u64 delalloc_bytes;
        struct inode *inode = dentry->d_inode;
        u32 blocksize = inode->i_sb->s_blocksize;
 
        generic_fillattr(inode, stat);
        stat->dev = BTRFS_I(inode)->root->anon_dev;
        stat->blksize = PAGE_CACHE_SIZE;
+
+       spin_lock(&BTRFS_I(inode)->lock);
+       delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
+       spin_unlock(&BTRFS_I(inode)->lock);
        stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
-               ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
+                       ALIGN(delalloc_bytes, blocksize)) >> 9;
        return 0;
 }
 
-/*
- * If a file is moved, it will inherit the cow and compression flags of the new
- * directory.
- */
-static void fixup_inode_flags(struct inode *dir, struct inode *inode)
-{
-       struct btrfs_inode *b_dir = BTRFS_I(dir);
-       struct btrfs_inode *b_inode = BTRFS_I(inode);
-
-       if (b_dir->flags & BTRFS_INODE_NODATACOW)
-               b_inode->flags |= BTRFS_INODE_NODATACOW;
-       else
-               b_inode->flags &= ~BTRFS_INODE_NODATACOW;
-
-       if (b_dir->flags & BTRFS_INODE_COMPRESS) {
-               b_inode->flags |= BTRFS_INODE_COMPRESS;
-               b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
-       } else {
-               b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
-                                   BTRFS_INODE_NOCOMPRESS);
-       }
-}
-
 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                           struct inode *new_dir, struct dentry *new_dentry)
 {
@@ -7498,8 +8219,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
        }
 
-       fixup_inode_flags(new_dir, old_inode);
-
        ret = btrfs_add_link(trans, new_dir, old_inode,
                             new_dentry->d_name.name,
                             new_dentry->d_name.len, 0, index);
@@ -7583,7 +8302,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 
        INIT_LIST_HEAD(&works);
        INIT_LIST_HEAD(&splice);
-again:
+
        spin_lock(&root->fs_info->delalloc_lock);
        list_splice_init(&root->fs_info->delalloc_inodes, &splice);
        while (!list_empty(&splice)) {
@@ -7593,8 +8312,11 @@ again:
                list_del_init(&binode->delalloc_inodes);
 
                inode = igrab(&binode->vfs_inode);
-               if (!inode)
+               if (!inode) {
+                       clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+                                 &binode->runtime_flags);
                        continue;
+               }
 
                list_add_tail(&binode->delalloc_inodes,
                              &root->fs_info->delalloc_inodes);
@@ -7619,13 +8341,6 @@ again:
                btrfs_wait_and_free_delalloc_work(work);
        }
 
-       spin_lock(&root->fs_info->delalloc_lock);
-       if (!list_empty(&root->fs_info->delalloc_inodes)) {
-               spin_unlock(&root->fs_info->delalloc_lock);
-               goto again;
-       }
-       spin_unlock(&root->fs_info->delalloc_lock);
-
        /* the filemap_flush will queue IO into the worker threads, but
         * we have to make sure the IO is actually started and that
         * ordered extents get created before we return
@@ -7801,8 +8516,9 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                        }
                }
 
-               ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
-                                          0, *alloc_hint, &ins, 1);
+               ret = btrfs_reserve_extent(trans, root,
+                                          min(num_bytes, 256ULL * 1024 * 1024),
+                                          min_size, 0, *alloc_hint, &ins, 1);
                if (ret) {
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
index 338f259..c83086f 100644 (file)
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/uuid.h>
+#include <linux/btrfs.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ioctl.h"
 #include "print-tree.h"
 #include "volumes.h"
 #include "locking.h"
@@ -152,7 +152,7 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
 
 static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
 {
-       struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
+       struct btrfs_inode *ip = BTRFS_I(file_inode(file));
        unsigned int flags = btrfs_flags_to_ioctl(ip->flags);
 
        if (copy_to_user(arg, &flags, sizeof(flags)))
@@ -177,7 +177,7 @@ static int check_flags(unsigned int flags)
 
 static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_inode *ip = BTRFS_I(inode);
        struct btrfs_root *root = ip->root;
        struct btrfs_trans_handle *trans;
@@ -310,7 +310,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 
 static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        return put_user(inode->i_generation, arg);
 }
@@ -363,46 +363,52 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
        return 0;
 }
 
-static noinline int create_subvol(struct btrfs_root *root,
+static noinline int create_subvol(struct inode *dir,
                                  struct dentry *dentry,
                                  char *name, int namelen,
                                  u64 *async_transid,
-                                 struct btrfs_qgroup_inherit **inherit)
+                                 struct btrfs_qgroup_inherit *inherit)
 {
        struct btrfs_trans_handle *trans;
        struct btrfs_key key;
        struct btrfs_root_item root_item;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
+       struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_root *new_root;
-       struct dentry *parent = dentry->d_parent;
-       struct inode *dir;
+       struct btrfs_block_rsv block_rsv;
        struct timespec cur_time = CURRENT_TIME;
        int ret;
        int err;
        u64 objectid;
        u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
        u64 index = 0;
+       u64 qgroup_reserved;
        uuid_le new_uuid;
 
        ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
        if (ret)
                return ret;
 
-       dir = parent->d_inode;
-
+       btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
        /*
-        * 1 - inode item
-        * 2 - refs
-        * 1 - root item
-        * 2 - dir items
+        * The same as the snapshot creation, please see the comment
+        * of create_snapshot().
         */
-       trans = btrfs_start_transaction(root, 6);
-       if (IS_ERR(trans))
-               return PTR_ERR(trans);
+       ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+                                              7, &qgroup_reserved);
+       if (ret)
+               return ret;
 
-       ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid,
-                                  inherit ? *inherit : NULL);
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out;
+       }
+       trans->block_rsv = &block_rsv;
+       trans->bytes_reserved = block_rsv.size;
+
+       ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
        if (ret)
                goto fail;
 
@@ -516,6 +522,8 @@ static noinline int create_subvol(struct btrfs_root *root,
        BUG_ON(ret);
 
 fail:
+       trans->block_rsv = NULL;
+       trans->bytes_reserved = 0;
        if (async_transid) {
                *async_transid = trans->transid;
                err = btrfs_commit_transaction_async(trans, root, 1);
@@ -527,13 +535,15 @@ fail:
 
        if (!ret)
                d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
-
+out:
+       btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
        return ret;
 }
 
-static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
-                          char *name, int namelen, u64 *async_transid,
-                          bool readonly, struct btrfs_qgroup_inherit **inherit)
+static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+                          struct dentry *dentry, char *name, int namelen,
+                          u64 *async_transid, bool readonly,
+                          struct btrfs_qgroup_inherit *inherit)
 {
        struct inode *inode;
        struct btrfs_pending_snapshot *pending_snapshot;
@@ -549,23 +559,31 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
 
        btrfs_init_block_rsv(&pending_snapshot->block_rsv,
                             BTRFS_BLOCK_RSV_TEMP);
+       /*
+        * 1 - parent dir inode
+        * 2 - dir entries
+        * 1 - root item
+        * 2 - root ref/backref
+        * 1 - root of snapshot
+        */
+       ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
+                                       &pending_snapshot->block_rsv, 7,
+                                       &pending_snapshot->qgroup_reserved);
+       if (ret)
+               goto out;
+
        pending_snapshot->dentry = dentry;
        pending_snapshot->root = root;
        pending_snapshot->readonly = readonly;
-       if (inherit) {
-               pending_snapshot->inherit = *inherit;
-               *inherit = NULL;        /* take responsibility to free it */
-       }
+       pending_snapshot->dir = dir;
+       pending_snapshot->inherit = inherit;
 
-       trans = btrfs_start_transaction(root->fs_info->extent_root, 6);
+       trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto fail;
        }
 
-       ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
-       BUG_ON(ret);
-
        spin_lock(&root->fs_info->trans_lock);
        list_add(&pending_snapshot->list,
                 &trans->transaction->pending_snapshots);
@@ -602,6 +620,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
        d_instantiate(dentry, inode);
        ret = 0;
 fail:
+       btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
+                                        &pending_snapshot->block_rsv,
+                                        pending_snapshot->qgroup_reserved);
+out:
        kfree(pending_snapshot);
        return ret;
 }
@@ -695,7 +717,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
                                   char *name, int namelen,
                                   struct btrfs_root *snap_src,
                                   u64 *async_transid, bool readonly,
-                                  struct btrfs_qgroup_inherit **inherit)
+                                  struct btrfs_qgroup_inherit *inherit)
 {
        struct inode *dir  = parent->dentry->d_inode;
        struct dentry *dentry;
@@ -732,11 +754,11 @@ static noinline int btrfs_mksubvol(struct path *parent,
                goto out_up_read;
 
        if (snap_src) {
-               error = create_snapshot(snap_src, dentry, name, namelen,
+               error = create_snapshot(snap_src, dir, dentry, name, namelen,
                                        async_transid, readonly, inherit);
        } else {
-               error = create_subvol(BTRFS_I(dir)->root, dentry,
-                                     name, namelen, async_transid, inherit);
+               error = create_subvol(dir, dentry, name, namelen,
+                                     async_transid, inherit);
        }
        if (!error)
                fsnotify_mkdir(dir, dentry);
@@ -818,7 +840,7 @@ static int find_new_extents(struct btrfs_root *root,
 
        while(1) {
                ret = btrfs_search_forward(root, &min_key, &max_key,
-                                          path, 0, newer_than);
+                                          path, newer_than);
                if (ret != 0)
                        goto none;
                if (min_key.objectid != ino)
@@ -1206,6 +1228,12 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                if (!(inode->i_sb->s_flags & MS_ACTIVE))
                        break;
 
+               if (btrfs_defrag_cancelled(root->fs_info)) {
+                       printk(KERN_DEBUG "btrfs: defrag_file cancelled\n");
+                       ret = -EAGAIN;
+                       break;
+               }
+
                if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
                                         extent_thresh, &last_len, &skip,
                                         &defrag_end, range->flags &
@@ -1320,7 +1348,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
        u64 new_size;
        u64 old_size;
        u64 devid = 1;
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_ioctl_vol_args *vol_args;
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device = NULL;
@@ -1329,9 +1357,6 @@ static noinline int btrfs_ioctl_resize(struct file *file,
        int ret = 0;
        int mod = 0;
 
-       if (root->fs_info->sb->s_flags & MS_RDONLY)
-               return -EROFS;
-
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
@@ -1363,6 +1388,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
                *devstr = '\0';
                devstr = vol_args->name;
                devid = simple_strtoull(devstr, &end, 10);
+               if (!devid) {
+                       ret = -EINVAL;
+                       goto out_free;
+               }
                printk(KERN_INFO "btrfs: resizing devid %llu\n",
                       (unsigned long long)devid);
        }
@@ -1371,7 +1400,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
        if (!device) {
                printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
                       (unsigned long long)devid);
-               ret = -EINVAL;
+               ret = -ENODEV;
                goto out_free;
        }
 
@@ -1379,7 +1408,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
                printk(KERN_INFO "btrfs: resizer unable to apply on "
                       "readonly device %llu\n",
                       (unsigned long long)devid);
-               ret = -EINVAL;
+               ret = -EPERM;
                goto out_free;
        }
 
@@ -1401,7 +1430,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
        }
 
        if (device->is_tgtdev_for_dev_replace) {
-               ret = -EINVAL;
+               ret = -EPERM;
                goto out_free;
        }
 
@@ -1457,7 +1486,7 @@ out:
 static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
                                char *name, unsigned long fd, int subvol,
                                u64 *transid, bool readonly,
-                               struct btrfs_qgroup_inherit **inherit)
+                               struct btrfs_qgroup_inherit *inherit)
 {
        int namelen;
        int ret = 0;
@@ -1489,8 +1518,8 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
                        goto out_drop_write;
                }
 
-               src_inode = src.file->f_path.dentry->d_inode;
-               if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
+               src_inode = file_inode(src.file);
+               if (src_inode->i_sb != file_inode(file)->i_sb) {
                        printk(KERN_INFO "btrfs: Snapshot src from "
                               "another FS\n");
                        ret = -EINVAL;
@@ -1566,7 +1595,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
 
        ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
                                              vol_args->fd, subvol, ptr,
-                                             readonly, &inherit);
+                                             readonly, inherit);
 
        if (ret == 0 && ptr &&
            copy_to_user(arg +
@@ -1582,7 +1611,7 @@ out:
 static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
                                                void __user *arg)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        u64 flags = 0;
@@ -1604,7 +1633,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
 static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
                                              void __user *arg)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 root_flags;
@@ -1863,7 +1892,7 @@ static noinline int search_ioctl(struct inode *inode,
        path->keep_locks = 1;
 
        while(1) {
-               ret = btrfs_search_forward(root, &key, &max_key, path, 0,
+               ret = btrfs_search_forward(root, &key, &max_key, path,
                                           sk->min_transid);
                if (ret != 0) {
                        if (ret > 0)
@@ -1898,7 +1927,7 @@ static noinline int btrfs_ioctl_tree_search(struct file *file,
        if (IS_ERR(args))
                return PTR_ERR(args);
 
-       inode = fdentry(file)->d_inode;
+       inode = file_inode(file);
        ret = search_ioctl(inode, args);
        if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
                ret = -EFAULT;
@@ -2008,7 +2037,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file,
        if (IS_ERR(args))
                return PTR_ERR(args);
 
-       inode = fdentry(file)->d_inode;
+       inode = file_inode(file);
 
        if (args->treeid == 0)
                args->treeid = BTRFS_I(inode)->root->root_key.objectid;
@@ -2035,6 +2064,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
        struct btrfs_root *dest = NULL;
        struct btrfs_ioctl_vol_args *vol_args;
        struct btrfs_trans_handle *trans;
+       struct btrfs_block_rsv block_rsv;
+       u64 qgroup_reserved;
        int namelen;
        int ret;
        int err = 0;
@@ -2124,12 +2155,23 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
        if (err)
                goto out_up_write;
 
+       btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
+       /*
+        * One for dir inode, two for dir entries, two for root
+        * ref/backref.
+        */
+       err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+                                              5, &qgroup_reserved);
+       if (err)
+               goto out_up_write;
+
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
-               goto out_up_write;
+               goto out_release;
        }
-       trans->block_rsv = &root->fs_info->global_block_rsv;
+       trans->block_rsv = &block_rsv;
+       trans->bytes_reserved = block_rsv.size;
 
        ret = btrfs_unlink_subvol(trans, root, dir,
                                dest->root_key.objectid,
@@ -2159,10 +2201,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                }
        }
 out_end_trans:
+       trans->block_rsv = NULL;
+       trans->bytes_reserved = 0;
        ret = btrfs_end_transaction(trans, root);
        if (ret && !err)
                err = ret;
        inode->i_flags |= S_DEAD;
+out_release:
+       btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
 out_up_write:
        up_write(&root->fs_info->subvol_sem);
 out_unlock:
@@ -2171,6 +2217,12 @@ out_unlock:
                shrink_dcache_sb(root->fs_info->sb);
                btrfs_invalidate_inodes(dest);
                d_delete(dentry);
+
+               /* the last ref */
+               if (dest->cache_inode) {
+                       iput(dest->cache_inode);
+                       dest->cache_inode = NULL;
+               }
        }
 out_dput:
        dput(dentry);
@@ -2184,7 +2236,7 @@ out:
 
 static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ioctl_defrag_range_args *range;
        int ret;
@@ -2211,10 +2263,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                        ret = -EPERM;
                        goto out;
                }
-               ret = btrfs_defrag_root(root, 0);
+               ret = btrfs_defrag_root(root);
                if (ret)
                        goto out;
-               ret = btrfs_defrag_root(root->fs_info->extent_root, 0);
+               ret = btrfs_defrag_root(root->fs_info->extent_root);
                break;
        case S_IFREG:
                if (!(file->f_mode & FMODE_WRITE)) {
@@ -2244,7 +2296,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                        /* the rest are all set to zero by kzalloc */
                        range->len = (u64)-1;
                }
-               ret = btrfs_defrag_file(fdentry(file)->d_inode, file,
+               ret = btrfs_defrag_file(file_inode(file), file,
                                        range, 0, 0);
                if (ret > 0)
                        ret = 0;
@@ -2292,7 +2344,7 @@ out:
 
 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_ioctl_vol_args *vol_args;
        int ret;
 
@@ -2415,7 +2467,7 @@ out:
 static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                       u64 off, u64 olen, u64 destoff)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct fd src_file;
        struct inode *src;
@@ -2461,7 +2513,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
        if (src_file.file->f_path.mnt != file->f_path.mnt)
                goto out_fput;
 
-       src = src_file.file->f_dentry->d_inode;
+       src = file_inode(src_file.file);
 
        ret = -EINVAL;
        if (src == inode)
@@ -2823,7 +2875,7 @@ static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
  */
 static long btrfs_ioctl_trans_start(struct file *file)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        int ret;
@@ -2863,7 +2915,7 @@ out:
 
 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_root *new_root;
        struct btrfs_dir_item *di;
@@ -3087,7 +3139,7 @@ out:
  */
 long btrfs_ioctl_trans_end(struct file *file)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
 
@@ -3111,7 +3163,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
        u64 transid;
        int ret;
 
-       trans = btrfs_attach_transaction(root);
+       trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                if (PTR_ERR(trans) != -ENOENT)
                        return PTR_ERR(trans);
@@ -3149,7 +3201,7 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
 
 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_ioctl_scrub_args *sa;
        int ret;
 
@@ -3289,7 +3341,7 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_path *path;
 
-       if (!capable(CAP_SYS_ADMIN))
+       if (!capable(CAP_DAC_READ_SEARCH))
                return -EPERM;
 
        path = btrfs_alloc_path();
@@ -3440,7 +3492,7 @@ void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
 
 static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ioctl_balance_args *bargs;
        struct btrfs_balance_control *bctl;
@@ -3630,7 +3682,7 @@ out:
 
 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_ioctl_quota_ctl_args *sa;
        struct btrfs_trans_handle *trans = NULL;
        int ret;
@@ -3689,7 +3741,7 @@ drop_write:
 
 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_ioctl_qgroup_assign_args *sa;
        struct btrfs_trans_handle *trans;
        int ret;
@@ -3736,7 +3788,7 @@ drop_write:
 
 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_ioctl_qgroup_create_args *sa;
        struct btrfs_trans_handle *trans;
        int ret;
@@ -3787,7 +3839,7 @@ drop_write:
 
 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        struct btrfs_ioctl_qgroup_limit_args *sa;
        struct btrfs_trans_handle *trans;
        int ret;
@@ -3837,7 +3889,7 @@ static long btrfs_ioctl_set_received_subvol(struct file *file,
                                            void __user *arg)
 {
        struct btrfs_ioctl_received_subvol_args *sa = NULL;
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_root_item *root_item = &root->root_item;
        struct btrfs_trans_handle *trans;
@@ -3914,10 +3966,69 @@ out:
        return ret;
 }
 
+static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
+{
+       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       const char *label = root->fs_info->super_copy->label;
+       size_t len = strnlen(label, BTRFS_LABEL_SIZE);
+       int ret;
+
+       if (len == BTRFS_LABEL_SIZE) {
+               pr_warn("btrfs: label is too long, return the first %zu bytes\n",
+                       --len);
+       }
+
+       mutex_lock(&root->fs_info->volume_mutex);
+       ret = copy_to_user(arg, label, len);
+       mutex_unlock(&root->fs_info->volume_mutex);
+
+       return ret ? -EFAULT : 0;
+}
+
+static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
+{
+       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_super_block *super_block = root->fs_info->super_copy;
+       struct btrfs_trans_handle *trans;
+       char label[BTRFS_LABEL_SIZE];
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (copy_from_user(label, arg, sizeof(label)))
+               return -EFAULT;
+
+       if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
+               pr_err("btrfs: unable to set label with more than %d bytes\n",
+                      BTRFS_LABEL_SIZE - 1);
+               return -EINVAL;
+       }
+
+       ret = mnt_want_write_file(file);
+       if (ret)
+               return ret;
+
+       mutex_lock(&root->fs_info->volume_mutex);
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out_unlock;
+       }
+
+       strcpy(super_block->label, label);
+       ret = btrfs_end_transaction(trans, root);
+
+out_unlock:
+       mutex_unlock(&root->fs_info->volume_mutex);
+       mnt_drop_write_file(file);
+       return ret;
+}
+
 long btrfs_ioctl(struct file *file, unsigned int
                cmd, unsigned long arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
        void __user *argp = (void __user *)arg;
 
        switch (cmd) {
@@ -4014,6 +4125,10 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_qgroup_limit(file, argp);
        case BTRFS_IOC_DEV_REPLACE:
                return btrfs_ioctl_dev_replace(root, argp);
+       case BTRFS_IOC_GET_FSLABEL:
+               return btrfs_ioctl_get_fslabel(file, argp);
+       case BTRFS_IOC_SET_FSLABEL:
+               return btrfs_ioctl_set_fslabel(file, argp);
        }
 
        return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
deleted file mode 100644 (file)
index dabca9c..0000000
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (C) 2007 Oracle.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#ifndef __IOCTL_
-#define __IOCTL_
-#include <linux/ioctl.h>
-
-#define BTRFS_IOCTL_MAGIC 0x94
-#define BTRFS_VOL_NAME_MAX 255
-
-/* this should be 4k */
-#define BTRFS_PATH_NAME_MAX 4087
-struct btrfs_ioctl_vol_args {
-       __s64 fd;
-       char name[BTRFS_PATH_NAME_MAX + 1];
-};
-
-#define BTRFS_DEVICE_PATH_NAME_MAX 1024
-
-#define BTRFS_SUBVOL_CREATE_ASYNC      (1ULL << 0)
-#define BTRFS_SUBVOL_RDONLY            (1ULL << 1)
-#define BTRFS_SUBVOL_QGROUP_INHERIT    (1ULL << 2)
-#define BTRFS_FSID_SIZE 16
-#define BTRFS_UUID_SIZE 16
-
-#define BTRFS_QGROUP_INHERIT_SET_LIMITS        (1ULL << 0)
-
-struct btrfs_qgroup_limit {
-       __u64   flags;
-       __u64   max_rfer;
-       __u64   max_excl;
-       __u64   rsv_rfer;
-       __u64   rsv_excl;
-};
-
-struct btrfs_qgroup_inherit {
-       __u64   flags;
-       __u64   num_qgroups;
-       __u64   num_ref_copies;
-       __u64   num_excl_copies;
-       struct btrfs_qgroup_limit lim;
-       __u64   qgroups[0];
-};
-
-struct btrfs_ioctl_qgroup_limit_args {
-       __u64   qgroupid;
-       struct btrfs_qgroup_limit lim;
-};
-
-#define BTRFS_SUBVOL_NAME_MAX 4039
-struct btrfs_ioctl_vol_args_v2 {
-       __s64 fd;
-       __u64 transid;
-       __u64 flags;
-       union {
-               struct {
-                       __u64 size;
-                       struct btrfs_qgroup_inherit __user *qgroup_inherit;
-               };
-               __u64 unused[4];
-       };
-       char name[BTRFS_SUBVOL_NAME_MAX + 1];
-};
-
-/*
- * structure to report errors and progress to userspace, either as a
- * result of a finished scrub, a canceled scrub or a progress inquiry
- */
-struct btrfs_scrub_progress {
-       __u64 data_extents_scrubbed;    /* # of data extents scrubbed */
-       __u64 tree_extents_scrubbed;    /* # of tree extents scrubbed */
-       __u64 data_bytes_scrubbed;      /* # of data bytes scrubbed */
-       __u64 tree_bytes_scrubbed;      /* # of tree bytes scrubbed */
-       __u64 read_errors;              /* # of read errors encountered (EIO) */
-       __u64 csum_errors;              /* # of failed csum checks */
-       __u64 verify_errors;            /* # of occurences, where the metadata
-                                        * of a tree block did not match the
-                                        * expected values, like generation or
-                                        * logical */
-       __u64 no_csum;                  /* # of 4k data block for which no csum
-                                        * is present, probably the result of
-                                        * data written with nodatasum */
-       __u64 csum_discards;            /* # of csum for which no data was found
-                                        * in the extent tree. */
-       __u64 super_errors;             /* # of bad super blocks encountered */
-       __u64 malloc_errors;            /* # of internal kmalloc errors. These
-                                        * will likely cause an incomplete
-                                        * scrub */
-       __u64 uncorrectable_errors;     /* # of errors where either no intact
-                                        * copy was found or the writeback
-                                        * failed */
-       __u64 corrected_errors;         /* # of errors corrected */
-       __u64 last_physical;            /* last physical address scrubbed. In
-                                        * case a scrub was aborted, this can
-                                        * be used to restart the scrub */
-       __u64 unverified_errors;        /* # of occurences where a read for a
-                                        * full (64k) bio failed, but the re-
-                                        * check succeeded for each 4k piece.
-                                        * Intermittent error. */
-};
-
-#define BTRFS_SCRUB_READONLY   1
-struct btrfs_ioctl_scrub_args {
-       __u64 devid;                            /* in */
-       __u64 start;                            /* in */
-       __u64 end;                              /* in */
-       __u64 flags;                            /* in */
-       struct btrfs_scrub_progress progress;   /* out */
-       /* pad to 1k */
-       __u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8];
-};
-
-#define BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS   0
-#define BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID    1
-struct btrfs_ioctl_dev_replace_start_params {
-       __u64 srcdevid; /* in, if 0, use srcdev_name instead */
-       __u64 cont_reading_from_srcdev_mode;    /* in, see #define
-                                                * above */
-       __u8 srcdev_name[BTRFS_DEVICE_PATH_NAME_MAX + 1];       /* in */
-       __u8 tgtdev_name[BTRFS_DEVICE_PATH_NAME_MAX + 1];       /* in */
-};
-
-#define BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED    0
-#define BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED          1
-#define BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED         2
-#define BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED         3
-#define BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED                4
-struct btrfs_ioctl_dev_replace_status_params {
-       __u64 replace_state;    /* out, see #define above */
-       __u64 progress_1000;    /* out, 0 <= x <= 1000 */
-       __u64 time_started;     /* out, seconds since 1-Jan-1970 */
-       __u64 time_stopped;     /* out, seconds since 1-Jan-1970 */
-       __u64 num_write_errors; /* out */
-       __u64 num_uncorrectable_read_errors;    /* out */
-};
-
-#define BTRFS_IOCTL_DEV_REPLACE_CMD_START                      0
-#define BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS                     1
-#define BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL                     2
-#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR                        0
-#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED             1
-#define BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED         2
-struct btrfs_ioctl_dev_replace_args {
-       __u64 cmd;      /* in */
-       __u64 result;   /* out */
-
-       union {
-               struct btrfs_ioctl_dev_replace_start_params start;
-               struct btrfs_ioctl_dev_replace_status_params status;
-       };      /* in/out */
-
-       __u64 spare[64];
-};
-
-struct btrfs_ioctl_dev_info_args {
-       __u64 devid;                            /* in/out */
-       __u8 uuid[BTRFS_UUID_SIZE];             /* in/out */
-       __u64 bytes_used;                       /* out */
-       __u64 total_bytes;                      /* out */
-       __u64 unused[379];                      /* pad to 4k */
-       __u8 path[BTRFS_DEVICE_PATH_NAME_MAX];  /* out */
-};
-
-struct btrfs_ioctl_fs_info_args {
-       __u64 max_id;                           /* out */
-       __u64 num_devices;                      /* out */
-       __u8 fsid[BTRFS_FSID_SIZE];             /* out */
-       __u64 reserved[124];                    /* pad to 1k */
-};
-
-/* balance control ioctl modes */
-#define BTRFS_BALANCE_CTL_PAUSE                1
-#define BTRFS_BALANCE_CTL_CANCEL       2
-
-/*
- * this is packed, because it should be exactly the same as its disk
- * byte order counterpart (struct btrfs_disk_balance_args)
- */
-struct btrfs_balance_args {
-       __u64 profiles;
-       __u64 usage;
-       __u64 devid;
-       __u64 pstart;
-       __u64 pend;
-       __u64 vstart;
-       __u64 vend;
-
-       __u64 target;
-
-       __u64 flags;
-
-       __u64 unused[8];
-} __attribute__ ((__packed__));
-
-/* report balance progress to userspace */
-struct btrfs_balance_progress {
-       __u64 expected;         /* estimated # of chunks that will be
-                                * relocated to fulfill the request */
-       __u64 considered;       /* # of chunks we have considered so far */
-       __u64 completed;        /* # of chunks relocated so far */
-};
-
-#define BTRFS_BALANCE_STATE_RUNNING    (1ULL << 0)
-#define BTRFS_BALANCE_STATE_PAUSE_REQ  (1ULL << 1)
-#define BTRFS_BALANCE_STATE_CANCEL_REQ (1ULL << 2)
-
-struct btrfs_ioctl_balance_args {
-       __u64 flags;                            /* in/out */
-       __u64 state;                            /* out */
-
-       struct btrfs_balance_args data;         /* in/out */
-       struct btrfs_balance_args meta;         /* in/out */
-       struct btrfs_balance_args sys;          /* in/out */
-
-       struct btrfs_balance_progress stat;     /* out */
-
-       __u64 unused[72];                       /* pad to 1k */
-};
-
-#define BTRFS_INO_LOOKUP_PATH_MAX 4080
-struct btrfs_ioctl_ino_lookup_args {
-       __u64 treeid;
-       __u64 objectid;
-       char name[BTRFS_INO_LOOKUP_PATH_MAX];
-};
-
-struct btrfs_ioctl_search_key {
-       /* which root are we searching.  0 is the tree of tree roots */
-       __u64 tree_id;
-
-       /* keys returned will be >= min and <= max */
-       __u64 min_objectid;
-       __u64 max_objectid;
-
-       /* keys returned will be >= min and <= max */
-       __u64 min_offset;
-       __u64 max_offset;
-
-       /* max and min transids to search for */
-       __u64 min_transid;
-       __u64 max_transid;
-
-       /* keys returned will be >= min and <= max */
-       __u32 min_type;
-       __u32 max_type;
-
-       /*
-        * how many items did userland ask for, and how many are we
-        * returning
-        */
-       __u32 nr_items;
-
-       /* align to 64 bits */
-       __u32 unused;
-
-       /* some extra for later */
-       __u64 unused1;
-       __u64 unused2;
-       __u64 unused3;
-       __u64 unused4;
-};
-
-struct btrfs_ioctl_search_header {
-       __u64 transid;
-       __u64 objectid;
-       __u64 offset;
-       __u32 type;
-       __u32 len;
-};
-
-#define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key))
-/*
- * the buf is an array of search headers where
- * each header is followed by the actual item
- * the type field is expanded to 32 bits for alignment
- */
-struct btrfs_ioctl_search_args {
-       struct btrfs_ioctl_search_key key;
-       char buf[BTRFS_SEARCH_ARGS_BUFSIZE];
-};
-
-struct btrfs_ioctl_clone_range_args {
-  __s64 src_fd;
-  __u64 src_offset, src_length;
-  __u64 dest_offset;
-};
-
-/* flags for the defrag range ioctl */
-#define BTRFS_DEFRAG_RANGE_COMPRESS 1
-#define BTRFS_DEFRAG_RANGE_START_IO 2
-
-struct btrfs_ioctl_space_info {
-       __u64 flags;
-       __u64 total_bytes;
-       __u64 used_bytes;
-};
-
-struct btrfs_ioctl_space_args {
-       __u64 space_slots;
-       __u64 total_spaces;
-       struct btrfs_ioctl_space_info spaces[0];
-};
-
-struct btrfs_data_container {
-       __u32   bytes_left;     /* out -- bytes not needed to deliver output */
-       __u32   bytes_missing;  /* out -- additional bytes needed for result */
-       __u32   elem_cnt;       /* out */
-       __u32   elem_missed;    /* out */
-       __u64   val[0];         /* out */
-};
-
-struct btrfs_ioctl_ino_path_args {
-       __u64                           inum;           /* in */
-       __u64                           size;           /* in */
-       __u64                           reserved[4];
-       /* struct btrfs_data_container  *fspath;           out */
-       __u64                           fspath;         /* out */
-};
-
-struct btrfs_ioctl_logical_ino_args {
-       __u64                           logical;        /* in */
-       __u64                           size;           /* in */
-       __u64                           reserved[4];
-       /* struct btrfs_data_container  *inodes;        out   */
-       __u64                           inodes;
-};
-
-enum btrfs_dev_stat_values {
-       /* disk I/O failure stats */
-       BTRFS_DEV_STAT_WRITE_ERRS, /* EIO or EREMOTEIO from lower layers */
-       BTRFS_DEV_STAT_READ_ERRS, /* EIO or EREMOTEIO from lower layers */
-       BTRFS_DEV_STAT_FLUSH_ERRS, /* EIO or EREMOTEIO from lower layers */
-
-       /* stats for indirect indications for I/O failures */
-       BTRFS_DEV_STAT_CORRUPTION_ERRS, /* checksum error, bytenr error or
-                                        * contents is illegal: this is an
-                                        * indication that the block was damaged
-                                        * during read or write, or written to
-                                        * wrong location or read from wrong
-                                        * location */
-       BTRFS_DEV_STAT_GENERATION_ERRS, /* an indication that blocks have not
-                                        * been written */
-
-       BTRFS_DEV_STAT_VALUES_MAX
-};
-
-/* Reset statistics after reading; needs SYS_ADMIN capability */
-#define        BTRFS_DEV_STATS_RESET           (1ULL << 0)
-
-struct btrfs_ioctl_get_dev_stats {
-       __u64 devid;                            /* in */
-       __u64 nr_items;                         /* in/out */
-       __u64 flags;                            /* in/out */
-
-       /* out values: */
-       __u64 values[BTRFS_DEV_STAT_VALUES_MAX];
-
-       __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
-};
-
-#define BTRFS_QUOTA_CTL_ENABLE 1
-#define BTRFS_QUOTA_CTL_DISABLE        2
-#define BTRFS_QUOTA_CTL_RESCAN 3
-struct btrfs_ioctl_quota_ctl_args {
-       __u64 cmd;
-       __u64 status;
-};
-
-struct btrfs_ioctl_qgroup_assign_args {
-       __u64 assign;
-       __u64 src;
-       __u64 dst;
-};
-
-struct btrfs_ioctl_qgroup_create_args {
-       __u64 create;
-       __u64 qgroupid;
-};
-struct btrfs_ioctl_timespec {
-       __u64 sec;
-       __u32 nsec;
-};
-
-struct btrfs_ioctl_received_subvol_args {
-       char    uuid[BTRFS_UUID_SIZE];  /* in */
-       __u64   stransid;               /* in */
-       __u64   rtransid;               /* out */
-       struct btrfs_ioctl_timespec stime; /* in */
-       struct btrfs_ioctl_timespec rtime; /* out */
-       __u64   flags;                  /* in */
-       __u64   reserved[16];           /* in */
-};
-
-struct btrfs_ioctl_send_args {
-       __s64 send_fd;                  /* in */
-       __u64 clone_sources_count;      /* in */
-       __u64 __user *clone_sources;    /* in */
-       __u64 parent_root;              /* in */
-       __u64 flags;                    /* in */
-       __u64 reserved[4];              /* in */
-};
-
-#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
-                                  struct btrfs_ioctl_vol_args)
-#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
-                                  struct btrfs_ioctl_vol_args)
-#define BTRFS_IOC_RESIZE _IOW(BTRFS_IOCTL_MAGIC, 3, \
-                                  struct btrfs_ioctl_vol_args)
-#define BTRFS_IOC_SCAN_DEV _IOW(BTRFS_IOCTL_MAGIC, 4, \
-                                  struct btrfs_ioctl_vol_args)
-/* trans start and trans end are dangerous, and only for
- * use by applications that know how to avoid the
- * resulting deadlocks
- */
-#define BTRFS_IOC_TRANS_START  _IO(BTRFS_IOCTL_MAGIC, 6)
-#define BTRFS_IOC_TRANS_END    _IO(BTRFS_IOCTL_MAGIC, 7)
-#define BTRFS_IOC_SYNC         _IO(BTRFS_IOCTL_MAGIC, 8)
-
-#define BTRFS_IOC_CLONE        _IOW(BTRFS_IOCTL_MAGIC, 9, int)
-#define BTRFS_IOC_ADD_DEV _IOW(BTRFS_IOCTL_MAGIC, 10, \
-                                  struct btrfs_ioctl_vol_args)
-#define BTRFS_IOC_RM_DEV _IOW(BTRFS_IOCTL_MAGIC, 11, \
-                                  struct btrfs_ioctl_vol_args)
-#define BTRFS_IOC_BALANCE _IOW(BTRFS_IOCTL_MAGIC, 12, \
-                                  struct btrfs_ioctl_vol_args)
-
-#define BTRFS_IOC_CLONE_RANGE _IOW(BTRFS_IOCTL_MAGIC, 13, \
-                                 struct btrfs_ioctl_clone_range_args)
-
-#define BTRFS_IOC_SUBVOL_CREATE _IOW(BTRFS_IOCTL_MAGIC, 14, \
-                                  struct btrfs_ioctl_vol_args)
-#define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \
-                               struct btrfs_ioctl_vol_args)
-#define BTRFS_IOC_DEFRAG_RANGE _IOW(BTRFS_IOCTL_MAGIC, 16, \
-                               struct btrfs_ioctl_defrag_range_args)
-#define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \
-                                  struct btrfs_ioctl_search_args)
-#define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \
-                                  struct btrfs_ioctl_ino_lookup_args)
-#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64)
-#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \
-                                   struct btrfs_ioctl_space_args)
-#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64)
-#define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
-#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
-                                  struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 24, \
-                                  struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
-#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
-#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
-                             struct btrfs_ioctl_scrub_args)
-#define BTRFS_IOC_SCRUB_CANCEL _IO(BTRFS_IOCTL_MAGIC, 28)
-#define BTRFS_IOC_SCRUB_PROGRESS _IOWR(BTRFS_IOCTL_MAGIC, 29, \
-                                      struct btrfs_ioctl_scrub_args)
-#define BTRFS_IOC_DEV_INFO _IOWR(BTRFS_IOCTL_MAGIC, 30, \
-                                struct btrfs_ioctl_dev_info_args)
-#define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
-                              struct btrfs_ioctl_fs_info_args)
-#define BTRFS_IOC_BALANCE_V2 _IOWR(BTRFS_IOCTL_MAGIC, 32, \
-                                  struct btrfs_ioctl_balance_args)
-#define BTRFS_IOC_BALANCE_CTL _IOW(BTRFS_IOCTL_MAGIC, 33, int)
-#define BTRFS_IOC_BALANCE_PROGRESS _IOR(BTRFS_IOCTL_MAGIC, 34, \
-                                       struct btrfs_ioctl_balance_args)
-#define BTRFS_IOC_INO_PATHS _IOWR(BTRFS_IOCTL_MAGIC, 35, \
-                                       struct btrfs_ioctl_ino_path_args)
-#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
-                                       struct btrfs_ioctl_ino_path_args)
-#define BTRFS_IOC_SET_RECEIVED_SUBVOL _IOWR(BTRFS_IOCTL_MAGIC, 37, \
-                               struct btrfs_ioctl_received_subvol_args)
-#define BTRFS_IOC_SEND _IOW(BTRFS_IOCTL_MAGIC, 38, struct btrfs_ioctl_send_args)
-#define BTRFS_IOC_DEVICES_READY _IOR(BTRFS_IOCTL_MAGIC, 39, \
-                                    struct btrfs_ioctl_vol_args)
-#define BTRFS_IOC_QUOTA_CTL _IOWR(BTRFS_IOCTL_MAGIC, 40, \
-                              struct btrfs_ioctl_quota_ctl_args)
-#define BTRFS_IOC_QGROUP_ASSIGN _IOW(BTRFS_IOCTL_MAGIC, 41, \
-                              struct btrfs_ioctl_qgroup_assign_args)
-#define BTRFS_IOC_QGROUP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 42, \
-                              struct btrfs_ioctl_qgroup_create_args)
-#define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
-                              struct btrfs_ioctl_qgroup_limit_args)
-#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
-                                     struct btrfs_ioctl_get_dev_stats)
-#define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \
-                                   struct btrfs_ioctl_dev_replace_args)
-
-#endif
index 2a1762c..e95df43 100644 (file)
@@ -113,11 +113,10 @@ again:
                read_unlock(&eb->lock);
                return;
        }
-       read_unlock(&eb->lock);
-       wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
-       read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
+               wait_event(eb->write_lock_wq,
+                          atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
index e5ed567..dc08d77 100644 (file)
@@ -196,6 +196,9 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
+       if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
+           !(type == BTRFS_ORDERED_NOCOW))
+               entry->csum_bytes_left = disk_len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = igrab(inode);
@@ -213,6 +216,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);
+       INIT_LIST_HEAD(&entry->log_list);
 
        trace_btrfs_ordered_extent_add(inode, entry);
 
@@ -270,6 +274,10 @@ void btrfs_add_ordered_sum(struct inode *inode,
        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
+       WARN_ON(entry->csum_bytes_left < sum->len);
+       entry->csum_bytes_left -= sum->len;
+       if (entry->csum_bytes_left == 0)
+               wake_up(&entry->wait);
        spin_unlock_irq(&tree->lock);
 }
 
@@ -405,6 +413,66 @@ out:
        return ret == 0;
 }
 
+/* Needs to either be called under a log transaction or the log_mutex */
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
+{
+       struct btrfs_ordered_inode_tree *tree;
+       struct btrfs_ordered_extent *ordered;
+       struct rb_node *n;
+       int index = log->log_transid % 2;
+
+       tree = &BTRFS_I(inode)->ordered_tree;
+       spin_lock_irq(&tree->lock);
+       for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+               ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+               spin_lock(&log->log_extents_lock[index]);
+               if (list_empty(&ordered->log_list)) {
+                       list_add_tail(&ordered->log_list, &log->logged_list[index]);
+                       atomic_inc(&ordered->refs);
+               }
+               spin_unlock(&log->log_extents_lock[index]);
+       }
+       spin_unlock_irq(&tree->lock);
+}
+
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
+{
+       struct btrfs_ordered_extent *ordered;
+       int index = transid % 2;
+
+       spin_lock_irq(&log->log_extents_lock[index]);
+       while (!list_empty(&log->logged_list[index])) {
+               ordered = list_first_entry(&log->logged_list[index],
+                                          struct btrfs_ordered_extent,
+                                          log_list);
+               list_del_init(&ordered->log_list);
+               spin_unlock_irq(&log->log_extents_lock[index]);
+               wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
+                                                  &ordered->flags));
+               btrfs_put_ordered_extent(ordered);
+               spin_lock_irq(&log->log_extents_lock[index]);
+       }
+       spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
+{
+       struct btrfs_ordered_extent *ordered;
+       int index = transid % 2;
+
+       spin_lock_irq(&log->log_extents_lock[index]);
+       while (!list_empty(&log->logged_list[index])) {
+               ordered = list_first_entry(&log->logged_list[index],
+                                          struct btrfs_ordered_extent,
+                                          log_list);
+               list_del_init(&ordered->log_list);
+               spin_unlock_irq(&log->log_extents_lock[index]);
+               btrfs_put_ordered_extent(ordered);
+               spin_lock_irq(&log->log_extents_lock[index]);
+       }
+       spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
 /*
  * used to drop a reference on an ordered extent.  This will free
  * the extent if the last reference is dropped
@@ -544,10 +612,12 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
  * extra check to make sure the ordered operation list really is empty
  * before we return
  */
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
+int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+                                struct btrfs_root *root, int wait)
 {
        struct btrfs_inode *btrfs_inode;
        struct inode *inode;
+       struct btrfs_transaction *cur_trans = trans->transaction;
        struct list_head splice;
        struct list_head works;
        struct btrfs_delalloc_work *work, *next;
@@ -558,14 +628,10 @@ int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
 
        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
-again:
-       list_splice_init(&root->fs_info->ordered_operations, &splice);
-
+       list_splice_init(&cur_trans->ordered_operations, &splice);
        while (!list_empty(&splice)) {
-
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                   ordered_operations);
-
                inode = &btrfs_inode->vfs_inode;
 
                list_del_init(&btrfs_inode->ordered_operations);
@@ -574,24 +640,22 @@ again:
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(inode);
-
-               if (!wait && inode) {
-                       list_add_tail(&BTRFS_I(inode)->ordered_operations,
-                             &root->fs_info->ordered_operations);
-               }
-
                if (!inode)
                        continue;
+
+               if (!wait)
+                       list_add_tail(&BTRFS_I(inode)->ordered_operations,
+                                     &cur_trans->ordered_operations);
                spin_unlock(&root->fs_info->ordered_extent_lock);
 
                work = btrfs_alloc_delalloc_work(inode, wait, 1);
                if (!work) {
+                       spin_lock(&root->fs_info->ordered_extent_lock);
                        if (list_empty(&BTRFS_I(inode)->ordered_operations))
                                list_add_tail(&btrfs_inode->ordered_operations,
                                              &splice);
-                       spin_lock(&root->fs_info->ordered_extent_lock);
                        list_splice_tail(&splice,
-                                        &root->fs_info->ordered_operations);
+                                        &cur_trans->ordered_operations);
                        spin_unlock(&root->fs_info->ordered_extent_lock);
                        ret = -ENOMEM;
                        goto out;
@@ -603,9 +667,6 @@ again:
                cond_resched();
                spin_lock(&root->fs_info->ordered_extent_lock);
        }
-       if (wait && !list_empty(&root->fs_info->ordered_operations))
-               goto again;
-
        spin_unlock(&root->fs_info->ordered_extent_lock);
 out:
        list_for_each_entry_safe(work, next, &works, list) {
@@ -974,6 +1035,7 @@ out:
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root, struct inode *inode)
 {
+       struct btrfs_transaction *cur_trans = trans->transaction;
        u64 last_mod;
 
        last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
@@ -988,7 +1050,7 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
        spin_lock(&root->fs_info->ordered_extent_lock);
        if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
                list_add_tail(&BTRFS_I(inode)->ordered_operations,
-                             &root->fs_info->ordered_operations);
+                             &cur_trans->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
 }
index f29d4bf..8eadfe4 100644 (file)
@@ -79,6 +79,8 @@ struct btrfs_ordered_sum {
 #define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
                                       * has done its due diligence in updating
                                       * the isize. */
+#define BTRFS_ORDERED_LOGGED_CSUM 8 /* We've logged the csums on this ordered
+                                      ordered extent */
 
 struct btrfs_ordered_extent {
        /* logical offset in the file */
@@ -96,6 +98,9 @@ struct btrfs_ordered_extent {
        /* number of bytes that still need writing */
        u64 bytes_left;
 
+       /* number of bytes that still need csumming */
+       u64 csum_bytes_left;
+
        /*
         * the end of the ordered extent which is behind it but
         * didn't update disk_i_size. Please see the comment of
@@ -118,6 +123,9 @@ struct btrfs_ordered_extent {
        /* list of checksums for insertion when the extent io is done */
        struct list_head list;
 
+       /* If we need to wait on this to be done */
+       struct list_head log_list;
+
        /* used to wait for the BTRFS_ORDERED_COMPLETE bit */
        wait_queue_head_t wait;
 
@@ -189,11 +197,15 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
 int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
+int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+                                struct btrfs_root *root, int wait);
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct inode *inode);
 void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
 int __init ordered_data_init(void);
 void ordered_data_exit(void);
 #endif
index 50d95fd..920957e 100644 (file)
@@ -294,6 +294,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
                               btrfs_dev_extent_chunk_offset(l, dev_extent),
                               (unsigned long long)
                               btrfs_dev_extent_length(l, dev_extent));
+                       break;
                case BTRFS_DEV_STATS_KEY:
                        printk(KERN_INFO "\t\tdevice stats\n");
                        break;
index a5c8562..aee4b1c 100644 (file)
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/btrfs.h>
 
 #include "ctree.h"
 #include "transaction.h"
 #include "disk-io.h"
 #include "locking.h"
 #include "ulist.h"
-#include "ioctl.h"
 #include "backref.h"
 
 /* TODO XXX FIXME
@@ -620,7 +620,9 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
        key.offset = qgroupid;
 
        path = btrfs_alloc_path();
-       BUG_ON(!path);
+       if (!path)
+               return -ENOMEM;
+
        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;
@@ -661,7 +663,9 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
        key.offset = qgroup->qgroupid;
 
        path = btrfs_alloc_path();
-       BUG_ON(!path);
+       if (!path)
+               return -ENOMEM;
+
        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;
@@ -702,7 +706,9 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
        key.offset = 0;
 
        path = btrfs_alloc_path();
-       BUG_ON(!path);
+       if (!path)
+               return -ENOMEM;
+
        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret > 0)
                ret = -ENOENT;
@@ -732,33 +738,38 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
 {
        struct btrfs_path *path;
        struct btrfs_key key;
+       struct extent_buffer *leaf = NULL;
        int ret;
-
-       if (!root)
-               return -EINVAL;
+       int nr = 0;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-       while (1) {
-               key.objectid = 0;
-               key.offset = 0;
-               key.type = 0;
+       path->leave_spinning = 1;
 
-               path->leave_spinning = 1;
+       key.objectid = 0;
+       key.offset = 0;
+       key.type = 0;
+
+       while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-               if (ret > 0) {
-                       if (path->slots[0] == 0)
-                               break;
-                       path->slots[0]--;
-               } else if (ret < 0) {
+               if (ret < 0)
+                       goto out;
+               leaf = path->nodes[0];
+               nr = btrfs_header_nritems(leaf);
+               if (!nr)
                        break;
-               }
-
-               ret = btrfs_del_item(trans, root, path);
+               /*
+                * delete the leaf one by one
+                * since the whole tree is going
+                * to be deleted.
+                */
+               path->slots[0] = 0;
+               ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;
+
                btrfs_release_path(path);
        }
        ret = 0;
@@ -847,6 +858,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
        int ret = 0;
 
        spin_lock(&fs_info->qgroup_lock);
+       if (!fs_info->quota_root) {
+               spin_unlock(&fs_info->qgroup_lock);
+               return 0;
+       }
        fs_info->quota_enabled = 0;
        fs_info->pending_quota_state = 0;
        quota_root = fs_info->quota_root;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
new file mode 100644 (file)
index 0000000..9a79fb7
--- /dev/null
@@ -0,0 +1,2100 @@
+/*
+ * Copyright (C) 2012 Fusion-io  All rights reserved.
+ * Copyright (C) 2012 Intel Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/iocontext.h>
+#include <linux/capability.h>
+#include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/raid/pq.h>
+#include <linux/hash.h>
+#include <linux/list_sort.h>
+#include <linux/raid/xor.h>
+#include <linux/vmalloc.h>
+#include <asm/div64.h>
+#include "compat.h"
+#include "ctree.h"
+#include "extent_map.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "print-tree.h"
+#include "volumes.h"
+#include "raid56.h"
+#include "async-thread.h"
+#include "check-integrity.h"
+#include "rcu-string.h"
+
+/* set when additional merges to this rbio are not allowed */
+#define RBIO_RMW_LOCKED_BIT    1
+
+/*
+ * set when this rbio is sitting in the hash, but it is just a cache
+ * of past RMW
+ */
+#define RBIO_CACHE_BIT         2
+
+/*
+ * set when it is safe to trust the stripe_pages for caching
+ */
+#define RBIO_CACHE_READY_BIT   3
+
+
+#define RBIO_CACHE_SIZE 1024
+
+struct btrfs_raid_bio {
+       struct btrfs_fs_info *fs_info;
+       struct btrfs_bio *bbio;
+
+       /*
+        * logical block numbers for the start of each stripe
+        * The last one or two are p/q.  These are sorted,
+        * so raid_map[0] is the start of our full stripe
+        */
+       u64 *raid_map;
+
+       /* while we're doing rmw on a stripe
+        * we put it into a hash table so we can
+        * lock the stripe and merge more rbios
+        * into it.
+        */
+       struct list_head hash_list;
+
+       /*
+        * LRU list for the stripe cache
+        */
+       struct list_head stripe_cache;
+
+       /*
+        * for scheduling work in the helper threads
+        */
+       struct btrfs_work work;
+
+       /*
+        * bio list and bio_list_lock are used
+        * to add more bios into the stripe
+        * in hopes of avoiding the full rmw
+        */
+       struct bio_list bio_list;
+       spinlock_t bio_list_lock;
+
+       /* also protected by the bio_list_lock, the
+        * plug list is used by the plugging code
+        * to collect partial bios while plugged.  The
+        * stripe locking code also uses it to hand off
+        * the stripe lock to the next pending IO
+        */
+       struct list_head plug_list;
+
+       /*
+        * flags that tell us if it is safe to
+        * merge with this bio
+        */
+       unsigned long flags;
+
+       /* size of each individual stripe on disk */
+       int stripe_len;
+
+       /* number of data stripes (no p/q) */
+       int nr_data;
+
+       /*
+        * set if we're doing a parity rebuild
+        * for a read from higher up, which is handled
+        * differently from a parity rebuild as part of
+        * rmw
+        */
+       int read_rebuild;
+
+       /* first bad stripe */
+       int faila;
+
+       /* second bad stripe (for raid6 use) */
+       int failb;
+
+       /*
+        * number of pages needed to represent the full
+        * stripe
+        */
+       int nr_pages;
+
+       /*
+        * size of all the bios in the bio_list.  This
+        * helps us decide if the rbio maps to a full
+        * stripe or not
+        */
+       int bio_list_bytes;
+
+       atomic_t refs;
+
+       /*
+        * these are two arrays of pointers.  We allocate the
+        * rbio big enough to hold them both and setup their
+        * locations when the rbio is allocated
+        */
+
+       /* pointers to pages that we allocated for
+        * reading/writing stripes directly from the disk (including P/Q)
+        */
+       struct page **stripe_pages;
+
+       /*
+        * pointers to the pages in the bio_list.  Stored
+        * here for faster lookup
+        */
+       struct page **bio_pages;
+};
+
+static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
+static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
+static void rmw_work(struct btrfs_work *work);
+static void read_rebuild_work(struct btrfs_work *work);
+static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
+static void async_read_rebuild(struct btrfs_raid_bio *rbio);
+static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
+static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
+static void __free_raid_bio(struct btrfs_raid_bio *rbio);
+static void index_rbio_pages(struct btrfs_raid_bio *rbio);
+static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
+
+/*
+ * the stripe hash table is used for locking, and to collect
+ * bios in hopes of making a full stripe
+ */
+int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
+{
+       struct btrfs_stripe_hash_table *table;
+       struct btrfs_stripe_hash_table *x;
+       struct btrfs_stripe_hash *cur;
+       struct btrfs_stripe_hash *h;
+       int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
+       int i;
+       int table_size;
+
+       /* already set up (e.g. by another mounted device of this fs) */
+       if (info->stripe_hash_table)
+               return 0;
+
+       /*
+        * The table is large, starting with order 4 and can go as high as
+        * order 7 in case lock debugging is turned on.
+        *
+        * Try harder to allocate and fallback to vmalloc to lower the chance
+        * of a failing mount.
+        */
+       table_size = sizeof(*table) + sizeof(*h) * num_entries;
+       table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!table) {
+               table = vzalloc(table_size);
+               if (!table)
+                       return -ENOMEM;
+       }
+
+       spin_lock_init(&table->cache_lock);
+       INIT_LIST_HEAD(&table->stripe_cache);
+
+       /* h points at the flexible array of buckets after the header */
+       h = table->table;
+
+       for (i = 0; i < num_entries; i++) {
+               cur = h + i;
+               INIT_LIST_HEAD(&cur->hash_list);
+               spin_lock_init(&cur->lock);
+               init_waitqueue_head(&cur->wait);
+       }
+
+       /*
+        * publish the table atomically; if a racing caller installed one
+        * first, keep theirs and free the table we just built (matching
+        * the kzalloc/vzalloc split above)
+        */
+       x = cmpxchg(&info->stripe_hash_table, NULL, table);
+       if (x) {
+               if (is_vmalloc_addr(x))
+                       vfree(x);
+               else
+                       kfree(x);
+       }
+       return 0;
+}
+
+/*
+ * caching an rbio means to copy anything from the
+ * bio_pages array into the stripe_pages array.  We
+ * use the page uptodate bit in the stripe cache array
+ * to indicate if it has valid data
+ *
+ * once the caching is done, we set the cache ready
+ * bit.
+ */
+static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+       int i;
+       char *s;
+       char *d;
+       int ret;
+
+       /*
+        * make sure every stripe page exists; on allocation failure we
+        * silently skip caching (the cache is strictly an optimization,
+        * so RBIO_CACHE_READY_BIT is simply never set)
+        */
+       ret = alloc_rbio_pages(rbio);
+       if (ret)
+               return;
+
+       for (i = 0; i < rbio->nr_pages; i++) {
+               /* only pages the higher layers actually sent down */
+               if (!rbio->bio_pages[i])
+                       continue;
+
+               /* kmap both sides: these may be highmem pages */
+               s = kmap(rbio->bio_pages[i]);
+               d = kmap(rbio->stripe_pages[i]);
+
+               memcpy(d, s, PAGE_CACHE_SIZE);
+
+               kunmap(rbio->bio_pages[i]);
+               kunmap(rbio->stripe_pages[i]);
+               /* uptodate marks this stripe page as holding valid data */
+               SetPageUptodate(rbio->stripe_pages[i]);
+       }
+       set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+}
+
+/*
+ * we hash on the first logical address of the stripe
+ */
+static int rbio_bucket(struct btrfs_raid_bio *rbio)
+{
+       /*
+        * The stripe is identified by the first logical byte address it
+        * covers.  Byte addresses have many zero low-order bits, which
+        * makes hash_64 collapse onto just a couple of values, so shift
+        * the low 16 bits away before hashing to spread the buckets out.
+        */
+       return hash_64(rbio->raid_map[0] >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
+}
+
+/*
+ * stealing an rbio means taking all the uptodate pages from the stripe
+ * array in the source rbio and putting them into the destination rbio
+ */
+static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
+{
+       int i;
+
+       /* only a fully cached rbio has pages worth taking */
+       if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
+               return;
+
+       for (i = 0; i < dest->nr_pages; i++) {
+               struct page *s = src->stripe_pages[i];
+
+               /* skip slots with no valid cached data */
+               if (!s || !PageUptodate(s))
+                       continue;
+
+               /* drop whatever page the destination already had there */
+               if (dest->stripe_pages[i])
+                       __free_page(dest->stripe_pages[i]);
+
+               /* move ownership of the page from src to dest */
+               dest->stripe_pages[i] = s;
+               src->stripe_pages[i] = NULL;
+       }
+}
+
+/*
+ * merging means we take the bio_list from the victim and
+ * splice it into the destination.  The victim should
+ * be discarded afterwards.
+ *
+ * must be called with dest->rbio_list_lock held
+ */
+static void merge_rbio(struct btrfs_raid_bio *dest,
+                      struct btrfs_raid_bio *victim)
+{
+       /* account for the victim's bytes, then hand its bios over */
+       dest->bio_list_bytes += victim->bio_list_bytes;
+       bio_list_merge(&dest->bio_list, &victim->bio_list);
+       /* leave the victim with an empty list so it can be freed safely */
+       bio_list_init(&victim->bio_list);
+}
+
+/*
+ * used to prune items that are in the cache.  The caller
+ * must hold the hash table lock.
+ */
+static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
+{
+       int bucket = rbio_bucket(rbio);
+       struct btrfs_stripe_hash_table *table;
+       struct btrfs_stripe_hash *h;
+       int freeit = 0;
+
+       /*
+        * check the bit again under the hash table lock.
+        */
+       if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
+               return;
+
+       table = rbio->fs_info->stripe_hash_table;
+       h = table->table + bucket;
+
+       /* hold the lock for the bucket because we may be
+        * removing it from the hash table
+        */
+       spin_lock(&h->lock);
+
+       /*
+        * hold the lock for the bio list because we need
+        * to make sure the bio list is empty
+        */
+       spin_lock(&rbio->bio_list_lock);
+
+       /* test_and_clear makes us the only cleaner of the cache state */
+       if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
+               list_del_init(&rbio->stripe_cache);
+               table->cache_size -= 1;
+               /* remember to drop the cache's ref once the locks are gone */
+               freeit = 1;
+
+               /* if the bio list isn't empty, this rbio is
+                * still involved in an IO.  We take it out
+                * of the cache list, and drop the ref that
+                * was held for the list.
+                *
+                * If the bio_list was empty, we also remove
+                * the rbio from the hash_table, and drop
+                * the corresponding ref
+                */
+               if (bio_list_empty(&rbio->bio_list)) {
+                       if (!list_empty(&rbio->hash_list)) {
+                               list_del_init(&rbio->hash_list);
+                               atomic_dec(&rbio->refs);
+                               BUG_ON(!list_empty(&rbio->plug_list));
+                       }
+               }
+       }
+
+       spin_unlock(&rbio->bio_list_lock);
+       spin_unlock(&h->lock);
+
+       /* drop the cache's reference outside of the locks */
+       if (freeit)
+               __free_raid_bio(rbio);
+}
+
+/*
+ * prune a given rbio from the cache
+ */
+static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
+{
+       struct btrfs_stripe_hash_table *table;
+       unsigned long flags;
+
+       /*
+        * unlocked fast-path check; __remove_rbio_from_cache re-tests
+        * the bit once the cache_lock is held
+        */
+       if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
+               return;
+
+       table = rbio->fs_info->stripe_hash_table;
+
+       /* irqsave: the cache can also be touched from bio completion */
+       spin_lock_irqsave(&table->cache_lock, flags);
+       __remove_rbio_from_cache(rbio);
+       spin_unlock_irqrestore(&table->cache_lock, flags);
+}
+
+/*
+ * remove everything in the cache
+ */
+void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
+{
+       struct btrfs_stripe_hash_table *table;
+       unsigned long flags;
+       struct btrfs_raid_bio *rbio;
+
+       table = info->stripe_hash_table;
+
+       spin_lock_irqsave(&table->cache_lock, flags);
+       /*
+        * __remove_rbio_from_cache unlinks each entry from
+        * table->stripe_cache, so this loop always makes progress
+        */
+       while (!list_empty(&table->stripe_cache)) {
+               rbio = list_entry(table->stripe_cache.next,
+                                 struct btrfs_raid_bio,
+                                 stripe_cache);
+               __remove_rbio_from_cache(rbio);
+       }
+       spin_unlock_irqrestore(&table->cache_lock, flags);
+}
+
+/*
+ * remove all cached entries and free the hash table
+ * used by unmount
+ */
+void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
+{
+       struct btrfs_stripe_hash_table *table = info->stripe_hash_table;
+
+       if (!table)
+               return;
+
+       /* drop every cached rbio before the table itself goes away */
+       btrfs_clear_rbio_cache(info);
+
+       /* the table came from either kzalloc or vzalloc at mount time */
+       if (is_vmalloc_addr(table))
+               vfree(table);
+       else
+               kfree(table);
+       info->stripe_hash_table = NULL;
+}
+
+/*
+ * insert an rbio into the stripe cache.  It
+ * must have already been prepared by calling
+ * cache_rbio_pages
+ *
+ * If this rbio was already cached, it gets
+ * moved to the front of the lru.
+ *
+ * If the size of the rbio cache is too big, we
+ * prune an item.
+ */
+static void cache_rbio(struct btrfs_raid_bio *rbio)
+{
+       struct btrfs_stripe_hash_table *table;
+       unsigned long flags;
+
+       /* only rbios whose pages were copied by cache_rbio_pages qualify */
+       if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
+               return;
+
+       table = rbio->fs_info->stripe_hash_table;
+
+       spin_lock_irqsave(&table->cache_lock, flags);
+       spin_lock(&rbio->bio_list_lock);
+
+       /* bump our ref if we were not in the list before */
+       if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
+               atomic_inc(&rbio->refs);
+
+       if (!list_empty(&rbio->stripe_cache)) {
+               /* already cached: just move to the front of the lru */
+               list_move(&rbio->stripe_cache, &table->stripe_cache);
+       } else {
+               list_add(&rbio->stripe_cache, &table->stripe_cache);
+               table->cache_size += 1;
+       }
+
+       spin_unlock(&rbio->bio_list_lock);
+
+       /* over budget: prune the coldest entry, unless that would be us */
+       if (table->cache_size > RBIO_CACHE_SIZE) {
+               struct btrfs_raid_bio *found;
+
+               found = list_entry(table->stripe_cache.prev,
+                                 struct btrfs_raid_bio,
+                                 stripe_cache);
+
+               if (found != rbio)
+                       __remove_rbio_from_cache(found);
+       }
+
+       spin_unlock_irqrestore(&table->cache_lock, flags);
+}
+
+/*
+ * helper function to run the xor_blocks api.  It is only
+ * able to do MAX_XOR_BLOCKS at a time, so we need to
+ * loop through.
+ */
+static void run_xor(void **pages, int src_cnt, ssize_t len)
+{
+       /* destination buffer sits right after the sources in the array */
+       void *dest = pages[src_cnt];
+       int src_off = 0;
+
+       /* feed xor_blocks at most MAX_XOR_BLOCKS sources per call */
+       while (src_cnt > 0) {
+               int xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
+
+               xor_blocks(xor_src_cnt, len, dest, pages + src_off);
+
+               src_off += xor_src_cnt;
+               src_cnt -= xor_src_cnt;
+       }
+}
+
+/*
+ * returns true if the bio list inside this rbio
+ * covers an entire stripe (no rmw required).
+ * Must be called with the bio list lock held, or
+ * at a time when you know it is impossible to add
+ * new bios into the list
+ */
+static int __rbio_is_full(struct btrfs_raid_bio *rbio)
+{
+       /* a full stripe carries stripe_len bytes for every data stripe */
+       unsigned long full = rbio->nr_data * rbio->stripe_len;
+
+       /* more bytes than a full stripe means accounting went wrong */
+       BUG_ON(rbio->bio_list_bytes > full);
+
+       return rbio->bio_list_bytes == full;
+}
+
+static int rbio_is_full(struct btrfs_raid_bio *rbio)
+{
+       unsigned long flags;
+       int ret;
+
+       /*
+        * take the bio list lock so no new bios can be added while
+        * __rbio_is_full looks at the byte count
+        */
+       spin_lock_irqsave(&rbio->bio_list_lock, flags);
+       ret = __rbio_is_full(rbio);
+       spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+       return ret;
+}
+
+/*
+ * returns 1 if it is safe to merge two rbios together.
+ * The merging is safe if the two rbios correspond to
+ * the same stripe and if they are both going in the same
+ * direction (read vs write), and if neither one is
+ * locked for final IO
+ *
+ * The caller is responsible for locking such that
+ * rmw_locked is safe to test
+ */
+static int rbio_can_merge(struct btrfs_raid_bio *last,
+                         struct btrfs_raid_bio *cur)
+{
+       /* neither side may already be locked for its final IO */
+       if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
+           test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
+               return 0;
+
+       /*
+        * cached rbios won't run IO on anyone else's behalf, so merging
+        * into one would strand our bios.  Stealing pages from cached
+        * rbios is handled separately.
+        */
+       if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
+           test_bit(RBIO_CACHE_BIT, &cur->flags))
+               return 0;
+
+       /* both rbios must cover the very same stripe */
+       if (last->raid_map[0] != cur->raid_map[0])
+               return 0;
+
+       /* reads can't merge with writes */
+       if (last->read_rebuild != cur->read_rebuild)
+               return 0;
+
+       return 1;
+}
+
+/*
+ * helper to index into the pstripe
+ */
+static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
+{
+       /* the p stripe's pages start right after all the data stripes */
+       int p_start = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
+
+       return rbio->stripe_pages[p_start + index];
+}
+
+/*
+ * helper to index into the qstripe, returns null
+ * if there is no qstripe
+ */
+static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
+{
+       int q_start;
+
+       /* with a single parity stripe (raid5) there is no q at all */
+       if (rbio->nr_data + 1 == rbio->bbio->num_stripes)
+               return NULL;
+
+       /* q's pages follow the data stripes and the p stripe */
+       q_start = ((rbio->nr_data + 1) * rbio->stripe_len) >>
+               PAGE_CACHE_SHIFT;
+       return rbio->stripe_pages[q_start + index];
+}
+
+/*
+ * The first stripe in the table for a logical address
+ * has the lock.  rbios are added in one of three ways:
+ *
+ * 1) Nobody has the stripe locked yet.  The rbio is given
+ * the lock and 0 is returned.  The caller must start the IO
+ * themselves.
+ *
+ * 2) Someone has the stripe locked, but we're able to merge
+ * with the lock owner.  The rbio is freed and the IO will
+ * start automatically along with the existing rbio.  1 is returned.
+ *
+ * 3) Someone has the stripe locked, but we're not able to merge.
+ * The rbio is added to the lock owner's plug list, or merged into
+ * an rbio already on the plug list.  When the lock owner unlocks,
+ * the next rbio on the list is run and the IO is started automatically.
+ * 1 is returned
+ *
+ * If we return 0, the caller still owns the rbio and must continue with
+ * IO submission.  If we return 1, the caller must assume the rbio has
+ * already been freed.
+ */
+static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
+{
+       int bucket = rbio_bucket(rbio);
+       struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
+       struct btrfs_raid_bio *cur;
+       struct btrfs_raid_bio *pending;
+       unsigned long flags;
+       /* NOTE(review): wait appears unused here — candidate for removal */
+       DEFINE_WAIT(wait);
+       struct btrfs_raid_bio *freeit = NULL;
+       struct btrfs_raid_bio *cache_drop = NULL;
+       int ret = 0;
+
+       spin_lock_irqsave(&h->lock, flags);
+       list_for_each_entry(cur, &h->hash_list, hash_list) {
+               if (cur->raid_map[0] == rbio->raid_map[0]) {
+                       spin_lock(&cur->bio_list_lock);
+
+                       /* can we steal this cached rbio's pages? */
+                       if (bio_list_empty(&cur->bio_list) &&
+                           list_empty(&cur->plug_list) &&
+                           test_bit(RBIO_CACHE_BIT, &cur->flags) &&
+                           !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
+                               list_del_init(&cur->hash_list);
+                               atomic_dec(&cur->refs);
+
+                               steal_rbio(cur, rbio);
+                               /* finish dropping cur from the cache once
+                                * the bucket lock is released */
+                               cache_drop = cur;
+                               spin_unlock(&cur->bio_list_lock);
+
+                               goto lockit;
+                       }
+
+                       /* can we merge into the lock owner? */
+                       if (rbio_can_merge(cur, rbio)) {
+                               merge_rbio(cur, rbio);
+                               spin_unlock(&cur->bio_list_lock);
+                               freeit = rbio;
+                               ret = 1;
+                               goto out;
+                       }
+
+
+                       /*
+                        * we couldn't merge with the running
+                        * rbio, see if we can merge with the
+                        * pending ones.  We don't have to
+                        * check for rmw_locked because there
+                        * is no way they are inside finish_rmw
+                        * right now
+                        */
+                       list_for_each_entry(pending, &cur->plug_list,
+                                           plug_list) {
+                               if (rbio_can_merge(pending, rbio)) {
+                                       merge_rbio(pending, rbio);
+                                       spin_unlock(&cur->bio_list_lock);
+                                       freeit = rbio;
+                                       ret = 1;
+                                       goto out;
+                               }
+                       }
+
+                       /* no merging, put us on the tail of the plug list,
+                        * our rbio will be started when the currently
+                        * running rbio unlocks
+                        */
+                       list_add_tail(&rbio->plug_list, &cur->plug_list);
+                       spin_unlock(&cur->bio_list_lock);
+                       ret = 1;
+                       goto out;
+               }
+       }
+lockit:
+       /* we own the stripe lock now; the ref belongs to the hash list */
+       atomic_inc(&rbio->refs);
+       list_add(&rbio->hash_list, &h->hash_list);
+out:
+       spin_unlock_irqrestore(&h->lock, flags);
+       /* both of these take locks of their own, so do them lock-free */
+       if (cache_drop)
+               remove_rbio_from_cache(cache_drop);
+       if (freeit)
+               __free_raid_bio(freeit);
+       return ret;
+}
+
+/*
+ * called as rmw or parity rebuild is completed.  If the plug list has more
+ * rbios waiting for this stripe, the next one on the list will be started
+ */
+static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
+{
+       int bucket;
+       struct btrfs_stripe_hash *h;
+       unsigned long flags;
+       int keep_cache = 0;
+
+       bucket = rbio_bucket(rbio);
+       h = rbio->fs_info->stripe_hash_table->table + bucket;
+
+       /*
+        * try to put this rbio in the stripe cache before we take the
+        * bucket lock; cache_rbio takes locks of its own
+        */
+       if (list_empty(&rbio->plug_list))
+               cache_rbio(rbio);
+
+       spin_lock_irqsave(&h->lock, flags);
+       spin_lock(&rbio->bio_list_lock);
+
+       if (!list_empty(&rbio->hash_list)) {
+               /*
+                * if we're still cached and there is no other IO
+                * to perform, just leave this rbio here for others
+                * to steal from later
+                */
+               if (list_empty(&rbio->plug_list) &&
+                   test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
+                       keep_cache = 1;
+                       clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+                       BUG_ON(!bio_list_empty(&rbio->bio_list));
+                       goto done;
+               }
+
+               /* drop the hash list's reference along with the entry */
+               list_del_init(&rbio->hash_list);
+               atomic_dec(&rbio->refs);
+
+               /*
+                * we use the plug list to hold all the rbios
+                * waiting for the chance to lock this stripe.
+                * hand the lock over to one of them.
+                */
+               if (!list_empty(&rbio->plug_list)) {
+                       struct btrfs_raid_bio *next;
+                       struct list_head *head = rbio->plug_list.next;
+
+                       next = list_entry(head, struct btrfs_raid_bio,
+                                         plug_list);
+
+                       list_del_init(&rbio->plug_list);
+
+                       /* next becomes the lock owner and takes a ref */
+                       list_add(&next->hash_list, &h->hash_list);
+                       atomic_inc(&next->refs);
+                       spin_unlock(&rbio->bio_list_lock);
+                       spin_unlock_irqrestore(&h->lock, flags);
+
+                       if (next->read_rebuild)
+                               async_read_rebuild(next);
+                       else {
+                               /* give next our cached pages, then start it */
+                               steal_rbio(rbio, next);
+                               async_rmw_stripe(next);
+                       }
+
+                       goto done_nolock;
+               } else  if (waitqueue_active(&h->wait)) {
+                       /* nobody queued, but someone is sleeping on the
+                        * bucket; wake them outside the locks */
+                       spin_unlock(&rbio->bio_list_lock);
+                       spin_unlock_irqrestore(&h->lock, flags);
+                       wake_up(&h->wait);
+                       goto done_nolock;
+               }
+       }
+done:
+       spin_unlock(&rbio->bio_list_lock);
+       spin_unlock_irqrestore(&h->lock, flags);
+
+done_nolock:
+       if (!keep_cache)
+               remove_rbio_from_cache(rbio);
+}
+
+static void __free_raid_bio(struct btrfs_raid_bio *rbio)
+{
+       int i;
+
+       WARN_ON(atomic_read(&rbio->refs) < 0);
+       /* drop one reference; only the last holder tears the rbio down */
+       if (!atomic_dec_and_test(&rbio->refs))
+               return;
+
+       /* by now the rbio must be off every list */
+       WARN_ON(!list_empty(&rbio->stripe_cache));
+       WARN_ON(!list_empty(&rbio->hash_list));
+       WARN_ON(!bio_list_empty(&rbio->bio_list));
+
+       for (i = 0; i < rbio->nr_pages; i++) {
+               if (rbio->stripe_pages[i]) {
+                       __free_page(rbio->stripe_pages[i]);
+                       rbio->stripe_pages[i] = NULL;
+               }
+       }
+       /* the rbio owns the raid_map and bbio handed to alloc_rbio */
+       kfree(rbio->raid_map);
+       kfree(rbio->bbio);
+       kfree(rbio);
+}
+
+static void free_raid_bio(struct btrfs_raid_bio *rbio)
+{
+       /* hand the stripe lock to any waiter before dropping our ref */
+       unlock_stripe(rbio);
+       __free_raid_bio(rbio);
+}
+
+/*
+ * this frees the rbio and runs through all the bios in the
+ * bio_list and calls end_io on them
+ */
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
+{
+       /*
+        * detach the whole bio chain first: once free_raid_bio runs the
+        * rbio may be gone, but the bios we pulled out remain valid
+        */
+       struct bio *cur = bio_list_get(&rbio->bio_list);
+       struct bio *next;
+       free_raid_bio(rbio);
+
+       while (cur) {
+               next = cur->bi_next;
+               /* unlink before end_io so each bio stands alone */
+               cur->bi_next = NULL;
+               if (uptodate)
+                       set_bit(BIO_UPTODATE, &cur->bi_flags);
+               bio_endio(cur, err);
+               cur = next;
+       }
+}
+
+/*
+ * end io function used by finish_rmw.  When we finally
+ * get here, we've written a full stripe
+ */
+static void raid_write_end_io(struct bio *bio, int err)
+{
+       struct btrfs_raid_bio *rbio = bio->bi_private;
+
+       /* record which stripe failed so the error count is per-stripe */
+       if (err)
+               fail_bio_stripe(rbio, bio);
+
+       bio_put(bio);
+
+       /* wait until the last outstanding write for this rbio completes */
+       if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+               return;
+
+       err = 0;
+
+       /* all writes are done; fail only if too many stripes errored out */
+       if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+               err = -EIO;
+
+       rbio_orig_end_io(rbio, err, 0);
+       return;
+}
+
+/*
+ * the read/modify/write code wants to use the original bio for
+ * any pages it included, and then use the rbio for everything
+ * else.  This function decides if a given index (stripe number)
+ * and page number in that stripe fall inside the original bio
+ * or the rbio.
+ *
+ * if you set bio_list_only, you'll get a NULL back for any ranges
+ * that are outside the bio_list
+ *
+ * This doesn't take any refs on anything, you get a bare page pointer
+ * and the caller must bump refs as required.
+ *
+ * You must call index_rbio_pages once before you can trust
+ * the answers from this function.
+ */
+static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
+                                int index, int pagenr, int bio_list_only)
+{
+       int chunk_page;
+       struct page *p = NULL;
+
+       /*
+        * use PAGE_CACHE_SHIFT like the rest of this file; it is always
+        * equal to PAGE_SHIFT, this is purely for consistency
+        */
+       chunk_page = index * (rbio->stripe_len >> PAGE_CACHE_SHIFT) + pagenr;
+
+       /*
+        * the bio_pages array can be re-indexed while bios are added,
+        * so sample it under the bio list lock
+        */
+       spin_lock_irq(&rbio->bio_list_lock);
+       p = rbio->bio_pages[chunk_page];
+       spin_unlock_irq(&rbio->bio_list_lock);
+
+       if (p || bio_list_only)
+               return p;
+
+       /* fall back to the rbio's own stripe pages */
+       return rbio->stripe_pages[chunk_page];
+}
+
+/*
+ * number of pages we need for the entire stripe across all the
+ * drives
+ */
+static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
+{
+       /* total bytes across every stripe, rounded up to whole pages */
+       unsigned long bytes = stripe_len * nr_stripes;
+
+       return (bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+}
+
+/*
+ * allocation and initial setup for the btrfs_raid_bio.  Note that
+ * this does not allocate any pages for rbio->stripe_pages.
+ */
+static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
+                         struct btrfs_bio *bbio, u64 *raid_map,
+                         u64 stripe_len)
+{
+       struct btrfs_raid_bio *rbio;
+       int nr_data = 0;
+       int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
+       void *p;
+
+       /* one allocation covers the rbio plus both page-pointer arrays */
+       rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
+                       GFP_NOFS);
+       if (!rbio) {
+               /* we take ownership of raid_map and bbio even on failure */
+               kfree(raid_map);
+               kfree(bbio);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       bio_list_init(&rbio->bio_list);
+       INIT_LIST_HEAD(&rbio->plug_list);
+       spin_lock_init(&rbio->bio_list_lock);
+       INIT_LIST_HEAD(&rbio->stripe_cache);
+       INIT_LIST_HEAD(&rbio->hash_list);
+       rbio->bbio = bbio;
+       rbio->raid_map = raid_map;
+       rbio->fs_info = root->fs_info;
+       rbio->stripe_len = stripe_len;
+       rbio->nr_pages = num_pages;
+       /* -1 means no failed stripe recorded yet */
+       rbio->faila = -1;
+       rbio->failb = -1;
+       atomic_set(&rbio->refs, 1);
+
+       /*
+        * the stripe_pages and bio_pages array point to the extra
+        * memory we allocated past the end of the rbio
+        */
+       p = rbio + 1;
+       rbio->stripe_pages = p;
+       rbio->bio_pages = p + sizeof(struct page *) * num_pages;
+
+       /* a trailing Q stripe marker means raid6: two parity stripes */
+       if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
+               nr_data = bbio->num_stripes - 2;
+       else
+               nr_data = bbio->num_stripes - 1;
+
+       rbio->nr_data = nr_data;
+       return rbio;
+}
+
+/* allocate pages for all the stripes in the bio, including parity */
+static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+       int i;
+
+       for (i = 0; i < rbio->nr_pages; i++) {
+               struct page *page;
+
+               /* keep any page that is already in place */
+               if (rbio->stripe_pages[i])
+                       continue;
+
+               page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+               if (!page)
+                       return -ENOMEM;
+
+               /* fresh pages must not look valid to the stripe cache */
+               ClearPageUptodate(page);
+               rbio->stripe_pages[i] = page;
+       }
+       return 0;
+}
+
+/* allocate pages for just the p/q stripes */
+static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
+{
+       /* skip past the data stripes; only p (and q) need pages here */
+       int i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
+
+       for (; i < rbio->nr_pages; i++) {
+               struct page *page;
+
+               /* keep any page that is already in place */
+               if (rbio->stripe_pages[i])
+                       continue;
+
+               page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+               if (!page)
+                       return -ENOMEM;
+               rbio->stripe_pages[i] = page;
+       }
+       return 0;
+}
+
+/*
+ * add a single page from a specific stripe into our list of bios for IO
+ * this will try to merge into existing bios if possible, and returns
+ * zero if all went well.
+ */
+int rbio_add_io_page(struct btrfs_raid_bio *rbio,
+                    struct bio_list *bio_list,
+                    struct page *page,
+                    int stripe_nr,
+                    unsigned long page_index,
+                    unsigned long bio_max_len)
+{
+       struct bio *last = bio_list->tail;
+       u64 last_end = 0;
+       int ret;
+       struct bio *bio;
+       struct btrfs_bio_stripe *stripe;
+       u64 disk_start;
+
+       stripe = &rbio->bbio->stripes[stripe_nr];
+       /* physical byte offset of this page on the target device */
+       disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
+
+       /* if the device is missing, just fail this stripe */
+       if (!stripe->dev->bdev)
+               return fail_rbio_index(rbio, stripe_nr);
+
+       /* see if we can add this page onto our existing bio */
+       if (last) {
+               /* end of the last bio, in bytes (bi_sector is 512b units) */
+               last_end = (u64)last->bi_sector << 9;
+               last_end += last->bi_size;
+
+               /*
+                * we can't merge these if they are from different
+                * devices or if they are not contiguous
+                */
+               if (last_end == disk_start && stripe->dev->bdev &&
+                   test_bit(BIO_UPTODATE, &last->bi_flags) &&
+                   last->bi_bdev == stripe->dev->bdev) {
+                       /* partial add means the bio is full; fall through */
+                       ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
+                       if (ret == PAGE_CACHE_SIZE)
+                               return 0;
+               }
+       }
+
+       /* put a new bio on the list; ?: (gcc extension) guarantees at
+        * least one io vec even for sub-page bio_max_len */
+       bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
+       if (!bio)
+               return -ENOMEM;
+
+       bio->bi_size = 0;
+       bio->bi_bdev = stripe->dev->bdev;
+       bio->bi_sector = disk_start >> 9;
+       set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+       /* adding one page to a brand new bio is expected to succeed */
+       bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+       bio_list_add(bio_list, bio);
+       return 0;
+}
+
+/*
+ * while we're doing the read/modify/write cycle, we could
+ * have errors in reading pages off the disk.  This checks
+ * for errors and if we're not able to read the page it'll
+ * trigger parity reconstruction.  The rmw will be finished
+ * after we've reconstructed the failed stripes
+ */
+static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
+{
+       /* no read failures recorded: the rmw can finish directly */
+       if (rbio->faila < 0 && rbio->failb < 0) {
+               finish_rmw(rbio);
+               return;
+       }
+
+       /* the last stripe failing here is unexpected */
+       BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1);
+       __raid56_parity_recover(rbio);
+}
+
+/*
+ * these are just the pages from the rbio array, not from anything
+ * the FS sent down to us
+ */
+static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
+{
+       /* each stripe owns stripe_len worth of consecutive pages */
+       int pages_per_stripe = rbio->stripe_len >> PAGE_CACHE_SHIFT;
+
+       return rbio->stripe_pages[stripe * pages_per_stripe + page];
+}
+
+/*
+ * helper function to walk our bio list and populate the bio_pages array with
+ * the result.  This seems expensive, but it is faster than constantly
+ * searching through the bio list as we setup the IO in finish_rmw or stripe
+ * reconstruction.
+ *
+ * This must be called before you trust the answers from page_in_rbio
+ */
+static void index_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+       struct bio *bio;
+       u64 start;
+       unsigned long stripe_offset;
+       unsigned long page_index;
+       struct page *p;
+       int i;
+
+       /* lock out bio additions while we rebuild the index */
+       spin_lock_irq(&rbio->bio_list_lock);
+       bio_list_for_each(bio, &rbio->bio_list) {
+               start = (u64)bio->bi_sector << 9;
+               /* byte offset of this bio within the full stripe */
+               stripe_offset = start - rbio->raid_map[0];
+               page_index = stripe_offset >> PAGE_CACHE_SHIFT;
+
+               /*
+                * each bio's pages map to consecutive page slots starting
+                * at page_index (assumes one page per bio_vec — TODO
+                * confirm against how these bios are built)
+                */
+               for (i = 0; i < bio->bi_vcnt; i++) {
+                       p = bio->bi_io_vec[i].bv_page;
+                       rbio->bio_pages[page_index + i] = p;
+               }
+       }
+       spin_unlock_irq(&rbio->bio_list_lock);
+}
+
+/*
+ * this is called from one of two situations.  We either
+ * have a full stripe from the higher layers, or we've read all
+ * the missing bits off disk.
+ *
+ * This will calculate the parity and then send down any
+ * changed blocks.
+ */
+static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
+{
+       struct btrfs_bio *bbio = rbio->bbio;
+       void *pointers[bbio->num_stripes];
+       int stripe_len = rbio->stripe_len;
+       int nr_data = rbio->nr_data;
+       int stripe;
+       int pagenr;
+       int p_stripe = -1;
+       int q_stripe = -1;
+       struct bio_list bio_list;
+       struct bio *bio;
+       int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
+       int ret;
+
+       bio_list_init(&bio_list);
+
+       if (bbio->num_stripes - rbio->nr_data == 1) {
+               p_stripe = bbio->num_stripes - 1;
+       } else if (bbio->num_stripes - rbio->nr_data == 2) {
+               p_stripe = bbio->num_stripes - 2;
+               q_stripe = bbio->num_stripes - 1;
+       } else {
+               BUG();
+       }
+
+       /* at this point we either have a full stripe,
+        * or we've read the full stripe from the drive.
+        * recalculate the parity and write the new results.
+        *
+        * We're not allowed to add any new bios to the
+        * bio list here, anyone else that wants to
+        * change this stripe needs to do their own rmw.
+        */
+       spin_lock_irq(&rbio->bio_list_lock);
+       set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+       spin_unlock_irq(&rbio->bio_list_lock);
+
+       atomic_set(&rbio->bbio->error, 0);
+
+       /*
+        * now that we've set rmw_locked, run through the
+        * bio list one last time and map the page pointers
+        *
+        * We don't cache full rbios because we're assuming
+        * the higher layers are unlikely to use this area of
+        * the disk again soon.  If they do use it again,
+        * hopefully they will send another full bio.
+        */
+       index_rbio_pages(rbio);
+       if (!rbio_is_full(rbio))
+               cache_rbio_pages(rbio);
+       else
+               clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+
+       for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
+               struct page *p;
+               /* first collect one page from each data stripe */
+               for (stripe = 0; stripe < nr_data; stripe++) {
+                       p = page_in_rbio(rbio, stripe, pagenr, 0);
+                       pointers[stripe] = kmap(p);
+               }
+
+               /* then add the parity stripe */
+               p = rbio_pstripe_page(rbio, pagenr);
+               SetPageUptodate(p);
+               pointers[stripe++] = kmap(p);
+
+               if (q_stripe != -1) {
+
+                       /*
+                        * raid6, add the qstripe and call the
+                        * library function to fill in our p/q
+                        */
+                       p = rbio_qstripe_page(rbio, pagenr);
+                       SetPageUptodate(p);
+                       pointers[stripe++] = kmap(p);
+
+                       raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
+                                               pointers);
+               } else {
+                       /* raid5 */
+                       memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+                       run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+               }
+
+
+               for (stripe = 0; stripe < bbio->num_stripes; stripe++)
+                       kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+       }
+
+       /*
+        * time to start writing.  Make bios for everything from the
+        * higher layers (the bio_list in our rbio) and our p/q.  Ignore
+        * everything else.
+        */
+       for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+               for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
+                       struct page *page;
+                       if (stripe < rbio->nr_data) {
+                               page = page_in_rbio(rbio, stripe, pagenr, 1);
+                               if (!page)
+                                       continue;
+                       } else {
+                              page = rbio_stripe_page(rbio, stripe, pagenr);
+                       }
+
+                       ret = rbio_add_io_page(rbio, &bio_list,
+                                      page, stripe, pagenr, rbio->stripe_len);
+                       if (ret)
+                               goto cleanup;
+               }
+       }
+
+       atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list));
+       BUG_ON(atomic_read(&bbio->stripes_pending) == 0);
+
+       while (1) {
+               bio = bio_list_pop(&bio_list);
+               if (!bio)
+                       break;
+
+               bio->bi_private = rbio;
+               bio->bi_end_io = raid_write_end_io;
+               BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+               submit_bio(WRITE, bio);
+       }
+       return;
+
+cleanup:
+       rbio_orig_end_io(rbio, -EIO, 0);
+}
+
+/*
+ * helper to find the stripe number for a given bio.  Used to figure out which
+ * stripe has failed.  This expects the bio to correspond to a physical disk,
+ * so it looks up based on physical sector numbers.
+ *
+ * Returns the index into rbio->bbio->stripes[], or -1 when the bio's start
+ * sector does not fall inside any stripe of this rbio.
+ */
+static int find_bio_stripe(struct btrfs_raid_bio *rbio,
+                          struct bio *bio)
+{
+       u64 physical = bio->bi_sector;
+       u64 stripe_start;
+       int i;
+       struct btrfs_bio_stripe *stripe;
+
+       /* bi_sector counts 512 byte sectors; convert to a byte offset */
+       physical <<= 9;
+
+       for (i = 0; i < rbio->bbio->num_stripes; i++) {
+               stripe = &rbio->bbio->stripes[i];
+               stripe_start = stripe->physical;
+               if (physical >= stripe_start &&
+                   physical < stripe_start + rbio->stripe_len) {
+                       return i;
+               }
+       }
+       return -1;
+}
+
+/*
+ * helper to find the stripe number for a given
+ * bio (before mapping).  Used to figure out which stripe has
+ * failed.  This looks up based on logical block numbers.
+ *
+ * Only the data stripes are searched (raid_map[0..nr_data-1]); returns the
+ * data stripe index or -1 when the bio is outside this rbio's data range.
+ */
+static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
+                                  struct bio *bio)
+{
+       u64 logical = bio->bi_sector;
+       u64 stripe_start;
+       int i;
+
+       /* bi_sector counts 512 byte sectors; convert to a byte offset */
+       logical <<= 9;
+
+       for (i = 0; i < rbio->nr_data; i++) {
+               stripe_start = rbio->raid_map[i];
+               if (logical >= stripe_start &&
+                   logical < stripe_start + rbio->stripe_len) {
+                       return i;
+               }
+       }
+       return -1;
+}
+
+/*
+ * record stripe number 'failed' as bad in this rbio.
+ *
+ * At most two failures (faila/failb) can be tracked; a third distinct
+ * failure is beyond what parity can rebuild.
+ *
+ * returns -EIO if we had too many failures
+ */
+static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       /* irqsave: this can be called from bio end_io (interrupt) context */
+       spin_lock_irqsave(&rbio->bio_list_lock, flags);
+
+       /* we already know this stripe is bad, move on */
+       if (rbio->faila == failed || rbio->failb == failed)
+               goto out;
+
+       if (rbio->faila == -1) {
+               /* first failure on this rbio */
+               rbio->faila = failed;
+               atomic_inc(&rbio->bbio->error);
+       } else if (rbio->failb == -1) {
+               /* second failure on this rbio */
+               rbio->failb = failed;
+               atomic_inc(&rbio->bbio->error);
+       } else {
+               /* third distinct failure: unrecoverable */
+               ret = -EIO;
+       }
+out:
+       spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+
+       return ret;
+}
+
+/*
+ * helper to fail a stripe based on a physical disk
+ * bio.
+ *
+ * Maps the bio back to its stripe index and records the failure;
+ * returns -EIO when the bio cannot be matched to a stripe or when
+ * too many stripes have already failed.
+ */
+static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
+                          struct bio *bio)
+{
+       int failed = find_bio_stripe(rbio, bio);
+
+       if (failed < 0)
+               return -EIO;
+
+       return fail_rbio_index(rbio, failed);
+}
+
+/*
+ * this sets each page in the bio uptodate.  It should only be used on private
+ * rbio pages, nothing that comes in from the higher layers
+ */
+static void set_bio_pages_uptodate(struct bio *bio)
+{
+       int i;
+       struct page *p;
+
+       /* walk the bio's vector and flag every page as valid */
+       for (i = 0; i < bio->bi_vcnt; i++) {
+               p = bio->bi_io_vec[i].bv_page;
+               SetPageUptodate(p);
+       }
+}
+
+/*
+ * end io for the read phase of the rmw cycle.  All the bios here are physical
+ * stripe bios we've read from the disk so we can recalculate the parity of the
+ * stripe.
+ *
+ * This will usually kick off finish_rmw once all the bios are read in, but it
+ * may trigger parity reconstruction if we had any errors along the way
+ */
+static void raid_rmw_end_io(struct bio *bio, int err)
+{
+       struct btrfs_raid_bio *rbio = bio->bi_private;
+
+       if (err)
+               fail_bio_stripe(rbio, bio);
+       else
+               set_bio_pages_uptodate(bio);
+
+       bio_put(bio);
+
+       /* only the completion that drops stripes_pending to zero continues */
+       if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+               return;
+
+       err = 0;
+       /* too many failed stripes: parity cannot save us, end with -EIO */
+       if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+               goto cleanup;
+
+       /*
+        * this will normally call finish_rmw to start our write
+        * but if there are any failed stripes we'll reconstruct
+        * from parity first
+        */
+       validate_rbio_for_rmw(rbio);
+       return;
+
+cleanup:
+
+       rbio_orig_end_io(rbio, -EIO, 0);
+}
+
+/* queue the read/modify/write phase of this rbio on the rmw worker pool */
+static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
+{
+       rbio->work.flags = 0;
+       rbio->work.func = rmw_work;
+
+       btrfs_queue_worker(&rbio->fs_info->rmw_workers,
+                          &rbio->work);
+}
+
+/* queue a read-rebuild (parity recovery) of this rbio on the rmw worker pool */
+static void async_read_rebuild(struct btrfs_raid_bio *rbio)
+{
+       rbio->work.flags = 0;
+       rbio->work.func = read_rebuild_work;
+
+       btrfs_queue_worker(&rbio->fs_info->rmw_workers,
+                          &rbio->work);
+}
+
+/*
+ * kick off the read side of a read/modify/write cycle: queue reads for all
+ * stripe pages that are not already supplied by the bio list or the stripe
+ * cache.  The write happens from the read completion (raid_rmw_end_io).
+ *
+ * the stripe must be locked by the caller.  It will
+ * unlock after all the writes are done
+ */
+static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
+{
+       int bios_to_read = 0;
+       struct btrfs_bio *bbio = rbio->bbio;
+       struct bio_list bio_list;
+       int ret;
+       int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       int pagenr;
+       int stripe;
+       struct bio *bio;
+
+       bio_list_init(&bio_list);
+
+       ret = alloc_rbio_pages(rbio);
+       if (ret)
+               goto cleanup;
+
+       index_rbio_pages(rbio);
+
+       atomic_set(&rbio->bbio->error, 0);
+       /*
+        * build a list of bios to read all the missing parts of this
+        * stripe
+        */
+       for (stripe = 0; stripe < rbio->nr_data; stripe++) {
+               for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+                       struct page *page;
+                       /*
+                        * we want to find all the pages missing from
+                        * the rbio and read them from the disk.  If
+                        * page_in_rbio finds a page in the bio list
+                        * we don't need to read it off the stripe.
+                        */
+                       page = page_in_rbio(rbio, stripe, pagenr, 1);
+                       if (page)
+                               continue;
+
+                       page = rbio_stripe_page(rbio, stripe, pagenr);
+                       /*
+                        * the bio cache may have handed us an uptodate
+                        * page.  If so, be happy and use it
+                        */
+                       if (PageUptodate(page))
+                               continue;
+
+                       ret = rbio_add_io_page(rbio, &bio_list, page,
+                                      stripe, pagenr, rbio->stripe_len);
+                       if (ret)
+                               goto cleanup;
+               }
+       }
+
+       bios_to_read = bio_list_size(&bio_list);
+       if (!bios_to_read) {
+               /*
+                * this can happen if others have merged with
+                * us, it means there is nothing left to read.
+                * But if there are missing devices it may not be
+                * safe to do the full stripe write yet.
+                */
+               goto finish;
+       }
+
+       /*
+        * the bbio may be freed once we submit the last bio.  Make sure
+        * not to touch it after that
+        */
+       atomic_set(&bbio->stripes_pending, bios_to_read);
+       while (1) {
+               bio = bio_list_pop(&bio_list);
+               if (!bio)
+                       break;
+
+               bio->bi_private = rbio;
+               bio->bi_end_io = raid_rmw_end_io;
+
+               /* push end_io processing out of interrupt context */
+               btrfs_bio_wq_end_io(rbio->fs_info, bio,
+                                   BTRFS_WQ_ENDIO_RAID56);
+
+               BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+               submit_bio(READ, bio);
+       }
+       /* the actual write will happen once the reads are done */
+       return 0;
+
+cleanup:
+       rbio_orig_end_io(rbio, -EIO, 0);
+       return -EIO;
+
+finish:
+       validate_rbio_for_rmw(rbio);
+       return 0;
+}
+
+/*
+ * if the upper layers pass in a full stripe, we thank them by only allocating
+ * enough pages to hold the parity, and sending it all down quickly.
+ *
+ * Returns nonzero only if parity page allocation fails; a held stripe
+ * lock (lock_stripe_add != 0) means another rbio owns the stripe and
+ * will process us later, so that case still returns 0.
+ */
+static int full_stripe_write(struct btrfs_raid_bio *rbio)
+{
+       int ret;
+
+       ret = alloc_rbio_parity_pages(rbio);
+       if (ret)
+               return ret;
+
+       /* 0 means we now own the stripe lock and can write immediately */
+       ret = lock_stripe_add(rbio);
+       if (ret == 0)
+               finish_rmw(rbio);
+       return 0;
+}
+
+/*
+ * partial stripe writes get handed over to async helpers.
+ * We're really hoping to merge a few more writes into this
+ * rbio before calculating new parity
+ */
+static int partial_stripe_write(struct btrfs_raid_bio *rbio)
+{
+       int ret;
+
+       /* 0 means we got the stripe lock; rmw proceeds on a worker */
+       ret = lock_stripe_add(rbio);
+       if (ret == 0)
+               async_rmw_stripe(rbio);
+       return 0;
+}
+
+/*
+ * sometimes while we were reading from the drive to
+ * recalculate parity, enough new bios come into create
+ * a full stripe.  So we do a check here to see if we can
+ * go directly to finish_rmw
+ */
+static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
+{
+       /* head off into rmw land if we don't have a full stripe */
+       if (!rbio_is_full(rbio))
+               return partial_stripe_write(rbio);
+       return full_stripe_write(rbio);
+}
+
+/*
+ * We use plugging call backs to collect full stripes.
+ * Any time we get a partial stripe write while plugged
+ * we collect it into a list.  When the unplug comes down,
+ * we sort the list by logical block number and merge
+ * everything we can into the same rbios
+ */
+struct btrfs_plug_cb {
+       struct blk_plug_cb cb;          /* block layer plug callback header */
+       struct btrfs_fs_info *info;     /* fs this plug belongs to */
+       struct list_head rbio_list;     /* partial rbios collected while plugged */
+       struct btrfs_work work;         /* used when unplug runs from schedule */
+};
+
+/*
+ * rbios on the plug list are sorted for easier merging.
+ *
+ * list_sort comparator: orders rbios by the start sector of the first
+ * bio in their bio list so adjacent stripes end up next to each other.
+ */
+static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+       struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
+                                                plug_list);
+       struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
+                                                plug_list);
+       u64 a_sector = ra->bio_list.head->bi_sector;
+       u64 b_sector = rb->bio_list.head->bi_sector;
+
+       if (a_sector < b_sector)
+               return -1;
+       if (a_sector > b_sector)
+               return 1;
+       return 0;
+}
+
+/*
+ * drain the plug list: sort by sector, merge mergeable rbios, then send
+ * each resulting rbio down the write path.  Frees the plug when done.
+ */
+static void run_plug(struct btrfs_plug_cb *plug)
+{
+       struct btrfs_raid_bio *cur;
+       struct btrfs_raid_bio *last = NULL;
+
+       /*
+        * sort our plug list then try to merge
+        * everything we can in hopes of creating full
+        * stripes.
+        */
+       list_sort(NULL, &plug->rbio_list, plug_cmp);
+       while (!list_empty(&plug->rbio_list)) {
+               cur = list_entry(plug->rbio_list.next,
+                                struct btrfs_raid_bio, plug_list);
+               list_del_init(&cur->plug_list);
+
+               if (rbio_is_full(cur)) {
+                       /* we have a full stripe, send it down */
+                       full_stripe_write(cur);
+                       continue;
+               }
+               if (last) {
+                       /* fold cur into last when they cover the same stripe */
+                       if (rbio_can_merge(last, cur)) {
+                               merge_rbio(last, cur);
+                               __free_raid_bio(cur);
+                               continue;
+
+                       }
+                       /* can't merge: flush the previous rbio first */
+                       __raid56_parity_write(last);
+               }
+               last = cur;
+       }
+       if (last) {
+               __raid56_parity_write(last);
+       }
+       kfree(plug);
+}
+
+/*
+ * if the unplug comes from schedule, we have to push the
+ * work off to a helper thread
+ */
+static void unplug_work(struct btrfs_work *work)
+{
+       struct btrfs_plug_cb *plug;
+       plug = container_of(work, struct btrfs_plug_cb, work);
+       run_plug(plug);
+}
+
+/*
+ * block layer unplug callback.  When called from schedule() we must not
+ * do the (potentially blocking) plug processing inline, so it is pushed
+ * to the rmw worker pool; otherwise we run it directly.
+ */
+static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+       struct btrfs_plug_cb *plug;
+       plug = container_of(cb, struct btrfs_plug_cb, cb);
+
+       if (from_schedule) {
+               plug->work.flags = 0;
+               plug->work.func = unplug_work;
+               btrfs_queue_worker(&plug->info->rmw_workers,
+                                  &plug->work);
+               return;
+       }
+       run_plug(plug);
+}
+
+/*
+ * our main entry point for writes from the rest of the FS.
+ *
+ * Full-stripe writes are sent down immediately; partial writes are parked
+ * on the current task's block plug (if any) so they can be merged with
+ * neighbours at unplug time.  Takes ownership of bbio/raid_map on success
+ * (alloc_rbio keeps them); frees them itself on allocation failure.
+ */
+int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+                       struct btrfs_bio *bbio, u64 *raid_map,
+                       u64 stripe_len)
+{
+       struct btrfs_raid_bio *rbio;
+       struct btrfs_plug_cb *plug = NULL;
+       struct blk_plug_cb *cb;
+
+       rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+       if (IS_ERR(rbio)) {
+               kfree(raid_map);
+               kfree(bbio);
+               return PTR_ERR(rbio);
+       }
+       bio_list_add(&rbio->bio_list, bio);
+       rbio->bio_list_bytes = bio->bi_size;
+
+       /*
+        * don't plug on full rbios, just get them out the door
+        * as quickly as we can
+        */
+       if (rbio_is_full(rbio))
+               return full_stripe_write(rbio);
+
+       /* returns our callback if this task is plugged, NULL otherwise */
+       cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
+                              sizeof(*plug));
+       if (cb) {
+               plug = container_of(cb, struct btrfs_plug_cb, cb);
+               if (!plug->info) {
+                       /* first use of this plug: finish initializing it */
+                       plug->info = root->fs_info;
+                       INIT_LIST_HEAD(&plug->rbio_list);
+               }
+               list_add_tail(&rbio->plug_list, &plug->rbio_list);
+       } else {
+               return __raid56_parity_write(rbio);
+       }
+       return 0;
+}
+
+/*
+ * all parity reconstruction happens here.  We've read in everything
+ * we can find from the drives and this does the heavy lifting of
+ * sorting the good from the bad.
+ *
+ * Rebuilds the failed stripe(s) recorded in rbio->faila/failb page by
+ * page, then either completes the original read (read_rebuild) or
+ * resumes the rmw cycle via finish_rmw.
+ */
+static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
+{
+       int pagenr, stripe;
+       void **pointers;
+       int faila = -1, failb = -1;
+       int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       struct page *page;
+       int err;
+       int i;
+
+       /* one kmap'd pointer per stripe, handed to the raid6 library */
+       pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *),
+                          GFP_NOFS);
+       if (!pointers) {
+               err = -ENOMEM;
+               goto cleanup_io;
+       }
+
+       faila = rbio->faila;
+       failb = rbio->failb;
+
+       if (rbio->read_rebuild) {
+               /* freeze the bio list so nobody merges more bios into us */
+               spin_lock_irq(&rbio->bio_list_lock);
+               set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+               spin_unlock_irq(&rbio->bio_list_lock);
+       }
+
+       index_rbio_pages(rbio);
+
+       for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+               /* setup our array of pointers with pages
+                * from each stripe
+                */
+               for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+                       /*
+                        * if we're rebuilding a read, we have to use
+                        * pages from the bio list
+                        */
+                       if (rbio->read_rebuild &&
+                           (stripe == faila || stripe == failb)) {
+                               page = page_in_rbio(rbio, stripe, pagenr, 0);
+                       } else {
+                               page = rbio_stripe_page(rbio, stripe, pagenr);
+                       }
+                       pointers[stripe] = kmap(page);
+               }
+
+               /* all raid6 handling here */
+               if (rbio->raid_map[rbio->bbio->num_stripes - 1] ==
+                   RAID6_Q_STRIPE) {
+
+                       /*
+                        * single failure, rebuild from parity raid5
+                        * style
+                        */
+                       if (failb < 0) {
+                               if (faila == rbio->nr_data) {
+                                       /*
+                                        * Just the P stripe has failed, without
+                                        * a bad data or Q stripe.
+                                        * TODO, we should redo the xor here.
+                                        */
+                                       err = -EIO;
+                                       goto cleanup;
+                               }
+                               /*
+                                * a single failure in raid6 is rebuilt
+                                * in the pstripe code below
+                                */
+                               goto pstripe;
+                       }
+
+                       /* make sure our ps and qs are in order */
+                       if (faila > failb) {
+                               int tmp = failb;
+                               failb = faila;
+                               faila = tmp;
+                       }
+
+                       /* if the q stripe is failed, do a pstripe reconstruction
+                        * from the xors.
+                        * If both the q stripe and the P stripe are failed, we're
+                        * here due to a crc mismatch and we can't give them the
+                        * data they want
+                        */
+                       if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
+                               if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
+                                       err = -EIO;
+                                       goto cleanup;
+                               }
+                               /*
+                                * otherwise we have one bad data stripe and
+                                * a good P stripe.  raid5!
+                                */
+                               goto pstripe;
+                       }
+
+                       if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
+                               /* one data stripe + P failed: use Q to rebuild data */
+                               raid6_datap_recov(rbio->bbio->num_stripes,
+                                                 PAGE_SIZE, faila, pointers);
+                       } else {
+                               /* two data stripes failed: full P+Q recovery */
+                               raid6_2data_recov(rbio->bbio->num_stripes,
+                                                 PAGE_SIZE, faila, failb,
+                                                 pointers);
+                       }
+               } else {
+                       void *p;
+
+                       /* rebuild from P stripe here (raid5 or raid6) */
+                       BUG_ON(failb != -1);
+pstripe:
+                       /* Copy parity block into failed block to start with */
+                       memcpy(pointers[faila],
+                              pointers[rbio->nr_data],
+                              PAGE_CACHE_SIZE);
+
+                       /* rearrange the pointer array */
+                       p = pointers[faila];
+                       for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
+                               pointers[stripe] = pointers[stripe + 1];
+                       pointers[rbio->nr_data - 1] = p;
+
+                       /* xor in the rest */
+                       run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
+               }
+               /* if we're doing this rebuild as part of an rmw, go through
+                * and set all of our private rbio pages in the
+                * failed stripes as uptodate.  This way finish_rmw will
+                * know they can be trusted.  If this was a read reconstruction,
+                * other endio functions will fiddle the uptodate bits
+                */
+               if (!rbio->read_rebuild) {
+                       /* NOTE(review): runs once per outer pagenr iteration;
+                        * looks redundant after the first pass — confirm */
+                       for (i = 0;  i < nr_pages; i++) {
+                               if (faila != -1) {
+                                       page = rbio_stripe_page(rbio, faila, i);
+                                       SetPageUptodate(page);
+                               }
+                               if (failb != -1) {
+                                       page = rbio_stripe_page(rbio, failb, i);
+                                       SetPageUptodate(page);
+                               }
+                       }
+               }
+               for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+                       /*
+                        * if we're rebuilding a read, we have to use
+                        * pages from the bio list
+                        */
+                       if (rbio->read_rebuild &&
+                           (stripe == faila || stripe == failb)) {
+                               page = page_in_rbio(rbio, stripe, pagenr, 0);
+                       } else {
+                               page = rbio_stripe_page(rbio, stripe, pagenr);
+                       }
+                       kunmap(page);
+               }
+       }
+
+       err = 0;
+cleanup:
+       kfree(pointers);
+
+cleanup_io:
+
+       if (rbio->read_rebuild) {
+               if (err == 0)
+                       cache_rbio_pages(rbio);
+               else
+                       clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+
+               rbio_orig_end_io(rbio, err, err == 0);
+       } else if (err == 0) {
+               /* rebuild succeeded mid-rmw: clear failures and write out */
+               rbio->faila = -1;
+               rbio->failb = -1;
+               finish_rmw(rbio);
+       } else {
+               rbio_orig_end_io(rbio, err, 0);
+       }
+}
+
+/*
+ * This is called only for stripes we've read from disk to
+ * reconstruct the parity.
+ */
+static void raid_recover_end_io(struct bio *bio, int err)
+{
+       struct btrfs_raid_bio *rbio = bio->bi_private;
+
+       /*
+        * we only read stripe pages off the disk, set them
+        * up to date if there were no errors
+        */
+       if (err)
+               fail_bio_stripe(rbio, bio);
+       else
+               set_bio_pages_uptodate(bio);
+       bio_put(bio);
+
+       /* only the last completing bio runs the reconstruction */
+       if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+               return;
+
+       if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+               rbio_orig_end_io(rbio, -EIO, 0);
+       else
+               __raid_recover_end_io(rbio);
+}
+
+/*
+ * reads everything we need off the disk to reconstruct
+ * the parity. endio handlers trigger final reconstruction
+ * when the IO is done.
+ *
+ * This is used both for reads from the higher layers and for
+ * parity construction required to finish a rmw cycle.
+ */
+static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+{
+       int bios_to_read = 0;
+       struct btrfs_bio *bbio = rbio->bbio;
+       struct bio_list bio_list;
+       int ret;
+       int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       int pagenr;
+       int stripe;
+       struct bio *bio;
+
+       bio_list_init(&bio_list);
+
+       ret = alloc_rbio_pages(rbio);
+       if (ret)
+               goto cleanup;
+
+       atomic_set(&rbio->bbio->error, 0);
+
+       /*
+        * read everything that hasn't failed.  Thanks to the
+        * stripe cache, it is possible that some or all of these
+        * pages are going to be uptodate.
+        */
+       for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+               /* no point reading the stripes we already know are bad */
+               if (rbio->faila == stripe ||
+                   rbio->failb == stripe)
+                       continue;
+
+               for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+                       struct page *p;
+
+                       /*
+                        * the rmw code may have already read this
+                        * page in
+                        */
+                       p = rbio_stripe_page(rbio, stripe, pagenr);
+                       if (PageUptodate(p))
+                               continue;
+
+                       ret = rbio_add_io_page(rbio, &bio_list,
+                                      rbio_stripe_page(rbio, stripe, pagenr),
+                                      stripe, pagenr, rbio->stripe_len);
+                       if (ret < 0)
+                               goto cleanup;
+               }
+       }
+
+       bios_to_read = bio_list_size(&bio_list);
+       if (!bios_to_read) {
+               /*
+                * we might have no bios to read just because the pages
+                * were up to date, or we might have no bios to read because
+                * the devices were gone.
+                */
+               if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) {
+                       __raid_recover_end_io(rbio);
+                       goto out;
+               } else {
+                       goto cleanup;
+               }
+       }
+
+       /*
+        * the bbio may be freed once we submit the last bio.  Make sure
+        * not to touch it after that
+        */
+       atomic_set(&bbio->stripes_pending, bios_to_read);
+       while (1) {
+               bio = bio_list_pop(&bio_list);
+               if (!bio)
+                       break;
+
+               bio->bi_private = rbio;
+               bio->bi_end_io = raid_recover_end_io;
+
+               /* push end_io processing out of interrupt context */
+               btrfs_bio_wq_end_io(rbio->fs_info, bio,
+                                   BTRFS_WQ_ENDIO_RAID56);
+
+               BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+               submit_bio(READ, bio);
+       }
+out:
+       return 0;
+
+cleanup:
+       /* NOTE(review): only read_rebuild ends the bio here; the rmw
+        * caller is expected to handle the -EIO return itself — confirm */
+       if (rbio->read_rebuild)
+               rbio_orig_end_io(rbio, -EIO, 0);
+       return -EIO;
+}
+
+/*
+ * the main entry point for reads from the higher layers.  This
+ * is really only called when the normal read path had a failure,
+ * so we assume the bio they send down corresponds to a failed part
+ * of the drive.
+ */
+int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+                         struct btrfs_bio *bbio, u64 *raid_map,
+                         u64 stripe_len, int mirror_num)
+{
+       struct btrfs_raid_bio *rbio;
+       int ret;
+
+       rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+       if (IS_ERR(rbio)) {
+               return PTR_ERR(rbio);
+       }
+
+       rbio->read_rebuild = 1;
+       bio_list_add(&rbio->bio_list, bio);
+       rbio->bio_list_bytes = bio->bi_size;
+
+       rbio->faila = find_logical_bio_stripe(rbio, bio);
+       if (rbio->faila == -1) {
+               /* NOTE(review): BUG() panics, so the kfree/return below are
+                * unreachable; also kfree(rbio) would skip __free_raid_bio
+                * cleanup — confirm intended error handling */
+               BUG();
+               kfree(rbio);
+               return -EIO;
+       }
+
+       /*
+        * reconstruct from the q stripe if they are
+        * asking for mirror 3
+        */
+       if (mirror_num == 3)
+               rbio->failb = bbio->num_stripes - 2;
+
+       ret = lock_stripe_add(rbio);
+
+       /*
+        * __raid56_parity_recover will end the bio with
+        * any errors it hits.  We don't want to return
+        * its error value up the stack because our caller
+        * will end up calling bio_endio with any nonzero
+        * return
+        */
+       if (ret == 0)
+               __raid56_parity_recover(rbio);
+       /*
+        * our rbio has been added to the list of
+        * rbios that will be handled after the
+        * currently lock owner is done
+        */
+       return 0;
+
+}
+
+/* worker-thread entry for the deferred read/modify/write cycle */
+static void rmw_work(struct btrfs_work *work)
+{
+       struct btrfs_raid_bio *rbio;
+
+       rbio = container_of(work, struct btrfs_raid_bio, work);
+       raid56_rmw_stripe(rbio);
+}
+
+/* worker-thread entry for deferred read-rebuild (parity recovery) */
+static void read_rebuild_work(struct btrfs_work *work)
+{
+       struct btrfs_raid_bio *rbio;
+
+       rbio = container_of(work, struct btrfs_raid_bio, work);
+       __raid56_parity_recover(rbio);
+}
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
new file mode 100644 (file)
index 0000000..ea5d73b
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 Fusion-io  All rights reserved.
+ * Copyright (C) 2012 Intel Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_RAID56__
+#define __BTRFS_RAID56__
+/* number of parity stripes for this chunk: 1 for raid5, 2 for raid6 */
+static inline int nr_parity_stripes(struct map_lookup *map)
+{
+       if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+               return 1;
+       else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+               return 2;
+       else
+               return 0;
+}
+
+/* number of data stripes = total stripes minus the parity stripes */
+static inline int nr_data_stripes(struct map_lookup *map)
+{
+       return map->num_stripes - nr_parity_stripes(map);
+}
+#define RAID5_P_STRIPE ((u64)-2)
+#define RAID6_Q_STRIPE ((u64)-1)
+
+#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) ||                \
+                            ((x) == RAID6_Q_STRIPE))
+
+int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+                                struct btrfs_bio *bbio, u64 *raid_map,
+                                u64 stripe_len, int mirror_num);
+int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+                              struct btrfs_bio *bbio, u64 *raid_map,
+                              u64 stripe_len);
+
+int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
+void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
+#endif
index 17c306b..50695dc 100644 (file)
@@ -3017,7 +3017,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        }
                }
 
-               page_start = (u64)page->index << PAGE_CACHE_SHIFT;
+               page_start = page_offset(page);
                page_end = page_start + PAGE_CACHE_SIZE - 1;
 
                lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
index 67783e0..53c3501 100644 (file)
@@ -28,6 +28,7 @@
 #include "dev-replace.h"
 #include "check-integrity.h"
 #include "rcu-string.h"
+#include "raid56.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -2254,6 +2255,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        struct btrfs_device *extent_dev;
        int extent_mirror_num;
 
+       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                        BTRFS_BLOCK_GROUP_RAID6)) {
+               if (num >= nr_data_stripes(map)) {
+                       return 0;
+               }
+       }
+
        nstripes = length;
        offset = 0;
        do_div(nstripes, map->stripe_len);
@@ -2708,7 +2716,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
        int     ret;
        struct btrfs_root *root = sctx->dev_root;
 
-       if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+       if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                return -EIO;
 
        gen = root->fs_info->last_trans_committed;
index 321b7fb..f7a8b86 100644 (file)
@@ -85,6 +85,7 @@ struct send_ctx {
        u32 send_max_size;
        u64 total_send_size;
        u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
+       u64 flags;      /* 'flags' member of btrfs_ioctl_send_args is u64 */
 
        struct vfsmount *mnt;
 
@@ -3709,6 +3710,39 @@ out:
        return ret;
 }
 
+/*
+ * Send an update extent command to user space.
+ */
+static int send_update_extent(struct send_ctx *sctx,
+                             u64 offset, u32 len)
+{
+       int ret = 0;
+       struct fs_path *p;
+
+       p = fs_path_alloc(sctx);
+       if (!p)
+               return -ENOMEM;
+
+       ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
+       if (ret < 0)
+               goto out;
+
+       ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+       if (ret < 0)
+               goto out;
+
+       TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+       TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+       TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
+
+       ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+       fs_path_free(sctx, p);
+       return ret;
+}
+
 static int send_write_or_clone(struct send_ctx *sctx,
                               struct btrfs_path *path,
                               struct btrfs_key *key,
@@ -3744,7 +3778,11 @@ static int send_write_or_clone(struct send_ctx *sctx,
                goto out;
        }
 
-       if (!clone_root) {
+       if (clone_root) {
+               ret = send_clone(sctx, offset, len, clone_root);
+       } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
+               ret = send_update_extent(sctx, offset, len);
+       } else {
                while (pos < len) {
                        l = len - pos;
                        if (l > BTRFS_SEND_READ_SIZE)
@@ -3757,10 +3795,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
                        pos += ret;
                }
                ret = 0;
-       } else {
-               ret = send_clone(sctx, offset, len, clone_root);
        }
-
 out:
        return ret;
 }
@@ -4536,7 +4571,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
        struct btrfs_fs_info *fs_info;
        struct btrfs_ioctl_send_args *arg = NULL;
        struct btrfs_key key;
-       struct file *filp = NULL;
        struct send_ctx *sctx = NULL;
        u32 i;
        u64 *clone_sources_tmp = NULL;
@@ -4544,7 +4578,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       send_root = BTRFS_I(fdentry(mnt_file)->d_inode)->root;
+       send_root = BTRFS_I(file_inode(mnt_file))->root;
        fs_info = send_root->fs_info;
 
        arg = memdup_user(arg_, sizeof(*arg));
@@ -4561,6 +4595,11 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
                goto out;
        }
 
+       if (arg->flags & ~BTRFS_SEND_FLAG_NO_FILE_DATA) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
        if (!sctx) {
                ret = -ENOMEM;
@@ -4572,6 +4611,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
        INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
        INIT_LIST_HEAD(&sctx->name_cache_list);
 
+       sctx->flags = arg->flags;
+
        sctx->send_filp = fget(arg->send_fd);
        if (IS_ERR(sctx->send_filp)) {
                ret = PTR_ERR(sctx->send_filp);
@@ -4673,8 +4714,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
                goto out;
 
 out:
-       if (filp)
-               fput(filp);
        kfree(arg);
        vfree(clone_sources_tmp);
 
index 1bf4f32..8bb18f7 100644 (file)
@@ -86,6 +86,7 @@ enum btrfs_send_cmd {
        BTRFS_SEND_C_UTIMES,
 
        BTRFS_SEND_C_END,
+       BTRFS_SEND_C_UPDATE_EXTENT,
        __BTRFS_SEND_C_MAX,
 };
 #define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)
index d8982e9..68a29a1 100644 (file)
 #include <linux/slab.h>
 #include <linux/cleancache.h>
 #include <linux/ratelimit.h>
+#include <linux/btrfs.h>
 #include "compat.h"
 #include "delayed-inode.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ioctl.h"
 #include "print-tree.h"
 #include "xattr.h"
 #include "volumes.h"
@@ -63,8 +63,7 @@
 static const struct super_operations btrfs_super_ops;
 static struct file_system_type btrfs_fs_type;
 
-static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
-                                     char nbuf[16])
+static const char *btrfs_decode_error(int errno, char nbuf[16])
 {
        char *errstr = NULL;
 
@@ -98,7 +97,7 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
         * today we only save the error info into ram.  Long term we'll
         * also send it down to the disk
         */
-       fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
+       set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 }
 
 static void save_error_info(struct btrfs_fs_info *fs_info)
@@ -114,7 +113,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
        if (sb->s_flags & MS_RDONLY)
                return;
 
-       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                sb->s_flags |= MS_RDONLY;
                printk(KERN_INFO "btrfs is forced readonly\n");
                /*
@@ -142,8 +141,6 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
        struct super_block *sb = fs_info->sb;
        char nbuf[16];
        const char *errstr;
-       va_list args;
-       va_start(args, fmt);
 
        /*
         * Special case: if the error is EROFS, and we're already
@@ -152,15 +149,18 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
        if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
                return;
 
-       errstr = btrfs_decode_error(fs_info, errno, nbuf);
+       errstr = btrfs_decode_error(errno, nbuf);
        if (fmt) {
-               struct va_format vaf = {
-                       .fmt = fmt,
-                       .va = &args,
-               };
+               struct va_format vaf;
+               va_list args;
+
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
 
                printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
                        sb->s_id, function, line, errstr, &vaf);
+               va_end(args);
        } else {
                printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
                        sb->s_id, function, line, errstr);
@@ -171,7 +171,6 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
                save_error_info(fs_info);
                btrfs_handle_error(fs_info);
        }
-       va_end(args);
 }
 
 static const char * const logtypes[] = {
@@ -261,7 +260,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
                char nbuf[16];
                const char *errstr;
 
-               errstr = btrfs_decode_error(root->fs_info, errno, nbuf);
+               errstr = btrfs_decode_error(errno, nbuf);
                btrfs_printk(root->fs_info,
                             "%s:%d: Aborting unused transaction(%s).\n",
                             function, line, errstr);
@@ -289,8 +288,8 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
        va_start(args, fmt);
        vaf.va = &args;
 
-       errstr = btrfs_decode_error(fs_info, errno, nbuf);
-       if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)
+       errstr = btrfs_decode_error(errno, nbuf);
+       if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR))
                panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
                        s_id, function, line, &vaf, errstr);
 
@@ -438,6 +437,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_compress_force:
                case Opt_compress_force_type:
                        compress_force = true;
+                       /* Fallthrough */
                case Opt_compress:
                case Opt_compress_type:
                        if (token == Opt_compress ||
@@ -519,7 +519,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_alloc_start:
                        num = match_strdup(&args[0]);
                        if (num) {
+                               mutex_lock(&info->chunk_mutex);
                                info->alloc_start = memparse(num, NULL);
+                               mutex_unlock(&info->chunk_mutex);
                                kfree(num);
                                printk(KERN_INFO
                                        "btrfs: allocations start at %llu\n",
@@ -876,7 +878,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 
        btrfs_wait_ordered_extents(root, 0);
 
-       trans = btrfs_attach_transaction(root);
+       trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                /* no transaction, don't bother */
                if (PTR_ERR(trans) == -ENOENT)
@@ -1200,6 +1202,38 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
                              new_pool_size);
 }
 
+static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info,
+                                        unsigned long old_opts, int flags)
+{
+       set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+
+       if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
+           (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
+            (flags & MS_RDONLY))) {
+               /* wait for any defraggers to finish */
+               wait_event(fs_info->transaction_wait,
+                          (atomic_read(&fs_info->defrag_running) == 0));
+               if (flags & MS_RDONLY)
+                       sync_filesystem(fs_info->sb);
+       }
+}
+
+static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
+                                        unsigned long old_opts)
+{
+       /*
+        * We need to clean up all defragable inodes if the autodefragment is
+        * disabled or the fs is R/O.
+        */
+       if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
+           (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
+            (fs_info->sb->s_flags & MS_RDONLY))) {
+               btrfs_cleanup_defrag_inodes(fs_info);
+       }
+
+       clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
 static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -1213,6 +1247,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        unsigned int old_metadata_ratio = fs_info->metadata_ratio;
        int ret;
 
+       btrfs_remount_prepare(fs_info, old_opts, *flags);
+
        ret = btrfs_parse_options(root, data);
        if (ret) {
                ret = -EINVAL;
@@ -1223,7 +1259,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                fs_info->thread_pool_size, old_thread_pool_size);
 
        if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
-               return 0;
+               goto out;
 
        if (*flags & MS_RDONLY) {
                /*
@@ -1278,7 +1314,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                }
                sb->s_flags &= ~MS_RDONLY;
        }
-
+out:
+       btrfs_remount_cleanup(fs_info, old_opts);
        return 0;
 
 restore:
@@ -1289,10 +1326,13 @@ restore:
        fs_info->mount_opt = old_opts;
        fs_info->compress_type = old_compress_type;
        fs_info->max_inline = old_max_inline;
+       mutex_lock(&fs_info->chunk_mutex);
        fs_info->alloc_start = old_alloc_start;
+       mutex_unlock(&fs_info->chunk_mutex);
        btrfs_resize_thread_pool(fs_info,
                old_thread_pool_size, fs_info->thread_pool_size);
        fs_info->metadata_ratio = old_metadata_ratio;
+       btrfs_remount_cleanup(fs_info, old_opts);
        return ret;
 }
 
@@ -1559,7 +1599,7 @@ static int btrfs_freeze(struct super_block *sb)
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = btrfs_sb(sb)->tree_root;
 
-       trans = btrfs_attach_transaction(root);
+       trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                /* no transaction, don't bother */
                if (PTR_ERR(trans) == -ENOENT)
@@ -1684,10 +1724,14 @@ static int __init init_btrfs_fs(void)
        if (err)
                goto free_delayed_inode;
 
-       err = btrfs_interface_init();
+       err = btrfs_delayed_ref_init();
        if (err)
                goto free_auto_defrag;
 
+       err = btrfs_interface_init();
+       if (err)
+               goto free_delayed_ref;
+
        err = register_filesystem(&btrfs_fs_type);
        if (err)
                goto unregister_ioctl;
@@ -1699,6 +1743,8 @@ static int __init init_btrfs_fs(void)
 
 unregister_ioctl:
        btrfs_interface_exit();
+free_delayed_ref:
+       btrfs_delayed_ref_exit();
 free_auto_defrag:
        btrfs_auto_defrag_exit();
 free_delayed_inode:
@@ -1720,6 +1766,7 @@ free_compress:
 static void __exit exit_btrfs_fs(void)
 {
        btrfs_destroy_cachep();
+       btrfs_delayed_ref_exit();
        btrfs_auto_defrag_exit();
        btrfs_delayed_inode_exit();
        ordered_data_exit();
index daac9ae..5b326cd 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/spinlock.h>
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
-#include <linux/module.h>
 #include <linux/kobject.h>
 
 #include "ctree.h"
index 4c0067c..e52da6f 100644 (file)
@@ -40,7 +40,6 @@ void put_transaction(struct btrfs_transaction *transaction)
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(transaction->delayed_refs.root.rb_node);
-               memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
 }
@@ -51,6 +50,14 @@ static noinline void switch_commit_root(struct btrfs_root *root)
        root->commit_root = btrfs_root_node(root);
 }
 
+static inline int can_join_transaction(struct btrfs_transaction *trans,
+                                      int type)
+{
+       return !(trans->in_commit &&
+                type != TRANS_JOIN &&
+                type != TRANS_JOIN_NOLOCK);
+}
+
 /*
  * either allocate a new transaction or hop into the existing one
  */
@@ -62,7 +69,7 @@ static noinline int join_transaction(struct btrfs_root *root, int type)
        spin_lock(&fs_info->trans_lock);
 loop:
        /* The file system has been taken offline. No new transactions. */
-       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }
@@ -86,6 +93,10 @@ loop:
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
+               if (!can_join_transaction(cur_trans, type)) {
+                       spin_unlock(&fs_info->trans_lock);
+                       return -EBUSY;
+               }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
@@ -113,7 +124,7 @@ loop:
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                goto loop;
-       } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+       } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
@@ -155,8 +166,12 @@ loop:
 
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
+       atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
+       atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
+       init_waitqueue_head(&cur_trans->delayed_refs.wait);
 
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
+       INIT_LIST_HEAD(&cur_trans->ordered_operations);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
@@ -301,7 +316,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,
        int ret;
        u64 qgroup_reserved = 0;
 
-       if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+       if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                return ERR_PTR(-EROFS);
 
        if (current->journal_info) {
@@ -359,8 +374,11 @@ again:
 
        do {
                ret = join_transaction(root, type);
-               if (ret == -EBUSY)
+               if (ret == -EBUSY) {
                        wait_current_trans(root);
+                       if (unlikely(type == TRANS_ATTACH))
+                               ret = -ENOENT;
+               }
        } while (ret == -EBUSY);
 
        if (ret < 0) {
@@ -382,9 +400,10 @@ again:
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;
-       h->qgroup_reserved = qgroup_reserved;
+       h->qgroup_reserved = 0;
        h->delayed_ref_elem.seq = 0;
        h->type = type;
+       h->allocating_chunk = false;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
        INIT_LIST_HEAD(&h->new_bgs);
 
@@ -400,6 +419,7 @@ again:
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }
+       h->qgroup_reserved = qgroup_reserved;
 
 got_it:
        btrfs_record_root_in_trans(h, root);
@@ -451,11 +471,43 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
        return start_transaction(root, 0, TRANS_USERSPACE, 0);
 }
 
+/*
+ * btrfs_attach_transaction() - catch the running transaction
+ *
+ * It is used when we want to commit the current transaction, but
+ * don't want to start a new one.
+ *
+ * Note: If this function returns -ENOENT, it just means there is no
+ * running transaction. But it is possible that the inactive transaction
+ * is still in the memory, not fully on disk. If you hope there is no
+ * inactive transaction in the fs when -ENOENT is returned, you should
+ * invoke
+ *     btrfs_attach_transaction_barrier()
+ */
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
 {
        return start_transaction(root, 0, TRANS_ATTACH, 0);
 }
 
+/*
+ * btrfs_attach_transaction_barrier() - catch the running transaction
+ *
+ * It is similar to the above function, the difference is that this one
+ * will wait for all the inactive transactions until they fully
+ * complete.
+ */
+struct btrfs_trans_handle *
+btrfs_attach_transaction_barrier(struct btrfs_root *root)
+{
+       struct btrfs_trans_handle *trans;
+
+       trans = start_transaction(root, 0, TRANS_ATTACH, 0);
+       if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
+               btrfs_wait_for_commit(root, 0);
+
+       return trans;
+}
+
 /* wait for a transaction commit to be fully complete */
 static noinline void wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
@@ -587,7 +639,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);
 
-       while (count < 2) {
+       while (count < 1) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
@@ -599,6 +651,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                }
                count++;
        }
+
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
 
@@ -644,12 +697,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                btrfs_run_delayed_iputs(root);
 
        if (trans->aborted ||
-           root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+           test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                err = -EIO;
-       }
        assert_qgroups_uptodate(trans);
 
-       memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
 }
@@ -696,7 +747,9 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;
+       struct blk_plug plug;
 
+       blk_start_plug(&plug);
        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
@@ -710,6 +763,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
        }
        if (err)
                werr = err;
+       blk_finish_plug(&plug);
        return werr;
 }
 
@@ -960,10 +1014,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
 }
 
 /*
- * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
- * otherwise every leaf in the btree is read and defragged.
+ * defrag a given btree.
+ * Every leaf in the btree is read and defragged.
  */
-int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
+int btrfs_defrag_root(struct btrfs_root *root)
 {
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
@@ -977,7 +1031,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
                if (IS_ERR(trans))
                        return PTR_ERR(trans);
 
-               ret = btrfs_defrag_leaves(trans, root, cacheonly);
+               ret = btrfs_defrag_leaves(trans, root);
 
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root);
@@ -985,6 +1039,12 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
 
                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;
+
+               if (btrfs_defrag_cancelled(root->fs_info)) {
+                       printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
+                       ret = -EAGAIN;
+                       break;
+               }
        }
        root->defrag_running = 0;
        return ret;
@@ -1007,7 +1067,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        struct inode *parent_inode;
        struct btrfs_path *path;
        struct btrfs_dir_item *dir_item;
-       struct dentry *parent;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
@@ -1022,7 +1081,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        if (!path) {
                ret = pending->error = -ENOMEM;
-               goto path_alloc_fail;
+               return ret;
        }
 
        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
@@ -1062,10 +1121,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 
        rsv = trans->block_rsv;
        trans->block_rsv = &pending->block_rsv;
+       trans->bytes_reserved = trans->block_rsv->reserved;
 
        dentry = pending->dentry;
-       parent = dget_parent(dentry);
-       parent_inode = parent->d_inode;
+       parent_inode = pending->dir;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);
 
@@ -1213,14 +1272,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
 fail:
-       dput(parent);
        trans->block_rsv = rsv;
+       trans->bytes_reserved = 0;
 no_free_objectid:
        kfree(new_root_item);
 root_item_alloc_fail:
        btrfs_free_path(path);
-path_alloc_fail:
-       btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return ret;
 }
 
@@ -1306,13 +1363,13 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
 struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct btrfs_root *root;
-       struct delayed_work work;
+       struct work_struct work;
 };
 
 static void do_async_commit(struct work_struct *work)
 {
        struct btrfs_async_commit *ac =
-               container_of(work, struct btrfs_async_commit, work.work);
+               container_of(work, struct btrfs_async_commit, work);
 
        /*
         * We've got freeze protection passed with the transaction.
@@ -1340,7 +1397,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
        if (!ac)
                return -ENOMEM;
 
-       INIT_DELAYED_WORK(&ac->work, do_async_commit);
+       INIT_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
@@ -1364,7 +1421,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                        &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                        1, _THIS_IP_);
 
-       schedule_delayed_work(&ac->work, 0);
+       schedule_work(&ac->work);
 
        /* wait for transaction to start and unblock */
        if (wait_for_unblock)
@@ -1384,6 +1441,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, int err)
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
+       DEFINE_WAIT(wait);
 
        WARN_ON(trans->use_count > 1);
 
@@ -1392,8 +1450,13 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        if (cur_trans == root->fs_info->running_transaction) {
+               root->fs_info->trans_no_join = 1;
+               spin_unlock(&root->fs_info->trans_lock);
+               wait_event(cur_trans->writer_wait,
+                          atomic_read(&cur_trans->num_writers) == 1);
+
+               spin_lock(&root->fs_info->trans_lock);
                root->fs_info->running_transaction = NULL;
-               root->fs_info->trans_no_join = 0;
        }
        spin_unlock(&root->fs_info->trans_lock);
 
@@ -1427,7 +1490,9 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
        }
 
        if (flush_on_commit || snap_pending) {
-               btrfs_start_delalloc_inodes(root, 1);
+               ret = btrfs_start_delalloc_inodes(root, 1);
+               if (ret)
+                       return ret;
                btrfs_wait_ordered_extents(root, 1);
        }
 
@@ -1449,9 +1514,9 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
         * it here and no for sure that nothing new will be added
         * to the list
         */
-       btrfs_run_ordered_operations(root, 1);
+       ret = btrfs_run_ordered_operations(trans, root, 1);
 
-       return 0;
+       return ret;
 }
 
 /*
@@ -1472,27 +1537,35 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        int should_grow = 0;
        unsigned long now = get_seconds();
 
-       ret = btrfs_run_ordered_operations(root, 0);
+       ret = btrfs_run_ordered_operations(trans, root, 0);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
-               goto cleanup_transaction;
+               btrfs_end_transaction(trans, root);
+               return ret;
        }
 
        /* Stop the commit early if ->aborted is set */
        if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
-               goto cleanup_transaction;
+               btrfs_end_transaction(trans, root);
+               return ret;
        }
 
        /* make a pass through all the delayed refs we have so far
         * any runnings procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
-       if (ret)
-               goto cleanup_transaction;
+       if (ret) {
+               btrfs_end_transaction(trans, root);
+               return ret;
+       }
 
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
+       if (trans->qgroup_reserved) {
+               btrfs_qgroup_free(root, trans->qgroup_reserved);
+               trans->qgroup_reserved = 0;
+       }
 
        cur_trans = trans->transaction;
 
@@ -1506,8 +1579,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                btrfs_create_pending_block_groups(trans, root);
 
        ret = btrfs_run_delayed_refs(trans, root, 0);
-       if (ret)
-               goto cleanup_transaction;
+       if (ret) {
+               btrfs_end_transaction(trans, root);
+               return ret;
+       }
 
        spin_lock(&cur_trans->commit_lock);
        if (cur_trans->in_commit) {
@@ -1771,6 +1846,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 cleanup_transaction:
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
+       if (trans->qgroup_reserved) {
+               btrfs_qgroup_free(root, trans->qgroup_reserved);
+               trans->qgroup_reserved = 0;
+       }
        btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
 //     WARN_ON(1);
        if (current->journal_info == trans)
index 0e8aa1e..3c8e0d2 100644 (file)
@@ -43,6 +43,7 @@ struct btrfs_transaction {
        wait_queue_head_t writer_wait;
        wait_queue_head_t commit_wait;
        struct list_head pending_snapshots;
+       struct list_head ordered_operations;
        struct btrfs_delayed_ref_root delayed_refs;
        int aborted;
 };
@@ -68,6 +69,7 @@ struct btrfs_trans_handle {
        struct btrfs_block_rsv *orig_rsv;
        short aborted;
        short adding_csums;
+       bool allocating_chunk;
        enum btrfs_trans_type type;
        /*
         * this root is only needed to validate that the root passed to
@@ -82,11 +84,13 @@ struct btrfs_trans_handle {
 
 struct btrfs_pending_snapshot {
        struct dentry *dentry;
+       struct inode *dir;
        struct btrfs_root *root;
        struct btrfs_root *snap;
        struct btrfs_qgroup_inherit *inherit;
        /* block reservation for the operation */
        struct btrfs_block_rsv block_rsv;
+       u64 qgroup_reserved;
        /* extra metadata reseration for relocation */
        int error;
        bool readonly;
@@ -110,13 +114,15 @@ struct btrfs_trans_handle *btrfs_start_transaction_lflush(
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
+                                       struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root);
 
 int btrfs_add_dead_root(struct btrfs_root *root);
-int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
+int btrfs_defrag_root(struct btrfs_root *root);
 int btrfs_clean_old_snapshots(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root);
index 3b580ee..94e05c1 100644 (file)
 #include "transaction.h"
 #include "locking.h"
 
-/* defrag all the leaves in a given btree.  If cache_only == 1, don't read
- * things from disk, otherwise read all the leaves and try to get key order to
+/*
+ * Defrag all the leaves in a given btree.
+ * Read all the leaves and try to get key order to
  * better reflect disk order
  */
 
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root, int cache_only)
+                       struct btrfs_root *root)
 {
        struct btrfs_path *path = NULL;
        struct btrfs_key key;
@@ -41,9 +42,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
        u64 last_ret = 0;
        u64 min_trans = 0;
 
-       if (cache_only)
-               goto out;
-
        if (root->fs_info->extent_root == root) {
                /*
                 * there's recursion here right now in the tree locking,
@@ -86,11 +84,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
        }
 
        path->keep_locks = 1;
-       if (cache_only)
-               min_trans = root->defrag_trans_start;
 
-       ret = btrfs_search_forward(root, &key, NULL, path,
-                                  cache_only, min_trans);
+       ret = btrfs_search_forward(root, &key, NULL, path, min_trans);
        if (ret < 0)
                goto out;
        if (ret > 0) {
@@ -109,11 +104,11 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
                goto out;
        }
        path->slots[1] = btrfs_header_nritems(path->nodes[1]);
-       next_key_ret = btrfs_find_next_key(root, path, &key, 1, cache_only,
+       next_key_ret = btrfs_find_next_key(root, path, &key, 1,
                                           min_trans);
        ret = btrfs_realloc_node(trans, root,
                                 path->nodes[1], 0,
-                                cache_only, &last_ret,
+                                &last_ret,
                                 &root->defrag_progress);
        if (ret) {
                WARN_ON(ret == -EAGAIN);
index 9027bb1..c7ef569 100644 (file)
@@ -278,8 +278,7 @@ static int process_one_buffer(struct btrfs_root *log,
                              struct walk_control *wc, u64 gen)
 {
        if (wc->pin)
-               btrfs_pin_extent_for_log_replay(wc->trans,
-                                               log->fs_info->extent_root,
+               btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
                                                eb->start, eb->len);
 
        if (btrfs_buffer_uptodate(eb, gen, 0)) {
@@ -485,7 +484,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_key *key)
 {
        int found_type;
-       u64 mask = root->sectorsize - 1;
        u64 extent_end;
        u64 start = key->offset;
        u64 saved_nbytes;
@@ -502,7 +500,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                extent_end = start + btrfs_file_extent_num_bytes(eb, item);
        else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                size = btrfs_file_extent_inline_len(eb, item);
-               extent_end = (start + size + mask) & ~mask;
+               extent_end = ALIGN(start + size, root->sectorsize);
        } else {
                ret = 0;
                goto out;
@@ -2281,6 +2279,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        unsigned long log_transid = 0;
 
        mutex_lock(&root->log_mutex);
+       log_transid = root->log_transid;
        index1 = root->log_transid % 2;
        if (atomic_read(&root->log_commit[index1])) {
                wait_log_commit(trans, root, root->log_transid);
@@ -2308,11 +2307,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        /* bail out if we need to do a full commit */
        if (root->fs_info->last_trans_log_full_commit == trans->transid) {
                ret = -EAGAIN;
+               btrfs_free_logged_extents(log, log_transid);
                mutex_unlock(&root->log_mutex);
                goto out;
        }
 
-       log_transid = root->log_transid;
        if (log_transid % 2 == 0)
                mark = EXTENT_DIRTY;
        else
@@ -2324,6 +2323,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
+               btrfs_free_logged_extents(log, log_transid);
                mutex_unlock(&root->log_mutex);
                goto out;
        }
@@ -2363,6 +2363,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                }
                root->fs_info->last_trans_log_full_commit = trans->transid;
                btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+               btrfs_free_logged_extents(log, log_transid);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = -EAGAIN;
                goto out;
@@ -2373,6 +2374,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
                wait_log_commit(trans, log_root_tree,
                                log_root_tree->log_transid);
+               btrfs_free_logged_extents(log, log_transid);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = 0;
                goto out;
@@ -2392,6 +2394,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         */
        if (root->fs_info->last_trans_log_full_commit == trans->transid) {
                btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+               btrfs_free_logged_extents(log, log_transid);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = -EAGAIN;
                goto out_wake_log_root;
@@ -2402,10 +2405,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                                EXTENT_DIRTY | EXTENT_NEW);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
+               btrfs_free_logged_extents(log, log_transid);
                mutex_unlock(&log_root_tree->log_mutex);
                goto out_wake_log_root;
        }
        btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+       btrfs_wait_logged_extents(log, log_transid);
 
        btrfs_set_super_log_root(root->fs_info->super_for_commit,
                                log_root_tree->node->start);
@@ -2461,8 +2466,10 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
                .process_func = process_one_buffer
        };
 
-       ret = walk_log_tree(trans, log, &wc);
-       BUG_ON(ret);
+       if (trans) {
+               ret = walk_log_tree(trans, log, &wc);
+               BUG_ON(ret);
+       }
 
        while (1) {
                ret = find_first_extent_bit(&log->dirty_log_pages,
@@ -2475,6 +2482,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
                                  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
        }
 
+       /*
+        * We may have short-circuited the log tree with the full commit logic
+        * and left ordered extents on our list, so clear these out to keep us
+        * from leaking inodes and memory.
+        */
+       btrfs_free_logged_extents(log, 0);
+       btrfs_free_logged_extents(log, 1);
+
        free_extent_buffer(log->node);
        kfree(log);
 }
@@ -2724,7 +2739,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
        path->keep_locks = 1;
 
        ret = btrfs_search_forward(root, &min_key, &max_key,
-                                  path, 0, trans->transid);
+                                  path, trans->transid);
 
        /*
         * we didn't find anything from this transaction, see if there
@@ -3271,16 +3286,21 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
        struct btrfs_root *log = root->log_root;
        struct btrfs_file_extent_item *fi;
        struct extent_buffer *leaf;
+       struct btrfs_ordered_extent *ordered;
        struct list_head ordered_sums;
        struct btrfs_map_token token;
        struct btrfs_key key;
-       u64 csum_offset = em->mod_start - em->start;
-       u64 csum_len = em->mod_len;
+       u64 mod_start = em->mod_start;
+       u64 mod_len = em->mod_len;
+       u64 csum_offset;
+       u64 csum_len;
        u64 extent_offset = em->start - em->orig_start;
        u64 block_len;
        int ret;
+       int index = log->log_transid % 2;
        bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
+insert:
        INIT_LIST_HEAD(&ordered_sums);
        btrfs_init_map_token(&token);
        key.objectid = btrfs_ino(inode);
@@ -3296,6 +3316,23 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
        leaf = path->nodes[0];
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
+
+       /*
+        * If we are overwriting an inline extent with a real one then we need
+        * to just delete the inline extent as it may not be large enough to
+        * have the entire file_extent_item.
+        */
+       if (ret && btrfs_token_file_extent_type(leaf, fi, &token) ==
+           BTRFS_FILE_EXTENT_INLINE) {
+               ret = btrfs_del_item(trans, log, path);
+               btrfs_release_path(path);
+               if (ret) {
+                       path->really_keep_locks = 0;
+                       return ret;
+               }
+               goto insert;
+       }
+
        btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
                                               &token);
        if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3362,6 +3399,92 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
                csum_len = block_len;
        }
 
+       /*
+        * First check and see if our csums are on our outstanding ordered
+        * extents.
+        */
+again:
+       spin_lock_irq(&log->log_extents_lock[index]);
+       list_for_each_entry(ordered, &log->logged_list[index], log_list) {
+               struct btrfs_ordered_sum *sum;
+
+               if (!mod_len)
+                       break;
+
+               if (ordered->inode != inode)
+                       continue;
+
+               if (ordered->file_offset + ordered->len <= mod_start ||
+                   mod_start + mod_len <= ordered->file_offset)
+                       continue;
+
+               /*
+                * We are going to copy all the csums on this ordered extent, so
+                * go ahead and adjust mod_start and mod_len in case this
+                * ordered extent has already been logged.
+                */
+               if (ordered->file_offset > mod_start) {
+                       if (ordered->file_offset + ordered->len >=
+                           mod_start + mod_len)
+                               mod_len = ordered->file_offset - mod_start;
+                       /*
+                        * If we have this case
+                        *
+                        * |--------- logged extent ---------|
+                        *       |----- ordered extent ----|
+                        *
+                        * Just don't mess with mod_start and mod_len, we'll
+                        * just end up logging more csums than we need and it
+                        * will be ok.
+                        */
+               } else {
+                       if (ordered->file_offset + ordered->len <
+                           mod_start + mod_len) {
+                               mod_len = (mod_start + mod_len) -
+                                       (ordered->file_offset + ordered->len);
+                               mod_start = ordered->file_offset +
+                                       ordered->len;
+                       } else {
+                               mod_len = 0;
+                       }
+               }
+
+               /*
+                * To keep us from looping for the above case of an ordered
+                * extent that falls inside of the logged extent.
+                */
+               if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
+                                    &ordered->flags))
+                       continue;
+               atomic_inc(&ordered->refs);
+               spin_unlock_irq(&log->log_extents_lock[index]);
+               /*
+                * we've dropped the lock, we must either break or
+                * start over after this.
+                */
+
+               wait_event(ordered->wait, ordered->csum_bytes_left == 0);
+
+               list_for_each_entry(sum, &ordered->list, list) {
+                       ret = btrfs_csum_file_blocks(trans, log, sum);
+                       if (ret) {
+                               btrfs_put_ordered_extent(ordered);
+                               goto unlocked;
+                       }
+               }
+               btrfs_put_ordered_extent(ordered);
+               goto again;
+
+       }
+       spin_unlock_irq(&log->log_extents_lock[index]);
+unlocked:
+
+       if (!mod_len || ret)
+               return ret;
+
+       csum_offset = mod_start - em->start;
+       csum_len = mod_len;
+
        /* block start is already adjusted for the file extent offset. */
        ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
                                       em->block_start + csum_offset,
@@ -3393,6 +3516,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
        struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
        u64 test_gen;
        int ret = 0;
+       int num = 0;
 
        INIT_LIST_HEAD(&extents);
 
@@ -3401,16 +3525,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 
        list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
                list_del_init(&em->list);
+
+               /*
+                * Just an arbitrary number, this can be really CPU intensive
+                * once we start getting a lot of extents, and really once we
+                * have a bunch of extents we just want to commit since it will
+                * be faster.
+                */
+               if (++num > 32768) {
+                       list_del_init(&tree->modified_extents);
+                       ret = -EFBIG;
+                       goto process;
+               }
+
                if (em->generation <= test_gen)
                        continue;
                /* Need a ref to keep it from getting evicted from cache */
                atomic_inc(&em->refs);
                set_bit(EXTENT_FLAG_LOGGING, &em->flags);
                list_add_tail(&em->list, &extents);
+               num++;
        }
 
        list_sort(NULL, &extents, extent_cmp);
 
+process:
        while (!list_empty(&extents)) {
                em = list_entry(extents.next, struct extent_map, list);
 
@@ -3513,6 +3652,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 
        mutex_lock(&BTRFS_I(inode)->log_mutex);
 
+       btrfs_get_logged_extents(log, inode);
+
        /*
         * a brute force approach to making sure we get the most uptodate
         * copies of everything.
@@ -3558,7 +3699,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        while (1) {
                ins_nr = 0;
                ret = btrfs_search_forward(root, &min_key, &max_key,
-                                          path, 0, trans->transid);
+                                          path, trans->transid);
                if (ret != 0)
                        break;
 again:
@@ -3656,6 +3797,8 @@ log_extents:
        BTRFS_I(inode)->logged_trans = trans->transid;
        BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
 out_unlock:
+       if (err)
+               btrfs_free_logged_extents(log, log->log_transid);
        mutex_unlock(&BTRFS_I(inode)->log_mutex);
 
        btrfs_free_path(path);
@@ -3822,7 +3965,6 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 end_trans:
        dput(old_parent);
        if (ret < 0) {
-               WARN_ON(ret != -ENOSPC);
                root->fs_info->last_trans_log_full_commit = trans->transid;
                ret = 1;
        }
index 99be4c1..ddc61ca 100644 (file)
@@ -5,7 +5,7 @@
  */
 
 #include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include "ulist.h"
 
 /*
index 5cbb7f4..35bb2d4 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/capability.h>
 #include <linux/ratelimit.h>
 #include <linux/kthread.h>
+#include <linux/raid/pq.h>
+#include <asm/div64.h>
 #include "compat.h"
 #include "ctree.h"
 #include "extent_map.h"
@@ -32,6 +34,7 @@
 #include "transaction.h"
 #include "print-tree.h"
 #include "volumes.h"
+#include "raid56.h"
 #include "async-thread.h"
 #include "check-integrity.h"
 #include "rcu-string.h"
@@ -647,6 +650,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
                new_device->can_discard = 0;
+               spin_lock_init(&new_device->io_lock);
                list_replace_rcu(&device->dev_list, &new_device->dev_list);
 
                call_rcu(&device->rcu, free_device);
@@ -792,26 +796,75 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
        return ret;
 }
 
+/*
+ * Look for a btrfs signature on a device. This may be called out of the mount path
+ * and we are not allowed to call set_blocksize during the scan. The superblock
+ * is read via pagecache
+ */
 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
 {
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
-       struct buffer_head *bh;
-       int ret;
+       struct page *page;
+       void *p;
+       int ret = -EINVAL;
        u64 devid;
        u64 transid;
        u64 total_devices;
+       u64 bytenr;
+       pgoff_t index;
 
+       /*
+        * we would like to check all the supers, but that would make
+        * a btrfs mount succeed after a mkfs from a different FS.
+        * So, we need to add a special mount option to scan for
+        * later supers, using BTRFS_SUPER_MIRROR_MAX instead
+        */
+       bytenr = btrfs_sb_offset(0);
        flags |= FMODE_EXCL;
        mutex_lock(&uuid_mutex);
-       ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
-       if (ret)
+
+       bdev = blkdev_get_by_path(path, flags, holder);
+
+       if (IS_ERR(bdev)) {
+               ret = PTR_ERR(bdev);
                goto error;
-       disk_super = (struct btrfs_super_block *)bh->b_data;
+       }
+
+       /* make sure our super fits in the device */
+       if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
+               goto error_bdev_put;
+
+       /* make sure our super fits in the page */
+       if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
+               goto error_bdev_put;
+
+       /* make sure our super doesn't straddle pages on disk */
+       index = bytenr >> PAGE_CACHE_SHIFT;
+       if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
+               goto error_bdev_put;
+
+       /* pull in the page with our super */
+       page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+                                  index, GFP_NOFS);
+
+       if (IS_ERR_OR_NULL(page))
+               goto error_bdev_put;
+
+       p = kmap(page);
+
+       /* align our pointer to the offset of the super block */
+       disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
+
+       if (btrfs_super_bytenr(disk_super) != bytenr ||
+           disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
+               goto error_unmap;
+
        devid = btrfs_stack_device_id(&disk_super->dev_item);
        transid = btrfs_super_generation(disk_super);
        total_devices = btrfs_super_num_devices(disk_super);
+
        if (disk_super->label[0]) {
                if (disk_super->label[BTRFS_LABEL_SIZE - 1])
                        disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
@@ -819,12 +872,19 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        } else {
                printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
        }
+
        printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);
+
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);
        if (!ret && fs_devices_ret)
                (*fs_devices_ret)->total_devices = total_devices;
-       brelse(bh);
+
+error_unmap:
+       kunmap(page);
+       page_cache_release(page);
+
+error_bdev_put:
        blkdev_put(bdev, flags);
 error:
        mutex_unlock(&uuid_mutex);
@@ -1372,14 +1432,19 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        u64 devid;
        u64 num_devices;
        u8 *dev_uuid;
+       unsigned seq;
        int ret = 0;
        bool clear_super = false;
 
        mutex_lock(&uuid_mutex);
 
-       all_avail = root->fs_info->avail_data_alloc_bits |
-               root->fs_info->avail_system_alloc_bits |
-               root->fs_info->avail_metadata_alloc_bits;
+       do {
+               seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+               all_avail = root->fs_info->avail_data_alloc_bits |
+                           root->fs_info->avail_system_alloc_bits |
+                           root->fs_info->avail_metadata_alloc_bits;
+       } while (read_seqretry(&root->fs_info->profiles_lock, seq));
 
        num_devices = root->fs_info->fs_devices->num_devices;
        btrfs_dev_replace_lock(&root->fs_info->dev_replace);
@@ -1403,6 +1468,21 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                goto out;
        }
 
+       if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
+           root->fs_info->fs_devices->rw_devices <= 2) {
+               printk(KERN_ERR "btrfs: unable to go below two "
+                      "devices on raid5\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
+           root->fs_info->fs_devices->rw_devices <= 3) {
+               printk(KERN_ERR "btrfs: unable to go below three "
+                      "devices on raid6\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (strcmp(device_path, "missing") == 0) {
                struct list_head *devices;
                struct btrfs_device *tmp;
@@ -2616,7 +2696,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
        chunk_used = btrfs_block_group_used(&cache->item);
 
        if (bargs->usage == 0)
-               user_thresh = 0;
+               user_thresh = 1;
        else if (bargs->usage > 100)
                user_thresh = cache->key.offset;
        else
@@ -2664,11 +2744,15 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
                return 0;
 
        if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
-            BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
-               factor = 2;
-       else
-               factor = 1;
-       factor = num_stripes / factor;
+            BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
+               factor = num_stripes / 2;
+       } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
+               factor = num_stripes - 1;
+       } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
+               factor = num_stripes - 2;
+       } else {
+               factor = num_stripes;
+       }
 
        for (i = 0; i < num_stripes; i++) {
                stripe = btrfs_stripe_nr(chunk, i);
@@ -2985,6 +3069,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
        int mixed = 0;
        int ret;
        u64 num_devices;
+       unsigned seq;
 
        if (btrfs_fs_closing(fs_info) ||
            atomic_read(&fs_info->balance_pause_req) ||
@@ -3027,7 +3112,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
                allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
        else
                allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
-                               BTRFS_BLOCK_GROUP_RAID10);
+                               BTRFS_BLOCK_GROUP_RAID10 |
+                               BTRFS_BLOCK_GROUP_RAID5 |
+                               BTRFS_BLOCK_GROUP_RAID6);
 
        if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
            (!alloc_profile_is_valid(bctl->data.target, 1) ||
@@ -3067,23 +3154,29 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 
        /* allow to reduce meta or sys integrity only if force set */
        allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
-                       BTRFS_BLOCK_GROUP_RAID10;
-       if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-            (fs_info->avail_system_alloc_bits & allowed) &&
-            !(bctl->sys.target & allowed)) ||
-           ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-            (fs_info->avail_metadata_alloc_bits & allowed) &&
-            !(bctl->meta.target & allowed))) {
-               if (bctl->flags & BTRFS_BALANCE_FORCE) {
-                       printk(KERN_INFO "btrfs: force reducing metadata "
-                              "integrity\n");
-               } else {
-                       printk(KERN_ERR "btrfs: balance will reduce metadata "
-                              "integrity, use force if you want this\n");
-                       ret = -EINVAL;
-                       goto out;
+                       BTRFS_BLOCK_GROUP_RAID10 |
+                       BTRFS_BLOCK_GROUP_RAID5 |
+                       BTRFS_BLOCK_GROUP_RAID6;
+       do {
+               seq = read_seqbegin(&fs_info->profiles_lock);
+
+               if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+                    (fs_info->avail_system_alloc_bits & allowed) &&
+                    !(bctl->sys.target & allowed)) ||
+                   ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+                    (fs_info->avail_metadata_alloc_bits & allowed) &&
+                    !(bctl->meta.target & allowed))) {
+                       if (bctl->flags & BTRFS_BALANCE_FORCE) {
+                               printk(KERN_INFO "btrfs: force reducing metadata "
+                                      "integrity\n");
+                       } else {
+                               printk(KERN_ERR "btrfs: balance will reduce metadata "
+                                      "integrity, use force if you want this\n");
+                               ret = -EINVAL;
+                               goto out;
+                       }
                }
-       }
+       } while (read_seqretry(&fs_info->profiles_lock, seq));
 
        if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                int num_tolerated_disk_barrier_failures;
@@ -3127,21 +3220,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
        mutex_lock(&fs_info->balance_mutex);
        atomic_dec(&fs_info->balance_running);
 
-       if (bargs) {
-               memset(bargs, 0, sizeof(*bargs));
-               update_ioctl_balance_args(fs_info, 0, bargs);
-       }
-
-       if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
-           balance_need_close(fs_info)) {
-               __cancel_balance(fs_info);
-       }
-
        if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                fs_info->num_tolerated_disk_barrier_failures =
                        btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
        }
 
+       if (bargs) {
+               memset(bargs, 0, sizeof(*bargs));
+               update_ioctl_balance_args(fs_info, 0, bargs);
+       }
+
        wake_up(&fs_info->balance_wait_q);
 
        return ret;
@@ -3504,13 +3592,86 @@ static int btrfs_cmp_device_info(const void *a, const void *b)
 }
 
 struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
-       { 2, 1, 0, 4, 2, 2 /* raid10 */ },
-       { 1, 1, 2, 2, 2, 2 /* raid1 */ },
-       { 1, 2, 1, 1, 1, 2 /* dup */ },
-       { 1, 1, 0, 2, 1, 1 /* raid0 */ },
-       { 1, 1, 1, 1, 1, 1 /* single */ },
+       [BTRFS_RAID_RAID10] = {
+               .sub_stripes    = 2,
+               .dev_stripes    = 1,
+               .devs_max       = 0,    /* 0 == as many as possible */
+               .devs_min       = 4,
+               .devs_increment = 2,
+               .ncopies        = 2,
+       },
+       [BTRFS_RAID_RAID1] = {
+               .sub_stripes    = 1,
+               .dev_stripes    = 1,
+               .devs_max       = 2,
+               .devs_min       = 2,
+               .devs_increment = 2,
+               .ncopies        = 2,
+       },
+       [BTRFS_RAID_DUP] = {
+               .sub_stripes    = 1,
+               .dev_stripes    = 2,
+               .devs_max       = 1,
+               .devs_min       = 1,
+               .devs_increment = 1,
+               .ncopies        = 2,
+       },
+       [BTRFS_RAID_RAID0] = {
+               .sub_stripes    = 1,
+               .dev_stripes    = 1,
+               .devs_max       = 0,
+               .devs_min       = 2,
+               .devs_increment = 1,
+               .ncopies        = 1,
+       },
+       [BTRFS_RAID_SINGLE] = {
+               .sub_stripes    = 1,
+               .dev_stripes    = 1,
+               .devs_max       = 1,
+               .devs_min       = 1,
+               .devs_increment = 1,
+               .ncopies        = 1,
+       },
+       [BTRFS_RAID_RAID5] = {
+               .sub_stripes    = 1,
+               .dev_stripes    = 1,
+               .devs_max       = 0,
+               .devs_min       = 2,
+               .devs_increment = 1,
+               .ncopies        = 2,
+       },
+       [BTRFS_RAID_RAID6] = {
+               .sub_stripes    = 1,
+               .dev_stripes    = 1,
+               .devs_max       = 0,
+               .devs_min       = 3,
+               .devs_increment = 1,
+               .ncopies        = 3,
+       },
 };
 
+static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
+{
+       /* TODO allow them to set a preferred stripe size */
+       return 64 * 1024;
+}
+
+static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
+{
+       u64 features;
+
+       if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
+               return;
+
+       features = btrfs_super_incompat_flags(info->super_copy);
+       if (features & BTRFS_FEATURE_INCOMPAT_RAID56)
+               return;
+
+       features |= BTRFS_FEATURE_INCOMPAT_RAID56;
+       btrfs_set_super_incompat_flags(info->super_copy, features);
+       printk(KERN_INFO "btrfs: setting RAID5/6 feature flag\n");
+}
+
 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root,
                               struct map_lookup **map_ret,
@@ -3526,6 +3687,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        struct btrfs_device_info *devices_info = NULL;
        u64 total_avail;
        int num_stripes;        /* total number of stripes to allocate */
+       int data_stripes;       /* number of stripes that count for
+                                  block group size */
        int sub_stripes;        /* sub_stripes info for map */
        int dev_stripes;        /* stripes per dev */
        int devs_max;           /* max devs to use */
@@ -3537,6 +3700,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        u64 max_chunk_size;
        u64 stripe_size;
        u64 num_bytes;
+       u64 raid_stripe_len = BTRFS_STRIPE_LEN;
        int ndevs;
        int i;
        int j;
@@ -3631,12 +3795,16 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
                        continue;
 
+               if (ndevs == fs_devices->rw_devices) {
+                       WARN(1, "%s: found more than %llu devices\n",
+                            __func__, fs_devices->rw_devices);
+                       break;
+               }
                devices_info[ndevs].dev_offset = dev_offset;
                devices_info[ndevs].max_avail = max_avail;
                devices_info[ndevs].total_avail = total_avail;
                devices_info[ndevs].dev = device;
                ++ndevs;
-               WARN_ON(ndevs > fs_devices->rw_devices);
        }
 
        /*
@@ -3662,16 +3830,48 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        stripe_size = devices_info[ndevs-1].max_avail;
        num_stripes = ndevs * dev_stripes;
 
-       if (stripe_size * ndevs > max_chunk_size * ncopies) {
-               stripe_size = max_chunk_size * ncopies;
-               do_div(stripe_size, ndevs);
+       /*
+        * this will have to be fixed for RAID1 and RAID10 over
+        * more drives
+        */
+       data_stripes = num_stripes / ncopies;
+
+       if (type & BTRFS_BLOCK_GROUP_RAID5) {
+               raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
+                                btrfs_super_stripesize(info->super_copy));
+               data_stripes = num_stripes - 1;
+       }
+       if (type & BTRFS_BLOCK_GROUP_RAID6) {
+               raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
+                                btrfs_super_stripesize(info->super_copy));
+               data_stripes = num_stripes - 2;
+       }
+
+       /*
+        * Use the number of data stripes to figure out how big this chunk
+        * is really going to be in terms of logical address space,
+        * and compare that answer with the max chunk size
+        */
+       if (stripe_size * data_stripes > max_chunk_size) {
+               u64 mask = (1ULL << 24) - 1;
+               stripe_size = max_chunk_size;
+               do_div(stripe_size, data_stripes);
+
+               /* bump the answer up to a 16MB boundary */
+               stripe_size = (stripe_size + mask) & ~mask;
+
+               /* but don't go higher than the limits we found
+                * while searching for free extents
+                */
+               if (stripe_size > devices_info[ndevs-1].max_avail)
+                       stripe_size = devices_info[ndevs-1].max_avail;
        }
 
        do_div(stripe_size, dev_stripes);
 
        /* align to BTRFS_STRIPE_LEN */
-       do_div(stripe_size, BTRFS_STRIPE_LEN);
-       stripe_size *= BTRFS_STRIPE_LEN;
+       do_div(stripe_size, raid_stripe_len);
+       stripe_size *= raid_stripe_len;
 
        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
@@ -3689,14 +3889,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                }
        }
        map->sector_size = extent_root->sectorsize;
-       map->stripe_len = BTRFS_STRIPE_LEN;
-       map->io_align = BTRFS_STRIPE_LEN;
-       map->io_width = BTRFS_STRIPE_LEN;
+       map->stripe_len = raid_stripe_len;
+       map->io_align = raid_stripe_len;
+       map->io_width = raid_stripe_len;
        map->type = type;
        map->sub_stripes = sub_stripes;
 
        *map_ret = map;
-       num_bytes = stripe_size * (num_stripes / ncopies);
+       num_bytes = stripe_size * data_stripes;
 
        *stripe_size_out = stripe_size;
        *num_bytes_out = num_bytes;
@@ -3718,15 +3918,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        write_unlock(&em_tree->lock);
-       free_extent_map(em);
-       if (ret)
-               goto error;
-
-       ret = btrfs_make_block_group(trans, extent_root, 0, type,
-                                    BTRFS_FIRST_CHUNK_TREE_OBJECTID,
-                                    start, num_bytes);
-       if (ret)
+       if (ret) {
+               free_extent_map(em);
                goto error;
+       }
 
        for (i = 0; i < map->num_stripes; ++i) {
                struct btrfs_device *device;
@@ -3739,15 +3934,44 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                                info->chunk_root->root_key.objectid,
                                BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                start, dev_offset, stripe_size);
-               if (ret) {
-                       btrfs_abort_transaction(trans, extent_root, ret);
-                       goto error;
-               }
+               if (ret)
+                       goto error_dev_extent;
+       }
+
+       ret = btrfs_make_block_group(trans, extent_root, 0, type,
+                                    BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+                                    start, num_bytes);
+       if (ret) {
+               i = map->num_stripes - 1;
+               goto error_dev_extent;
        }
 
+       free_extent_map(em);
+       check_raid56_incompat_flag(extent_root->fs_info, type);
+
        kfree(devices_info);
        return 0;
 
+error_dev_extent:
+       for (; i >= 0; i--) {
+               struct btrfs_device *device;
+               int err;
+
+               device = map->stripes[i].dev;
+               err = btrfs_free_dev_extent(trans, device, start);
+               if (err) {
+                       btrfs_abort_transaction(trans, extent_root, err);
+                       break;
+               }
+       }
+       write_lock(&em_tree->lock);
+       remove_extent_mapping(em_tree, em);
+       write_unlock(&em_tree->lock);
+
+       /* One for our allocation */
+       free_extent_map(em);
+       /* One for the tree reference */
+       free_extent_map(em);
 error:
        kfree(map);
        kfree(devices_info);
@@ -3887,10 +4111,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
        if (ret)
                return ret;
 
-       alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
-                               fs_info->avail_metadata_alloc_bits;
-       alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
-
+       alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
                                  &stripe_size, chunk_offset, alloc_profile);
        if (ret)
@@ -3898,10 +4119,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
 
        sys_chunk_offset = chunk_offset + chunk_size;
 
-       alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
-                               fs_info->avail_system_alloc_bits;
-       alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
-
+       alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
        ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
                                  &sys_chunk_size, &sys_stripe_size,
                                  sys_chunk_offset, alloc_profile);
@@ -4014,6 +4232,10 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
                ret = map->num_stripes;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                ret = map->sub_stripes;
+       else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+               ret = 2;
+       else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+               ret = 3;
        else
                ret = 1;
        free_extent_map(em);
@@ -4026,6 +4248,52 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
        return ret;
 }
 
+unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+                                   struct btrfs_mapping_tree *map_tree,
+                                   u64 logical)
+{
+       struct extent_map *em;
+       struct map_lookup *map;
+       struct extent_map_tree *em_tree = &map_tree->map_tree;
+       unsigned long len = root->sectorsize;
+
+       read_lock(&em_tree->lock);
+       em = lookup_extent_mapping(em_tree, logical, len);
+       read_unlock(&em_tree->lock);
+       BUG_ON(!em);
+
+       BUG_ON(em->start > logical || em->start + em->len < logical);
+       map = (struct map_lookup *)em->bdev;
+       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                        BTRFS_BLOCK_GROUP_RAID6)) {
+               len = map->stripe_len * nr_data_stripes(map);
+       }
+       free_extent_map(em);
+       return len;
+}
+
+int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+                          u64 logical, u64 len, int mirror_num)
+{
+       struct extent_map *em;
+       struct map_lookup *map;
+       struct extent_map_tree *em_tree = &map_tree->map_tree;
+       int ret = 0;
+
+       read_lock(&em_tree->lock);
+       em = lookup_extent_mapping(em_tree, logical, len);
+       read_unlock(&em_tree->lock);
+       BUG_ON(!em);
+
+       BUG_ON(em->start > logical || em->start + em->len < logical);
+       map = (struct map_lookup *)em->bdev;
+       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                        BTRFS_BLOCK_GROUP_RAID6))
+               ret = 1;
+       free_extent_map(em);
+       return ret;
+}
+
 static int find_live_mirror(struct btrfs_fs_info *fs_info,
                            struct map_lookup *map, int first, int num,
                            int optimal, int dev_replace_is_ongoing)
@@ -4063,10 +4331,39 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
        return optimal;
 }
 
+static inline int parity_smaller(u64 a, u64 b)
+{
+       return a > b;
+}
+
+/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
+static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
+{
+       struct btrfs_bio_stripe s;
+       int i;
+       u64 l;
+       int again = 1;
+
+       while (again) {
+               again = 0;
+               for (i = 0; i < bbio->num_stripes - 1; i++) {
+                       if (parity_smaller(raid_map[i], raid_map[i+1])) {
+                               s = bbio->stripes[i];
+                               l = raid_map[i];
+                               bbio->stripes[i] = bbio->stripes[i+1];
+                               raid_map[i] = raid_map[i+1];
+                               bbio->stripes[i+1] = s;
+                               raid_map[i+1] = l;
+                               again = 1;
+                       }
+               }
+       }
+}
+
 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                             u64 logical, u64 *length,
                             struct btrfs_bio **bbio_ret,
-                            int mirror_num)
+                            int mirror_num, u64 **raid_map_ret)
 {
        struct extent_map *em;
        struct map_lookup *map;
@@ -4078,6 +4375,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        u64 stripe_nr;
        u64 stripe_nr_orig;
        u64 stripe_nr_end;
+       u64 stripe_len;
+       u64 *raid_map = NULL;
        int stripe_index;
        int i;
        int ret = 0;
@@ -4089,6 +4388,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        int num_alloc_stripes;
        int patch_the_first_stripe_for_dev_replace = 0;
        u64 physical_to_patch_in_first_stripe = 0;
+       u64 raid56_full_stripe_start = (u64)-1;
 
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, *length);
@@ -4105,29 +4405,63 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        map = (struct map_lookup *)em->bdev;
        offset = logical - em->start;
 
+       if (mirror_num > map->num_stripes)
+               mirror_num = 0;
+
+       stripe_len = map->stripe_len;
        stripe_nr = offset;
        /*
         * stripe_nr counts the total number of stripes we have to stride
         * to get to this block
         */
-       do_div(stripe_nr, map->stripe_len);
+       do_div(stripe_nr, stripe_len);
 
-       stripe_offset = stripe_nr * map->stripe_len;
+       stripe_offset = stripe_nr * stripe_len;
        BUG_ON(offset < stripe_offset);
 
        /* stripe_offset is the offset of this block in its stripe*/
        stripe_offset = offset - stripe_offset;
 
-       if (rw & REQ_DISCARD)
+       /* if we're here for raid56, we need to know the stripe aligned start */
+       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+               unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
+               raid56_full_stripe_start = offset;
+
+               /* allow a write of a full stripe, but make sure we don't
+                * allow straddling of stripes
+                */
+               do_div(raid56_full_stripe_start, full_stripe_len);
+               raid56_full_stripe_start *= full_stripe_len;
+       }
+
+       if (rw & REQ_DISCARD) {
+               /* we don't discard raid56 yet */
+               if (map->type &
+                   (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+                       ret = -EOPNOTSUPP;
+                       goto out;
+               }
                *length = min_t(u64, em->len - offset, *length);
-       else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
-               /* we limit the length of each bio to what fits in a stripe */
-               *length = min_t(u64, em->len - offset,
-                               map->stripe_len - stripe_offset);
+       } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+               u64 max_len;
+               /* For writes to RAID[56], allow a full stripeset across all disks.
+                  For other RAID types and for RAID[56] reads, just allow a single
+                  stripe (on a single disk). */
+               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+                   (rw & REQ_WRITE)) {
+                       max_len = stripe_len * nr_data_stripes(map) -
+                               (offset - raid56_full_stripe_start);
+               } else {
+                       /* we limit the length of each bio to what fits in a stripe */
+                       max_len = stripe_len - stripe_offset;
+               }
+               *length = min_t(u64, em->len - offset, max_len);
        } else {
                *length = em->len - offset;
        }
 
+       /* This is for when we're called from btrfs_merge_bio_hook() and all
+          it cares about is the length */
        if (!bbio_ret)
                goto out;
 
@@ -4160,7 +4494,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                u64 physical_of_found = 0;
 
                ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
-                            logical, &tmp_length, &tmp_bbio, 0);
+                            logical, &tmp_length, &tmp_bbio, 0, NULL);
                if (ret) {
                        WARN_ON(tmp_bbio != NULL);
                        goto out;
@@ -4221,11 +4555,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        num_stripes = 1;
        stripe_index = 0;
        stripe_nr_orig = stripe_nr;
-       stripe_nr_end = (offset + *length + map->stripe_len - 1) &
-                       (~(map->stripe_len - 1));
+       stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
        do_div(stripe_nr_end, map->stripe_len);
        stripe_end_offset = stripe_nr_end * map->stripe_len -
                            (offset + *length);
+
        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                if (rw & REQ_DISCARD)
                        num_stripes = min_t(u64, map->num_stripes,
@@ -4276,6 +4610,65 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                                              dev_replace_is_ongoing);
                        mirror_num = stripe_index - old_stripe_index + 1;
                }
+
+       } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                               BTRFS_BLOCK_GROUP_RAID6)) {
+               u64 tmp;
+
+               if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
+                   && raid_map_ret) {
+                       int i, rot;
+
+                       /* push stripe_nr back to the start of the full stripe */
+                       stripe_nr = raid56_full_stripe_start;
+                       do_div(stripe_nr, stripe_len);
+
+                       stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+
+                       /* RAID[56] write or recovery. Return all stripes */
+                       num_stripes = map->num_stripes;
+                       max_errors = nr_parity_stripes(map);
+
+                       raid_map = kmalloc(sizeof(u64) * num_stripes,
+                                          GFP_NOFS);
+                       if (!raid_map) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+
+                       /* Work out the disk rotation on this stripe-set */
+                       tmp = stripe_nr;
+                       rot = do_div(tmp, num_stripes);
+
+                       /* Fill in the logical address of each stripe */
+                       tmp = stripe_nr * nr_data_stripes(map);
+                       for (i = 0; i < nr_data_stripes(map); i++)
+                               raid_map[(i+rot) % num_stripes] =
+                                       em->start + (tmp + i) * map->stripe_len;
+
+                       raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
+                       if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+                               raid_map[(i+rot+1) % num_stripes] =
+                                       RAID6_Q_STRIPE;
+
+                       *length = map->stripe_len;
+                       stripe_index = 0;
+                       stripe_offset = 0;
+               } else {
+                       /*
+                        * Mirror #0 or #1 means the original data block.
+                        * Mirror #2 is RAID5 parity block.
+                        * Mirror #3 is RAID6 Q block.
+                        */
+                       stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+                       if (mirror_num > 1)
+                               stripe_index = nr_data_stripes(map) +
+                                               mirror_num - 2;
+
+                       /* We distribute the parity blocks across stripes */
+                       tmp = stripe_nr + stripe_index;
+                       stripe_index = do_div(tmp, map->num_stripes);
+               }
        } else {
                /*
                 * after this do_div call, stripe_nr is the number of stripes
@@ -4384,8 +4777,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                                 BTRFS_BLOCK_GROUP_RAID10 |
+                                BTRFS_BLOCK_GROUP_RAID5 |
                                 BTRFS_BLOCK_GROUP_DUP)) {
                        max_errors = 1;
+               } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
+                       max_errors = 2;
                }
        }
 
@@ -4486,6 +4882,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
                bbio->mirror_num = map->num_stripes + 1;
        }
+       if (raid_map) {
+               sort_parity_stripes(bbio, raid_map);
+               *raid_map_ret = raid_map;
+       }
 out:
        if (dev_replace_is_ongoing)
                btrfs_dev_replace_unlock(dev_replace);
@@ -4498,7 +4898,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                      struct btrfs_bio **bbio_ret, int mirror_num)
 {
        return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
-                                mirror_num);
+                                mirror_num, NULL);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -4512,6 +4912,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        u64 bytenr;
        u64 length;
        u64 stripe_nr;
+       u64 rmap_len;
        int i, j, nr = 0;
 
        read_lock(&em_tree->lock);
@@ -4522,10 +4923,17 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        map = (struct map_lookup *)em->bdev;
 
        length = em->len;
+       rmap_len = map->stripe_len;
+
        if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                do_div(length, map->num_stripes / map->sub_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
                do_div(length, map->num_stripes);
+       else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                             BTRFS_BLOCK_GROUP_RAID6)) {
+               do_div(length, nr_data_stripes(map));
+               rmap_len = map->stripe_len * nr_data_stripes(map);
+       }
 
        buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
        BUG_ON(!buf); /* -ENOMEM */
@@ -4545,8 +4953,11 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                        do_div(stripe_nr, map->sub_stripes);
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                        stripe_nr = stripe_nr * map->num_stripes + i;
-               }
-               bytenr = chunk_start + stripe_nr * map->stripe_len;
+               } /* else if RAID[56], multiply by nr_data_stripes().
+                  * Alternatively, just use rmap_len below instead of
+                  * map->stripe_len */
+
+               bytenr = chunk_start + stripe_nr * rmap_len;
                WARN_ON(nr >= map->num_stripes);
                for (j = 0; j < nr; j++) {
                        if (buf[j] == bytenr)
@@ -4560,7 +4971,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 
        *logical = buf;
        *naddrs = nr;
-       *stripe_len = map->stripe_len;
+       *stripe_len = rmap_len;
 
        free_extent_map(em);
        return 0;
@@ -4634,7 +5045,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
                bio->bi_bdev = (struct block_device *)
                                        (unsigned long)bbio->mirror_num;
                /* only send an error to the higher layers if it is
-                * beyond the tolerance of the multi-bio
+                * beyond the tolerance of the btrfs bio
                 */
                if (atomic_read(&bbio->error) > bbio->max_errors) {
                        err = -EIO;
@@ -4668,13 +5079,18 @@ struct async_sched {
  * This will add one bio to the pending list for a device and make sure
  * the work struct is scheduled.
  */
-static noinline void schedule_bio(struct btrfs_root *root,
+noinline void btrfs_schedule_bio(struct btrfs_root *root,
                                 struct btrfs_device *device,
                                 int rw, struct bio *bio)
 {
        int should_queue = 1;
        struct btrfs_pending_bios *pending_bios;
 
+       if (device->missing || !device->bdev) {
+               bio_endio(bio, -EIO);
+               return;
+       }
+
        /* don't bother with additional async steps for reads, right now */
        if (!(rw & REQ_WRITE)) {
                bio_get(bio);
@@ -4772,7 +5188,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
 #endif
        bio->bi_bdev = dev->bdev;
        if (async)
-               schedule_bio(root, dev, rw, bio);
+               btrfs_schedule_bio(root, dev, rw, bio);
        else
                btrfsic_submit_bio(rw, bio);
 }
@@ -4831,6 +5247,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        u64 logical = (u64)bio->bi_sector << 9;
        u64 length = 0;
        u64 map_length;
+       u64 *raid_map = NULL;
        int ret;
        int dev_nr = 0;
        int total_devs = 1;
@@ -4839,12 +5256,30 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        length = bio->bi_size;
        map_length = length;
 
-       ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
-                             mirror_num);
-       if (ret)
+       ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
+                             mirror_num, &raid_map);
+       if (ret) /* -ENOMEM */
                return ret;
 
        total_devs = bbio->num_stripes;
+       bbio->orig_bio = first_bio;
+       bbio->private = first_bio->bi_private;
+       bbio->end_io = first_bio->bi_end_io;
+       atomic_set(&bbio->stripes_pending, bbio->num_stripes);
+
+       if (raid_map) {
+               /* In this case, map_length has been set to the length of
+                  a single stripe; not the whole write */
+               if (rw & WRITE) {
+                       return raid56_parity_write(root, bio, bbio,
+                                                  raid_map, map_length);
+               } else {
+                       return raid56_parity_recover(root, bio, bbio,
+                                                    raid_map, map_length,
+                                                    mirror_num);
+               }
+       }
+
        if (map_length < length) {
                printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
                       "len %llu\n", (unsigned long long)logical,
@@ -4853,11 +5288,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                BUG();
        }
 
-       bbio->orig_bio = first_bio;
-       bbio->private = first_bio->bi_private;
-       bbio->end_io = first_bio->bi_end_io;
-       atomic_set(&bbio->stripes_pending, bbio->num_stripes);
-
        while (dev_nr < total_devs) {
                dev = bbio->stripes[dev_nr].dev;
                if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
index d3c3939..062d860 100644 (file)
@@ -21,8 +21,8 @@
 
 #include <linux/bio.h>
 #include <linux/sort.h>
+#include <linux/btrfs.h>
 #include "async-thread.h"
-#include "ioctl.h"
 
 #define BTRFS_STRIPE_LEN       (64 * 1024)
 
@@ -321,7 +321,14 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
                                              struct btrfs_device *tgtdev);
 int btrfs_scratch_superblock(struct btrfs_device *device);
-
+void btrfs_schedule_bio(struct btrfs_root *root,
+                       struct btrfs_device *device,
+                       int rw, struct bio *bio);
+int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+                          u64 logical, u64 len, int mirror_num);
+unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+                                   struct btrfs_mapping_tree *map_tree,
+                                   u64 logical);
 static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
                                      int index)
 {
index 62169c1..b4dcb34 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <trace/events/block.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
@@ -53,6 +54,13 @@ void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 }
 EXPORT_SYMBOL(init_buffer);
 
+inline void touch_buffer(struct buffer_head *bh)
+{
+       trace_block_touch_buffer(bh);
+       mark_page_accessed(bh->b_page);
+}
+EXPORT_SYMBOL(touch_buffer);
+
 static int sleep_on_buffer(void *word)
 {
        io_schedule();
@@ -1113,6 +1121,8 @@ void mark_buffer_dirty(struct buffer_head *bh)
 {
        WARN_ON_ONCE(!buffer_uptodate(bh));
 
+       trace_block_dirty_buffer(bh);
+
        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
@@ -2332,7 +2342,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                         get_block_t get_block)
 {
        struct page *page = vmf->page;
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        unsigned long end;
        loff_t size;
        int ret;
@@ -2371,7 +2381,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                   get_block_t get_block)
 {
        int ret;
-       struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+       struct super_block *sb = file_inode(vma->vm_file)->i_sb;
 
        sb_start_pagefault(sb);
 
index 064d1a6..a60ea97 100644 (file)
@@ -195,7 +195,7 @@ static int ceph_releasepage(struct page *page, gfp_t g)
  */
 static int readpage_nounlock(struct file *filp, struct page *page)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc = 
                &ceph_inode_to_client(inode)->client->osdc;
@@ -236,16 +236,10 @@ static int ceph_readpage(struct file *filp, struct page *page)
 static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 {
        struct inode *inode = req->r_inode;
-       struct ceph_osd_reply_head *replyhead;
-       int rc, bytes;
+       int rc = req->r_result;
+       int bytes = le32_to_cpu(msg->hdr.data_len);
        int i;
 
-       /* parse reply */
-       replyhead = msg->front.iov_base;
-       WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
-       rc = le32_to_cpu(replyhead->result);
-       bytes = le32_to_cpu(msg->hdr.data_len);
-
        dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
 
        /* unlock all pages, zeroing any data we didn't read */
@@ -315,7 +309,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, 0,
                                    ci->i_truncate_seq, ci->i_truncate_size,
-                                   NULL, false, 1, 0);
+                                   NULL, false, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -370,7 +364,7 @@ out:
 static int ceph_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *page_list, unsigned nr_pages)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        int rc = 0;
        int max = 0;
@@ -492,8 +486,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
                                   &ci->i_layout, snapc,
                                   page_off, len,
                                   ci->i_truncate_seq, ci->i_truncate_size,
-                                  &inode->i_mtime,
-                                  &page, 1, 0, 0, true);
+                                  &inode->i_mtime, &page, 1);
        if (err < 0) {
                dout("writepage setting page/mapping error %d %p\n", err, page);
                SetPageError(page);
@@ -554,27 +547,18 @@ static void writepages_finish(struct ceph_osd_request *req,
                              struct ceph_msg *msg)
 {
        struct inode *inode = req->r_inode;
-       struct ceph_osd_reply_head *replyhead;
-       struct ceph_osd_op *op;
        struct ceph_inode_info *ci = ceph_inode(inode);
        unsigned wrote;
        struct page *page;
        int i;
        struct ceph_snap_context *snapc = req->r_snapc;
        struct address_space *mapping = inode->i_mapping;
-       __s32 rc = -EIO;
-       u64 bytes = 0;
+       int rc = req->r_result;
+       u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        long writeback_stat;
        unsigned issued = ceph_caps_issued(ci);
 
-       /* parse reply */
-       replyhead = msg->front.iov_base;
-       WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
-       op = (void *)(replyhead + 1);
-       rc = le32_to_cpu(replyhead->result);
-       bytes = le64_to_cpu(op->extent.length);
-
        if (rc >= 0) {
                /*
                 * Assume we wrote the pages we originally sent.  The
@@ -741,8 +725,6 @@ retry:
                struct page *page;
                int want;
                u64 offset, len;
-               struct ceph_osd_request_head *reqhead;
-               struct ceph_osd_op *op;
                long writeback_stat;
 
                next = 0;
@@ -838,7 +820,7 @@ get_more_pages:
                                            snapc, do_sync,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
-                                           &inode->i_mtime, true, 1, 0);
+                                           &inode->i_mtime, true, 0);
 
                                if (IS_ERR(req)) {
                                        rc = PTR_ERR(req);
@@ -906,10 +888,8 @@ get_more_pages:
 
                /* revise final length, page count */
                req->r_num_pages = locked_pages;
-               reqhead = req->r_request->front.iov_base;
-               op = (void *)(reqhead + 1);
-               op->extent.length = cpu_to_le64(len);
-               op->payload_len = cpu_to_le32(len);
+               req->r_request_ops[0].extent.length = cpu_to_le64(len);
+               req->r_request_ops[0].payload_len = cpu_to_le32(len);
                req->r_request->hdr.data_len = cpu_to_le32(len);
 
                rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
@@ -977,7 +957,7 @@ static int ceph_update_writeable_page(struct file *file,
                            loff_t pos, unsigned len,
                            struct page *page)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        loff_t page_off = pos & PAGE_CACHE_MASK;
@@ -1086,7 +1066,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi = file->private_data;
        struct page *page;
@@ -1144,7 +1124,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1228,7 +1208,7 @@ const struct address_space_operations ceph_aops = {
  */
 static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       struct inode *inode = vma->vm_file->f_dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        struct page *page = vmf->page;
        struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
        loff_t off = page_offset(page);
index ae2be69..78e2f57 100644 (file)
@@ -611,8 +611,16 @@ retry:
 
        if (flags & CEPH_CAP_FLAG_AUTH)
                ci->i_auth_cap = cap;
-       else if (ci->i_auth_cap == cap)
+       else if (ci->i_auth_cap == cap) {
                ci->i_auth_cap = NULL;
+               spin_lock(&mdsc->cap_dirty_lock);
+               if (!list_empty(&ci->i_dirty_item)) {
+                       dout(" moving %p to cap_dirty_migrating\n", inode);
+                       list_move(&ci->i_dirty_item,
+                                 &mdsc->cap_dirty_migrating);
+               }
+               spin_unlock(&mdsc->cap_dirty_lock);
+       }
 
        dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
             inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
@@ -1460,7 +1468,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
-       int file_wanted, used;
+       int file_wanted, used, cap_used;
        int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
        int issued, implemented, want, retain, revoking, flushing = 0;
        int mds = -1;   /* keep track of how far we've gone through i_caps list
@@ -1563,9 +1571,14 @@ retry_locked:
 
                /* NOTE: no side-effects allowed, until we take s_mutex */
 
+               cap_used = used;
+               if (ci->i_auth_cap && cap != ci->i_auth_cap)
+                       cap_used &= ~ci->i_auth_cap->issued;
+
                revoking = cap->implemented & ~cap->issued;
-               dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
+               dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
                     cap->mds, cap, ceph_cap_string(cap->issued),
+                    ceph_cap_string(cap_used),
                     ceph_cap_string(cap->implemented),
                     ceph_cap_string(revoking));
 
@@ -1593,7 +1606,7 @@ retry_locked:
                }
 
                /* completed revocation? going down and there are no caps? */
-               if (revoking && (revoking & used) == 0) {
+               if (revoking && (revoking & cap_used) == 0) {
                        dout("completed revocation of %s\n",
                             ceph_cap_string(cap->implemented & ~cap->issued));
                        goto ack;
@@ -1670,8 +1683,8 @@ ack:
                sent++;
 
                /* __send_cap drops i_ceph_lock */
-               delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
-                                     retain, flushing, NULL);
+               delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
+                                     want, retain, flushing, NULL);
                goto retry; /* retake i_ceph_lock and restart our cap scan. */
        }
 
@@ -2417,7 +2430,9 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
                dout("mds wanted %s -> %s\n",
                     ceph_cap_string(le32_to_cpu(grant->wanted)),
                     ceph_cap_string(wanted));
-               grant->wanted = cpu_to_le32(wanted);
+               /* imported cap may not have correct mds_wanted */
+               if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
+                       check_caps = 1;
        }
 
        cap->seq = seq;
@@ -2821,6 +2836,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
             (unsigned)seq);
 
+       if (op == CEPH_CAP_OP_IMPORT)
+               ceph_add_cap_releases(mdsc, session);
+
        /* lookup ino */
        inode = ceph_find_inode(sb, vino);
        ci = ceph_inode(inode);
index 8c1aabe..6d797f4 100644 (file)
@@ -238,7 +238,7 @@ static int note_last_dentry(struct ceph_file_info *fi, const char *name,
 static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
        struct ceph_file_info *fi = filp->private_data;
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1138,7 +1138,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                             loff_t *ppos)
 {
        struct ceph_file_info *cf = file->private_data;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;
        const int bufsize = 1024;
@@ -1188,7 +1188,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
 static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
                          int datasync)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_dirops;
        struct ceph_mds_request *req;
index ca3ab3f..16796be 100644 (file)
@@ -81,7 +81,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
                if (parent_inode) {
                        /* nfsd wants connectable */
                        *max_len = connected_handle_length;
-                       type = 255;
+                       type = FILEID_INVALID;
                } else {
                        dout("encode_fh %p\n", dentry);
                        fh->ino = ceph_ino(inode);
@@ -90,7 +90,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
                }
        } else {
                *max_len = handle_length;
-               type = 255;
+               type = FILEID_INVALID;
        }
        if (dentry)
                dput(dentry);
index e51558f..bf338d9 100644 (file)
@@ -243,6 +243,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
+       if (err)
+               goto out_err;
+
        err = ceph_handle_snapdir(req, dentry, err);
        if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
@@ -263,6 +266,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
+               if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
+                       *opened |= FILE_CREATED;
+               }
                err = finish_open(file, dentry, ceph_open, opened);
        }
 
@@ -393,7 +399,7 @@ more:
 static ssize_t ceph_sync_read(struct file *file, char __user *data,
                              unsigned len, loff_t *poff, int *checkeof)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct page **pages;
        u64 off = *poff;
        int num_pages, ret;
@@ -466,7 +472,7 @@ static void sync_write_commit(struct ceph_osd_request *req,
 static ssize_t ceph_sync_write(struct file *file, const char __user *data,
                               size_t left, loff_t *offset)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
@@ -483,7 +489,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
        int ret;
        struct timespec mtime = CURRENT_TIME;
 
-       if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
+       if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;
 
        dout("sync_write on file %p %lld~%u %s\n", file, *offset,
@@ -535,7 +541,7 @@ more:
                                    ci->i_snap_realm->cached_context,
                                    do_sync,
                                    ci->i_truncate_seq, ci->i_truncate_size,
-                                   &mtime, false, 2, page_align);
+                                   &mtime, false, page_align);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -637,7 +643,7 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
        struct ceph_file_info *fi = filp->private_data;
        loff_t *ppos = &iocb->ki_pos;
        size_t len = iov->iov_len;
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        void __user *base = iov->iov_base;
        ssize_t ret;
@@ -707,7 +713,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
index d45895f..851814d 100644 (file)
@@ -1131,8 +1131,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                                            req->r_request_started);
                dout(" final dn %p\n", dn);
                i++;
-       } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
-                  req->r_op == CEPH_MDS_OP_MKSNAP) {
+       } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+                  req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
                struct dentry *dn = req->r_dentry;
 
                /* fill out a snapdir LOOKUPSNAP dentry */
@@ -1196,6 +1196,39 @@ done:
 /*
  * Prepopulate our cache with readdir results, leases, etc.
  */
+static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
+                                          struct ceph_mds_session *session)
+{
+       struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+       int i, err = 0;
+
+       for (i = 0; i < rinfo->dir_nr; i++) {
+               struct ceph_vino vino;
+               struct inode *in;
+               int rc;
+
+               vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
+               vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
+
+               in = ceph_get_inode(req->r_dentry->d_sb, vino);
+               if (IS_ERR(in)) {
+                       err = PTR_ERR(in);
+                       dout("new_inode badness got %d\n", err);
+                       continue;
+               }
+               rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
+                               req->r_request_started, -1,
+                               &req->r_caps_reservation);
+               if (rc < 0) {
+                       pr_err("fill_inode badness on %p got %d\n", in, rc);
+                       err = rc;
+                       continue;
+               }
+       }
+
+       return err;
+}
+
 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                             struct ceph_mds_session *session)
 {
@@ -1210,6 +1243,9 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
        u64 frag = le32_to_cpu(rhead->args.readdir.frag);
        struct ceph_dentry_info *di;
 
+       if (req->r_aborted)
+               return readdir_prepopulate_inodes_only(req, session);
+
        if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
                snapdir = ceph_get_snapdir(parent->d_inode);
                parent = d_find_alias(snapdir);
index 36549a4..4a98934 100644 (file)
  */
 static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
 {
-       struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
+       struct ceph_inode_info *ci = ceph_inode(file_inode(file));
        struct ceph_ioctl_layout l;
        int err;
 
-       err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
+       err = ceph_do_getattr(file_inode(file), CEPH_STAT_CAP_LAYOUT);
        if (!err) {
                l.stripe_unit = ceph_file_layout_su(ci->i_layout);
                l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
@@ -63,12 +63,12 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
 
 static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct inode *parent_inode;
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_mds_request *req;
        struct ceph_ioctl_layout l;
-       struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
+       struct ceph_inode_info *ci = ceph_inode(file_inode(file));
        struct ceph_ioctl_layout nl;
        int err;
 
@@ -76,7 +76,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
                return -EFAULT;
 
        /* validate changed params against current layout */
-       err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
+       err = ceph_do_getattr(file_inode(file), CEPH_STAT_CAP_LAYOUT);
        if (err)
                return err;
 
@@ -136,7 +136,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
  */
 static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_mds_request *req;
        struct ceph_ioctl_layout l;
        int err;
@@ -179,13 +179,12 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
 static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
 {
        struct ceph_ioctl_dataloc dl;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
        u64 len = 1, olen;
        u64 tmp;
-       struct ceph_object_layout ol;
        struct ceph_pg pgid;
        int r;
 
@@ -194,7 +193,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
                return -EFAULT;
 
        down_read(&osdc->map_sem);
-       r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
+       r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
                                          &dl.object_no, &dl.object_offset,
                                          &olen);
        if (r < 0)
@@ -209,10 +208,9 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
 
        snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
                 ceph_ino(inode), dl.object_no);
-       ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout,
+       ceph_calc_object_layout(&pgid, dl.object_name, &ci->i_layout,
                                osdc->osdmap);
 
-       pgid = ol.ol_pgid;
        dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
        if (dl.osd >= 0) {
                struct ceph_entity_addr *a =
@@ -234,7 +232,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
 static long ceph_ioctl_lazyio(struct file *file)
 {
        struct ceph_file_info *fi = file->private_data;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
index 80576d0..202dd3d 100644 (file)
@@ -13,7 +13,7 @@
 static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
                             int cmd, u8 wait, struct file_lock *fl)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_mds_request *req;
index 7a3dfe0..442880d 100644 (file)
@@ -232,6 +232,30 @@ bad:
        return -EIO;
 }
 
+/*
+ * parse create results
+ */
+static int parse_reply_info_create(void **p, void *end,
+                                 struct ceph_mds_reply_info_parsed *info,
+                                 int features)
+{
+       if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
+               if (*p == end) {
+                       info->has_create_ino = false;
+               } else {
+                       info->has_create_ino = true;
+                       info->ino = ceph_decode_64(p);
+               }
+       }
+
+       if (unlikely(*p != end))
+               goto bad;
+       return 0;
+
+bad:
+       return -EIO;
+}
+
 /*
  * parse extra results
  */
@@ -241,8 +265,12 @@ static int parse_reply_info_extra(void **p, void *end,
 {
        if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
                return parse_reply_info_filelock(p, end, info, features);
-       else
+       else if (info->head->op == CEPH_MDS_OP_READDIR)
                return parse_reply_info_dir(p, end, info, features);
+       else if (info->head->op == CEPH_MDS_OP_CREATE)
+               return parse_reply_info_create(p, end, info, features);
+       else
+               return -EIO;
 }
 
 /*
@@ -2170,7 +2198,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        mutex_lock(&req->r_fill_mutex);
        err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
        if (err == 0) {
-               if (result == 0 && req->r_op != CEPH_MDS_OP_GETFILELOCK &&
+               if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
+                                   req->r_op == CEPH_MDS_OP_LSSNAP) &&
                    rinfo->dir_nr)
                        ceph_readdir_prepopulate(req, req->r_session);
                ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
index ff4188b..c2a19fb 100644 (file)
@@ -74,6 +74,12 @@ struct ceph_mds_reply_info_parsed {
                        struct ceph_mds_reply_info_in *dir_in;
                        u8                            dir_complete, dir_end;
                };
+
+               /* for create results */
+               struct {
+                       bool has_create_ino;
+                       u64 ino;
+               };
        };
 
        /* encoded blob describing snapshot contexts for certain
index 73b7d44..0d3c924 100644 (file)
@@ -59,6 +59,10 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
                return ERR_PTR(-ENOMEM);
 
        ceph_decode_16_safe(p, end, version, bad);
+       if (version > 3) {
+               pr_warning("got mdsmap version %d > 3, failing", version);
+               goto bad;
+       }
 
        ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
        m->m_epoch = ceph_decode_32(p);
@@ -144,13 +148,13 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
        /* pg_pools */
        ceph_decode_32_safe(p, end, n, bad);
        m->m_num_data_pg_pools = n;
-       m->m_data_pg_pools = kcalloc(n, sizeof(u32), GFP_NOFS);
+       m->m_data_pg_pools = kcalloc(n, sizeof(u64), GFP_NOFS);
        if (!m->m_data_pg_pools)
                goto badmem;
-       ceph_decode_need(p, end, sizeof(u32)*(n+1), bad);
+       ceph_decode_need(p, end, sizeof(u64)*(n+1), bad);
        for (i = 0; i < n; i++)
-               m->m_data_pg_pools[i] = ceph_decode_32(p);
-       m->m_cas_pg_pool = ceph_decode_32(p);
+               m->m_data_pg_pools[i] = ceph_decode_64(p);
+       m->m_cas_pg_pool = ceph_decode_64(p);
 
        /* ok, we don't care about the rest. */
        dout("mdsmap_decode success epoch %u\n", m->m_epoch);
index cd5097d..89fa4a9 100644 (file)
@@ -15,6 +15,7 @@ const char *ceph_mds_state_name(int s)
        case CEPH_MDS_STATE_BOOT:       return "up:boot";
        case CEPH_MDS_STATE_STANDBY:    return "up:standby";
        case CEPH_MDS_STATE_STANDBY_REPLAY:    return "up:standby-replay";
+       case CEPH_MDS_STATE_REPLAYONCE: return "up:oneshot-replay";
        case CEPH_MDS_STATE_CREATING:   return "up:creating";
        case CEPH_MDS_STATE_STARTING:   return "up:starting";
                /* up and in */
@@ -50,10 +51,13 @@ const char *ceph_mds_op_name(int op)
        case CEPH_MDS_OP_LOOKUP:  return "lookup";
        case CEPH_MDS_OP_LOOKUPHASH:  return "lookuphash";
        case CEPH_MDS_OP_LOOKUPPARENT:  return "lookupparent";
+       case CEPH_MDS_OP_LOOKUPINO:  return "lookupino";
        case CEPH_MDS_OP_GETATTR:  return "getattr";
        case CEPH_MDS_OP_SETXATTR: return "setxattr";
        case CEPH_MDS_OP_SETATTR: return "setattr";
        case CEPH_MDS_OP_RMXATTR: return "rmxattr";
+       case CEPH_MDS_OP_SETLAYOUT: return "setlayou";
+       case CEPH_MDS_OP_SETDIRLAYOUT: return "setdirlayout";
        case CEPH_MDS_OP_READDIR: return "readdir";
        case CEPH_MDS_OP_MKNOD: return "mknod";
        case CEPH_MDS_OP_LINK: return "link";
index e86aa99..9fe17c6 100644 (file)
@@ -71,8 +71,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
        /*
         * express utilization in terms of large blocks to avoid
         * overflow on 32-bit machines.
+        *
+        * NOTE: for the time being, we make bsize == frsize to humor
+        * not-yet-ancient versions of glibc that are broken.
+        * Someday, we will probably want to report a real block
+        * size...  whatever that may mean for a network file system!
         */
        buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
+       buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
        buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
        buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
        buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
@@ -80,7 +86,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_files = le64_to_cpu(st.num_objects);
        buf->f_ffree = -1;
        buf->f_namelen = NAME_MAX;
-       buf->f_frsize = PAGE_CACHE_SIZE;
 
        /* leave fsid little-endian, regardless of host endianness */
        fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
index f053bbd..c7b3097 100644 (file)
@@ -21,7 +21,7 @@
 
 /* large granularity for statfs utilization stats to facilitate
  * large volume sizes on 32-bit machines. */
-#define CEPH_BLOCK_SHIFT   20  /* 1 MB */
+#define CEPH_BLOCK_SHIFT   22  /* 4 MB */
 #define CEPH_BLOCK         (1 << CEPH_BLOCK_SHIFT)
 
 #define CEPH_MOUNT_OPT_DIRSTAT         (1<<4) /* `cat dirname` for stats */
@@ -798,13 +798,7 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
 /* file.c */
 extern const struct file_operations ceph_file_fops;
 extern const struct address_space_operations ceph_aops;
-extern int ceph_copy_to_page_vector(struct page **pages,
-                                   const char *data,
-                                   loff_t off, size_t len);
-extern int ceph_copy_from_page_vector(struct page **pages,
-                                   char *data,
-                                   loff_t off, size_t len);
-extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
+
 extern int ceph_open(struct inode *inode, struct file *file);
 extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                            struct file *file, unsigned flags, umode_t mode,
index 2c2ae5b..9b6b2b6 100644 (file)
@@ -29,9 +29,94 @@ struct ceph_vxattr {
        size_t name_size;       /* strlen(name) + 1 (for '\0') */
        size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
                              size_t size);
-       bool readonly;
+       bool readonly, hidden;
+       bool (*exists_cb)(struct ceph_inode_info *ci);
 };
 
+/* layouts */
+
+static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
+{
+       size_t s;
+       char *p = (char *)&ci->i_layout;
+
+       for (s = 0; s < sizeof(ci->i_layout); s++, p++)
+               if (*p)
+                       return true;
+       return false;
+}
+
+static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+                                       size_t size)
+{
+       int ret;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+       struct ceph_osd_client *osdc = &fsc->client->osdc;
+       s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
+       const char *pool_name;
+
+       dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
+       down_read(&osdc->map_sem);
+       pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
+       if (pool_name)
+               ret = snprintf(val, size,
+               "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%s",
+               (unsigned long long)ceph_file_layout_su(ci->i_layout),
+               (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
+               (unsigned long long)ceph_file_layout_object_size(ci->i_layout),
+               pool_name);
+       else
+               ret = snprintf(val, size,
+               "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld",
+               (unsigned long long)ceph_file_layout_su(ci->i_layout),
+               (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
+               (unsigned long long)ceph_file_layout_object_size(ci->i_layout),
+               (unsigned long long)pool);
+
+       up_read(&osdc->map_sem);
+       return ret;
+}
+
+static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
+                                              char *val, size_t size)
+{
+       return snprintf(val, size, "%lld",
+                       (unsigned long long)ceph_file_layout_su(ci->i_layout));
+}
+
+static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
+                                               char *val, size_t size)
+{
+       return snprintf(val, size, "%lld",
+              (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout));
+}
+
+static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
+                                              char *val, size_t size)
+{
+       return snprintf(val, size, "%lld",
+              (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
+}
+
+static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
+                                       char *val, size_t size)
+{
+       int ret;
+       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+       struct ceph_osd_client *osdc = &fsc->client->osdc;
+       s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
+       const char *pool_name;
+
+       down_read(&osdc->map_sem);
+       pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
+       if (pool_name)
+               ret = snprintf(val, size, "%s", pool_name);
+       else
+               ret = snprintf(val, size, "%lld", (unsigned long long)pool);
+       up_read(&osdc->map_sem);
+       return ret;
+}
+
 /* directories */
 
 static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
@@ -83,17 +168,43 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
                        (long)ci->i_rctime.tv_nsec);
 }
 
-#define CEPH_XATTR_NAME(_type, _name)  XATTR_CEPH_PREFIX #_type "." #_name
 
-#define XATTR_NAME_CEPH(_type, _name) \
-               { \
-                       .name = CEPH_XATTR_NAME(_type, _name), \
-                       .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
-                       .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
-                       .readonly = true, \
-               }
+#define CEPH_XATTR_NAME(_type, _name)  XATTR_CEPH_PREFIX #_type "." #_name
+#define CEPH_XATTR_NAME2(_type, _name, _name2) \
+       XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
+
+#define XATTR_NAME_CEPH(_type, _name)                                  \
+       {                                                               \
+               .name = CEPH_XATTR_NAME(_type, _name),                  \
+               .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \
+               .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
+               .readonly = true,                               \
+               .hidden = false,                                \
+               .exists_cb = NULL,                      \
+       }
+#define XATTR_LAYOUT_FIELD(_type, _name, _field)                       \
+       {                                                               \
+               .name = CEPH_XATTR_NAME2(_type, _name, _field), \
+               .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
+               .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \
+               .readonly = false,                              \
+               .hidden = true,                 \
+               .exists_cb = ceph_vxattrcb_layout_exists,       \
+       }
 
 static struct ceph_vxattr ceph_dir_vxattrs[] = {
+       {
+               .name = "ceph.dir.layout",
+               .name_size = sizeof("ceph.dir.layout"),
+               .getxattr_cb = ceph_vxattrcb_layout,
+               .readonly = false,
+               .hidden = false,
+               .exists_cb = ceph_vxattrcb_layout_exists,
+       },
+       XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
+       XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
+       XATTR_LAYOUT_FIELD(dir, layout, object_size),
+       XATTR_LAYOUT_FIELD(dir, layout, pool),
        XATTR_NAME_CEPH(dir, entries),
        XATTR_NAME_CEPH(dir, files),
        XATTR_NAME_CEPH(dir, subdirs),
@@ -102,35 +213,26 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
        XATTR_NAME_CEPH(dir, rsubdirs),
        XATTR_NAME_CEPH(dir, rbytes),
        XATTR_NAME_CEPH(dir, rctime),
-       { 0 }   /* Required table terminator */
+       { .name = NULL, 0 }     /* Required table terminator */
 };
 static size_t ceph_dir_vxattrs_name_size;      /* total size of all names */
 
 /* files */
 
-static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
-                                  size_t size)
-{
-       int ret;
-
-       ret = snprintf(val, size,
-               "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
-               (unsigned long long)ceph_file_layout_su(ci->i_layout),
-               (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
-               (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
-       return ret;
-}
-
 static struct ceph_vxattr ceph_file_vxattrs[] = {
-       XATTR_NAME_CEPH(file, layout),
-       /* The following extended attribute name is deprecated */
        {
-               .name = XATTR_CEPH_PREFIX "layout",
-               .name_size = sizeof (XATTR_CEPH_PREFIX "layout"),
-               .getxattr_cb = ceph_vxattrcb_file_layout,
-               .readonly = true,
+               .name = "ceph.file.layout",
+               .name_size = sizeof("ceph.file.layout"),
+               .getxattr_cb = ceph_vxattrcb_layout,
+               .readonly = false,
+               .hidden = false,
+               .exists_cb = ceph_vxattrcb_layout_exists,
        },
-       { 0 }   /* Required table terminator */
+       XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
+       XATTR_LAYOUT_FIELD(file, layout, stripe_count),
+       XATTR_LAYOUT_FIELD(file, layout, object_size),
+       XATTR_LAYOUT_FIELD(file, layout, pool),
+       { .name = NULL, 0 }     /* Required table terminator */
 };
 static size_t ceph_file_vxattrs_name_size;     /* total size of all names */
 
@@ -164,7 +266,8 @@ static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs)
        size_t size = 0;
 
        for (vxattr = vxattrs; vxattr->name; vxattr++)
-               size += vxattr->name_size;
+               if (!vxattr->hidden)
+                       size += vxattr->name_size;
 
        return size;
 }
@@ -572,13 +675,17 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
        if (!ceph_is_valid_xattr(name))
                return -ENODATA;
 
-       /* let's see if a virtual xattr was requested */
-       vxattr = ceph_match_vxattr(inode, name);
-
        spin_lock(&ci->i_ceph_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
+       /* let's see if a virtual xattr was requested */
+       vxattr = ceph_match_vxattr(inode, name);
+       if (vxattr && !(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
+               err = vxattr->getxattr_cb(ci, value, size);
+               goto out;
+       }
+
        if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto get_xattr;
@@ -592,11 +699,6 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
 
        spin_lock(&ci->i_ceph_lock);
 
-       if (vxattr && vxattr->readonly) {
-               err = vxattr->getxattr_cb(ci, value, size);
-               goto out;
-       }
-
        err = __build_xattrs(inode);
        if (err < 0)
                goto out;
@@ -604,11 +706,8 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
 get_xattr:
        err = -ENODATA;  /* == ENOATTR */
        xattr = __get_xattr(ci, name);
-       if (!xattr) {
-               if (vxattr)
-                       err = vxattr->getxattr_cb(ci, value, size);
+       if (!xattr)
                goto out;
-       }
 
        err = -ERANGE;
        if (size && size < xattr->val_len)
@@ -664,23 +763,30 @@ list_xattr:
        vir_namelen = ceph_vxattrs_name_size(vxattrs);
 
        /* adding 1 byte per each variable due to the null termination */
-       namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
+       namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
        err = -ERANGE;
-       if (size && namelen > size)
+       if (size && vir_namelen + namelen > size)
                goto out;
 
-       err = namelen;
+       err = namelen + vir_namelen;
        if (size == 0)
                goto out;
 
        names = __copy_xattr_names(ci, names);
 
        /* virtual xattr names, too */
-       if (vxattrs)
+       err = namelen;
+       if (vxattrs) {
                for (i = 0; vxattrs[i].name; i++) {
-                       len = sprintf(names, "%s", vxattrs[i].name);
-                       names += len + 1;
+                       if (!vxattrs[i].hidden &&
+                           !(vxattrs[i].exists_cb &&
+                             !vxattrs[i].exists_cb(ci))) {
+                               len = sprintf(names, "%s", vxattrs[i].name);
+                               names += len + 1;
+                               err += len + 1;
+                       }
                }
+       }
 
 out:
        spin_unlock(&ci->i_ceph_lock);
@@ -782,6 +888,10 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
        if (vxattr && vxattr->readonly)
                return -EOPNOTSUPP;
 
+       /* pass any unhandled ceph.* xattrs through to the MDS */
+       if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
+               goto do_sync_unlocked;
+
        /* preallocate memory for xattr name, value, index node */
        err = -ENOMEM;
        newname = kmemdup(name, name_len + 1, GFP_NOFS);
@@ -838,6 +948,7 @@ retry:
 
 do_sync:
        spin_unlock(&ci->i_ceph_lock);
+do_sync_unlocked:
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
 out:
        kfree(newname);
@@ -892,6 +1003,10 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
        if (vxattr && vxattr->readonly)
                return -EOPNOTSUPP;
 
+       /* pass any unhandled ceph.* xattrs through to the MDS */
+       if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
+               goto do_sync_unlocked;
+
        err = -ENOMEM;
        spin_lock(&ci->i_ceph_lock);
 retry:
@@ -931,6 +1046,7 @@ retry:
        return err;
 do_sync:
        spin_unlock(&ci->i_ceph_lock);
+do_sync_unlocked:
        err = ceph_send_removexattr(dentry, name);
 out:
        return err;
index 9be09b2..1a052c0 100644 (file)
@@ -564,6 +564,11 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
                        dentry = ERR_PTR(-ENOENT);
                        break;
                }
+               if (!S_ISDIR(dir->i_mode)) {
+                       dput(dentry);
+                       dentry = ERR_PTR(-ENOTDIR);
+                       break;
+               }
 
                /* skip separators */
                while (*s == sep)
@@ -683,7 +688,7 @@ out_nls:
 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
 {
-       struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t written;
        int rc;
 
@@ -707,7 +712,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
         */
        if (whence != SEEK_SET && whence != SEEK_CUR) {
                int rc;
-               struct inode *inode = file->f_path.dentry->d_inode;
+               struct inode *inode = file_inode(file);
 
                /*
                 * We need to be sure that all dirty pages are written and the
@@ -739,7 +744,7 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
 {
        /* note that this is called by vfs setlease with lock_flocks held
           to protect *lease from going away */
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct cifsFileInfo *cfile = file->private_data;
 
        if (!(S_ISREG(inode->i_mode)))
index 00e12f2..7353bc5 100644 (file)
@@ -1909,8 +1909,11 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
        } while (rc == -EAGAIN);
 
        for (i = 0; i < wdata->nr_pages; i++) {
-               if (rc != 0)
+               if (rc != 0) {
                        SetPageError(wdata->pages[i]);
+                       end_page_writeback(wdata->pages[i]);
+                       page_cache_release(wdata->pages[i]);
+               }
                unlock_page(wdata->pages[i]);
        }
 
index 4474a57..54125e0 100644 (file)
@@ -1031,7 +1031,7 @@ static int cifs_parse_security_flavors(char *value,
 
        switch (match_token(value, cifs_secflavor_tokens, args)) {
        case Opt_sec_krb5:
-               vol->secFlg |= CIFSSEC_MAY_KRB5;
+               vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_SIGN;
                break;
        case Opt_sec_krb5i:
                vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;
index a8d8b58..8c0d855 100644 (file)
@@ -43,6 +43,7 @@
 #include "cifs_fs_sb.h"
 #include "fscache.h"
 
+
 static inline int cifs_convert_flags(unsigned int flags)
 {
        if ((flags & O_ACCMODE) == O_RDONLY)
@@ -72,10 +73,15 @@ static u32 cifs_posix_convert_flags(unsigned int flags)
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;
 
-       if (flags & O_CREAT)
+       if (flags & O_CREAT) {
                posix_flags |= SMB_O_CREAT;
-       if (flags & O_EXCL)
-               posix_flags |= SMB_O_EXCL;
+               if (flags & O_EXCL)
+                       posix_flags |= SMB_O_EXCL;
+       } else if (flags & O_EXCL)
+               cFYI(1, "Application %s pid %d has incorrectly set O_EXCL flag"
+                       "but not O_CREAT on file open. Ignoring O_EXCL",
+                       current->comm, current->tgid);
+
        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
@@ -947,7 +953,7 @@ static int
 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 {
        int rc = 0;
-       struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
+       struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        unsigned char saved_type = flock->fl_type;
 
        if ((flock->fl_flags & FL_POSIX) == 0)
@@ -974,7 +980,7 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
-       struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
+       struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        int rc = 1;
 
        if ((flock->fl_flags & FL_POSIX) == 0)
@@ -1548,7 +1554,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
 
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        netfid = cfile->fid.netfid;
-       cinode = CIFS_I(file->f_path.dentry->d_inode);
+       cinode = CIFS_I(file_inode(file));
 
        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
@@ -2171,7 +2177,7 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsFileInfo *smbfile = file->private_data;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 
        rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
@@ -2246,7 +2252,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
  */
 int cifs_flush(struct file *file, fl_owner_t id)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int rc = 0;
 
        if (file->f_mode & FMODE_WRITE)
@@ -2480,7 +2486,7 @@ ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
        ssize_t written;
        struct inode *inode;
 
-       inode = iocb->ki_filp->f_path.dentry->d_inode;
+       inode = file_inode(iocb->ki_filp);
 
        /*
         * BB - optimize the way when signing is disabled. We can drop this
@@ -2543,7 +2549,7 @@ ssize_t
 cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
                   unsigned long nr_segs, loff_t pos)
 {
-       struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(iocb->ki_filp);
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)
@@ -2915,7 +2921,7 @@ ssize_t
 cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
                  unsigned long nr_segs, loff_t pos)
 {
-       struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(iocb->ki_filp);
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)
@@ -3063,7 +3069,7 @@ static struct vm_operations_struct cifs_file_vm_ops = {
 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
 {
        int rc, xid;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        xid = get_xid();
 
@@ -3356,7 +3362,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
        int rc;
 
        /* Is the page cached? */
-       rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
+       rc = cifs_readpage_from_fscache(file_inode(file), page);
        if (rc == 0)
                goto read_complete;
 
@@ -3371,8 +3377,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
        else
                cFYI(1, "Bytes read %d", rc);
 
-       file->f_path.dentry->d_inode->i_atime =
-               current_fs_time(file->f_path.dentry->d_inode->i_sb);
+       file_inode(file)->i_atime =
+               current_fs_time(file_inode(file)->i_sb);
 
        if (PAGE_CACHE_SIZE > rc)
                memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
@@ -3381,7 +3387,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
        SetPageUptodate(page);
 
        /* send this page to the cache */
-       cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
+       cifs_readpage_to_fscache(file_inode(file), page);
 
        rc = 0;
 
index 9638233..83f2606 100644 (file)
@@ -299,7 +299,7 @@ cifs_get_file_info_unix(struct file *filp)
        unsigned int xid;
        FILE_UNIX_BASIC_INFO find_data;
        struct cifs_fattr fattr;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsFileInfo *cfile = filp->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
@@ -568,7 +568,7 @@ cifs_get_file_info(struct file *filp)
        unsigned int xid;
        FILE_ALL_INFO find_data;
        struct cifs_fattr fattr;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsFileInfo *cfile = filp->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
@@ -816,10 +816,9 @@ static bool
 inode_has_hashed_dentries(struct inode *inode)
 {
        struct dentry *dentry;
-       struct hlist_node *p;
 
        spin_lock(&inode->i_lock);
-       hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
                        spin_unlock(&inode->i_lock);
                        return true;
@@ -1688,7 +1687,7 @@ cifs_invalidate_mapping(struct inode *inode)
 int cifs_revalidate_file_attr(struct file *filp)
 {
        int rc = 0;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
 
        if (!cifs_inode_needs_reval(inode))
@@ -1745,7 +1744,7 @@ out:
 int cifs_revalidate_file(struct file *filp)
 {
        int rc;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        rc = cifs_revalidate_file_attr(filp);
        if (rc)
index fd5009d..6c9f121 100644 (file)
@@ -30,7 +30,7 @@
 
 long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 {
-       struct inode *inode = filep->f_dentry->d_inode;
+       struct inode *inode = file_inode(filep);
        int rc = -ENOTTY; /* strange error - but the precedent */
        unsigned int xid;
        struct cifs_sb_info *cifs_sb;
index cdd6ff4..df40cc5 100644 (file)
@@ -82,12 +82,10 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
 
        cFYI(1, "%s: for %s", __func__, name->name);
 
-       if (parent->d_op && parent->d_op->d_hash)
-               parent->d_op->d_hash(parent, parent->d_inode, name);
-       else
-               name->hash = full_name_hash(name->name, name->len);
+       dentry = d_hash_and_lookup(parent, name);
+       if (unlikely(IS_ERR(dentry)))
+               return;
 
-       dentry = d_lookup(parent, name);
        if (dentry) {
                int err;
 
@@ -505,7 +503,7 @@ static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode)
    whether we can use the cached search results from the previous search */
 static int is_dir_changed(struct file *file)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
 
        if (cifsInfo->time == 0)
@@ -778,7 +776,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
        switch ((int) file->f_pos) {
        case 0:
                if (filldir(direntry, ".", 1, file->f_pos,
-                    file->f_path.dentry->d_inode->i_ino, DT_DIR) < 0) {
+                    file_inode(file)->i_ino, DT_DIR) < 0) {
                        cERROR(1, "Filldir for current dir failed");
                        rc = -ENOMEM;
                        break;
index 49fe52d..b7d3a05 100644 (file)
@@ -397,7 +397,7 @@ static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
                 * We can't use vfs_readdir because we have to keep the file
                 * position in sync between the coda_file and the host_file.
                 * and as such we need grab the inode mutex. */
-               struct inode *host_inode = host_file->f_path.dentry->d_inode;
+               struct inode *host_inode = file_inode(host_file);
 
                mutex_lock(&host_inode->i_mutex);
                host_file->f_pos = coda_file->f_pos;
index 8edd404..fa4c100 100644 (file)
@@ -66,7 +66,7 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos,
 static ssize_t
 coda_file_write(struct file *coda_file, const char __user *buf, size_t count, loff_t *ppos)
 {
-       struct inode *host_inode, *coda_inode = coda_file->f_path.dentry->d_inode;
+       struct inode *host_inode, *coda_inode = file_inode(coda_file);
        struct coda_file_info *cfi;
        struct file *host_file;
        ssize_t ret;
@@ -78,7 +78,7 @@ coda_file_write(struct file *coda_file, const char __user *buf, size_t count, lo
        if (!host_file->f_op || !host_file->f_op->write)
                return -EINVAL;
 
-       host_inode = host_file->f_path.dentry->d_inode;
+       host_inode = file_inode(host_file);
        mutex_lock(&coda_inode->i_mutex);
 
        ret = host_file->f_op->write(host_file, buf, count, ppos);
@@ -106,8 +106,8 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
        if (!host_file->f_op || !host_file->f_op->mmap)
                return -ENODEV;
 
-       coda_inode = coda_file->f_path.dentry->d_inode;
-       host_inode = host_file->f_path.dentry->d_inode;
+       coda_inode = file_inode(coda_file);
+       host_inode = file_inode(host_file);
 
        cii = ITOC(coda_inode);
        spin_lock(&cii->c_lock);
@@ -178,7 +178,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
        err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
                          coda_flags, coda_file->f_cred->fsuid);
 
-       host_inode = cfi->cfi_container->f_path.dentry->d_inode;
+       host_inode = file_inode(cfi->cfi_container);
        cii = ITOC(coda_inode);
 
        /* did we mmap this file? */
@@ -202,7 +202,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
 int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
 {
        struct file *host_file;
-       struct inode *coda_inode = coda_file->f_path.dentry->d_inode;
+       struct inode *coda_inode = file_inode(coda_file);
        struct coda_file_info *cfi;
        int err;
 
index cf674e9..dada9d0 100644 (file)
@@ -130,7 +130,7 @@ static int get_device_index(struct coda_mount_data *data)
        f = fdget(data->fd);
        if (!f.file)
                goto Ebadf;
-       inode = f.file->f_path.dentry->d_inode;
+       inode = file_inode(f.file);
        if (!S_ISCHR(inode->i_mode) || imajor(inode) != CODA_PSDEV_MAJOR) {
                fdput(f);
                goto Ebadf;
index ee0981f..3f5de96 100644 (file)
@@ -52,7 +52,7 @@ static long coda_pioctl(struct file *filp, unsigned int cmd,
        struct path path;
        int error;
        struct PioctlData data;
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct inode *target_inode = NULL;
        struct coda_inode_info *cnp;
 
index e2f57a0..3ced75f 100644 (file)
@@ -1582,7 +1582,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
        case FIBMAP:
        case FIGETBSZ:
        case FIONREAD:
-               if (S_ISREG(f.file->f_path.dentry->d_inode->i_mode))
+               if (S_ISREG(file_inode(f.file)->i_mode))
                        break;
                /*FALL THROUGH*/
 
index e9dcfa3..7aabc6a 100644 (file)
@@ -1626,7 +1626,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
                        if (offset >= 0)
                                break;
                default:
-                       mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+                       mutex_unlock(&file_inode(file)->i_mutex);
                        return -EINVAL;
        }
        if (offset != file->f_pos) {
index 1774932..c647965 100644 (file)
@@ -411,7 +411,7 @@ static void wait_for_dump_helpers(struct file *file)
 {
        struct pipe_inode_info *pipe;
 
-       pipe = file->f_path.dentry->d_inode->i_pipe;
+       pipe = file_inode(file)->i_pipe;
 
        pipe_lock(pipe);
        pipe->readers++;
@@ -501,7 +501,7 @@ void do_coredump(siginfo_t *siginfo)
         * so we dump it as root in mode 2, and only into a controlled
         * environment (pipe handler or fully qualified path).
         */
-       if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
+       if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
                /* Setuid core dump mode */
                flag = O_EXCL;          /* Stop rewrite attacks */
                cred->fsuid = GLOBAL_ROOT_UID;  /* Dump root private */
@@ -600,7 +600,7 @@ void do_coredump(siginfo_t *siginfo)
                if (IS_ERR(cprm.file))
                        goto fail_unlock;
 
-               inode = cprm.file->f_path.dentry->d_inode;
+               inode = file_inode(cprm.file);
                if (inode->i_nlink > 1)
                        goto close_fail;
                if (d_unhashed(cprm.file->f_path.dentry))
index c6c3f91..3ceb9ec 100644 (file)
@@ -351,7 +351,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
  */
 static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        char *buf;
        unsigned int offset;
index 19153a0..fbfae00 100644 (file)
@@ -675,11 +675,10 @@ EXPORT_SYMBOL(dget_parent);
 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 {
        struct dentry *alias, *discon_alias;
-       struct hlist_node *p;
 
 again:
        discon_alias = NULL;
-       hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
                spin_lock(&alias->d_lock);
                if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
                        if (IS_ROOT(alias) &&
@@ -730,10 +729,9 @@ EXPORT_SYMBOL(d_find_alias);
 void d_prune_aliases(struct inode *inode)
 {
        struct dentry *dentry;
-       struct hlist_node *p;
 restart:
        spin_lock(&inode->i_lock);
-       hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                spin_lock(&dentry->d_lock);
                if (!dentry->d_count) {
                        __dget_dlock(dentry);
@@ -1358,6 +1356,7 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
        WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH  |
                                DCACHE_OP_COMPARE       |
                                DCACHE_OP_REVALIDATE    |
+                               DCACHE_OP_WEAK_REVALIDATE       |
                                DCACHE_OP_DELETE ));
        dentry->d_op = op;
        if (!op)
@@ -1368,6 +1367,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
                dentry->d_flags |= DCACHE_OP_COMPARE;
        if (op->d_revalidate)
                dentry->d_flags |= DCACHE_OP_REVALIDATE;
+       if (op->d_weak_revalidate)
+               dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
        if (op->d_delete)
                dentry->d_flags |= DCACHE_OP_DELETE;
        if (op->d_prune)
@@ -1440,14 +1441,13 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
        int len = entry->d_name.len;
        const char *name = entry->d_name.name;
        unsigned int hash = entry->d_name.hash;
-       struct hlist_node *p;
 
        if (!inode) {
                __d_instantiate(entry, NULL);
                return NULL;
        }
 
-       hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
                /*
                 * Don't need alias->d_lock here, because aliases with
                 * d_parent == entry->d_parent are not subject to name or
@@ -1672,7 +1672,6 @@ EXPORT_SYMBOL(d_splice_alias);
 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
                        struct qstr *name)
 {
-       int error;
        struct dentry *found;
        struct dentry *new;
 
@@ -1681,10 +1680,12 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
         * if not go ahead and create it now.
         */
        found = d_hash_and_lookup(dentry->d_parent, name);
+       if (unlikely(IS_ERR(found)))
+               goto err_out;
        if (!found) {
                new = d_alloc(dentry->d_parent, name);
                if (!new) {
-                       error = -ENOMEM;
+                       found = ERR_PTR(-ENOMEM);
                        goto err_out;
                }
 
@@ -1725,7 +1726,7 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
 
 err_out:
        iput(inode);
-       return ERR_PTR(error);
+       return found;
 }
 EXPORT_SYMBOL(d_add_ci);
 
@@ -1889,7 +1890,7 @@ seqretry:
  * dentry is returned. The caller must use dput to free the entry when it has
  * finished using it. %NULL is returned if the dentry does not exist.
  */
-struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
+struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
 {
        struct dentry *dentry;
        unsigned seq;
@@ -1919,7 +1920,7 @@ EXPORT_SYMBOL(d_lookup);
  *
  * __d_lookup callers must be commented.
  */
-struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
+struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
 {
        unsigned int len = name->len;
        unsigned int hash = name->hash;
@@ -1997,12 +1998,10 @@ next:
  * @dir: Directory to search in
  * @name: qstr of name we wish to find
  *
- * On hash failure or on lookup failure NULL is returned.
+ * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
  */
 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
 {
-       struct dentry *dentry = NULL;
-
        /*
         * Check for a fs-specific hash function. Note that we must
         * calculate the standard hash first, as the d_op->d_hash()
@@ -2010,13 +2009,13 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
         */
        name->hash = full_name_hash(name->name, name->len);
        if (dir->d_flags & DCACHE_OP_HASH) {
-               if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
-                       goto out;
+               int err = dir->d_op->d_hash(dir, dir->d_inode, name);
+               if (unlikely(err < 0))
+                       return ERR_PTR(err);
        }
-       dentry = d_lookup(dir, name);
-out:
-       return dentry;
+       return d_lookup(dir, name);
 }
+EXPORT_SYMBOL(d_hash_and_lookup);
 
 /**
  * d_validate - verify dentry provided from insecure source (deprecated)
@@ -2394,7 +2393,7 @@ out_err:
  */
 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
 {
-       struct dentry *dparent, *aparent;
+       struct dentry *dparent;
 
        dentry_lock_for_move(anon, dentry);
 
@@ -2402,24 +2401,15 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
        write_seqcount_begin(&anon->d_seq);
 
        dparent = dentry->d_parent;
-       aparent = anon->d_parent;
 
        switch_names(dentry, anon);
        swap(dentry->d_name.hash, anon->d_name.hash);
 
-       dentry->d_parent = (aparent == anon) ? dentry : aparent;
-       list_del(&dentry->d_u.d_child);
-       if (!IS_ROOT(dentry))
-               list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
-       else
-               INIT_LIST_HEAD(&dentry->d_u.d_child);
-
-       anon->d_parent = (dparent == dentry) ? anon : dparent;
+       dentry->d_parent = dentry;
+       list_del_init(&dentry->d_u.d_child);
+       anon->d_parent = dparent;
        list_del(&anon->d_u.d_child);
-       if (!IS_ROOT(anon))
-               list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
-       else
-               INIT_LIST_HEAD(&anon->d_u.d_child);
+       list_add(&anon->d_u.d_child, &dparent->d_subdirs);
 
        write_seqcount_end(&dentry->d_seq);
        write_seqcount_end(&anon->d_seq);
@@ -2722,37 +2712,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
 }
 EXPORT_SYMBOL(d_path);
 
-/**
- * d_path_with_unreachable - return the path of a dentry
- * @path: path to report
- * @buf: buffer to return value in
- * @buflen: buffer length
- *
- * The difference from d_path() is that this prepends "(unreachable)"
- * to paths which are unreachable from the current process' root.
- */
-char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
-{
-       char *res = buf + buflen;
-       struct path root;
-       int error;
-
-       if (path->dentry->d_op && path->dentry->d_op->d_dname)
-               return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
-
-       get_fs_root(current->fs, &root);
-       write_seqlock(&rename_lock);
-       error = path_with_deleted(path, &root, &res, &buflen);
-       if (error > 0)
-               error = prepend_unreachable(&res, &buflen);
-       write_sequnlock(&rename_lock);
-       path_put(&root);
-       if (error)
-               res =  ERR_PTR(error);
-
-       return res;
-}
-
 /*
  * Helper function for dentry_operations.d_dname() members
  */
@@ -3035,7 +2994,7 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
        ino_t ino = 0;
 
        dentry = d_hash_and_lookup(dir, name);
-       if (dentry) {
+       if (!IS_ERR_OR_NULL(dentry)) {
                if (dentry->d_inode)
                        ino = dentry->d_inode->i_ino;
                dput(dentry);
index cf5b44b..f853263 100644 (file)
@@ -261,9 +261,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
                dio->end_io(dio->iocb, offset, transferred,
                            dio->private, ret, is_async);
        } else {
+               inode_dio_done(dio->inode);
                if (is_async)
                        aio_complete(dio->iocb, ret, 0);
-               inode_dio_done(dio->inode);
        }
 
        return ret;
index a0387dd..7d58d5b 100644 (file)
@@ -158,7 +158,7 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
        unsigned int x;
 
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        x = simple_strtoul(buf, NULL, 0);
 
index f750165..1b11466 100644 (file)
@@ -1183,7 +1183,7 @@ static void detach_lkb(struct dlm_lkb *lkb)
 static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
 {
        struct dlm_lkb *lkb;
-       int rv, id;
+       int rv;
 
        lkb = dlm_allocate_lkb(ls);
        if (!lkb)
@@ -1199,19 +1199,13 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
        mutex_init(&lkb->lkb_cb_mutex);
        INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
 
- retry:
-       rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS);
-       if (!rv)
-               return -ENOMEM;
-
+       idr_preload(GFP_NOFS);
        spin_lock(&ls->ls_lkbidr_spin);
-       rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id);
-       if (!rv)
-               lkb->lkb_id = id;
+       rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
+       if (rv >= 0)
+               lkb->lkb_id = rv;
        spin_unlock(&ls->ls_lkbidr_spin);
-
-       if (rv == -EAGAIN)
-               goto retry;
+       idr_preload_end();
 
        if (rv < 0) {
                log_error(ls, "create_lkb idr error %d", rv);
index 2e99fb0..3ca79d3 100644 (file)
@@ -796,7 +796,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
         */
 
        idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
-       idr_remove_all(&ls->ls_lkbidr);
        idr_destroy(&ls->ls_lkbidr);
 
        /*
index dd87a31..4f5ad24 100644 (file)
@@ -177,12 +177,11 @@ static inline int nodeid_hash(int nodeid)
 static struct connection *__find_con(int nodeid)
 {
        int r;
-       struct hlist_node *h;
        struct connection *con;
 
        r = nodeid_hash(nodeid);
 
-       hlist_for_each_entry(con, h, &connection_hash[r], list) {
+       hlist_for_each_entry(con, &connection_hash[r], list) {
                if (con->nodeid == nodeid)
                        return con;
        }
@@ -232,13 +231,12 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
 static void foreach_conn(void (*conn_func)(struct connection *c))
 {
        int i;
-       struct hlist_node *h, *n;
+       struct hlist_node *n;
        struct connection *con;
 
        for (i = 0; i < CONN_HASH_SIZE; i++) {
-               hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
+               hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
                        conn_func(con);
-               }
        }
 }
 
@@ -257,13 +255,12 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
 static struct connection *assoc2con(int assoc_id)
 {
        int i;
-       struct hlist_node *h;
        struct connection *con;
 
        mutex_lock(&connections_lock);
 
        for (i = 0 ; i < CONN_HASH_SIZE; i++) {
-               hlist_for_each_entry(con, h, &connection_hash[i], list) {
+               hlist_for_each_entry(con, &connection_hash[i], list) {
                        if (con->sctp_assoc == assoc_id) {
                                mutex_unlock(&connections_lock);
                                return con;
index aedea28..a6bc63f 100644 (file)
@@ -305,27 +305,26 @@ static int recover_idr_empty(struct dlm_ls *ls)
 static int recover_idr_add(struct dlm_rsb *r)
 {
        struct dlm_ls *ls = r->res_ls;
-       int rv, id;
-
-       rv = idr_pre_get(&ls->ls_recover_idr, GFP_NOFS);
-       if (!rv)
-               return -ENOMEM;
+       int rv;
 
+       idr_preload(GFP_NOFS);
        spin_lock(&ls->ls_recover_idr_lock);
        if (r->res_id) {
-               spin_unlock(&ls->ls_recover_idr_lock);
-               return -1;
-       }
-       rv = idr_get_new_above(&ls->ls_recover_idr, r, 1, &id);
-       if (rv) {
-               spin_unlock(&ls->ls_recover_idr_lock);
-               return rv;
+               rv = -1;
+               goto out_unlock;
        }
-       r->res_id = id;
+       rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
+       if (rv < 0)
+               goto out_unlock;
+
+       r->res_id = rv;
        ls->ls_recover_list_count++;
        dlm_hold_rsb(r);
+       rv = 0;
+out_unlock:
        spin_unlock(&ls->ls_recover_idr_lock);
-       return 0;
+       idr_preload_end();
+       return rv;
 }
 
 static void recover_idr_del(struct dlm_rsb *r)
@@ -351,24 +350,21 @@ static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
        return r;
 }
 
-static int recover_idr_clear_rsb(int id, void *p, void *data)
+static void recover_idr_clear(struct dlm_ls *ls)
 {
-       struct dlm_ls *ls = data;
-       struct dlm_rsb *r = p;
+       struct dlm_rsb *r;
+       int id;
 
-       r->res_id = 0;
-       r->res_recover_locks_count = 0;
-       ls->ls_recover_list_count--;
+       spin_lock(&ls->ls_recover_idr_lock);
 
-       dlm_put_rsb(r);
-       return 0;
-}
+       idr_for_each_entry(&ls->ls_recover_idr, r, id) {
+               idr_remove(&ls->ls_recover_idr, id);
+               r->res_id = 0;
+               r->res_recover_locks_count = 0;
+               ls->ls_recover_list_count--;
 
-static void recover_idr_clear(struct dlm_ls *ls)
-{
-       spin_lock(&ls->ls_recover_idr_lock);
-       idr_for_each(&ls->ls_recover_idr, recover_idr_clear_rsb, ls);
-       idr_remove_all(&ls->ls_recover_idr);
+               dlm_put_rsb(r);
+       }
 
        if (ls->ls_recover_list_count != 0) {
                log_error(ls, "warning: recover_list_count %d",
index cfb4b9f..7e2c6f5 100644 (file)
@@ -509,6 +509,12 @@ ecryptfs_dentry_to_lower_mnt(struct dentry *dentry)
        return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.mnt;
 }
 
+static inline struct path *
+ecryptfs_dentry_to_lower_path(struct dentry *dentry)
+{
+       return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
+}
+
 static inline void
 ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt)
 {
index d45ba45..53acc9d 100644 (file)
@@ -118,7 +118,7 @@ static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir)
 
        lower_file = ecryptfs_file_to_lower(file);
        lower_file->f_pos = file->f_pos;
-       inode = file->f_path.dentry->d_inode;
+       inode = file_inode(file);
        memset(&buf, 0, sizeof(buf));
        buf.dirent = dirent;
        buf.dentry = file->f_path.dentry;
@@ -133,7 +133,7 @@ static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir)
                goto out;
        if (rc >= 0)
                fsstack_copy_attr_atime(inode,
-                                       lower_file->f_path.dentry->d_inode);
+                                       file_inode(lower_file));
 out:
        return rc;
 }
index cc7709e..e0f07fb 100644 (file)
@@ -1027,8 +1027,7 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
        struct kstat lower_stat;
        int rc;
 
-       rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
-                        ecryptfs_dentry_to_lower(dentry), &lower_stat);
+       rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat);
        if (!rc) {
                fsstack_copy_attr_all(dentry->d_inode,
                                      ecryptfs_inode_to_lower(dentry->d_inode));
index 5fa2471..8d7a577 100644 (file)
@@ -115,10 +115,9 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
  */
 int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
 {
-       struct hlist_node *elem;
        int rc;
 
-       hlist_for_each_entry(*daemon, elem,
+       hlist_for_each_entry(*daemon,
                            &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
                            euid_chain) {
                if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) {
@@ -445,7 +444,6 @@ void ecryptfs_release_messaging(void)
                mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
        }
        if (ecryptfs_daemon_hash) {
-               struct hlist_node *elem;
                struct ecryptfs_daemon *daemon;
                int i;
 
@@ -453,7 +451,7 @@ void ecryptfs_release_messaging(void)
                for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
                        int rc;
 
-                       hlist_for_each_entry(daemon, elem,
+                       hlist_for_each_entry(daemon,
                                             &ecryptfs_daemon_hash[i],
                                             euid_chain) {
                                rc = ecryptfs_exorcise_daemon(daemon);
index b2a34a1..6a16053 100644 (file)
@@ -40,16 +40,12 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
                         loff_t offset, size_t size)
 {
        struct file *lower_file;
-       mm_segment_t fs_save;
        ssize_t rc;
 
        lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
        if (!lower_file)
                return -EIO;
-       fs_save = get_fs();
-       set_fs(get_ds());
-       rc = vfs_write(lower_file, data, size, &offset);
-       set_fs(fs_save);
+       rc = kernel_write(lower_file, data, size, offset);
        mark_inode_dirty_sync(ecryptfs_inode);
        return rc;
 }
index 7ee6f7e..055a9e9 100644 (file)
@@ -20,7 +20,7 @@ const struct inode_operations efs_dir_inode_operations = {
 };
 
 static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct buffer_head *bh;
 
        struct efs_dir          *dirblock;
index 20df02c..a96a488 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -123,7 +123,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
                goto out;
 
        error = -EINVAL;
-       if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
+       if (!S_ISREG(file_inode(file)->i_mode))
                goto exit;
 
        error = -EACCES;
@@ -355,7 +355,7 @@ static bool valid_arg_len(struct linux_binprm *bprm, long len)
  * flags, permissions, and offset, so we use temporary values.  We'll update
  * them later in setup_arg_pages().
  */
-int bprm_mm_init(struct linux_binprm *bprm)
+static int bprm_mm_init(struct linux_binprm *bprm)
 {
        int err;
        struct mm_struct *mm = NULL;
@@ -764,7 +764,7 @@ struct file *open_exec(const char *name)
                goto out;
 
        err = -EACCES;
-       if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
+       if (!S_ISREG(file_inode(file)->i_mode))
                goto exit;
 
        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
@@ -1098,7 +1098,7 @@ EXPORT_SYMBOL(flush_old_exec);
 
 void would_dump(struct linux_binprm *bprm, struct file *file)
 {
-       if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
+       if (inode_permission(file_inode(file), MAY_READ) < 0)
                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
 }
 EXPORT_SYMBOL(would_dump);
@@ -1111,7 +1111,7 @@ void setup_new_exec(struct linux_binprm * bprm)
        current->sas_ss_sp = current->sas_ss_size = 0;
 
        if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
-               set_dumpable(current->mm, SUID_DUMPABLE_ENABLED);
+               set_dumpable(current->mm, SUID_DUMP_USER);
        else
                set_dumpable(current->mm, suid_dumpable);
 
@@ -1270,7 +1270,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
 int prepare_binprm(struct linux_binprm *bprm)
 {
        umode_t mode;
-       struct inode * inode = bprm->file->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(bprm->file);
        int retval;
 
        mode = inode->i_mode;
@@ -1639,17 +1639,17 @@ EXPORT_SYMBOL(set_binfmt);
 void set_dumpable(struct mm_struct *mm, int value)
 {
        switch (value) {
-       case SUID_DUMPABLE_DISABLED:
+       case SUID_DUMP_DISABLE:
                clear_bit(MMF_DUMPABLE, &mm->flags);
                smp_wmb();
                clear_bit(MMF_DUMP_SECURELY, &mm->flags);
                break;
-       case SUID_DUMPABLE_ENABLED:
+       case SUID_DUMP_USER:
                set_bit(MMF_DUMPABLE, &mm->flags);
                smp_wmb();
                clear_bit(MMF_DUMP_SECURELY, &mm->flags);
                break;
-       case SUID_DUMPABLE_SAFE:
+       case SUID_DUMP_ROOT:
                set_bit(MMF_DUMP_SECURELY, &mm->flags);
                smp_wmb();
                set_bit(MMF_DUMPABLE, &mm->flags);
@@ -1662,7 +1662,7 @@ int __get_dumpable(unsigned long mm_flags)
        int ret;
 
        ret = mm_flags & MMF_DUMPABLE_MASK;
-       return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
+       return (ret > SUID_DUMP_USER) ? SUID_DUMP_ROOT : ret;
 }
 
 int get_dumpable(struct mm_struct *mm)
index c61e62a..4637589 100644 (file)
@@ -242,7 +242,7 @@ static int
 exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
        loff_t pos = filp->f_pos;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
index 5df4bb4..262fc99 100644 (file)
@@ -44,14 +44,13 @@ find_acceptable_alias(struct dentry *result,
 {
        struct dentry *dentry, *toput = NULL;
        struct inode *inode;
-       struct hlist_node *p;
 
        if (acceptable(context, result))
                return result;
 
        inode = result->d_inode;
        spin_lock(&inode->i_lock);
-       hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                dget(dentry);
                spin_unlock(&inode->i_lock);
                if (toput)
index 2616d0e..9f9992b 100644 (file)
@@ -159,15 +159,6 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
        return bh;
 }
 
-static void release_blocks(struct super_block *sb, int count)
-{
-       if (count) {
-               struct ext2_sb_info *sbi = EXT2_SB(sb);
-
-               percpu_counter_add(&sbi->s_freeblocks_counter, count);
-       }
-}
-
 static void group_adjust_blocks(struct super_block *sb, int group_no,
        struct ext2_group_desc *desc, struct buffer_head *bh, int count)
 {
@@ -568,8 +559,11 @@ do_more:
        }
 error_return:
        brelse(bitmap_bh);
-       release_blocks(sb, freed);
-       dquot_free_block_nodirty(inode, freed);
+       if (freed) {
+               percpu_counter_add(&sbi->s_freeblocks_counter, freed);
+               dquot_free_block_nodirty(inode, freed);
+               mark_inode_dirty(inode);
+       }
 }
 
 /**
@@ -1239,10 +1233,6 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
 
        *errp = -ENOSPC;
        sb = inode->i_sb;
-       if (!sb) {
-               printk("ext2_new_blocks: nonexistent device");
-               return 0;
-       }
 
        /*
         * Check quota for allocation of this block.
@@ -1416,9 +1406,11 @@ allocated:
 
        *errp = 0;
        brelse(bitmap_bh);
-       dquot_free_block_nodirty(inode, *count-num);
-       mark_inode_dirty(inode);
-       *count = num;
+       if (num < *count) {
+               dquot_free_block_nodirty(inode, *count-num);
+               mark_inode_dirty(inode);
+               *count = num;
+       }
        return ret_block;
 
 io_error:
index 0f4f5c9..4237722 100644 (file)
@@ -290,7 +290,7 @@ static int
 ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
 {
        loff_t pos = filp->f_pos;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
index 6363ac6..c3881e5 100644 (file)
@@ -495,6 +495,10 @@ static int ext2_alloc_branch(struct inode *inode,
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+               if (unlikely(!bh)) {
+                       err = -ENOMEM;
+                       goto failed;
+               }
                branch[n].bh = bh;
                lock_buffer(bh);
                memset(bh->b_data, 0, blocksize);
@@ -523,6 +527,14 @@ static int ext2_alloc_branch(struct inode *inode,
        }
        *blks = num;
        return err;
+
+failed:
+       for (i = 1; i < n; i++)
+               bforget(branch[i].bh);
+       for (i = 0; i < indirect_blks; i++)
+               ext2_free_blocks(inode, new_blocks[i], 1);
+       ext2_free_blocks(inode, new_blocks[i], num);
+       return err;
 }
 
 /**
index 2de655f..5d46c09 100644 (file)
@@ -19,7 +19,7 @@
 
 long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ext2_inode_info *ei = EXT2_I(inode);
        unsigned int flags;
        unsigned short rsv_window_size;
index fa04d02..7f68c81 100644 (file)
@@ -1500,7 +1500,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
                        bh = sb_bread(sb, tmp_bh.b_blocknr);
                else
                        bh = sb_getblk(sb, tmp_bh.b_blocknr);
-               if (!bh) {
+               if (unlikely(!bh)) {
                        err = -EIO;
                        goto out;
                }
index b6754db..2d7557d 100644 (file)
@@ -662,10 +662,10 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
                        ea_idebug(inode, "creating block %d", block);
 
                        new_bh = sb_getblk(sb, block);
-                       if (!new_bh) {
+                       if (unlikely(!new_bh)) {
                                ext2_free_blocks(inode, block, 1);
                                mark_inode_dirty(inode);
-                               error = -EIO;
+                               error = -ENOMEM;
                                goto cleanup;
                        }
                        lock_buffer(new_bh);
index dd91264..87eccbb 100644 (file)
@@ -99,7 +99,7 @@ static int ext3_readdir(struct file * filp,
        int i, stored;
        struct ext3_dir_entry_2 *de;
        int err;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        int ret = 0;
        int dir_has_error = 0;
@@ -114,7 +114,7 @@ static int ext3_readdir(struct file * filp,
                 * We don't set the inode dirty flag since it's not
                 * critical that it get flushed back to the disk.
                 */
-               EXT3_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL;
+               EXT3_I(file_inode(filp))->i_flags &= ~EXT3_INDEX_FL;
        }
        stored = 0;
        offset = filp->f_pos & (sb->s_blocksize - 1);
@@ -457,7 +457,7 @@ static int call_filldir(struct file * filp, void * dirent,
 {
        struct dir_private_info *info = filp->private_data;
        loff_t  curr_pos;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block * sb;
        int error;
 
@@ -487,7 +487,7 @@ static int ext3_dx_readdir(struct file * filp,
                         void * dirent, filldir_t filldir)
 {
        struct dir_private_info *info = filp->private_data;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct fname *fname;
        int     ret;
 
index b176d42..d512c4b 100644 (file)
@@ -676,6 +676,10 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+               if (unlikely(!bh)) {
+                       err = -ENOMEM;
+                       goto failed;
+               }
                branch[n].bh = bh;
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
@@ -717,7 +721,7 @@ failed:
                BUFFER_TRACE(branch[i].bh, "call journal_forget");
                ext3_journal_forget(handle, branch[i].bh);
        }
-       for (i = 0; i <indirect_blks; i++)
+       for (i = 0; i < indirect_blks; i++)
                ext3_free_blocks(handle, inode, new_blocks[i], 1);
 
        ext3_free_blocks(handle, inode, new_blocks[i], num);
@@ -1078,8 +1082,8 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
        if (!err && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-               if (!bh) {
-                       *errp = -EIO;
+               if (unlikely(!bh)) {
+                       *errp = -ENOMEM;
                        goto err;
                }
                if (buffer_new(&dummy)) {
@@ -2729,12 +2733,12 @@ static int __ext3_get_inode_loc(struct inode *inode,
                return -EIO;
 
        bh = sb_getblk(inode->i_sb, block);
-       if (!bh) {
+       if (unlikely(!bh)) {
                ext3_error (inode->i_sb, "ext3_get_inode_loc",
                                "unable to read inode block - "
                                "inode=%lu, block="E3FSBLK,
                                 inode->i_ino, block);
-               return -EIO;
+               return -ENOMEM;
        }
        if (!buffer_uptodate(bh)) {
                lock_buffer(bh);
@@ -2783,7 +2787,7 @@ static int __ext3_get_inode_loc(struct inode *inode,
 
                        bitmap_bh = sb_getblk(inode->i_sb,
                                        le32_to_cpu(desc->bg_inode_bitmap));
-                       if (!bitmap_bh)
+                       if (unlikely(!bitmap_bh))
                                goto make_io;
 
                        /*
index 677a5c2..4d96e9a 100644 (file)
@@ -14,7 +14,7 @@
 
 long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ext3_inode_info *ei = EXT3_I(inode);
        unsigned int flags;
        unsigned short rsv_window_size;
index 890b894..692de13 100644 (file)
@@ -36,7 +36,6 @@
 #define NAMEI_RA_CHUNKS  2
 #define NAMEI_RA_BLOCKS  4
 #define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
 
 static struct buffer_head *ext3_append(handle_t *handle,
                                        struct inode *inode,
@@ -624,7 +623,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
 
        dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
                       start_minor_hash));
-       dir = dir_file->f_path.dentry->d_inode;
+       dir = file_inode(dir_file);
        if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
                hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
                if (hinfo.hash_version <= DX_HASH_TEA)
@@ -638,7 +637,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
        }
        hinfo.hash = start_hash;
        hinfo.minor_hash = 0;
-       frame = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, frames, &err);
+       frame = dx_probe(NULL, file_inode(dir_file), &hinfo, frames, &err);
        if (!frame)
                return err;
 
index 0f814f3..2710565 100644 (file)
@@ -116,8 +116,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
        int err;
 
        bh = sb_getblk(sb, blk);
-       if (!bh)
-               return ERR_PTR(-EIO);
+       if (unlikely(!bh))
+               return ERR_PTR(-ENOMEM);
        if ((err = ext3_journal_get_write_access(handle, bh))) {
                brelse(bh);
                bh = ERR_PTR(err);
@@ -234,8 +234,8 @@ static int setup_new_group_blocks(struct super_block *sb,
                        goto exit_bh;
 
                gdb = sb_getblk(sb, block);
-               if (!gdb) {
-                       err = -EIO;
+               if (unlikely(!gdb)) {
+                       err = -ENOMEM;
                        goto exit_bh;
                }
                if ((err = ext3_journal_get_write_access(handle, gdb))) {
@@ -722,8 +722,8 @@ static void update_backups(struct super_block *sb,
                        break;
 
                bh = sb_getblk(sb, group * bpg + blk_off);
-               if (!bh) {
-                       err = -EIO;
+               if (unlikely(!bh)) {
+                       err = -ENOMEM;
                        break;
                }
                ext3_debug("update metadata backup %#04lx\n",
index 4ba2683..5546ca2 100644 (file)
@@ -916,21 +916,24 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
                        "Not enough memory for storing quotafile name");
                return 0;
        }
-       if (sbi->s_qf_names[qtype] &&
-               strcmp(sbi->s_qf_names[qtype], qname)) {
-               ext3_msg(sb, KERN_ERR,
-                       "%s quota file already specified", QTYPE2NAME(qtype));
+       if (sbi->s_qf_names[qtype]) {
+               int same = !strcmp(sbi->s_qf_names[qtype], qname);
+
                kfree(qname);
-               return 0;
+               if (!same) {
+                       ext3_msg(sb, KERN_ERR,
+                                "%s quota file already specified",
+                                QTYPE2NAME(qtype));
+               }
+               return same;
        }
-       sbi->s_qf_names[qtype] = qname;
-       if (strchr(sbi->s_qf_names[qtype], '/')) {
+       if (strchr(qname, '/')) {
                ext3_msg(sb, KERN_ERR,
                        "quotafile must be on filesystem root");
-               kfree(sbi->s_qf_names[qtype]);
-               sbi->s_qf_names[qtype] = NULL;
+               kfree(qname);
                return 0;
        }
+       sbi->s_qf_names[qtype] = qname;
        set_opt(sbi->s_mount_opt, QUOTA);
        return 1;
 }
@@ -945,11 +948,10 @@ static int clear_qf_name(struct super_block *sb, int qtype) {
                        " when quota turned on");
                return 0;
        }
-       /*
-        * The space will be released later when all options are confirmed
-        * to be correct
-        */
-       sbi->s_qf_names[qtype] = NULL;
+       if (sbi->s_qf_names[qtype]) {
+               kfree(sbi->s_qf_names[qtype]);
+               sbi->s_qf_names[qtype] = NULL;
+       }
        return 1;
 }
 #endif
@@ -2606,7 +2608,18 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
 #ifdef CONFIG_QUOTA
        old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
        for (i = 0; i < MAXQUOTAS; i++)
-               old_opts.s_qf_names[i] = sbi->s_qf_names[i];
+               if (sbi->s_qf_names[i]) {
+                       old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
+                                                        GFP_KERNEL);
+                       if (!old_opts.s_qf_names[i]) {
+                               int j;
+
+                               for (j = 0; j < i; j++)
+                                       kfree(old_opts.s_qf_names[j]);
+                               return -ENOMEM;
+                       }
+               } else
+                       old_opts.s_qf_names[i] = NULL;
 #endif
 
        /*
@@ -2699,9 +2712,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
 #ifdef CONFIG_QUOTA
        /* Release old quota file names */
        for (i = 0; i < MAXQUOTAS; i++)
-               if (old_opts.s_qf_names[i] &&
-                   old_opts.s_qf_names[i] != sbi->s_qf_names[i])
-                       kfree(old_opts.s_qf_names[i]);
+               kfree(old_opts.s_qf_names[i]);
 #endif
        if (enable_quota)
                dquot_resume(sb, -1);
@@ -2715,9 +2726,7 @@ restore_opts:
 #ifdef CONFIG_QUOTA
        sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
        for (i = 0; i < MAXQUOTAS; i++) {
-               if (sbi->s_qf_names[i] &&
-                   old_opts.s_qf_names[i] != sbi->s_qf_names[i])
-                       kfree(sbi->s_qf_names[i]);
+               kfree(sbi->s_qf_names[i]);
                sbi->s_qf_names[i] = old_opts.s_qf_names[i];
        }
 #endif
index d22ebb7..b1fc963 100644 (file)
@@ -813,10 +813,10 @@ inserted:
                        ea_idebug(inode, "creating block %d", block);
 
                        new_bh = sb_getblk(sb, block);
-                       if (!new_bh) {
+                       if (unlikely(!new_bh)) {
 getblk_failed:
                                ext3_free_blocks(handle, inode, block, 1);
-                               error = -EIO;
+                               error = -ENOMEM;
                                goto cleanup;
                        }
                        lock_buffer(new_bh);
index e6e0d98..39a54a0 100644 (file)
@@ -324,8 +324,8 @@ ext4_acl_chmod(struct inode *inode)
        if (error)
                return error;
 retry:
-       handle = ext4_journal_start(inode,
-                       EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+       handle = ext4_journal_start(inode, EXT4_HT_XATTR,
+                                   ext4_jbd2_credits_xattr(inode));
        if (IS_ERR(handle)) {
                error = PTR_ERR(handle);
                ext4_std_error(inode->i_sb, error);
@@ -422,7 +422,8 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
                acl = NULL;
 
 retry:
-       handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+       handle = ext4_journal_start(inode, EXT4_HT_XATTR,
+                                   ext4_jbd2_credits_xattr(inode));
        if (IS_ERR(handle)) {
                error = PTR_ERR(handle);
                goto release_and_out;
index cf18217..92e68b3 100644 (file)
@@ -358,7 +358,7 @@ void ext4_validate_block_bitmap(struct super_block *sb,
 }
 
 /**
- * ext4_read_block_bitmap()
+ * ext4_read_block_bitmap_nowait()
  * @sb:                        super block
  * @block_group:       given block group
  *
@@ -457,6 +457,8 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
        struct buffer_head *bh;
 
        bh = ext4_read_block_bitmap_nowait(sb, block_group);
+       if (!bh)
+               return NULL;
        if (ext4_wait_block_bitmap(sb, block_group, bh)) {
                put_bh(bh);
                return NULL;
@@ -482,11 +484,16 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
 
        free_clusters  = percpu_counter_read_positive(fcc);
        dirty_clusters = percpu_counter_read_positive(dcc);
-       root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));
+
+       /*
+        * r_blocks_count should always be multiple of the cluster ratio so
+        * we are safe to do a plane bit shift only.
+        */
+       root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
 
        if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
                                        EXT4_FREECLUSTERS_WATERMARK) {
-               free_clusters  = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc));
+               free_clusters  = percpu_counter_sum_positive(fcc);
                dirty_clusters = percpu_counter_sum_positive(dcc);
        }
        /* Check whether we have space after accounting for current
@@ -628,7 +635,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
               ", computed = %llu, %llu\n",
-              EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
+              EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
               desc_count, bitmap_count);
        return bitmap_count;
 #else
index 80a28b2..d8cd1f0 100644 (file)
@@ -110,7 +110,7 @@ static int ext4_readdir(struct file *filp,
        int i, stored;
        struct ext4_dir_entry_2 *de;
        int err;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        int ret = 0;
        int dir_has_error = 0;
@@ -133,7 +133,7 @@ static int ext4_readdir(struct file *filp,
                 * We don't set the inode dirty flag since it's not
                 * critical that it get flushed back to the disk.
                 */
-               ext4_clear_inode_flag(filp->f_path.dentry->d_inode,
+               ext4_clear_inode_flag(file_inode(filp),
                                      EXT4_INODE_INDEX);
        }
        stored = 0;
@@ -185,6 +185,7 @@ static int ext4_readdir(struct file *filp,
                                        "at offset %llu",
                                        (unsigned long long)filp->f_pos);
                        filp->f_pos += sb->s_blocksize - offset;
+                       brelse(bh);
                        continue;
                }
                set_buffer_verified(bh);
@@ -333,7 +334,7 @@ static inline loff_t ext4_get_htree_eof(struct file *filp)
  *
  * For non-htree, ext4_llseek already chooses the proper max offset.
  */
-loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
+static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
 {
        struct inode *inode = file->f_mapping->host;
        int dx_dir = is_dx_dir(inode);
@@ -494,7 +495,7 @@ static int call_filldir(struct file *filp, void *dirent,
 {
        struct dir_private_info *info = filp->private_data;
        loff_t  curr_pos;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb;
        int error;
 
@@ -526,7 +527,7 @@ static int ext4_dx_readdir(struct file *filp,
                         void *dirent, filldir_t filldir)
 {
        struct dir_private_info *info = filp->private_data;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct fname *fname;
        int     ret;
 
index 8462eb3..4a01ba3 100644 (file)
@@ -194,8 +194,7 @@ struct mpage_da_data {
  */
 #define        EXT4_IO_END_UNWRITTEN   0x0001
 #define EXT4_IO_END_ERROR      0x0002
-#define EXT4_IO_END_QUEUED     0x0004
-#define EXT4_IO_END_DIRECT     0x0008
+#define EXT4_IO_END_DIRECT     0x0004
 
 struct ext4_io_page {
        struct page     *p_page;
@@ -215,10 +214,8 @@ typedef struct ext4_io_end {
        struct list_head        list;           /* per-file finished IO list */
        struct inode            *inode;         /* file being written to */
        unsigned int            flag;           /* unwritten or not */
-       struct page             *page;          /* for writepage() path */
        loff_t                  offset;         /* offset in the file */
        ssize_t                 size;           /* size of the extent */
-       struct work_struct      work;           /* data work queue */
        struct kiocb            *iocb;          /* iocb struct for AIO */
        int                     result;         /* error value for AIO */
        int                     num_io_pages;   /* for writepages() */
@@ -582,6 +579,8 @@ enum {
 #define EXT4_GET_BLOCKS_KEEP_SIZE              0x0080
        /* Do not take i_data_sem locking in ext4_map_blocks */
 #define EXT4_GET_BLOCKS_NO_LOCK                        0x0100
+       /* Do not put hole in extent cache */
+#define EXT4_GET_BLOCKS_NO_PUT_HOLE            0x0200
 
 /*
  * Flags used by ext4_free_blocks
@@ -810,17 +809,6 @@ do {                                                                              \
 
 #endif /* defined(__KERNEL__) || defined(__linux__) */
 
-/*
- * storage for cached extent
- * If ec_len == 0, then the cache is invalid.
- * If ec_start == 0, then the cache represents a gap (null mapping)
- */
-struct ext4_ext_cache {
-       ext4_fsblk_t    ec_start;
-       ext4_lblk_t     ec_block;
-       __u32           ec_len; /* must be 32bit to return holes */
-};
-
 #include "extents_status.h"
 
 /*
@@ -887,7 +875,6 @@ struct ext4_inode_info {
        struct inode vfs_inode;
        struct jbd2_inode *jinode;
 
-       struct ext4_ext_cache i_cached_extent;
        /*
         * File creation time. Its function is same as that of
         * struct timespec i_{a,c,m}time in the generic inode.
@@ -901,6 +888,8 @@ struct ext4_inode_info {
        /* extents status tree */
        struct ext4_es_tree i_es_tree;
        rwlock_t i_es_lock;
+       struct list_head i_es_lru;
+       unsigned int i_es_lru_nr;       /* protected by i_es_lock */
 
        /* ialloc */
        ext4_group_t    i_last_alloc_group;
@@ -930,6 +919,7 @@ struct ext4_inode_info {
        spinlock_t i_completed_io_lock;
        atomic_t i_ioend_count; /* Number of outstanding io_end structs */
        atomic_t i_unwritten; /* Nr. of inflight conversions pending */
+       struct work_struct i_unwritten_work;    /* deferred extent conversion */
 
        spinlock_t i_block_reservation_lock;
 
@@ -985,7 +975,6 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_DIOREAD_NOLOCK      0x400000 /* Enable support for dio read nolocking */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM    0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT        0x1000000 /* Journal Async Commit */
-#define EXT4_MOUNT_MBLK_IO_SUBMIT      0x4000000 /* multi-block io submits */
 #define EXT4_MOUNT_DELALLOC            0x8000000 /* Delalloc support */
 #define EXT4_MOUNT_DATA_ERR_ABORT      0x10000000 /* Abort on file data write */
 #define EXT4_MOUNT_BLOCK_VALIDITY      0x20000000 /* Block validity checking */
@@ -1316,6 +1305,12 @@ struct ext4_sb_info {
 
        /* Precomputed FS UUID checksum for seeding other checksums */
        __u32 s_csum_seed;
+
+       /* Reclaim extents from extent status tree */
+       struct shrinker s_es_shrinker;
+       struct list_head s_es_lru;
+       struct percpu_counter s_extent_cache_cnt;
+       spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -2007,9 +2002,20 @@ extern int ext4fs_dirhash(const char *name, int len, struct
                          dx_hash_info *hinfo);
 
 /* ialloc.c */
-extern struct inode *ext4_new_inode(handle_t *, struct inode *, umode_t,
-                                   const struct qstr *qstr, __u32 goal,
-                                   uid_t *owner);
+extern struct inode *__ext4_new_inode(handle_t *, struct inode *, umode_t,
+                                     const struct qstr *qstr, __u32 goal,
+                                     uid_t *owner, int handle_type,
+                                     unsigned int line_no, int nblocks);
+
+#define ext4_new_inode(handle, dir, mode, qstr, goal, owner) \
+       __ext4_new_inode((handle), (dir), (mode), (qstr), (goal), (owner), \
+                        0, 0, 0)
+#define ext4_new_inode_start_handle(dir, mode, qstr, goal, owner, \
+                                   type, nblocks)                  \
+       __ext4_new_inode(NULL, (dir), (mode), (qstr), (goal), (owner), \
+                        (type), __LINE__, (nblocks))
+
+
 extern void ext4_free_inode(handle_t *, struct inode *);
 extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
 extern unsigned long ext4_count_free_inodes(struct super_block *);
@@ -2103,6 +2109,7 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk);
 extern void ext4_ind_truncate(struct inode *inode);
+extern int ext4_ind_punch_hole(struct file *file, loff_t offset, loff_t length);
 
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
@@ -2151,6 +2158,8 @@ extern void *ext4_kvzalloc(size_t size, gfp_t flags);
 extern void ext4_kvfree(void *ptr);
 extern int ext4_alloc_flex_bg_array(struct super_block *sb,
                                    ext4_group_t ngroup);
+extern const char *ext4_decode_error(struct super_block *sb, int errno,
+                                    char nbuf[16]);
 extern __printf(4, 5)
 void __ext4_error(struct super_block *, const char *, unsigned int,
                  const char *, ...);
@@ -2227,6 +2236,8 @@ extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group,
                                       struct ext4_group_desc *gdp);
 extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
                                     struct ext4_group_desc *gdp);
+extern int ext4_register_li_request(struct super_block *sb,
+                                   ext4_group_t first_not_zeroed);
 
 static inline int ext4_has_group_desc_csum(struct super_block *sb)
 {
@@ -2454,6 +2465,75 @@ extern const struct file_operations ext4_file_operations;
 extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
 extern void ext4_unwritten_wait(struct inode *inode);
 
+/* inline.c */
+extern int ext4_has_inline_data(struct inode *inode);
+extern int ext4_get_inline_size(struct inode *inode);
+extern int ext4_get_max_inline_size(struct inode *inode);
+extern int ext4_find_inline_data_nolock(struct inode *inode);
+extern void ext4_write_inline_data(struct inode *inode,
+                                  struct ext4_iloc *iloc,
+                                  void *buffer, loff_t pos,
+                                  unsigned int len);
+extern int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+                                   unsigned int len);
+extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
+                                unsigned int len);
+extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
+
+extern int ext4_readpage_inline(struct inode *inode, struct page *page);
+extern int ext4_try_to_write_inline_data(struct address_space *mapping,
+                                        struct inode *inode,
+                                        loff_t pos, unsigned len,
+                                        unsigned flags,
+                                        struct page **pagep);
+extern int ext4_write_inline_data_end(struct inode *inode,
+                                     loff_t pos, unsigned len,
+                                     unsigned copied,
+                                     struct page *page);
+extern struct buffer_head *
+ext4_journalled_write_inline_data(struct inode *inode,
+                                 unsigned len,
+                                 struct page *page);
+extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
+                                          struct inode *inode,
+                                          loff_t pos, unsigned len,
+                                          unsigned flags,
+                                          struct page **pagep,
+                                          void **fsdata);
+extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
+                                        unsigned len, unsigned copied,
+                                        struct page *page);
+extern int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
+                                    struct inode *inode);
+extern int ext4_try_create_inline_dir(handle_t *handle,
+                                     struct inode *parent,
+                                     struct inode *inode);
+extern int ext4_read_inline_dir(struct file *filp,
+                               void *dirent, filldir_t filldir,
+                               int *has_inline_data);
+extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+                                       const struct qstr *d_name,
+                                       struct ext4_dir_entry_2 **res_dir,
+                                       int *has_inline_data);
+extern int ext4_delete_inline_entry(handle_t *handle,
+                                   struct inode *dir,
+                                   struct ext4_dir_entry_2 *de_del,
+                                   struct buffer_head *bh,
+                                   int *has_inline_data);
+extern int empty_inline_dir(struct inode *dir, int *has_inline_data);
+extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
+                                       struct ext4_dir_entry_2 **parent_de,
+                                       int *retval);
+extern int ext4_inline_data_fiemap(struct inode *inode,
+                                  struct fiemap_extent_info *fieinfo,
+                                  int *has_inline);
+extern int ext4_try_to_evict_inline_data(handle_t *handle,
+                                        struct inode *inode,
+                                        int needed);
+extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
+
+extern int ext4_convert_inline_data(struct inode *inode);
+
 /* namei.c */
 extern const struct inode_operations ext4_dir_inode_operations;
 extern const struct inode_operations ext4_special_inode_operations;
@@ -2520,6 +2600,9 @@ extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
                                                  struct ext4_ext_path *);
 extern void ext4_ext_drop_refs(struct ext4_ext_path *);
 extern int ext4_ext_check_inode(struct inode *inode);
+extern int ext4_find_delalloc_range(struct inode *inode,
+                                   ext4_lblk_t lblk_start,
+                                   ext4_lblk_t lblk_end);
 extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
 extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        __u64 start, __u64 len);
@@ -2537,6 +2620,7 @@ extern void ext4_exit_pageio(void);
 extern void ext4_ioend_wait(struct inode *);
 extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
+extern void ext4_end_io_work(struct work_struct *work);
 extern void ext4_io_submit(struct ext4_io_submit *io);
 extern int ext4_bio_write_page(struct ext4_io_submit *io,
                               struct page *page,
index 487fda1..8643ff5 100644 (file)
@@ -193,12 +193,6 @@ static inline unsigned short ext_depth(struct inode *inode)
        return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
 }
 
-static inline void
-ext4_ext_invalidate_cache(struct inode *inode)
-{
-       EXT4_I(inode)->i_cached_extent.ec_len = 0;
-}
-
 static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
 {
        /* We can not have an uninitialized extent of zero length! */
index b4323ba..7058975 100644 (file)
@@ -6,6 +6,108 @@
 
 #include <trace/events/ext4.h>
 
+/* Just increment the non-pointer handle value */
+static handle_t *ext4_get_nojournal(void)
+{
+       handle_t *handle = current->journal_info;
+       unsigned long ref_cnt = (unsigned long)handle;
+
+       BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
+
+       ref_cnt++;
+       handle = (handle_t *)ref_cnt;
+
+       current->journal_info = handle;
+       return handle;
+}
+
+
+/* Decrement the non-pointer handle value */
+static void ext4_put_nojournal(handle_t *handle)
+{
+       unsigned long ref_cnt = (unsigned long)handle;
+
+       BUG_ON(ref_cnt == 0);
+
+       ref_cnt--;
+       handle = (handle_t *)ref_cnt;
+
+       current->journal_info = handle;
+}
+
+/*
+ * Wrappers for jbd2_journal_start/end.
+ */
+handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
+                                 int type, int nblocks)
+{
+       journal_t *journal;
+
+       trace_ext4_journal_start(sb, nblocks, _RET_IP_);
+       if (sb->s_flags & MS_RDONLY)
+               return ERR_PTR(-EROFS);
+
+       WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
+       journal = EXT4_SB(sb)->s_journal;
+       if (!journal)
+               return ext4_get_nojournal();
+       /*
+        * Special case here: if the journal has aborted behind our
+        * backs (eg. EIO in the commit thread), then we still need to
+        * take the FS itself readonly cleanly.
+        */
+       if (is_journal_aborted(journal)) {
+               ext4_abort(sb, "Detected aborted journal");
+               return ERR_PTR(-EROFS);
+       }
+       return jbd2__journal_start(journal, nblocks, GFP_NOFS, type, line);
+}
+
+int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
+{
+       struct super_block *sb;
+       int err;
+       int rc;
+
+       if (!ext4_handle_valid(handle)) {
+               ext4_put_nojournal(handle);
+               return 0;
+       }
+       sb = handle->h_transaction->t_journal->j_private;
+       err = handle->h_err;
+       rc = jbd2_journal_stop(handle);
+
+       if (!err)
+               err = rc;
+       if (err)
+               __ext4_std_error(sb, where, line, err);
+       return err;
+}
+
+void ext4_journal_abort_handle(const char *caller, unsigned int line,
+                              const char *err_fn, struct buffer_head *bh,
+                              handle_t *handle, int err)
+{
+       char nbuf[16];
+       const char *errstr = ext4_decode_error(NULL, err, nbuf);
+
+       BUG_ON(!ext4_handle_valid(handle));
+
+       if (bh)
+               BUFFER_TRACE(bh, "abort");
+
+       if (!handle->h_err)
+               handle->h_err = err;
+
+       if (is_handle_aborted(handle))
+               return;
+
+       printk(KERN_ERR "EXT4-fs: %s:%d: aborting transaction: %s in %s\n",
+              caller, line, errstr, err_fn);
+
+       jbd2_journal_abort_handle(handle);
+}
+
 int __ext4_journal_get_write_access(const char *where, unsigned int line,
                                    handle_t *handle, struct buffer_head *bh)
 {
index 7177f9b..4c216b1 100644 (file)
 #define EXT4_META_TRANS_BLOCKS(sb)     (EXT4_XATTR_TRANS_BLOCKS + \
                                        EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))
 
-/* Delete operations potentially hit one directory's namespace plus an
- * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
- * generous.  We can grow the delete transaction later if necessary. */
-
-#define EXT4_DELETE_TRANS_BLOCKS(sb)   (2 * EXT4_DATA_TRANS_BLOCKS(sb) + 64)
-
 /* Define an arbitrary limit for the amount of data we will anticipate
  * writing to any given transaction.  For unbounded transactions such as
  * write(2) and truncate(2) we can write more than this, but we always
 #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
 #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
 
+static inline int ext4_jbd2_credits_xattr(struct inode *inode)
+{
+       int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);
+
+       /*
+        * In case of inline data, we may push out the data to a block,
+        * so we need to reserve credits for this eventuality
+        */
+       if (ext4_has_inline_data(inode))
+               credits += ext4_writepage_trans_blocks(inode) + 1;
+       return credits;
+}
+
+
+/*
+ * Ext4 handle operation types -- for logging purposes
+ */
+#define EXT4_HT_MISC             0
+#define EXT4_HT_INODE            1
+#define EXT4_HT_WRITE_PAGE       2
+#define EXT4_HT_MAP_BLOCKS       3
+#define EXT4_HT_DIR              4
+#define EXT4_HT_TRUNCATE         5
+#define EXT4_HT_QUOTA            6
+#define EXT4_HT_RESIZE           7
+#define EXT4_HT_MIGRATE          8
+#define EXT4_HT_MOVE_EXTENTS     9
+#define EXT4_HT_XATTR           10
+#define EXT4_HT_MAX             11
+
 /**
  *   struct ext4_journal_cb_entry - Base structure for callback information.
  *
@@ -234,7 +258,8 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
 #define ext4_handle_dirty_super(handle, sb) \
        __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
 
-handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
+handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
+                                 int type, int nblocks);
 int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
 
 #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -268,9 +293,17 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
        return 1;
 }
 
-static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
+#define ext4_journal_start_sb(sb, type, nblocks)                       \
+       __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks))
+
+#define ext4_journal_start(inode, type, nblocks)                       \
+       __ext4_journal_start((inode), __LINE__, (type), (nblocks))
+
+static inline handle_t *__ext4_journal_start(struct inode *inode,
+                                            unsigned int line, int type,
+                                            int nblocks)
 {
-       return ext4_journal_start_sb(inode->i_sb, nblocks);
+       return __ext4_journal_start_sb(inode->i_sb, line, type, nblocks);
 }
 
 #define ext4_journal_stop(handle) \
index 5ae1674..28dd8ee 100644 (file)
@@ -112,7 +112,7 @@ static int ext4_split_extent_at(handle_t *handle,
                             int flags);
 
 static int ext4_find_delayed_extent(struct inode *inode,
-                                   struct ext4_ext_cache *newex);
+                                   struct extent_status *newes);
 
 static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
@@ -714,7 +714,6 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
-       ext4_ext_invalidate_cache(inode);
        return 0;
 }
 
@@ -725,6 +724,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;
+       int ret;
 
        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);
@@ -752,12 +752,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                path[ppos].p_ext = NULL;
 
                bh = sb_getblk(inode->i_sb, path[ppos].p_block);
-               if (unlikely(!bh))
+               if (unlikely(!bh)) {
+                       ret = -ENOMEM;
                        goto err;
+               }
                if (!bh_uptodate_or_lock(bh)) {
                        trace_ext4_ext_load_extent(inode, block,
                                                path[ppos].p_block);
-                       if (bh_submit_read(bh) < 0) {
+                       ret = bh_submit_read(bh);
+                       if (ret < 0) {
                                put_bh(bh);
                                goto err;
                        }
@@ -768,13 +771,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
+                       ret = -EIO;
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;
 
-               if (ext4_ext_check_block(inode, eh, i, bh))
+               ret = ext4_ext_check_block(inode, eh, i, bh);
+               if (ret < 0)
                        goto err;
        }
 
@@ -796,7 +801,7 @@ err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
-       return ERR_PTR(-EIO);
+       return ERR_PTR(ret);
 }
 
 /*
@@ -950,8 +955,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                goto cleanup;
        }
        bh = sb_getblk(inode->i_sb, newblock);
-       if (!bh) {
-               err = -EIO;
+       if (unlikely(!bh)) {
+               err = -ENOMEM;
                goto cleanup;
        }
        lock_buffer(bh);
@@ -1023,8 +1028,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
-               if (!bh) {
-                       err = -EIO;
+               if (unlikely(!bh)) {
+                       err = -ENOMEM;
                        goto cleanup;
                }
                lock_buffer(bh);
@@ -1136,11 +1141,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                return err;
 
        bh = sb_getblk(inode->i_sb, newblock);
-       if (!bh) {
-               err = -EIO;
-               ext4_std_error(inode->i_sb, err);
-               return err;
-       }
+       if (unlikely(!bh))
+               return -ENOMEM;
        lock_buffer(bh);
 
        err = ext4_journal_get_create_access(handle, bh);
@@ -1960,7 +1962,6 @@ cleanup:
                ext4_ext_drop_refs(npath);
                kfree(npath);
        }
-       ext4_ext_invalidate_cache(inode);
        return err;
 }
 
@@ -1969,8 +1970,8 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
                                    struct fiemap_extent_info *fieinfo)
 {
        struct ext4_ext_path *path = NULL;
-       struct ext4_ext_cache newex;
        struct ext4_extent *ex;
+       struct extent_status es;
        ext4_lblk_t next, next_del, start = 0, end = 0;
        ext4_lblk_t last = block + num;
        int exists, depth = 0, err = 0;
@@ -2044,37 +2045,47 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
                BUG_ON(end <= start);
 
                if (!exists) {
-                       newex.ec_block = start;
-                       newex.ec_len = end - start;
-                       newex.ec_start = 0;
+                       es.es_lblk = start;
+                       es.es_len = end - start;
+                       es.es_pblk = 0;
                } else {
-                       newex.ec_block = le32_to_cpu(ex->ee_block);
-                       newex.ec_len = ext4_ext_get_actual_len(ex);
-                       newex.ec_start = ext4_ext_pblock(ex);
+                       es.es_lblk = le32_to_cpu(ex->ee_block);
+                       es.es_len = ext4_ext_get_actual_len(ex);
+                       es.es_pblk = ext4_ext_pblock(ex);
                        if (ext4_ext_is_uninitialized(ex))
                                flags |= FIEMAP_EXTENT_UNWRITTEN;
                }
 
                /*
-                * Find delayed extent and update newex accordingly. We call
-                * it even in !exists case to find out whether newex is the
+                * Find delayed extent and update es accordingly. We call
+                * it even in !exists case to find out whether es is the
                 * last existing extent or not.
                 */
-               next_del = ext4_find_delayed_extent(inode, &newex);
+               next_del = ext4_find_delayed_extent(inode, &es);
                if (!exists && next_del) {
                        exists = 1;
                        flags |= FIEMAP_EXTENT_DELALLOC;
                }
                up_read(&EXT4_I(inode)->i_data_sem);
 
-               if (unlikely(newex.ec_len == 0)) {
-                       EXT4_ERROR_INODE(inode, "newex.ec_len == 0");
+               if (unlikely(es.es_len == 0)) {
+                       EXT4_ERROR_INODE(inode, "es.es_len == 0");
                        err = -EIO;
                        break;
                }
 
-               /* This is possible iff next == next_del == EXT_MAX_BLOCKS */
-               if (next == next_del) {
+               /*
+                * This is possible iff next == next_del == EXT_MAX_BLOCKS.
+                * We need to check next == EXT_MAX_BLOCKS because it is
+                * possible that an extent carries both unwritten and delayed
+                * status: when an extent is delayed allocated and then
+                * allocated by fallocate, the status tree will track both
+                * statuses in one extent.
+                *
+                * So we could return an unwritten and delayed extent, and
+                * its block is equal to 'next'.
+                */
+               if (next == next_del && next == EXT_MAX_BLOCKS) {
                        flags |= FIEMAP_EXTENT_LAST;
                        if (unlikely(next_del != EXT_MAX_BLOCKS ||
                                     next != EXT_MAX_BLOCKS)) {
@@ -2089,9 +2100,9 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
 
                if (exists) {
                        err = fiemap_fill_next_extent(fieinfo,
-                               (__u64)newex.ec_block << blksize_bits,
-                               (__u64)newex.ec_start << blksize_bits,
-                               (__u64)newex.ec_len << blksize_bits,
+                               (__u64)es.es_lblk << blksize_bits,
+                               (__u64)es.es_pblk << blksize_bits,
+                               (__u64)es.es_len << blksize_bits,
                                flags);
                        if (err < 0)
                                break;
@@ -2101,7 +2112,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
                        }
                }
 
-               block = newex.ec_block + newex.ec_len;
+               block = es.es_lblk + es.es_len;
        }
 
        if (path) {
@@ -2112,21 +2123,6 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
        return err;
 }
 
-static void
-ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
-                       __u32 len, ext4_fsblk_t start)
-{
-       struct ext4_ext_cache *cex;
-       BUG_ON(len == 0);
-       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-       trace_ext4_ext_put_in_cache(inode, block, len, start);
-       cex = &EXT4_I(inode)->i_cached_extent;
-       cex->ec_block = block;
-       cex->ec_len = len;
-       cex->ec_start = start;
-       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
-}
-
 /*
  * ext4_ext_put_gap_in_cache:
  * calculate boundaries of the gap that the requested block fits into
@@ -2143,9 +2139,10 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 
        ex = path[depth].p_ext;
        if (ex == NULL) {
-               /* there is no extent yet, so gap is [0;-] */
-               lblock = 0;
-               len = EXT_MAX_BLOCKS;
+               /*
+                * there is no extent yet, so gap is [0;-] and we
+                * don't cache it
+                */
                ext_debug("cache gap(whole file):");
        } else if (block < le32_to_cpu(ex->ee_block)) {
                lblock = block;
@@ -2154,6 +2151,9 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
                                block,
                                le32_to_cpu(ex->ee_block),
                                 ext4_ext_get_actual_len(ex));
+               if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
+                       ext4_es_insert_extent(inode, lblock, len, ~0,
+                                             EXTENT_STATUS_HOLE);
        } else if (block >= le32_to_cpu(ex->ee_block)
                        + ext4_ext_get_actual_len(ex)) {
                ext4_lblk_t next;
@@ -2167,58 +2167,15 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
                                block);
                BUG_ON(next == lblock);
                len = next - lblock;
+               if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
+                       ext4_es_insert_extent(inode, lblock, len, ~0,
+                                             EXTENT_STATUS_HOLE);
        } else {
                lblock = len = 0;
                BUG();
        }
 
        ext_debug(" -> %u:%lu\n", lblock, len);
-       ext4_ext_put_in_cache(inode, lblock, len, 0);
-}
-
-/*
- * ext4_ext_in_cache()
- * Checks to see if the given block is in the cache.
- * If it is, the cached extent is stored in the given
- * cache extent pointer.
- *
- * @inode: The files inode
- * @block: The block to look for in the cache
- * @ex:    Pointer where the cached extent will be stored
- *         if it contains block
- *
- * Return 0 if cache is invalid; 1 if the cache is valid
- */
-static int
-ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
-                 struct ext4_extent *ex)
-{
-       struct ext4_ext_cache *cex;
-       int ret = 0;
-
-       /*
-        * We borrow i_block_reservation_lock to protect i_cached_extent
-        */
-       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-       cex = &EXT4_I(inode)->i_cached_extent;
-
-       /* has cache valid data? */
-       if (cex->ec_len == 0)
-               goto errout;
-
-       if (in_range(block, cex->ec_block, cex->ec_len)) {
-               ex->ee_block = cpu_to_le32(cex->ec_block);
-               ext4_ext_store_pblock(ex, cex->ec_start);
-               ex->ee_len = cpu_to_le16(cex->ec_len);
-               ext_debug("%u cached by %u:%u:%llu\n",
-                               block,
-                               cex->ec_block, cex->ec_len, cex->ec_start);
-               ret = 1;
-       }
-errout:
-       trace_ext4_ext_in_cache(inode, block, ret);
-       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
-       return ret;
 }
 
 /*
@@ -2653,13 +2610,11 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
        ext_debug("truncate since %u to %u\n", start, end);
 
        /* probably first extent we're gonna free will be last in block */
-       handle = ext4_journal_start(inode, depth + 1);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
 again:
-       ext4_ext_invalidate_cache(inode);
-
        trace_ext4_ext_remove_space(inode, start, depth);
 
        /*
@@ -3519,19 +3474,19 @@ out:
  *
  * Return 1 if there is a delalloc block in the range, otherwise 0.
  */
-static int ext4_find_delalloc_range(struct inode *inode,
-                                   ext4_lblk_t lblk_start,
-                                   ext4_lblk_t lblk_end)
+int ext4_find_delalloc_range(struct inode *inode,
+                            ext4_lblk_t lblk_start,
+                            ext4_lblk_t lblk_end)
 {
        struct extent_status es;
 
-       es.start = lblk_start;
-       ext4_es_find_extent(inode, &es);
-       if (es.len == 0)
+       ext4_es_find_delayed_extent(inode, lblk_start, &es);
+       if (es.es_len == 0)
                return 0; /* there is no delay extent in this tree */
-       else if (es.start <= lblk_start && lblk_start < es.start + es.len)
+       else if (es.es_lblk <= lblk_start &&
+                lblk_start < es.es_lblk + es.es_len)
                return 1;
-       else if (lblk_start <= es.start && es.start <= lblk_end)
+       else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
                return 1;
        else
                return 0;
@@ -3656,6 +3611,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                        ext4_set_io_unwritten_flag(inode, io);
                else
                        ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+               map->m_flags |= EXT4_MAP_UNWRITTEN;
                if (ext4_should_dioread_nolock(inode))
                        map->m_flags |= EXT4_MAP_UNINIT;
                goto out;
@@ -3677,8 +3633,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
         * repeat fallocate creation request
         * we already have an unwritten extent
         */
-       if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
+       if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
+               map->m_flags |= EXT4_MAP_UNWRITTEN;
                goto map_out;
+       }
 
        /* buffered READ or buffered write_begin() lookup */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -3898,35 +3856,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                  map->m_lblk, map->m_len, inode->i_ino);
        trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
 
-       /* check in cache */
-       if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
-               if (!newex.ee_start_lo && !newex.ee_start_hi) {
-                       if ((sbi->s_cluster_ratio > 1) &&
-                           ext4_find_delalloc_cluster(inode, map->m_lblk))
-                               map->m_flags |= EXT4_MAP_FROM_CLUSTER;
-
-                       if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
-                               /*
-                                * block isn't allocated yet and
-                                * user doesn't want to allocate it
-                                */
-                               goto out2;
-                       }
-                       /* we should allocate requested block */
-               } else {
-                       /* block is already allocated */
-                       if (sbi->s_cluster_ratio > 1)
-                               map->m_flags |= EXT4_MAP_FROM_CLUSTER;
-                       newblock = map->m_lblk
-                                  - le32_to_cpu(newex.ee_block)
-                                  + ext4_ext_pblock(&newex);
-                       /* number of remaining blocks in the extent */
-                       allocated = ext4_ext_get_actual_len(&newex) -
-                               (map->m_lblk - le32_to_cpu(newex.ee_block));
-                       goto out;
-               }
-       }
-
        /* find extent for this block */
        path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
        if (IS_ERR(path)) {
@@ -3973,15 +3902,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                        ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
                                  ee_block, ee_len, newblock);
 
-                       /*
-                        * Do not put uninitialized extent
-                        * in the cache
-                        */
-                       if (!ext4_ext_is_uninitialized(ex)) {
-                               ext4_ext_put_in_cache(inode, ee_block,
-                                       ee_len, ee_start);
+                       if (!ext4_ext_is_uninitialized(ex))
                                goto out;
-                       }
+
                        allocated = ext4_ext_handle_uninitialized_extents(
                                handle, inode, map, path, flags,
                                allocated, newblock);
@@ -4002,7 +3925,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                 * put just found gap into cache to speed up
                 * subsequent requests
                 */
-               ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
+               if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
+                       ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
                goto out2;
        }
 
@@ -4108,6 +4032,7 @@ got_allocated_blocks:
        /* Mark uninitialized */
        if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
                ext4_ext_mark_uninitialized(&newex);
+               map->m_flags |= EXT4_MAP_UNWRITTEN;
                /*
                 * io_end structure was created for every IO write to an
                 * uninitialized extent. To avoid unnecessary conversion,
@@ -4241,10 +4166,9 @@ got_allocated_blocks:
         * Cache the extent and update transaction to commit on fdatasync only
         * when it is _not_ an uninitialized extent.
         */
-       if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-               ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
+       if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
                ext4_update_inode_fsync_trans(handle, inode, 1);
-       else
+       else
                ext4_update_inode_fsync_trans(handle, inode, 0);
 out:
        if (allocated > map->m_len)
@@ -4284,7 +4208,7 @@ void ext4_ext_truncate(struct inode *inode)
         * probably first extent we're gonna free will be last in block
         */
        err = ext4_writepage_trans_blocks(inode);
-       handle = ext4_journal_start(inode, err);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, err);
        if (IS_ERR(handle))
                return;
 
@@ -4303,7 +4227,6 @@ void ext4_ext_truncate(struct inode *inode)
                goto out_stop;
 
        down_write(&EXT4_I(inode)->i_data_sem);
-       ext4_ext_invalidate_cache(inode);
 
        ext4_discard_preallocations(inode);
 
@@ -4386,7 +4309,7 @@ static void ext4_falloc_update_inode(struct inode *inode,
  */
 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        handle_t *handle;
        loff_t new_size;
        unsigned int max_blocks;
@@ -4397,13 +4320,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        struct ext4_map_blocks map;
        unsigned int credits, blkbits = inode->i_blkbits;
 
-       /*
-        * currently supporting (pre)allocate mode for extent-based
-        * files _only_
-        */
-       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-               return -EOPNOTSUPP;
-
        /* Return error if mode is not supported */
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;
@@ -4415,6 +4331,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (ret)
                return ret;
 
+       /*
+        * currently supporting (pre)allocate mode for extent-based
+        * files _only_
+        */
+       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+               return -EOPNOTSUPP;
+
        trace_ext4_fallocate_enter(inode, offset, len, mode);
        map.m_lblk = offset >> blkbits;
        /*
@@ -4451,7 +4374,8 @@ retry:
        while (ret >= 0 && ret < max_blocks) {
                map.m_lblk = map.m_lblk + ret;
                map.m_len = max_blocks = max_blocks - ret;
-               handle = ext4_journal_start(inode, credits);
+               handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+                                           credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        break;
@@ -4459,11 +4383,11 @@ retry:
                ret = ext4_map_blocks(handle, inode, &map, flags);
                if (ret <= 0) {
 #ifdef EXT4FS_DEBUG
-                       WARN_ON(ret <= 0);
-                       printk(KERN_ERR "%s: ext4_ext_map_blocks "
-                                   "returned error inode#%lu, block=%u, "
-                                   "max_blocks=%u", __func__,
-                                   inode->i_ino, map.m_lblk, max_blocks);
+                       ext4_warning(inode->i_sb,
+                                    "inode #%lu: block %u: len %u: "
+                                    "ext4_ext_map_blocks returned %d",
+                                    inode->i_ino, map.m_lblk,
+                                    map.m_len, ret);
 #endif
                        ext4_mark_inode_dirty(handle, inode);
                        ret2 = ext4_journal_stop(handle);
@@ -4529,21 +4453,19 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
        while (ret >= 0 && ret < max_blocks) {
                map.m_lblk += ret;
                map.m_len = (max_blocks -= ret);
-               handle = ext4_journal_start(inode, credits);
+               handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        break;
                }
                ret = ext4_map_blocks(handle, inode, &map,
                                      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
-               if (ret <= 0) {
-                       WARN_ON(ret <= 0);
-                       ext4_msg(inode->i_sb, KERN_ERR,
-                                "%s:%d: inode #%lu: block %u: len %u: "
-                                "ext4_ext_map_blocks returned %d",
-                                __func__, __LINE__, inode->i_ino, map.m_lblk,
-                                map.m_len, ret);
-               }
+               if (ret <= 0)
+                       ext4_warning(inode->i_sb,
+                                    "inode #%lu: block %u: len %u: "
+                                    "ext4_ext_map_blocks returned %d",
+                                    inode->i_ino, map.m_lblk,
+                                    map.m_len, ret);
                ext4_mark_inode_dirty(handle, inode);
                ret2 = ext4_journal_stop(handle);
                if (ret <= 0 || ret2 )
@@ -4553,42 +4475,48 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 }
 
 /*
- * If newex is not existing extent (newex->ec_start equals zero) find
- * delayed extent at start of newex and update newex accordingly and
+ * If newes is not an existing extent (newes->es_pblk equals zero) find
+ * delayed extent at start of newes and update newes accordingly and
  * return start of the next delayed extent.
  *
- * If newex is existing extent (newex->ec_start is not equal zero)
+ * If newes is an existing extent (newes->es_pblk is not equal zero)
  * return start of next delayed extent or EXT_MAX_BLOCKS if no delayed
- * extent found. Leave newex unmodified.
+ * extent found. Leave newes unmodified.
  */
 static int ext4_find_delayed_extent(struct inode *inode,
-                                   struct ext4_ext_cache *newex)
+                                   struct extent_status *newes)
 {
        struct extent_status es;
-       ext4_lblk_t next_del;
+       ext4_lblk_t block, next_del;
 
-       es.start = newex->ec_block;
-       next_del = ext4_es_find_extent(inode, &es);
+       ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
 
-       if (newex->ec_start == 0) {
+       if (newes->es_pblk == 0) {
                /*
-                * No extent in extent-tree contains block @newex->ec_start,
+                * No extent in extent-tree contains block @newes->es_pblk,
                 * then the block may stay in 1)a hole or 2)delayed-extent.
                 */
-               if (es.len == 0)
+               if (es.es_len == 0)
                        /* A hole found. */
                        return 0;
 
-               if (es.start > newex->ec_block) {
+               if (es.es_lblk > newes->es_lblk) {
                        /* A hole found. */
-                       newex->ec_len = min(es.start - newex->ec_block,
-                                           newex->ec_len);
+                       newes->es_len = min(es.es_lblk - newes->es_lblk,
+                                           newes->es_len);
                        return 0;
                }
 
-               newex->ec_len = es.start + es.len - newex->ec_block;
+               newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
        }
 
+       block = newes->es_lblk + newes->es_len;
+       ext4_es_find_delayed_extent(inode, block, &es);
+       if (es.es_len == 0)
+               next_del = EXT_MAX_BLOCKS;
+       else
+               next_del = es.es_lblk;
+
        return next_del;
 }
 /* fiemap flags we can handle specified here */
@@ -4643,7 +4571,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
  */
 int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        ext4_lblk_t first_block, stop_block;
        struct address_space *mapping = inode->i_mapping;
@@ -4709,7 +4637,7 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
        inode_dio_wait(inode);
 
        credits = ext4_writepage_trans_blocks(inode);
-       handle = ext4_journal_start(inode, credits);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                goto out_dio;
@@ -4786,14 +4714,12 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
                goto out;
 
        down_write(&EXT4_I(inode)->i_data_sem);
-       ext4_ext_invalidate_cache(inode);
        ext4_discard_preallocations(inode);
 
        err = ext4_es_remove_extent(inode, first_block,
                                    stop_block - first_block);
        err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
 
-       ext4_ext_invalidate_cache(inode);
        ext4_discard_preallocations(inode);
 
        if (IS_SYNC(inode))
index 564d981..95796a1 100644 (file)
  * (e.g. Reservation space warning), and provide extent-level locking.
  * Delay extent tree is the first step to achieve this goal.  It is
  * original built by Yongqiang Yang.  At that time it is called delay
- * extent tree, whose goal is only track delay extent in memory to
+ * extent tree, whose goal is only to track delayed extents in memory to
  * simplify the implementation of fiemap and bigalloc, and introduce
  * lseek SEEK_DATA/SEEK_HOLE support.  That is why it is still called
- * delay extent tree at the following comment.  But for better
- * understand what it does, it has been rename to extent status tree.
+ * delay extent tree at the first commit.  But for better understanding
+ * of what it does, it has been renamed to extent status tree.
  *
- * Currently the first step has been done.  All delay extents are
- * tracked in the tree.  It maintains the delay extent when a delay
- * allocation is issued, and the delay extent is written out or
+ * Step1:
+ * Currently the first step has been done.  All delayed extents are
+ * tracked in the tree.  It maintains the delayed extent when a delayed
+ * allocation is issued, and the delayed extent is written out or
  * invalidated.  Therefore the implementation of fiemap and bigalloc
  * are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
  *
  * The following comment describes the implemenmtation of extent
  * status tree and future works.
+ *
+ * Step2:
+ * In this step all extent status are tracked by extent status tree.
+ * Thus, we can first try to lookup a block mapping in this tree before
+ * finding it in extent tree.  Hence, single extent cache can be removed
+ * because extent status tree can do a better job.  Extents in status
+ * tree are loaded on-demand.  Therefore, the extent status tree may not
+ * contain all of the extents in a file.  Meanwhile we define a shrinker
+ * to reclaim memory from extent status tree because fragmented extent
+ * tree will make status tree cost too much memory.  Written/unwritten/
+ * hole extents in the tree will be reclaimed by this shrinker when we
+ * are under high memory pressure.  Delayed extents will not be
+ * reclaimed because fiemap, bigalloc, and seek_data/hole need it.
  */
 
 /*
- * extents status tree implementation for ext4.
+ * Extent status tree implementation for ext4.
  *
  *
  * ==========================================================================
- * Extents status encompass delayed extents and extent locks
+ * Extent status tree tracks all extent status.
  *
- * 1. Why delayed extent implementation ?
+ * 1. Why do we need to implement an extent status tree?
  *
- * Without delayed extent, ext4 identifies a delayed extent by looking
+ * Without extent status tree, ext4 identifies a delayed extent by looking
  * up page cache, this has several deficiencies - complicated, buggy,
  * and inefficient code.
  *
- * FIEMAP, SEEK_HOLE/DATA, bigalloc, punch hole and writeout all need
- * to know if a block or a range of blocks are belonged to a delayed
- * extent.
+ * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
+ * block or a range of blocks are belonged to a delayed extent.
  *
- * Let us have a look at how they do without delayed extents implementation.
+ * Let us have a look at how they do without extent status tree.
  *   --        FIEMAP
  *     FIEMAP looks up page cache to identify delayed allocations from holes.
  *
  *     already under delayed allocation or not to determine whether
  *     quota reserving is needed for the cluster.
  *
- *   -- punch hole
- *     punch hole looks up page cache to identify a delayed extent.
- *
  *   --        writeout
  *     Writeout looks up whole page cache to see if a buffer is
  *     mapped, If there are not very many delayed buffers, then it is
  *     time comsuming.
  *
- * With delayed extents implementation, FIEMAP, SEEK_HOLE/DATA,
+ * With extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
  * bigalloc and writeout can figure out if a block or a range of
  * blocks is under delayed allocation(belonged to a delayed extent) or
- * not by searching the delayed extent tree.
+ * not by searching the extent tree.
  *
  *
  * ==========================================================================
- * 2. ext4 delayed extents impelmentation
+ * 2. Ext4 extent status tree implementation
+ *
+ *   --        extent
+ *     An extent is a range of blocks which are contiguous logically and
+ *     physically.  Unlike an extent in the extent tree, this extent in
+ *     ext4 is an in-memory struct, there is no corresponding on-disk data.
+ *     There is no limit on the length of an extent, so an extent can
+ *     contain as many blocks as they are contiguous logically and physically.
  *
- *   --        delayed extent
- *     A delayed extent is a range of blocks which are contiguous
- *     logically and under delayed allocation.  Unlike extent in
- *     ext4, delayed extent in ext4 is a in-memory struct, there is
- *     no corresponding on-disk data.  There is no limit on length of
- *     delayed extent, so a delayed extent can contain as many blocks
- *     as they are contiguous logically.
+ *   --        extent status tree
+ *     Every inode has an extent status tree and all allocation blocks
+ *     are added to the tree with different status.  The extent in the
+ *     tree are ordered by logical block no.
  *
- *   --        delayed extent tree
- *     Every inode has a delayed extent tree and all under delayed
- *     allocation blocks are added to the tree as delayed extents.
- *     Delayed extents in the tree are ordered by logical block no.
+ *   --        operations on an extent status tree
+ *     There are three important operations on an extent status tree: finding
+ *     the next extent, adding an extent (a range of blocks) and removing one.
  *
- *   --        operations on a delayed extent tree
- *     There are three operations on a delayed extent tree: find next
- *     delayed extent, adding a space(a range of blocks) and removing
- *     a space.
+ *   --        race on an extent status tree
+ *     Extent status tree is protected by inode->i_es_lock.
  *
- *   --        race on a delayed extent tree
- *     Delayed extent tree is protected inode->i_es_lock.
+ *   --        memory consumption
+ *      Fragmented extent tree will make extent status tree cost too much
+ *      memory.  Hence, we will reclaim written/unwritten/hole extents from
+ *      the tree under a heavy memory pressure.
  *
  *
  * ==========================================================================
- * 3. performance analysis
+ * 3. Performance analysis
+ *
  *   --        overhead
  *     1. There is a cache extent for write access, so if writes are
  *     not very random, adding space operaions are in O(1) time.
  *
  * ==========================================================================
  * 4. TODO list
- *   -- Track all extent status
  *
- *   -- Improve get block process
+ *   -- Refactor delayed space reservation
  *
  *   -- Extent-level locking
  */
 
 static struct kmem_cache *ext4_es_cachep;
 
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
+static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+                             ext4_lblk_t end);
+static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
+                                      int nr_to_scan);
+
 int __init ext4_init_es(void)
 {
-       ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
+       ext4_es_cachep = kmem_cache_create("ext4_extent_status",
+                                          sizeof(struct extent_status),
+                                          0, (SLAB_RECLAIM_ACCOUNT), NULL);
        if (ext4_es_cachep == NULL)
                return -ENOMEM;
        return 0;
@@ -161,7 +182,9 @@ static void ext4_es_print_tree(struct inode *inode)
        while (node) {
                struct extent_status *es;
                es = rb_entry(node, struct extent_status, rb_node);
-               printk(KERN_DEBUG " [%u/%u)", es->start, es->len);
+               printk(KERN_DEBUG " [%u/%u) %llu %llx",
+                      es->es_lblk, es->es_len,
+                      ext4_es_pblock(es), ext4_es_status(es));
                node = rb_next(node);
        }
        printk(KERN_DEBUG "\n");
@@ -170,10 +193,10 @@ static void ext4_es_print_tree(struct inode *inode)
 #define ext4_es_print_tree(inode)
 #endif
 
-static inline ext4_lblk_t extent_status_end(struct extent_status *es)
+static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
 {
-       BUG_ON(es->start + es->len < es->start);
-       return es->start + es->len - 1;
+       BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
+       return es->es_lblk + es->es_len - 1;
 }
 
 /*
@@ -181,25 +204,25 @@ static inline ext4_lblk_t extent_status_end(struct extent_status *es)
  * it can't be found, try to find next extent.
  */
 static struct extent_status *__es_tree_search(struct rb_root *root,
-                                             ext4_lblk_t offset)
+                                             ext4_lblk_t lblk)
 {
        struct rb_node *node = root->rb_node;
        struct extent_status *es = NULL;
 
        while (node) {
                es = rb_entry(node, struct extent_status, rb_node);
-               if (offset < es->start)
+               if (lblk < es->es_lblk)
                        node = node->rb_left;
-               else if (offset > extent_status_end(es))
+               else if (lblk > ext4_es_end(es))
                        node = node->rb_right;
                else
                        return es;
        }
 
-       if (es && offset < es->start)
+       if (es && lblk < es->es_lblk)
                return es;
 
-       if (es && offset > extent_status_end(es)) {
+       if (es && lblk > ext4_es_end(es)) {
                node = rb_next(&es->rb_node);
                return node ? rb_entry(node, struct extent_status, rb_node) :
                              NULL;
@@ -209,79 +232,124 @@ static struct extent_status *__es_tree_search(struct rb_root *root,
 }
 
 /*
- * ext4_es_find_extent: find the 1st delayed extent covering @es->start
- * if it exists, otherwise, the next extent after @es->start.
+ * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk
+ * if it exists, otherwise, the next extent after @es->lblk.
  *
  * @inode: the inode which owns delayed extents
+ * @lblk: the offset where we start to search
  * @es: delayed extent that we found
- *
- * Returns the first block of the next extent after es, otherwise
- * EXT_MAX_BLOCKS if no delay extent is found.
- * Delayed extent is returned via @es.
  */
-ext4_lblk_t ext4_es_find_extent(struct inode *inode, struct extent_status *es)
+void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+                                struct extent_status *es)
 {
        struct ext4_es_tree *tree = NULL;
        struct extent_status *es1 = NULL;
        struct rb_node *node;
-       ext4_lblk_t ret = EXT_MAX_BLOCKS;
 
-       trace_ext4_es_find_extent_enter(inode, es->start);
+       BUG_ON(es == NULL);
+       trace_ext4_es_find_delayed_extent_enter(inode, lblk);
 
        read_lock(&EXT4_I(inode)->i_es_lock);
        tree = &EXT4_I(inode)->i_es_tree;
 
-       /* find delay extent in cache firstly */
+       /* find extent in cache firstly */
+       es->es_lblk = es->es_len = es->es_pblk = 0;
        if (tree->cache_es) {
                es1 = tree->cache_es;
-               if (in_range(es->start, es1->start, es1->len)) {
-                       es_debug("%u cached by [%u/%u)\n",
-                                es->start, es1->start, es1->len);
+               if (in_range(lblk, es1->es_lblk, es1->es_len)) {
+                       es_debug("%u cached by [%u/%u) %llu %llx\n",
+                                lblk, es1->es_lblk, es1->es_len,
+                                ext4_es_pblock(es1), ext4_es_status(es1));
                        goto out;
                }
        }
 
-       es->len = 0;
-       es1 = __es_tree_search(&tree->root, es->start);
+       es1 = __es_tree_search(&tree->root, lblk);
 
 out:
-       if (es1) {
-               tree->cache_es = es1;
-               es->start = es1->start;
-               es->len = es1->len;
-               node = rb_next(&es1->rb_node);
-               if (node) {
+       if (es1 && !ext4_es_is_delayed(es1)) {
+               while ((node = rb_next(&es1->rb_node)) != NULL) {
                        es1 = rb_entry(node, struct extent_status, rb_node);
-                       ret = es1->start;
+                       if (ext4_es_is_delayed(es1))
+                               break;
                }
        }
 
+       if (es1 && ext4_es_is_delayed(es1)) {
+               tree->cache_es = es1;
+               es->es_lblk = es1->es_lblk;
+               es->es_len = es1->es_len;
+               es->es_pblk = es1->es_pblk;
+       }
+
        read_unlock(&EXT4_I(inode)->i_es_lock);
 
-       trace_ext4_es_find_extent_exit(inode, es, ret);
-       return ret;
+       ext4_es_lru_add(inode);
+       trace_ext4_es_find_delayed_extent_exit(inode, es);
 }
 
 static struct extent_status *
-ext4_es_alloc_extent(ext4_lblk_t start, ext4_lblk_t len)
+ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
+                    ext4_fsblk_t pblk)
 {
        struct extent_status *es;
        es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
        if (es == NULL)
                return NULL;
-       es->start = start;
-       es->len = len;
+       es->es_lblk = lblk;
+       es->es_len = len;
+       es->es_pblk = pblk;
+
+       /*
+        * We don't count delayed extents because we never try to reclaim them
+        */
+       if (!ext4_es_is_delayed(es)) {
+               EXT4_I(inode)->i_es_lru_nr++;
+               percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+       }
+
        return es;
 }
 
-static void ext4_es_free_extent(struct extent_status *es)
+static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
 {
+       /* Decrease the lru counter when this es is not delayed */
+       if (!ext4_es_is_delayed(es)) {
+               BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
+               EXT4_I(inode)->i_es_lru_nr--;
+               percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+       }
+
        kmem_cache_free(ext4_es_cachep, es);
 }
 
+/*
+ * Check whether or not two extents can be merged
+ * Condition:
+ *  - logical block number is contiguous
+ *  - physical block number is contiguous
+ *  - status is equal
+ */
+static int ext4_es_can_be_merged(struct extent_status *es1,
+                                struct extent_status *es2)
+{
+       if (es1->es_lblk + es1->es_len != es2->es_lblk)
+               return 0;
+
+       if (ext4_es_status(es1) != ext4_es_status(es2))
+               return 0;
+
+       if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
+           (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2)))
+               return 0;
+
+       return 1;
+}
+
 static struct extent_status *
-ext4_es_try_to_merge_left(struct ext4_es_tree *tree, struct extent_status *es)
+ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
 {
+       struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct extent_status *es1;
        struct rb_node *node;
 
@@ -290,10 +358,10 @@ ext4_es_try_to_merge_left(struct ext4_es_tree *tree, struct extent_status *es)
                return es;
 
        es1 = rb_entry(node, struct extent_status, rb_node);
-       if (es->start == extent_status_end(es1) + 1) {
-               es1->len += es->len;
+       if (ext4_es_can_be_merged(es1, es)) {
+               es1->es_len += es->es_len;
                rb_erase(&es->rb_node, &tree->root);
-               ext4_es_free_extent(es);
+               ext4_es_free_extent(inode, es);
                es = es1;
        }
 
@@ -301,8 +369,9 @@ ext4_es_try_to_merge_left(struct ext4_es_tree *tree, struct extent_status *es)
 }
 
 static struct extent_status *
-ext4_es_try_to_merge_right(struct ext4_es_tree *tree, struct extent_status *es)
+ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
 {
+       struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct extent_status *es1;
        struct rb_node *node;
 
@@ -311,69 +380,57 @@ ext4_es_try_to_merge_right(struct ext4_es_tree *tree, struct extent_status *es)
                return es;
 
        es1 = rb_entry(node, struct extent_status, rb_node);
-       if (es1->start == extent_status_end(es) + 1) {
-               es->len += es1->len;
+       if (ext4_es_can_be_merged(es, es1)) {
+               es->es_len += es1->es_len;
                rb_erase(node, &tree->root);
-               ext4_es_free_extent(es1);
+               ext4_es_free_extent(inode, es1);
        }
 
        return es;
 }
 
-static int __es_insert_extent(struct ext4_es_tree *tree, ext4_lblk_t offset,
-                             ext4_lblk_t len)
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
 {
+       struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
        struct rb_node **p = &tree->root.rb_node;
        struct rb_node *parent = NULL;
        struct extent_status *es;
-       ext4_lblk_t end = offset + len - 1;
-
-       BUG_ON(end < offset);
-       es = tree->cache_es;
-       if (es && offset == (extent_status_end(es) + 1)) {
-               es_debug("cached by [%u/%u)\n", es->start, es->len);
-               es->len += len;
-               es = ext4_es_try_to_merge_right(tree, es);
-               goto out;
-       } else if (es && es->start == end + 1) {
-               es_debug("cached by [%u/%u)\n", es->start, es->len);
-               es->start = offset;
-               es->len += len;
-               es = ext4_es_try_to_merge_left(tree, es);
-               goto out;
-       } else if (es && es->start <= offset &&
-                  end <= extent_status_end(es)) {
-               es_debug("cached by [%u/%u)\n", es->start, es->len);
-               goto out;
-       }
 
        while (*p) {
                parent = *p;
                es = rb_entry(parent, struct extent_status, rb_node);
 
-               if (offset < es->start) {
-                       if (es->start == end + 1) {
-                               es->start = offset;
-                               es->len += len;
-                               es = ext4_es_try_to_merge_left(tree, es);
+               if (newes->es_lblk < es->es_lblk) {
+                       if (ext4_es_can_be_merged(newes, es)) {
+                               /*
+                                * Here we can modify es_lblk directly
+                                * because it isn't overlapped.
+                                */
+                               es->es_lblk = newes->es_lblk;
+                               es->es_len += newes->es_len;
+                               if (ext4_es_is_written(es) ||
+                                   ext4_es_is_unwritten(es))
+                                       ext4_es_store_pblock(es,
+                                                            newes->es_pblk);
+                               es = ext4_es_try_to_merge_left(inode, es);
                                goto out;
                        }
                        p = &(*p)->rb_left;
-               } else if (offset > extent_status_end(es)) {
-                       if (offset == extent_status_end(es) + 1) {
-                               es->len += len;
-                               es = ext4_es_try_to_merge_right(tree, es);
+               } else if (newes->es_lblk > ext4_es_end(es)) {
+                       if (ext4_es_can_be_merged(es, newes)) {
+                               es->es_len += newes->es_len;
+                               es = ext4_es_try_to_merge_right(inode, es);
                                goto out;
                        }
                        p = &(*p)->rb_right;
                } else {
-                       if (extent_status_end(es) <= end)
-                               es->len = offset - es->start + len;
-                       goto out;
+                       BUG_ON(1);
+                       return -EINVAL;
                }
        }
 
-       es = ext4_es_alloc_extent(offset, len);
+       es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
+                                 newes->es_pblk);
        if (!es)
                return -ENOMEM;
        rb_link_node(&es->rb_node, parent, p);
@@ -385,85 +442,166 @@ out:
 }
 
 /*
- * ext4_es_insert_extent() adds a space to a delayed extent tree.
- * Caller holds inode->i_es_lock.
+ * ext4_es_insert_extent() adds a space to an extent status tree.
  *
  * ext4_es_insert_extent is called by ext4_da_write_begin and
  * ext4_es_remove_extent.
  *
  * Return 0 on success, error code on failure.
  */
-int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t offset,
-                         ext4_lblk_t len)
+int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+                         ext4_lblk_t len, ext4_fsblk_t pblk,
+                         unsigned long long status)
 {
-       struct ext4_es_tree *tree;
+       struct extent_status newes;
+       ext4_lblk_t end = lblk + len - 1;
        int err = 0;
 
-       trace_ext4_es_insert_extent(inode, offset, len);
-       es_debug("add [%u/%u) to extent status tree of inode %lu\n",
-                offset, len, inode->i_ino);
+       es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
+                lblk, len, pblk, status, inode->i_ino);
+
+       if (!len)
+               return 0;
+
+       BUG_ON(end < lblk);
+
+       newes.es_lblk = lblk;
+       newes.es_len = len;
+       ext4_es_store_pblock(&newes, pblk);
+       ext4_es_store_status(&newes, status);
+       trace_ext4_es_insert_extent(inode, &newes);
 
        write_lock(&EXT4_I(inode)->i_es_lock);
-       tree = &EXT4_I(inode)->i_es_tree;
-       err = __es_insert_extent(tree, offset, len);
+       err = __es_remove_extent(inode, lblk, end);
+       if (err != 0)
+               goto error;
+       err = __es_insert_extent(inode, &newes);
+
+error:
        write_unlock(&EXT4_I(inode)->i_es_lock);
 
+       ext4_es_lru_add(inode);
        ext4_es_print_tree(inode);
 
        return err;
 }
 
 /*
- * ext4_es_remove_extent() removes a space from a delayed extent tree.
- * Caller holds inode->i_es_lock.
+ * ext4_es_lookup_extent() looks up an extent in extent status tree.
  *
- * Return 0 on success, error code on failure.
+ * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
+ *
+ * Return: 1 if found, 0 if not
  */
-int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t offset,
-                         ext4_lblk_t len)
+int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+                         struct extent_status *es)
 {
-       struct rb_node *node;
        struct ext4_es_tree *tree;
+       struct extent_status *es1 = NULL;
+       struct rb_node *node;
+       int found = 0;
+
+       trace_ext4_es_lookup_extent_enter(inode, lblk);
+       es_debug("lookup extent in block %u\n", lblk);
+
+       tree = &EXT4_I(inode)->i_es_tree;
+       read_lock(&EXT4_I(inode)->i_es_lock);
+
+       /* find extent in cache firstly */
+       es->es_lblk = es->es_len = es->es_pblk = 0;
+       if (tree->cache_es) {
+               es1 = tree->cache_es;
+               if (in_range(lblk, es1->es_lblk, es1->es_len)) {
+                       es_debug("%u cached by [%u/%u)\n",
+                                lblk, es1->es_lblk, es1->es_len);
+                       found = 1;
+                       goto out;
+               }
+       }
+
+       node = tree->root.rb_node;
+       while (node) {
+               es1 = rb_entry(node, struct extent_status, rb_node);
+               if (lblk < es1->es_lblk)
+                       node = node->rb_left;
+               else if (lblk > ext4_es_end(es1))
+                       node = node->rb_right;
+               else {
+                       found = 1;
+                       break;
+               }
+       }
+
+out:
+       if (found) {
+               BUG_ON(!es1);
+               es->es_lblk = es1->es_lblk;
+               es->es_len = es1->es_len;
+               es->es_pblk = es1->es_pblk;
+       }
+
+       read_unlock(&EXT4_I(inode)->i_es_lock);
+
+       ext4_es_lru_add(inode);
+       trace_ext4_es_lookup_extent_exit(inode, es, found);
+       return found;
+}
+
+static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+                             ext4_lblk_t end)
+{
+       struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+       struct rb_node *node;
        struct extent_status *es;
        struct extent_status orig_es;
-       ext4_lblk_t len1, len2, end;
+       ext4_lblk_t len1, len2;
+       ext4_fsblk_t block;
        int err = 0;
 
-       trace_ext4_es_remove_extent(inode, offset, len);
-       es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
-                offset, len, inode->i_ino);
-
-       end = offset + len - 1;
-       BUG_ON(end < offset);
-       write_lock(&EXT4_I(inode)->i_es_lock);
-       tree = &EXT4_I(inode)->i_es_tree;
-       es = __es_tree_search(&tree->root, offset);
+       es = __es_tree_search(&tree->root, lblk);
        if (!es)
                goto out;
-       if (es->start > end)
+       if (es->es_lblk > end)
                goto out;
 
        /* Simply invalidate cache_es. */
        tree->cache_es = NULL;
 
-       orig_es.start = es->start;
-       orig_es.len = es->len;
-       len1 = offset > es->start ? offset - es->start : 0;
-       len2 = extent_status_end(es) > end ?
-              extent_status_end(es) - end : 0;
+       orig_es.es_lblk = es->es_lblk;
+       orig_es.es_len = es->es_len;
+       orig_es.es_pblk = es->es_pblk;
+
+       len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
+       len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
        if (len1 > 0)
-               es->len = len1;
+               es->es_len = len1;
        if (len2 > 0) {
                if (len1 > 0) {
-                       err = __es_insert_extent(tree, end + 1, len2);
+                       struct extent_status newes;
+
+                       newes.es_lblk = end + 1;
+                       newes.es_len = len2;
+                       if (ext4_es_is_written(&orig_es) ||
+                           ext4_es_is_unwritten(&orig_es)) {
+                               block = ext4_es_pblock(&orig_es) +
+                                       orig_es.es_len - len2;
+                               ext4_es_store_pblock(&newes, block);
+                       }
+                       ext4_es_store_status(&newes, ext4_es_status(&orig_es));
+                       err = __es_insert_extent(inode, &newes);
                        if (err) {
-                               es->start = orig_es.start;
-                               es->len = orig_es.len;
+                               es->es_lblk = orig_es.es_lblk;
+                               es->es_len = orig_es.es_len;
                                goto out;
                        }
                } else {
-                       es->start = end + 1;
-                       es->len = len2;
+                       es->es_lblk = end + 1;
+                       es->es_len = len2;
+                       if (ext4_es_is_written(es) ||
+                           ext4_es_is_unwritten(es)) {
+                               block = orig_es.es_pblk + orig_es.es_len - len2;
+                               ext4_es_store_pblock(es, block);
+                       }
                }
                goto out;
        }
@@ -476,10 +614,10 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t offset,
                        es = NULL;
        }
 
-       while (es && extent_status_end(es) <= end) {
+       while (es && ext4_es_end(es) <= end) {
                node = rb_next(&es->rb_node);
                rb_erase(&es->rb_node, &tree->root);
-               ext4_es_free_extent(es);
+               ext4_es_free_extent(inode, es);
                if (!node) {
                        es = NULL;
                        break;
@@ -487,14 +625,166 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t offset,
                es = rb_entry(node, struct extent_status, rb_node);
        }
 
-       if (es && es->start < end + 1) {
-               len1 = extent_status_end(es) - end;
-               es->start = end + 1;
-               es->len = len1;
+       if (es && es->es_lblk < end + 1) {
+               ext4_lblk_t orig_len = es->es_len;
+
+               len1 = ext4_es_end(es) - end;
+               es->es_lblk = end + 1;
+               es->es_len = len1;
+               if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
+                       block = es->es_pblk + orig_len - len1;
+                       ext4_es_store_pblock(es, block);
+               }
        }
 
 out:
+       return err;
+}
+
+/*
+ * ext4_es_remove_extent() removes a space from an extent status tree.
+ *
+ * Return 0 on success, error code on failure.
+ */
+int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
+                         ext4_lblk_t len)
+{
+       ext4_lblk_t end;
+       int err = 0;
+
+       trace_ext4_es_remove_extent(inode, lblk, len);
+       es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
+                lblk, len, inode->i_ino);
+
+       if (!len)
+               return err;
+
+       end = lblk + len - 1;
+       BUG_ON(end < lblk);
+
+       write_lock(&EXT4_I(inode)->i_es_lock);
+       err = __es_remove_extent(inode, lblk, end);
        write_unlock(&EXT4_I(inode)->i_es_lock);
        ext4_es_print_tree(inode);
        return err;
 }
+
+static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
+{
+       struct ext4_sb_info *sbi = container_of(shrink,
+                                       struct ext4_sb_info, s_es_shrinker);
+       struct ext4_inode_info *ei;
+       struct list_head *cur, *tmp, scanned;
+       int nr_to_scan = sc->nr_to_scan;
+       int ret, nr_shrunk = 0;
+
+       ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+       trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
+
+       if (!nr_to_scan)
+               return ret;
+
+       INIT_LIST_HEAD(&scanned);
+
+       spin_lock(&sbi->s_es_lru_lock);
+       list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
+               list_move_tail(cur, &scanned);
+
+               ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
+
+               read_lock(&ei->i_es_lock);
+               if (ei->i_es_lru_nr == 0) {
+                       read_unlock(&ei->i_es_lock);
+                       continue;
+               }
+               read_unlock(&ei->i_es_lock);
+
+               write_lock(&ei->i_es_lock);
+               ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
+               write_unlock(&ei->i_es_lock);
+
+               nr_shrunk += ret;
+               nr_to_scan -= ret;
+               if (nr_to_scan == 0)
+                       break;
+       }
+       list_splice_tail(&scanned, &sbi->s_es_lru);
+       spin_unlock(&sbi->s_es_lru_lock);
+
+       ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+       trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
+       return ret;
+}
+
+void ext4_es_register_shrinker(struct super_block *sb)
+{
+       struct ext4_sb_info *sbi;
+
+       sbi = EXT4_SB(sb);
+       INIT_LIST_HEAD(&sbi->s_es_lru);
+       spin_lock_init(&sbi->s_es_lru_lock);
+       sbi->s_es_shrinker.shrink = ext4_es_shrink;
+       sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
+       register_shrinker(&sbi->s_es_shrinker);
+}
+
+void ext4_es_unregister_shrinker(struct super_block *sb)
+{
+       unregister_shrinker(&EXT4_SB(sb)->s_es_shrinker);
+}
+
+void ext4_es_lru_add(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+       spin_lock(&sbi->s_es_lru_lock);
+       if (list_empty(&ei->i_es_lru))
+               list_add_tail(&ei->i_es_lru, &sbi->s_es_lru);
+       else
+               list_move_tail(&ei->i_es_lru, &sbi->s_es_lru);
+       spin_unlock(&sbi->s_es_lru_lock);
+}
+
+void ext4_es_lru_del(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+       spin_lock(&sbi->s_es_lru_lock);
+       if (!list_empty(&ei->i_es_lru))
+               list_del_init(&ei->i_es_lru);
+       spin_unlock(&sbi->s_es_lru_lock);
+}
+
+static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
+                                      int nr_to_scan)
+{
+       struct inode *inode = &ei->vfs_inode;
+       struct ext4_es_tree *tree = &ei->i_es_tree;
+       struct rb_node *node;
+       struct extent_status *es;
+       int nr_shrunk = 0;
+
+       if (ei->i_es_lru_nr == 0)
+               return 0;
+
+       node = rb_first(&tree->root);
+       while (node != NULL) {
+               es = rb_entry(node, struct extent_status, rb_node);
+               node = rb_next(&es->rb_node);
+               /*
+                * We can't reclaim delayed extent from status tree because
+                * fiemap, bigalloc, and seek_data/hole need to use it.
+                */
+               if (!ext4_es_is_delayed(es)) {
+                       rb_erase(&es->rb_node, &tree->root);
+                       ext4_es_free_extent(inode, es);
+                       nr_shrunk++;
+                       if (--nr_to_scan == 0)
+                               break;
+               }
+       }
+       tree->cache_es = NULL;
+       return nr_shrunk;
+}
index 077f82d..f190dfe 100644 (file)
 #define es_debug(fmt, ...)     no_printk(fmt, ##__VA_ARGS__)
 #endif
 
+/*
+ * These flags live in the high bits of extent_status.es_pblk
+ */
+#define EXTENT_STATUS_WRITTEN  (1ULL << 63)
+#define EXTENT_STATUS_UNWRITTEN (1ULL << 62)
+#define EXTENT_STATUS_DELAYED  (1ULL << 61)
+#define EXTENT_STATUS_HOLE     (1ULL << 60)
+
+#define EXTENT_STATUS_FLAGS    (EXTENT_STATUS_WRITTEN | \
+                                EXTENT_STATUS_UNWRITTEN | \
+                                EXTENT_STATUS_DELAYED | \
+                                EXTENT_STATUS_HOLE)
+
 struct extent_status {
        struct rb_node rb_node;
-       ext4_lblk_t start;      /* first block extent covers */
-       ext4_lblk_t len;        /* length of extent in block */
+       ext4_lblk_t es_lblk;    /* first logical block extent covers */
+       ext4_lblk_t es_len;     /* length of extent in block */
+       ext4_fsblk_t es_pblk;   /* first physical block */
 };
 
 struct ext4_es_tree {
@@ -35,11 +49,69 @@ extern int __init ext4_init_es(void);
 extern void ext4_exit_es(void);
 extern void ext4_es_init_tree(struct ext4_es_tree *tree);
 
-extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t start,
+extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
+                                ext4_lblk_t len, ext4_fsblk_t pblk,
+                                unsigned long long status);
+extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                                 ext4_lblk_t len);
-extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t start,
-                                ext4_lblk_t len);
-extern ext4_lblk_t ext4_es_find_extent(struct inode *inode,
-                               struct extent_status *es);
+extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+                                       struct extent_status *es);
+extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+                                struct extent_status *es);
+
+static inline int ext4_es_is_written(struct extent_status *es)
+{
+       return (es->es_pblk & EXTENT_STATUS_WRITTEN) != 0;
+}
+
+static inline int ext4_es_is_unwritten(struct extent_status *es)
+{
+       return (es->es_pblk & EXTENT_STATUS_UNWRITTEN) != 0;
+}
+
+static inline int ext4_es_is_delayed(struct extent_status *es)
+{
+       return (es->es_pblk & EXTENT_STATUS_DELAYED) != 0;
+}
+
+static inline int ext4_es_is_hole(struct extent_status *es)
+{
+       return (es->es_pblk & EXTENT_STATUS_HOLE) != 0;
+}
+
+static inline ext4_fsblk_t ext4_es_status(struct extent_status *es)
+{
+       return (es->es_pblk & EXTENT_STATUS_FLAGS);
+}
+
+static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
+{
+       return (es->es_pblk & ~EXTENT_STATUS_FLAGS);
+}
+
+static inline void ext4_es_store_pblock(struct extent_status *es,
+                                       ext4_fsblk_t pb)
+{
+       ext4_fsblk_t block;
+
+       block = (pb & ~EXTENT_STATUS_FLAGS) |
+               (es->es_pblk & EXTENT_STATUS_FLAGS);
+       es->es_pblk = block;
+}
+
+static inline void ext4_es_store_status(struct extent_status *es,
+                                       unsigned long long status)
+{
+       ext4_fsblk_t block;
+
+       block = (status & EXTENT_STATUS_FLAGS) |
+               (es->es_pblk & ~EXTENT_STATUS_FLAGS);
+       es->es_pblk = block;
+}
+
+extern void ext4_es_register_shrinker(struct super_block *sb);
+extern void ext4_es_unregister_shrinker(struct super_block *sb);
+extern void ext4_es_lru_add(struct inode *inode);
+extern void ext4_es_lru_del(struct inode *inode);
 
 #endif /* _EXT4_EXTENTS_STATUS_H */
index 405565a..64848b5 100644 (file)
@@ -167,7 +167,7 @@ static ssize_t
 ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
 {
-       struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;
 
        /*
@@ -240,7 +240,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
                        handle_t *handle;
                        int err;
 
-                       handle = ext4_journal_start_sb(sb, 1);
+                       handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
@@ -464,10 +464,8 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
                 * If there is a delay extent at this offset,
                 * it will be as a data.
                 */
-               es.start = last;
-               (void)ext4_es_find_extent(inode, &es);
-               if (last >= es.start &&
-                   last < es.start + es.len) {
+               ext4_es_find_delayed_extent(inode, last, &es);
+               if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = last << blkbits;
                        break;
@@ -549,11 +547,9 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
                 * If there is a delay extent at this offset,
                 * we will skip this extent.
                 */
-               es.start = last;
-               (void)ext4_es_find_extent(inode, &es);
-               if (last >= es.start &&
-                   last < es.start + es.len) {
-                       last = es.start + es.len;
+               ext4_es_find_delayed_extent(inode, last, &es);
+               if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+                       last = es.es_lblk + es.es_len;
                        holeoff = last << blkbits;
                        continue;
                }
index fa8e491..3d586f0 100644 (file)
@@ -155,11 +155,11 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
        /* Check to see if the seed is all zero's */
        if (hinfo->seed) {
                for (i = 0; i < 4; i++) {
-                       if (hinfo->seed[i])
+                       if (hinfo->seed[i]) {
+                               memcpy(buf, hinfo->seed, sizeof(buf));
                                break;
+                       }
                }
-               if (i < 4)
-                       memcpy(buf, hinfo->seed, sizeof(buf));
        }
 
        switch (hinfo->hash_version) {
index 3f32c80..32fd2b9 100644 (file)
@@ -634,8 +634,10 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
  * For other inodes, search forward from the parent directory's block
  * group to find a free inode.
  */
-struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
-                            const struct qstr *qstr, __u32 goal, uid_t *owner)
+struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
+                              umode_t mode, const struct qstr *qstr,
+                              __u32 goal, uid_t *owner, int handle_type,
+                              unsigned int line_no, int nblocks)
 {
        struct super_block *sb;
        struct buffer_head *inode_bitmap_bh = NULL;
@@ -725,6 +727,15 @@ repeat_in_this_group:
                                   "inode=%lu", ino + 1);
                        continue;
                }
+               if (!handle) {
+                       BUG_ON(nblocks <= 0);
+                       handle = __ext4_journal_start_sb(dir->i_sb, line_no,
+                                                        handle_type, nblocks);
+                       if (IS_ERR(handle)) {
+                               err = PTR_ERR(handle);
+                               goto fail;
+                       }
+               }
                BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
                if (err)
@@ -1017,17 +1028,17 @@ iget_failed:
        inode = NULL;
 bad_orphan:
        ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
-       printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
+       printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
               bit, (unsigned long long)bitmap_bh->b_blocknr,
               ext4_test_bit(bit, bitmap_bh->b_data));
-       printk(KERN_NOTICE "inode=%p\n", inode);
+       printk(KERN_WARNING "inode=%p\n", inode);
        if (inode) {
-               printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
+               printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
                       is_bad_inode(inode));
-               printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
+               printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
                       NEXT_ORPHAN(inode));
-               printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
-               printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
+               printk(KERN_WARNING "max_ino=%lu\n", max_ino);
+               printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
                /* Avoid freeing blocks if we got a bad deleted inode */
                if (inode->i_nlink == 0)
                        inode->i_blocks = 0;
@@ -1137,7 +1148,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
        if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
                goto out;
 
-       handle = ext4_journal_start_sb(sb, 1);
+       handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
index 20862f9..b505a14 100644 (file)
@@ -146,6 +146,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;
+       int ret = -EIO;
 
        *err = 0;
        /* i_data is not going away, no lock needed */
@@ -154,8 +155,10 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
                goto no_block;
        while (--depth) {
                bh = sb_getblk(sb, le32_to_cpu(p->key));
-               if (unlikely(!bh))
+               if (unlikely(!bh)) {
+                       ret = -ENOMEM;
                        goto failure;
+               }
 
                if (!bh_uptodate_or_lock(bh)) {
                        if (bh_submit_read(bh) < 0) {
@@ -177,7 +180,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
        return NULL;
 
 failure:
-       *err = -EIO;
+       *err = ret;
 no_block:
        return p;
 }
@@ -355,9 +358,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                         * for the first direct block
                         */
                        new_blocks[index] = current_block;
-                       printk(KERN_INFO "%s returned more blocks than "
+                       WARN(1, KERN_INFO "%s returned more blocks than "
                                                "requested\n", __func__);
-                       WARN_ON(1);
                        break;
                }
        }
@@ -471,7 +473,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                if (unlikely(!bh)) {
-                       err = -EIO;
+                       err = -ENOMEM;
                        goto failed;
                }
 
@@ -789,7 +791,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
 
                if (final_size > inode->i_size) {
                        /* Credits for sb + inode write */
-                       handle = ext4_journal_start(inode, 2);
+                       handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                        if (IS_ERR(handle)) {
                                ret = PTR_ERR(handle);
                                goto out;
@@ -849,7 +851,7 @@ locked:
                int err;
 
                /* Credits for sb + inode write */
-               handle = ext4_journal_start(inode, 2);
+               handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        /* This is really bad luck. We've written the data
                         * but cannot extend i_size. Bail out and pretend
@@ -948,7 +950,8 @@ static handle_t *start_transaction(struct inode *inode)
 {
        handle_t *result;
 
-       result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
+       result = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
+                                   ext4_blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;
 
@@ -1515,3 +1518,243 @@ out_stop:
        trace_ext4_truncate_exit(inode);
 }
 
+static int free_hole_blocks(handle_t *handle, struct inode *inode,
+                           struct buffer_head *parent_bh, __le32 *i_data,
+                           int level, ext4_lblk_t first,
+                           ext4_lblk_t count, int max)
+{
+       struct buffer_head *bh = NULL;
+       int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+       int ret = 0;
+       int i, inc;
+       ext4_lblk_t offset;
+       __le32 blk;
+
+       inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
+       for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
+               if (offset >= count + first)
+                       break;
+               if (*i_data == 0 || (offset + inc) <= first)
+                       continue;
+               blk = *i_data;
+               if (level > 0) {
+                       ext4_lblk_t first2;
+                       bh = sb_bread(inode->i_sb, blk);
+                       if (!bh) {
+                               EXT4_ERROR_INODE_BLOCK(inode, blk,
+                                                      "Read failure");
+                               return -EIO;
+                       }
+                       first2 = (first > offset) ? first - offset : 0;
+                       ret = free_hole_blocks(handle, inode, bh,
+                                              (__le32 *)bh->b_data, level - 1,
+                                              first2, count - offset,
+                                              inode->i_sb->s_blocksize >> 2);
+                       if (ret) {
+                               brelse(bh);
+                               goto err;
+                       }
+               }
+               if (level == 0 ||
+                   (bh && all_zeroes((__le32 *)bh->b_data,
+                                     (__le32 *)bh->b_data + addr_per_block))) {
+                       ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
+                       *i_data = 0;
+               }
+               brelse(bh);
+               bh = NULL;
+       }
+
+err:
+       return ret;
+}
+
+static int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
+                                ext4_lblk_t first, ext4_lblk_t stop)
+{
+       int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+       int level, ret = 0;
+       int num = EXT4_NDIR_BLOCKS;
+       ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
+       __le32 *i_data = EXT4_I(inode)->i_data;
+
+       count = stop - first;
+       for (level = 0; level < 4; level++, max *= addr_per_block) {
+               if (first < max) {
+                       ret = free_hole_blocks(handle, inode, NULL, i_data,
+                                              level, first, count, num);
+                       if (ret)
+                               goto err;
+                       if (count > max - first)
+                               count -= max - first;
+                       else
+                               break;
+                       first = 0;
+               } else {
+                       first -= max;
+               }
+               i_data += num;
+               if (level == 0) {
+                       num = 1;
+                       max = 1;
+               }
+       }
+
+err:
+       return ret;
+}
+
+int ext4_ind_punch_hole(struct file *file, loff_t offset, loff_t length)
+{
+       struct inode *inode = file_inode(file);
+       struct super_block *sb = inode->i_sb;
+       ext4_lblk_t first_block, stop_block;
+       struct address_space *mapping = inode->i_mapping;
+       handle_t *handle = NULL;
+       loff_t first_page, last_page, page_len;
+       loff_t first_page_offset, last_page_offset;
+       int err = 0;
+
+       /*
+        * Write out all dirty pages to avoid race conditions
+        * Then release them.
+        */
+       if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+               err = filemap_write_and_wait_range(mapping,
+                       offset, offset + length - 1);
+               if (err)
+                       return err;
+       }
+
+       mutex_lock(&inode->i_mutex);
+       /* It's not possible punch hole on append only file */
+       if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
+               err = -EPERM;
+               goto out_mutex;
+       }
+       if (IS_SWAPFILE(inode)) {
+               err = -ETXTBSY;
+               goto out_mutex;
+       }
+
+       /* No need to punch hole beyond i_size */
+       if (offset >= inode->i_size)
+               goto out_mutex;
+
+       /*
+        * If the hole extents beyond i_size, set the hole
+        * to end after the page that contains i_size
+        */
+       if (offset + length > inode->i_size) {
+               length = inode->i_size +
+                   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
+                   offset;
+       }
+
+       first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       last_page = (offset + length) >> PAGE_CACHE_SHIFT;
+
+       first_page_offset = first_page << PAGE_CACHE_SHIFT;
+       last_page_offset = last_page << PAGE_CACHE_SHIFT;
+
+       /* Now release the pages */
+       if (last_page_offset > first_page_offset) {
+               truncate_pagecache_range(inode, first_page_offset,
+                                        last_page_offset - 1);
+       }
+
+       /* Wait all existing dio works, newcomers will block on i_mutex */
+       inode_dio_wait(inode);
+
+       handle = start_transaction(inode);
+       if (IS_ERR(handle))
+               goto out_mutex;
+
+       /*
+        * Now we need to zero out the non-page-aligned data in the
+        * pages at the start and tail of the hole, and unmap the buffer
+        * heads for the block aligned regions of the page that were
+        * completely zerod.
+        */
+       if (first_page > last_page) {
+               /*
+                * If the file space being truncated is contained within a page
+                * just zero out and unmap the middle of that page
+                */
+               err = ext4_discard_partial_page_buffers(handle,
+                       mapping, offset, length, 0);
+               if (err)
+                       goto out;
+       } else {
+               /*
+                * Zero out and unmap the paritial page that contains
+                * the start of the hole
+                */
+               page_len = first_page_offset - offset;
+               if (page_len > 0) {
+                       err = ext4_discard_partial_page_buffers(handle, mapping,
+                                                       offset, page_len, 0);
+                       if (err)
+                               goto out;
+               }
+
+               /*
+                * Zero out and unmap the partial page that contains
+                * the end of the hole
+                */
+               page_len = offset + length - last_page_offset;
+               if (page_len > 0) {
+                       err = ext4_discard_partial_page_buffers(handle, mapping,
+                                               last_page_offset, page_len, 0);
+                       if (err)
+                               goto out;
+               }
+       }
+
+       /*
+        * If i_size contained in the last page, we need to
+        * unmap and zero the paritial page after i_size
+        */
+       if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
+           inode->i_size % PAGE_CACHE_SIZE != 0) {
+               page_len = PAGE_CACHE_SIZE -
+                       (inode->i_size & (PAGE_CACHE_SIZE - 1));
+               if (page_len > 0) {
+                       err = ext4_discard_partial_page_buffers(handle,
+                               mapping, inode->i_size, page_len, 0);
+                       if (err)
+                               goto out;
+               }
+       }
+
+       first_block = (offset + sb->s_blocksize - 1) >>
+               EXT4_BLOCK_SIZE_BITS(sb);
+       stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+
+       if (first_block >= stop_block)
+               goto out;
+
+       down_write(&EXT4_I(inode)->i_data_sem);
+       ext4_discard_preallocations(inode);
+
+       err = ext4_es_remove_extent(inode, first_block,
+                                   stop_block - first_block);
+       err = ext4_free_hole_blocks(handle, inode, first_block, stop_block);
+
+       ext4_discard_preallocations(inode);
+
+       if (IS_SYNC(inode))
+               ext4_handle_sync(handle);
+
+       up_write(&EXT4_I(inode)->i_data_sem);
+
+out:
+       inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+       ext4_mark_inode_dirty(handle, inode);
+       ext4_journal_stop(handle);
+
+out_mutex:
+       mutex_unlock(&inode->i_mutex);
+
+       return err;
+}
index 387c47c..c0fd1a1 100644 (file)
@@ -545,7 +545,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
                return ret;
 
 retry:
-       handle = ext4_journal_start(inode, needed_blocks);
+       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                handle = NULL;
@@ -657,7 +657,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
         * The possible write could happen in the inode,
         * so try to reserve the space in inode first.
         */
-       handle = ext4_journal_start(inode, 1);
+       handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                handle = NULL;
@@ -853,7 +853,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
        if (ret)
                return ret;
 
-       handle = ext4_journal_start(inode, 1);
+       handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                handle = NULL;
@@ -1188,7 +1188,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
 
        data_bh = sb_getblk(inode->i_sb, map.m_pblk);
        if (!data_bh) {
-               error = -EIO;
+               error = -ENOMEM;
                goto out_restore;
        }
 
@@ -1298,7 +1298,7 @@ int ext4_read_inline_dir(struct file *filp,
        int i, stored;
        struct ext4_dir_entry_2 *de;
        struct super_block *sb;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int ret, inline_size = 0;
        struct ext4_iloc iloc;
        void *dir_buf = NULL;
@@ -1770,7 +1770,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 
 
        needed_blocks = ext4_writepage_trans_blocks(inode);
-       handle = ext4_journal_start(inode, needed_blocks);
+       handle = ext4_journal_start(inode, EXT4_HT_INODE, needed_blocks);
        if (IS_ERR(handle))
                return;
 
@@ -1862,7 +1862,7 @@ int ext4_convert_inline_data(struct inode *inode)
        if (error)
                return error;
 
-       handle = ext4_journal_start(inode, needed_blocks);
+       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
        if (IS_ERR(handle)) {
                error = PTR_ERR(handle);
                goto out_free;
index cd818d8..9ea0cde 100644 (file)
@@ -132,10 +132,6 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
 }
 
 static void ext4_invalidatepage(struct page *page, unsigned long offset);
-static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
-                                  struct buffer_head *bh_result, int create);
-static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
-static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
 static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
@@ -238,7 +234,8 @@ void ext4_evict_inode(struct inode *inode)
         * protection against it
         */
        sb_start_intwrite(inode->i_sb);
-       handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
+                                   ext4_blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
                /*
@@ -346,7 +343,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
        spin_lock(&ei->i_block_reservation_lock);
        trace_ext4_da_update_reserve_space(inode, used, quota_claim);
        if (unlikely(used > ei->i_reserved_data_blocks)) {
-               ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
+               ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
                         "with only %d reserved data blocks",
                         __func__, inode->i_ino, used,
                         ei->i_reserved_data_blocks);
@@ -355,10 +352,12 @@ void ext4_da_update_reserve_space(struct inode *inode,
        }
 
        if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-               ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
-                        "with only %d reserved metadata blocks\n", __func__,
-                        inode->i_ino, ei->i_allocated_meta_blocks,
-                        ei->i_reserved_meta_blocks);
+               ext4_warning(inode->i_sb, "ino %lu, allocated %d "
+                       "with only %d reserved metadata blocks "
+                       "(releasing %d blocks with reserved %d data blocks)",
+                       inode->i_ino, ei->i_allocated_meta_blocks,
+                            ei->i_reserved_meta_blocks, used,
+                            ei->i_reserved_data_blocks);
                WARN_ON(1);
                ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
        }
@@ -508,12 +507,33 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
 int ext4_map_blocks(handle_t *handle, struct inode *inode,
                    struct ext4_map_blocks *map, int flags)
 {
+       struct extent_status es;
        int retval;
 
        map->m_flags = 0;
        ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
                  "logical block %lu\n", inode->i_ino, flags, map->m_len,
                  (unsigned long) map->m_lblk);
+
+       /* Lookup extent status tree firstly */
+       if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
+               if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
+                       map->m_pblk = ext4_es_pblock(&es) +
+                                       map->m_lblk - es.es_lblk;
+                       map->m_flags |= ext4_es_is_written(&es) ?
+                                       EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
+                       retval = es.es_len - (map->m_lblk - es.es_lblk);
+                       if (retval > map->m_len)
+                               retval = map->m_len;
+                       map->m_len = retval;
+               } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
+                       retval = 0;
+               } else {
+                       BUG_ON(1);
+               }
+               goto found;
+       }
+
        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
@@ -527,20 +547,27 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                retval = ext4_ind_map_blocks(handle, inode, map, flags &
                                             EXT4_GET_BLOCKS_KEEP_SIZE);
        }
+       if (retval > 0) {
+               int ret;
+               unsigned long long status;
+
+               status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+                               EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+               if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+                   ext4_find_delalloc_range(inode, map->m_lblk,
+                                            map->m_lblk + map->m_len - 1))
+                       status |= EXTENT_STATUS_DELAYED;
+               ret = ext4_es_insert_extent(inode, map->m_lblk,
+                                           map->m_len, map->m_pblk, status);
+               if (ret < 0)
+                       retval = ret;
+       }
        if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
                up_read((&EXT4_I(inode)->i_data_sem));
 
+found:
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
-               int ret;
-               if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
-                       /* delayed alloc may be allocated by fallocate and
-                        * coverted to initialized by directIO.
-                        * we need to handle delayed extent here.
-                        */
-                       down_write((&EXT4_I(inode)->i_data_sem));
-                       goto delayed_mapped;
-               }
-               ret = check_block_validity(inode, map);
+               int ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }
@@ -560,16 +587,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                return retval;
 
        /*
-        * When we call get_blocks without the create flag, the
-        * BH_Unwritten flag could have gotten set if the blocks
-        * requested were part of a uninitialized extent.  We need to
-        * clear this flag now that we are committed to convert all or
-        * part of the uninitialized extent to be an initialized
-        * extent.  This is because we need to avoid the combination
-        * of BH_Unwritten and BH_Mapped flags being simultaneously
-        * set on the buffer_head.
+        * Here we clear m_flags because after allocating an new extent,
+        * it will be set again.
         */
-       map->m_flags &= ~EXT4_MAP_UNWRITTEN;
+       map->m_flags &= ~EXT4_MAP_FLAGS;
 
        /*
         * New blocks allocate and/or writing to uninitialized extent
@@ -615,18 +636,23 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                        (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
                        ext4_da_update_reserve_space(inode, retval, 1);
        }
-       if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
+       if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
-               if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
-                       int ret;
-delayed_mapped:
-                       /* delayed allocation blocks has been allocated */
-                       ret = ext4_es_remove_extent(inode, map->m_lblk,
-                                                   map->m_len);
-                       if (ret < 0)
-                               retval = ret;
-               }
+       if (retval > 0) {
+               int ret;
+               unsigned long long status;
+
+               status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+                               EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+               if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+                   ext4_find_delalloc_range(inode, map->m_lblk,
+                                            map->m_lblk + map->m_len - 1))
+                       status |= EXTENT_STATUS_DELAYED;
+               ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+                                           map->m_pblk, status);
+               if (ret < 0)
+                       retval = ret;
        }
 
        up_write((&EXT4_I(inode)->i_data_sem));
@@ -660,7 +686,8 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
                if (map.m_len > DIO_MAX_BLOCKS)
                        map.m_len = DIO_MAX_BLOCKS;
                dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
-               handle = ext4_journal_start(inode, dio_credits);
+               handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+                                           dio_credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        return ret;
@@ -707,14 +734,16 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
        /* ensure we send some value back into *errp */
        *errp = 0;
 
+       if (create && err == 0)
+               err = -ENOSPC;  /* should never happen */
        if (err < 0)
                *errp = err;
        if (err <= 0)
                return NULL;
 
        bh = sb_getblk(inode->i_sb, map.m_pblk);
-       if (!bh) {
-               *errp = -EIO;
+       if (unlikely(!bh)) {
+               *errp = -ENOMEM;
                return NULL;
        }
        if (map.m_flags & EXT4_MAP_NEW) {
@@ -808,11 +837,10 @@ int ext4_walk_page_buffers(handle_t *handle,
  * and the commit_write().  So doing the jbd2_journal_start at the start of
  * prepare_write() is the right place.
  *
- * Also, this function can nest inside ext4_writepage() ->
- * block_write_full_page(). In that case, we *know* that ext4_writepage()
- * has generated enough buffer credits to do the whole page.  So we won't
- * block on the journal in that case, which is good, because the caller may
- * be PF_MEMALLOC.
+ * Also, this function can nest inside ext4_writepage().  In that case, we
+ * *know* that ext4_writepage() has generated enough buffer credits to do the
+ * whole page.  So we won't block on the journal in that case, which is good,
+ * because the caller may be PF_MEMALLOC.
  *
  * By accident, ext4 can be reentered when a transaction is open via
  * quota file writes.  If we were to commit the transaction while thus
@@ -878,32 +906,40 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
                ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
                                                    flags, pagep);
                if (ret < 0)
-                       goto out;
-               if (ret == 1) {
-                       ret = 0;
-                       goto out;
-               }
+                       return ret;
+               if (ret == 1)
+                       return 0;
        }
 
-retry:
-       handle = ext4_journal_start(inode, needed_blocks);
+       /*
+        * grab_cache_page_write_begin() can take a long time if the
+        * system is thrashing due to memory pressure, or if the page
+        * is being written back.  So grab it first before we start
+        * the transaction handle.  This also allows us to allocate
+        * the page (if needed) without using GFP_NOFS.
+        */
+retry_grab:
+       page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       unlock_page(page);
+
+retry_journal:
+       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
        if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               goto out;
+               page_cache_release(page);
+               return PTR_ERR(handle);
        }
 
-       /* We cannot recurse into the filesystem as the transaction is already
-        * started */
-       flags |= AOP_FLAG_NOFS;
-
-       page = grab_cache_page_write_begin(mapping, index, flags);
-       if (!page) {
+       lock_page(page);
+       if (page->mapping != mapping) {
+               /* The page got truncated from under us */
+               unlock_page(page);
+               page_cache_release(page);
                ext4_journal_stop(handle);
-               ret = -ENOMEM;
-               goto out;
+               goto retry_grab;
        }
-
-       *pagep = page;
+       wait_on_page_writeback(page);
 
        if (ext4_should_dioread_nolock(inode))
                ret = __block_write_begin(page, pos, len, ext4_get_block_write);
@@ -918,7 +954,6 @@ retry:
 
        if (ret) {
                unlock_page(page);
-               page_cache_release(page);
                /*
                 * __block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
@@ -942,11 +977,14 @@ retry:
                        if (inode->i_nlink)
                                ext4_orphan_del(NULL, inode);
                }
-       }
 
-       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
-               goto retry;
-out:
+               if (ret == -ENOSPC &&
+                   ext4_should_retry_alloc(inode->i_sb, &retries))
+                       goto retry_journal;
+               page_cache_release(page);
+               return ret;
+       }
+       *pagep = page;
        return ret;
 }
 
@@ -1256,7 +1294,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
                 * function is called from invalidate page, it's
                 * harmless to return without any action.
                 */
-               ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
+               ext4_warning(inode->i_sb, "ext4_da_release_space: "
                         "ino %lu, to_free %d with only %d reserved "
                         "data blocks", inode->i_ino, to_free,
                         ei->i_reserved_data_blocks);
@@ -1357,7 +1395,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
        loff_t size = i_size_read(inode);
        unsigned int len, block_start;
        struct buffer_head *bh, *page_bufs = NULL;
-       int journal_data = ext4_should_journal_data(inode);
        sector_t pblock = 0, cur_logical = 0;
        struct ext4_io_submit io_submit;
 
@@ -1378,7 +1415,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
-                       int commit_write = 0, skip_page = 0;
+                       int skip_page = 0;
                        struct page *page = pvec.pages[i];
 
                        index = page->index;
@@ -1400,27 +1437,9 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                        BUG_ON(!PageLocked(page));
                        BUG_ON(PageWriteback(page));
 
-                       /*
-                        * If the page does not have buffers (for
-                        * whatever reason), try to create them using
-                        * __block_write_begin.  If this fails,
-                        * skip the page and move on.
-                        */
-                       if (!page_has_buffers(page)) {
-                               if (__block_write_begin(page, 0, len,
-                                               noalloc_get_block_write)) {
-                               skip_page:
-                                       unlock_page(page);
-                                       continue;
-                               }
-                               commit_write = 1;
-                       }
-
                        bh = page_bufs = page_buffers(page);
                        block_start = 0;
                        do {
-                               if (!bh)
-                                       goto skip_page;
                                if (map && (cur_logical >= map->m_lblk) &&
                                    (cur_logical <= (map->m_lblk +
                                                     (map->m_len - 1)))) {
@@ -1448,33 +1467,14 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                                pblock++;
                        } while (bh != page_bufs);
 
-                       if (skip_page)
-                               goto skip_page;
-
-                       if (commit_write)
-                               /* mark the buffer_heads as dirty & uptodate */
-                               block_commit_write(page, 0, len);
+                       if (skip_page) {
+                               unlock_page(page);
+                               continue;
+                       }
 
                        clear_page_dirty_for_io(page);
-                       /*
-                        * Delalloc doesn't support data journalling,
-                        * but eventually maybe we'll lift this
-                        * restriction.
-                        */
-                       if (unlikely(journal_data && PageChecked(page)))
-                               err = __ext4_journalled_writepage(page, len);
-                       else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
-                               err = ext4_bio_write_page(&io_submit, page,
-                                                         len, mpd->wbc);
-                       else if (buffer_uninit(page_bufs)) {
-                               ext4_set_bh_endio(page_bufs, inode);
-                               err = block_write_full_page_endio(page,
-                                       noalloc_get_block_write,
-                                       mpd->wbc, ext4_end_io_buffer_write);
-                       } else
-                               err = block_write_full_page(page,
-                                       noalloc_get_block_write, mpd->wbc);
-
+                       err = ext4_bio_write_page(&io_submit, page, len,
+                                                 mpd->wbc);
                        if (!err)
                                mpd->pages_written++;
                        /*
@@ -1640,7 +1640,7 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
                                 (unsigned long long) next,
                                 mpd->b_size >> mpd->inode->i_blkbits, err);
                        ext4_msg(sb, KERN_CRIT,
-                               "This should not happen!! Data will be lost\n");
+                               "This should not happen!! Data will be lost");
                        if (err == -ENOSPC)
                                ext4_print_free_blocks(mpd->inode);
                }
@@ -1690,16 +1690,16 @@ submit_io:
  *
  * @mpd->lbh - extent of blocks
  * @logical - logical number of the block in the file
- * @bh - bh of the block (used to access block's state)
+ * @b_state - b_state of the buffer head added
  *
  * the function is used to collect contig. blocks in same state
  */
-static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
-                                  sector_t logical, size_t b_size,
+static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical,
                                   unsigned long b_state)
 {
        sector_t next;
-       int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
+       int blkbits = mpd->inode->i_blkbits;
+       int nrblocks = mpd->b_size >> blkbits;
 
        /*
         * XXX Don't go larger than mballoc is willing to allocate
@@ -1707,11 +1707,11 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
         * mpage_da_submit_io() into this function and then call
         * ext4_map_blocks() multiple times in a loop
         */
-       if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
+       if (nrblocks >= (8*1024*1024 >> blkbits))
                goto flush_it;
 
-       /* check if thereserved journal credits might overflow */
-       if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
+       /* check if the reserved journal credits might overflow */
+       if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) {
                if (nrblocks >= EXT4_MAX_TRANS_DATA) {
                        /*
                         * With non-extent format we are limited by the journal
@@ -1720,16 +1720,6 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
                         * nrblocks.  So limit nrblocks.
                         */
                        goto flush_it;
-               } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
-                               EXT4_MAX_TRANS_DATA) {
-                       /*
-                        * Adding the new buffer_head would make it cross the
-                        * allowed limit for which we have journal credit
-                        * reserved. So limit the new bh->b_size
-                        */
-                       b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
-                                               mpd->inode->i_blkbits;
-                       /* we will do mpage_da_submit_io in the next loop */
                }
        }
        /*
@@ -1737,7 +1727,7 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
         */
        if (mpd->b_size == 0) {
                mpd->b_blocknr = logical;
-               mpd->b_size = b_size;
+               mpd->b_size = 1 << blkbits;
                mpd->b_state = b_state & BH_FLAGS;
                return;
        }
@@ -1747,7 +1737,7 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
         * Can we merge the block to our big extent?
         */
        if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
-               mpd->b_size += b_size;
+               mpd->b_size += 1 << blkbits;
                return;
        }
 
@@ -1775,6 +1765,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                              struct ext4_map_blocks *map,
                              struct buffer_head *bh)
 {
+       struct extent_status es;
        int retval;
        sector_t invalid_block = ~((sector_t) 0xffff);
 
@@ -1785,6 +1776,42 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
        ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
                  "logical block %lu\n", inode->i_ino, map->m_len,
                  (unsigned long) map->m_lblk);
+
+       /* Lookup extent status tree firstly */
+       if (ext4_es_lookup_extent(inode, iblock, &es)) {
+
+               if (ext4_es_is_hole(&es)) {
+                       retval = 0;
+                       down_read((&EXT4_I(inode)->i_data_sem));
+                       goto add_delayed;
+               }
+
+               /*
+                * Delayed extent could be allocated by fallocate.
+                * So we need to check it.
+                */
+               if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
+                       map_bh(bh, inode->i_sb, invalid_block);
+                       set_buffer_new(bh);
+                       set_buffer_delay(bh);
+                       return 0;
+               }
+
+               map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
+               retval = es.es_len - (iblock - es.es_lblk);
+               if (retval > map->m_len)
+                       retval = map->m_len;
+               map->m_len = retval;
+               if (ext4_es_is_written(&es))
+                       map->m_flags |= EXT4_MAP_MAPPED;
+               else if (ext4_es_is_unwritten(&es))
+                       map->m_flags |= EXT4_MAP_UNWRITTEN;
+               else
+                       BUG_ON(1);
+
+               return retval;
+       }
+
        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
@@ -1803,11 +1830,15 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                        map->m_flags |= EXT4_MAP_FROM_CLUSTER;
                retval = 0;
        } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-               retval = ext4_ext_map_blocks(NULL, inode, map, 0);
+               retval = ext4_ext_map_blocks(NULL, inode, map,
+                                            EXT4_GET_BLOCKS_NO_PUT_HOLE);
        else
-               retval = ext4_ind_map_blocks(NULL, inode, map, 0);
+               retval = ext4_ind_map_blocks(NULL, inode, map,
+                                            EXT4_GET_BLOCKS_NO_PUT_HOLE);
 
+add_delayed:
        if (retval == 0) {
+               int ret;
                /*
                 * XXX: __block_prepare_write() unmaps passed block,
                 * is it OK?
@@ -1815,15 +1846,20 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                /* If the block was allocated from previously allocated cluster,
                 * then we dont need to reserve it again. */
                if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
-                       retval = ext4_da_reserve_space(inode, iblock);
-                       if (retval)
+                       ret = ext4_da_reserve_space(inode, iblock);
+                       if (ret) {
                                /* not enough space to reserve */
+                               retval = ret;
                                goto out_unlock;
+                       }
                }
 
-               retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len);
-               if (retval)
+               ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+                                           ~0, EXTENT_STATUS_DELAYED);
+               if (ret) {
+                       retval = ret;
                        goto out_unlock;
+               }
 
                /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
                 * and it should not appear on the bh->b_state.
@@ -1833,6 +1869,16 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                map_bh(bh, inode->i_sb, invalid_block);
                set_buffer_new(bh);
                set_buffer_delay(bh);
+       } else if (retval > 0) {
+               int ret;
+               unsigned long long status;
+
+               status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+                               EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+               ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+                                           map->m_pblk, status);
+               if (ret != 0)
+                       retval = ret;
        }
 
 out_unlock:
@@ -1890,27 +1936,6 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
        return 0;
 }
 
-/*
- * This function is used as a standard get_block_t calback function
- * when there is no desire to allocate any blocks.  It is used as a
- * callback function for block_write_begin() and block_write_full_page().
- * These functions should only try to map a single block at a time.
- *
- * Since this function doesn't do block allocations even if the caller
- * requests it by passing in create=1, it is critically important that
- * any caller checks to make sure that any buffer heads are returned
- * by this function are either all already mapped or marked for
- * delayed allocation before calling  block_write_full_page().  Otherwise,
- * b_blocknr could be left unitialized, and the page write functions will
- * be taken by surprise.
- */
-static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
-                                  struct buffer_head *bh_result, int create)
-{
-       BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
-       return _ext4_get_block(inode, iblock, bh_result, 0);
-}
-
 static int bget_one(handle_t *handle, struct buffer_head *bh)
 {
        get_bh(bh);
@@ -1955,7 +1980,8 @@ static int __ext4_journalled_writepage(struct page *page,
         * references to buffers so we are safe */
        unlock_page(page);
 
-       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+                                   ext4_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
@@ -2035,11 +2061,12 @@ out:
 static int ext4_writepage(struct page *page,
                          struct writeback_control *wbc)
 {
-       int ret = 0, commit_write = 0;
+       int ret = 0;
        loff_t size;
        unsigned int len;
        struct buffer_head *page_bufs = NULL;
        struct inode *inode = page->mapping->host;
+       struct ext4_io_submit io_submit;
 
        trace_ext4_writepage(page);
        size = i_size_read(inode);
@@ -2048,39 +2075,29 @@ static int ext4_writepage(struct page *page,
        else
                len = PAGE_CACHE_SIZE;
 
+       page_bufs = page_buffers(page);
        /*
-        * If the page does not have buffers (for whatever reason),
-        * try to create them using __block_write_begin.  If this
-        * fails, redirty the page and move on.
+        * We cannot do block allocation or other extent handling in this
+        * function. If there are buffers needing that, we have to redirty
+        * the page. But we may reach here when we do a journal commit via
+        * journal_submit_inode_data_buffers() and in that case we must write
+        * allocated buffers to achieve data=ordered mode guarantees.
         */
-       if (!page_has_buffers(page)) {
-               if (__block_write_begin(page, 0, len,
-                                       noalloc_get_block_write)) {
-               redirty_page:
-                       redirty_page_for_writepage(wbc, page);
+       if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
+                                  ext4_bh_delay_or_unwritten)) {
+               redirty_page_for_writepage(wbc, page);
+               if (current->flags & PF_MEMALLOC) {
+                       /*
+                        * For memory cleaning there's no point in writing only
+                        * some buffers. So just bail out. Warn if we came here
+                        * from direct reclaim.
+                        */
+                       WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
+                                                       == PF_MEMALLOC);
                        unlock_page(page);
                        return 0;
                }
-               commit_write = 1;
-       }
-       page_bufs = page_buffers(page);
-       if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                  ext4_bh_delay_or_unwritten)) {
-               /*
-                * We don't want to do block allocation, so redirty
-                * the page and return.  We may reach here when we do
-                * a journal commit via journal_submit_inode_data_buffers.
-                * We can also reach here via shrink_page_list but it
-                * should never be for direct reclaim so warn if that
-                * happens
-                */
-               WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
-                                                               PF_MEMALLOC);
-               goto redirty_page;
        }
-       if (commit_write)
-               /* now mark the buffer_heads as dirty and uptodate */
-               block_commit_write(page, 0, len);
 
        if (PageChecked(page) && ext4_should_journal_data(inode))
                /*
@@ -2089,14 +2106,9 @@ static int ext4_writepage(struct page *page,
                 */
                return __ext4_journalled_writepage(page, len);
 
-       if (buffer_uninit(page_bufs)) {
-               ext4_set_bh_endio(page_bufs, inode);
-               ret = block_write_full_page_endio(page, noalloc_get_block_write,
-                                           wbc, ext4_end_io_buffer_write);
-       } else
-               ret = block_write_full_page(page, noalloc_get_block_write,
-                                           wbc);
-
+       memset(&io_submit, 0, sizeof(io_submit));
+       ret = ext4_bio_write_page(&io_submit, page, len, wbc);
+       ext4_io_submit(&io_submit);
        return ret;
 }
 
@@ -2228,51 +2240,38 @@ static int write_cache_pages_da(handle_t *handle,
                        logical = (sector_t) page->index <<
                                (PAGE_CACHE_SHIFT - inode->i_blkbits);
 
-                       if (!page_has_buffers(page)) {
-                               mpage_add_bh_to_extent(mpd, logical,
-                                                      PAGE_CACHE_SIZE,
-                                                      (1 << BH_Dirty) | (1 << BH_Uptodate));
-                               if (mpd->io_done)
-                                       goto ret_extent_tail;
-                       } else {
+                       /* Add all dirty buffers to mpd */
+                       head = page_buffers(page);
+                       bh = head;
+                       do {
+                               BUG_ON(buffer_locked(bh));
                                /*
-                                * Page with regular buffer heads,
-                                * just add all dirty ones
+                                * We need to try to allocate unmapped blocks
+                                * in the same page.  Otherwise we won't make
+                                * progress with the page in ext4_writepage
                                 */
-                               head = page_buffers(page);
-                               bh = head;
-                               do {
-                                       BUG_ON(buffer_locked(bh));
+                               if (ext4_bh_delay_or_unwritten(NULL, bh)) {
+                                       mpage_add_bh_to_extent(mpd, logical,
+                                                              bh->b_state);
+                                       if (mpd->io_done)
+                                               goto ret_extent_tail;
+                               } else if (buffer_dirty(bh) &&
+                                          buffer_mapped(bh)) {
                                        /*
-                                        * We need to try to allocate
-                                        * unmapped blocks in the same page.
-                                        * Otherwise we won't make progress
-                                        * with the page in ext4_writepage
+                                        * mapped dirty buffer. We need to
+                                        * update the b_state because we look
+                                        * at b_state in mpage_da_map_blocks.
+                                        * We don't update b_size because if we
+                                        * find an unmapped buffer_head later
+                                        * we need to use the b_state flag of
+                                        * that buffer_head.
                                         */
-                                       if (ext4_bh_delay_or_unwritten(NULL, bh)) {
-                                               mpage_add_bh_to_extent(mpd, logical,
-                                                                      bh->b_size,
-                                                                      bh->b_state);
-                                               if (mpd->io_done)
-                                                       goto ret_extent_tail;
-                                       } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
-                                               /*
-                                                * mapped dirty buffer. We need
-                                                * to update the b_state
-                                                * because we look at b_state
-                                                * in mpage_da_map_blocks.  We
-                                                * don't update b_size because
-                                                * if we find an unmapped
-                                                * buffer_head later we need to
-                                                * use the b_state flag of that
-                                                * buffer_head.
-                                                */
-                                               if (mpd->b_size == 0)
-                                                       mpd->b_state = bh->b_state & BH_FLAGS;
-                                       }
-                                       logical++;
-                               } while ((bh = bh->b_this_page) != head);
-                       }
+                                       if (mpd->b_size == 0)
+                                               mpd->b_state =
+                                                       bh->b_state & BH_FLAGS;
+                               }
+                               logical++;
+                       } while ((bh = bh->b_this_page) != head);
 
                        if (nr_to_write > 0) {
                                nr_to_write--;
@@ -2413,7 +2412,8 @@ retry:
                needed_blocks = ext4_da_writepages_trans_blocks(inode);
 
                /* start a new transaction*/
-               handle = ext4_journal_start(inode, needed_blocks);
+               handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+                                           needed_blocks);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
@@ -2512,12 +2512,8 @@ static int ext4_nonda_switch(struct super_block *sb)
        /*
         * Start pushing delalloc when 1/2 of free blocks are dirty.
         */
-       if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
-           !writeback_in_progress(sb->s_bdi) &&
-           down_read_trylock(&sb->s_umount)) {
-               writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
-               up_read(&sb->s_umount);
-       }
+       if (dirty_blocks && (free_blocks < 2 * dirty_blocks))
+               try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
 
        if (2 * free_blocks < 3 * dirty_blocks ||
                free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
@@ -2555,42 +2551,52 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                                                      pos, len, flags,
                                                      pagep, fsdata);
                if (ret < 0)
-                       goto out;
-               if (ret == 1) {
-                       ret = 0;
-                       goto out;
-               }
+                       return ret;
+               if (ret == 1)
+                       return 0;
        }
 
-retry:
+       /*
+        * grab_cache_page_write_begin() can take a long time if the
+        * system is thrashing due to memory pressure, or if the page
+        * is being written back.  So grab it first before we start
+        * the transaction handle.  This also allows us to allocate
+        * the page (if needed) without using GFP_NOFS.
+        */
+retry_grab:
+       page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       unlock_page(page);
+
        /*
         * With delayed allocation, we don't log the i_disksize update
         * if there is delayed block allocation. But we still need
         * to journalling the i_disksize update if writes to the end
         * of file which has an already mapped buffer.
         */
-       handle = ext4_journal_start(inode, 1);
+retry_journal:
+       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
        if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               goto out;
+               page_cache_release(page);
+               return PTR_ERR(handle);
        }
-       /* We cannot recurse into the filesystem as the transaction is already
-        * started */
-       flags |= AOP_FLAG_NOFS;
 
-       page = grab_cache_page_write_begin(mapping, index, flags);
-       if (!page) {
+       lock_page(page);
+       if (page->mapping != mapping) {
+               /* The page got truncated from under us */
+               unlock_page(page);
+               page_cache_release(page);
                ext4_journal_stop(handle);
-               ret = -ENOMEM;
-               goto out;
+               goto retry_grab;
        }
-       *pagep = page;
+       /* In case writeback began while the page was unlocked */
+       wait_on_page_writeback(page);
 
        ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
        if (ret < 0) {
                unlock_page(page);
                ext4_journal_stop(handle);
-               page_cache_release(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
@@ -2598,11 +2604,16 @@ retry:
                 */
                if (pos + len > inode->i_size)
                        ext4_truncate_failed_write(inode);
+
+               if (ret == -ENOSPC &&
+                   ext4_should_retry_alloc(inode->i_sb, &retries))
+                       goto retry_journal;
+
+               page_cache_release(page);
+               return ret;
        }
 
-       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
-               goto retry;
-out:
+       *pagep = page;
        return ret;
 }
 
@@ -2858,36 +2869,10 @@ ext4_readpages(struct file *file, struct address_space *mapping,
        return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
 }
 
-static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
-{
-       struct buffer_head *head, *bh;
-       unsigned int curr_off = 0;
-
-       if (!page_has_buffers(page))
-               return;
-       head = bh = page_buffers(page);
-       do {
-               if (offset <= curr_off && test_clear_buffer_uninit(bh)
-                                       && bh->b_private) {
-                       ext4_free_io_end(bh->b_private);
-                       bh->b_private = NULL;
-                       bh->b_end_io = NULL;
-               }
-               curr_off = curr_off + bh->b_size;
-               bh = bh->b_this_page;
-       } while (bh != head);
-}
-
 static void ext4_invalidatepage(struct page *page, unsigned long offset)
 {
        trace_ext4_invalidatepage(page, offset);
 
-       /*
-        * free any io_end structure allocated for buffers to be discarded
-        */
-       if (ext4_should_dioread_nolock(page->mapping->host))
-               ext4_invalidatepage_free_endio(page, offset);
-
        /* No journalling happens on data buffers when this function is used */
        WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
 
@@ -2959,7 +2944,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private, int ret,
                            bool is_async)
 {
-       struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(iocb->ki_filp);
         ext4_io_end_t *io_end = iocb->private;
 
        /* if not async direct IO or dio with 0 bytes write, just return */
@@ -2977,9 +2962,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
 out:
+               inode_dio_done(inode);
                if (is_async)
                        aio_complete(iocb, ret, 0);
-               inode_dio_done(inode);
                return;
        }
 
@@ -2993,65 +2978,6 @@ out:
        ext4_add_complete_io(io_end);
 }
 
-static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
-{
-       ext4_io_end_t *io_end = bh->b_private;
-       struct inode *inode;
-
-       if (!test_clear_buffer_uninit(bh) || !io_end)
-               goto out;
-
-       if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
-               ext4_msg(io_end->inode->i_sb, KERN_INFO,
-                        "sb umounted, discard end_io request for inode %lu",
-                        io_end->inode->i_ino);
-               ext4_free_io_end(io_end);
-               goto out;
-       }
-
-       /*
-        * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
-        * but being more careful is always safe for the future change.
-        */
-       inode = io_end->inode;
-       ext4_set_io_unwritten_flag(inode, io_end);
-       ext4_add_complete_io(io_end);
-out:
-       bh->b_private = NULL;
-       bh->b_end_io = NULL;
-       clear_buffer_uninit(bh);
-       end_buffer_async_write(bh, uptodate);
-}
-
-static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
-{
-       ext4_io_end_t *io_end;
-       struct page *page = bh->b_page;
-       loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
-       size_t size = bh->b_size;
-
-retry:
-       io_end = ext4_init_io_end(inode, GFP_ATOMIC);
-       if (!io_end) {
-               pr_warn_ratelimited("%s: allocation fail\n", __func__);
-               schedule();
-               goto retry;
-       }
-       io_end->offset = offset;
-       io_end->size = size;
-       /*
-        * We need to hold a reference to the page to make sure it
-        * doesn't get evicted before ext4_end_io_work() has a chance
-        * to convert the extent from written to unwritten.
-        */
-       io_end->page = page;
-       get_page(io_end->page);
-
-       bh->b_private = io_end;
-       bh->b_end_io = ext4_end_io_buffer_write;
-       return 0;
-}
-
 /*
  * For ext4 extent files, ext4 will do direct-io write to holes,
  * preallocated extents, and those write extend the file, no need to
@@ -3553,20 +3479,20 @@ int ext4_can_truncate(struct inode *inode)
 
 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;
 
-       if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
-               /* TODO: Add support for non extent hole punching */
-               return -EOPNOTSUPP;
-       }
+       if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+               return ext4_ind_punch_hole(file, offset, length);
 
        if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
                /* TODO: Add support for bigalloc file systems */
                return -EOPNOTSUPP;
        }
 
+       trace_ext4_punch_hole(inode, offset, length);
+
        return ext4_ext_punch_hole(file, offset, length);
 }
 
@@ -3660,11 +3586,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
        iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
 
        bh = sb_getblk(sb, block);
-       if (!bh) {
-               EXT4_ERROR_INODE_BLOCK(inode, block,
-                                      "unable to read itable block");
-               return -EIO;
-       }
+       if (unlikely(!bh))
+               return -ENOMEM;
        if (!buffer_uptodate(bh)) {
                lock_buffer(bh);
 
@@ -3696,7 +3619,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 
                        /* Is the inode bitmap in cache? */
                        bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
-                       if (!bitmap_bh)
+                       if (unlikely(!bitmap_bh))
                                goto make_io;
 
                        /*
@@ -4404,8 +4327,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 
                /* (user+group)*(old+new) structure, inode write (sb,
                 * inode block, ? - but truncate inode update has it) */
-               handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
-                                       EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
+               handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
+                       (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
+                        EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
                if (IS_ERR(handle)) {
                        error = PTR_ERR(handle);
                        goto err_out;
@@ -4440,7 +4364,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
            (attr->ia_size < inode->i_size)) {
                handle_t *handle;
 
-               handle = ext4_journal_start(inode, 3);
+               handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
                if (IS_ERR(handle)) {
                        error = PTR_ERR(handle);
                        goto err_out;
@@ -4460,7 +4384,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                                                            attr->ia_size);
                        if (error) {
                                /* Do as much error cleanup as possible */
-                               handle = ext4_journal_start(inode, 3);
+                               handle = ext4_journal_start(inode,
+                                                           EXT4_HT_INODE, 3);
                                if (IS_ERR(handle)) {
                                        ext4_orphan_del(NULL, inode);
                                        goto err_out;
@@ -4801,7 +4726,7 @@ void ext4_dirty_inode(struct inode *inode, int flags)
 {
        handle_t *handle;
 
-       handle = ext4_journal_start(inode, 2);
+       handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
        if (IS_ERR(handle))
                goto out;
 
@@ -4902,7 +4827,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 
        /* Finally we can mark the inode as dirty. */
 
-       handle = ext4_journal_start(inode, 1);
+       handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
@@ -4926,7 +4851,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        unsigned long len;
        int ret;
        struct file *file = vma->vm_file;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct address_space *mapping = inode->i_mapping;
        handle_t *handle;
        get_block_t *get_block;
@@ -4980,7 +4905,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        else
                get_block = ext4_get_block;
 retry_alloc:
-       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+       handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+                                   ext4_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = VM_FAULT_SIGBUS;
                goto out;
index 5747f52..721f4d3 100644 (file)
@@ -22,7 +22,7 @@
 
 long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int flags;
@@ -104,7 +104,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                } else if (oldflags & EXT4_EOFBLOCKS_FL)
                        ext4_truncate(inode);
 
-               handle = ext4_journal_start(inode, 1);
+               handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
                if (IS_ERR(handle)) {
                        err = PTR_ERR(handle);
                        goto flags_out;
@@ -173,7 +173,7 @@ flags_out:
                }
 
                mutex_lock(&inode->i_mutex);
-               handle = ext4_journal_start(inode, 1);
+               handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
                if (IS_ERR(handle)) {
                        err = PTR_ERR(handle);
                        goto unlock_out;
@@ -313,6 +313,9 @@ mext_out:
                if (err == 0)
                        err = err2;
                mnt_drop_write_file(filp);
+               if (!err && ext4_has_group_desc_csum(sb) &&
+                   test_opt(sb, INIT_INODE_TABLE))
+                       err = ext4_register_li_request(sb, input.group);
 group_add_out:
                ext4_resize_end(sb);
                return err;
@@ -358,6 +361,7 @@ group_add_out:
                ext4_fsblk_t n_blocks_count;
                struct super_block *sb = inode->i_sb;
                int err = 0, err2 = 0;
+               ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;
 
                if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                               EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
@@ -388,6 +392,11 @@ group_add_out:
                if (err == 0)
                        err = err2;
                mnt_drop_write_file(filp);
+               if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
+                   ext4_has_group_desc_csum(sb) &&
+                   test_opt(sb, INIT_INODE_TABLE))
+                       err = ext4_register_li_request(sb, o_group);
+
 resizefs_out:
                ext4_resize_end(sb);
                return err;
index 1bf6fe7..7bb713a 100644 (file)
 
 #include "ext4_jbd2.h"
 #include "mballoc.h"
-#include <linux/debugfs.h>
 #include <linux/log2.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <trace/events/ext4.h>
 
+#ifdef CONFIG_EXT4_DEBUG
+ushort ext4_mballoc_debug __read_mostly;
+
+module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
+MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
+#endif
+
 /*
  * MUSTDO:
  *   - test ext4_ext_search_left() and ext4_ext_search_right()
@@ -1884,15 +1891,19 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
        case 0:
                BUG_ON(ac->ac_2order == 0);
 
-               if (grp->bb_largest_free_order < ac->ac_2order)
-                       return 0;
-
                /* Avoid using the first bg of a flexgroup for data files */
                if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
                    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
                    ((group % flex_size) == 0))
                        return 0;
 
+               if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
+                   (free / fragments) >= ac->ac_g_ex.fe_len)
+                       return 1;
+
+               if (grp->bb_largest_free_order < ac->ac_2order)
+                       return 0;
+
                return 1;
        case 1:
                if ((free / fragments) >= ac->ac_g_ex.fe_len)
@@ -2007,7 +2018,7 @@ repeat:
                        }
 
                        ac->ac_groups_scanned++;
-                       if (cr == 0)
+                       if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
                                ext4_mb_simple_scan_group(ac, &e4b);
                        else if (cr == 1 && sbi->s_stripe &&
                                        !(ac->ac_g_ex.fe_len % sbi->s_stripe))
@@ -2656,40 +2667,6 @@ static void ext4_free_data_callback(struct super_block *sb,
        mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
 }
 
-#ifdef CONFIG_EXT4_DEBUG
-u8 mb_enable_debug __read_mostly;
-
-static struct dentry *debugfs_dir;
-static struct dentry *debugfs_debug;
-
-static void __init ext4_create_debugfs_entry(void)
-{
-       debugfs_dir = debugfs_create_dir("ext4", NULL);
-       if (debugfs_dir)
-               debugfs_debug = debugfs_create_u8("mballoc-debug",
-                                                 S_IRUGO | S_IWUSR,
-                                                 debugfs_dir,
-                                                 &mb_enable_debug);
-}
-
-static void ext4_remove_debugfs_entry(void)
-{
-       debugfs_remove(debugfs_debug);
-       debugfs_remove(debugfs_dir);
-}
-
-#else
-
-static void __init ext4_create_debugfs_entry(void)
-{
-}
-
-static void ext4_remove_debugfs_entry(void)
-{
-}
-
-#endif
-
 int __init ext4_init_mballoc(void)
 {
        ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
@@ -2711,7 +2688,6 @@ int __init ext4_init_mballoc(void)
                kmem_cache_destroy(ext4_ac_cachep);
                return -ENOMEM;
        }
-       ext4_create_debugfs_entry();
        return 0;
 }
 
@@ -2726,7 +2702,6 @@ void ext4_exit_mballoc(void)
        kmem_cache_destroy(ext4_ac_cachep);
        kmem_cache_destroy(ext4_free_data_cachep);
        ext4_groupinfo_destroy_slabs();
-       ext4_remove_debugfs_entry();
 }
 
 
@@ -3444,7 +3419,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
                        win = offs;
 
                ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
-                       EXT4_B2C(sbi, win);
+                       EXT4_NUM_B2C(sbi, win);
                BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
                BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
        }
@@ -3872,7 +3847,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
        struct super_block *sb = ac->ac_sb;
        ext4_group_t ngroups, i;
 
-       if (!mb_enable_debug ||
+       if (!ext4_mballoc_debug ||
            (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
                return;
 
@@ -4005,8 +3980,8 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
        len = ar->len;
 
        /* just a dirty hack to filter too big requests  */
-       if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10)
-               len = EXT4_CLUSTERS_PER_GROUP(sb) - 10;
+       if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
+               len = EXT4_CLUSTERS_PER_GROUP(sb);
 
        /* start searching from the goal */
        goal = ar->goal;
@@ -4136,7 +4111,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
                /* The max size of hash table is PREALLOC_TB_SIZE */
                order = PREALLOC_TB_SIZE - 1;
        /* Add the prealloc space to lg */
-       rcu_read_lock();
+       spin_lock(&lg->lg_prealloc_lock);
        list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
                                                pa_inode_list) {
                spin_lock(&tmp_pa->pa_lock);
@@ -4160,12 +4135,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
        if (!added)
                list_add_tail_rcu(&pa->pa_inode_list,
                                        &lg->lg_prealloc_list[order]);
-       rcu_read_unlock();
+       spin_unlock(&lg->lg_prealloc_lock);
 
        /* Now trim the list to be not more than 8 elements */
        if (lg_prealloc_count > 8) {
                ext4_mb_discard_lg_preallocations(sb, lg,
-                                               order, lg_prealloc_count);
+                                                 order, lg_prealloc_count);
                return;
        }
        return ;
@@ -4590,7 +4565,7 @@ do_more:
                        EXT4_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
-       count_clusters = EXT4_B2C(sbi, count);
+       count_clusters = EXT4_NUM_B2C(sbi, count);
        bitmap_bh = ext4_read_block_bitmap(sb, block_group);
        if (!bitmap_bh) {
                err = -EIO;
@@ -4832,11 +4807,11 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
        ext4_group_desc_csum_set(sb, block_group, desc);
        ext4_unlock_group(sb, block_group);
        percpu_counter_add(&sbi->s_freeclusters_counter,
-                          EXT4_B2C(sbi, blocks_freed));
+                          EXT4_NUM_B2C(sbi, blocks_freed));
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-               atomic_add(EXT4_B2C(sbi, blocks_freed),
+               atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
                           &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
index 3ccd889..08481ee 100644 (file)
 /*
  */
 #ifdef CONFIG_EXT4_DEBUG
-extern u8 mb_enable_debug;
+extern ushort ext4_mballoc_debug;
 
 #define mb_debug(n, fmt, a...)                                         \
        do {                                                            \
-               if ((n) <= mb_enable_debug) {                           \
+               if ((n) <= ext4_mballoc_debug) {                        \
                        printk(KERN_DEBUG "(%s, %d): %s: ",             \
                               __FILE__, __LINE__, __func__);           \
                        printk(fmt, ## a);                              \
index db8226d..480acf4 100644 (file)
@@ -456,11 +456,14 @@ int ext4_ext_migrate(struct inode *inode)
                 */
                return retval;
 
-       handle = ext4_journal_start(inode,
-                                       EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
-                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-                                       EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)
-                                       + 1);
+       /*
+        * Worst case we can touch the allocation bitmaps, a bgd
+        * block, and a block to link in the orphan list.  We do need
+        * need to worry about credits for modifying the quota inode.
+        */
+       handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
+               4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
+
        if (IS_ERR(handle)) {
                retval = PTR_ERR(handle);
                return retval;
@@ -507,7 +510,7 @@ int ext4_ext_migrate(struct inode *inode)
        ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
        up_read((&EXT4_I(inode)->i_data_sem));
 
-       handle = ext4_journal_start(inode, 1);
+       handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle)) {
                /*
                 * It is impossible to update on-disk structures without
index fe7c63f..f9b5515 100644 (file)
@@ -80,6 +80,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
         * is not blocked in the elevator. */
        if (!*bh)
                *bh = sb_getblk(sb, mmp_block);
+       if (!*bh)
+               return -ENOMEM;
        if (*bh) {
                get_bh(*bh);
                lock_buffer(*bh);
@@ -91,7 +93,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
                        *bh = NULL;
                }
        }
-       if (!*bh) {
+       if (unlikely(!*bh)) {
                ext4_warning(sb, "Error while reading MMP block %llu",
                             mmp_block);
                return -EIO;
index d9cc5ee..4e81d47 100644 (file)
@@ -681,6 +681,8 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
 
        depth = ext_depth(donor_inode);
        dext = donor_path[depth].p_ext;
+       if (unlikely(!dext))
+               goto missing_donor_extent;
        tmp_dext = *dext;
 
        *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
@@ -691,7 +693,8 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
        /* Loop for the donor extents */
        while (1) {
                /* The extent for donor must be found. */
-               if (!dext) {
+               if (unlikely(!dext)) {
+               missing_donor_extent:
                        EXT4_ERROR_INODE(donor_inode,
                                   "The extent for donor must be found");
                        *err = -EIO;
@@ -761,9 +764,6 @@ out:
                kfree(donor_path);
        }
 
-       ext4_ext_invalidate_cache(orig_inode);
-       ext4_ext_invalidate_cache(donor_inode);
-
        return replaced_count;
 }
 
@@ -900,7 +900,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
                  pgoff_t orig_page_offset, int data_offset_in_page,
                  int block_len_in_page, int uninit, int *err)
 {
-       struct inode *orig_inode = o_filp->f_dentry->d_inode;
+       struct inode *orig_inode = file_inode(o_filp);
        struct page *pagep[2] = {NULL, NULL};
        handle_t *handle;
        ext4_lblk_t orig_blk_offset;
@@ -920,7 +920,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 again:
        *err = 0;
        jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
-       handle = ext4_journal_start(orig_inode, jblocks);
+       handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
        if (IS_ERR(handle)) {
                *err = PTR_ERR(handle);
                return 0;
@@ -1279,8 +1279,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
                 __u64 orig_start, __u64 donor_start, __u64 len,
                 __u64 *moved_len)
 {
-       struct inode *orig_inode = o_filp->f_dentry->d_inode;
-       struct inode *donor_inode = d_filp->f_dentry->d_inode;
+       struct inode *orig_inode = file_inode(o_filp);
+       struct inode *donor_inode = file_inode(d_filp);
        struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL;
        struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
        ext4_lblk_t block_start = orig_start;
index f9ed946..3825d6a 100644 (file)
 #define NAMEI_RA_CHUNKS  2
 #define NAMEI_RA_BLOCKS  4
 #define NAMEI_RA_SIZE       (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
 
 static struct buffer_head *ext4_append(handle_t *handle,
                                        struct inode *inode,
-                                       ext4_lblk_t *block, int *err)
+                                       ext4_lblk_t *block)
 {
        struct buffer_head *bh;
+       int err = 0;
 
        if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
                     ((inode->i_size >> 10) >=
-                     EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) {
-               *err = -ENOSPC;
-               return NULL;
-       }
+                     EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
+               return ERR_PTR(-ENOSPC);
 
        *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
 
-       bh = ext4_bread(handle, inode, *block, 1, err);
-       if (bh) {
-               inode->i_size += inode->i_sb->s_blocksize;
-               EXT4_I(inode)->i_disksize = inode->i_size;
-               *err = ext4_journal_get_write_access(handle, bh);
-               if (*err) {
+       bh = ext4_bread(handle, inode, *block, 1, &err);
+       if (!bh)
+               return ERR_PTR(err);
+       inode->i_size += inode->i_sb->s_blocksize;
+       EXT4_I(inode)->i_disksize = inode->i_size;
+       err = ext4_journal_get_write_access(handle, bh);
+       if (err) {
+               brelse(bh);
+               ext4_std_error(inode->i_sb, err);
+               return ERR_PTR(err);
+       }
+       return bh;
+}
+
+static int ext4_dx_csum_verify(struct inode *inode,
+                              struct ext4_dir_entry *dirent);
+
+typedef enum {
+       EITHER, INDEX, DIRENT
+} dirblock_type_t;
+
+#define ext4_read_dirblock(inode, block, type) \
+       __ext4_read_dirblock((inode), (block), (type), __LINE__)
+
+static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+                                             ext4_lblk_t block,
+                                             dirblock_type_t type,
+                                             unsigned int line)
+{
+       struct buffer_head *bh;
+       struct ext4_dir_entry *dirent;
+       int err = 0, is_dx_block = 0;
+
+       bh = ext4_bread(NULL, inode, block, 0, &err);
+       if (!bh) {
+               if (err == 0) {
+                       ext4_error_inode(inode, __func__, line, block,
+                                              "Directory hole found");
+                       return ERR_PTR(-EIO);
+               }
+               __ext4_warning(inode->i_sb, __func__, line,
+                              "error reading directory block "
+                              "(ino %lu, block %lu)", inode->i_ino,
+                              (unsigned long) block);
+               return ERR_PTR(err);
+       }
+       dirent = (struct ext4_dir_entry *) bh->b_data;
+       /* Determine whether or not we have an index block */
+       if (is_dx(inode)) {
+               if (block == 0)
+                       is_dx_block = 1;
+               else if (ext4_rec_len_from_disk(dirent->rec_len,
+                                               inode->i_sb->s_blocksize) ==
+                        inode->i_sb->s_blocksize)
+                       is_dx_block = 1;
+       }
+       if (!is_dx_block && type == INDEX) {
+               ext4_error_inode(inode, __func__, line, block,
+                      "directory leaf block found instead of index block");
+               return ERR_PTR(-EIO);
+       }
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
+           buffer_verified(bh))
+               return bh;
+
+       /*
+        * An empty leaf block can get mistaken for a index block; for
+        * this reason, we can only check the index checksum when the
+        * caller is sure it should be an index block.
+        */
+       if (is_dx_block && type == INDEX) {
+               if (ext4_dx_csum_verify(inode, dirent))
+                       set_buffer_verified(bh);
+               else {
+                       ext4_error_inode(inode, __func__, line, block,
+                               "Directory index failed checksum");
                        brelse(bh);
-                       bh = NULL;
+                       return ERR_PTR(-EIO);
                }
        }
-       if (!bh && !(*err)) {
-               *err = -EIO;
-               ext4_error(inode->i_sb,
-                          "Directory hole detected on inode %lu\n",
-                          inode->i_ino);
+       if (!is_dx_block) {
+               if (ext4_dirent_csum_verify(inode, dirent))
+                       set_buffer_verified(bh);
+               else {
+                       ext4_error_inode(inode, __func__, line, block,
+                               "Directory block failed checksum");
+                       brelse(bh);
+                       return ERR_PTR(-EIO);
+               }
        }
        return bh;
 }
@@ -604,9 +677,9 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
        u32 hash;
 
        frame->bh = NULL;
-       if (!(bh = ext4_bread(NULL, dir, 0, 0, err))) {
-               if (*err == 0)
-                       *err = ERR_BAD_DX_DIR;
+       bh = ext4_read_dirblock(dir, 0, INDEX);
+       if (IS_ERR(bh)) {
+               *err = PTR_ERR(bh);
                goto fail;
        }
        root = (struct dx_root *) bh->b_data;
@@ -643,15 +716,6 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
                goto fail;
        }
 
-       if (!buffer_verified(bh) &&
-           !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) {
-               ext4_warning(dir->i_sb, "Root failed checksum");
-               brelse(bh);
-               *err = ERR_BAD_DX_DIR;
-               goto fail;
-       }
-       set_buffer_verified(bh);
-
        entries = (struct dx_entry *) (((char *)&root->info) +
                                       root->info.info_length);
 
@@ -709,22 +773,12 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
                frame->entries = entries;
                frame->at = at;
                if (!indirect--) return frame;
-               if (!(bh = ext4_bread(NULL, dir, dx_get_block(at), 0, err))) {
-                       if (!(*err))
-                               *err = ERR_BAD_DX_DIR;
-                       goto fail2;
-               }
-               at = entries = ((struct dx_node *) bh->b_data)->entries;
-
-               if (!buffer_verified(bh) &&
-                   !ext4_dx_csum_verify(dir,
-                                        (struct ext4_dir_entry *)bh->b_data)) {
-                       ext4_warning(dir->i_sb, "Node failed checksum");
-                       brelse(bh);
-                       *err = ERR_BAD_DX_DIR;
+               bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
+               if (IS_ERR(bh)) {
+                       *err = PTR_ERR(bh);
                        goto fail2;
                }
-               set_buffer_verified(bh);
+               entries = ((struct dx_node *) bh->b_data)->entries;
 
                if (dx_get_limit(entries) != dx_node_limit (dir)) {
                        ext4_warning(dir->i_sb,
@@ -783,7 +837,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
 {
        struct dx_frame *p;
        struct buffer_head *bh;
-       int err, num_frames = 0;
+       int num_frames = 0;
        __u32 bhash;
 
        p = frame;
@@ -822,25 +876,9 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
         * block so no check is necessary
         */
        while (num_frames--) {
-               if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
-                                     0, &err))) {
-                       if (!err) {
-                               ext4_error(dir->i_sb,
-                                          "Directory hole detected on inode %lu\n",
-                                          dir->i_ino);
-                               return -EIO;
-                       }
-                       return err; /* Failure */
-               }
-
-               if (!buffer_verified(bh) &&
-                   !ext4_dx_csum_verify(dir,
-                                        (struct ext4_dir_entry *)bh->b_data)) {
-                       ext4_warning(dir->i_sb, "Node failed checksum");
-                       return -EIO;
-               }
-               set_buffer_verified(bh);
-
+               bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
+               if (IS_ERR(bh))
+                       return PTR_ERR(bh);
                p++;
                brelse(p->bh);
                p->bh = bh;
@@ -866,20 +904,9 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 
        dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
                                                        (unsigned long)block));
-       if (!(bh = ext4_bread(NULL, dir, block, 0, &err))) {
-               if (!err) {
-                       err = -EIO;
-                       ext4_error(dir->i_sb,
-                                  "Directory hole detected on inode %lu\n",
-                                  dir->i_ino);
-               }
-               return err;
-       }
-
-       if (!buffer_verified(bh) &&
-           !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
-               return -EIO;
-       set_buffer_verified(bh);
+       bh = ext4_read_dirblock(dir, block, DIRENT);
+       if (IS_ERR(bh))
+               return PTR_ERR(bh);
 
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        top = (struct ext4_dir_entry_2 *) ((char *) de +
@@ -937,7 +964,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
 
        dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
                       start_hash, start_minor_hash));
-       dir = dir_file->f_path.dentry->d_inode;
+       dir = file_inode(dir_file);
        if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
                hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
                if (hinfo.hash_version <= DX_HASH_TEA)
@@ -1333,26 +1360,11 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
                return NULL;
        do {
                block = dx_get_block(frame->at);
-               if (!(bh = ext4_bread(NULL, dir, block, 0, err))) {
-                       if (!(*err)) {
-                               *err = -EIO;
-                               ext4_error(dir->i_sb,
-                                          "Directory hole detected on inode %lu\n",
-                                          dir->i_ino);
-                       }
-                       goto errout;
-               }
-
-               if (!buffer_verified(bh) &&
-                   !ext4_dirent_csum_verify(dir,
-                               (struct ext4_dir_entry *)bh->b_data)) {
-                       EXT4_ERROR_INODE(dir, "checksumming directory "
-                                        "block %lu", (unsigned long)block);
-                       brelse(bh);
-                       *err = -EIO;
+               bh = ext4_read_dirblock(dir, block, DIRENT);
+               if (IS_ERR(bh)) {
+                       *err = PTR_ERR(bh);
                        goto errout;
                }
-               set_buffer_verified(bh);
                retval = search_dirblock(bh, dir, d_name,
                                         block << EXT4_BLOCK_SIZE_BITS(sb),
                                         res_dir);
@@ -1536,11 +1548,12 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                csum_size = sizeof(struct ext4_dir_entry_tail);
 
-       bh2 = ext4_append (handle, dir, &newblock, &err);
-       if (!(bh2)) {
+       bh2 = ext4_append(handle, dir, &newblock);
+       if (IS_ERR(bh2)) {
                brelse(*bh);
                *bh = NULL;
-               goto errout;
+               *error = PTR_ERR(bh2);
+               return NULL;
        }
 
        BUFFER_TRACE(*bh, "get_write_access");
@@ -1621,7 +1634,6 @@ journal_error:
        brelse(bh2);
        *bh = NULL;
        ext4_std_error(dir->i_sb, err);
-errout:
        *error = err;
        return NULL;
 }
@@ -1699,7 +1711,6 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
        const char      *name = dentry->d_name.name;
        int             namelen = dentry->d_name.len;
        unsigned int    blocksize = dir->i_sb->s_blocksize;
-       unsigned short  reclen;
        int             csum_size = 0;
        int             err;
 
@@ -1707,7 +1718,6 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                csum_size = sizeof(struct ext4_dir_entry_tail);
 
-       reclen = EXT4_DIR_REC_LEN(namelen);
        if (!de) {
                err = ext4_find_dest_de(dir, inode,
                                        bh, bh->b_data, blocksize - csum_size,
@@ -1798,10 +1808,10 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        len = ((char *) root) + (blocksize - csum_size) - (char *) de;
 
        /* Allocate new block for the 0th block's dirents */
-       bh2 = ext4_append(handle, dir, &block, &retval);
-       if (!(bh2)) {
+       bh2 = ext4_append(handle, dir, &block);
+       if (IS_ERR(bh2)) {
                brelse(bh);
-               return retval;
+               return PTR_ERR(bh2);
        }
        ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
        data1 = bh2->b_data;
@@ -1918,20 +1928,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
        }
        blocks = dir->i_size >> sb->s_blocksize_bits;
        for (block = 0; block < blocks; block++) {
-               if (!(bh = ext4_bread(handle, dir, block, 0, &retval))) {
-                       if (!retval) {
-                               retval = -EIO;
-                               ext4_error(inode->i_sb,
-                                          "Directory hole detected on inode %lu\n",
-                                          inode->i_ino);
-                       }
-                       return retval;
-               }
-               if (!buffer_verified(bh) &&
-                   !ext4_dirent_csum_verify(dir,
-                               (struct ext4_dir_entry *)bh->b_data))
-                       return -EIO;
-               set_buffer_verified(bh);
+               bh = ext4_read_dirblock(dir, block, DIRENT);
+               if (IS_ERR(bh))
+                       return PTR_ERR(bh);
+
                retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
                if (retval != -ENOSPC) {
                        brelse(bh);
@@ -1943,9 +1943,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
                        return make_indexed_dir(handle, dentry, inode, bh);
                brelse(bh);
        }
-       bh = ext4_append(handle, dir, &block, &retval);
-       if (!bh)
-               return retval;
+       bh = ext4_append(handle, dir, &block);
+       if (IS_ERR(bh))
+               return PTR_ERR(bh);
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        de->inode = 0;
        de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
@@ -1982,22 +1982,13 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                return err;
        entries = frame->entries;
        at = frame->at;
-
-       if (!(bh = ext4_bread(handle, dir, dx_get_block(frame->at), 0, &err))) {
-               if (!err) {
-                       err = -EIO;
-                       ext4_error(dir->i_sb,
-                                  "Directory hole detected on inode %lu\n",
-                                  dir->i_ino);
-               }
+       bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
+       if (IS_ERR(bh)) {
+               err = PTR_ERR(bh);
+               bh = NULL;
                goto cleanup;
        }
 
-       if (!buffer_verified(bh) &&
-           !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
-               goto journal_error;
-       set_buffer_verified(bh);
-
        BUFFER_TRACE(bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, bh);
        if (err)
@@ -2025,9 +2016,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                        err = -ENOSPC;
                        goto cleanup;
                }
-               bh2 = ext4_append (handle, dir, &newblock, &err);
-               if (!(bh2))
+               bh2 = ext4_append(handle, dir, &newblock);
+               if (IS_ERR(bh2)) {
+                       err = PTR_ERR(bh2);
                        goto cleanup;
+               }
                node2 = (struct dx_node *)(bh2->b_data);
                entries2 = node2->entries;
                memset(&node2->fake, 0, sizeof(struct fake_dirent));
@@ -2106,8 +2099,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
 journal_error:
        ext4_std_error(dir->i_sb, err);
 cleanup:
-       if (bh)
-               brelse(bh);
+       brelse(bh);
        dx_release(frames);
        return err;
 }
@@ -2254,29 +2246,28 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 {
        handle_t *handle;
        struct inode *inode;
-       int err, retries = 0;
+       int err, credits, retries = 0;
 
        dquot_initialize(dir);
 
+       credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                  EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+                  EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
 retry:
-       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
-                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-                                       EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-
-       if (IS_DIRSYNC(dir))
-               ext4_handle_sync(handle);
-
-       inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
+       inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
+                                           NULL, EXT4_HT_DIR, credits);
+       handle = ext4_journal_current_handle();
        err = PTR_ERR(inode);
        if (!IS_ERR(inode)) {
                inode->i_op = &ext4_file_inode_operations;
                inode->i_fop = &ext4_file_operations;
                ext4_set_aops(inode);
                err = ext4_add_nondir(handle, dentry, inode);
+               if (!err && IS_DIRSYNC(dir))
+                       ext4_handle_sync(handle);
        }
-       ext4_journal_stop(handle);
+       if (handle)
+               ext4_journal_stop(handle);
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
@@ -2287,31 +2278,30 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry,
 {
        handle_t *handle;
        struct inode *inode;
-       int err, retries = 0;
+       int err, credits, retries = 0;
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
 
        dquot_initialize(dir);
 
+       credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                  EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+                  EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
 retry:
-       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
-                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-                                       EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-
-       if (IS_DIRSYNC(dir))
-               ext4_handle_sync(handle);
-
-       inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
+       inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
+                                           NULL, EXT4_HT_DIR, credits);
+       handle = ext4_journal_current_handle();
        err = PTR_ERR(inode);
        if (!IS_ERR(inode)) {
                init_special_inode(inode, inode->i_mode, rdev);
                inode->i_op = &ext4_special_inode_operations;
                err = ext4_add_nondir(handle, dentry, inode);
+               if (!err && IS_DIRSYNC(dir))
+                       ext4_handle_sync(handle);
        }
-       ext4_journal_stop(handle);
+       if (handle)
+               ext4_journal_stop(handle);
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
@@ -2351,6 +2341,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
        struct buffer_head *dir_block = NULL;
        struct ext4_dir_entry_2 *de;
        struct ext4_dir_entry_tail *t;
+       ext4_lblk_t block = 0;
        unsigned int blocksize = dir->i_sb->s_blocksize;
        int csum_size = 0;
        int err;
@@ -2367,16 +2358,10 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
                        goto out;
        }
 
-       inode->i_size = EXT4_I(inode)->i_disksize = blocksize;
-       if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) {
-               if (!err) {
-                       err = -EIO;
-                       ext4_error(inode->i_sb,
-                                  "Directory hole detected on inode %lu\n",
-                                  inode->i_ino);
-               }
-               goto out;
-       }
+       inode->i_size = 0;
+       dir_block = ext4_append(handle, inode, &block);
+       if (IS_ERR(dir_block))
+               return PTR_ERR(dir_block);
        BUFFER_TRACE(dir_block, "get_write_access");
        err = ext4_journal_get_write_access(handle, dir_block);
        if (err)
@@ -2403,25 +2388,21 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
        handle_t *handle;
        struct inode *inode;
-       int err, retries = 0;
+       int err, credits, retries = 0;
 
        if (EXT4_DIR_LINK_MAX(dir))
                return -EMLINK;
 
        dquot_initialize(dir);
 
+       credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                  EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+                  EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
 retry:
-       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
-                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-                                       EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-
-       if (IS_DIRSYNC(dir))
-               ext4_handle_sync(handle);
-
-       inode = ext4_new_inode(handle, dir, S_IFDIR | mode,
-                              &dentry->d_name, 0, NULL);
+       inode = ext4_new_inode_start_handle(dir, S_IFDIR | mode,
+                                           &dentry->d_name,
+                                           0, NULL, EXT4_HT_DIR, credits);
+       handle = ext4_journal_current_handle();
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
                goto out_stop;
@@ -2449,8 +2430,12 @@ out_clear_inode:
                goto out_clear_inode;
        unlock_new_inode(inode);
        d_instantiate(dentry, inode);
+       if (IS_DIRSYNC(dir))
+               ext4_handle_sync(handle);
+
 out_stop:
-       ext4_journal_stop(handle);
+       if (handle)
+               ext4_journal_stop(handle);
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
@@ -2476,25 +2461,14 @@ static int empty_dir(struct inode *inode)
        }
 
        sb = inode->i_sb;
-       if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
-           !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
-               if (err)
-                       EXT4_ERROR_INODE(inode,
-                               "error %d reading directory lblock 0", err);
-               else
-                       ext4_warning(inode->i_sb,
-                                    "bad directory (dir #%lu) - no data block",
-                                    inode->i_ino);
+       if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
+               EXT4_ERROR_INODE(inode, "invalid size");
                return 1;
        }
-       if (!buffer_verified(bh) &&
-           !ext4_dirent_csum_verify(inode,
-                       (struct ext4_dir_entry *)bh->b_data)) {
-               EXT4_ERROR_INODE(inode, "checksum error reading directory "
-                                "lblock 0");
-               return -EIO;
-       }
-       set_buffer_verified(bh);
+       bh = ext4_read_dirblock(inode, 0, EITHER);
+       if (IS_ERR(bh))
+               return 1;
+
        de = (struct ext4_dir_entry_2 *) bh->b_data;
        de1 = ext4_next_entry(de, sb->s_blocksize);
        if (le32_to_cpu(de->inode) != inode->i_ino ||
@@ -2517,28 +2491,9 @@ static int empty_dir(struct inode *inode)
                        err = 0;
                        brelse(bh);
                        lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
-                       bh = ext4_bread(NULL, inode, lblock, 0, &err);
-                       if (!bh) {
-                               if (err)
-                                       EXT4_ERROR_INODE(inode,
-                                               "error %d reading directory "
-                                               "lblock %u", err, lblock);
-                               else
-                                       ext4_warning(inode->i_sb,
-                                               "bad directory (dir #%lu) - no data block",
-                                               inode->i_ino);
-
-                               offset += sb->s_blocksize;
-                               continue;
-                       }
-                       if (!buffer_verified(bh) &&
-                           !ext4_dirent_csum_verify(inode,
-                                       (struct ext4_dir_entry *)bh->b_data)) {
-                               EXT4_ERROR_INODE(inode, "checksum error "
-                                                "reading directory lblock 0");
-                               return -EIO;
-                       }
-                       set_buffer_verified(bh);
+                       bh = ext4_read_dirblock(inode, lblock, EITHER);
+                       if (IS_ERR(bh))
+                               return 1;
                        de = (struct ext4_dir_entry_2 *) bh->b_data;
                }
                if (ext4_check_dir_entry(inode, NULL, de, bh,
@@ -2717,25 +2672,18 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
        struct inode *inode;
        struct buffer_head *bh;
        struct ext4_dir_entry_2 *de;
-       handle_t *handle;
+       handle_t *handle = NULL;
 
        /* Initialize quotas before so that eventual writes go in
         * separate transaction */
        dquot_initialize(dir);
        dquot_initialize(dentry->d_inode);
 
-       handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-
        retval = -ENOENT;
        bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
        if (!bh)
                goto end_rmdir;
 
-       if (IS_DIRSYNC(dir))
-               ext4_handle_sync(handle);
-
        inode = dentry->d_inode;
 
        retval = -EIO;
@@ -2746,6 +2694,17 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
        if (!empty_dir(inode))
                goto end_rmdir;
 
+       handle = ext4_journal_start(dir, EXT4_HT_DIR,
+                                   EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle)) {
+               retval = PTR_ERR(handle);
+               handle = NULL;
+               goto end_rmdir;
+       }
+
+       if (IS_DIRSYNC(dir))
+               ext4_handle_sync(handle);
+
        retval = ext4_delete_entry(handle, dir, de, bh);
        if (retval)
                goto end_rmdir;
@@ -2767,8 +2726,9 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
        ext4_mark_inode_dirty(handle, dir);
 
 end_rmdir:
-       ext4_journal_stop(handle);
        brelse(bh);
+       if (handle)
+               ext4_journal_stop(handle);
        return retval;
 }
 
@@ -2778,7 +2738,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
        struct inode *inode;
        struct buffer_head *bh;
        struct ext4_dir_entry_2 *de;
-       handle_t *handle;
+       handle_t *handle = NULL;
 
        trace_ext4_unlink_enter(dir, dentry);
        /* Initialize quotas before so that eventual writes go
@@ -2786,13 +2746,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
        dquot_initialize(dir);
        dquot_initialize(dentry->d_inode);
 
-       handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-
-       if (IS_DIRSYNC(dir))
-               ext4_handle_sync(handle);
-
        retval = -ENOENT;
        bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
        if (!bh)
@@ -2804,6 +2757,17 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
        if (le32_to_cpu(de->inode) != inode->i_ino)
                goto end_unlink;
 
+       handle = ext4_journal_start(dir, EXT4_HT_DIR,
+                                   EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle)) {
+               retval = PTR_ERR(handle);
+               handle = NULL;
+               goto end_unlink;
+       }
+
+       if (IS_DIRSYNC(dir))
+               ext4_handle_sync(handle);
+
        if (!inode->i_nlink) {
                ext4_warning(inode->i_sb,
                             "Deleting nonexistent file (%lu), %d",
@@ -2824,8 +2788,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
        retval = 0;
 
 end_unlink:
-       ext4_journal_stop(handle);
        brelse(bh);
+       if (handle)
+               ext4_journal_stop(handle);
        trace_ext4_unlink_exit(dentry, retval);
        return retval;
 }
@@ -2865,15 +2830,10 @@ static int ext4_symlink(struct inode *dir,
                          EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
        }
 retry:
-       handle = ext4_journal_start(dir, credits);
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-
-       if (IS_DIRSYNC(dir))
-               ext4_handle_sync(handle);
-
-       inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO,
-                              &dentry->d_name, 0, NULL);
+       inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
+                                           &dentry->d_name, 0, NULL,
+                                           EXT4_HT_DIR, credits);
+       handle = ext4_journal_current_handle();
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
                goto out_stop;
@@ -2903,7 +2863,7 @@ retry:
                 * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS
                 * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
                 */
-               handle = ext4_journal_start(dir,
+               handle = ext4_journal_start(dir, EXT4_HT_DIR,
                                EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
                                EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
                if (IS_ERR(handle)) {
@@ -2926,8 +2886,12 @@ retry:
        }
        EXT4_I(inode)->i_disksize = inode->i_size;
        err = ext4_add_nondir(handle, dentry, inode);
+       if (!err && IS_DIRSYNC(dir))
+               ext4_handle_sync(handle);
+
 out_stop:
-       ext4_journal_stop(handle);
+       if (handle)
+               ext4_journal_stop(handle);
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
@@ -2950,8 +2914,9 @@ static int ext4_link(struct dentry *old_dentry,
        dquot_initialize(dir);
 
 retry:
-       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
-                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS);
+       handle = ext4_journal_start(dir, EXT4_HT_DIR,
+               (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                EXT4_INDEX_EXTRA_TRANS_BLOCKS));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
@@ -2991,13 +2956,9 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
        struct buffer_head *bh;
 
        if (!ext4_has_inline_data(inode)) {
-               if (!(bh = ext4_bread(handle, inode, 0, 0, retval))) {
-                       if (!*retval) {
-                               *retval = -EIO;
-                               ext4_error(inode->i_sb,
-                                          "Directory hole detected on inode %lu\n",
-                                          inode->i_ino);
-                       }
+               bh = ext4_read_dirblock(inode, 0, EITHER);
+               if (IS_ERR(bh)) {
+                       *retval = PTR_ERR(bh);
                        return NULL;
                }
                *parent_de = ext4_next_entry(
@@ -3034,9 +2995,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
         * in separate transaction */
        if (new_dentry->d_inode)
                dquot_initialize(new_dentry->d_inode);
-       handle = ext4_journal_start(old_dir, 2 *
-                                       EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
-                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
+       handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
+               (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
+                EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
@@ -3076,11 +3037,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                                                  &inlined);
                if (!dir_bh)
                        goto end_rename;
-               if (!inlined && !buffer_verified(dir_bh) &&
-                   !ext4_dirent_csum_verify(old_inode,
-                               (struct ext4_dir_entry *)dir_bh->b_data))
-                       goto end_rename;
-               set_buffer_verified(dir_bh);
                if (le32_to_cpu(parent_de->inode) != old_dir->i_ino)
                        goto end_rename;
                retval = -EMLINK;
index 0016fbc..809b310 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/workqueue.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -73,8 +74,6 @@ void ext4_free_io_end(ext4_io_end_t *io)
        BUG_ON(!list_empty(&io->list));
        BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
 
-       if (io->page)
-               put_page(io->page);
        for (i = 0; i < io->num_io_pages; i++)
                put_io_page(io->pages[i]);
        io->num_io_pages = 0;
@@ -103,14 +102,13 @@ static int ext4_end_io(ext4_io_end_t *io)
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
-       if (io->iocb)
-               aio_complete(io->iocb, io->result, 0);
-
-       if (io->flag & EXT4_IO_END_DIRECT)
-               inode_dio_done(inode);
        /* Wake up anyone waiting on unwritten extent conversion */
        if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
                wake_up_all(ext4_ioend_wq(inode));
+       if (io->flag & EXT4_IO_END_DIRECT)
+               inode_dio_done(inode);
+       if (io->iocb)
+               aio_complete(io->iocb, io->result, 0);
        return ret;
 }
 
@@ -119,7 +117,6 @@ static void dump_completed_IO(struct inode *inode)
 #ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;
-       unsigned long flags;
 
        if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
                ext4_debug("inode %lu completed_io list is empty\n",
@@ -152,26 +149,20 @@ void ext4_add_complete_io(ext4_io_end_t *io_end)
        wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
 
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-       if (list_empty(&ei->i_completed_io_list)) {
-               io_end->flag |= EXT4_IO_END_QUEUED;
-               queue_work(wq, &io_end->work);
-       }
+       if (list_empty(&ei->i_completed_io_list))
+               queue_work(wq, &ei->i_unwritten_work);
        list_add_tail(&io_end->list, &ei->i_completed_io_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 }
 
-static int ext4_do_flush_completed_IO(struct inode *inode,
-                                     ext4_io_end_t *work_io)
+static int ext4_do_flush_completed_IO(struct inode *inode)
 {
        ext4_io_end_t *io;
-       struct list_head unwritten, complete, to_free;
+       struct list_head unwritten;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;
 
-       INIT_LIST_HEAD(&complete);
-       INIT_LIST_HEAD(&to_free);
-
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode);
        list_replace_init(&ei->i_completed_io_list, &unwritten);
@@ -185,32 +176,7 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;
-
-               list_add_tail(&io->list, &complete);
-       }
-       spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-       while (!list_empty(&complete)) {
-               io = list_entry(complete.next, ext4_io_end_t, list);
                io->flag &= ~EXT4_IO_END_UNWRITTEN;
-               /* end_io context can not be destroyed now because it still
-                * used by queued worker. Worker thread will destroy it later */
-               if (io->flag & EXT4_IO_END_QUEUED)
-                       list_del_init(&io->list);
-               else
-                       list_move(&io->list, &to_free);
-       }
-       /* If we are called from worker context, it is time to clear queued
-        * flag, and destroy it's end_io if it was converted already */
-       if (work_io) {
-               work_io->flag &= ~EXT4_IO_END_QUEUED;
-               if (!(work_io->flag & EXT4_IO_END_UNWRITTEN))
-                       list_add_tail(&work_io->list, &to_free);
-       }
-       spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-
-       while (!list_empty(&to_free)) {
-               io = list_entry(to_free.next, ext4_io_end_t, list);
-               list_del_init(&io->list);
                ext4_free_io_end(io);
        }
        return ret;
@@ -219,10 +185,11 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
 /*
  * work on completed aio dio IO, to convert unwritten extents to extents
  */
-static void ext4_end_io_work(struct work_struct *work)
+void ext4_end_io_work(struct work_struct *work)
 {
-       ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
-       ext4_do_flush_completed_IO(io->inode, io);
+       struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
+                                                 i_unwritten_work);
+       ext4_do_flush_completed_IO(&ei->vfs_inode);
 }
 
 int ext4_flush_unwritten_io(struct inode *inode)
@@ -230,7 +197,7 @@ int ext4_flush_unwritten_io(struct inode *inode)
        int ret;
        WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
                     !(inode->i_state & I_FREEING));
-       ret = ext4_do_flush_completed_IO(inode, NULL);
+       ret = ext4_do_flush_completed_IO(inode);
        ext4_unwritten_wait(inode);
        return ret;
 }
@@ -241,7 +208,6 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
        if (io) {
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
-               INIT_WORK(&io->work, ext4_end_io_work);
                INIT_LIST_HEAD(&io->list);
        }
        return io;
@@ -382,14 +348,6 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
                unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
        }
 
-       if (!buffer_mapped(bh) || buffer_delay(bh)) {
-               if (!buffer_mapped(bh))
-                       clear_buffer_dirty(bh);
-               if (io->io_bio)
-                       ext4_io_submit(io);
-               return 0;
-       }
-
        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
 submit_and_retry:
                ext4_io_submit(io);
@@ -436,7 +394,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
        io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
        if (!io_page) {
-               set_page_dirty(page);
+               redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return -ENOMEM;
        }
@@ -468,7 +426,15 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
                        set_buffer_uptodate(bh);
                        continue;
                }
-               clear_buffer_dirty(bh);
+               if (!buffer_dirty(bh) || buffer_delay(bh) ||
+                   !buffer_mapped(bh) || buffer_unwritten(bh)) {
+                       /* A hole? We can safely clear the dirty bit */
+                       if (!buffer_mapped(bh))
+                               clear_buffer_dirty(bh);
+                       if (io->io_bio)
+                               ext4_io_submit(io);
+                       continue;
+               }
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        /*
@@ -476,9 +442,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
-                       set_page_dirty(page);
+                       redirty_page_for_writepage(wbc, page);
                        break;
                }
+               clear_buffer_dirty(bh);
        }
        unlock_page(page);
        /*
index d99387b..b2c8ee5 100644 (file)
@@ -333,8 +333,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
        int err;
 
        bh = sb_getblk(sb, blk);
-       if (!bh)
-               return ERR_PTR(-EIO);
+       if (unlikely(!bh))
+               return ERR_PTR(-ENOMEM);
        if ((err = ext4_journal_get_write_access(handle, bh))) {
                brelse(bh);
                bh = ERR_PTR(err);
@@ -410,8 +410,8 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
                        return err;
 
                bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
-               if (!bh)
-                       return -EIO;
+               if (unlikely(!bh))
+                       return -ENOMEM;
 
                err = ext4_journal_get_write_access(handle, bh);
                if (err)
@@ -466,7 +466,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
        meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
 
        /* This transaction may be extended/restarted along the way */
-       handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
+       handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
@@ -500,8 +500,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
                                goto out;
 
                        gdb = sb_getblk(sb, block);
-                       if (!gdb) {
-                               err = -EIO;
+                       if (unlikely(!gdb)) {
+                               err = -ENOMEM;
                                goto out;
                        }
 
@@ -1031,7 +1031,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
        handle_t *handle;
        int err = 0, err2;
 
-       handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
+       handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
        if (IS_ERR(handle)) {
                group = 1;
                err = PTR_ERR(handle);
@@ -1064,8 +1064,8 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
                                        ext4_bg_has_super(sb, group));
 
                bh = sb_getblk(sb, backup_block);
-               if (!bh) {
-                       err = -EIO;
+               if (unlikely(!bh)) {
+                       err = -ENOMEM;
                        break;
                }
                ext4_debug("update metadata backup %llu(+%llu)\n",
@@ -1168,7 +1168,7 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
 static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
 {
        struct buffer_head *bh = sb_getblk(sb, block);
-       if (!bh)
+       if (unlikely(!bh))
                return NULL;
        if (!bh_uptodate_or_lock(bh)) {
                if (bh_submit_read(bh) < 0) {
@@ -1247,7 +1247,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
 
                ext4_inode_table_set(sb, gdp, group_data->inode_table);
                ext4_free_group_clusters_set(sb, gdp,
-                                            EXT4_B2C(sbi, group_data->free_blocks_count));
+                       EXT4_NUM_B2C(sbi, group_data->free_blocks_count));
                ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
                if (ext4_has_group_desc_csum(sb))
                        ext4_itable_unused_set(sb, gdp,
@@ -1349,7 +1349,7 @@ static void ext4_update_super(struct super_block *sb,
 
        /* Update the free space counts */
        percpu_counter_add(&sbi->s_freeclusters_counter,
-                          EXT4_B2C(sbi, free_blocks));
+                          EXT4_NUM_B2C(sbi, free_blocks));
        percpu_counter_add(&sbi->s_freeinodes_counter,
                           EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
 
@@ -1360,7 +1360,7 @@ static void ext4_update_super(struct super_block *sb,
            sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group;
                flex_group = ext4_flex_group(sbi, group_data[0].group);
-               atomic_add(EXT4_B2C(sbi, free_blocks),
+               atomic_add(EXT4_NUM_B2C(sbi, free_blocks),
                           &sbi->s_flex_groups[flex_group].free_clusters);
                atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
                           &sbi->s_flex_groups[flex_group].free_inodes);
@@ -1412,7 +1412,7 @@ static int ext4_flex_group_add(struct super_block *sb,
         * modify each of the reserved GDT dindirect blocks.
         */
        credit = flex_gd->count * 4 + reserved_gdb;
-       handle = ext4_journal_start_sb(sb, credit);
+       handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                goto exit;
@@ -1506,10 +1506,12 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
                group_data[i].blocks_count = blocks_per_group;
                overhead = ext4_group_overhead_blocks(sb, group + i);
                group_data[i].free_blocks_count = blocks_per_group - overhead;
-               if (ext4_has_group_desc_csum(sb))
+               if (ext4_has_group_desc_csum(sb)) {
                        flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
                                               EXT4_BG_INODE_UNINIT;
-               else
+                       if (!test_opt(sb, INIT_INODE_TABLE))
+                               flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
+               } else
                        flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
        }
 
@@ -1594,7 +1596,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 
        err = ext4_alloc_flex_bg_array(sb, input->group + 1);
        if (err)
-               return err;
+               goto out;
 
        err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
        if (err)
@@ -1622,7 +1624,7 @@ static int ext4_group_extend_no_check(struct super_block *sb,
        /* We will update the superblock, one block bitmap, and
         * one group descriptor via ext4_group_add_blocks().
         */
-       handle = ext4_journal_start_sb(sb, 3);
+       handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                ext4_warning(sb, "error %d on journal start", err);
@@ -1786,7 +1788,7 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
                credits += 3;   /* block bitmap, bg descriptor, resize inode */
        }
 
-       handle = ext4_journal_start_sb(sb, credits);
+       handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
index 3d4fb81..5e6c878 100644 (file)
@@ -69,8 +69,6 @@ static void ext4_mark_recovery_complete(struct super_block *sb,
 static void ext4_clear_journal_err(struct super_block *sb,
                                   struct ext4_super_block *es);
 static int ext4_sync_fs(struct super_block *sb, int wait);
-static const char *ext4_decode_error(struct super_block *sb, int errno,
-                                    char nbuf[16]);
 static int ext4_remount(struct super_block *sb, int *flags, char *data);
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
 static int ext4_unfreeze(struct super_block *sb);
@@ -296,107 +294,6 @@ void ext4_itable_unused_set(struct super_block *sb,
 }
 
 
-/* Just increment the non-pointer handle value */
-static handle_t *ext4_get_nojournal(void)
-{
-       handle_t *handle = current->journal_info;
-       unsigned long ref_cnt = (unsigned long)handle;
-
-       BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
-
-       ref_cnt++;
-       handle = (handle_t *)ref_cnt;
-
-       current->journal_info = handle;
-       return handle;
-}
-
-
-/* Decrement the non-pointer handle value */
-static void ext4_put_nojournal(handle_t *handle)
-{
-       unsigned long ref_cnt = (unsigned long)handle;
-
-       BUG_ON(ref_cnt == 0);
-
-       ref_cnt--;
-       handle = (handle_t *)ref_cnt;
-
-       current->journal_info = handle;
-}
-
-/*
- * Wrappers for jbd2_journal_start/end.
- */
-handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
-{
-       journal_t *journal;
-
-       trace_ext4_journal_start(sb, nblocks, _RET_IP_);
-       if (sb->s_flags & MS_RDONLY)
-               return ERR_PTR(-EROFS);
-
-       WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
-       journal = EXT4_SB(sb)->s_journal;
-       if (!journal)
-               return ext4_get_nojournal();
-       /*
-        * Special case here: if the journal has aborted behind our
-        * backs (eg. EIO in the commit thread), then we still need to
-        * take the FS itself readonly cleanly.
-        */
-       if (is_journal_aborted(journal)) {
-               ext4_abort(sb, "Detected aborted journal");
-               return ERR_PTR(-EROFS);
-       }
-       return jbd2_journal_start(journal, nblocks);
-}
-
-int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
-{
-       struct super_block *sb;
-       int err;
-       int rc;
-
-       if (!ext4_handle_valid(handle)) {
-               ext4_put_nojournal(handle);
-               return 0;
-       }
-       sb = handle->h_transaction->t_journal->j_private;
-       err = handle->h_err;
-       rc = jbd2_journal_stop(handle);
-
-       if (!err)
-               err = rc;
-       if (err)
-               __ext4_std_error(sb, where, line, err);
-       return err;
-}
-
-void ext4_journal_abort_handle(const char *caller, unsigned int line,
-                              const char *err_fn, struct buffer_head *bh,
-                              handle_t *handle, int err)
-{
-       char nbuf[16];
-       const char *errstr = ext4_decode_error(NULL, err, nbuf);
-
-       BUG_ON(!ext4_handle_valid(handle));
-
-       if (bh)
-               BUFFER_TRACE(bh, "abort");
-
-       if (!handle->h_err)
-               handle->h_err = err;
-
-       if (is_handle_aborted(handle))
-               return;
-
-       printk(KERN_ERR "EXT4-fs: %s:%d: aborting transaction: %s in %s\n",
-              caller, line, errstr, err_fn);
-
-       jbd2_journal_abort_handle(handle);
-}
-
 static void __save_error_info(struct super_block *sb, const char *func,
                            unsigned int line)
 {
@@ -553,7 +450,7 @@ void ext4_error_file(struct file *file, const char *function,
        va_list args;
        struct va_format vaf;
        struct ext4_super_block *es;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        char pathname[80], *path;
 
        es = EXT4_SB(inode->i_sb)->s_es;
@@ -582,8 +479,8 @@ void ext4_error_file(struct file *file, const char *function,
        ext4_handle_error(inode->i_sb);
 }
 
-static const char *ext4_decode_error(struct super_block *sb, int errno,
-                                    char nbuf[16])
+const char *ext4_decode_error(struct super_block *sb, int errno,
+                             char nbuf[16])
 {
        char *errstr = NULL;
 
@@ -858,6 +755,7 @@ static void ext4_put_super(struct super_block *sb)
                        ext4_abort(sb, "Couldn't clean up the journal");
        }
 
+       ext4_es_unregister_shrinker(sb);
        del_timer(&sbi->s_err_report);
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
@@ -885,6 +783,7 @@ static void ext4_put_super(struct super_block *sb)
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+       percpu_counter_destroy(&sbi->s_extent_cache_cnt);
        brelse(sbi->s_sbh);
 #ifdef CONFIG_QUOTA
        for (i = 0; i < MAXQUOTAS; i++)
@@ -939,11 +838,12 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
                return NULL;
 
        ei->vfs_inode.i_version = 1;
-       memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
        INIT_LIST_HEAD(&ei->i_prealloc_list);
        spin_lock_init(&ei->i_prealloc_lock);
        ext4_es_init_tree(&ei->i_es_tree);
        rwlock_init(&ei->i_es_lock);
+       INIT_LIST_HEAD(&ei->i_es_lru);
+       ei->i_es_lru_nr = 0;
        ei->i_reserved_data_blocks = 0;
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
@@ -960,6 +860,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->i_datasync_tid = 0;
        atomic_set(&ei->i_ioend_count, 0);
        atomic_set(&ei->i_unwritten, 0);
+       INIT_WORK(&ei->i_unwritten_work, ext4_end_io_work);
 
        return &ei->vfs_inode;
 }
@@ -1031,6 +932,7 @@ void ext4_clear_inode(struct inode *inode)
        dquot_drop(inode);
        ext4_discard_preallocations(inode);
        ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+       ext4_es_lru_del(inode);
        if (EXT4_I(inode)->jinode) {
                jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
                                               EXT4_I(inode)->jinode);
@@ -1280,8 +1182,8 @@ static const match_table_t tokens = {
        {Opt_stripe, "stripe=%u"},
        {Opt_delalloc, "delalloc"},
        {Opt_nodelalloc, "nodelalloc"},
-       {Opt_mblk_io_submit, "mblk_io_submit"},
-       {Opt_nomblk_io_submit, "nomblk_io_submit"},
+       {Opt_removed, "mblk_io_submit"},
+       {Opt_removed, "nomblk_io_submit"},
        {Opt_block_validity, "block_validity"},
        {Opt_noblock_validity, "noblock_validity"},
        {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
@@ -1337,6 +1239,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        char *qname;
+       int ret = -1;
 
        if (sb_any_quota_loaded(sb) &&
                !sbi->s_qf_names[qtype]) {
@@ -1345,29 +1248,37 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
                        "quota options when quota turned on");
                return -1;
        }
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+               ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
+                        "when QUOTA feature is enabled");
+               return -1;
+       }
        qname = match_strdup(args);
        if (!qname) {
                ext4_msg(sb, KERN_ERR,
                        "Not enough memory for storing quotafile name");
                return -1;
        }
-       if (sbi->s_qf_names[qtype] &&
-               strcmp(sbi->s_qf_names[qtype], qname)) {
-               ext4_msg(sb, KERN_ERR,
-                       "%s quota file already specified", QTYPE2NAME(qtype));
-               kfree(qname);
-               return -1;
+       if (sbi->s_qf_names[qtype]) {
+               if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
+                       ret = 1;
+               else
+                       ext4_msg(sb, KERN_ERR,
+                                "%s quota file already specified",
+                                QTYPE2NAME(qtype));
+               goto errout;
        }
-       sbi->s_qf_names[qtype] = qname;
-       if (strchr(sbi->s_qf_names[qtype], '/')) {
+       if (strchr(qname, '/')) {
                ext4_msg(sb, KERN_ERR,
                        "quotafile must be on filesystem root");
-               kfree(sbi->s_qf_names[qtype]);
-               sbi->s_qf_names[qtype] = NULL;
-               return -1;
+               goto errout;
        }
+       sbi->s_qf_names[qtype] = qname;
        set_opt(sb, QUOTA);
        return 1;
+errout:
+       kfree(qname);
+       return ret;
 }
 
 static int clear_qf_name(struct super_block *sb, int qtype)
@@ -1381,10 +1292,7 @@ static int clear_qf_name(struct super_block *sb, int qtype)
                        " when quota turned on");
                return -1;
        }
-       /*
-        * The space will be released later when all options are confirmed
-        * to be correct
-        */
+       kfree(sbi->s_qf_names[qtype]);
        sbi->s_qf_names[qtype] = NULL;
        return 1;
 }
@@ -1404,6 +1312,9 @@ static int clear_qf_name(struct super_block *sb, int qtype)
 #define MOPT_QFMT      MOPT_NOSUPPORT
 #endif
 #define MOPT_DATAJ     0x0080
+#define MOPT_NO_EXT2   0x0100
+#define MOPT_NO_EXT3   0x0200
+#define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
 
 static const struct mount_opts {
        int     token;
@@ -1414,25 +1325,31 @@ static const struct mount_opts {
        {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
        {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
        {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
-       {Opt_mblk_io_submit, EXT4_MOUNT_MBLK_IO_SUBMIT, MOPT_SET},
-       {Opt_nomblk_io_submit, EXT4_MOUNT_MBLK_IO_SUBMIT, MOPT_CLEAR},
        {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
        {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
-       {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_SET},
-       {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_CLEAR},
+       {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
+        MOPT_EXT4_ONLY | MOPT_SET},
+       {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
+        MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
        {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
-       {Opt_delalloc, EXT4_MOUNT_DELALLOC, MOPT_SET | MOPT_EXPLICIT},
-       {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_CLEAR | MOPT_EXPLICIT},
-       {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_SET},
+       {Opt_delalloc, EXT4_MOUNT_DELALLOC,
+        MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
+       {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
+        MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+       {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
+        MOPT_EXT4_ONLY | MOPT_SET},
        {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
-                                   EXT4_MOUNT_JOURNAL_CHECKSUM), MOPT_SET},
-       {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_SET},
+                                   EXT4_MOUNT_JOURNAL_CHECKSUM),
+        MOPT_EXT4_ONLY | MOPT_SET},
+       {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
        {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
        {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
        {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
-       {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_SET},
-       {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_CLEAR},
+       {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
+        MOPT_NO_EXT2 | MOPT_SET},
+       {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
+        MOPT_NO_EXT2 | MOPT_CLEAR},
        {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
        {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
        {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
@@ -1444,9 +1361,14 @@ static const struct mount_opts {
        {Opt_inode_readahead_blks, 0, MOPT_GTE0},
        {Opt_init_itable, 0, MOPT_GTE0},
        {Opt_stripe, 0, MOPT_GTE0},
-       {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_DATAJ},
-       {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_DATAJ},
-       {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA, MOPT_DATAJ},
+       {Opt_resuid, 0, MOPT_GTE0},
+       {Opt_resgid, 0, MOPT_GTE0},
+       {Opt_journal_dev, 0, MOPT_GTE0},
+       {Opt_journal_ioprio, 0, MOPT_GTE0},
+       {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
+       {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
+       {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
+        MOPT_NO_EXT2 | MOPT_DATAJ},
        {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
        {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
@@ -1496,8 +1418,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
        else if (token == Opt_offgrpjquota)
                return clear_qf_name(sb, GRPQUOTA);
 #endif
-       if (args->from && match_int(args, &arg))
-               return -1;
        switch (token) {
        case Opt_noacl:
        case Opt_nouser_xattr:
@@ -1506,138 +1426,156 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
        case Opt_sb:
                return 1;       /* handled by get_sb_block() */
        case Opt_removed:
-               ext4_msg(sb, KERN_WARNING,
-                        "Ignoring removed %s option", opt);
+               ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
+               return 1;
+       case Opt_abort:
+               sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
+               return 1;
+       case Opt_i_version:
+               sb->s_flags |= MS_I_VERSION;
                return 1;
-       case Opt_resuid:
+       }
+
+       for (m = ext4_mount_opts; m->token != Opt_err; m++)
+               if (token == m->token)
+                       break;
+
+       if (m->token == Opt_err) {
+               ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
+                        "or missing value", opt);
+               return -1;
+       }
+
+       if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
+               ext4_msg(sb, KERN_ERR,
+                        "Mount option \"%s\" incompatible with ext2", opt);
+               return -1;
+       }
+       if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
+               ext4_msg(sb, KERN_ERR,
+                        "Mount option \"%s\" incompatible with ext3", opt);
+               return -1;
+       }
+
+       if (args->from && match_int(args, &arg))
+               return -1;
+       if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
+               return -1;
+       if (m->flags & MOPT_EXPLICIT)
+               set_opt2(sb, EXPLICIT_DELALLOC);
+       if (m->flags & MOPT_CLEAR_ERR)
+               clear_opt(sb, ERRORS_MASK);
+       if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
+               ext4_msg(sb, KERN_ERR, "Cannot change quota "
+                        "options when quota turned on");
+               return -1;
+       }
+
+       if (m->flags & MOPT_NOSUPPORT) {
+               ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
+       } else if (token == Opt_commit) {
+               if (arg == 0)
+                       arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
+               sbi->s_commit_interval = HZ * arg;
+       } else if (token == Opt_max_batch_time) {
+               if (arg == 0)
+                       arg = EXT4_DEF_MAX_BATCH_TIME;
+               sbi->s_max_batch_time = arg;
+       } else if (token == Opt_min_batch_time) {
+               sbi->s_min_batch_time = arg;
+       } else if (token == Opt_inode_readahead_blks) {
+               if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
+                       ext4_msg(sb, KERN_ERR,
+                                "EXT4-fs: inode_readahead_blks must be "
+                                "0 or a power of 2 smaller than 2^31");
+                       return -1;
+               }
+               sbi->s_inode_readahead_blks = arg;
+       } else if (token == Opt_init_itable) {
+               set_opt(sb, INIT_INODE_TABLE);
+               if (!args->from)
+                       arg = EXT4_DEF_LI_WAIT_MULT;
+               sbi->s_li_wait_mult = arg;
+       } else if (token == Opt_max_dir_size_kb) {
+               sbi->s_max_dir_size_kb = arg;
+       } else if (token == Opt_stripe) {
+               sbi->s_stripe = arg;
+       } else if (token == Opt_resuid) {
                uid = make_kuid(current_user_ns(), arg);
                if (!uid_valid(uid)) {
                        ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
                        return -1;
                }
                sbi->s_resuid = uid;
-               return 1;
-       case Opt_resgid:
+       } else if (token == Opt_resgid) {
                gid = make_kgid(current_user_ns(), arg);
                if (!gid_valid(gid)) {
                        ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
                        return -1;
                }
                sbi->s_resgid = gid;
-               return 1;
-       case Opt_abort:
-               sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
-               return 1;
-       case Opt_i_version:
-               sb->s_flags |= MS_I_VERSION;
-               return 1;
-       case Opt_journal_dev:
+       } else if (token == Opt_journal_dev) {
                if (is_remount) {
                        ext4_msg(sb, KERN_ERR,
                                 "Cannot specify journal on remount");
                        return -1;
                }
                *journal_devnum = arg;
-               return 1;
-       case Opt_journal_ioprio:
-               if (arg < 0 || arg > 7)
-                       return -1;
-               *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
-               return 1;
-       }
-
-       for (m = ext4_mount_opts; m->token != Opt_err; m++) {
-               if (token != m->token)
-                       continue;
-               if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
-                       return -1;
-               if (m->flags & MOPT_EXPLICIT)
-                       set_opt2(sb, EXPLICIT_DELALLOC);
-               if (m->flags & MOPT_CLEAR_ERR)
-                       clear_opt(sb, ERRORS_MASK);
-               if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
-                       ext4_msg(sb, KERN_ERR, "Cannot change quota "
-                                "options when quota turned on");
+       } else if (token == Opt_journal_ioprio) {
+               if (arg > 7) {
+                       ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
+                                " (must be 0-7)");
                        return -1;
                }
-
-               if (m->flags & MOPT_NOSUPPORT) {
-                       ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
-               } else if (token == Opt_commit) {
-                       if (arg == 0)
-                               arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
-                       sbi->s_commit_interval = HZ * arg;
-               } else if (token == Opt_max_batch_time) {
-                       if (arg == 0)
-                               arg = EXT4_DEF_MAX_BATCH_TIME;
-                       sbi->s_max_batch_time = arg;
-               } else if (token == Opt_min_batch_time) {
-                       sbi->s_min_batch_time = arg;
-               } else if (token == Opt_inode_readahead_blks) {
-                       if (arg > (1 << 30))
-                               return -1;
-                       if (arg && !is_power_of_2(arg)) {
+               *journal_ioprio =
+                       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
+       } else if (m->flags & MOPT_DATAJ) {
+               if (is_remount) {
+                       if (!sbi->s_journal)
+                               ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
+                       else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
                                ext4_msg(sb, KERN_ERR,
-                                        "EXT4-fs: inode_readahead_blks"
-                                        " must be a power of 2");
-                               return -1;
-                       }
-                       sbi->s_inode_readahead_blks = arg;
-               } else if (token == Opt_init_itable) {
-                       set_opt(sb, INIT_INODE_TABLE);
-                       if (!args->from)
-                               arg = EXT4_DEF_LI_WAIT_MULT;
-                       sbi->s_li_wait_mult = arg;
-               } else if (token == Opt_max_dir_size_kb) {
-                       sbi->s_max_dir_size_kb = arg;
-               } else if (token == Opt_stripe) {
-                       sbi->s_stripe = arg;
-               } else if (m->flags & MOPT_DATAJ) {
-                       if (is_remount) {
-                               if (!sbi->s_journal)
-                                       ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
-                               else if (test_opt(sb, DATA_FLAGS) !=
-                                        m->mount_opt) {
-                                       ext4_msg(sb, KERN_ERR,
                                         "Cannot change data mode on remount");
-                                       return -1;
-                               }
-                       } else {
-                               clear_opt(sb, DATA_FLAGS);
-                               sbi->s_mount_opt |= m->mount_opt;
-                       }
-#ifdef CONFIG_QUOTA
-               } else if (m->flags & MOPT_QFMT) {
-                       if (sb_any_quota_loaded(sb) &&
-                           sbi->s_jquota_fmt != m->mount_opt) {
-                               ext4_msg(sb, KERN_ERR, "Cannot "
-                                        "change journaled quota options "
-                                        "when quota turned on");
                                return -1;
                        }
-                       sbi->s_jquota_fmt = m->mount_opt;
-#endif
                } else {
-                       if (!args->from)
-                               arg = 1;
-                       if (m->flags & MOPT_CLEAR)
-                               arg = !arg;
-                       else if (unlikely(!(m->flags & MOPT_SET))) {
-                               ext4_msg(sb, KERN_WARNING,
-                                        "buggy handling of option %s", opt);
-                               WARN_ON(1);
-                               return -1;
-                       }
-                       if (arg != 0)
-                               sbi->s_mount_opt |= m->mount_opt;
-                       else
-                               sbi->s_mount_opt &= ~m->mount_opt;
+                       clear_opt(sb, DATA_FLAGS);
+                       sbi->s_mount_opt |= m->mount_opt;
                }
-               return 1;
+#ifdef CONFIG_QUOTA
+       } else if (m->flags & MOPT_QFMT) {
+               if (sb_any_quota_loaded(sb) &&
+                   sbi->s_jquota_fmt != m->mount_opt) {
+                       ext4_msg(sb, KERN_ERR, "Cannot change journaled "
+                                "quota options when quota turned on");
+                       return -1;
+               }
+               if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                              EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+                       ext4_msg(sb, KERN_ERR,
+                                "Cannot set journaled quota options "
+                                "when QUOTA feature is enabled");
+                       return -1;
+               }
+               sbi->s_jquota_fmt = m->mount_opt;
+#endif
+       } else {
+               if (!args->from)
+                       arg = 1;
+               if (m->flags & MOPT_CLEAR)
+                       arg = !arg;
+               else if (unlikely(!(m->flags & MOPT_SET))) {
+                       ext4_msg(sb, KERN_WARNING,
+                                "buggy handling of option %s", opt);
+                       WARN_ON(1);
+                       return -1;
+               }
+               if (arg != 0)
+                       sbi->s_mount_opt |= m->mount_opt;
+               else
+                       sbi->s_mount_opt &= ~m->mount_opt;
        }
-       ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
-                "or missing value", opt);
-       return -1;
+       return 1;
 }
 
 static int parse_options(char *options, struct super_block *sb,
@@ -1667,6 +1605,12 @@ static int parse_options(char *options, struct super_block *sb,
                        return 0;
        }
 #ifdef CONFIG_QUOTA
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+           (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
+               ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
+                        "feature is enabled");
+               return 0;
+       }
        if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
                if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
                        clear_opt(sb, USRQUOTA);
@@ -2776,7 +2720,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
                        break;
        }
 
-       if (group == ngroups)
+       if (group >= ngroups)
                ret = 1;
 
        if (!ret) {
@@ -3016,33 +2960,34 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
        return elr;
 }
 
-static int ext4_register_li_request(struct super_block *sb,
-                                   ext4_group_t first_not_zeroed)
+int ext4_register_li_request(struct super_block *sb,
+                            ext4_group_t first_not_zeroed)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_li_request *elr;
+       struct ext4_li_request *elr = NULL;
        ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
        int ret = 0;
 
+       mutex_lock(&ext4_li_mtx);
        if (sbi->s_li_request != NULL) {
                /*
                 * Reset timeout so it can be computed again, because
                 * s_li_wait_mult might have changed.
                 */
                sbi->s_li_request->lr_timeout = 0;
-               return 0;
+               goto out;
        }
 
        if (first_not_zeroed == ngroups ||
            (sb->s_flags & MS_RDONLY) ||
            !test_opt(sb, INIT_INODE_TABLE))
-               return 0;
+               goto out;
 
        elr = ext4_li_request_new(sb, first_not_zeroed);
-       if (!elr)
-               return -ENOMEM;
-
-       mutex_lock(&ext4_li_mtx);
+       if (!elr) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        if (NULL == ext4_li_info) {
                ret = ext4_li_info_new();
@@ -3235,7 +3180,7 @@ int ext4_calculate_overhead(struct super_block *sb)
        }
        /* Add the journal blocks as well */
        if (sbi->s_journal)
-               overhead += EXT4_B2C(sbi, sbi->s_journal->j_maxlen);
+               overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
 
        sbi->s_overhead = overhead;
        smp_wmb();
@@ -3379,7 +3324,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
        set_opt(sb, POSIX_ACL);
 #endif
-       set_opt(sb, MBLK_IO_SUBMIT);
        if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
                set_opt(sb, JOURNAL_DATA);
        else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3763,6 +3707,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (!err) {
                err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
        }
+       if (!err) {
+               err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
+       }
        if (err) {
                ext4_msg(sb, KERN_ERR, "insufficient memory");
                goto failed_mount3;
@@ -3772,6 +3719,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_max_writeback_mb_bump = 128;
        sbi->s_extent_max_zeroout_kb = 32;
 
+       /* Register extent status tree shrinker */
+       ext4_es_register_shrinker(sb);
+
        /*
         * set up enough so that it can read an inode
         */
@@ -3783,13 +3733,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_export_op = &ext4_export_ops;
        sb->s_xattr = ext4_xattr_handlers;
 #ifdef CONFIG_QUOTA
-       sb->s_qcop = &ext4_qctl_operations;
        sb->dq_op = &ext4_quota_operations;
-
-       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
-               /* Use qctl operations for hidden quota files. */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
                sb->s_qcop = &ext4_qctl_sysfile_operations;
-       }
+       else
+               sb->s_qcop = &ext4_qctl_operations;
 #endif
        memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
 
@@ -3985,6 +3933,16 @@ no_journal:
        if (err)
                goto failed_mount7;
 
+#ifdef CONFIG_QUOTA
+       /* Enable quota usage during mount. */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+           !(sb->s_flags & MS_RDONLY)) {
+               err = ext4_enable_quotas(sb);
+               if (err)
+                       goto failed_mount8;
+       }
+#endif  /* CONFIG_QUOTA */
+
        EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
        ext4_orphan_cleanup(sb, es);
        EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
@@ -4002,16 +3960,6 @@ no_journal:
        } else
                descr = "out journal";
 
-#ifdef CONFIG_QUOTA
-       /* Enable quota usage during mount. */
-       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
-           !(sb->s_flags & MS_RDONLY)) {
-               err = ext4_enable_quotas(sb);
-               if (err)
-                       goto failed_mount7;
-       }
-#endif  /* CONFIG_QUOTA */
-
        if (test_opt(sb, DISCARD)) {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);
                if (!blk_queue_discard(q))
@@ -4035,6 +3983,10 @@ cantfind_ext4:
                ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
        goto failed_mount;
 
+#ifdef CONFIG_QUOTA
+failed_mount8:
+       kobject_del(&sbi->s_kobj);
+#endif
 failed_mount7:
        ext4_unregister_li_request(sb);
 failed_mount6:
@@ -4061,6 +4013,7 @@ failed_mount3:
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+       percpu_counter_destroy(&sbi->s_extent_cache_cnt);
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
@@ -4476,16 +4429,12 @@ static void ext4_clear_journal_err(struct super_block *sb,
 int ext4_force_commit(struct super_block *sb)
 {
        journal_t *journal;
-       int ret = 0;
 
        if (sb->s_flags & MS_RDONLY)
                return 0;
 
        journal = EXT4_SB(sb)->s_journal;
-       if (journal)
-               ret = ext4_journal_force_commit(journal);
-
-       return ret;
+       return ext4_journal_force_commit(journal);
 }
 
 static int ext4_sync_fs(struct super_block *sb, int wait)
@@ -4588,7 +4537,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
        int err = 0;
 #ifdef CONFIG_QUOTA
-       int i;
+       int i, j;
 #endif
        char *orig_data = kstrdup(data, GFP_KERNEL);
 
@@ -4604,7 +4553,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 #ifdef CONFIG_QUOTA
        old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
        for (i = 0; i < MAXQUOTAS; i++)
-               old_opts.s_qf_names[i] = sbi->s_qf_names[i];
+               if (sbi->s_qf_names[i]) {
+                       old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
+                                                        GFP_KERNEL);
+                       if (!old_opts.s_qf_names[i]) {
+                               for (j = 0; j < i; j++)
+                                       kfree(old_opts.s_qf_names[j]);
+                               kfree(orig_data);
+                               return -ENOMEM;
+                       }
+               } else
+                       old_opts.s_qf_names[i] = NULL;
 #endif
        if (sbi->s_journal && sbi->s_journal->j_task->io_context)
                journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
@@ -4737,9 +4696,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 #ifdef CONFIG_QUOTA
        /* Release old quota file names */
        for (i = 0; i < MAXQUOTAS; i++)
-               if (old_opts.s_qf_names[i] &&
-                   old_opts.s_qf_names[i] != sbi->s_qf_names[i])
-                       kfree(old_opts.s_qf_names[i]);
+               kfree(old_opts.s_qf_names[i]);
        if (enable_quota) {
                if (sb_any_quota_suspended(sb))
                        dquot_resume(sb, -1);
@@ -4768,9 +4725,7 @@ restore_opts:
 #ifdef CONFIG_QUOTA
        sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
        for (i = 0; i < MAXQUOTAS; i++) {
-               if (sbi->s_qf_names[i] &&
-                   old_opts.s_qf_names[i] != sbi->s_qf_names[i])
-                       kfree(sbi->s_qf_names[i]);
+               kfree(sbi->s_qf_names[i]);
                sbi->s_qf_names[i] = old_opts.s_qf_names[i];
        }
 #endif
@@ -4835,7 +4790,7 @@ static int ext4_write_dquot(struct dquot *dquot)
        struct inode *inode;
 
        inode = dquot_to_inode(dquot);
-       handle = ext4_journal_start(inode,
+       handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
                                    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -4851,7 +4806,7 @@ static int ext4_acquire_dquot(struct dquot *dquot)
        int ret, err;
        handle_t *handle;
 
-       handle = ext4_journal_start(dquot_to_inode(dquot),
+       handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
                                    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -4867,7 +4822,7 @@ static int ext4_release_dquot(struct dquot *dquot)
        int ret, err;
        handle_t *handle;
 
-       handle = ext4_journal_start(dquot_to_inode(dquot),
+       handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
                                    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
        if (IS_ERR(handle)) {
                /* Release dquot anyway to avoid endless cycle in dqput() */
@@ -4883,9 +4838,12 @@ static int ext4_release_dquot(struct dquot *dquot)
 
 static int ext4_mark_dquot_dirty(struct dquot *dquot)
 {
+       struct super_block *sb = dquot->dq_sb;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
        /* Are we journaling quotas? */
-       if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
-           EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) ||
+           sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
                dquot_mark_dquot_dirty(dquot);
                return ext4_write_dquot(dquot);
        } else {
@@ -4899,7 +4857,7 @@ static int ext4_write_info(struct super_block *sb, int type)
        handle_t *handle;
 
        /* Data block + inode block */
-       handle = ext4_journal_start(sb->s_root->d_inode, 2);
+       handle = ext4_journal_start(sb->s_root->d_inode, EXT4_HT_QUOTA, 2);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        ret = dquot_commit_info(sb, type);
@@ -5005,9 +4963,9 @@ static int ext4_enable_quotas(struct super_block *sb)
                                                DQUOT_USAGE_ENABLED);
                        if (err) {
                                ext4_warning(sb,
-                                       "Failed to enable quota (type=%d) "
-                                       "tracking. Please run e2fsck to fix.",
-                                       type);
+                                       "Failed to enable quota tracking "
+                                       "(type=%d, err=%d). Please run "
+                                       "e2fsck to fix.", type, err);
                                return err;
                        }
                }
@@ -5045,7 +5003,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
 
        /* Update modification times of quota files when userspace can
         * start looking at them */
-       handle = ext4_journal_start(inode, 1);
+       handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
        if (IS_ERR(handle))
                goto out;
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
index 3a91ebc..3a120b2 100644 (file)
@@ -549,7 +549,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                error = ext4_handle_dirty_xattr_block(handle, inode, bh);
                if (IS_SYNC(inode))
                        ext4_handle_sync(handle);
-               dquot_free_block(inode, 1);
+               dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
                ea_bdebug(bh, "refcount now=%d; releasing",
                          le32_to_cpu(BHDR(bh)->h_refcount));
        }
@@ -832,7 +832,8 @@ inserted:
                        else {
                                /* The old block is released after updating
                                   the inode. */
-                               error = dquot_alloc_block(inode, 1);
+                               error = dquot_alloc_block(inode,
+                                               EXT4_C2B(EXT4_SB(sb), 1));
                                if (error)
                                        goto cleanup;
                                error = ext4_journal_get_write_access(handle,
@@ -886,17 +887,18 @@ inserted:
                                  (unsigned long long)block);
 
                        new_bh = sb_getblk(sb, block);
-                       if (!new_bh) {
+                       if (unlikely(!new_bh)) {
+                               error = -ENOMEM;
 getblk_failed:
                                ext4_free_blocks(handle, inode, NULL, block, 1,
                                                 EXT4_FREE_BLOCKS_METADATA);
-                               error = -EIO;
                                goto cleanup;
                        }
                        lock_buffer(new_bh);
                        error = ext4_journal_get_create_access(handle, new_bh);
                        if (error) {
                                unlock_buffer(new_bh);
+                               error = -EIO;
                                goto getblk_failed;
                        }
                        memcpy(new_bh->b_data, s->base, new_bh->b_size);
@@ -928,7 +930,7 @@ cleanup:
        return error;
 
 cleanup_dquot:
-       dquot_free_block(inode, 1);
+       dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
        goto cleanup;
 
 bad_block:
@@ -1164,17 +1166,10 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
 {
        handle_t *handle;
        int error, retries = 0;
-       int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);
+       int credits = ext4_jbd2_credits_xattr(inode);
 
 retry:
-       /*
-        * In case of inline data, we may push out the data to a block,
-        * So reserve the journal space first.
-        */
-       if (ext4_has_inline_data(inode))
-               credits += ext4_writepage_trans_blocks(inode) + 1;
-
-       handle = ext4_journal_start(inode, credits);
+       handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
        if (IS_ERR(handle)) {
                error = PTR_ERR(handle);
        } else {
index 69eda78..aa25deb 100644 (file)
@@ -125,74 +125,6 @@ extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
                                       struct ext4_xattr_info *i,
                                       struct ext4_xattr_ibody_find *is);
 
-extern int ext4_has_inline_data(struct inode *inode);
-extern int ext4_get_inline_size(struct inode *inode);
-extern int ext4_get_max_inline_size(struct inode *inode);
-extern int ext4_find_inline_data_nolock(struct inode *inode);
-extern void ext4_write_inline_data(struct inode *inode,
-                                  struct ext4_iloc *iloc,
-                                  void *buffer, loff_t pos,
-                                  unsigned int len);
-extern int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
-                                   unsigned int len);
-extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
-                                unsigned int len);
-extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
-
-extern int ext4_readpage_inline(struct inode *inode, struct page *page);
-extern int ext4_try_to_write_inline_data(struct address_space *mapping,
-                                        struct inode *inode,
-                                        loff_t pos, unsigned len,
-                                        unsigned flags,
-                                        struct page **pagep);
-extern int ext4_write_inline_data_end(struct inode *inode,
-                                     loff_t pos, unsigned len,
-                                     unsigned copied,
-                                     struct page *page);
-extern struct buffer_head *
-ext4_journalled_write_inline_data(struct inode *inode,
-                                 unsigned len,
-                                 struct page *page);
-extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
-                                          struct inode *inode,
-                                          loff_t pos, unsigned len,
-                                          unsigned flags,
-                                          struct page **pagep,
-                                          void **fsdata);
-extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
-                                        unsigned len, unsigned copied,
-                                        struct page *page);
-extern int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
-                                    struct inode *inode);
-extern int ext4_try_create_inline_dir(handle_t *handle,
-                                     struct inode *parent,
-                                     struct inode *inode);
-extern int ext4_read_inline_dir(struct file *filp,
-                               void *dirent, filldir_t filldir,
-                               int *has_inline_data);
-extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
-                                       const struct qstr *d_name,
-                                       struct ext4_dir_entry_2 **res_dir,
-                                       int *has_inline_data);
-extern int ext4_delete_inline_entry(handle_t *handle,
-                                   struct inode *dir,
-                                   struct ext4_dir_entry_2 *de_del,
-                                   struct buffer_head *bh,
-                                   int *has_inline_data);
-extern int empty_inline_dir(struct inode *dir, int *has_inline_data);
-extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
-                                       struct ext4_dir_entry_2 **parent_de,
-                                       int *retval);
-extern int ext4_inline_data_fiemap(struct inode *inode,
-                                  struct fiemap_extent_info *fieinfo,
-                                  int *has_inline);
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
-                                        struct inode *inode,
-                                        int needed);
-extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
-
-extern int ext4_convert_inline_data(struct inode *inode);
-
 #ifdef CONFIG_EXT4_FS_SECURITY
 extern int ext4_init_security(handle_t *handle, struct inode *inode,
                              struct inode *dir, const struct qstr *qstr);
index ff3c843..2b6fc13 100644 (file)
@@ -72,22 +72,22 @@ static int f2fs_write_meta_page(struct page *page,
 {
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       int err;
 
-       wait_on_page_writeback(page);
-
-       err = write_meta_page(sbi, page, wbc);
-       if (err) {
+       /* Should not write any meta pages if any IO error has occurred */
+       if (wbc->for_reclaim ||
+                       is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
+               dec_page_count(sbi, F2FS_DIRTY_META);
                wbc->pages_skipped++;
                set_page_dirty(page);
+               return AOP_WRITEPAGE_ACTIVATE;
        }
 
-       dec_page_count(sbi, F2FS_DIRTY_META);
+       wait_on_page_writeback(page);
 
-       /* In this case, we should not unlock this page */
-       if (err != AOP_WRITEPAGE_ACTIVATE)
-               unlock_page(page);
-       return err;
+       write_meta_page(sbi, page);
+       dec_page_count(sbi, F2FS_DIRTY_META);
+       unlock_page(page);
+       return 0;
 }
 
 static int f2fs_write_meta_pages(struct address_space *mapping,
@@ -138,7 +138,10 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                        BUG_ON(page->mapping != mapping);
                        BUG_ON(!PageDirty(page));
                        clear_page_dirty_for_io(page);
-                       f2fs_write_meta_page(page, &wbc);
+                       if (f2fs_write_meta_page(page, &wbc)) {
+                               unlock_page(page);
+                               break;
+                       }
                        if (nwritten++ >= nr_to_write)
                                break;
                }
@@ -161,7 +164,6 @@ static int f2fs_set_meta_page_dirty(struct page *page)
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                inc_page_count(sbi, F2FS_DIRTY_META);
-               F2FS_SET_SB_DIRT(sbi);
                return 1;
        }
        return 0;
@@ -216,19 +218,11 @@ retry:
        new->ino = ino;
 
        /* add new_oentry into list which is sorted by inode number */
-       if (orphan) {
-               struct orphan_inode_entry *prev;
-
-               /* get previous entry */
-               prev = list_entry(orphan->list.prev, typeof(*prev), list);
-               if (&prev->list != head)
-                       /* insert new orphan inode entry */
-                       list_add(&new->list, &prev->list);
-               else
-                       list_add(&new->list, head);
-       } else {
+       if (orphan)
+               list_add(&new->list, this->prev);
+       else
                list_add_tail(&new->list, head);
-       }
+
        sbi->n_orphans++;
 out:
        mutex_unlock(&sbi->orphan_inode_mutex);
@@ -545,7 +539,7 @@ retry:
 /*
  * Freeze all the FS-operations for checkpoint.
  */
-void block_operations(struct f2fs_sb_info *sbi)
+static void block_operations(struct f2fs_sb_info *sbi)
 {
        int t;
        struct writeback_control wbc = {
@@ -717,27 +711,24 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        sbi->alloc_valid_block_count = 0;
 
        /* Here, we only have one bio having CP pack */
-       if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))
-               sbi->sb->s_flags |= MS_RDONLY;
-       else
-               sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
+       sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
 
-       clear_prefree_segments(sbi);
-       F2FS_RESET_SB_DIRT(sbi);
+       if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
+               clear_prefree_segments(sbi);
+               F2FS_RESET_SB_DIRT(sbi);
+       }
 }
 
 /*
  * We guarantee that this checkpoint procedure should not fail.
  */
-void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount)
+void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 {
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;
 
-       if (!blocked) {
-               mutex_lock(&sbi->cp_mutex);
-               block_operations(sbi);
-       }
+       mutex_lock(&sbi->cp_mutex);
+       block_operations(sbi);
 
        f2fs_submit_bio(sbi, DATA, true);
        f2fs_submit_bio(sbi, NODE, true);
index c8c3730..025b9e2 100644 (file)
@@ -183,10 +183,12 @@ static int stat_show(struct seq_file *s, void *v)
 
        mutex_lock(&f2fs_stat_mutex);
        list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
+               char devname[BDEVNAME_SIZE];
 
                update_general_status(si->sbi);
 
-               seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++);
+               seq_printf(s, "\n=====[ partition info(%s). #%d ]=====\n",
+                       bdevname(si->sbi->sb->s_bdev, devname), i++);
                seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
                           si->sit_area_segs, si->nat_area_segs);
                seq_printf(s, "[SSA: %d] [MAIN: %d",
index 989980e..a1f3844 100644 (file)
@@ -265,7 +265,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
        mutex_unlock_op(sbi, DENTRY_OPS);
 }
 
-void init_dent_inode(struct dentry *dentry, struct page *ipage)
+void init_dent_inode(const struct qstr *name, struct page *ipage)
 {
        struct f2fs_node *rn;
 
@@ -274,20 +274,19 @@ void init_dent_inode(struct dentry *dentry, struct page *ipage)
 
        wait_on_page_writeback(ipage);
 
-       /* copy dentry info. to this inode page */
+       /* copy name info. to this inode page */
        rn = (struct f2fs_node *)page_address(ipage);
-       rn->i.i_namelen = cpu_to_le32(dentry->d_name.len);
-       memcpy(rn->i.i_name, dentry->d_name.name, dentry->d_name.len);
+       rn->i.i_namelen = cpu_to_le32(name->len);
+       memcpy(rn->i.i_name, name->name, name->len);
        set_page_dirty(ipage);
 }
 
-static int init_inode_metadata(struct inode *inode, struct dentry *dentry)
+static int init_inode_metadata(struct inode *inode,
+               struct inode *dir, const struct qstr *name)
 {
-       struct inode *dir = dentry->d_parent->d_inode;
-
        if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
                int err;
-               err = new_inode_page(inode, dentry);
+               err = new_inode_page(inode, name);
                if (err)
                        return err;
 
@@ -310,7 +309,7 @@ static int init_inode_metadata(struct inode *inode, struct dentry *dentry)
                if (IS_ERR(ipage))
                        return PTR_ERR(ipage);
                set_cold_node(inode, ipage);
-               init_dent_inode(dentry, ipage);
+               init_dent_inode(name, ipage);
                f2fs_put_page(ipage, 1);
        }
        if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
@@ -371,7 +370,7 @@ next:
        goto next;
 }
 
-int f2fs_add_link(struct dentry *dentry, struct inode *inode)
+int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *inode)
 {
        unsigned int bit_pos;
        unsigned int level;
@@ -380,17 +379,15 @@ int f2fs_add_link(struct dentry *dentry, struct inode *inode)
        f2fs_hash_t dentry_hash;
        struct f2fs_dir_entry *de;
        unsigned int nbucket, nblock;
-       struct inode *dir = dentry->d_parent->d_inode;
        struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
-       const char *name = dentry->d_name.name;
-       size_t namelen = dentry->d_name.len;
+       size_t namelen = name->len;
        struct page *dentry_page = NULL;
        struct f2fs_dentry_block *dentry_blk = NULL;
        int slots = GET_DENTRY_SLOTS(namelen);
        int err = 0;
        int i;
 
-       dentry_hash = f2fs_dentry_hash(name, dentry->d_name.len);
+       dentry_hash = f2fs_dentry_hash(name->name, name->len);
        level = 0;
        current_depth = F2FS_I(dir)->i_current_depth;
        if (F2FS_I(dir)->chash == dentry_hash) {
@@ -433,7 +430,7 @@ start:
        ++level;
        goto start;
 add_dentry:
-       err = init_inode_metadata(inode, dentry);
+       err = init_inode_metadata(inode, dir, name);
        if (err)
                goto fail;
 
@@ -442,7 +439,7 @@ add_dentry:
        de = &dentry_blk->dentry[bit_pos];
        de->hash_code = dentry_hash;
        de->name_len = cpu_to_le16(namelen);
-       memcpy(dentry_blk->filename[bit_pos], name, namelen);
+       memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
        de->ino = cpu_to_le32(inode->i_ino);
        set_de_type(de, inode);
        for (i = 0; i < slots; i++)
@@ -603,7 +600,7 @@ bool f2fs_empty_dir(struct inode *dir)
 static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
 {
        unsigned long pos = file->f_pos;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        unsigned long npages = dir_blocks(inode);
        unsigned char *types = NULL;
        unsigned int bit_pos = 0, start_bit_pos = 0;
index c8e2d75..cc2213a 100644 (file)
@@ -103,6 +103,20 @@ static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
        return before;
 }
 
+/*
+ * ioctl commands
+ */
+#define F2FS_IOC_GETFLAGS               FS_IOC_GETFLAGS
+#define F2FS_IOC_SETFLAGS               FS_IOC_SETFLAGS
+
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+/*
+ * ioctl commands in 32 bit emulation
+ */
+#define F2FS_IOC32_GETFLAGS             FS_IOC32_GETFLAGS
+#define F2FS_IOC32_SETFLAGS             FS_IOC32_SETFLAGS
+#endif
+
 /*
  * For INODE and NODE manager
  */
@@ -141,7 +155,7 @@ struct f2fs_inode_info {
 
        /* Use below internally in f2fs*/
        unsigned long flags;            /* use to pass per-file flags */
-       unsigned long long data_version;/* lastes version of data for fsync */
+       unsigned long long data_version;/* latest version of data for fsync */
        atomic_t dirty_dents;           /* # of dirty dentry pages */
        f2fs_hash_t chash;              /* hash value of given file name */
        unsigned int clevel;            /* maximum level of given file name */
@@ -573,6 +587,14 @@ static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
        return atomic_read(&sbi->nr_pages[count_type]);
 }
 
+static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
+{
+       unsigned int pages_per_sec = sbi->segs_per_sec *
+                                       (1 << sbi->log_blocks_per_seg);
+       return ((get_pages(sbi, block_type) + pages_per_sec - 1)
+                       >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+}
+
 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
 {
        block_t ret;
@@ -842,12 +864,12 @@ void f2fs_truncate(struct inode *);
 int f2fs_setattr(struct dentry *, struct iattr *);
 int truncate_hole(struct inode *, pgoff_t, pgoff_t);
 long f2fs_ioctl(struct file *, unsigned int, unsigned long);
+long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
 
 /*
  * inode.c
  */
 void f2fs_set_inode_flags(struct inode *);
-struct inode *f2fs_iget_nowait(struct super_block *, unsigned long);
 struct inode *f2fs_iget(struct super_block *, unsigned long);
 void update_inode(struct inode *, struct page *);
 int f2fs_write_inode(struct inode *, struct writeback_control *);
@@ -867,12 +889,18 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
 ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
 void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
                                struct page *, struct inode *);
-void init_dent_inode(struct dentry *, struct page *);
-int f2fs_add_link(struct dentry *, struct inode *);
+void init_dent_inode(const struct qstr *, struct page *);
+int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
 void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
 int f2fs_make_empty(struct inode *, struct inode *);
 bool f2fs_empty_dir(struct inode *);
 
+static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
+{
+       return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
+                               inode);
+}
+
 /*
  * super.c
  */
@@ -896,7 +924,7 @@ void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
 int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
 int truncate_inode_blocks(struct inode *, pgoff_t);
 int remove_inode_page(struct inode *);
-int new_inode_page(struct inode *, struct dentry *);
+int new_inode_page(struct inode *, const struct qstr *);
 struct page *new_node_page(struct dnode_of_data *, unsigned int);
 void ra_node_page(struct f2fs_sb_info *, nid_t);
 struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
@@ -929,8 +957,7 @@ void allocate_new_segments(struct f2fs_sb_info *);
 struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
 struct bio *f2fs_bio_alloc(struct block_device *, int);
 void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
-int write_meta_page(struct f2fs_sb_info *, struct page *,
-                                       struct writeback_control *);
+void write_meta_page(struct f2fs_sb_info *, struct page *);
 void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
                                        block_t, block_t *);
 void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
@@ -963,8 +990,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *);
 void set_dirty_dir_page(struct inode *, struct page *);
 void remove_dirty_dir_inode(struct inode *);
 void sync_dirty_dir_inodes(struct f2fs_sb_info *);
-void block_operations(struct f2fs_sb_info *);
-void write_checkpoint(struct f2fs_sb_info *, bool, bool);
+void write_checkpoint(struct f2fs_sb_info *, bool);
 void init_orphan_info(struct f2fs_sb_info *);
 int __init create_checkpoint_caches(void);
 void destroy_checkpoint_caches(void);
index 3191b52..958a46d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/writeback.h>
 #include <linux/falloc.h>
 #include <linux/types.h>
+#include <linux/compat.h>
 #include <linux/uaccess.h>
 #include <linux/mount.h>
 
@@ -28,7 +29,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
                                                struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        block_t old_blk_addr;
        struct dnode_of_data dn;
@@ -157,11 +158,11 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
                need_cp = true;
-       if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
+       else if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
                need_cp = true;
-       if (!space_for_roll_forward(sbi))
+       else if (!space_for_roll_forward(sbi))
                need_cp = true;
-       if (need_to_sync_dir(sbi, inode))
+       else if (need_to_sync_dir(sbi, inode))
                need_cp = true;
 
        if (need_cp) {
@@ -298,8 +299,6 @@ void f2fs_truncate(struct inode *inode)
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                mark_inode_dirty(inode);
        }
-
-       f2fs_balance_fs(F2FS_SB(inode->i_sb));
 }
 
 static int f2fs_getattr(struct vfsmount *mnt,
@@ -356,6 +355,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
                        attr->ia_size != i_size_read(inode)) {
                truncate_setsize(inode, attr->ia_size);
                f2fs_truncate(inode);
+               f2fs_balance_fs(F2FS_SB(inode->i_sb));
        }
 
        __setattr_copy(inode, attr);
@@ -387,12 +387,17 @@ const struct inode_operations f2fs_file_inode_operations = {
 static void fill_zero(struct inode *inode, pgoff_t index,
                                        loff_t start, loff_t len)
 {
+       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *page;
 
        if (!len)
                return;
 
+       f2fs_balance_fs(sbi);
+
+       mutex_lock_op(sbi, DATA_NEW);
        page = get_new_data_page(inode, index, false);
+       mutex_unlock_op(sbi, DATA_NEW);
 
        if (!IS_ERR(page)) {
                wait_on_page_writeback(page);
@@ -539,7 +544,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 static long f2fs_fallocate(struct file *file, int mode,
                                loff_t offset, loff_t len)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        long ret;
 
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -572,7 +577,7 @@ static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
 
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int flags;
        int ret;
@@ -630,6 +635,23 @@ out:
        }
 }
 
+#ifdef CONFIG_COMPAT
+long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case F2FS_IOC32_GETFLAGS:
+               cmd = F2FS_IOC_GETFLAGS;
+               break;
+       case F2FS_IOC32_SETFLAGS:
+               cmd = F2FS_IOC_SETFLAGS;
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+       return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
 const struct file_operations f2fs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
@@ -641,6 +663,9 @@ const struct file_operations f2fs_file_operations = {
        .fsync          = f2fs_sync_file,
        .fallocate      = f2fs_fallocate,
        .unlocked_ioctl = f2fs_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = f2fs_compat_ioctl,
+#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
 };
index c386910..94b8a0c 100644 (file)
@@ -44,10 +44,10 @@ static int gc_thread_func(void *data)
                if (kthread_should_stop())
                        break;
 
-               f2fs_balance_fs(sbi);
-
-               if (!test_opt(sbi, BG_GC))
+               if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
+                       wait_ms = GC_THREAD_MAX_SLEEP_TIME;
                        continue;
+               }
 
                /*
                 * [GC triggering condition]
@@ -78,7 +78,8 @@ static int gc_thread_func(void *data)
 
                sbi->bg_gc++;
 
-               if (f2fs_gc(sbi) == GC_NONE)
+               /* if return value is not zero, no victim was selected */
+               if (f2fs_gc(sbi))
                        wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
                else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
                        wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -90,7 +91,10 @@ static int gc_thread_func(void *data)
 int start_gc_thread(struct f2fs_sb_info *sbi)
 {
        struct f2fs_gc_kthread *gc_th;
+       dev_t dev = sbi->sb->s_bdev->bd_dev;
 
+       if (!test_opt(sbi, BG_GC))
+               return 0;
        gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th)
                return -ENOMEM;
@@ -98,9 +102,10 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
-                               GC_THREAD_NAME);
+                       "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                kfree(gc_th);
+               sbi->gc_thread = NULL;
                return -ENOMEM;
        }
        return 0;
@@ -141,6 +146,9 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
 static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
 {
+       /* SSR allocates in a segment unit */
+       if (p->alloc_mode == SSR)
+               return 1 << sbi->log_blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
@@ -356,7 +364,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        mutex_unlock(&sit_i->sentry_lock);
-       return ret ? GC_OK : GC_NEXT;
+       return ret;
 }
 
 /*
@@ -364,7 +372,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
        bool initial = true;
@@ -376,21 +384,12 @@ next_step:
        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
-               int err;
 
-               /*
-                * It makes sure that free segments are able to write
-                * all the dirty node pages before CP after this CP.
-                * So let's check the space of dirty node pages.
-                */
-               if (should_do_checkpoint(sbi)) {
-                       mutex_lock(&sbi->cp_mutex);
-                       block_operations(sbi);
-                       return GC_BLOCKED;
-               }
+               /* stop BG_GC if there is not enough free sections. */
+               if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+                       return;
 
-               err = check_valid_map(sbi, segno, off);
-               if (err == GC_NEXT)
+               if (check_valid_map(sbi, segno, off) == 0)
                        continue;
 
                if (initial) {
@@ -420,7 +419,6 @@ next_step:
                };
                sync_node_pages(sbi, 0, &wbc);
        }
-       return GC_DONE;
 }
 
 /*
@@ -463,13 +461,13 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 
        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
-               return GC_NEXT;
+               return 0;
 
        get_node_info(sbi, nid, dni);
 
        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
-               return GC_NEXT;
+               return 0;
        }
 
        *nofs = ofs_of_node(node_page);
@@ -477,8 +475,8 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
        f2fs_put_page(node_page, 1);
 
        if (source_blkaddr != blkaddr)
-               return GC_NEXT;
-       return GC_OK;
+               return 0;
+       return 1;
 }
 
 static void move_data_page(struct inode *inode, struct page *page, int gc_type)
@@ -519,13 +517,13 @@ out:
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct list_head *ilist, unsigned int segno, int gc_type)
 {
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
-       int err, off;
+       int off;
        int phase = 0;
 
        start_addr = START_BLOCK(sbi, segno);
@@ -539,20 +537,11 @@ next_step:
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;
 
-               /*
-                * It makes sure that free segments are able to write
-                * all the dirty node pages before CP after this CP.
-                * So let's check the space of dirty node pages.
-                */
-               if (should_do_checkpoint(sbi)) {
-                       mutex_lock(&sbi->cp_mutex);
-                       block_operations(sbi);
-                       err = GC_BLOCKED;
-                       goto stop;
-               }
+               /* stop BG_GC if there is not enough free sections. */
+               if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+                       return;
 
-               err = check_valid_map(sbi, segno, off);
-               if (err == GC_NEXT)
+               if (check_valid_map(sbi, segno, off) == 0)
                        continue;
 
                if (phase == 0) {
@@ -561,8 +550,7 @@ next_step:
                }
 
                /* Get an inode by ino with checking validity */
-               err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
-               if (err == GC_NEXT)
+               if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
                        continue;
 
                if (phase == 1) {
@@ -574,7 +562,7 @@ next_step:
                ofs_in_node = le16_to_cpu(entry->ofs_in_node);
 
                if (phase == 2) {
-                       inode = f2fs_iget_nowait(sb, dni.ino);
+                       inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode))
                                continue;
 
@@ -602,11 +590,9 @@ next_iput:
        }
        if (++phase < 4)
                goto next_step;
-       err = GC_DONE;
-stop:
+
        if (gc_type == FG_GC)
                f2fs_submit_bio(sbi, DATA, true);
-       return err;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -620,17 +606,16 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
        return ret;
 }
 
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
                                struct list_head *ilist, int gc_type)
 {
        struct page *sum_page;
        struct f2fs_summary_block *sum;
-       int ret = GC_DONE;
 
        /* read segment summary of victim */
        sum_page = get_sum_page(sbi, segno);
        if (IS_ERR(sum_page))
-               return GC_ERROR;
+               return;
 
        /*
         * CP needs to lock sum_page. In this time, we don't need
@@ -642,17 +627,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 
        switch (GET_SUM_TYPE((&sum->footer))) {
        case SUM_TYPE_NODE:
-               ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
+               gc_node_segment(sbi, sum->entries, segno, gc_type);
                break;
        case SUM_TYPE_DATA:
-               ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
+               gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
                break;
        }
        stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
        stat_inc_call_count(sbi->stat_info);
 
        f2fs_put_page(sum_page, 0);
-       return ret;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi)
@@ -660,40 +644,38 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
        struct list_head ilist;
        unsigned int segno, i;
        int gc_type = BG_GC;
-       int gc_status = GC_NONE;
+       int nfree = 0;
+       int ret = -1;
 
        INIT_LIST_HEAD(&ilist);
 gc_more:
        if (!(sbi->sb->s_flags & MS_ACTIVE))
                goto stop;
 
-       if (has_not_enough_free_secs(sbi))
+       if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
                gc_type = FG_GC;
 
        if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
                goto stop;
+       ret = 0;
 
-       for (i = 0; i < sbi->segs_per_sec; i++) {
-               /*
-                * do_garbage_collect will give us three gc_status:
-                * GC_ERROR, GC_DONE, and GC_BLOCKED.
-                * If GC is finished uncleanly, we have to return
-                * the victim to dirty segment list.
-                */
-               gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
-               if (gc_status != GC_DONE)
-                       break;
-       }
-       if (has_not_enough_free_secs(sbi)) {
-               write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
-               if (has_not_enough_free_secs(sbi))
-                       goto gc_more;
-       }
+       for (i = 0; i < sbi->segs_per_sec; i++)
+               do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+
+       if (gc_type == FG_GC &&
+                       get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+               nfree++;
+
+       if (has_not_enough_free_secs(sbi, nfree))
+               goto gc_more;
+
+       if (gc_type == FG_GC)
+               write_checkpoint(sbi, false);
 stop:
        mutex_unlock(&sbi->gc_mutex);
 
        put_gc_inode(&ilist);
-       return gc_status;
+       return ret;
 }
 
 void build_gc_manager(struct f2fs_sb_info *sbi)
index b026d93..30b2db0 100644 (file)
@@ -8,7 +8,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#define GC_THREAD_NAME "f2fs_gc_task"
 #define GC_THREAD_MIN_WB_PAGES         1       /*
                                                 * a threshold to determine
                                                 * whether IO subsystem is idle
 /* Search max. number of dirty segments to select a victim segment */
 #define MAX_VICTIM_SEARCH      20
 
-enum {
-       GC_NONE = 0,
-       GC_ERROR,
-       GC_OK,
-       GC_NEXT,
-       GC_BLOCKED,
-       GC_DONE,
-};
-
 struct f2fs_gc_kthread {
        struct task_struct *f2fs_gc_task;
        wait_queue_head_t gc_wait_queue_head;
@@ -104,14 +94,3 @@ static inline int is_idle(struct f2fs_sb_info *sbi)
        struct request_list *rl = &q->root_rl;
        return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
 }
-
-static inline bool should_do_checkpoint(struct f2fs_sb_info *sbi)
-{
-       unsigned int pages_per_sec = sbi->segs_per_sec *
-                                       (1 << sbi->log_blocks_per_seg);
-       int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
-                       >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
-       int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
-                       >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
-       return free_sections(sbi) <= (node_secs + 2 * dent_secs + 2);
-}
index 7942417..ddae412 100644 (file)
 #include "f2fs.h"
 #include "node.h"
 
-struct f2fs_iget_args {
-       u64 ino;
-       int on_free;
-};
-
 void f2fs_set_inode_flags(struct inode *inode)
 {
        unsigned int flags = F2FS_I(inode)->i_flags;
@@ -40,34 +35,6 @@ void f2fs_set_inode_flags(struct inode *inode)
                inode->i_flags |= S_DIRSYNC;
 }
 
-static int f2fs_iget_test(struct inode *inode, void *data)
-{
-       struct f2fs_iget_args *args = data;
-
-       if (inode->i_ino != args->ino)
-               return 0;
-       if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
-               args->on_free = 1;
-               return 0;
-       }
-       return 1;
-}
-
-struct inode *f2fs_iget_nowait(struct super_block *sb, unsigned long ino)
-{
-       struct f2fs_iget_args args = {
-               .ino = ino,
-               .on_free = 0
-       };
-       struct inode *inode = ilookup5(sb, ino, f2fs_iget_test, &args);
-
-       if (inode)
-               return inode;
-       if (!args.on_free)
-               return f2fs_iget(sb, ino);
-       return ERR_PTR(-ENOENT);
-}
-
 static int do_read_inode(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -100,6 +67,10 @@ static int do_read_inode(struct inode *inode)
        inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
        inode->i_generation = le32_to_cpu(ri->i_generation);
+       if (ri->i_addr[0])
+               inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
+       else
+               inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
 
        fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
@@ -203,6 +174,20 @@ void update_inode(struct inode *inode, struct page *node_page)
        ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
        ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
        ri->i_generation = cpu_to_le32(inode->i_generation);
+
+       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+               if (old_valid_dev(inode->i_rdev)) {
+                       ri->i_addr[0] =
+                               cpu_to_le32(old_encode_dev(inode->i_rdev));
+                       ri->i_addr[1] = 0;
+               } else {
+                       ri->i_addr[0] = 0;
+                       ri->i_addr[1] =
+                               cpu_to_le32(new_encode_dev(inode->i_rdev));
+                       ri->i_addr[2] = 0;
+               }
+       }
+
        set_cold_node(inode, node_page);
        set_page_dirty(node_page);
 }
@@ -260,6 +245,7 @@ void f2fs_evict_inode(struct inode *inode)
        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;
 
+       sb_start_intwrite(inode->i_sb);
        set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
        i_size_write(inode, 0);
 
@@ -267,6 +253,7 @@ void f2fs_evict_inode(struct inode *inode)
                f2fs_truncate(inode);
 
        remove_inode_page(inode);
+       sb_end_intwrite(inode->i_sb);
 no_delete:
        clear_inode(inode);
 }
index 9bda63c..e275218 100644 (file)
@@ -104,7 +104,7 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
                        f2fs_put_page(page, 1);
                        continue;
                }
-               page_cache_release(page);
+               f2fs_put_page(page, 0);
        }
 }
 
@@ -660,7 +660,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        int err = 0, cont = 1;
        int level, offset[4], noffset[4];
-       unsigned int nofs;
+       unsigned int nofs = 0;
        struct f2fs_node *rn;
        struct dnode_of_data dn;
        struct page *page;
@@ -780,7 +780,7 @@ int remove_inode_page(struct inode *inode)
        return 0;
 }
 
-int new_inode_page(struct inode *inode, struct dentry *dentry)
+int new_inode_page(struct inode *inode, const struct qstr *name)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *page;
@@ -790,7 +790,7 @@ int new_inode_page(struct inode *inode, struct dentry *dentry)
        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
        mutex_lock_op(sbi, NODE_NEW);
        page = new_node_page(&dn, 0);
-       init_dent_inode(dentry, page);
+       init_dent_inode(name, page);
        mutex_unlock_op(sbi, NODE_NEW);
        if (IS_ERR(page))
                return PTR_ERR(page);
@@ -874,15 +874,11 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
                return;
 
        if (read_node_page(apage, READA))
-               goto unlock_out;
+               unlock_page(apage);
 
-       page_cache_release(apage);
-       return;
-
-unlock_out:
-       unlock_page(apage);
 release_out:
-       page_cache_release(apage);
+       f2fs_put_page(apage, 0);
+       return;
 }
 
 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1139,7 +1135,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 
        /* First check balancing cached NAT entries */
        if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
-               write_checkpoint(sbi, false, false);
+               write_checkpoint(sbi, false);
                return 0;
        }
 
index f42e406..b235215 100644 (file)
@@ -42,7 +42,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
 {
        struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
        struct f2fs_inode *raw_inode = &(raw_node->i);
-       struct dentry dent, parent;
+       struct qstr name;
        struct f2fs_dir_entry *de;
        struct page *page;
        struct inode *dir;
@@ -57,17 +57,15 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
                goto out;
        }
 
-       parent.d_inode = dir;
-       dent.d_parent = &parent;
-       dent.d_name.len = le32_to_cpu(raw_inode->i_namelen);
-       dent.d_name.name = raw_inode->i_name;
+       name.len = le32_to_cpu(raw_inode->i_namelen);
+       name.name = raw_inode->i_name;
 
-       de = f2fs_find_entry(dir, &dent.d_name, &page);
+       de = f2fs_find_entry(dir, &name, &page);
        if (de) {
                kunmap(page);
                f2fs_put_page(page, 0);
        } else {
-               err = f2fs_add_link(&dent, inode);
+               err = __f2fs_add_link(dir, &name, inode);
        }
        iput(dir);
 out:
@@ -226,7 +224,7 @@ static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
        f2fs_put_page(node_page, 1);
 
        /* Deallocate previous index in the node page */
-       inode = f2fs_iget_nowait(sbi->sb, ino);
+       inode = f2fs_iget(sbi->sb, ino);
        if (IS_ERR(inode))
                return;
 
@@ -373,5 +371,5 @@ void recover_fsync_data(struct f2fs_sb_info *sbi)
 out:
        destroy_fsync_dnodes(sbi, &inode_list);
        kmem_cache_destroy(fsync_entry_slab);
-       write_checkpoint(sbi, false, false);
+       write_checkpoint(sbi, false);
 }
index 4b00990..777f17e 100644 (file)
@@ -29,7 +29,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
         * We should do GC or end up with checkpoint, if there are so many dirty
         * dir/node pages without enough free segments.
         */
-       if (has_not_enough_free_secs(sbi)) {
+       if (has_not_enough_free_secs(sbi, 0)) {
                mutex_lock(&sbi->gc_mutex);
                f2fs_gc(sbi);
        }
@@ -308,7 +308,7 @@ static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
         * If there is not enough reserved sections,
         * we should not reuse prefree segments.
         */
-       if (has_not_enough_free_secs(sbi))
+       if (has_not_enough_free_secs(sbi, 0))
                return NULL_SEGNO;
 
        /*
@@ -536,6 +536,23 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
        }
 }
 
+static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
+{
+       struct curseg_info *curseg = CURSEG_I(sbi, type);
+       const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
+
+       if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+               return v_ops->get_victim(sbi,
+                               &(curseg)->next_segno, BG_GC, type, SSR);
+
+       /* For data segments, let's do SSR more intensively */
+       for (; type >= CURSEG_HOT_DATA; type--)
+               if (v_ops->get_victim(sbi, &(curseg)->next_segno,
+                                               BG_GC, type, SSR))
+                       return 1;
+       return 0;
+}
+
 /*
  * flush out current segment and replace it with new segment
  * This function should be returned with success, otherwise BUG
@@ -600,6 +617,7 @@ static void f2fs_end_io_write(struct bio *bio, int err)
                        if (page->mapping)
                                set_bit(AS_EIO, &page->mapping->flags);
                        set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
+                       p->sbi->sb->s_flags |= MS_RDONLY;
                }
                end_page_writeback(page);
                dec_page_count(p->sbi, F2FS_WRITEBACK);
@@ -815,15 +833,10 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
        mutex_unlock(&curseg->curseg_mutex);
 }
 
-int write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
-                       struct writeback_control *wbc)
+void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
 {
-       if (wbc->for_reclaim)
-               return AOP_WRITEPAGE_ACTIVATE;
-
        set_page_writeback(page);
        submit_write_page(sbi, page, page->index, META);
-       return 0;
 }
 
 void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
index 66a288a..552dadb 100644 (file)
@@ -450,29 +450,16 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
        return (free_sections(sbi) < overprovision_sections(sbi));
 }
 
-static inline int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
 {
-       struct curseg_info *curseg = CURSEG_I(sbi, type);
-       return DIRTY_I(sbi)->v_ops->get_victim(sbi,
-                               &(curseg)->next_segno, BG_GC, type, SSR);
-}
-
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi)
-{
-       unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) *
-                       sbi->segs_per_sec;
-       int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
-                       >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
-       int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
-                       >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+       int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
+       int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
 
        if (sbi->por_doing)
                return false;
 
-       if (free_sections(sbi) <= (node_secs + 2 * dent_secs +
-                                               reserved_sections(sbi)))
-               return true;
-       return false;
+       return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
+                                               reserved_sections(sbi)));
 }
 
 static inline int utilization(struct f2fs_sb_info *sbi)
index 37fad04..8c11764 100644 (file)
@@ -112,7 +112,7 @@ static void f2fs_put_super(struct super_block *sb)
        f2fs_destroy_stats(sbi);
        stop_gc_thread(sbi);
 
-       write_checkpoint(sbi, false, true);
+       write_checkpoint(sbi, true);
 
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
@@ -136,13 +136,29 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
                return 0;
 
        if (sync)
-               write_checkpoint(sbi, false, false);
+               write_checkpoint(sbi, false);
        else
                f2fs_balance_fs(sbi);
 
        return 0;
 }
 
+static int f2fs_freeze(struct super_block *sb)
+{
+       int err;
+
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
+
+       err = f2fs_sync_fs(sb, 1);
+       return err;
+}
+
+static int f2fs_unfreeze(struct super_block *sb)
+{
+       return 0;
+}
+
 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
@@ -198,7 +214,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
                seq_puts(seq, ",noacl");
 #endif
        if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
-               seq_puts(seq, ",disable_ext_indentify");
+               seq_puts(seq, ",disable_ext_identify");
 
        seq_printf(seq, ",active_logs=%u", sbi->active_logs);
 
@@ -213,6 +229,8 @@ static struct super_operations f2fs_sops = {
        .evict_inode    = f2fs_evict_inode,
        .put_super      = f2fs_put_super,
        .sync_fs        = f2fs_sync_fs,
+       .freeze_fs      = f2fs_freeze,
+       .unfreeze_fs    = f2fs_unfreeze,
        .statfs         = f2fs_statfs,
 };
 
@@ -366,14 +384,23 @@ static int sanity_check_raw_super(struct super_block *sb,
                return 1;
        }
 
+       /* Currently, support only 4KB page cache size */
+       if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Invalid page_cache_size (%lu), supports only 4KB\n",
+                       PAGE_CACHE_SIZE);
+               return 1;
+       }
+
        /* Currently, support only 4KB block size */
        blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
-       if (blocksize != PAGE_CACHE_SIZE) {
+       if (blocksize != F2FS_BLKSIZE) {
                f2fs_msg(sb, KERN_INFO,
                        "Invalid blocksize (%u), supports only 4KB\n",
                        blocksize);
                return 1;
        }
+
        if (le32_to_cpu(raw_super->log_sectorsize) !=
                                        F2FS_LOG_SECTOR_SIZE) {
                f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
@@ -387,10 +414,11 @@ static int sanity_check_raw_super(struct super_block *sb,
        return 0;
 }
 
-static int sanity_check_ckpt(struct f2fs_super_block *raw_super,
-                               struct f2fs_checkpoint *ckpt)
+static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
 {
        unsigned int total, fsmeta;
+       struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+       struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 
        total = le32_to_cpu(raw_super->segment_count);
        fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -401,6 +429,11 @@ static int sanity_check_ckpt(struct f2fs_super_block *raw_super,
 
        if (fsmeta >= total)
                return 1;
+
+       if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
+               f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+               return 1;
+       }
        return 0;
 }
 
@@ -429,6 +462,32 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
                atomic_set(&sbi->nr_pages[i], 0);
 }
 
+static int validate_superblock(struct super_block *sb,
+               struct f2fs_super_block **raw_super,
+               struct buffer_head **raw_super_buf, sector_t block)
+{
+       const char *super = (block == 0 ? "first" : "second");
+
+       /* read f2fs raw super block */
+       *raw_super_buf = sb_bread(sb, block);
+       if (!*raw_super_buf) {
+               f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
+                               super);
+               return 1;
+       }
+
+       *raw_super = (struct f2fs_super_block *)
+               ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
+
+       /* sanity checking of raw super */
+       if (!sanity_check_raw_super(sb, *raw_super))
+               return 0;
+
+       f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
+                               "in %s superblock", super);
+       return 1;
+}
+
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct f2fs_sb_info *sbi;
@@ -449,16 +508,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_sbi;
        }
 
-       /* read f2fs raw super block */
-       raw_super_buf = sb_bread(sb, 0);
-       if (!raw_super_buf) {
-               err = -EIO;
-               f2fs_msg(sb, KERN_ERR, "unable to read superblock");
-               goto free_sbi;
+       if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
+               brelse(raw_super_buf);
+               if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
+                       goto free_sb_buf;
        }
-       raw_super = (struct f2fs_super_block *)
-                       ((char *)raw_super_buf->b_data + F2FS_SUPER_OFFSET);
-
        /* init some FS parameters */
        sbi->active_logs = NR_CURSEG_TYPE;
 
@@ -474,12 +528,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        if (parse_options(sb, sbi, (char *)data))
                goto free_sb_buf;
 
-       /* sanity checking of raw super */
-       if (sanity_check_raw_super(sb, raw_super)) {
-               f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem");
-               goto free_sb_buf;
-       }
-
        sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
        sb->s_max_links = F2FS_LINK_MAX;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
@@ -525,7 +573,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 
        /* sanity checking of checkpoint */
        err = -EINVAL;
-       if (sanity_check_ckpt(raw_super, sbi->ckpt)) {
+       if (sanity_check_ckpt(sbi)) {
                f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
                goto free_cp;
        }
index 58bf744..165012e 100644 (file)
@@ -698,7 +698,7 @@ out:
 
 static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
 }
 
@@ -779,7 +779,7 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
 static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
                          unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct __fat_dirent __user *d1 = (struct __fat_dirent __user *)arg;
        int short_only, both;
 
@@ -819,7 +819,7 @@ FAT_IOCTL_FILLDIR_FUNC(fat_compat_ioctl_filldir, compat_dirent)
 static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
                                 unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct compat_dirent __user *d1 = compat_ptr(arg);
        int short_only, both;
 
index 12701a5..e9cc3f0 100644 (file)
@@ -95,6 +95,8 @@ struct msdos_sb_info {
 
        spinlock_t dir_hash_lock;
        struct hlist_head dir_hashtable[FAT_HASH_SIZE];
+
+       unsigned int dirty;           /* fs state before mount */
 };
 
 #define FAT_CACHE_VALID        0       /* special case for valid cache */
index a62e0ec..3978f8c 100644 (file)
@@ -32,7 +32,7 @@ static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
 
 static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
        int is_dir = S_ISDIR(inode->i_mode);
        u32 attr, oldattr;
@@ -116,7 +116,7 @@ out:
 
 long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        u32 __user *user_attr = (u32 __user *)arg;
 
        switch (cmd) {
index f8f4916..acf6e47 100644 (file)
@@ -341,12 +341,11 @@ struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
-       struct hlist_node *_p;
        struct msdos_inode_info *i;
        struct inode *inode = NULL;
 
        spin_lock(&sbi->inode_hash_lock);
-       hlist_for_each_entry(i, _p, head, i_fat_hash) {
+       hlist_for_each_entry(i, head, i_fat_hash) {
                BUG_ON(i->vfs_inode.i_sb != sb);
                if (i->i_pos != i_pos)
                        continue;
@@ -488,10 +487,59 @@ static void fat_evict_inode(struct inode *inode)
        fat_detach(inode);
 }
 
+static void fat_set_state(struct super_block *sb,
+                       unsigned int set, unsigned int force)
+{
+       struct buffer_head *bh;
+       struct fat_boot_sector *b;
+       struct msdos_sb_info *sbi = sb->s_fs_info;
+
+       /* do not change anything if mounted read-only */
+       if ((sb->s_flags & MS_RDONLY) && !force)
+               return;
+
+       /* do not change state if fs was dirty */
+       if (sbi->dirty) {
+               /* warn only on set (mount). */
+               if (set)
+                       fat_msg(sb, KERN_WARNING, "Volume was not properly "
+                               "unmounted. Some data may be corrupt. "
+                               "Please run fsck.");
+               return;
+       }
+
+       bh = sb_bread(sb, 0);
+       if (bh == NULL) {
+               fat_msg(sb, KERN_ERR, "unable to read boot sector "
+                       "to mark fs as dirty");
+               return;
+       }
+
+       b = (struct fat_boot_sector *) bh->b_data;
+
+       if (sbi->fat_bits == 32) {
+               if (set)
+                       b->fat32.state |= FAT_STATE_DIRTY;
+               else
+                       b->fat32.state &= ~FAT_STATE_DIRTY;
+       } else /* fat 16 and 12 */ {
+               if (set)
+                       b->fat16.state |= FAT_STATE_DIRTY;
+               else
+                       b->fat16.state &= ~FAT_STATE_DIRTY;
+       }
+
+       mark_buffer_dirty(bh);
+       sync_dirty_buffer(bh);
+       brelse(bh);
+}
+
 static void fat_put_super(struct super_block *sb)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
 
+       fat_set_state(sb, 0, 0);
+
        iput(sbi->fsinfo_inode);
        iput(sbi->fat_inode);
 
@@ -566,8 +614,18 @@ static void __exit fat_destroy_inodecache(void)
 
 static int fat_remount(struct super_block *sb, int *flags, char *data)
 {
+       int new_rdonly;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
+
+       /* make sure we update state on remount. */
+       new_rdonly = *flags & MS_RDONLY;
+       if (new_rdonly != (sb->s_flags & MS_RDONLY)) {
+               if (new_rdonly)
+                       fat_set_state(sb, 0, 0);
+               else
+                       fat_set_state(sb, 1, 1);
+       }
        return 0;
 }
 
@@ -1298,17 +1356,17 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
        sbi->prev_free = FAT_START_ENT;
        sb->s_maxbytes = 0xffffffff;
 
-       if (!sbi->fat_length && b->fat32_length) {
+       if (!sbi->fat_length && b->fat32.length) {
                struct fat_boot_fsinfo *fsinfo;
                struct buffer_head *fsinfo_bh;
 
                /* Must be FAT32 */
                sbi->fat_bits = 32;
-               sbi->fat_length = le32_to_cpu(b->fat32_length);
-               sbi->root_cluster = le32_to_cpu(b->root_cluster);
+               sbi->fat_length = le32_to_cpu(b->fat32.length);
+               sbi->root_cluster = le32_to_cpu(b->fat32.root_cluster);
 
                /* MC - if info_sector is 0, don't multiply by 0 */
-               sbi->fsinfo_sector = le16_to_cpu(b->info_sector);
+               sbi->fsinfo_sector = le16_to_cpu(b->fat32.info_sector);
                if (sbi->fsinfo_sector == 0)
                        sbi->fsinfo_sector = 1;
 
@@ -1362,6 +1420,12 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
        if (sbi->fat_bits != 32)
                sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12;
 
+       /* some OSes set FAT_STATE_DIRTY and clean it on unmount. */
+       if (sbi->fat_bits == 32)
+               sbi->dirty = b->fat32.state & FAT_STATE_DIRTY;
+       else /* fat 16 or 12 */
+               sbi->dirty = b->fat16.state & FAT_STATE_DIRTY;
+
        /* check that FAT table does not overflow */
        fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
        total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
@@ -1456,6 +1520,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                                        "the device does not support discard");
        }
 
+       fat_set_state(sb, 1, 0);
        return 0;
 
 out_invalid:
index ef4b5fa..499c104 100644 (file)
@@ -21,13 +21,12 @@ static struct inode *fat_dget(struct super_block *sb, int i_logstart)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct hlist_head *head;
-       struct hlist_node *_p;
        struct msdos_inode_info *i;
        struct inode *inode = NULL;
 
        head = sbi->dir_hashtable + fat_dir_hash(i_logstart);
        spin_lock(&sbi->dir_hash_lock);
-       hlist_for_each_entry(i, _p, head, i_dir_hash) {
+       hlist_for_each_entry(i, head, i_dir_hash) {
                BUG_ON(i->vfs_inode.i_sb != sb);
                if (i->i_logstart != i_logstart)
                        continue;
index 71a600a..6599222 100644 (file)
@@ -30,7 +30,7 @@
 
 static int setfl(int fd, struct file * filp, unsigned long arg)
 {
-       struct inode * inode = filp->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(filp);
        int error = 0;
 
        /*
index de9e965..cd4d87a 100644 (file)
@@ -94,8 +94,8 @@ int proc_nr_files(ctl_table *table, int write,
 #endif
 
 /* Find an unused file structure and return a pointer to it.
- * Returns NULL, if there are no more free file structures or
- * we run out of memory.
+ * Returns an error pointer if some error happened, e.g. we are over the
+ * file structures limit, run out of memory or the operation is not permitted.
  *
  * Be very careful using this.  You are responsible for
  * getting write access to any mount that you might assign
@@ -107,7 +107,8 @@ struct file *get_empty_filp(void)
 {
        const struct cred *cred = current_cred();
        static long old_max;
-       struct file * f;
+       struct file *f;
+       int error;
 
        /*
         * Privileged users can go above max_files
@@ -122,13 +123,16 @@ struct file *get_empty_filp(void)
        }
 
        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
-       if (f == NULL)
-               goto fail;
+       if (unlikely(!f))
+               return ERR_PTR(-ENOMEM);
 
        percpu_counter_inc(&nr_files);
        f->f_cred = get_cred(cred);
-       if (security_file_alloc(f))
-               goto fail_sec;
+       error = security_file_alloc(f);
+       if (unlikely(error)) {
+               file_free(f);
+               return ERR_PTR(error);
+       }
 
        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
@@ -144,12 +148,7 @@ over:
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
-       goto fail;
-
-fail_sec:
-       file_free(f);
-fail:
-       return NULL;
+       return ERR_PTR(-ENFILE);
 }
 
 /**
@@ -173,10 +172,11 @@ struct file *alloc_file(struct path *path, fmode_t mode,
        struct file *file;
 
        file = get_empty_filp();
-       if (!file)
-               return NULL;
+       if (IS_ERR(file))
+               return file;
 
        file->f_path = *path;
+       file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;
@@ -259,6 +259,7 @@ static void __fput(struct file *file)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
+       file->f_inode = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
@@ -447,7 +448,7 @@ void mark_files_ro(struct super_block *sb)
 
        lg_global_lock(&files_lglock);
        do_file_list_for_each_entry(sb, f) {
-               if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
+               if (!S_ISREG(file_inode(f)->i_mode))
                       continue;
                if (!file_count(f))
                        continue;
index bd447e8..664b07a 100644 (file)
@@ -237,7 +237,7 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags)
 static int
 vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
 {
-       struct inode            *ip = fp->f_path.dentry->d_inode;
+       struct inode            *ip = file_inode(fp);
        struct super_block      *sbp = ip->i_sb;
        u_long                  bsize = sbp->s_blocksize;
        u_long                  page, npages, block, pblocks, nblocks, offset;
index 310972b..21f46fb 100644 (file)
@@ -318,8 +318,14 @@ static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
 
 static int write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-       if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
-               return inode->i_sb->s_op->write_inode(inode, wbc);
+       int ret;
+
+       if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
+               trace_writeback_write_inode_start(inode, wbc);
+               ret = inode->i_sb->s_op->write_inode(inode, wbc);
+               trace_writeback_write_inode(inode, wbc);
+               return ret;
+       }
        return 0;
 }
 
@@ -450,6 +456,8 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 
        WARN_ON(!(inode->i_state & I_SYNC));
 
+       trace_writeback_single_inode_start(inode, wbc, nr_to_write);
+
        ret = do_writepages(mapping, wbc);
 
        /*
@@ -1150,8 +1158,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+               trace_writeback_dirty_inode_start(inode, flags);
+
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode, flags);
+
+               trace_writeback_dirty_inode(inode, flags);
        }
 
        /*
@@ -1332,47 +1344,43 @@ void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 EXPORT_SYMBOL(writeback_inodes_sb);
 
 /**
- * writeback_inodes_sb_if_idle -       start writeback if none underway
+ * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
  * @sb: the superblock
- * @reason: reason why some writeback work was initiated
+ * @nr: the number of pages to write
+ * @reason: the reason of writeback
  *
- * Invoke writeback_inodes_sb if no writeback is currently underway.
+ * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
  */
-int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
+int try_to_writeback_inodes_sb_nr(struct super_block *sb,
+                                 unsigned long nr,
+                                 enum wb_reason reason)
 {
-       if (!writeback_in_progress(sb->s_bdi)) {
-               down_read(&sb->s_umount);
-               writeback_inodes_sb(sb, reason);
-               up_read(&sb->s_umount);
+       if (writeback_in_progress(sb->s_bdi))
                return 1;
-       } else
+
+       if (!down_read_trylock(&sb->s_umount))
                return 0;
+
+       writeback_inodes_sb_nr(sb, nr, reason);
+       up_read(&sb->s_umount);
+       return 1;
 }
-EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
+EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
 
 /**
- * writeback_inodes_sb_nr_if_idle      -       start writeback if none underway
+ * try_to_writeback_inodes_sb - try to start writeback if none underway
  * @sb: the superblock
- * @nr: the number of pages to write
  * @reason: reason why some writeback work was initiated
  *
- * Invoke writeback_inodes_sb if no writeback is currently underway.
+ * Implemented by try_to_writeback_inodes_sb_nr()
  * Returns 1 if writeback was started, 0 if not.
  */
-int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
-                                  unsigned long nr,
-                                  enum wb_reason reason)
+int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-       if (!writeback_in_progress(sb->s_bdi)) {
-               down_read(&sb->s_umount);
-               writeback_inodes_sb_nr(sb, nr, reason);
-               up_read(&sb->s_umount);
-               return 1;
-       } else
-               return 0;
+       return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
-EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);
+EXPORT_SYMBOL(try_to_writeback_inodes_sb);
 
 /**
  * sync_inodes_sb      -       sync sb inode pages
index fe6ca58..d8ac61d 100644 (file)
@@ -10,7 +10,7 @@
  * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
  * It can block.
  */
-void set_fs_root(struct fs_struct *fs, struct path *path)
+void set_fs_root(struct fs_struct *fs, const struct path *path)
 {
        struct path old_root;
 
@@ -29,7 +29,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
  * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
  * It can block.
  */
-void set_fs_pwd(struct fs_struct *fs, struct path *path)
+void set_fs_pwd(struct fs_struct *fs, const struct path *path)
 {
        struct path old_pwd;
 
@@ -53,7 +53,7 @@ static inline int replace_path(struct path *p, const struct path *old, const str
        return 1;
 }
 
-void chroot_fs_refs(struct path *old_root, struct path *new_root)
+void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
 {
        struct task_struct *g, *p;
        struct fs_struct *fs;
index 8dcb114..e2cba1f 100644 (file)
@@ -237,13 +237,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie)
 {
        struct fscache_object *object;
-       struct hlist_node *_n;
        int ret;
 
        _enter("%p,%p{%s}", cache, cookie, cookie->def->name);
 
        spin_lock(&cookie->lock);
-       hlist_for_each_entry(object, _n, &cookie->backing_objects,
+       hlist_for_each_entry(object, &cookie->backing_objects,
                             cookie_link) {
                if (object->cache == cache)
                        goto object_already_extant;
@@ -311,7 +310,6 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 {
        struct fscache_object *p;
        struct fscache_cache *cache = object->cache;
-       struct hlist_node *_n;
        int ret;
 
        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
@@ -321,7 +319,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
        /* there may be multiple initial creations of this object, but we only
         * want one */
        ret = -EEXIST;
-       hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) {
+       hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
                if (p->cache == object->cache) {
                        if (p->state >= FSCACHE_OBJECT_DYING)
                                ret = -ENOBUFS;
@@ -331,7 +329,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 
        /* pin the parent object */
        spin_lock_nested(&cookie->parent->lock, 1);
-       hlist_for_each_entry(p, _n, &cookie->parent->backing_objects,
+       hlist_for_each_entry(p, &cookie->parent->backing_objects,
                             cookie_link) {
                if (p->cache == object->cache) {
                        if (p->state >= FSCACHE_OBJECT_DYING) {
@@ -435,7 +433,6 @@ EXPORT_SYMBOL(__fscache_wait_on_invalidate);
 void __fscache_update_cookie(struct fscache_cookie *cookie)
 {
        struct fscache_object *object;
-       struct hlist_node *_p;
 
        fscache_stat(&fscache_n_updates);
 
@@ -452,7 +449,7 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
        spin_lock(&cookie->lock);
 
        /* update the index entry on disk in each cache backing this cookie */
-       hlist_for_each_entry(object, _p,
+       hlist_for_each_entry(object,
                             &cookie->backing_objects, cookie_link) {
                fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
        }
index 75a20c0..b7978b9 100644 (file)
@@ -23,7 +23,7 @@ static struct fuse_conn *fuse_ctl_file_conn_get(struct file *file)
 {
        struct fuse_conn *fc;
        mutex_lock(&fuse_mutex);
-       fc = file->f_path.dentry->d_inode->i_private;
+       fc = file_inode(file)->i_private;
        if (fc)
                fc = fuse_conn_get(fc);
        mutex_unlock(&fuse_mutex);
index e9bdec0..11dfa0c 100644 (file)
@@ -532,7 +532,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
 
 void fuse_force_forget(struct file *file, u64 nodeid)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_forget_in inarg;
index 8506522..ff15522 100644 (file)
@@ -1325,7 +1325,7 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
        int plus, err;
        size_t nbytes;
        struct page *page;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        u64 attr_version = 0;
index c807176..34b80ba 100644 (file)
@@ -355,7 +355,7 @@ static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
 
 static int fuse_flush(struct file *file, fl_owner_t id)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
@@ -1215,7 +1215,7 @@ static ssize_t __fuse_direct_read(struct file *file, const struct iovec *iov,
                                  unsigned long nr_segs, loff_t *ppos)
 {
        ssize_t res;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        if (is_bad_inode(inode))
                return -EIO;
@@ -1238,7 +1238,7 @@ static ssize_t fuse_direct_read(struct file *file, char __user *buf,
 static ssize_t __fuse_direct_write(struct file *file, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t *ppos)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        size_t count = iov_length(iov, nr_segs);
        ssize_t res;
 
@@ -1258,7 +1258,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        ssize_t res;
 
        if (is_bad_inode(inode))
@@ -1485,7 +1485,7 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
-               struct inode *inode = file->f_dentry->d_inode;
+               struct inode *inode = file_inode(file);
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                struct fuse_file *ff = file->private_data;
@@ -1543,7 +1543,7 @@ static void fuse_lk_fill(struct fuse_req *req, struct file *file,
                         const struct file_lock *fl, int opcode, pid_t pid,
                         int flock)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_lk_in *arg = &req->misc.lk_in;
@@ -1565,7 +1565,7 @@ static void fuse_lk_fill(struct fuse_req *req, struct file *file,
 
 static int fuse_getlk(struct file *file, struct file_lock *fl)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_lk_out outarg;
@@ -1590,7 +1590,7 @@ static int fuse_getlk(struct file *file, struct file_lock *fl)
 
 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
@@ -1622,7 +1622,7 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
 
 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;
 
@@ -1645,7 +1645,7 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
 
 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;
 
@@ -1702,7 +1702,7 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
 {
        loff_t retval;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
        if (whence == SEEK_CUR || whence == SEEK_SET)
@@ -2079,7 +2079,7 @@ EXPORT_SYMBOL_GPL(fuse_do_ioctl);
 long fuse_ioctl_common(struct file *file, unsigned int cmd,
                       unsigned long arg, unsigned int flags)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
 
        if (!fuse_allow_current_process(fc))
index 01353ed..df00993 100644 (file)
@@ -679,7 +679,7 @@ static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
 
        if (*max_len < len) {
                *max_len = len;
-               return  255;
+               return  FILEID_INVALID;
        }
 
        nodeid = get_fuse_inode(inode)->nodeid;
index 4767774..9973df4 100644 (file)
@@ -37,10 +37,10 @@ static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
 
        if (parent && (*len < GFS2_LARGE_FH_SIZE)) {
                *len = GFS2_LARGE_FH_SIZE;
-               return 255;
+               return FILEID_INVALID;
        } else if (*len < GFS2_SMALL_FH_SIZE) {
                *len = GFS2_SMALL_FH_SIZE;
-               return 255;
+               return FILEID_INVALID;
        }
 
        fh[0] = cpu_to_be32(ip->i_no_formal_ino >> 32);
index 2687f50..019f45e 100644 (file)
@@ -157,7 +157,7 @@ static const u32 gfs2_to_fsflags[32] = {
 
 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
@@ -217,7 +217,7 @@ void gfs2_set_inode_flags(struct inode *inode)
  */
 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *bh;
@@ -293,7 +293,7 @@ out_drop_write:
 
 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        u32 fsflags, gfsflags;
 
        if (get_user(fsflags, ptr))
@@ -336,7 +336,7 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
 {
-       struct inode *inode = filep->f_dentry->d_inode;
+       struct inode *inode = file_inode(filep);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
@@ -386,7 +386,7 @@ static int gfs2_allocate_page_backing(struct page *page)
 static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned long last_index;
@@ -673,8 +673,7 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *file = iocb->ki_filp;
        size_t writesize = iov_length(iov, nr_segs);
-       struct dentry *dentry = file->f_dentry;
-       struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+       struct gfs2_inode *ip = GFS2_I(file_inode(file));
        int ret;
 
        ret = gfs2_rs_alloc(ip);
@@ -772,7 +771,7 @@ static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
 static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
                           loff_t len)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
@@ -938,7 +937,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 {
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;
-       struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
+       struct gfs2_inode *ip = GFS2_I(file_inode(file));
        struct gfs2_glock *gl;
        unsigned int state;
        int flags;
index 52c2aea..d1f51fd 100644 (file)
@@ -1257,7 +1257,7 @@ fail:
 
 int gfs2_fitrim(struct file *filp, void __user *argp)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
        struct buffer_head *bh;
index 597a612..aa5c480 100644 (file)
@@ -103,7 +103,7 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
        int n = simple_strtol(buf, NULL, 0);
 
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        switch (n) {
        case 0:
@@ -133,7 +133,7 @@ static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
 static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
 {
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        if (simple_strtol(buf, NULL, 0) != 1)
                return -EINVAL;
@@ -148,7 +148,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
                                 size_t len)
 {
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        if (simple_strtol(buf, NULL, 0) != 1)
                return -EINVAL;
@@ -161,7 +161,7 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
                                size_t len)
 {
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        if (simple_strtol(buf, NULL, 0) != 1)
                return -EINVAL;
@@ -178,7 +178,7 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
        u32 id;
 
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        id = simple_strtoul(buf, NULL, 0);
 
@@ -198,7 +198,7 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
        u32 id;
 
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        id = simple_strtoul(buf, NULL, 0);
 
@@ -221,7 +221,7 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
        int rv;
 
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
                    mode);
@@ -532,7 +532,7 @@ static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
        unsigned int x, y;
 
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
                return -EINVAL;
@@ -551,7 +551,7 @@ static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
        unsigned int x;
 
        if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
+               return -EPERM;
 
        x = simple_strtoul(buf, NULL, 0);
 
index 422dde2..5f7f1ab 100644 (file)
@@ -51,7 +51,7 @@ done:
  */
 static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        int len, err;
        char strbuf[HFS_MAX_NAMELEN];
index d47f116..3031dfd 100644 (file)
@@ -128,7 +128,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
-       struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+       struct inode *inode = file_inode(file)->i_mapping->host;
        ssize_t ret;
 
        ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
index 3cc0df7..09d278b 100644 (file)
@@ -5,5 +5,5 @@
 obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o
 
 hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
-               bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o
-
+               bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \
+               attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
new file mode 100644 (file)
index 0000000..8d691f1
--- /dev/null
@@ -0,0 +1,399 @@
+/*
+ * linux/fs/hfsplus/attributes.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handling of records in attributes tree
+ */
+
+#include "hfsplus_fs.h"
+#include "hfsplus_raw.h"
+
+static struct kmem_cache *hfsplus_attr_tree_cachep;
+
+int hfsplus_create_attr_tree_cache(void)
+{
+       if (hfsplus_attr_tree_cachep)
+               return -EEXIST;
+
+       hfsplus_attr_tree_cachep =
+               kmem_cache_create("hfsplus_attr_cache",
+                       sizeof(hfsplus_attr_entry), 0,
+                       SLAB_HWCACHE_ALIGN, NULL);
+       if (!hfsplus_attr_tree_cachep)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void hfsplus_destroy_attr_tree_cache(void)
+{
+       kmem_cache_destroy(hfsplus_attr_tree_cachep);
+}
+
+int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1,
+                               const hfsplus_btree_key *k2)
+{
+       __be32 k1_cnid, k2_cnid;
+
+       k1_cnid = k1->attr.cnid;
+       k2_cnid = k2->attr.cnid;
+       if (k1_cnid != k2_cnid)
+               return be32_to_cpu(k1_cnid) < be32_to_cpu(k2_cnid) ? -1 : 1;
+
+       return hfsplus_strcmp(
+                       (const struct hfsplus_unistr *)&k1->attr.key_name,
+                       (const struct hfsplus_unistr *)&k2->attr.key_name);
+}
+
+int hfsplus_attr_build_key(struct super_block *sb, hfsplus_btree_key *key,
+                       u32 cnid, const char *name)
+{
+       int len;
+
+       memset(key, 0, sizeof(struct hfsplus_attr_key));
+       key->attr.cnid = cpu_to_be32(cnid);
+       if (name) {
+               len = strlen(name);
+               if (len > HFSPLUS_ATTR_MAX_STRLEN) {
+                       printk(KERN_ERR "hfs: invalid xattr name's length\n");
+                       return -EINVAL;
+               }
+               hfsplus_asc2uni(sb,
+                               (struct hfsplus_unistr *)&key->attr.key_name,
+                               HFSPLUS_ATTR_MAX_STRLEN, name, len);
+               len = be16_to_cpu(key->attr.key_name.length);
+       } else {
+               key->attr.key_name.length = 0;
+               len = 0;
+       }
+
+       /* The length of the key, as stored in key_len field, does not include
+        * the size of the key_len field itself.
+        * So, offsetof(hfsplus_attr_key, key_name) is a trick because
+        * it takes into consideration key_len field (__be16) of
+        * hfsplus_attr_key structure instead of length field (__be16) of
+        * hfsplus_attr_unistr structure.
+        */
+       key->key_len =
+               cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) +
+                               2 * len);
+
+       return 0;
+}
+
+void hfsplus_attr_build_key_uni(hfsplus_btree_key *key,
+                                       u32 cnid,
+                                       struct hfsplus_attr_unistr *name)
+{
+       int ustrlen;
+
+       memset(key, 0, sizeof(struct hfsplus_attr_key));
+       ustrlen = be16_to_cpu(name->length);
+       key->attr.cnid = cpu_to_be32(cnid);
+       key->attr.key_name.length = cpu_to_be16(ustrlen);
+       ustrlen *= 2;
+       memcpy(key->attr.key_name.unicode, name->unicode, ustrlen);
+
+       /* The length of the key, as stored in key_len field, does not include
+        * the size of the key_len field itself.
+        * So, offsetof(hfsplus_attr_key, key_name) is a trick because
+        * it takes into consideration key_len field (__be16) of
+        * hfsplus_attr_key structure instead of length field (__be16) of
+        * hfsplus_attr_unistr structure.
+        */
+       key->key_len =
+               cpu_to_be16(offsetof(struct hfsplus_attr_key, key_name) +
+                               ustrlen);
+}
+
+hfsplus_attr_entry *hfsplus_alloc_attr_entry(void)
+{
+       return kmem_cache_alloc(hfsplus_attr_tree_cachep, GFP_KERNEL);
+}
+
+void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry)
+{
+       if (entry)
+               kmem_cache_free(hfsplus_attr_tree_cachep, entry);
+}
+
+#define HFSPLUS_INVALID_ATTR_RECORD -1
+
+static int hfsplus_attr_build_record(hfsplus_attr_entry *entry, int record_type,
+                               u32 cnid, const void *value, size_t size)
+{
+       if (record_type == HFSPLUS_ATTR_FORK_DATA) {
+               /*
+                * Mac OS X supports only inline data attributes.
+                * Do nothing
+                */
+               memset(entry, 0, sizeof(*entry));
+               return sizeof(struct hfsplus_attr_fork_data);
+       } else if (record_type == HFSPLUS_ATTR_EXTENTS) {
+               /*
+                * Mac OS X supports only inline data attributes.
+                * Do nothing.
+                */
+               memset(entry, 0, sizeof(*entry));
+               return sizeof(struct hfsplus_attr_extents);
+       } else if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
+               u16 len;
+
+               memset(entry, 0, sizeof(struct hfsplus_attr_inline_data));
+               entry->inline_data.record_type = cpu_to_be32(record_type);
+               if (size <= HFSPLUS_MAX_INLINE_DATA_SIZE)
+                       len = size;
+               else
+                       return HFSPLUS_INVALID_ATTR_RECORD;
+               entry->inline_data.length = cpu_to_be16(len);
+               memcpy(entry->inline_data.raw_bytes, value, len);
+               /*
+                * Align len on two-byte boundary.
+                * It needs to add pad byte if we have odd len.
+                */
+               len = round_up(len, 2);
+               return offsetof(struct hfsplus_attr_inline_data, raw_bytes) +
+                                       len;
+       } else /* invalid input */
+               memset(entry, 0, sizeof(*entry));
+
+       return HFSPLUS_INVALID_ATTR_RECORD;
+}
+
+int hfsplus_find_attr(struct super_block *sb, u32 cnid,
+                       const char *name, struct hfs_find_data *fd)
+{
+       int err = 0;
+
+       dprint(DBG_ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid);
+
+       if (!HFSPLUS_SB(sb)->attr_tree) {
+               printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+               return -EINVAL;
+       }
+
+       if (name) {
+               err = hfsplus_attr_build_key(sb, fd->search_key, cnid, name);
+               if (err)
+                       goto failed_find_attr;
+               err = hfs_brec_find(fd, hfs_find_rec_by_key);
+               if (err)
+                       goto failed_find_attr;
+       } else {
+               err = hfsplus_attr_build_key(sb, fd->search_key, cnid, NULL);
+               if (err)
+                       goto failed_find_attr;
+               err = hfs_brec_find(fd, hfs_find_1st_rec_by_cnid);
+               if (err)
+                       goto failed_find_attr;
+       }
+
+failed_find_attr:
+       return err;
+}
+
+int hfsplus_attr_exists(struct inode *inode, const char *name)
+{
+       int err = 0;
+       struct super_block *sb = inode->i_sb;
+       struct hfs_find_data fd;
+
+       if (!HFSPLUS_SB(sb)->attr_tree)
+               return 0;
+
+       err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
+       if (err)
+               return 0;
+
+       err = hfsplus_find_attr(sb, inode->i_ino, name, &fd);
+       if (err)
+               goto attr_not_found;
+
+       hfs_find_exit(&fd);
+       return 1;
+
+attr_not_found:
+       hfs_find_exit(&fd);
+       return 0;
+}
+
+int hfsplus_create_attr(struct inode *inode,
+                               const char *name,
+                               const void *value, size_t size)
+{
+       struct super_block *sb = inode->i_sb;
+       struct hfs_find_data fd;
+       hfsplus_attr_entry *entry_ptr;
+       int entry_size;
+       int err;
+
+       dprint(DBG_ATTR_MOD, "create_attr: %s,%ld\n",
+               name ? name : NULL, inode->i_ino);
+
+       if (!HFSPLUS_SB(sb)->attr_tree) {
+               printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+               return -EINVAL;
+       }
+
+       entry_ptr = hfsplus_alloc_attr_entry();
+       if (!entry_ptr)
+               return -ENOMEM;
+
+       err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
+       if (err)
+               goto failed_init_create_attr;
+
+       if (name) {
+               err = hfsplus_attr_build_key(sb, fd.search_key,
+                                               inode->i_ino, name);
+               if (err)
+                       goto failed_create_attr;
+       } else {
+               err = -EINVAL;
+               goto failed_create_attr;
+       }
+
+       /* Mac OS X supports only inline data attributes. */
+       entry_size = hfsplus_attr_build_record(entry_ptr,
+                                       HFSPLUS_ATTR_INLINE_DATA,
+                                       inode->i_ino,
+                                       value, size);
+       if (entry_size == HFSPLUS_INVALID_ATTR_RECORD) {
+               err = -EINVAL;
+               goto failed_create_attr;
+       }
+
+       err = hfs_brec_find(&fd, hfs_find_rec_by_key);
+       if (err != -ENOENT) {
+               if (!err)
+                       err = -EEXIST;
+               goto failed_create_attr;
+       }
+
+       err = hfs_brec_insert(&fd, entry_ptr, entry_size);
+       if (err)
+               goto failed_create_attr;
+
+       hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
+
+failed_create_attr:
+       hfs_find_exit(&fd);
+
+failed_init_create_attr:
+       hfsplus_destroy_attr_entry(entry_ptr);
+       return err;
+}
+
+static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
+                                       struct hfs_find_data *fd)
+{
+       int err = 0;
+       __be32 found_cnid, record_type;
+
+       hfs_bnode_read(fd->bnode, &found_cnid,
+                       fd->keyoffset +
+                       offsetof(struct hfsplus_attr_key, cnid),
+                       sizeof(__be32));
+       if (cnid != be32_to_cpu(found_cnid))
+               return -ENOENT;
+
+       hfs_bnode_read(fd->bnode, &record_type,
+                       fd->entryoffset, sizeof(record_type));
+
+       switch (be32_to_cpu(record_type)) {
+       case HFSPLUS_ATTR_INLINE_DATA:
+               /* All is OK. Do nothing. */
+               break;
+       case HFSPLUS_ATTR_FORK_DATA:
+       case HFSPLUS_ATTR_EXTENTS:
+               printk(KERN_ERR "hfs: only inline data xattr are supported\n");
+               return -EOPNOTSUPP;
+       default:
+               printk(KERN_ERR "hfs: invalid extended attribute record\n");
+               return -ENOENT;
+       }
+
+       err = hfs_brec_remove(fd);
+       if (err)
+               return err;
+
+       hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ATTR_DIRTY);
+       return err;
+}
+
+int hfsplus_delete_attr(struct inode *inode, const char *name)
+{
+       int err = 0;
+       struct super_block *sb = inode->i_sb;
+       struct hfs_find_data fd;
+
+       dprint(DBG_ATTR_MOD, "delete_attr: %s,%ld\n",
+               name ? name : NULL, inode->i_ino);
+
+       if (!HFSPLUS_SB(sb)->attr_tree) {
+               printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+               return -EINVAL;
+       }
+
+       err = hfs_find_init(HFSPLUS_SB(sb)->attr_tree, &fd);
+       if (err)
+               return err;
+
+       if (name) {
+               err = hfsplus_attr_build_key(sb, fd.search_key,
+                                               inode->i_ino, name);
+               if (err)
+                       goto out;
+       } else {
+               printk(KERN_ERR "hfs: invalid extended attribute name\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = hfs_brec_find(&fd, hfs_find_rec_by_key);
+       if (err)
+               goto out;
+
+       err = __hfsplus_delete_attr(inode, inode->i_ino, &fd);
+       if (err)
+               goto out;
+
+out:
+       hfs_find_exit(&fd);
+       return err;
+}
+
+int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid)
+{
+       int err = 0;
+       struct hfs_find_data fd;
+
+       dprint(DBG_ATTR_MOD, "delete_all_attrs: %d\n", cnid);
+
+       if (!HFSPLUS_SB(dir->i_sb)->attr_tree) {
+               printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+               return -EINVAL;
+       }
+
+       err = hfs_find_init(HFSPLUS_SB(dir->i_sb)->attr_tree, &fd);
+       if (err)
+               return err;
+
+       for (;;) {
+               err = hfsplus_find_attr(dir->i_sb, cnid, NULL, &fd);
+               if (err) {
+                       if (err != -ENOENT)
+                               printk(KERN_ERR "hfs: xattr search failed.\n");
+                       goto end_delete_all;
+               }
+
+               err = __hfsplus_delete_attr(dir, cnid, &fd);
+               if (err)
+                       goto end_delete_all;
+       }
+
+end_delete_all:
+       hfs_find_exit(&fd);
+       return err;
+}
index 5d799c1..d73c98d 100644 (file)
@@ -24,7 +24,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
        fd->key = ptr + tree->max_key_len + 2;
        dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n",
                tree->cnid, __builtin_return_address(0));
-       mutex_lock(&tree->tree_lock);
+       switch (tree->cnid) {
+       case HFSPLUS_CAT_CNID:
+               mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+               break;
+       case HFSPLUS_EXT_CNID:
+               mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+               break;
+       case HFSPLUS_ATTR_CNID:
+               mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+               break;
+       default:
+               BUG();
+       }
        return 0;
 }
 
@@ -38,15 +50,73 @@ void hfs_find_exit(struct hfs_find_data *fd)
        fd->tree = NULL;
 }
 
-/* Find the record in bnode that best matches key (not greater than...)*/
-int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
+int hfs_find_1st_rec_by_cnid(struct hfs_bnode *bnode,
+                               struct hfs_find_data *fd,
+                               int *begin,
+                               int *end,
+                               int *cur_rec)
+{
+       __be32 cur_cnid, search_cnid;
+
+       if (bnode->tree->cnid == HFSPLUS_EXT_CNID) {
+               cur_cnid = fd->key->ext.cnid;
+               search_cnid = fd->search_key->ext.cnid;
+       } else if (bnode->tree->cnid == HFSPLUS_CAT_CNID) {
+               cur_cnid = fd->key->cat.parent;
+               search_cnid = fd->search_key->cat.parent;
+       } else if (bnode->tree->cnid == HFSPLUS_ATTR_CNID) {
+               cur_cnid = fd->key->attr.cnid;
+               search_cnid = fd->search_key->attr.cnid;
+       } else
+               BUG();
+
+       if (cur_cnid == search_cnid) {
+               (*end) = (*cur_rec);
+               if ((*begin) == (*end))
+                       return 1;
+       } else {
+               if (be32_to_cpu(cur_cnid) < be32_to_cpu(search_cnid))
+                       (*begin) = (*cur_rec) + 1;
+               else
+                       (*end) = (*cur_rec) - 1;
+       }
+
+       return 0;
+}
+
+int hfs_find_rec_by_key(struct hfs_bnode *bnode,
+                               struct hfs_find_data *fd,
+                               int *begin,
+                               int *end,
+                               int *cur_rec)
 {
        int cmpval;
+
+       cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
+       if (!cmpval) {
+               (*end) = (*cur_rec);
+               return 1;
+       }
+       if (cmpval < 0)
+               (*begin) = (*cur_rec) + 1;
+       else
+               *(end) = (*cur_rec) - 1;
+
+       return 0;
+}
+
+/* Find the record in bnode that best matches key (not greater than...)*/
+int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd,
+                                       search_strategy_t rec_found)
+{
        u16 off, len, keylen;
        int rec;
        int b, e;
        int res;
 
+       if (!rec_found)
+               BUG();
+
        b = 0;
        e = bnode->num_recs - 1;
        res = -ENOENT;
@@ -59,17 +129,12 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
                        goto fail;
                }
                hfs_bnode_read(bnode, fd->key, off, keylen);
-               cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
-               if (!cmpval) {
-                       e = rec;
+               if (rec_found(bnode, fd, &b, &e, &rec)) {
                        res = 0;
                        goto done;
                }
-               if (cmpval < 0)
-                       b = rec + 1;
-               else
-                       e = rec - 1;
        } while (b <= e);
+
        if (rec != e && e >= 0) {
                len = hfs_brec_lenoff(bnode, e, &off);
                keylen = hfs_brec_keylen(bnode, e);
@@ -79,19 +144,21 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
                }
                hfs_bnode_read(bnode, fd->key, off, keylen);
        }
+
 done:
        fd->record = e;
        fd->keyoffset = off;
        fd->keylength = keylen;
        fd->entryoffset = off + keylen;
        fd->entrylength = len - keylen;
+
 fail:
        return res;
 }
 
 /* Traverse a B*Tree from the root to a leaf finding best fit to key */
 /* Return allocated copy of node found, set recnum to best record */
-int hfs_brec_find(struct hfs_find_data *fd)
+int hfs_brec_find(struct hfs_find_data *fd, search_strategy_t do_key_compare)
 {
        struct hfs_btree *tree;
        struct hfs_bnode *bnode;
@@ -122,7 +189,7 @@ int hfs_brec_find(struct hfs_find_data *fd)
                        goto invalid;
                bnode->parent = parent;
 
-               res = __hfs_brec_find(bnode, fd);
+               res = __hfs_brec_find(bnode, fd, do_key_compare);
                if (!height)
                        break;
                if (fd->record < 0)
@@ -149,7 +216,7 @@ int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len)
 {
        int res;
 
-       res = hfs_brec_find(fd);
+       res = hfs_brec_find(fd, hfs_find_rec_by_key);
        if (res)
                return res;
        if (fd->entrylength > rec_len)
index 1c42cc5..f31ac6f 100644 (file)
@@ -62,7 +62,8 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
 
        tree = node->tree;
        if (node->type == HFS_NODE_LEAF ||
-           tree->attributes & HFS_TREE_VARIDXKEYS)
+           tree->attributes & HFS_TREE_VARIDXKEYS ||
+           node->tree->cnid == HFSPLUS_ATTR_CNID)
                key_len = hfs_bnode_read_u16(node, off) + 2;
        else
                key_len = tree->max_key_len + 2;
@@ -314,7 +315,8 @@ void hfs_bnode_dump(struct hfs_bnode *node)
                if (i && node->type == HFS_NODE_INDEX) {
                        int tmp;
 
-                       if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
+                       if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
+                                       node->tree->cnid == HFSPLUS_ATTR_CNID)
                                tmp = hfs_bnode_read_u16(node, key_off) + 2;
                        else
                                tmp = node->tree->max_key_len + 2;
@@ -646,6 +648,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
                if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
                        hfs_bnode_unhash(node);
                        spin_unlock(&tree->hash_lock);
+                       hfs_bnode_clear(node, 0,
+                               PAGE_CACHE_SIZE * tree->pages_per_bnode);
                        hfs_bmap_free(node);
                        hfs_bnode_free(node);
                        return;
index 2a734cf..298d4e4 100644 (file)
@@ -36,7 +36,8 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
                return 0;
 
        if ((node->type == HFS_NODE_INDEX) &&
-          !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) {
+          !(node->tree->attributes & HFS_TREE_VARIDXKEYS) &&
+          (node->tree->cnid != HFSPLUS_ATTR_CNID)) {
                retval = node->tree->max_key_len + 2;
        } else {
                recoff = hfs_bnode_read_u16(node,
@@ -151,12 +152,13 @@ skip:
 
                /* get index key */
                hfs_bnode_read_key(new_node, fd->search_key, 14);
-               __hfs_brec_find(fd->bnode, fd);
+               __hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);
 
                hfs_bnode_put(new_node);
                new_node = NULL;
 
-               if (tree->attributes & HFS_TREE_VARIDXKEYS)
+               if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
+                               (tree->cnid == HFSPLUS_ATTR_CNID))
                        key_len = be16_to_cpu(fd->search_key->key_len) + 2;
                else {
                        fd->search_key->key_len =
@@ -201,7 +203,7 @@ again:
                hfs_bnode_put(node);
                node = fd->bnode = parent;
 
-               __hfs_brec_find(node, fd);
+               __hfs_brec_find(node, fd, hfs_find_rec_by_key);
                goto again;
        }
        hfs_bnode_write_u16(node,
@@ -367,12 +369,13 @@ again:
        parent = hfs_bnode_find(tree, node->parent);
        if (IS_ERR(parent))
                return PTR_ERR(parent);
-       __hfs_brec_find(parent, fd);
+       __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
        hfs_bnode_dump(parent);
        rec = fd->record;
 
        /* size difference between old and new key */
-       if (tree->attributes & HFS_TREE_VARIDXKEYS)
+       if ((tree->attributes & HFS_TREE_VARIDXKEYS) ||
+                               (tree->cnid == HFSPLUS_ATTR_CNID))
                newkeylen = hfs_bnode_read_u16(node, 14) + 2;
        else
                fd->keylength = newkeylen = tree->max_key_len + 2;
@@ -427,7 +430,7 @@ skip:
                hfs_bnode_read_key(new_node, fd->search_key, 14);
                cnid = cpu_to_be32(new_node->this);
 
-               __hfs_brec_find(fd->bnode, fd);
+               __hfs_brec_find(fd->bnode, fd, hfs_find_rec_by_key);
                hfs_brec_insert(fd, &cnid, sizeof(cnid));
                hfs_bnode_put(fd->bnode);
                hfs_bnode_put(new_node);
@@ -495,13 +498,15 @@ static int hfs_btree_inc_height(struct hfs_btree *tree)
                /* insert old root idx into new root */
                node->parent = tree->root;
                if (node->type == HFS_NODE_LEAF ||
-                   tree->attributes & HFS_TREE_VARIDXKEYS)
+                               tree->attributes & HFS_TREE_VARIDXKEYS ||
+                               tree->cnid == HFSPLUS_ATTR_CNID)
                        key_size = hfs_bnode_read_u16(node, 14) + 2;
                else
                        key_size = tree->max_key_len + 2;
                hfs_bnode_copy(new_node, 14, node, 14, key_size);
 
-               if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
+               if (!(tree->attributes & HFS_TREE_VARIDXKEYS) &&
+                               (tree->cnid != HFSPLUS_ATTR_CNID)) {
                        key_size = tree->max_key_len + 2;
                        hfs_bnode_write_u16(new_node, 14, tree->max_key_len);
                }
index 685d07d..efb689c 100644 (file)
@@ -98,6 +98,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
                        set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
                }
                break;
+       case HFSPLUS_ATTR_CNID:
+               if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
+                       printk(KERN_ERR "hfs: invalid attributes max_key_len %d\n",
+                               tree->max_key_len);
+                       goto fail_page;
+               }
+               tree->keycmp = hfsplus_attr_bin_cmp_key;
+               break;
        default:
                printk(KERN_ERR "hfs: unknown B*Tree requested\n");
                goto fail_page;
index 798d9c4..840d71e 100644 (file)
@@ -45,7 +45,8 @@ void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
 
        key->cat.parent = cpu_to_be32(parent);
        if (str) {
-               hfsplus_asc2uni(sb, &key->cat.name, str->name, str->len);
+               hfsplus_asc2uni(sb, &key->cat.name, HFSPLUS_MAX_STRLEN,
+                                       str->name, str->len);
                len = be16_to_cpu(key->cat.name.length);
        } else {
                key->cat.name.length = 0;
@@ -167,7 +168,8 @@ static int hfsplus_fill_cat_thread(struct super_block *sb,
        entry->type = cpu_to_be16(type);
        entry->thread.reserved = 0;
        entry->thread.parentID = cpu_to_be32(parentid);
-       hfsplus_asc2uni(sb, &entry->thread.nodeName, str->name, str->len);
+       hfsplus_asc2uni(sb, &entry->thread.nodeName, HFSPLUS_MAX_STRLEN,
+                               str->name, str->len);
        return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2;
 }
 
@@ -198,7 +200,7 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
        hfsplus_cat_build_key_uni(fd->search_key,
                be32_to_cpu(tmp.thread.parentID),
                &tmp.thread.nodeName);
-       return hfs_brec_find(fd);
+       return hfs_brec_find(fd, hfs_find_rec_by_key);
 }
 
 int hfsplus_create_cat(u32 cnid, struct inode *dir,
@@ -221,7 +223,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
                S_ISDIR(inode->i_mode) ?
                        HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
                dir->i_ino, str);
-       err = hfs_brec_find(&fd);
+       err = hfs_brec_find(&fd, hfs_find_rec_by_key);
        if (err != -ENOENT) {
                if (!err)
                        err = -EEXIST;
@@ -233,7 +235,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
 
        hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
        entry_size = hfsplus_cat_build_record(&entry, cnid, inode);
-       err = hfs_brec_find(&fd);
+       err = hfs_brec_find(&fd, hfs_find_rec_by_key);
        if (err != -ENOENT) {
                /* panic? */
                if (!err)
@@ -253,7 +255,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
 
 err1:
        hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
-       if (!hfs_brec_find(&fd))
+       if (!hfs_brec_find(&fd, hfs_find_rec_by_key))
                hfs_brec_remove(&fd);
 err2:
        hfs_find_exit(&fd);
@@ -279,7 +281,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
                int len;
 
                hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
-               err = hfs_brec_find(&fd);
+               err = hfs_brec_find(&fd, hfs_find_rec_by_key);
                if (err)
                        goto out;
 
@@ -296,7 +298,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
        } else
                hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
 
-       err = hfs_brec_find(&fd);
+       err = hfs_brec_find(&fd, hfs_find_rec_by_key);
        if (err)
                goto out;
 
@@ -326,7 +328,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
                goto out;
 
        hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
-       err = hfs_brec_find(&fd);
+       err = hfs_brec_find(&fd, hfs_find_rec_by_key);
        if (err)
                goto out;
 
@@ -337,6 +339,12 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
        dir->i_size--;
        dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
        hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
+
+       if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
+               if (HFSPLUS_SB(sb)->attr_tree)
+                       hfsplus_delete_all_attrs(dir, cnid);
+       }
+
 out:
        hfs_find_exit(&fd);
 
@@ -363,7 +371,7 @@ int hfsplus_rename_cat(u32 cnid,
 
        /* find the old dir entry and read the data */
        hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
-       err = hfs_brec_find(&src_fd);
+       err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
        if (err)
                goto out;
        if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
@@ -376,7 +384,7 @@ int hfsplus_rename_cat(u32 cnid,
 
        /* create new dir entry with the data from the old entry */
        hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
-       err = hfs_brec_find(&dst_fd);
+       err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
        if (err != -ENOENT) {
                if (!err)
                        err = -EEXIST;
@@ -391,7 +399,7 @@ int hfsplus_rename_cat(u32 cnid,
 
        /* finally remove the old entry */
        hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
-       err = hfs_brec_find(&src_fd);
+       err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
        if (err)
                goto out;
        err = hfs_brec_remove(&src_fd);
@@ -402,7 +410,7 @@ int hfsplus_rename_cat(u32 cnid,
 
        /* remove old thread entry */
        hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL);
-       err = hfs_brec_find(&src_fd);
+       err = hfs_brec_find(&src_fd, hfs_find_rec_by_key);
        if (err)
                goto out;
        type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset);
@@ -414,7 +422,7 @@ int hfsplus_rename_cat(u32 cnid,
        hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
        entry_size = hfsplus_fill_cat_thread(sb, &entry, type,
                dst_dir->i_ino, dst_name);
-       err = hfs_brec_find(&dst_fd);
+       err = hfs_brec_find(&dst_fd, hfs_find_rec_by_key);
        if (err != -ENOENT) {
                if (!err)
                        err = -EEXIST;
index 6b9f921..031c24e 100644 (file)
@@ -15,6 +15,7 @@
 
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
+#include "xattr.h"
 
 static inline void hfsplus_instantiate(struct dentry *dentry,
                                       struct inode *inode, u32 cnid)
@@ -122,7 +123,7 @@ fail:
 
 static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        int len, err;
        char strbuf[HFSPLUS_MAX_STRLEN + 1];
@@ -138,7 +139,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
        if (err)
                return err;
        hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
-       err = hfs_brec_find(&fd);
+       err = hfs_brec_find(&fd, hfs_find_rec_by_key);
        if (err)
                goto out;
 
@@ -421,6 +422,15 @@ static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
        if (res)
                goto out_err;
 
+       res = hfsplus_init_inode_security(inode, dir, &dentry->d_name);
+       if (res == -EOPNOTSUPP)
+               res = 0; /* Operation is not supported. */
+       else if (res) {
+               /* Try to delete anyway without error analysis. */
+               hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
+               goto out_err;
+       }
+
        hfsplus_instantiate(dentry, inode, inode->i_ino);
        mark_inode_dirty(inode);
        goto out;
@@ -450,15 +460,26 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
                init_special_inode(inode, mode, rdev);
 
        res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode);
-       if (res) {
-               clear_nlink(inode);
-               hfsplus_delete_inode(inode);
-               iput(inode);
-               goto out;
+       if (res)
+               goto failed_mknod;
+
+       res = hfsplus_init_inode_security(inode, dir, &dentry->d_name);
+       if (res == -EOPNOTSUPP)
+               res = 0; /* Operation is not supported. */
+       else if (res) {
+               /* Try to delete anyway without error analysis. */
+               hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name);
+               goto failed_mknod;
        }
 
        hfsplus_instantiate(dentry, inode, inode->i_ino);
        mark_inode_dirty(inode);
+       goto out;
+
+failed_mknod:
+       clear_nlink(inode);
+       hfsplus_delete_inode(inode);
+       iput(inode);
 out:
        mutex_unlock(&sbi->vh_mutex);
        return res;
@@ -499,15 +520,19 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
 }
 
 const struct inode_operations hfsplus_dir_inode_operations = {
-       .lookup         = hfsplus_lookup,
-       .create         = hfsplus_create,
-       .link           = hfsplus_link,
-       .unlink         = hfsplus_unlink,
-       .mkdir          = hfsplus_mkdir,
-       .rmdir          = hfsplus_rmdir,
-       .symlink        = hfsplus_symlink,
-       .mknod          = hfsplus_mknod,
-       .rename         = hfsplus_rename,
+       .lookup                 = hfsplus_lookup,
+       .create                 = hfsplus_create,
+       .link                   = hfsplus_link,
+       .unlink                 = hfsplus_unlink,
+       .mkdir                  = hfsplus_mkdir,
+       .rmdir                  = hfsplus_rmdir,
+       .symlink                = hfsplus_symlink,
+       .mknod                  = hfsplus_mknod,
+       .rename                 = hfsplus_rename,
+       .setxattr               = generic_setxattr,
+       .getxattr               = generic_getxattr,
+       .listxattr              = hfsplus_listxattr,
+       .removexattr            = hfsplus_removexattr,
 };
 
 const struct file_operations hfsplus_dir_operations = {
index eba76ea..a94f0f7 100644 (file)
@@ -95,7 +95,7 @@ static void __hfsplus_ext_write_extent(struct inode *inode,
                              HFSPLUS_IS_RSRC(inode) ?
                                HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
 
-       res = hfs_brec_find(fd);
+       res = hfs_brec_find(fd, hfs_find_rec_by_key);
        if (hip->extent_state & HFSPLUS_EXT_NEW) {
                if (res != -ENOENT)
                        return;
@@ -154,7 +154,7 @@ static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
 
        hfsplus_ext_build_key(fd->search_key, cnid, block, type);
        fd->key->ext.cnid = 0;
-       res = hfs_brec_find(fd);
+       res = hfs_brec_find(fd, hfs_find_rec_by_key);
        if (res && res != -ENOENT)
                return res;
        if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
index a6da86b..05b11f3 100644 (file)
@@ -23,6 +23,7 @@
 #define DBG_SUPER      0x00000010
 #define DBG_EXTENT     0x00000020
 #define DBG_BITMAP     0x00000040
+#define DBG_ATTR_MOD   0x00000080
 
 #if 0
 #define DBG_MASK       (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
@@ -46,6 +47,13 @@ typedef int (*btree_keycmp)(const hfsplus_btree_key *,
 
 #define NODE_HASH_SIZE 256
 
+/* B-tree mutex nested subclasses */
+enum hfsplus_btree_mutex_classes {
+       CATALOG_BTREE_MUTEX,
+       EXTENTS_BTREE_MUTEX,
+       ATTR_BTREE_MUTEX,
+};
+
 /* An HFS+ BTree held in memory */
 struct hfs_btree {
        struct super_block *sb;
@@ -223,6 +231,7 @@ struct hfsplus_inode_info {
 #define HFSPLUS_I_CAT_DIRTY    1       /* has changes in the catalog tree */
 #define HFSPLUS_I_EXT_DIRTY    2       /* has changes in the extent tree */
 #define HFSPLUS_I_ALLOC_DIRTY  3       /* has changes in the allocation file */
+#define HFSPLUS_I_ATTR_DIRTY   4       /* has changes in the attributes tree */
 
 #define HFSPLUS_IS_RSRC(inode) \
        test_bit(HFSPLUS_I_RSRC, &HFSPLUS_I(inode)->flags)
@@ -302,7 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
 #define hfs_brec_remove hfsplus_brec_remove
 #define hfs_find_init hfsplus_find_init
 #define hfs_find_exit hfsplus_find_exit
-#define __hfs_brec_find __hplusfs_brec_find
+#define __hfs_brec_find __hfsplus_brec_find
 #define hfs_brec_find hfsplus_brec_find
 #define hfs_brec_read hfsplus_brec_read
 #define hfs_brec_goto hfsplus_brec_goto
@@ -324,10 +333,33 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
  */
 #define HFSPLUS_IOC_BLESS _IO('h', 0x80)
 
+typedef int (*search_strategy_t)(struct hfs_bnode *,
+                               struct hfs_find_data *,
+                               int *, int *, int *);
+
 /*
  * Functions in any *.c used in other files
  */
 
+/* attributes.c */
+int hfsplus_create_attr_tree_cache(void);
+void hfsplus_destroy_attr_tree_cache(void);
+hfsplus_attr_entry *hfsplus_alloc_attr_entry(void);
+void hfsplus_destroy_attr_entry(hfsplus_attr_entry *entry_p);
+int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *,
+               const hfsplus_btree_key *);
+int hfsplus_attr_build_key(struct super_block *, hfsplus_btree_key *,
+                       u32, const char *);
+void hfsplus_attr_build_key_uni(hfsplus_btree_key *key,
+                                       u32 cnid,
+                                       struct hfsplus_attr_unistr *name);
+int hfsplus_find_attr(struct super_block *, u32,
+                       const char *, struct hfs_find_data *);
+int hfsplus_attr_exists(struct inode *inode, const char *name);
+int hfsplus_create_attr(struct inode *, const char *, const void *, size_t);
+int hfsplus_delete_attr(struct inode *, const char *);
+int hfsplus_delete_all_attrs(struct inode *dir, u32 cnid);
+
 /* bitmap.c */
 int hfsplus_block_allocate(struct super_block *, u32, u32, u32 *);
 int hfsplus_block_free(struct super_block *, u32, u32);
@@ -369,8 +401,15 @@ int hfs_brec_remove(struct hfs_find_data *);
 /* bfind.c */
 int hfs_find_init(struct hfs_btree *, struct hfs_find_data *);
 void hfs_find_exit(struct hfs_find_data *);
-int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *);
-int hfs_brec_find(struct hfs_find_data *);
+int hfs_find_1st_rec_by_cnid(struct hfs_bnode *,
+                               struct hfs_find_data *,
+                               int *, int *, int *);
+int hfs_find_rec_by_key(struct hfs_bnode *,
+                               struct hfs_find_data *,
+                               int *, int *, int *);
+int __hfs_brec_find(struct hfs_bnode *, struct hfs_find_data *,
+                               search_strategy_t);
+int hfs_brec_find(struct hfs_find_data *, search_strategy_t);
 int hfs_brec_read(struct hfs_find_data *, void *, int);
 int hfs_brec_goto(struct hfs_find_data *, int);
 
@@ -417,11 +456,6 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
 
 /* ioctl.c */
 long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int hfsplus_setxattr(struct dentry *dentry, const char *name,
-                    const void *value, size_t size, int flags);
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
-                        void *value, size_t size);
-ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
 
 /* options.c */
 int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
@@ -446,7 +480,7 @@ int hfsplus_strcmp(const struct hfsplus_unistr *,
 int hfsplus_uni2asc(struct super_block *,
                const struct hfsplus_unistr *, char *, int *);
 int hfsplus_asc2uni(struct super_block *,
-               struct hfsplus_unistr *, const char *, int);
+               struct hfsplus_unistr *, int, const char *, int);
 int hfsplus_hash_dentry(const struct dentry *dentry,
                const struct inode *inode, struct qstr *str);
 int hfsplus_compare_dentry(const struct dentry *parent,
index 921967e..452ede0 100644 (file)
 typedef __be32 hfsplus_cnid;
 typedef __be16 hfsplus_unichr;
 
+#define HFSPLUS_MAX_STRLEN 255
+#define HFSPLUS_ATTR_MAX_STRLEN 127
+
 /* A "string" as used in filenames, etc. */
 struct hfsplus_unistr {
        __be16 length;
-       hfsplus_unichr unicode[255];
+       hfsplus_unichr unicode[HFSPLUS_MAX_STRLEN];
 } __packed;
 
-#define HFSPLUS_MAX_STRLEN 255
+/*
+ * A "string" as used in the attributes file
+ * for the name of an extended attribute
+ */
+struct hfsplus_attr_unistr {
+       __be16 length;
+       hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
+} __packed;
 
 /* POSIX permissions */
 struct hfsplus_perm {
@@ -291,6 +301,8 @@ struct hfsplus_cat_file {
 /* File attribute bits */
 #define HFSPLUS_FILE_LOCKED            0x0001
 #define HFSPLUS_FILE_THREAD_EXISTS     0x0002
+#define HFSPLUS_XATTR_EXISTS           0x0004
+#define HFSPLUS_ACL_EXISTS             0x0008
 
 /* HFS+ catalog thread (part of a cat_entry) */
 struct hfsplus_cat_thread {
@@ -327,11 +339,63 @@ struct hfsplus_ext_key {
 
 #define HFSPLUS_EXT_KEYLEN     sizeof(struct hfsplus_ext_key)
 
+#define HFSPLUS_XATTR_FINDER_INFO_NAME "com.apple.FinderInfo"
+#define HFSPLUS_XATTR_ACL_NAME "com.apple.system.Security"
+
+#define HFSPLUS_ATTR_INLINE_DATA 0x10
+#define HFSPLUS_ATTR_FORK_DATA   0x20
+#define HFSPLUS_ATTR_EXTENTS     0x30
+
+/* HFS+ attributes tree key */
+struct hfsplus_attr_key {
+       __be16 key_len;
+       __be16 pad;
+       hfsplus_cnid cnid;
+       __be32 start_block;
+       struct hfsplus_attr_unistr key_name;
+} __packed;
+
+#define HFSPLUS_ATTR_KEYLEN    sizeof(struct hfsplus_attr_key)
+
+/* HFS+ fork data attribute */
+struct hfsplus_attr_fork_data {
+       __be32 record_type;
+       __be32 reserved;
+       struct hfsplus_fork_raw the_fork;
+} __packed;
+
+/* HFS+ extension attribute */
+struct hfsplus_attr_extents {
+       __be32 record_type;
+       __be32 reserved;
+       struct hfsplus_extent extents;
+} __packed;
+
+#define HFSPLUS_MAX_INLINE_DATA_SIZE 3802
+
+/* HFS+ attribute inline data */
+struct hfsplus_attr_inline_data {
+       __be32 record_type;
+       __be32 reserved1;
+       u8 reserved2[6];
+       __be16 length;
+       u8 raw_bytes[HFSPLUS_MAX_INLINE_DATA_SIZE];
+} __packed;
+
+/* A data record in the attributes tree */
+typedef union {
+       __be32 record_type;
+       struct hfsplus_attr_fork_data fork_data;
+       struct hfsplus_attr_extents extents;
+       struct hfsplus_attr_inline_data inline_data;
+} __packed hfsplus_attr_entry;
+
 /* HFS+ generic BTree key */
 typedef union {
        __be16 key_len;
        struct hfsplus_cat_key cat;
        struct hfsplus_ext_key ext;
+       struct hfsplus_attr_key attr;
 } __packed hfsplus_btree_key;
 
 #endif
index 799b336..160ccc9 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
+#include "xattr.h"
 
 static int hfsplus_readpage(struct file *file, struct page *page)
 {
@@ -124,7 +125,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
-       struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+       struct inode *inode = file_inode(file)->i_mapping->host;
        ssize_t ret;
 
        ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
@@ -348,6 +349,18 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
                        error = error2;
        }
 
+       if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
+               if (sbi->attr_tree) {
+                       error2 =
+                               filemap_write_and_wait(
+                                           sbi->attr_tree->inode->i_mapping);
+                       if (!error)
+                               error = error2;
+               } else {
+                       printk(KERN_ERR "hfs: sync non-existent attributes tree\n");
+               }
+       }
+
        if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
                error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
                if (!error)
@@ -365,9 +378,10 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
 static const struct inode_operations hfsplus_file_inode_operations = {
        .lookup         = hfsplus_file_lookup,
        .setattr        = hfsplus_setattr,
-       .setxattr       = hfsplus_setxattr,
-       .getxattr       = hfsplus_getxattr,
+       .setxattr       = generic_setxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = hfsplus_listxattr,
+       .removexattr    = hfsplus_removexattr,
 };
 
 static const struct file_operations hfsplus_file_operations = {
index 09addc8..d3ff5cc 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/sched.h>
-#include <linux/xattr.h>
 #include <asm/uaccess.h>
 #include "hfsplus_fs.h"
 
@@ -59,7 +58,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
 
 static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        unsigned int flags = 0;
 
@@ -75,7 +74,7 @@ static int hfsplus_ioctl_getflags(struct file *file, int __user *user_flags)
 
 static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        unsigned int flags;
        int err = 0;
@@ -151,110 +150,3 @@ long hfsplus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                return -ENOTTY;
        }
 }
-
-int hfsplus_setxattr(struct dentry *dentry, const char *name,
-                    const void *value, size_t size, int flags)
-{
-       struct inode *inode = dentry->d_inode;
-       struct hfs_find_data fd;
-       hfsplus_cat_entry entry;
-       struct hfsplus_cat_file *file;
-       int res;
-
-       if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
-               return -EOPNOTSUPP;
-
-       res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
-       if (res)
-               return res;
-       res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
-       if (res)
-               goto out;
-       hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
-                       sizeof(struct hfsplus_cat_file));
-       file = &entry.file;
-
-       if (!strcmp(name, "hfs.type")) {
-               if (size == 4)
-                       memcpy(&file->user_info.fdType, value, 4);
-               else
-                       res = -ERANGE;
-       } else if (!strcmp(name, "hfs.creator")) {
-               if (size == 4)
-                       memcpy(&file->user_info.fdCreator, value, 4);
-               else
-                       res = -ERANGE;
-       } else
-               res = -EOPNOTSUPP;
-       if (!res) {
-               hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
-                               sizeof(struct hfsplus_cat_file));
-               hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
-       }
-out:
-       hfs_find_exit(&fd);
-       return res;
-}
-
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
-                        void *value, size_t size)
-{
-       struct inode *inode = dentry->d_inode;
-       struct hfs_find_data fd;
-       hfsplus_cat_entry entry;
-       struct hfsplus_cat_file *file;
-       ssize_t res = 0;
-
-       if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
-               return -EOPNOTSUPP;
-
-       if (size) {
-               res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
-               if (res)
-                       return res;
-               res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
-               if (res)
-                       goto out;
-               hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
-                               sizeof(struct hfsplus_cat_file));
-       }
-       file = &entry.file;
-
-       if (!strcmp(name, "hfs.type")) {
-               if (size >= 4) {
-                       memcpy(value, &file->user_info.fdType, 4);
-                       res = 4;
-               } else
-                       res = size ? -ERANGE : 4;
-       } else if (!strcmp(name, "hfs.creator")) {
-               if (size >= 4) {
-                       memcpy(value, &file->user_info.fdCreator, 4);
-                       res = 4;
-               } else
-                       res = size ? -ERANGE : 4;
-       } else
-               res = -EOPNOTSUPP;
-out:
-       if (size)
-               hfs_find_exit(&fd);
-       return res;
-}
-
-#define HFSPLUS_ATTRLIST_SIZE (sizeof("hfs.creator")+sizeof("hfs.type"))
-
-ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
-       struct inode *inode = dentry->d_inode;
-
-       if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
-               return -EOPNOTSUPP;
-
-       if (!buffer || !size)
-               return HFSPLUS_ATTRLIST_SIZE;
-       if (size < HFSPLUS_ATTRLIST_SIZE)
-               return -ERANGE;
-       strcpy(buffer, "hfs.type");
-       strcpy(buffer + sizeof("hfs.type"), "hfs.creator");
-
-       return HFSPLUS_ATTRLIST_SIZE;
-}
index 796198d..974c26f 100644 (file)
@@ -20,6 +20,7 @@ static struct inode *hfsplus_alloc_inode(struct super_block *sb);
 static void hfsplus_destroy_inode(struct inode *inode);
 
 #include "hfsplus_fs.h"
+#include "xattr.h"
 
 static int hfsplus_system_read_inode(struct inode *inode)
 {
@@ -118,6 +119,7 @@ static int hfsplus_system_write_inode(struct inode *inode)
        case HFSPLUS_ATTR_CNID:
                fork = &vhdr->attr_file;
                tree = sbi->attr_tree;
+               break;
        default:
                return -EIO;
        }
@@ -191,6 +193,12 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
        error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
        if (!error)
                error = error2;
+       if (sbi->attr_tree) {
+               error2 =
+                   filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
+               if (!error)
+                       error = error2;
+       }
        error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
        if (!error)
                error = error2;
@@ -281,6 +289,7 @@ static void hfsplus_put_super(struct super_block *sb)
                hfsplus_sync_fs(sb, 1);
        }
 
+       hfs_btree_close(sbi->attr_tree);
        hfs_btree_close(sbi->cat_tree);
        hfs_btree_close(sbi->ext_tree);
        iput(sbi->alloc_file);
@@ -477,12 +486,20 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                printk(KERN_ERR "hfs: failed to load catalog file\n");
                goto out_close_ext_tree;
        }
+       if (vhdr->attr_file.total_blocks != 0) {
+               sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
+               if (!sbi->attr_tree) {
+                       printk(KERN_ERR "hfs: failed to load attributes file\n");
+                       goto out_close_cat_tree;
+               }
+       }
+       sb->s_xattr = hfsplus_xattr_handlers;
 
        inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
        if (IS_ERR(inode)) {
                printk(KERN_ERR "hfs: failed to load allocation file\n");
                err = PTR_ERR(inode);
-               goto out_close_cat_tree;
+               goto out_close_attr_tree;
        }
        sbi->alloc_file = inode;
 
@@ -542,10 +559,27 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                        }
                        err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
                                                 &str, sbi->hidden_dir);
-                       mutex_unlock(&sbi->vh_mutex);
-                       if (err)
+                       if (err) {
+                               mutex_unlock(&sbi->vh_mutex);
+                               goto out_put_hidden_dir;
+                       }
+
+                       err = hfsplus_init_inode_security(sbi->hidden_dir,
+                                                               root, &str);
+                       if (err == -EOPNOTSUPP)
+                               err = 0; /* Operation is not supported. */
+                       else if (err) {
+                               /*
+                                * Try to delete anyway without
+                                * error analysis.
+                                */
+                               hfsplus_delete_cat(sbi->hidden_dir->i_ino,
+                                                       root, &str);
+                               mutex_unlock(&sbi->vh_mutex);
                                goto out_put_hidden_dir;
+                       }
 
+                       mutex_unlock(&sbi->vh_mutex);
                        hfsplus_mark_inode_dirty(sbi->hidden_dir,
                                                 HFSPLUS_I_CAT_DIRTY);
                }
@@ -562,6 +596,8 @@ out_put_root:
        sb->s_root = NULL;
 out_put_alloc_file:
        iput(sbi->alloc_file);
+out_close_attr_tree:
+       hfs_btree_close(sbi->attr_tree);
 out_close_cat_tree:
        hfs_btree_close(sbi->cat_tree);
 out_close_ext_tree:
@@ -635,9 +671,20 @@ static int __init init_hfsplus_fs(void)
                hfsplus_init_once);
        if (!hfsplus_inode_cachep)
                return -ENOMEM;
+       err = hfsplus_create_attr_tree_cache();
+       if (err)
+               goto destroy_inode_cache;
        err = register_filesystem(&hfsplus_fs_type);
        if (err)
-               kmem_cache_destroy(hfsplus_inode_cachep);
+               goto destroy_attr_tree_cache;
+       return 0;
+
+destroy_attr_tree_cache:
+       hfsplus_destroy_attr_tree_cache();
+
+destroy_inode_cache:
+       kmem_cache_destroy(hfsplus_inode_cachep);
+
        return err;
 }
 
@@ -650,6 +697,7 @@ static void __exit exit_hfsplus_fs(void)
         * destroy cache.
         */
        rcu_barrier();
+       hfsplus_destroy_attr_tree_cache();
        kmem_cache_destroy(hfsplus_inode_cachep);
 }
 
index a32998f..2c2e47d 100644 (file)
@@ -295,7 +295,8 @@ static inline u16 *decompose_unichar(wchar_t uc, int *size)
        return hfsplus_decompose_table + (off / 4);
 }
 
-int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
+int hfsplus_asc2uni(struct super_block *sb,
+                   struct hfsplus_unistr *ustr, int max_unistr_len,
                    const char *astr, int len)
 {
        int size, dsize, decompose;
@@ -303,7 +304,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
        wchar_t c;
 
        decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
-       while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
+       while (outlen < max_unistr_len && len > 0) {
                size = asc2unichar(sb, astr, len, &c);
 
                if (decompose)
@@ -311,7 +312,7 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
                else
                        dstr = NULL;
                if (dstr) {
-                       if (outlen + dsize > HFSPLUS_MAX_STRLEN)
+                       if (outlen + dsize > max_unistr_len)
                                break;
                        do {
                                ustr->unicode[outlen++] = cpu_to_be16(*dstr++);
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
new file mode 100644 (file)
index 0000000..e8a4b08
--- /dev/null
@@ -0,0 +1,709 @@
+/*
+ * linux/fs/hfsplus/xattr.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Logic of processing extended attributes
+ */
+
+#include "hfsplus_fs.h"
+#include "xattr.h"
+
+/*
+ * NULL-terminated table of xattr namespace handlers exported by this
+ * module (osx, user, trusted, security namespaces).
+ */
+const struct xattr_handler *hfsplus_xattr_handlers[] = {
+       &hfsplus_xattr_osx_handler,
+       &hfsplus_xattr_user_handler,
+       &hfsplus_xattr_trusted_handler,
+       &hfsplus_xattr_security_handler,
+       NULL
+};
+
+/*
+ * Compare @name with the reserved Finder-info xattr name.
+ * Returns 0 on match, non-zero on mismatch, -1 for a NULL name.
+ * NOTE(review): sizeof() of the name macro (presumably a string
+ * literal) includes the trailing NUL, so this is an exact-name
+ * compare, not a prefix compare — confirm macro definition.
+ */
+static int strcmp_xattr_finder_info(const char *name)
+{
+       if (name) {
+               return strncmp(name, HFSPLUS_XATTR_FINDER_INFO_NAME,
+                               sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME));
+       }
+       return -1;
+}
+
+/*
+ * Compare @name with the reserved ACL xattr name.
+ * Returns 0 on match, non-zero on mismatch, -1 for a NULL name.
+ * Same exact-match semantics as strcmp_xattr_finder_info().
+ */
+static int strcmp_xattr_acl(const char *name)
+{
+       if (name) {
+               return strncmp(name, HFSPLUS_XATTR_ACL_NAME,
+                               sizeof(HFSPLUS_XATTR_ACL_NAME));
+       }
+       return -1;
+}
+
+/*
+ * Return true when @name begins with one of the standard xattr
+ * namespace prefixes (system., user., security., trusted.).
+ */
+static inline int is_known_namespace(const char *name)
+{
+       if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
+           strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+           strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+           strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+               return false;
+
+       return true;
+}
+
+/*
+ * Policy check for setting (or removing) an xattr named @name.
+ * Returns 0 if allowed, -EOPNOTSUPP otherwise.  @value/@value_len
+ * are currently unused by the checks below.
+ */
+static int can_set_xattr(struct inode *inode, const char *name,
+                               const void *value, size_t value_len)
+{
+       if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+               return -EOPNOTSUPP; /* TODO: implement ACL support */
+
+       if (!strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN)) {
+               /*
+                * This makes sure that we aren't trying to set an
+                * attribute in a different namespace by prefixing it
+                * with "osx."
+                */
+               if (is_known_namespace(name + XATTR_MAC_OSX_PREFIX_LEN))
+                       return -EOPNOTSUPP;
+
+               return 0;
+       }
+
+       /*
+        * Don't allow setting an attribute in an unknown namespace.
+        */
+       if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
+           strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+           strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+/*
+ * Core setxattr implementation operating on an inode.
+ *
+ * The Finder-info attribute lives inside the catalog record itself;
+ * every other attribute is stored in the attributes B-tree.  On a
+ * successful attributes-tree update the catalog entry's flags are
+ * refreshed to record that xattrs (and possibly an ACL) exist.
+ *
+ * Returns 0 on success or a negative errno.
+ * NOTE(review): XATTR_CREATE on an existing attr and XATTR_REPLACE on
+ * a missing one return -EOPNOTSUPP here; -EEXIST/-ENODATA would match
+ * the usual VFS convention — confirm intent.
+ */
+int __hfsplus_setxattr(struct inode *inode, const char *name,
+                       const void *value, size_t size, int flags)
+{
+       int err = 0;
+       struct hfs_find_data cat_fd;
+       hfsplus_cat_entry entry;
+       u16 cat_entry_flags, cat_entry_type;
+       u16 folder_finderinfo_len = sizeof(struct DInfo) +
+                                       sizeof(struct DXInfo);
+       u16 file_finderinfo_len = sizeof(struct FInfo) +
+                                       sizeof(struct FXInfo);
+
+       /* Only regular files and directories carry xattrs; resource
+        * fork pseudo-inodes do not. */
+       if ((!S_ISREG(inode->i_mode) &&
+                       !S_ISDIR(inode->i_mode)) ||
+                               HFSPLUS_IS_RSRC(inode))
+               return -EOPNOTSUPP;
+
+       err = can_set_xattr(inode, name, value, size);
+       if (err)
+               return err;
+
+       /* Strip the "osx." prefix; the on-disk name is unprefixed. */
+       if (strncmp(name, XATTR_MAC_OSX_PREFIX,
+                               XATTR_MAC_OSX_PREFIX_LEN) == 0)
+               name += XATTR_MAC_OSX_PREFIX_LEN;
+
+       /* Treat a NULL value as an empty attribute. */
+       if (value == NULL) {
+               value = "";
+               size = 0;
+       }
+
+       err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
+       if (err) {
+               printk(KERN_ERR "hfs: can't init xattr find struct\n");
+               return err;
+       }
+
+       err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
+       if (err) {
+               printk(KERN_ERR "hfs: catalog searching failed\n");
+               goto end_setxattr;
+       }
+
+       /*
+        * Finder info is embedded in the catalog record: write it in
+        * place and skip the attributes tree entirely.  The caller
+        * must supply the exact record size for the entry type.
+        */
+       if (!strcmp_xattr_finder_info(name)) {
+               if (flags & XATTR_CREATE) {
+                       printk(KERN_ERR "hfs: xattr exists yet\n");
+                       err = -EOPNOTSUPP;
+                       goto end_setxattr;
+               }
+               hfs_bnode_read(cat_fd.bnode, &entry, cat_fd.entryoffset,
+                                       sizeof(hfsplus_cat_entry));
+               if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) {
+                       if (size == folder_finderinfo_len) {
+                               memcpy(&entry.folder.user_info, value,
+                                               folder_finderinfo_len);
+                               hfs_bnode_write(cat_fd.bnode, &entry,
+                                       cat_fd.entryoffset,
+                                       sizeof(struct hfsplus_cat_folder));
+                               hfsplus_mark_inode_dirty(inode,
+                                               HFSPLUS_I_CAT_DIRTY);
+                       } else {
+                               err = -ERANGE;
+                               goto end_setxattr;
+                       }
+               } else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) {
+                       if (size == file_finderinfo_len) {
+                               memcpy(&entry.file.user_info, value,
+                                               file_finderinfo_len);
+                               hfs_bnode_write(cat_fd.bnode, &entry,
+                                       cat_fd.entryoffset,
+                                       sizeof(struct hfsplus_cat_file),
+                               hfsplus_mark_inode_dirty(inode,
+                                               HFSPLUS_I_CAT_DIRTY);
+                       } else {
+                               err = -ERANGE;
+                               goto end_setxattr;
+                       }
+               } else {
+                       err = -EOPNOTSUPP;
+                       goto end_setxattr;
+               }
+               goto end_setxattr;
+       }
+
+       /* Everything else needs the (optional) attributes B-tree. */
+       if (!HFSPLUS_SB(inode->i_sb)->attr_tree) {
+               err = -EOPNOTSUPP;
+               goto end_setxattr;
+       }
+
+       /* Replace = delete + create; honor XATTR_CREATE/REPLACE. */
+       if (hfsplus_attr_exists(inode, name)) {
+               if (flags & XATTR_CREATE) {
+                       printk(KERN_ERR "hfs: xattr exists yet\n");
+                       err = -EOPNOTSUPP;
+                       goto end_setxattr;
+               }
+               err = hfsplus_delete_attr(inode, name);
+               if (err)
+                       goto end_setxattr;
+               err = hfsplus_create_attr(inode, name, value, size);
+               if (err)
+                       goto end_setxattr;
+       } else {
+               if (flags & XATTR_REPLACE) {
+                       printk(KERN_ERR "hfs: cannot replace xattr\n");
+                       err = -EOPNOTSUPP;
+                       goto end_setxattr;
+               }
+               err = hfsplus_create_attr(inode, name, value, size);
+               if (err)
+                       goto end_setxattr;
+       }
+
+       /*
+        * Record in the catalog entry that this object now has xattrs
+        * (and an ACL, if that is what was just written).
+        */
+       cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
+       if (cat_entry_type == HFSPLUS_FOLDER) {
+               cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
+                                   cat_fd.entryoffset +
+                                   offsetof(struct hfsplus_cat_folder, flags));
+               cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
+               if (!strcmp_xattr_acl(name))
+                       cat_entry_flags |= HFSPLUS_ACL_EXISTS;
+               hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
+                               offsetof(struct hfsplus_cat_folder, flags),
+                               cat_entry_flags);
+               hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+       } else if (cat_entry_type == HFSPLUS_FILE) {
+               cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
+                                   cat_fd.entryoffset +
+                                   offsetof(struct hfsplus_cat_file, flags));
+               cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
+               if (!strcmp_xattr_acl(name))
+                       cat_entry_flags |= HFSPLUS_ACL_EXISTS;
+               hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
+                                   offsetof(struct hfsplus_cat_file, flags),
+                                   cat_entry_flags);
+               hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+       } else {
+               printk(KERN_ERR "hfs: invalid catalog entry type\n");
+               err = -EIO;
+               goto end_setxattr;
+       }
+
+end_setxattr:
+       hfs_find_exit(&cat_fd);
+       return err;
+}
+
+/*
+ * An on-disk name outside every standard namespace is presented to
+ * userspace under the "osx." prefix.
+ */
+static inline int is_osx_xattr(const char *xattr_name)
+{
+       return !is_known_namespace(xattr_name);
+}
+
+/*
+ * Length the name will occupy in a listxattr buffer: raw length plus
+ * the terminating NUL, plus the "osx." prefix when one is prepended.
+ */
+static int name_len(const char *xattr_name, int xattr_name_len)
+{
+       int len = xattr_name_len + 1;
+
+       if (is_osx_xattr(xattr_name))
+               len += XATTR_MAC_OSX_PREFIX_LEN;
+
+       return len;
+}
+
+/*
+ * Copy @xattr_name (raw length @name_len, no NUL counted) into
+ * @buffer, prepending "osx." for non-standard namespaces and
+ * NUL-terminating.  Returns the number of bytes written.  The caller
+ * must have sized @buffer via name_len() above.
+ */
+static int copy_name(char *buffer, const char *xattr_name, int name_len)
+{
+       int len = name_len;
+       int offset = 0;
+
+       if (is_osx_xattr(xattr_name)) {
+               strncpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
+               offset += XATTR_MAC_OSX_PREFIX_LEN;
+               len += XATTR_MAC_OSX_PREFIX_LEN;
+       }
+
+       /* NUL-terminate explicitly: strncpy with an exact count
+        * does not. */
+       strncpy(buffer + offset, xattr_name, name_len);
+       memset(buffer + offset + name_len, 0, 1);
+       len += 1;
+
+       return len;
+}
+
+/*
+ * Read the Finder info blob straight out of the catalog record.
+ * With size == 0 returns the required buffer length; with a short
+ * buffer returns -ERANGE.  Note hfs_find_init() only runs in the
+ * "size >= record_len" branch, which is why hfs_find_exit() at the
+ * end is guarded by the same condition.
+ */
+static ssize_t hfsplus_getxattr_finder_info(struct dentry *dentry,
+                                               void *value, size_t size)
+{
+       ssize_t res = 0;
+       struct inode *inode = dentry->d_inode;
+       struct hfs_find_data fd;
+       u16 entry_type;
+       u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
+       u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo);
+       u16 record_len = max(folder_rec_len, file_rec_len);
+       u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
+       u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
+
+       if (size >= record_len) {
+               res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
+               if (res) {
+                       printk(KERN_ERR "hfs: can't init xattr find struct\n");
+                       return res;
+               }
+               res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+               if (res)
+                       goto end_getxattr_finder_info;
+               entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
+
+               if (entry_type == HFSPLUS_FOLDER) {
+                       hfs_bnode_read(fd.bnode, folder_finder_info,
+                               fd.entryoffset +
+                               offsetof(struct hfsplus_cat_folder, user_info),
+                               folder_rec_len);
+                       memcpy(value, folder_finder_info, folder_rec_len);
+                       res = folder_rec_len;
+               } else if (entry_type == HFSPLUS_FILE) {
+                       hfs_bnode_read(fd.bnode, file_finder_info,
+                               fd.entryoffset +
+                               offsetof(struct hfsplus_cat_file, user_info),
+                               file_rec_len);
+                       memcpy(value, file_finder_info, file_rec_len);
+                       res = file_rec_len;
+               } else {
+                       res = -EOPNOTSUPP;
+                       goto end_getxattr_finder_info;
+               }
+       } else
+               /* Probe call (size == 0) or too-small buffer. */
+               res = size ? -ERANGE : record_len;
+
+end_getxattr_finder_info:
+       if (size >= record_len)
+               hfs_find_exit(&fd);
+       return res;
+}
+
+/*
+ * Fetch the value of xattr @name.  Finder info is served from the
+ * catalog record; everything else comes from the attributes B-tree,
+ * where only inline-data records are supported (fork-data/extents
+ * records return -EOPNOTSUPP).  size == 0 probes the value length;
+ * a short buffer yields -ERANGE.
+ */
+ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+                        void *value, size_t size)
+{
+       struct inode *inode = dentry->d_inode;
+       struct hfs_find_data fd;
+       hfsplus_attr_entry *entry;
+       __be32 xattr_record_type;
+       u32 record_type;
+       u16 record_length = 0;
+       ssize_t res = 0;
+
+       if ((!S_ISREG(inode->i_mode) &&
+                       !S_ISDIR(inode->i_mode)) ||
+                               HFSPLUS_IS_RSRC(inode))
+               return -EOPNOTSUPP;
+
+       if (strncmp(name, XATTR_MAC_OSX_PREFIX,
+                               XATTR_MAC_OSX_PREFIX_LEN) == 0) {
+               /* skip "osx." prefix */
+               name += XATTR_MAC_OSX_PREFIX_LEN;
+               /*
+                * Don't allow retrieving properly prefixed attributes
+                * by prepending them with "osx."
+                */
+               if (is_known_namespace(name))
+                       return -EOPNOTSUPP;
+       }
+
+       if (!strcmp_xattr_finder_info(name))
+               return hfsplus_getxattr_finder_info(dentry, value, size);
+
+       if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
+               return -EOPNOTSUPP;
+
+       /* Scratch record large enough for an inline-data attr. */
+       entry = hfsplus_alloc_attr_entry();
+       if (!entry) {
+               printk(KERN_ERR "hfs: can't allocate xattr entry\n");
+               return -ENOMEM;
+       }
+
+       res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
+       if (res) {
+               printk(KERN_ERR "hfs: can't init xattr find struct\n");
+               goto failed_getxattr_init;
+       }
+
+       res = hfsplus_find_attr(inode->i_sb, inode->i_ino, name, &fd);
+       if (res) {
+               if (res == -ENOENT)
+                       res = -ENODATA; /* VFS convention for "no such attr" */
+               else
+                       printk(KERN_ERR "hfs: xattr searching failed\n");
+               goto out;
+       }
+
+       /* Validate the on-disk record type and length before copying. */
+       hfs_bnode_read(fd.bnode, &xattr_record_type,
+                       fd.entryoffset, sizeof(xattr_record_type));
+       record_type = be32_to_cpu(xattr_record_type);
+       if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
+               record_length = hfs_bnode_read_u16(fd.bnode,
+                               fd.entryoffset +
+                               offsetof(struct hfsplus_attr_inline_data,
+                               length));
+               if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) {
+                       printk(KERN_ERR "hfs: invalid xattr record size\n");
+                       res = -EIO;
+                       goto out;
+               }
+       } else if (record_type == HFSPLUS_ATTR_FORK_DATA ||
+                       record_type == HFSPLUS_ATTR_EXTENTS) {
+               printk(KERN_ERR "hfs: only inline data xattr are supported\n");
+               res = -EOPNOTSUPP;
+               goto out;
+       } else {
+               printk(KERN_ERR "hfs: invalid xattr record\n");
+               res = -EIO;
+               goto out;
+       }
+
+       if (size) {
+               hfs_bnode_read(fd.bnode, entry, fd.entryoffset,
+                               offsetof(struct hfsplus_attr_inline_data,
+                                       raw_bytes) + record_length);
+       }
+
+       if (size >= record_length) {
+               memcpy(value, entry->inline_data.raw_bytes, record_length);
+               res = record_length;
+       } else
+               res = size ? -ERANGE : record_length;
+
+out:
+       hfs_find_exit(&fd);
+
+failed_getxattr_init:
+       hfsplus_destroy_attr_entry(entry);
+       return res;
+}
+
+/*
+ * Listing policy: trusted.* names are visible only with
+ * CAP_SYS_ADMIN; everything else is always listed.
+ */
+static inline int can_list(const char *xattr_name)
+{
+       if (!xattr_name)
+               return 0;
+
+       return strncmp(xattr_name, XATTR_TRUSTED_PREFIX,
+                       XATTR_TRUSTED_PREFIX_LEN) ||
+                               capable(CAP_SYS_ADMIN);
+}
+
+/*
+ * Emit the Finder-info pseudo-xattr name into a listxattr buffer, but
+ * only when the catalog record's finder-info bytes are non-zero
+ * (checked via find_first_bit over the raw blob).  Returns the number
+ * of bytes written (0 when finder info is all-zero), or a negative
+ * errno.
+ */
+static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
+                                               char *buffer, size_t size)
+{
+       ssize_t res = 0;
+       struct inode *inode = dentry->d_inode;
+       struct hfs_find_data fd;
+       u16 entry_type;
+       u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
+       u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
+       unsigned long len, found_bit;
+       int xattr_name_len, symbols_count;
+
+       res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
+       if (res) {
+               printk(KERN_ERR "hfs: can't init xattr find struct\n");
+               return res;
+       }
+
+       res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+       if (res)
+               goto end_listxattr_finder_info;
+
+       entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
+       if (entry_type == HFSPLUS_FOLDER) {
+               len = sizeof(struct DInfo) + sizeof(struct DXInfo);
+               hfs_bnode_read(fd.bnode, folder_finder_info,
+                               fd.entryoffset +
+                               offsetof(struct hfsplus_cat_folder, user_info),
+                               len);
+               found_bit = find_first_bit((void *)folder_finder_info, len*8);
+       } else if (entry_type == HFSPLUS_FILE) {
+               len = sizeof(struct FInfo) + sizeof(struct FXInfo);
+               hfs_bnode_read(fd.bnode, file_finder_info,
+                               fd.entryoffset +
+                               offsetof(struct hfsplus_cat_file, user_info),
+                               len);
+               found_bit = find_first_bit((void *)file_finder_info, len*8);
+       } else {
+               res = -EOPNOTSUPP;
+               goto end_listxattr_finder_info;
+       }
+
+       if (found_bit >= (len*8))
+               /* All-zero finder info: nothing to list. */
+               res = 0;
+       else {
+               symbols_count = sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME) - 1;
+               xattr_name_len =
+                       name_len(HFSPLUS_XATTR_FINDER_INFO_NAME, symbols_count);
+               if (!buffer || !size) {
+                       /* Probe call: report required space only. */
+                       if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME))
+                               res = xattr_name_len;
+               } else if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME)) {
+                       if (size < xattr_name_len)
+                               res = -ERANGE;
+                       else {
+                               res = copy_name(buffer,
+                                               HFSPLUS_XATTR_FINDER_INFO_NAME,
+                                               symbols_count);
+                       }
+               }
+       }
+
+end_listxattr_finder_info:
+       hfs_find_exit(&fd);
+
+       return res;
+}
+
+/*
+ * listxattr: first the Finder-info pseudo-attr, then every record for
+ * this inode in the attributes B-tree, converting each on-disk
+ * unicode key name to ASCII.  With buffer == NULL or size == 0 only
+ * the total required length is computed.
+ */
+ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+       ssize_t err;
+       ssize_t res = 0;
+       struct inode *inode = dentry->d_inode;
+       struct hfs_find_data fd;
+       u16 key_len = 0;
+       struct hfsplus_attr_key attr_key;
+       char strbuf[HFSPLUS_ATTR_MAX_STRLEN +
+                       XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
+       int xattr_name_len;
+
+       if ((!S_ISREG(inode->i_mode) &&
+                       !S_ISDIR(inode->i_mode)) ||
+                               HFSPLUS_IS_RSRC(inode))
+               return -EOPNOTSUPP;
+
+       res = hfsplus_listxattr_finder_info(dentry, buffer, size);
+       if (res < 0)
+               return res;
+       else if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
+               return (res == 0) ? -EOPNOTSUPP : res;
+
+       err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
+       if (err) {
+               printk(KERN_ERR "hfs: can't init xattr find struct\n");
+               return err;
+       }
+
+       /* NULL name positions the cursor at this inode's first attr. */
+       err = hfsplus_find_attr(inode->i_sb, inode->i_ino, NULL, &fd);
+       if (err) {
+               if (err == -ENOENT) {
+                       if (res == 0)
+                               res = -ENODATA;
+                       goto end_listxattr;
+               } else {
+                       res = err;
+                       goto end_listxattr;
+               }
+       }
+
+       /* Walk records until one belongs to a different cnid. */
+       for (;;) {
+               key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
+               if (key_len == 0 || key_len > fd.tree->max_key_len) {
+                       printk(KERN_ERR "hfs: invalid xattr key length: %d\n",
+                                                       key_len);
+                       res = -EIO;
+                       goto end_listxattr;
+               }
+
+               hfs_bnode_read(fd.bnode, &attr_key,
+                               fd.keyoffset, key_len + sizeof(key_len));
+
+               if (be32_to_cpu(attr_key.cnid) != inode->i_ino)
+                       goto end_listxattr;
+
+               xattr_name_len = HFSPLUS_ATTR_MAX_STRLEN;
+               if (hfsplus_uni2asc(inode->i_sb,
+                       (const struct hfsplus_unistr *)&fd.key->attr.key_name,
+                                       strbuf, &xattr_name_len)) {
+                       printk(KERN_ERR "hfs: unicode conversion failed\n");
+                       res = -EIO;
+                       goto end_listxattr;
+               }
+
+               if (!buffer || !size) {
+                       if (can_list(strbuf))
+                               res += name_len(strbuf, xattr_name_len);
+               } else if (can_list(strbuf)) {
+                       if (size < (res + name_len(strbuf, xattr_name_len))) {
+                               res = -ERANGE;
+                               goto end_listxattr;
+                       } else
+                               res += copy_name(buffer + res,
+                                               strbuf, xattr_name_len);
+               }
+
+               if (hfs_brec_goto(&fd, 1))
+                       goto end_listxattr;
+       }
+
+end_listxattr:
+       hfs_find_exit(&fd);
+       return res;
+}
+
+/*
+ * Remove xattr @name from the attributes B-tree.  Finder info cannot
+ * be removed (it lives in the catalog record).  After deletion, the
+ * catalog entry flags are updated: HFSPLUS_ACL_EXISTS is cleared when
+ * the ACL attr was removed, HFSPLUS_XATTR_EXISTS when no attrs remain.
+ */
+int hfsplus_removexattr(struct dentry *dentry, const char *name)
+{
+       int err = 0;
+       struct inode *inode = dentry->d_inode;
+       struct hfs_find_data cat_fd;
+       u16 flags;
+       u16 cat_entry_type;
+       int is_xattr_acl_deleted = 0;
+       int is_all_xattrs_deleted = 0;
+
+       if ((!S_ISREG(inode->i_mode) &&
+                       !S_ISDIR(inode->i_mode)) ||
+                               HFSPLUS_IS_RSRC(inode))
+               return -EOPNOTSUPP;
+
+       if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
+               return -EOPNOTSUPP;
+
+       /* Same namespace policy as setxattr. */
+       err = can_set_xattr(inode, name, NULL, 0);
+       if (err)
+               return err;
+
+       if (strncmp(name, XATTR_MAC_OSX_PREFIX,
+                               XATTR_MAC_OSX_PREFIX_LEN) == 0)
+               name += XATTR_MAC_OSX_PREFIX_LEN;
+
+       if (!strcmp_xattr_finder_info(name))
+               return -EOPNOTSUPP;
+
+       err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
+       if (err) {
+               printk(KERN_ERR "hfs: can't init xattr find struct\n");
+               return err;
+       }
+
+       err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
+       if (err) {
+               printk(KERN_ERR "hfs: catalog searching failed\n");
+               goto end_removexattr;
+       }
+
+       err = hfsplus_delete_attr(inode, name);
+       if (err)
+               goto end_removexattr;
+
+       is_xattr_acl_deleted = !strcmp_xattr_acl(name);
+       is_all_xattrs_deleted = !hfsplus_attr_exists(inode, NULL);
+
+       /* Catalog flags only change in those two cases. */
+       if (!is_xattr_acl_deleted && !is_all_xattrs_deleted)
+               goto end_removexattr;
+
+       cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
+
+       if (cat_entry_type == HFSPLUS_FOLDER) {
+               flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
+                               offsetof(struct hfsplus_cat_folder, flags));
+               if (is_xattr_acl_deleted)
+                       flags &= ~HFSPLUS_ACL_EXISTS;
+               if (is_all_xattrs_deleted)
+                       flags &= ~HFSPLUS_XATTR_EXISTS;
+               hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
+                               offsetof(struct hfsplus_cat_folder, flags),
+                               flags);
+               hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+       } else if (cat_entry_type == HFSPLUS_FILE) {
+               flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
+                               offsetof(struct hfsplus_cat_file, flags));
+               if (is_xattr_acl_deleted)
+                       flags &= ~HFSPLUS_ACL_EXISTS;
+               if (is_all_xattrs_deleted)
+                       flags &= ~HFSPLUS_XATTR_EXISTS;
+               hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
+                               offsetof(struct hfsplus_cat_file, flags),
+                               flags);
+               hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+       } else {
+               printk(KERN_ERR "hfs: invalid catalog entry type\n");
+               err = -EIO;
+               goto end_removexattr;
+       }
+
+end_removexattr:
+       hfs_find_exit(&cat_fd);
+       return err;
+}
+
+/*
+ * "osx." namespace get handler: re-prefix the bare name and delegate
+ * to hfsplus_getxattr().  The stack buffer is sized for the longest
+ * attr name plus the prefix and a NUL.
+ */
+static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
+                                       void *buffer, size_t size, int type)
+{
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN +
+                               XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
+       size_t len = strlen(name);
+
+       if (!strcmp(name, ""))
+               return -EINVAL;
+
+       if (len > HFSPLUS_ATTR_MAX_STRLEN)
+               return -EOPNOTSUPP;
+
+       strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
+       strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
+
+       return hfsplus_getxattr(dentry, xattr_name, buffer, size);
+}
+
+/*
+ * "osx." namespace set handler: mirror of hfsplus_osx_getxattr(),
+ * delegating to hfsplus_setxattr() with the prefix restored.
+ */
+static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
+               const void *buffer, size_t size, int flags, int type)
+{
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN +
+                               XATTR_MAC_OSX_PREFIX_LEN + 1] = {0};
+       size_t len = strlen(name);
+
+       if (!strcmp(name, ""))
+               return -EINVAL;
+
+       if (len > HFSPLUS_ATTR_MAX_STRLEN)
+               return -EOPNOTSUPP;
+
+       strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
+       strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
+
+       return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+}
+
+static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
+               size_t list_size, const char *name, size_t name_len, int type)
+{
+       /*
+        * This method is never called: hfsplus_listxattr() is used
+        * instead of generic_listxattr(), so per-handler list
+        * callbacks are bypassed.
+        */
+       return -EOPNOTSUPP;
+}
+
+/* Handler for the "osx." namespace (unprefixed on-disk names). */
+const struct xattr_handler hfsplus_xattr_osx_handler = {
+       .prefix = XATTR_MAC_OSX_PREFIX,
+       .list   = hfsplus_osx_listxattr,
+       .get    = hfsplus_osx_getxattr,
+       .set    = hfsplus_osx_setxattr,
+};
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
new file mode 100644 (file)
index 0000000..847b695
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * linux/fs/hfsplus/xattr.h
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Logic of processing extended attributes
+ */
+
+#ifndef _LINUX_HFSPLUS_XATTR_H
+#define _LINUX_HFSPLUS_XATTR_H
+
+#include <linux/xattr.h>
+
+/* Namespace handlers implemented across the xattr_*.c files. */
+extern const struct xattr_handler hfsplus_xattr_osx_handler;
+extern const struct xattr_handler hfsplus_xattr_user_handler;
+extern const struct xattr_handler hfsplus_xattr_trusted_handler;
+/*extern const struct xattr_handler hfsplus_xattr_acl_access_handler;*/
+/*extern const struct xattr_handler hfsplus_xattr_acl_default_handler;*/
+extern const struct xattr_handler hfsplus_xattr_security_handler;
+
+extern const struct xattr_handler *hfsplus_xattr_handlers[];
+
+int __hfsplus_setxattr(struct inode *inode, const char *name,
+                       const void *value, size_t size, int flags);
+
+/* Dentry-based wrapper around the inode-based core setxattr. */
+static inline int hfsplus_setxattr(struct dentry *dentry, const char *name,
+                       const void *value, size_t size, int flags)
+{
+       return __hfsplus_setxattr(dentry->d_inode, name, value, size, flags);
+}
+
+ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+                       void *value, size_t size);
+
+ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
+
+int hfsplus_removexattr(struct dentry *dentry, const char *name);
+
+int hfsplus_init_security(struct inode *inode, struct inode *dir,
+                               const struct qstr *qstr);
+
+/* ACL support is not implemented yet; succeed unconditionally. */
+static inline int hfsplus_init_acl(struct inode *inode, struct inode *dir)
+{
+       /*TODO: implement*/
+       return 0;
+}
+
+/* Initialize ACL then LSM security attrs on a newly created inode. */
+static inline int hfsplus_init_inode_security(struct inode *inode,
+                                               struct inode *dir,
+                                               const struct qstr *qstr)
+{
+       int err;
+
+       err = hfsplus_init_acl(inode, dir);
+       if (!err)
+               err = hfsplus_init_security(inode, dir, qstr);
+       return err;
+}
+
+#endif
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
new file mode 100644 (file)
index 0000000..83b842f
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * linux/fs/hfsplus/xattr_security.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for storing security labels as extended attributes.
+ */
+
+#include <linux/security.h>
+#include "hfsplus_fs.h"
+#include "xattr.h"
+
+/*
+ * "security." namespace get handler: restore the stripped prefix and
+ * delegate to hfsplus_getxattr().  Rejects names that would overflow
+ * the maximum on-disk attr name length once re-prefixed.
+ */
+static int hfsplus_security_getxattr(struct dentry *dentry, const char *name,
+                                       void *buffer, size_t size, int type)
+{
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+       size_t len = strlen(name);
+
+       if (!strcmp(name, ""))
+               return -EINVAL;
+
+       if (len + XATTR_SECURITY_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+               return -EOPNOTSUPP;
+
+       strcpy(xattr_name, XATTR_SECURITY_PREFIX);
+       strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name);
+
+       return hfsplus_getxattr(dentry, xattr_name, buffer, size);
+}
+
+/*
+ * "security." namespace set handler: mirror of the get handler,
+ * delegating to hfsplus_setxattr() with the prefix restored.
+ */
+static int hfsplus_security_setxattr(struct dentry *dentry, const char *name,
+               const void *buffer, size_t size, int flags, int type)
+{
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+       size_t len = strlen(name);
+
+       if (!strcmp(name, ""))
+               return -EINVAL;
+
+       if (len + XATTR_SECURITY_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+               return -EOPNOTSUPP;
+
+       strcpy(xattr_name, XATTR_SECURITY_PREFIX);
+       strcpy(xattr_name + XATTR_SECURITY_PREFIX_LEN, name);
+
+       return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+}
+
+static size_t hfsplus_security_listxattr(struct dentry *dentry, char *list,
+               size_t list_size, const char *name, size_t name_len, int type)
+{
+       /*
+        * This method is never called: hfsplus_listxattr() is used
+        * instead of generic_listxattr(), so per-handler list
+        * callbacks are bypassed.
+        */
+       return -EOPNOTSUPP;
+}
+
+/*
+ * LSM callback invoked by security_inode_init_security(): persist
+ * each security attribute from @xattr_array under the "security."
+ * prefix via __hfsplus_setxattr().  Stops at the first error.
+ */
+static int hfsplus_initxattrs(struct inode *inode,
+                               const struct xattr *xattr_array,
+                               void *fs_info)
+{
+       const struct xattr *xattr;
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+       size_t xattr_name_len;
+       int err = 0;
+
+       for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+               xattr_name_len = strlen(xattr->name);
+
+               if (xattr_name_len == 0)
+                       continue;
+
+               if (xattr_name_len + XATTR_SECURITY_PREFIX_LEN >
+                               HFSPLUS_ATTR_MAX_STRLEN)
+                       return -EOPNOTSUPP;
+
+               strcpy(xattr_name, XATTR_SECURITY_PREFIX);
+               strcpy(xattr_name +
+                       XATTR_SECURITY_PREFIX_LEN, xattr->name);
+               /* Explicit NUL: the buffer is reused across iterations. */
+               memset(xattr_name +
+                       XATTR_SECURITY_PREFIX_LEN + xattr_name_len, 0, 1);
+
+               err = __hfsplus_setxattr(inode, xattr_name,
+                                       xattr->value, xattr->value_len, 0);
+               if (err)
+                       break;
+       }
+       return err;
+}
+
+/*
+ * Ask the active LSM for initial security xattrs on a new inode and
+ * store them through hfsplus_initxattrs().
+ */
+int hfsplus_init_security(struct inode *inode, struct inode *dir,
+                               const struct qstr *qstr)
+{
+       return security_inode_init_security(inode, dir, qstr,
+                                       &hfsplus_initxattrs, NULL);
+}
+
+/* Handler for the "security." namespace. */
+const struct xattr_handler hfsplus_xattr_security_handler = {
+       .prefix = XATTR_SECURITY_PREFIX,
+       .list   = hfsplus_security_listxattr,
+       .get    = hfsplus_security_getxattr,
+       .set    = hfsplus_security_setxattr,
+};
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
new file mode 100644 (file)
index 0000000..426cee2
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * linux/fs/hfsplus/xattr_trusted.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for trusted extended attributes.
+ */
+
+#include "hfsplus_fs.h"
+#include "xattr.h"
+
+static int hfsplus_trusted_getxattr(struct dentry *dentry, const char *name,
+                                       void *buffer, size_t size, int type)
+{
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+       size_t len = strlen(name);
+
+       if (!strcmp(name, ""))
+               return -EINVAL;
+
+       if (len + XATTR_TRUSTED_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+               return -EOPNOTSUPP;
+
+       strcpy(xattr_name, XATTR_TRUSTED_PREFIX);
+       strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name);
+
+       return hfsplus_getxattr(dentry, xattr_name, buffer, size);
+}
+
+static int hfsplus_trusted_setxattr(struct dentry *dentry, const char *name,
+               const void *buffer, size_t size, int flags, int type)
+{
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+       size_t len = strlen(name);
+
+       if (!strcmp(name, ""))
+               return -EINVAL;
+
+       if (len + XATTR_TRUSTED_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+               return -EOPNOTSUPP;
+
+       strcpy(xattr_name, XATTR_TRUSTED_PREFIX);
+       strcpy(xattr_name + XATTR_TRUSTED_PREFIX_LEN, name);
+
+       return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+}
+
+static size_t hfsplus_trusted_listxattr(struct dentry *dentry, char *list,
+               size_t list_size, const char *name, size_t name_len, int type)
+{
+       /*
+        * This method is not used.
+        * It is used hfsplus_listxattr() instead of generic_listxattr().
+        */
+       return -EOPNOTSUPP;
+}
+
+const struct xattr_handler hfsplus_xattr_trusted_handler = {
+       .prefix = XATTR_TRUSTED_PREFIX,
+       .list   = hfsplus_trusted_listxattr,
+       .get    = hfsplus_trusted_getxattr,
+       .set    = hfsplus_trusted_setxattr,
+};
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
new file mode 100644 (file)
index 0000000..e340165
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * linux/fs/hfsplus/xattr_user.c
+ *
+ * Vyacheslav Dubeyko <slava@dubeyko.com>
+ *
+ * Handler for user extended attributes.
+ */
+
+#include "hfsplus_fs.h"
+#include "xattr.h"
+
+static int hfsplus_user_getxattr(struct dentry *dentry, const char *name,
+                                       void *buffer, size_t size, int type)
+{
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+       size_t len = strlen(name);
+
+       if (!strcmp(name, ""))
+               return -EINVAL;
+
+       if (len + XATTR_USER_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+               return -EOPNOTSUPP;
+
+       strcpy(xattr_name, XATTR_USER_PREFIX);
+       strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name);
+
+       return hfsplus_getxattr(dentry, xattr_name, buffer, size);
+}
+
+static int hfsplus_user_setxattr(struct dentry *dentry, const char *name,
+               const void *buffer, size_t size, int flags, int type)
+{
+       char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + 1] = {0};
+       size_t len = strlen(name);
+
+       if (!strcmp(name, ""))
+               return -EINVAL;
+
+       if (len + XATTR_USER_PREFIX_LEN > HFSPLUS_ATTR_MAX_STRLEN)
+               return -EOPNOTSUPP;
+
+       strcpy(xattr_name, XATTR_USER_PREFIX);
+       strcpy(xattr_name + XATTR_USER_PREFIX_LEN, name);
+
+       return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
+}
+
+static size_t hfsplus_user_listxattr(struct dentry *dentry, char *list,
+               size_t list_size, const char *name, size_t name_len, int type)
+{
+       /*
+        * This method is not used.
+        * It is used hfsplus_listxattr() instead of generic_listxattr().
+        */
+       return -EOPNOTSUPP;
+}
+
+const struct xattr_handler hfsplus_xattr_user_handler = {
+       .prefix = XATTR_USER_PREFIX,
+       .list   = hfsplus_user_listxattr,
+       .get    = hfsplus_user_getxattr,
+       .set    = hfsplus_user_setxattr,
+};
index 457addc..fbabb90 100644 (file)
@@ -30,7 +30,7 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
        return list_entry(inode, struct hostfs_inode_info, vfs_inode);
 }
 
-#define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
+#define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file))
 
 static int hostfs_d_delete(const struct dentry *dentry)
 {
@@ -861,14 +861,6 @@ int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
 }
 
 static const struct inode_operations hostfs_iops = {
-       .create         = hostfs_create,
-       .link           = hostfs_link,
-       .unlink         = hostfs_unlink,
-       .symlink        = hostfs_symlink,
-       .mkdir          = hostfs_mkdir,
-       .rmdir          = hostfs_rmdir,
-       .mknod          = hostfs_mknod,
-       .rename         = hostfs_rename,
        .permission     = hostfs_permission,
        .setattr        = hostfs_setattr,
 };
index 78e12b2..546f6d3 100644 (file)
@@ -25,7 +25,7 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
        loff_t new_off = off + (whence == 1 ? filp->f_pos : 0);
        loff_t pos;
        struct quad_buffer_head qbh;
-       struct inode *i = filp->f_path.dentry->d_inode;
+       struct inode *i = file_inode(filp);
        struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
        struct super_block *s = i->i_sb;
 
@@ -57,7 +57,7 @@ fail:
 
 static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
        struct quad_buffer_head qbh;
        struct hpfs_dirent *de;
index fbfe2df..9f9dbec 100644 (file)
@@ -152,7 +152,7 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf,
        retval = do_sync_write(file, buf, count, ppos);
        if (retval > 0) {
                hpfs_lock(file->f_path.dentry->d_sb);
-               hpfs_i(file->f_path.dentry->d_inode)->i_dirty = 1;
+               hpfs_i(file_inode(file))->i_dirty = 1;
                hpfs_unlock(file->f_path.dentry->d_sb);
        }
        return retval;
index 43b315f..74f5570 100644 (file)
@@ -180,7 +180,7 @@ static ssize_t read_proc(struct file *file, char __user *buf, ssize_t count,
        ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
        ssize_t n;
 
-       read = file->f_path.dentry->d_inode->i_fop->read;
+       read = file_inode(file)->i_fop->read;
 
        if (!is_user)
                set_fs(KERNEL_DS);
@@ -288,7 +288,7 @@ static ssize_t hppfs_write(struct file *file, const char __user *buf,
        struct file *proc_file = data->proc_file;
        ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
 
-       write = proc_file->f_path.dentry->d_inode->i_fop->write;
+       write = file_inode(proc_file)->i_fop->write;
        return (*write)(proc_file, buf, len, ppos);
 }
 
@@ -513,7 +513,7 @@ static loff_t hppfs_llseek(struct file *file, loff_t off, int where)
        loff_t (*llseek)(struct file *, loff_t, int);
        loff_t ret;
 
-       llseek = proc_file->f_path.dentry->d_inode->i_fop->llseek;
+       llseek = file_inode(proc_file)->i_fop->llseek;
        if (llseek != NULL) {
                ret = (*llseek)(proc_file, off, where);
                if (ret < 0)
@@ -561,7 +561,7 @@ static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir)
                                      });
        int err;
 
-       readdir = proc_file->f_path.dentry->d_inode->i_fop->readdir;
+       readdir = file_inode(proc_file)->i_fop->readdir;
 
        proc_file->f_pos = file->f_pos;
        err = (*readdir)(proc_file, &dirent, hppfs_filldir);
index 78bde32..7f94e0c 100644 (file)
@@ -97,7 +97,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
 
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);
@@ -918,16 +918,25 @@ static int get_hstate_idx(int page_size_log)
        return h - hstates;
 }
 
+static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+       return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
+                               dentry->d_name.name);
+}
+
+static struct dentry_operations anon_ops = {
+       .d_dname = hugetlb_dname
+};
+
 struct file *hugetlb_file_setup(const char *name, unsigned long addr,
                                size_t size, vm_flags_t acctflag,
                                struct user_struct **user,
                                int creat_flags, int page_size_log)
 {
-       int error = -ENOMEM;
-       struct file *file;
+       struct file *file = ERR_PTR(-ENOMEM);
        struct inode *inode;
        struct path path;
-       struct dentry *root;
+       struct super_block *sb;
        struct qstr quick_string;
        struct hstate *hstate;
        unsigned long num_pages;
@@ -955,17 +964,18 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
                }
        }
 
-       root = hugetlbfs_vfsmount[hstate_idx]->mnt_root;
+       sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
        quick_string.name = name;
        quick_string.len = strlen(quick_string.name);
        quick_string.hash = 0;
-       path.dentry = d_alloc(root, &quick_string);
+       path.dentry = d_alloc_pseudo(sb, &quick_string);
        if (!path.dentry)
                goto out_shm_unlock;
 
+       d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
-       error = -ENOSPC;
-       inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0);
+       file = ERR_PTR(-ENOSPC);
+       inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;
 
@@ -973,7 +983,7 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
        size += addr & ~huge_page_mask(hstate);
        num_pages = ALIGN(size, huge_page_size(hstate)) >>
                        huge_page_shift(hstate);
-       error = -ENOMEM;
+       file = ERR_PTR(-ENOMEM);
        if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
                goto out_inode;
 
@@ -981,10 +991,9 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
        inode->i_size = size;
        clear_nlink(inode);
 
-       error = -ENFILE;
        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                        &hugetlbfs_file_operations);
-       if (!file)
+       if (IS_ERR(file))
                goto out_dentry; /* inode is already attached */
 
        return file;
@@ -998,7 +1007,7 @@ out_shm_unlock:
                user_shm_unlock(size, *user);
                *user = NULL;
        }
-       return ERR_PTR(error);
+       return file;
 }
 
 static int __init init_hugetlbfs_fs(void)
index 14084b7..f5f7c06 100644 (file)
@@ -798,11 +798,10 @@ static struct inode *find_inode(struct super_block *sb,
                                int (*test)(struct inode *, void *),
                                void *data)
 {
-       struct hlist_node *node;
        struct inode *inode = NULL;
 
 repeat:
-       hlist_for_each_entry(inode, node, head, i_hash) {
+       hlist_for_each_entry(inode, head, i_hash) {
                spin_lock(&inode->i_lock);
                if (inode->i_sb != sb) {
                        spin_unlock(&inode->i_lock);
@@ -830,11 +829,10 @@ repeat:
 static struct inode *find_inode_fast(struct super_block *sb,
                                struct hlist_head *head, unsigned long ino)
 {
-       struct hlist_node *node;
        struct inode *inode = NULL;
 
 repeat:
-       hlist_for_each_entry(inode, node, head, i_hash) {
+       hlist_for_each_entry(inode, head, i_hash) {
                spin_lock(&inode->i_lock);
                if (inode->i_ino != ino) {
                        spin_unlock(&inode->i_lock);
@@ -1132,11 +1130,10 @@ EXPORT_SYMBOL(iget_locked);
 static int test_inode_iunique(struct super_block *sb, unsigned long ino)
 {
        struct hlist_head *b = inode_hashtable + hash(sb, ino);
-       struct hlist_node *node;
        struct inode *inode;
 
        spin_lock(&inode_hash_lock);
-       hlist_for_each_entry(inode, node, b, i_hash) {
+       hlist_for_each_entry(inode, b, i_hash) {
                if (inode->i_ino == ino && inode->i_sb == sb) {
                        spin_unlock(&inode_hash_lock);
                        return 0;
@@ -1291,10 +1288,9 @@ int insert_inode_locked(struct inode *inode)
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
 
        while (1) {
-               struct hlist_node *node;
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
-               hlist_for_each_entry(old, node, head, i_hash) {
+               hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
@@ -1306,7 +1302,7 @@ int insert_inode_locked(struct inode *inode)
                        }
                        break;
                }
-               if (likely(!node)) {
+               if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
@@ -1334,11 +1330,10 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 
        while (1) {
-               struct hlist_node *node;
                struct inode *old = NULL;
 
                spin_lock(&inode_hash_lock);
-               hlist_for_each_entry(old, node, head, i_hash) {
+               hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_sb != sb)
                                continue;
                        if (!test(old, data))
@@ -1350,7 +1345,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                        }
                        break;
                }
-               if (likely(!node)) {
+               if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
@@ -1655,7 +1650,7 @@ EXPORT_SYMBOL(file_remove_suid);
 
 int file_update_time(struct file *file)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct timespec now;
        int sync_it = 0;
        int ret;
index 2f6af7f..507141f 100644 (file)
@@ -69,7 +69,7 @@ extern void __mnt_drop_write_file(struct file *);
 /*
  * fs_struct.c
  */
-extern void chroot_fs_refs(struct path *, struct path *);
+extern void chroot_fs_refs(const struct path *, const struct path *);
 
 /*
  * file_table.c
index 3bdad6d..fd507fb 100644 (file)
@@ -175,7 +175,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
        struct fiemap fiemap;
        struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
        struct fiemap_extent_info fieinfo = { 0, };
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        u64 len;
        int error;
@@ -424,7 +424,7 @@ EXPORT_SYMBOL(generic_block_fiemap);
  */
 int ioctl_preallocate(struct file *filp, void __user *argp)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct space_resv sr;
 
        if (copy_from_user(&sr, argp, sizeof(sr)))
@@ -449,7 +449,7 @@ int ioctl_preallocate(struct file *filp, void __user *argp)
 static int file_ioctl(struct file *filp, unsigned int cmd,
                unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int __user *p = (int __user *)arg;
 
        switch (cmd) {
@@ -512,7 +512,7 @@ static int ioctl_fioasync(unsigned int fd, struct file *filp,
 
 static int ioctl_fsfreeze(struct file *filp)
 {
-       struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
+       struct super_block *sb = file_inode(filp)->i_sb;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -527,7 +527,7 @@ static int ioctl_fsfreeze(struct file *filp)
 
 static int ioctl_fsthaw(struct file *filp)
 {
-       struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
+       struct super_block *sb = file_inode(filp)->i_sb;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -548,7 +548,7 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
 {
        int error = 0;
        int __user *argp = (int __user *)arg;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        switch (cmd) {
        case FIOCLEX:
index 0b3fa79..592e511 100644 (file)
@@ -296,7 +296,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
  */
 static int zisofs_readpage(struct file *file, struct page *page)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct address_space *mapping = inode->i_mapping;
        int err;
        int i, pcount, full_page;
index f20437c..a7d5c3c 100644 (file)
@@ -253,7 +253,7 @@ static int isofs_readdir(struct file *filp,
        int result;
        char *tmpname;
        struct iso_directory_record *tmpde;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        tmpname = (char *)__get_free_page(GFP_KERNEL);
        if (tmpname == NULL)
index 2b4f235..12088d8 100644 (file)
@@ -125,10 +125,10 @@ isofs_export_encode_fh(struct inode *inode,
         */
        if (parent && (len < 5)) {
                *max_len = 5;
-               return 255;
+               return FILEID_INVALID;
        } else if (len < 3) {
                *max_len = 3;
-               return 255;
+               return FILEID_INVALID;
        }
 
        len = 3;
index 3091d42..750c701 100644 (file)
@@ -435,7 +435,12 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
        trace_jbd2_commit_locking(journal, commit_transaction);
        stats.run.rs_wait = commit_transaction->t_max_wait;
+       stats.run.rs_request_delay = 0;
        stats.run.rs_locked = jiffies;
+       if (commit_transaction->t_requested)
+               stats.run.rs_request_delay =
+                       jbd2_time_diff(commit_transaction->t_requested,
+                                      stats.run.rs_locked);
        stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
                                              stats.run.rs_locked);
 
@@ -1116,7 +1121,10 @@ restart_loop:
         */
        spin_lock(&journal->j_history_lock);
        journal->j_stats.ts_tid++;
+       if (commit_transaction->t_requested)
+               journal->j_stats.ts_requested++;
        journal->j_stats.run.rs_wait += stats.run.rs_wait;
+       journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
        journal->j_stats.run.rs_running += stats.run.rs_running;
        journal->j_stats.run.rs_locked += stats.run.rs_locked;
        journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
index dbf41f9..ed10991 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/kthread.h>
 #include <linux/poison.h>
 #include <linux/proc_fs.h>
-#include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/math64.h>
 #include <linux/hash.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_JBD2_DEBUG
+ushort jbd2_journal_enable_debug __read_mostly;
+EXPORT_SYMBOL(jbd2_journal_enable_debug);
+
+module_param_named(jbd2_debug, jbd2_journal_enable_debug, ushort, 0644);
+MODULE_PARM_DESC(jbd2_debug, "Debugging level for jbd2");
+#endif
+
 EXPORT_SYMBOL(jbd2_journal_extend);
 EXPORT_SYMBOL(jbd2_journal_stop);
 EXPORT_SYMBOL(jbd2_journal_lock_updates);
@@ -513,6 +520,10 @@ int __jbd2_log_space_left(journal_t *journal)
  */
 int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 {
+       /* Return if the txn has already requested to be committed */
+       if (journal->j_commit_request == target)
+               return 0;
+
        /*
         * The only transaction we can possibly wait upon is the
         * currently running transaction (if it exists).  Otherwise,
@@ -529,6 +540,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
                jbd_debug(1, "JBD2: requesting commit %d/%d\n",
                          journal->j_commit_request,
                          journal->j_commit_sequence);
+               journal->j_running_transaction->t_requested = jiffies;
                wake_up(&journal->j_wait_commit);
                return 1;
        } else if (!tid_geq(journal->j_commit_request, target))
@@ -894,13 +906,18 @@ static int jbd2_seq_info_show(struct seq_file *seq, void *v)
 
        if (v != SEQ_START_TOKEN)
                return 0;
-       seq_printf(seq, "%lu transaction, each up to %u blocks\n",
-                       s->stats->ts_tid,
-                       s->journal->j_max_transaction_buffers);
+       seq_printf(seq, "%lu transactions (%lu requested), "
+                  "each up to %u blocks\n",
+                  s->stats->ts_tid, s->stats->ts_requested,
+                  s->journal->j_max_transaction_buffers);
        if (s->stats->ts_tid == 0)
                return 0;
        seq_printf(seq, "average: \n  %ums waiting for transaction\n",
            jiffies_to_msecs(s->stats->run.rs_wait / s->stats->ts_tid));
+       seq_printf(seq, "  %ums request delay\n",
+           (s->stats->ts_requested == 0) ? 0 :
+           jiffies_to_msecs(s->stats->run.rs_request_delay /
+                            s->stats->ts_requested));
        seq_printf(seq, "  %ums running transaction\n",
            jiffies_to_msecs(s->stats->run.rs_running / s->stats->ts_tid));
        seq_printf(seq, "  %ums transaction was being locked\n",
@@ -2485,45 +2502,6 @@ restart:
        spin_unlock(&journal->j_list_lock);
 }
 
-/*
- * debugfs tunables
- */
-#ifdef CONFIG_JBD2_DEBUG
-u8 jbd2_journal_enable_debug __read_mostly;
-EXPORT_SYMBOL(jbd2_journal_enable_debug);
-
-#define JBD2_DEBUG_NAME "jbd2-debug"
-
-static struct dentry *jbd2_debugfs_dir;
-static struct dentry *jbd2_debug;
-
-static void __init jbd2_create_debugfs_entry(void)
-{
-       jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
-       if (jbd2_debugfs_dir)
-               jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME,
-                                              S_IRUGO | S_IWUSR,
-                                              jbd2_debugfs_dir,
-                                              &jbd2_journal_enable_debug);
-}
-
-static void __exit jbd2_remove_debugfs_entry(void)
-{
-       debugfs_remove(jbd2_debug);
-       debugfs_remove(jbd2_debugfs_dir);
-}
-
-#else
-
-static void __init jbd2_create_debugfs_entry(void)
-{
-}
-
-static void __exit jbd2_remove_debugfs_entry(void)
-{
-}
-
-#endif
 
 #ifdef CONFIG_PROC_FS
 
@@ -2609,7 +2587,6 @@ static int __init journal_init(void)
 
        ret = journal_init_caches();
        if (ret == 0) {
-               jbd2_create_debugfs_entry();
                jbd2_create_jbd_stats_proc_entry();
        } else {
                jbd2_journal_destroy_caches();
@@ -2624,7 +2601,6 @@ static void __exit journal_exit(void)
        if (n)
                printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
 #endif
-       jbd2_remove_debugfs_entry();
        jbd2_remove_jbd_stats_proc_entry();
        jbd2_journal_destroy_caches();
 }
index df9f297..d6ee5ae 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/bug.h>
 #include <linux/module.h>
 
+#include <trace/events/jbd2.h>
+
 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
 static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
 
@@ -100,6 +102,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;
+       transaction->t_requested = 0;
 
        return transaction;
 }
@@ -306,6 +309,8 @@ repeat:
         */
        update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
+       handle->h_requested_credits = nblocks;
+       handle->h_start_jiffies = jiffies;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
        jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
@@ -352,7 +357,8 @@ static handle_t *new_handle(int nblocks)
  * Return a pointer to a newly allocated handle, or an ERR_PTR() value
  * on failure.
  */
-handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask)
+handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask,
+                             unsigned int type, unsigned int line_no)
 {
        handle_t *handle = journal_current_handle();
        int err;
@@ -376,8 +382,13 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask)
        if (err < 0) {
                jbd2_free_handle(handle);
                current->journal_info = NULL;
-               handle = ERR_PTR(err);
+               return ERR_PTR(err);
        }
+       handle->h_type = type;
+       handle->h_line_no = line_no;
+       trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
+                               handle->h_transaction->t_tid, type,
+                               line_no, nblocks);
        return handle;
 }
 EXPORT_SYMBOL(jbd2__journal_start);
@@ -385,7 +396,7 @@ EXPORT_SYMBOL(jbd2__journal_start);
 
 handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 {
-       return jbd2__journal_start(journal, nblocks, GFP_NOFS);
+       return jbd2__journal_start(journal, nblocks, GFP_NOFS, 0, 0);
 }
 EXPORT_SYMBOL(jbd2_journal_start);
 
@@ -447,7 +458,14 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
                goto unlock;
        }
 
+       trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
+                                handle->h_transaction->t_tid,
+                                handle->h_type, handle->h_line_no,
+                                handle->h_buffer_credits,
+                                nblocks);
+
        handle->h_buffer_credits += nblocks;
+       handle->h_requested_credits += nblocks;
        atomic_add(nblocks, &transaction->t_outstanding_credits);
        result = 0;
 
@@ -1376,6 +1394,13 @@ int jbd2_journal_stop(handle_t *handle)
        }
 
        jbd_debug(4, "Handle %p going down\n", handle);
+       trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
+                               handle->h_transaction->t_tid,
+                               handle->h_type, handle->h_line_no,
+                               jiffies - handle->h_start_jiffies,
+                               handle->h_sync, handle->h_requested_credits,
+                               (handle->h_requested_credits -
+                                handle->h_buffer_credits));
 
        /*
         * Implement synchronous transaction batching.  If the handle
index ad7774d..acd46a4 100644 (file)
@@ -117,12 +117,12 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
 static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
        struct jffs2_inode_info *f;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct jffs2_full_dirent *fd;
        unsigned long offset, curofs;
 
        jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n",
-                 filp->f_path.dentry->d_inode->i_ino);
+                 file_inode(filp)->i_ino);
 
        f = JFFS2_INODE_INFO(inode);
 
index bc555ff..93a1232 100644 (file)
@@ -58,7 +58,7 @@ static long jfs_map_ext2(unsigned long flags, int from)
 
 long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct jfs_inode_info *jfs_inode = JFS_IP(inode);
        unsigned int flags;
 
index 9197a1b..0ddbece 100644 (file)
@@ -3004,7 +3004,7 @@ static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
  */
 int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *ip = filp->f_path.dentry->d_inode;
+       struct inode *ip = file_inode(filp);
        struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
        int rc = 0;
        loff_t dtpos;   /* legacy OS/2 style position */
index ca0a080..0796c45 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/nfs_fs.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
 #include <linux/kthread.h>
@@ -178,7 +178,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
                        continue;
                if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
                        continue;
-               if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0)
+               if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->fl_file)) ,fh) != 0)
                        continue;
                /* Alright, we found a lock. Set the return status
                 * and wake up the caller
@@ -220,10 +220,19 @@ reclaimer(void *ptr)
 {
        struct nlm_host   *host = (struct nlm_host *) ptr;
        struct nlm_wait   *block;
+       struct nlm_rqst   *req;
        struct file_lock *fl, *next;
        u32 nsmstate;
        struct net *net = host->net;
 
+       req = kmalloc(sizeof(*req), GFP_KERNEL);
+       if (!req) {
+               printk(KERN_ERR "lockd: reclaimer unable to alloc memory."
+                               " Locks for %s won't be reclaimed!\n",
+                               host->h_name);
+               return 0;
+       }
+
        allow_signal(SIGKILL);
 
        down_write(&host->h_rwsem);
@@ -253,7 +262,7 @@ restart:
                 */
                if (signalled())
                        continue;
-               if (nlmclnt_reclaim(host, fl) != 0)
+               if (nlmclnt_reclaim(host, fl, req) != 0)
                        continue;
                list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
                if (host->h_nsmstate != nsmstate) {
@@ -279,5 +288,6 @@ restart:
        /* Release host handle after use */
        nlmclnt_release_host(host);
        lockd_down(net);
+       kfree(req);
        return 0;
 }
index 52e5120..7e529c3 100644 (file)
@@ -127,7 +127,7 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
        struct nlm_lock *lock = &argp->lock;
 
        nlmclnt_next_cookie(&argp->cookie);
-       memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
+       memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
        lock->caller  = utsname()->nodename;
        lock->oh.data = req->a_owner;
        lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
@@ -618,17 +618,15 @@ out_unlock:
  * RECLAIM: Try to reclaim a lock
  */
 int
-nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
+nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
+               struct nlm_rqst *req)
 {
-       struct nlm_rqst reqst, *req;
        int             status;
 
-       req = &reqst;
        memset(req, 0, sizeof(*req));
        locks_init_lock(&req->a_args.lock.fl);
        locks_init_lock(&req->a_res.lock.fl);
        req->a_host  = host;
-       req->a_flags = 0;
 
        /* Set up the argument struct */
        nlmclnt_setlockargs(req, fl);
index 0e17090..969d589 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
 #include <linux/mutex.h>
 static struct hlist_head       nlm_server_hosts[NLM_HOST_NRHASH];
 static struct hlist_head       nlm_client_hosts[NLM_HOST_NRHASH];
 
-#define for_each_host(host, pos, chain, table) \
+#define for_each_host(host, chain, table) \
        for ((chain) = (table); \
             (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
-               hlist_for_each_entry((host), (pos), (chain), h_hash)
+               hlist_for_each_entry((host), (chain), h_hash)
 
-#define for_each_host_safe(host, pos, next, chain, table) \
+#define for_each_host_safe(host, next, chain, table) \
        for ((chain) = (table); \
             (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
-               hlist_for_each_entry_safe((host), (pos), (next), \
+               hlist_for_each_entry_safe((host), (next), \
                                                (chain), h_hash)
 
 static unsigned long           nrhosts;
@@ -225,7 +226,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
                .net            = net,
        };
        struct hlist_head *chain;
-       struct hlist_node *pos;
        struct nlm_host *host;
        struct nsm_handle *nsm = NULL;
        struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -237,7 +237,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
        mutex_lock(&nlm_host_mutex);
 
        chain = &nlm_client_hosts[nlm_hash_address(sap)];
-       hlist_for_each_entry(host, pos, chain, h_hash) {
+       hlist_for_each_entry(host, chain, h_hash) {
                if (host->net != net)
                        continue;
                if (!rpc_cmp_addr(nlm_addr(host), sap))
@@ -322,7 +322,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
                                    const size_t hostname_len)
 {
        struct hlist_head *chain;
-       struct hlist_node *pos;
        struct nlm_host *host = NULL;
        struct nsm_handle *nsm = NULL;
        struct sockaddr *src_sap = svc_daddr(rqstp);
@@ -350,7 +349,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
                nlm_gc_hosts(net);
 
        chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
-       hlist_for_each_entry(host, pos, chain, h_hash) {
+       hlist_for_each_entry(host, chain, h_hash) {
                if (host->net != net)
                        continue;
                if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
@@ -515,10 +514,9 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
 {
        struct nlm_host *host;
        struct hlist_head *chain;
-       struct hlist_node *pos;
 
        mutex_lock(&nlm_host_mutex);
-       for_each_host(host, pos, chain, cache) {
+       for_each_host(host, chain, cache) {
                if (host->h_nsmhandle == nsm
                    && host->h_nsmstate != info->state) {
                        host->h_nsmstate = info->state;
@@ -570,7 +568,6 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
 static void nlm_complain_hosts(struct net *net)
 {
        struct hlist_head *chain;
-       struct hlist_node *pos;
        struct nlm_host *host;
 
        if (net) {
@@ -587,7 +584,7 @@ static void nlm_complain_hosts(struct net *net)
                dprintk("lockd: %lu hosts left:\n", nrhosts);
        }
 
-       for_each_host(host, pos, chain, nlm_server_hosts) {
+       for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
                dprintk("       %s (cnt %d use %d exp %ld net %p)\n",
@@ -600,14 +597,13 @@ void
 nlm_shutdown_hosts_net(struct net *net)
 {
        struct hlist_head *chain;
-       struct hlist_node *pos;
        struct nlm_host *host;
 
        mutex_lock(&nlm_host_mutex);
 
        /* First, make all hosts eligible for gc */
        dprintk("lockd: nuking all hosts in net %p...\n", net);
-       for_each_host(host, pos, chain, nlm_server_hosts) {
+       for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
                host->h_expires = jiffies - 1;
@@ -644,11 +640,11 @@ static void
 nlm_gc_hosts(struct net *net)
 {
        struct hlist_head *chain;
-       struct hlist_node *pos, *next;
+       struct hlist_node *next;
        struct nlm_host *host;
 
        dprintk("lockd: host garbage collection for net %p\n", net);
-       for_each_host(host, pos, chain, nlm_server_hosts) {
+       for_each_host(host, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
                host->h_inuse = 0;
@@ -657,7 +653,7 @@ nlm_gc_hosts(struct net *net)
        /* Mark all hosts that hold locks, blocks or shares */
        nlmsvc_mark_resources(net);
 
-       for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+       for_each_host_safe(host, next, chain, nlm_server_hosts) {
                if (net && host->net != net)
                        continue;
                if (atomic_read(&host->h_count) || host->h_inuse
index 3c2cfc6..1812f02 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/xprtsock.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
index 8d80c99..e703318 100644 (file)
@@ -406,8 +406,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
        __be32                  ret;
 
        dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
-                               file->f_file->f_path.dentry->d_inode->i_sb->s_id,
-                               file->f_file->f_path.dentry->d_inode->i_ino,
+                               file_inode(file->f_file)->i_sb->s_id,
+                               file_inode(file->f_file)->i_ino,
                                lock->fl.fl_type, lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end,
@@ -513,8 +513,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
        __be32                  ret;
 
        dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
-                               file->f_file->f_path.dentry->d_inode->i_sb->s_id,
-                               file->f_file->f_path.dentry->d_inode->i_ino,
+                               file_inode(file->f_file)->i_sb->s_id,
+                               file_inode(file->f_file)->i_ino,
                                lock->fl.fl_type,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);
@@ -606,8 +606,8 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
        int     error;
 
        dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
-                               file->f_file->f_path.dentry->d_inode->i_sb->s_id,
-                               file->f_file->f_path.dentry->d_inode->i_ino,
+                               file_inode(file->f_file)->i_sb->s_id,
+                               file_inode(file->f_file)->i_ino,
                                lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);
@@ -635,8 +635,8 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
        int status = 0;
 
        dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
-                               file->f_file->f_path.dentry->d_inode->i_sb->s_id,
-                               file->f_file->f_path.dentry->d_inode->i_ino,
+                               file_inode(file->f_file)->i_sb->s_id,
+                               file_inode(file->f_file)->i_ino,
                                lock->fl.fl_pid,
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);
index 0deb5f6..97e8741 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/sunrpc/svc.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/nfsd/nfsfh.h>
 #include <linux/nfsd/export.h>
 #include <linux/lockd/lockd.h>
@@ -45,7 +45,7 @@ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
 
 static inline void nlm_debug_print_file(char *msg, struct nlm_file *file)
 {
-       struct inode *inode = file->f_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file->f_file);
 
        dprintk("lockd: %s %s/%ld\n",
                msg, inode->i_sb->s_id, inode->i_ino);
@@ -84,7 +84,6 @@ __be32
 nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
                                        struct nfs_fh *f)
 {
-       struct hlist_node *pos;
        struct nlm_file *file;
        unsigned int    hash;
        __be32          nfserr;
@@ -96,7 +95,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
        /* Lock file table */
        mutex_lock(&nlm_file_mutex);
 
-       hlist_for_each_entry(file, pos, &nlm_files[hash], f_list)
+       hlist_for_each_entry(file, &nlm_files[hash], f_list)
                if (!nfs_compare_fh(&file->f_handle, f))
                        goto found;
 
@@ -248,13 +247,13 @@ static int
 nlm_traverse_files(void *data, nlm_host_match_fn_t match,
                int (*is_failover_file)(void *data, struct nlm_file *file))
 {
-       struct hlist_node *pos, *next;
+       struct hlist_node *next;
        struct nlm_file *file;
        int i, ret = 0;
 
        mutex_lock(&nlm_file_mutex);
        for (i = 0; i < FILE_NRHASH; i++) {
-               hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) {
+               hlist_for_each_entry_safe(file, next, &nlm_files[i], f_list) {
                        if (is_failover_file && !is_failover_file(data, file))
                                continue;
                        file->f_count++;
index a94e331..cb424a4 100644 (file)
@@ -334,7 +334,7 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
                start = filp->f_pos;
                break;
        case SEEK_END:
-               start = i_size_read(filp->f_path.dentry->d_inode);
+               start = i_size_read(file_inode(filp));
                break;
        default:
                return -EINVAL;
@@ -384,7 +384,7 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
                start = filp->f_pos;
                break;
        case SEEK_END:
-               start = i_size_read(filp->f_path.dentry->d_inode);
+               start = i_size_read(file_inode(filp));
                break;
        default:
                return -EINVAL;
@@ -627,7 +627,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
        struct file_lock *cfl;
 
        lock_flocks();
-       for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
+       for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
                if (!IS_POSIX(cfl))
                        continue;
                if (posix_locks_conflict(fl, cfl))
@@ -708,7 +708,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 {
        struct file_lock *new_fl = NULL;
        struct file_lock **before;
-       struct inode * inode = filp->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(filp);
        int error = 0;
        int found = 0;
 
@@ -1002,7 +1002,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
 int posix_lock_file(struct file *filp, struct file_lock *fl,
                        struct file_lock *conflock)
 {
-       return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
+       return __posix_lock_file(file_inode(filp), fl, conflock);
 }
 EXPORT_SYMBOL(posix_lock_file);
 
@@ -1326,8 +1326,8 @@ int fcntl_getlease(struct file *filp)
        int type = F_UNLCK;
 
        lock_flocks();
-       time_out_leases(filp->f_path.dentry->d_inode);
-       for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
+       time_out_leases(file_inode(filp));
+       for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
                        fl = fl->fl_next) {
                if (fl->fl_file == filp) {
                        type = target_leasetype(fl);
@@ -1843,7 +1843,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
        if (copy_from_user(&flock, l, sizeof(flock)))
                goto out;
 
-       inode = filp->f_path.dentry->d_inode;
+       inode = file_inode(filp);
 
        /* Don't allow mandatory locks on files that may be memory mapped
         * and shared.
@@ -1961,7 +1961,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
        if (copy_from_user(&flock, l, sizeof(flock)))
                goto out;
 
-       inode = filp->f_path.dentry->d_inode;
+       inode = file_inode(filp);
 
        /* Don't allow mandatory locks on files that may be memory mapped
         * and shared.
@@ -2030,7 +2030,7 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
         * posix_lock_file().  Another process could be setting a lock on this
         * file at the same time, but we wouldn't remove that lock anyway.
         */
-       if (!filp->f_path.dentry->d_inode->i_flock)
+       if (!file_inode(filp)->i_flock)
                return;
 
        lock.fl_type = F_UNLCK;
@@ -2056,7 +2056,7 @@ EXPORT_SYMBOL(locks_remove_posix);
  */
 void locks_remove_flock(struct file *filp)
 {
-       struct inode * inode = filp->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(filp);
        struct file_lock *fl;
        struct file_lock **before;
 
@@ -2152,7 +2152,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
                fl_pid = fl->fl_pid;
 
        if (fl->fl_file != NULL)
-               inode = fl->fl_file->f_path.dentry->d_inode;
+               inode = file_inode(fl->fl_file);
 
        seq_printf(f, "%lld:%s ", id, pfx);
        if (IS_POSIX(fl)) {
index 26e4a94..b827510 100644 (file)
@@ -284,7 +284,7 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
 #define IMPLICIT_NODES 2
 static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
 {
-       struct inode *dir = file->f_dentry->d_inode;
+       struct inode *dir = file_inode(file);
        loff_t pos = file->f_pos - IMPLICIT_NODES;
        struct page *page;
        struct logfs_disk_dentry *dd;
@@ -320,7 +320,7 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
 
 static int logfs_readdir(struct file *file, void *buf, filldir_t filldir)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        ino_t pino = parent_ino(file->f_dentry);
        int err;
 
index 3886cde..c2219a6 100644 (file)
@@ -183,7 +183,7 @@ static int logfs_releasepage(struct page *page, gfp_t only_xfs_uses_this)
 
 long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct logfs_inode *li = logfs_inode(inode);
        unsigned int oldflags, flags;
        int err;
index 685b2d9..a9ed6f3 100644 (file)
@@ -85,7 +85,7 @@ static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
 static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
        unsigned long pos = filp->f_pos;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        unsigned offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
index 43a97ee..961bc12 100644 (file)
@@ -451,7 +451,7 @@ int inode_permission(struct inode *inode, int mask)
  *
  * Given a path increment the reference count to the dentry and the vfsmount.
  */
-void path_get(struct path *path)
+void path_get(const struct path *path)
 {
        mntget(path->mnt);
        dget(path->dentry);
@@ -464,7 +464,7 @@ EXPORT_SYMBOL(path_get);
  *
  * Given a path decrement the reference count to the dentry and the vfsmount.
  */
-void path_put(struct path *path)
+void path_put(const struct path *path)
 {
        dput(path->dentry);
        mntput(path->mnt);
@@ -600,14 +600,10 @@ static int complete_walk(struct nameidata *nd)
        if (likely(!(nd->flags & LOOKUP_JUMPED)))
                return 0;
 
-       if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
+       if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
                return 0;
 
-       if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
-               return 0;
-
-       /* Note: we do not d_invalidate() */
-       status = d_revalidate(dentry, nd->flags);
+       status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
        if (status > 0)
                return 0;
 
@@ -1342,7 +1338,7 @@ static struct dentry *__lookup_hash(struct qstr *name,
  *  small and for now I'd prefer to have fast path as straight as possible.
  *  It _is_ time-critical.
  */
-static int lookup_fast(struct nameidata *nd, struct qstr *name,
+static int lookup_fast(struct nameidata *nd,
                       struct path *path, struct inode **inode)
 {
        struct vfsmount *mnt = nd->path.mnt;
@@ -1358,7 +1354,7 @@ static int lookup_fast(struct nameidata *nd, struct qstr *name,
         */
        if (nd->flags & LOOKUP_RCU) {
                unsigned seq;
-               dentry = __d_lookup_rcu(parent, name, &seq, nd->inode);
+               dentry = __d_lookup_rcu(parent, &nd->last, &seq, nd->inode);
                if (!dentry)
                        goto unlazy;
 
@@ -1400,7 +1396,7 @@ unlazy:
                if (unlazy_walk(nd, dentry))
                        return -ECHILD;
        } else {
-               dentry = __d_lookup(parent, name);
+               dentry = __d_lookup(parent, &nd->last);
        }
 
        if (unlikely(!dentry))
@@ -1436,8 +1432,7 @@ need_lookup:
 }
 
 /* Fast lookup failed, do it the slow way */
-static int lookup_slow(struct nameidata *nd, struct qstr *name,
-                      struct path *path)
+static int lookup_slow(struct nameidata *nd, struct path *path)
 {
        struct dentry *dentry, *parent;
        int err;
@@ -1446,7 +1441,7 @@ static int lookup_slow(struct nameidata *nd, struct qstr *name,
        BUG_ON(nd->inode != parent->d_inode);
 
        mutex_lock(&parent->d_inode->i_mutex);
-       dentry = __lookup_hash(name, parent, nd->flags);
+       dentry = __lookup_hash(&nd->last, parent, nd->flags);
        mutex_unlock(&parent->d_inode->i_mutex);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);
@@ -1519,7 +1514,7 @@ static inline int should_follow_link(struct inode *inode, int follow)
 }
 
 static inline int walk_component(struct nameidata *nd, struct path *path,
-               struct qstr *name, int type, int follow)
+               int follow)
 {
        struct inode *inode;
        int err;
@@ -1528,14 +1523,14 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
         * to be able to know about the current root directory and
         * parent relationships.
         */
-       if (unlikely(type != LAST_NORM))
-               return handle_dots(nd, type);
-       err = lookup_fast(nd, name, path, &inode);
+       if (unlikely(nd->last_type != LAST_NORM))
+               return handle_dots(nd, nd->last_type);
+       err = lookup_fast(nd, path, &inode);
        if (unlikely(err)) {
                if (err < 0)
                        goto out_err;
 
-               err = lookup_slow(nd, name, path);
+               err = lookup_slow(nd, path);
                if (err < 0)
                        goto out_err;
 
@@ -1594,8 +1589,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
                res = follow_link(&link, nd, &cookie);
                if (res)
                        break;
-               res = walk_component(nd, path, &nd->last,
-                                    nd->last_type, LOOKUP_FOLLOW);
+               res = walk_component(nd, path, LOOKUP_FOLLOW);
                put_link(nd, &link, cookie);
        } while (res > 0);
 
@@ -1802,8 +1796,11 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                        }
                }
 
+               nd->last = this;
+               nd->last_type = type;
+
                if (!name[len])
-                       goto last_component;
+                       return 0;
                /*
                 * If it wasn't NUL, we know it was '/'. Skip that
                 * slash, and continue until no more slashes.
@@ -1812,10 +1809,11 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                        len++;
                } while (unlikely(name[len] == '/'));
                if (!name[len])
-                       goto last_component;
+                       return 0;
+
                name += len;
 
-               err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
+               err = walk_component(nd, &next, LOOKUP_FOLLOW);
                if (err < 0)
                        return err;
 
@@ -1824,16 +1822,10 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                        if (err)
                                return err;
                }
-               if (can_lookup(nd->inode))
-                       continue;
-               err = -ENOTDIR; 
-               break;
-               /* here ends the main loop */
-
-last_component:
-               nd->last = this;
-               nd->last_type = type;
-               return 0;
+               if (!can_lookup(nd->inode)) {
+                       err = -ENOTDIR; 
+                       break;
+               }
        }
        terminate_walk(nd);
        return err;
@@ -1932,8 +1924,7 @@ static inline int lookup_last(struct nameidata *nd, struct path *path)
                nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 
        nd->flags &= ~LOOKUP_PARENT;
-       return walk_component(nd, path, &nd->last, nd->last_type,
-                                       nd->flags & LOOKUP_FOLLOW);
+       return walk_component(nd, path, nd->flags & LOOKUP_FOLLOW);
 }
 
 /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
@@ -2732,7 +2723,7 @@ static int do_last(struct nameidata *nd, struct path *path,
                if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
                        symlink_ok = true;
                /* we _can_ be in RCU mode here */
-               error = lookup_fast(nd, &nd->last, path, &inode);
+               error = lookup_fast(nd, path, &inode);
                if (likely(!error))
                        goto finish_lookup;
 
@@ -2778,7 +2769,7 @@ retry_lookup:
                        goto out;
 
                if ((*opened & FILE_CREATED) ||
-                   !S_ISREG(file->f_path.dentry->d_inode->i_mode))
+                   !S_ISREG(file_inode(file)->i_mode))
                        will_truncate = false;
 
                audit_inode(name, file->f_path.dentry, 0);
@@ -2941,8 +2932,8 @@ static struct file *path_openat(int dfd, struct filename *pathname,
        int error;
 
        file = get_empty_filp();
-       if (!file)
-               return ERR_PTR(-ENFILE);
+       if (IS_ERR(file))
+               return file;
 
        file->f_flags = op->open_flag;
 
index edac42c..50ca17d 100644 (file)
@@ -384,7 +384,7 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
  */
 int __mnt_want_write_file(struct file *file)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
                return __mnt_want_write(file->f_path.mnt);
@@ -1300,24 +1300,6 @@ SYSCALL_DEFINE1(oldumount, char __user *, name)
 
 #endif
 
-static int mount_is_safe(struct path *path)
-{
-       if (may_mount())
-               return 0;
-       return -EPERM;
-#ifdef notyet
-       if (S_ISLNK(path->dentry->d_inode->i_mode))
-               return -EPERM;
-       if (path->dentry->d_inode->i_mode & S_ISVTX) {
-               if (current_uid() != path->dentry->d_inode->i_uid)
-                       return -EPERM;
-       }
-       if (inode_permission(path->dentry->d_inode, MAY_WRITE))
-               return -EPERM;
-       return 0;
-#endif
-}
-
 static bool mnt_ns_loop(struct path *path)
 {
        /* Could bind mounting the mount namespace inode cause a
@@ -1640,9 +1622,6 @@ static int do_change_type(struct path *path, int flag)
        int type;
        int err = 0;
 
-       if (!may_mount())
-               return -EPERM;
-
        if (path->dentry != path->mnt->mnt_root)
                return -EINVAL;
 
@@ -1676,9 +1655,7 @@ static int do_loopback(struct path *path, const char *old_name,
        LIST_HEAD(umount_list);
        struct path old_path;
        struct mount *mnt = NULL, *old;
-       int err = mount_is_safe(path);
-       if (err)
-               return err;
+       int err;
        if (!old_name || !*old_name)
                return -EINVAL;
        err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
@@ -1755,9 +1732,6 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        struct super_block *sb = path->mnt->mnt_sb;
        struct mount *mnt = real_mount(path->mnt);
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
        if (!check_mnt(mnt))
                return -EINVAL;
 
@@ -1771,6 +1745,8 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        down_write(&sb->s_umount);
        if (flags & MS_BIND)
                err = change_mount_flags(path->mnt, flags);
+       else if (!capable(CAP_SYS_ADMIN))
+               err = -EPERM;
        else
                err = do_remount_sb(sb, flags, data, 0);
        if (!err) {
@@ -1803,9 +1779,7 @@ static int do_move_mount(struct path *path, const char *old_name)
        struct path old_path, parent_path;
        struct mount *p;
        struct mount *old;
-       int err = 0;
-       if (!may_mount())
-               return -EPERM;
+       int err;
        if (!old_name || !*old_name)
                return -EINVAL;
        err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
@@ -1947,9 +1921,6 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
        if (!fstype)
                return -EINVAL;
 
-       if (!may_mount())
-               return -EPERM;
-
        type = get_fs_type(fstype);
        if (!type)
                return -ENODEV;
@@ -2263,6 +2234,9 @@ long do_mount(const char *dev_name, const char *dir_name,
        if (retval)
                goto dput_out;
 
+       if (!may_mount())
+               return -EPERM;
+
        /* Default to relatime unless overriden */
        if (!(flags & MS_NOATIME))
                mnt_flags |= MNT_RELATIME;
index 4117e7b..8163260 100644 (file)
@@ -593,14 +593,10 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
                return 1; /* I'm not sure */
 
        qname.name = __name;
-       qname.hash = full_name_hash(qname.name, qname.len);
-
-       if (dentry->d_op && dentry->d_op->d_hash)
-               if (dentry->d_op->d_hash(dentry, dentry->d_inode, &qname) != 0)
-                       goto end_advance;
-
-       newdent = d_lookup(dentry, &qname);
 
+       newdent = d_hash_and_lookup(dentry, &qname);
+       if (unlikely(IS_ERR(newdent)))
+               goto end_advance;
        if (!newdent) {
                newdent = d_alloc(dentry, &qname);
                if (!newdent)
index e2be336..7dafd68 100644 (file)
@@ -538,7 +538,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
        if (!ncp_filp)
                goto out;
        error = -ENOTSOCK;
-       sock_inode = ncp_filp->f_path.dentry->d_inode;
+       sock_inode = file_inode(ncp_filp);
        if (!S_ISSOCK(sock_inode->i_mode))
                goto out_fput;
        sock = SOCKET_I(sock_inode);
@@ -577,7 +577,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
                if (!server->info_filp)
                        goto out_bdi;
                error = -ENOTSOCK;
-               sock_inode = server->info_filp->f_path.dentry->d_inode;
+               sock_inode = file_inode(server->info_filp);
                if (!S_ISSOCK(sock_inode->i_mode))
                        goto out_fput2;
                info_sock = SOCKET_I(sock_inode);
index d44318d..60426cc 100644 (file)
@@ -811,7 +811,7 @@ outrel:
 
 long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ncp_server *server = NCP_SERVER(inode);
        kuid_t uid = current_uid();
        int need_drop_write = 0;
@@ -822,7 +822,7 @@ long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case NCP_IOC_CONN_LOGGED_IN:
        case NCP_IOC_SETROOT:
                if (!capable(CAP_SYS_ADMIN)) {
-                       ret = -EACCES;
+                       ret = -EPERM;
                        goto out;
                }
                break;
index 63d14a9..ee24df5 100644 (file)
@@ -105,7 +105,7 @@ static const struct vm_operations_struct ncp_file_mmap =
 /* This is used for a general mmap of a ncp file */
 int ncp_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        
        DPRINTK("ncp_mmap: called\n");
 
index 862a2f1..5f7b053 100644 (file)
@@ -128,10 +128,13 @@ int nfs_cache_register_net(struct net *net, struct cache_detail *cd)
        struct super_block *pipefs_sb;
        int ret = 0;
 
+       sunrpc_init_cache_detail(cd);
        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
                ret = nfs_cache_register_sb(pipefs_sb, cd);
                rpc_put_sb_net(net);
+               if (ret)
+                       sunrpc_destroy_cache_detail(cd);
        }
        return ret;
 }
@@ -151,14 +154,5 @@ void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd)
                nfs_cache_unregister_sb(pipefs_sb, cd);
                rpc_put_sb_net(net);
        }
-}
-
-void nfs_cache_init(struct cache_detail *cd)
-{
-       sunrpc_init_cache_detail(cd);
-}
-
-void nfs_cache_destroy(struct cache_detail *cd)
-{
        sunrpc_destroy_cache_detail(cd);
 }
index 317db95..4116d2c 100644 (file)
@@ -23,8 +23,6 @@ extern struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void);
 extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
 extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
 
-extern void nfs_cache_init(struct cache_detail *cd);
-extern void nfs_cache_destroy(struct cache_detail *cd);
 extern int nfs_cache_register_net(struct net *net, struct cache_detail *cd);
 extern void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd);
 extern int nfs_cache_register_sb(struct super_block *sb,
index 9f3c664..84d8eae 100644 (file)
@@ -197,7 +197,6 @@ error_0:
 EXPORT_SYMBOL_GPL(nfs_alloc_client);
 
 #if IS_ENABLED(CONFIG_NFS_V4)
-/* idr_remove_all is not needed as all id's are removed by nfs_put_client */
 void nfs_cleanup_cb_ident_idr(struct net *net)
 {
        struct nfs_net *nn = net_generic(net, nfs_net_id);
index 1b2d7eb..f23f455 100644 (file)
@@ -281,7 +281,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
 
        for (i = 0; i < array->size; i++) {
                if (array->array[i].cookie == *desc->dir_cookie) {
-                       struct nfs_inode *nfsi = NFS_I(desc->file->f_path.dentry->d_inode);
+                       struct nfs_inode *nfsi = NFS_I(file_inode(desc->file));
                        struct nfs_open_dir_context *ctx = desc->file->private_data;
 
                        new_pos = desc->current_index + i;
@@ -629,7 +629,7 @@ out:
 static
 int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
 {
-       struct inode    *inode = desc->file->f_path.dentry->d_inode;
+       struct inode    *inode = file_inode(desc->file);
        int ret;
 
        ret = nfs_readdir_xdr_to_array(desc, page, inode);
@@ -660,7 +660,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
 static
 struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
 {
-       return read_cache_page(desc->file->f_path.dentry->d_inode->i_mapping,
+       return read_cache_page(file_inode(desc->file)->i_mapping,
                        desc->page_index, (filler_t *)nfs_readdir_filler, desc);
 }
 
@@ -764,7 +764,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
 {
        struct page     *page = NULL;
        int             status;
-       struct inode *inode = desc->file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(desc->file);
        struct nfs_open_dir_context *ctx = desc->file->private_data;
 
        dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
@@ -1135,6 +1135,45 @@ out_error:
        return error;
 }
 
+/*
+ * A weaker form of d_revalidate for revalidating just the dentry->d_inode
+ * when we don't really care about the dentry name. This is called when a
+ * pathwalk ends on a dentry that was not found via a normal lookup in the
+ * parent dir (e.g.: ".", "..", procfs symlinks or mountpoint traversals).
+ *
+ * In this situation, we just want to verify that the inode itself is OK
+ * since the dentry might have changed on the server.
+ */
+static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       int error;
+       struct inode *inode = dentry->d_inode;
+
+       /*
+        * I believe we can only get a negative dentry here in the case of a
+        * procfs-style symlink. Just assume it's correct for now, but we may
+        * eventually need to do something more here.
+        */
+       if (!inode) {
+               dfprintk(LOOKUPCACHE, "%s: %s/%s has negative inode\n",
+                               __func__, dentry->d_parent->d_name.name,
+                               dentry->d_name.name);
+               return 1;
+       }
+
+       if (is_bad_inode(inode)) {
+               dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
+                               __func__, dentry->d_parent->d_name.name,
+                               dentry->d_name.name);
+               return 0;
+       }
+
+       error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+       dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
+                       __func__, inode->i_ino, error ? "invalid" : "valid");
+       return !error;
+}
+
 /*
  * This is called from dput() when d_count is going to 0.
  */
@@ -1202,6 +1241,7 @@ static void nfs_d_release(struct dentry *dentry)
 
 const struct dentry_operations nfs_dentry_operations = {
        .d_revalidate   = nfs_lookup_revalidate,
+       .d_weak_revalidate      = nfs_weak_revalidate,
        .d_delete       = nfs_dentry_delete,
        .d_iput         = nfs_dentry_iput,
        .d_automount    = nfs_d_automount,
index ca4b11e..9455270 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/module.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/dns_resolver.h>
 #include "dns_resolve.h"
 
@@ -42,6 +43,7 @@ EXPORT_SYMBOL_GPL(nfs_dns_resolve_name);
 #include <linux/seq_file.h>
 #include <linux/inet.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/cache.h>
 #include <linux/sunrpc/svcauth.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
@@ -142,7 +144,7 @@ static int nfs_dns_upcall(struct cache_detail *cd,
 
        ret = nfs_cache_upcall(cd, key->hostname);
        if (ret)
-               ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request);
+               ret = sunrpc_cache_pipe_upcall(cd, ch);
        return ret;
 }
 
@@ -351,60 +353,47 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name,
 }
 EXPORT_SYMBOL_GPL(nfs_dns_resolve_name);
 
+static struct cache_detail nfs_dns_resolve_template = {
+       .owner          = THIS_MODULE,
+       .hash_size      = NFS_DNS_HASHTBL_SIZE,
+       .name           = "dns_resolve",
+       .cache_put      = nfs_dns_ent_put,
+       .cache_upcall   = nfs_dns_upcall,
+       .cache_request  = nfs_dns_request,
+       .cache_parse    = nfs_dns_parse,
+       .cache_show     = nfs_dns_show,
+       .match          = nfs_dns_match,
+       .init           = nfs_dns_ent_init,
+       .update         = nfs_dns_ent_update,
+       .alloc          = nfs_dns_ent_alloc,
+};
+
+
 int nfs_dns_resolver_cache_init(struct net *net)
 {
-       int err = -ENOMEM;
+       int err;
        struct nfs_net *nn = net_generic(net, nfs_net_id);
-       struct cache_detail *cd;
-       struct cache_head **tbl;
 
-       cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL);
-       if (cd == NULL)
-               goto err_cd;
-
-       tbl = kzalloc(NFS_DNS_HASHTBL_SIZE * sizeof(struct cache_head *),
-                       GFP_KERNEL);
-       if (tbl == NULL)
-               goto err_tbl;
-
-       cd->owner = THIS_MODULE,
-       cd->hash_size = NFS_DNS_HASHTBL_SIZE,
-       cd->hash_table = tbl,
-       cd->name = "dns_resolve",
-       cd->cache_put = nfs_dns_ent_put,
-       cd->cache_upcall = nfs_dns_upcall,
-       cd->cache_parse = nfs_dns_parse,
-       cd->cache_show = nfs_dns_show,
-       cd->match = nfs_dns_match,
-       cd->init = nfs_dns_ent_init,
-       cd->update = nfs_dns_ent_update,
-       cd->alloc = nfs_dns_ent_alloc,
-
-       nfs_cache_init(cd);
-       err = nfs_cache_register_net(net, cd);
+       nn->nfs_dns_resolve = cache_create_net(&nfs_dns_resolve_template, net);
+       if (IS_ERR(nn->nfs_dns_resolve))
+               return PTR_ERR(nn->nfs_dns_resolve);
+
+       err = nfs_cache_register_net(net, nn->nfs_dns_resolve);
        if (err)
                goto err_reg;
-       nn->nfs_dns_resolve = cd;
        return 0;
 
 err_reg:
-       nfs_cache_destroy(cd);
-       kfree(cd->hash_table);
-err_tbl:
-       kfree(cd);
-err_cd:
+       cache_destroy_net(nn->nfs_dns_resolve, net);
        return err;
 }
 
 void nfs_dns_resolver_cache_destroy(struct net *net)
 {
        struct nfs_net *nn = net_generic(net, nfs_net_id);
-       struct cache_detail *cd = nn->nfs_dns_resolve;
 
-       nfs_cache_unregister_net(net, cd);
-       nfs_cache_destroy(cd);
-       kfree(cd->hash_table);
-       kfree(cd);
+       nfs_cache_unregister_net(net, nn->nfs_dns_resolve);
+       cache_destroy_net(nn->nfs_dns_resolve, net);
 }
 
 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
index 3c2b893..29f4a48 100644 (file)
@@ -292,7 +292,7 @@ static int
 nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        int ret;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        do {
                ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
index b9623d1..dc0f98d 100644 (file)
@@ -765,7 +765,7 @@ out:
 static ssize_t
 idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 {
-       struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
+       struct rpc_inode *rpci = RPC_I(file_inode(filp));
        struct idmap *idmap = (struct idmap *)rpci->private;
        struct key_construction *cons;
        struct idmap_msg im;
index 468ba8b..1f94167 100644 (file)
@@ -237,6 +237,8 @@ nfs_find_actor(struct inode *inode, void *opaque)
 
        if (NFS_FILEID(inode) != fattr->fileid)
                return 0;
+       if ((S_IFMT & inode->i_mode) != (S_IFMT & fattr->mode))
+               return 0;
        if (nfs_compare_fh(NFS_FH(inode), fh))
                return 0;
        if (is_bad_inode(inode) || NFS_STALE(inode))
@@ -711,7 +713,7 @@ EXPORT_SYMBOL_GPL(put_nfs_open_context);
  */
 void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct nfs_inode *nfsi = NFS_I(inode);
 
        filp->private_data = get_nfs_open_context(ctx);
@@ -744,7 +746,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
 
 static void nfs_file_clear_open_context(struct file *filp)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct nfs_open_context *ctx = nfs_file_open_context(filp);
 
        if (ctx) {
index 70efb63..43ea96c 100644 (file)
@@ -872,7 +872,7 @@ static void nfs3_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
 static int
 nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
 }
index 2e9779b..ac4fc9a 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/nfs_idmap.h>
 #include <linux/nfs_mount.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/auth.h>
 #include <linux/sunrpc/xprt.h>
 #include <linux/sunrpc/bc_xprt.h>
@@ -29,15 +30,14 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
 
        if (clp->rpc_ops->version != 4 || minorversion != 0)
                return ret;
-retry:
-       if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL))
-               return -ENOMEM;
+       idr_preload(GFP_KERNEL);
        spin_lock(&nn->nfs_client_lock);
-       ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident);
+       ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
+       if (ret >= 0)
+               clp->cl_cb_ident = ret;
        spin_unlock(&nn->nfs_client_lock);
-       if (ret == -EAGAIN)
-               goto retry;
-       return ret;
+       idr_preload_end();
+       return ret < 0 ? ret : 0;
 }
 
 #ifdef CONFIG_NFS_V4_1
index 08ddccc..13e6bb3 100644 (file)
@@ -94,7 +94,7 @@ static int
 nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        int ret;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        do {
                ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
index 194c484..49eeb04 100644 (file)
@@ -99,7 +99,8 @@ static void filelayout_reset_write(struct nfs_write_data *data)
 
                task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
-                                                       hdr->completion_ops);
+                                                       hdr->completion_ops,
+                                                       hdr->dreq);
        }
 }
 
@@ -119,7 +120,8 @@ static void filelayout_reset_read(struct nfs_read_data *data)
 
                task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
-                                                       hdr->completion_ops);
+                                                       hdr->completion_ops,
+                                                       hdr->dreq);
        }
 }
 
index 8c07241..b8da955 100644 (file)
@@ -36,7 +36,7 @@
  * Default data server connection timeout and retrans vaules.
  * Set by module paramters dataserver_timeo and dataserver_retrans.
  */
-#define NFS4_DEF_DS_TIMEO   60
+#define NFS4_DEF_DS_TIMEO   600 /* in tenths of a second */
 #define NFS4_DEF_DS_RETRANS 5
 
 /*
index b720064..1fe284f 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
+#include <linux/sunrpc/addr.h>
 
 #include "internal.h"
 #include "nfs4session.h"
index 1e09eb7..0dd7660 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/vfs.h>
 #include <linux/inet.h>
 #include "internal.h"
index eae83bf..b2671cb 100644 (file)
@@ -93,6 +93,8 @@ static int nfs4_map_errors(int err)
                return err;
        switch (err) {
        case -NFS4ERR_RESOURCE:
+       case -NFS4ERR_LAYOUTTRYLATER:
+       case -NFS4ERR_RECALLCONFLICT:
                return -EREMOTEIO;
        case -NFS4ERR_WRONGSEC:
                return -EPERM;
@@ -1158,6 +1160,7 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
                        data->o_arg.fmode);
        iput(inode);
 out:
+       nfs_release_seqid(data->o_arg.seqid);
        return state;
 err_put_inode:
        iput(inode);
@@ -6045,6 +6048,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layout_hdr *lo;
        struct nfs4_state *state = NULL;
+       unsigned long timeo, giveup;
 
        dprintk("--> %s\n", __func__);
 
@@ -6056,7 +6060,10 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
                goto out;
        case -NFS4ERR_LAYOUTTRYLATER:
        case -NFS4ERR_RECALLCONFLICT:
-               task->tk_status = -NFS4ERR_DELAY;
+               timeo = rpc_get_timeout(task->tk_client);
+               giveup = lgp->args.timestamp + timeo;
+               if (time_after(giveup, jiffies))
+                       task->tk_status = -NFS4ERR_DELAY;
                break;
        case -NFS4ERR_EXPIRED:
        case -NFS4ERR_BAD_STATEID:
@@ -6129,11 +6136,13 @@ static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
 static void nfs4_layoutget_release(void *calldata)
 {
        struct nfs4_layoutget *lgp = calldata;
-       struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+       struct inode *inode = lgp->args.inode;
+       struct nfs_server *server = NFS_SERVER(inode);
        size_t max_pages = max_response_pages(server);
 
        dprintk("--> %s\n", __func__);
        nfs4_free_pages(lgp->args.layout.pages, max_pages);
+       pnfs_put_layout_hdr(NFS_I(inode)->layout);
        put_nfs_open_context(lgp->args.ctx);
        kfree(calldata);
        dprintk("<-- %s\n", __func__);
@@ -6148,7 +6157,8 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
 struct pnfs_layout_segment *
 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
 {
-       struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+       struct inode *inode = lgp->args.inode;
+       struct nfs_server *server = NFS_SERVER(inode);
        size_t max_pages = max_response_pages(server);
        struct rpc_task *task;
        struct rpc_message msg = {
@@ -6174,10 +6184,15 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
                return ERR_PTR(-ENOMEM);
        }
        lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+       lgp->args.timestamp = jiffies;
 
        lgp->res.layoutp = &lgp->args.layout;
        lgp->res.seq_res.sr_slot = NULL;
        nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
+
+       /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
+       pnfs_get_layout_hdr(NFS_I(inode)->layout);
+
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return ERR_CAST(task);
index 84d2e9e..569b166 100644 (file)
@@ -28,7 +28,7 @@ static struct file_system_type nfs4_remote_fs_type = {
        .name           = "nfs4",
        .mount          = nfs4_remote_mount,
        .kill_sb        = nfs_kill_super,
-       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
 };
 
 static struct file_system_type nfs4_remote_referral_fs_type = {
@@ -36,7 +36,7 @@ static struct file_system_type nfs4_remote_referral_fs_type = {
        .name           = "nfs4",
        .mount          = nfs4_remote_referral_mount,
        .kill_sb        = nfs_kill_super,
-       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
 };
 
 struct file_system_type nfs4_referral_fs_type = {
@@ -44,7 +44,7 @@ struct file_system_type nfs4_referral_fs_type = {
        .name           = "nfs4",
        .mount          = nfs4_referral_mount,
        .kill_sb        = nfs_kill_super,
-       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
 };
 
 static const struct super_operations nfs4_sops = {
index 6be70f6..48ac5aa 100644 (file)
@@ -1181,7 +1181,7 @@ pnfs_update_layout(struct inode *ino,
        struct nfs_client *clp = server->nfs_client;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg = NULL;
-       bool first = false;
+       bool first;
 
        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                goto out;
@@ -1215,10 +1215,9 @@ pnfs_update_layout(struct inode *ino,
                goto out_unlock;
        atomic_inc(&lo->plh_outstanding);
 
-       if (list_empty(&lo->plh_segs))
-               first = true;
-
+       first = list_empty(&lo->plh_layouts) ? true : false;
        spin_unlock(&ino->i_lock);
+
        if (first) {
                /* The lo must be on the clp list if there is any
                 * chance of a CB_LAYOUTRECALL(FILE) coming in.
@@ -1422,13 +1421,15 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
 
 int pnfs_write_done_resend_to_mds(struct inode *inode,
                                struct list_head *head,
-                               const struct nfs_pgio_completion_ops *compl_ops)
+                               const struct nfs_pgio_completion_ops *compl_ops,
+                               struct nfs_direct_req *dreq)
 {
        struct nfs_pageio_descriptor pgio;
        LIST_HEAD(failed);
 
        /* Resend all requests through the MDS */
        nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
+       pgio.pg_dreq = dreq;
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);
 
@@ -1463,7 +1464,8 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
                data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
-                                                       hdr->completion_ops);
+                                                       hdr->completion_ops,
+                                                       hdr->dreq);
 }
 
 /*
@@ -1578,13 +1580,15 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
 
 int pnfs_read_done_resend_to_mds(struct inode *inode,
                                struct list_head *head,
-                               const struct nfs_pgio_completion_ops *compl_ops)
+                               const struct nfs_pgio_completion_ops *compl_ops,
+                               struct nfs_direct_req *dreq)
 {
        struct nfs_pageio_descriptor pgio;
        LIST_HEAD(failed);
 
        /* Resend all requests through the MDS */
        nfs_pageio_init_read(&pgio, inode, compl_ops);
+       pgio.pg_dreq = dreq;
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);
 
@@ -1615,7 +1619,8 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
                data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
-                                                       hdr->completion_ops);
+                                                       hdr->completion_ops,
+                                                       hdr->dreq);
 }
 
 /*
index 97cb358..94ba804 100644 (file)
@@ -230,9 +230,11 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
 
 void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
 int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head,
-                       const struct nfs_pgio_completion_ops *compl_ops);
+                       const struct nfs_pgio_completion_ops *compl_ops,
+                       struct nfs_direct_req *dreq);
 int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head,
-                       const struct nfs_pgio_completion_ops *compl_ops);
+                       const struct nfs_pgio_completion_ops *compl_ops,
+                       struct nfs_direct_req *dreq);
 struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
 
 /* nfs4_deviceid_flags */
index d35b62e..6da209b 100644 (file)
@@ -77,9 +77,8 @@ _lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
                 long hash)
 {
        struct nfs4_deviceid_node *d;
-       struct hlist_node *n;
 
-       hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+       hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
                if (d->ld == ld && d->nfs_client == clp &&
                    !memcmp(&d->deviceid, id, sizeof(*id))) {
                        if (atomic_read(&d->ref))
@@ -248,12 +247,11 @@ static void
 _deviceid_purge_client(const struct nfs_client *clp, long hash)
 {
        struct nfs4_deviceid_node *d;
-       struct hlist_node *n;
        HLIST_HEAD(tmp);
 
        spin_lock(&nfs4_deviceid_lock);
        rcu_read_lock();
-       hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+       hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
                if (d->nfs_client == clp && atomic_read(&d->ref)) {
                        hlist_del_init_rcu(&d->node);
                        hlist_add_head(&d->tmpnode, &tmp);
@@ -291,12 +289,11 @@ void
 nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
 {
        struct nfs4_deviceid_node *d;
-       struct hlist_node *n;
        int i;
 
        rcu_read_lock();
        for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){
-               hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
+               hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
                        if (d->nfs_client == clp)
                                set_bit(NFS_DEVICEID_INVALID, &d->flags);
        }
index f084dac..fc8de90 100644 (file)
@@ -662,7 +662,7 @@ nfs_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
 static int
 nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
 }
index befbae0..17b32b7 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/errno.h>
 #include <linux/unistd.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/metrics.h>
 #include <linux/sunrpc/xprtsock.h>
@@ -291,7 +292,7 @@ struct file_system_type nfs_fs_type = {
        .name           = "nfs",
        .mount          = nfs_fs_mount,
        .kill_sb        = nfs_kill_super,
-       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
 };
 EXPORT_SYMBOL_GPL(nfs_fs_type);
 
@@ -300,7 +301,7 @@ struct file_system_type nfs_xdev_fs_type = {
        .name           = "nfs",
        .mount          = nfs_xdev_mount,
        .kill_sb        = nfs_kill_super,
-       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
 };
 
 const struct super_operations nfs_sops = {
@@ -330,7 +331,7 @@ struct file_system_type nfs4_fs_type = {
        .name           = "nfs4",
        .mount          = nfs_fs_mount,
        .kill_sb        = nfs_kill_super,
-       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+       .fs_flags       = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
 };
 EXPORT_SYMBOL_GPL(nfs4_fs_type);
 
index d26a32f..1f1f38f 100644 (file)
@@ -335,20 +335,14 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
        struct inode *old_dir = data->old_dir;
        struct inode *new_dir = data->new_dir;
        struct dentry *old_dentry = data->old_dentry;
-       struct dentry *new_dentry = data->new_dentry;
 
        if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
                rpc_restart_call_prepare(task);
                return;
        }
 
-       if (task->tk_status != 0) {
+       if (task->tk_status != 0)
                nfs_cancel_async_unlink(old_dentry);
-               return;
-       }
-
-       d_drop(old_dentry);
-       d_drop(new_dentry);
 }
 
 /**
@@ -549,6 +543,18 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
        error = rpc_wait_for_completion_task(task);
        if (error == 0)
                error = task->tk_status;
+       switch (error) {
+       case 0:
+               /* The rename succeeded */
+               nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+               d_move(dentry, sdentry);
+               break;
+       case -ERESTARTSYS:
+               /* The result of the rename is unknown. Play it safe by
+                * forcing a new lookup */
+               d_drop(dentry);
+               d_drop(sdentry);
+       }
        rpc_put_task(task);
 out_dput:
        dput(sdentry);
index 93cc9d3..87fd141 100644 (file)
 
 /*
  * Representation of a reply cache entry.
+ *
+ * Note that we use a sockaddr_in6 to hold the address instead of the more
+ * typical sockaddr_storage. This is for space reasons, since sockaddr_storage
+ * is much larger than a sockaddr_in6.
  */
 struct svc_cacherep {
        struct hlist_node       c_hash;
@@ -20,11 +24,13 @@ struct svc_cacherep {
        unsigned char           c_state,        /* unused, inprog, done */
                                c_type,         /* status, buffer */
                                c_secure : 1;   /* req came from port < 1024 */
-       struct sockaddr_in      c_addr;
+       struct sockaddr_in6     c_addr;
        __be32                  c_xid;
        u32                     c_prot;
        u32                     c_proc;
        u32                     c_vers;
+       unsigned int            c_len;
+       __wsum                  c_csum;
        unsigned long           c_timestamp;
        union {
                struct kvec     u_vec;
@@ -46,8 +52,7 @@ enum {
 enum {
        RC_DROPIT,
        RC_REPLY,
-       RC_DOIT,
-       RC_INTR
+       RC_DOIT
 };
 
 /*
@@ -67,6 +72,12 @@ enum {
  */
 #define RC_DELAY               (HZ/5)
 
+/* Cache entries expire after this time period */
+#define RC_EXPIRE              (120 * HZ)
+
+/* Checksum this amount of the request */
+#define RC_CSUMLEN             (256U)
+
 int    nfsd_reply_cache_init(void);
 void   nfsd_reply_cache_shutdown(void);
 int    nfsd_cache_lookup(struct svc_rqst *);
index 5681c59..5f38ea3 100644 (file)
@@ -67,11 +67,6 @@ static void expkey_request(struct cache_detail *cd,
        (*bpp)[-1] = '\n';
 }
 
-static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-       return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
-}
-
 static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
                                            struct svc_expkey *old);
 static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *);
@@ -245,7 +240,7 @@ static struct cache_detail svc_expkey_cache_template = {
        .hash_size      = EXPKEY_HASHMAX,
        .name           = "nfsd.fh",
        .cache_put      = expkey_put,
-       .cache_upcall   = expkey_upcall,
+       .cache_request  = expkey_request,
        .cache_parse    = expkey_parse,
        .cache_show     = expkey_show,
        .match          = expkey_match,
@@ -315,6 +310,7 @@ static void svc_export_put(struct kref *ref)
        path_put(&exp->ex_path);
        auth_domain_put(exp->ex_client);
        nfsd4_fslocs_free(&exp->ex_fslocs);
+       kfree(exp->ex_uuid);
        kfree(exp);
 }
 
@@ -337,11 +333,6 @@ static void svc_export_request(struct cache_detail *cd,
        (*bpp)[-1] = '\n';
 }
 
-static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-       return sunrpc_cache_pipe_upcall(cd, h, svc_export_request);
-}
-
 static struct svc_export *svc_export_update(struct svc_export *new,
                                            struct svc_export *old);
 static struct svc_export *svc_export_lookup(struct svc_export *);
@@ -674,6 +665,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
        new->ex_fslocs.locations = NULL;
        new->ex_fslocs.locations_count = 0;
        new->ex_fslocs.migrated = 0;
+       new->ex_uuid = NULL;
        new->cd = item->cd;
 }
 
@@ -715,7 +707,7 @@ static struct cache_detail svc_export_cache_template = {
        .hash_size      = EXPORT_HASHMAX,
        .name           = "nfsd.export",
        .cache_put      = svc_export_put,
-       .cache_upcall   = svc_export_upcall,
+       .cache_request  = svc_export_request,
        .cache_parse    = svc_export_parse,
        .cache_show     = svc_export_show,
        .match          = svc_export_match,
index e761ee9..d620e7f 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/debugfs.h>
 #include <linux/module.h>
 #include <linux/nsproxy.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <asm/uaccess.h>
 
 #include "state.h"
@@ -101,7 +101,7 @@ static ssize_t fault_inject_read(struct file *file, char __user *buf,
        loff_t pos = *ppos;
 
        if (!pos)
-               nfsd_inject_get(file->f_dentry->d_inode->i_private, &val);
+               nfsd_inject_get(file_inode(file)->i_private, &val);
        size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val);
 
        if (pos < 0)
@@ -133,10 +133,10 @@ static ssize_t fault_inject_write(struct file *file, const char __user *buf,
 
        size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa));
        if (size > 0)
-               nfsd_inject_set_client(file->f_dentry->d_inode->i_private, &sa, size);
+               nfsd_inject_set_client(file_inode(file)->i_private, &sa, size);
        else {
                val = simple_strtoll(write_buf, NULL, 0);
-               nfsd_inject_set(file->f_dentry->d_inode->i_private, val);
+               nfsd_inject_set(file_inode(file)->i_private, val);
        }
        return len; /* on success, claim we got the whole input */
 }
index 9170861..95d76dc 100644 (file)
@@ -45,6 +45,10 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp,
                RETURN_STATUS(nfserr_inval);
        resp->mask = argp->mask;
 
+       nfserr = fh_getattr(fh, &resp->stat);
+       if (nfserr)
+               goto fail;
+
        if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
                acl = nfsd_get_posix_acl(fh, ACL_TYPE_ACCESS);
                if (IS_ERR(acl)) {
@@ -115,6 +119,9 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
                nfserr = nfserrno( nfsd_set_posix_acl(
                        fh, ACL_TYPE_DEFAULT, argp->acl_default) );
        }
+       if (!nfserr) {
+               nfserr = fh_getattr(fh, &resp->stat);
+       }
 
        /* argp->acl_{access,default} may have been allocated in
           nfssvc_decode_setaclargs. */
@@ -129,10 +136,15 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
 static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp,
                struct nfsd_fhandle *argp, struct nfsd_attrstat *resp)
 {
+       __be32 nfserr;
        dprintk("nfsd: GETATTR  %s\n", SVCFH_fmt(&argp->fh));
 
        fh_copy(&resp->fh, &argp->fh);
-       return fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
+       nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
+       if (nfserr)
+               return nfserr;
+       nfserr = fh_getattr(&resp->fh, &resp->stat);
+       return nfserr;
 }
 
 /*
@@ -150,6 +162,9 @@ static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessarg
        fh_copy(&resp->fh, &argp->fh);
        resp->access = argp->access;
        nfserr = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
+       if (nfserr)
+               return nfserr;
+       nfserr = fh_getattr(&resp->fh, &resp->stat);
        return nfserr;
 }
 
@@ -243,7 +258,7 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
                return 0;
        inode = dentry->d_inode;
 
-       p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
+       p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
        *p++ = htonl(resp->mask);
        if (!xdr_ressize_check(rqstp, p))
                return 0;
@@ -274,7 +289,7 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
 static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
                struct nfsd_attrstat *resp)
 {
-       p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
+       p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
        return xdr_ressize_check(rqstp, p);
 }
 
@@ -282,7 +297,7 @@ static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
 static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
                struct nfsd3_accessres *resp)
 {
-       p = nfs2svc_encode_fattr(rqstp, p, &resp->fh);
+       p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
        *p++ = htonl(resp->access);
        return xdr_ressize_check(rqstp, p);
 }
index 1fc02df..4012899 100644 (file)
@@ -43,7 +43,6 @@ static __be32
 nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
                                           struct nfsd3_attrstat *resp)
 {
-       int     err;
        __be32  nfserr;
 
        dprintk("nfsd: GETATTR(3)  %s\n",
@@ -55,9 +54,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
        if (nfserr)
                RETURN_STATUS(nfserr);
 
-       err = vfs_getattr(resp->fh.fh_export->ex_path.mnt,
-                         resp->fh.fh_dentry, &resp->stat);
-       nfserr = nfserrno(err);
+       nfserr = fh_getattr(&resp->fh, &resp->stat);
 
        RETURN_STATUS(nfserr);
 }
index 925c944..14d9ecb 100644 (file)
@@ -11,6 +11,7 @@
 #include "xdr3.h"
 #include "auth.h"
 #include "netns.h"
+#include "vfs.h"
 
 #define NFSDDBG_FACILITY               NFSDDBG_XDR
 
@@ -206,10 +207,10 @@ encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
 {
        struct dentry *dentry = fhp->fh_dentry;
        if (dentry && dentry->d_inode) {
-               int err;
+               __be32 err;
                struct kstat stat;
 
-               err = vfs_getattr(fhp->fh_export->ex_path.mnt, dentry, &stat);
+               err = fh_getattr(fhp, &stat);
                if (!err) {
                        *p++ = xdr_one;         /* attributes follow */
                        lease_get_mtime(dentry->d_inode, &stat.mtime);
@@ -256,13 +257,12 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
  */
 void fill_post_wcc(struct svc_fh *fhp)
 {
-       int err;
+       __be32 err;
 
        if (fhp->fh_post_saved)
                printk("nfsd: inode locked twice during operation.\n");
 
-       err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
-                       &fhp->fh_post_attr);
+       err = fh_getattr(fhp, &fhp->fh_post_attr);
        fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
        if (err) {
                fhp->fh_post_saved = 0;
index 0ce1234..4832fd8 100644 (file)
@@ -139,12 +139,6 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
        (*bpp)[-1] = '\n';
 }
 
-static int
-idtoname_upcall(struct cache_detail *cd, struct cache_head *ch)
-{
-       return sunrpc_cache_pipe_upcall(cd, ch, idtoname_request);
-}
-
 static int
 idtoname_match(struct cache_head *ca, struct cache_head *cb)
 {
@@ -192,7 +186,7 @@ static struct cache_detail idtoname_cache_template = {
        .hash_size      = ENT_HASHMAX,
        .name           = "nfs4.idtoname",
        .cache_put      = ent_put,
-       .cache_upcall   = idtoname_upcall,
+       .cache_request  = idtoname_request,
        .cache_parse    = idtoname_parse,
        .cache_show     = idtoname_show,
        .warn_no_listener = warn_no_idmapd,
@@ -320,12 +314,6 @@ nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
        (*bpp)[-1] = '\n';
 }
 
-static int
-nametoid_upcall(struct cache_detail *cd, struct cache_head *ch)
-{
-       return sunrpc_cache_pipe_upcall(cd, ch, nametoid_request);
-}
-
 static int
 nametoid_match(struct cache_head *ca, struct cache_head *cb)
 {
@@ -365,7 +353,7 @@ static struct cache_detail nametoid_cache_template = {
        .hash_size      = ENT_HASHMAX,
        .name           = "nfs4.nametoid",
        .cache_put      = ent_put,
-       .cache_upcall   = nametoid_upcall,
+       .cache_request  = nametoid_request,
        .cache_parse    = nametoid_parse,
        .cache_show     = nametoid_show,
        .warn_no_listener = warn_no_idmapd,
index 9d1c5db..ae73175 100644 (file)
@@ -993,14 +993,15 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (!buf)
                return nfserr_jukebox;
 
+       p = buf;
        status = nfsd4_encode_fattr(&cstate->current_fh,
                                    cstate->current_fh.fh_export,
-                                   cstate->current_fh.fh_dentry, buf,
-                                   &count, verify->ve_bmval,
+                                   cstate->current_fh.fh_dentry, &p,
+                                   count, verify->ve_bmval,
                                    rqstp, 0);
 
        /* this means that nfsd4_encode_fattr() ran out of space */
-       if (status == nfserr_resource && count == 0)
+       if (status == nfserr_resource)
                status = nfserr_not_same;
        if (status)
                goto out_kfree;
index 4914af4..899ca26 100644 (file)
@@ -1185,6 +1185,12 @@ bin_to_hex_dup(const unsigned char *src, int srclen)
 static int
 nfsd4_umh_cltrack_init(struct net __attribute__((unused)) *net)
 {
+       /* XXX: The usermode helper is not working in a container yet. */
+       if (net != &init_net) {
+               WARN(1, KERN_ERR "NFSD: attempt to initialize umh client "
+                       "tracking in a container!\n");
+               return -EINVAL;
+       }
        return nfsd4_umh_cltrack_upcall("init", NULL, NULL);
 }
 
index 9e7103b..16d39c6 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/pagemap.h>
 #include <linux/ratelimit.h>
 #include <linux/sunrpc/svcauth_gss.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include "xdr4.h"
 #include "vfs.h"
 #include "current_stateid.h"
@@ -261,33 +261,46 @@ static inline int get_new_stid(struct nfs4_stid *stid)
        return new_stid;
 }
 
-static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
+static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
+kmem_cache *slab)
 {
-       stateid_t *s = &stid->sc_stateid;
+       struct idr *stateids = &cl->cl_stateids;
+       static int min_stateid = 0;
+       struct nfs4_stid *stid;
        int new_id;
 
-       stid->sc_type = type;
+       stid = kmem_cache_alloc(slab, GFP_KERNEL);
+       if (!stid)
+               return NULL;
+
+       if (!idr_pre_get(stateids, GFP_KERNEL))
+               goto out_free;
+       if (idr_get_new_above(stateids, stid, min_stateid, &new_id))
+               goto out_free;
        stid->sc_client = cl;
-       s->si_opaque.so_clid = cl->cl_clientid;
-       new_id = get_new_stid(stid);
-       s->si_opaque.so_id = (u32)new_id;
+       stid->sc_type = 0;
+       stid->sc_stateid.si_opaque.so_id = new_id;
+       stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
        /* Will be incremented before return to client: */
-       s->si_generation = 0;
-}
-
-static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
-{
-       struct idr *stateids = &cl->cl_stateids;
+       stid->sc_stateid.si_generation = 0;
 
-       if (!idr_pre_get(stateids, GFP_KERNEL))
-               return NULL;
        /*
-        * Note: if we fail here (or any time between now and the time
-        * we actually get the new idr), we won't need to undo the idr
-        * preallocation, since the idr code caps the number of
-        * preallocated entries.
+        * It shouldn't be a problem to reuse an opaque stateid value.
+        * I don't think it is for 4.1.  But with 4.0 I worry that, for
+        * example, a stray write retransmission could be accepted by
+        * the server when it should have been rejected.  Therefore,
+        * adopt a trick from the sctp code to attempt to maximize the
+        * amount of time until an id is reused, by ensuring they always
+        * "increase" (mod INT_MAX):
         */
-       return kmem_cache_alloc(slab, GFP_KERNEL);
+
+       min_stateid = new_id+1;
+       if (min_stateid == INT_MAX)
+               min_stateid = 0;
+       return stid;
+out_free:
+       kfree(stid);
+       return NULL;
 }
 
 static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
@@ -316,7 +329,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
        if (dp == NULL)
                return dp;
-       init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
+       dp->dl_stid.sc_type = NFS4_DELEG_STID;
        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -337,13 +350,21 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
        return dp;
 }
 
+static void free_stid(struct nfs4_stid *s, struct kmem_cache *slab)
+{
+       struct idr *stateids = &s->sc_client->cl_stateids;
+
+       idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
+       kmem_cache_free(slab, s);
+}
+
 void
 nfs4_put_delegation(struct nfs4_delegation *dp)
 {
        if (atomic_dec_and_test(&dp->dl_count)) {
                dprintk("NFSD: freeing dp %p\n",dp);
                put_nfs4_file(dp->dl_file);
-               kmem_cache_free(deleg_slab, dp);
+               free_stid(&dp->dl_stid, deleg_slab);
                num_delegations--;
        }
 }
@@ -360,9 +381,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
 
 static void unhash_stid(struct nfs4_stid *s)
 {
-       struct idr *stateids = &s->sc_client->cl_stateids;
-
-       idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
+       s->sc_type = 0;
 }
 
 /* Called under the state lock. */
@@ -519,7 +538,7 @@ static void close_generic_stateid(struct nfs4_ol_stateid *stp)
 
 static void free_generic_stateid(struct nfs4_ol_stateid *stp)
 {
-       kmem_cache_free(stateid_slab, stp);
+       free_stid(&stp->st_stid, stateid_slab);
 }
 
 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
@@ -905,7 +924,7 @@ static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan,
 
        new = __alloc_session(slotsize, numslots);
        if (!new) {
-               nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
+               nfsd4_put_drc_mem(slotsize, numslots);
                return NULL;
        }
        init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize, nn);
@@ -1048,7 +1067,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
 static inline void
 free_client(struct nfs4_client *clp)
 {
-       struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
+       struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);
 
        lockdep_assert_held(&nn->client_lock);
        while (!list_empty(&clp->cl_sessions)) {
@@ -1060,6 +1079,7 @@ free_client(struct nfs4_client *clp)
        }
        free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_name.data);
+       idr_destroy(&clp->cl_stateids);
        kfree(clp);
 }
 
@@ -1258,7 +1278,12 @@ static void gen_confirm(struct nfs4_client *clp)
 
 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
 {
-       return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
+       struct nfs4_stid *ret;
+
+       ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
+       if (!ret || !ret->sc_type)
+               return NULL;
+       return ret;
 }
 
 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
@@ -1844,11 +1869,12 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 
        /* cache solo and embedded create sessions under the state lock */
        nfsd4_cache_create_session(cr_ses, cs_slot, status);
-out:
        nfs4_unlock_state();
+out:
        dprintk("%s returns %d\n", __func__, ntohl(status));
        return status;
 out_free_conn:
+       nfs4_unlock_state();
        free_conn(conn);
 out_free_session:
        __free_session(new);
@@ -2443,9 +2469,8 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
 
 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
        struct nfs4_openowner *oo = open->op_openowner;
-       struct nfs4_client *clp = oo->oo_owner.so_client;
 
-       init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
+       stp->st_stid.sc_type = NFS4_OPEN_STID;
        INIT_LIST_HEAD(&stp->st_lockowners);
        list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
        list_add(&stp->st_perfile, &fp->fi_stateids);
@@ -4031,7 +4056,7 @@ alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct
        stp = nfs4_alloc_stateid(clp);
        if (stp == NULL)
                return NULL;
-       init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
+       stp->st_stid.sc_type = NFS4_LOCK_STID;
        list_add(&stp->st_perfile, &fp->fi_stateids);
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
        stp->st_stateowner = &lo->lo_owner;
@@ -4913,16 +4938,6 @@ nfs4_state_start_net(struct net *net)
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        int ret;
 
-       /*
-        * FIXME: For now, we hang most of the pernet global stuff off of
-        * init_net until nfsd is fully containerized. Eventually, we'll
-        * need to pass a net pointer into this function, take a reference
-        * to that instead and then do most of the rest of this on a per-net
-        * basis.
-        */
-       if (net != &init_net)
-               return -EINVAL;
-
        ret = nfs4_state_create_net(net);
        if (ret)
                return ret;
index 2d1d06b..0116886 100644 (file)
@@ -2015,7 +2015,7 @@ static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
                if (path.dentry != path.mnt->mnt_root)
                        break;
        }
-       err = vfs_getattr(path.mnt, path.dentry, stat);
+       err = vfs_getattr(&path, stat);
        path_put(&path);
        return err;
 }
@@ -2024,12 +2024,11 @@ static int get_parent_attributes(struct svc_export *exp, struct kstat *stat)
  * Note: @fhp can be NULL; in this case, we might have to compose the filehandle
  * ourselves.
  *
- * @countp is the buffer size in _words_; upon successful return this becomes
- * replaced with the number of words written.
+ * countp is the buffer size in _words_
  */
 __be32
 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
-               struct dentry *dentry, __be32 *buffer, int *countp, u32 *bmval,
+               struct dentry *dentry, __be32 **buffer, int count, u32 *bmval,
                struct svc_rqst *rqstp, int ignore_crossmnt)
 {
        u32 bmval0 = bmval[0];
@@ -2038,12 +2037,12 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
        struct kstat stat;
        struct svc_fh tempfh;
        struct kstatfs statfs;
-       int buflen = *countp << 2;
+       int buflen = count << 2;
        __be32 *attrlenp;
        u32 dummy;
        u64 dummy64;
        u32 rdattr_err = 0;
-       __be32 *p = buffer;
+       __be32 *p = *buffer;
        __be32 status;
        int err;
        int aclsupport = 0;
@@ -2068,7 +2067,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
                        goto out;
        }
 
-       err = vfs_getattr(exp->ex_path.mnt, dentry, &stat);
+       err = vfs_getattr(&path, &stat);
        if (err)
                goto out_nfserr;
        if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
@@ -2447,7 +2446,7 @@ out_acl:
        }
 
        *attrlenp = htonl((char *)p - (char *)attrlenp - 4);
-       *countp = p - buffer;
+       *buffer = p;
        status = nfs_ok;
 
 out:
@@ -2459,7 +2458,6 @@ out_nfserr:
        status = nfserrno(err);
        goto out;
 out_resource:
-       *countp = 0;
        status = nfserr_resource;
        goto out;
 out_serverfault:
@@ -2478,7 +2476,7 @@ static inline int attributes_need_mount(u32 *bmval)
 
 static __be32
 nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
-               const char *name, int namlen, __be32 *p, int *buflen)
+               const char *name, int namlen, __be32 **p, int buflen)
 {
        struct svc_export *exp = cd->rd_fhp->fh_export;
        struct dentry *dentry;
@@ -2584,10 +2582,9 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        p = xdr_encode_hyper(p, NFS_OFFSET_MAX);    /* offset of next entry */
        p = xdr_encode_array(p, name, namlen);      /* name length & name */
 
-       nfserr = nfsd4_encode_dirent_fattr(cd, name, namlen, p, &buflen);
+       nfserr = nfsd4_encode_dirent_fattr(cd, name, namlen, &p, buflen);
        switch (nfserr) {
        case nfs_ok:
-               p += buflen;
                break;
        case nfserr_resource:
                nfserr = nfserr_toosmall;
@@ -2714,10 +2711,8 @@ nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
 
        buflen = resp->end - resp->p - (COMPOUND_ERR_SLACK_SPACE >> 2);
        nfserr = nfsd4_encode_fattr(fhp, fhp->fh_export, fhp->fh_dentry,
-                                   resp->p, &buflen, getattr->ga_bmval,
+                                   &resp->p, buflen, getattr->ga_bmval,
                                    resp->rqstp, 0);
-       if (!nfserr)
-               resp->p += buflen;
        return nfserr;
 }
 
index 2cbac34..62c1ee1 100644 (file)
@@ -9,22 +9,22 @@
  */
 
 #include <linux/slab.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/highmem.h>
+#include <net/checksum.h>
 
 #include "nfsd.h"
 #include "cache.h"
 
-/* Size of reply cache. Common values are:
- * 4.3BSD:     128
- * 4.4BSD:     256
- * Solaris2:   1024
- * DEC Unix:   512-4096
- */
-#define CACHESIZE              1024
+#define NFSDDBG_FACILITY       NFSDDBG_REPCACHE
+
 #define HASHSIZE               64
 
 static struct hlist_head *     cache_hash;
 static struct list_head        lru_head;
-static int                     cache_disabled = 1;
+static struct kmem_cache       *drc_slab;
+static unsigned int            num_drc_entries;
+static unsigned int            max_drc_entries;
 
 /*
  * Calculate the hash index from an XID.
@@ -37,6 +37,14 @@ static inline u32 request_hash(u32 xid)
 }
 
 static int     nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static void    cache_cleaner_func(struct work_struct *unused);
+static int     nfsd_reply_cache_shrink(struct shrinker *shrink,
+                                       struct shrink_control *sc);
+
+struct shrinker nfsd_reply_cache_shrinker = {
+       .shrink = nfsd_reply_cache_shrink,
+       .seeks  = 1,
+};
 
 /*
  * locking for the reply cache:
@@ -44,30 +52,86 @@ static int  nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
  * Otherwise, it when accessing _prev or _next, the lock must be held.
  */
 static DEFINE_SPINLOCK(cache_lock);
+static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 
-int nfsd_reply_cache_init(void)
+/*
+ * Put a cap on the size of the DRC based on the amount of available
+ * low memory in the machine.
+ *
+ *  64MB:    8192
+ * 128MB:   11585
+ * 256MB:   16384
+ * 512MB:   23170
+ *   1GB:   32768
+ *   2GB:   46340
+ *   4GB:   65536
+ *   8GB:   92681
+ *  16GB:  131072
+ *
+ * ...with a hard cap of 256k entries. In the worst case, each entry will be
+ * ~1k, so the above numbers should give a rough max of the amount of memory
+ * used in k.
+ */
+static unsigned int
+nfsd_cache_size_limit(void)
+{
+       unsigned int limit;
+       unsigned long low_pages = totalram_pages - totalhigh_pages;
+
+       limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
+       return min_t(unsigned int, limit, 256*1024);
+}
+
+static struct svc_cacherep *
+nfsd_reply_cache_alloc(void)
 {
        struct svc_cacherep     *rp;
-       int                     i;
 
-       INIT_LIST_HEAD(&lru_head);
-       i = CACHESIZE;
-       while (i) {
-               rp = kmalloc(sizeof(*rp), GFP_KERNEL);
-               if (!rp)
-                       goto out_nomem;
-               list_add(&rp->c_lru, &lru_head);
+       rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
+       if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
+               INIT_LIST_HEAD(&rp->c_lru);
                INIT_HLIST_NODE(&rp->c_hash);
-               i--;
        }
+       return rp;
+}
+
+static void
+nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
+{
+       if (rp->c_type == RC_REPLBUFF)
+               kfree(rp->c_replvec.iov_base);
+       hlist_del(&rp->c_hash);
+       list_del(&rp->c_lru);
+       --num_drc_entries;
+       kmem_cache_free(drc_slab, rp);
+}
+
+static void
+nfsd_reply_cache_free(struct svc_cacherep *rp)
+{
+       spin_lock(&cache_lock);
+       nfsd_reply_cache_free_locked(rp);
+       spin_unlock(&cache_lock);
+}
+
+int nfsd_reply_cache_init(void)
+{
+       register_shrinker(&nfsd_reply_cache_shrinker);
+       drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
+                                       0, 0, NULL);
+       if (!drc_slab)
+               goto out_nomem;
 
-       cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
+       cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
        if (!cache_hash)
                goto out_nomem;
 
-       cache_disabled = 0;
+       INIT_LIST_HEAD(&lru_head);
+       max_drc_entries = nfsd_cache_size_limit();
+       num_drc_entries = 0;
+
        return 0;
 out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
@@ -79,27 +143,33 @@ void nfsd_reply_cache_shutdown(void)
 {
        struct svc_cacherep     *rp;
 
+       unregister_shrinker(&nfsd_reply_cache_shrinker);
+       cancel_delayed_work_sync(&cache_cleaner);
+
        while (!list_empty(&lru_head)) {
                rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
-               if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
-                       kfree(rp->c_replvec.iov_base);
-               list_del(&rp->c_lru);
-               kfree(rp);
+               nfsd_reply_cache_free_locked(rp);
        }
 
-       cache_disabled = 1;
-
        kfree (cache_hash);
        cache_hash = NULL;
+
+       if (drc_slab) {
+               kmem_cache_destroy(drc_slab);
+               drc_slab = NULL;
+       }
 }
 
 /*
- * Move cache entry to end of LRU list
+ * Move cache entry to end of LRU list, and queue the cleaner to run if it's
+ * not already scheduled.
  */
 static void
 lru_put_end(struct svc_cacherep *rp)
 {
+       rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &lru_head);
+       schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 
 /*
@@ -112,83 +182,214 @@ hash_refile(struct svc_cacherep *rp)
        hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
 }
 
+static inline bool
+nfsd_cache_entry_expired(struct svc_cacherep *rp)
+{
+       return rp->c_state != RC_INPROG &&
+              time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
+}
+
+/*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+ */
+static void
+prune_cache_entries(void)
+{
+       struct svc_cacherep *rp, *tmp;
+
+       list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+               if (!nfsd_cache_entry_expired(rp) &&
+                   num_drc_entries <= max_drc_entries)
+                       break;
+               nfsd_reply_cache_free_locked(rp);
+       }
+
+       /*
+        * Conditionally rearm the job. If we cleaned out the list, then
+        * cancel any pending run (since there won't be any work to do).
+        * Otherwise, we rearm the job or modify the existing one to run in
+        * RC_EXPIRE since we just ran the pruner.
+        */
+       if (list_empty(&lru_head))
+               cancel_delayed_work(&cache_cleaner);
+       else
+               mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+}
+
+static void
+cache_cleaner_func(struct work_struct *unused)
+{
+       spin_lock(&cache_lock);
+       prune_cache_entries();
+       spin_unlock(&cache_lock);
+}
+
+static int
+nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+{
+       unsigned int num;
+
+       spin_lock(&cache_lock);
+       if (sc->nr_to_scan)
+               prune_cache_entries();
+       num = num_drc_entries;
+       spin_unlock(&cache_lock);
+
+       return num;
+}
+
+/*
+ * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
+ */
+static __wsum
+nfsd_cache_csum(struct svc_rqst *rqstp)
+{
+       int idx;
+       unsigned int base;
+       __wsum csum;
+       struct xdr_buf *buf = &rqstp->rq_arg;
+       const unsigned char *p = buf->head[0].iov_base;
+       size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
+                               RC_CSUMLEN);
+       size_t len = min(buf->head[0].iov_len, csum_len);
+
+       /* rq_arg.head first */
+       csum = csum_partial(p, len, 0);
+       csum_len -= len;
+
+       /* Continue into page array */
+       idx = buf->page_base / PAGE_SIZE;
+       base = buf->page_base & ~PAGE_MASK;
+       while (csum_len) {
+               p = page_address(buf->pages[idx]) + base;
+               len = min_t(size_t, PAGE_SIZE - base, csum_len);
+               csum = csum_partial(p, len, csum);
+               csum_len -= len;
+               base = 0;
+               ++idx;
+       }
+       return csum;
+}
+
+/*
+ * Search the request hash for an entry that matches the given rqstp.
+ * Must be called with cache_lock held. Returns the found entry or
+ * NULL on failure.
+ */
+static struct svc_cacherep *
+nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
+{
+       struct svc_cacherep     *rp;
+       struct hlist_head       *rh;
+       __be32                  xid = rqstp->rq_xid;
+       u32                     proto =  rqstp->rq_prot,
+                               vers = rqstp->rq_vers,
+                               proc = rqstp->rq_proc;
+
+       rh = &cache_hash[request_hash(xid)];
+       hlist_for_each_entry(rp, rh, c_hash) {
+               if (xid == rp->c_xid && proc == rp->c_proc &&
+                   proto == rp->c_prot && vers == rp->c_vers &&
+                   rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum &&
+                   rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
+                   rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
+                       return rp;
+       }
+       return NULL;
+}
+
 /*
  * Try to find an entry matching the current call in the cache. When none
- * is found, we grab the oldest unlocked entry off the LRU list.
- * Note that no operation within the loop may sleep.
+ * is found, we try to grab the oldest expired entry off the LRU list. If
+ * a suitable one isn't there, then drop the cache_lock and allocate a
+ * new one, then search again in case one got inserted while this thread
+ * didn't hold the lock.
  */
 int
 nfsd_cache_lookup(struct svc_rqst *rqstp)
 {
-       struct hlist_node       *hn;
-       struct hlist_head       *rh;
-       struct svc_cacherep     *rp;
+       struct svc_cacherep     *rp, *found;
        __be32                  xid = rqstp->rq_xid;
        u32                     proto =  rqstp->rq_prot,
                                vers = rqstp->rq_vers,
                                proc = rqstp->rq_proc;
+       __wsum                  csum;
        unsigned long           age;
        int type = rqstp->rq_cachetype;
        int rtn;
 
        rqstp->rq_cacherep = NULL;
-       if (cache_disabled || type == RC_NOCACHE) {
+       if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return RC_DOIT;
        }
 
+       csum = nfsd_cache_csum(rqstp);
+
        spin_lock(&cache_lock);
        rtn = RC_DOIT;
 
-       rh = &cache_hash[request_hash(xid)];
-       hlist_for_each_entry(rp, hn, rh, c_hash) {
-               if (rp->c_state != RC_UNUSED &&
-                   xid == rp->c_xid && proc == rp->c_proc &&
-                   proto == rp->c_prot && vers == rp->c_vers &&
-                   time_before(jiffies, rp->c_timestamp + 120*HZ) &&
-                   memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
-                       nfsdstats.rchits++;
-                       goto found_entry;
+       rp = nfsd_cache_search(rqstp, csum);
+       if (rp)
+               goto found_entry;
+
+       /* Try to use the first entry on the LRU */
+       if (!list_empty(&lru_head)) {
+               rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
+               if (nfsd_cache_entry_expired(rp) ||
+                   num_drc_entries >= max_drc_entries) {
+                       lru_put_end(rp);
+                       prune_cache_entries();
+                       goto setup_entry;
                }
        }
-       nfsdstats.rcmisses++;
 
-       /* This loop shouldn't take more than a few iterations normally */
-       {
-       int     safe = 0;
-       list_for_each_entry(rp, &lru_head, c_lru) {
-               if (rp->c_state != RC_INPROG)
-                       break;
-               if (safe++ > CACHESIZE) {
-                       printk("nfsd: loop in repcache LRU list\n");
-                       cache_disabled = 1;
-                       goto out;
-               }
+       /* Drop the lock and allocate a new entry */
+       spin_unlock(&cache_lock);
+       rp = nfsd_reply_cache_alloc();
+       if (!rp) {
+               dprintk("nfsd: unable to allocate DRC entry!\n");
+               return RC_DOIT;
        }
+       spin_lock(&cache_lock);
+       ++num_drc_entries;
+
+       /*
+        * Must search again just in case someone inserted one
+        * after we dropped the lock above.
+        */
+       found = nfsd_cache_search(rqstp, csum);
+       if (found) {
+               nfsd_reply_cache_free_locked(rp);
+               rp = found;
+               goto found_entry;
        }
 
-       /* All entries on the LRU are in-progress. This should not happen */
-       if (&rp->c_lru == &lru_head) {
-               static int      complaints;
-
-               printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
-               if (++complaints > 5) {
-                       printk(KERN_WARNING "nfsd: disabling repcache.\n");
-                       cache_disabled = 1;
-               }
-               goto out;
-       }
+       /*
+        * We're keeping the one we just allocated. Are we now over the
+        * limit? Prune one off the tip of the LRU in trade for the one we
+        * just allocated if so.
+        */
+       if (num_drc_entries >= max_drc_entries)
+               nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
+                                               struct svc_cacherep, c_lru));
 
+setup_entry:
+       nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
        rp->c_xid = xid;
        rp->c_proc = proc;
-       memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
+       rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
+       rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
        rp->c_prot = proto;
        rp->c_vers = vers;
-       rp->c_timestamp = jiffies;
+       rp->c_len = rqstp->rq_arg.len;
+       rp->c_csum = csum;
 
        hash_refile(rp);
+       lru_put_end(rp);
 
        /* release any buffer */
        if (rp->c_type == RC_REPLBUFF) {
@@ -201,9 +402,9 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
        return rtn;
 
 found_entry:
+       nfsdstats.rchits++;
        /* We found a matching entry which is either in progress or done. */
        age = jiffies - rp->c_timestamp;
-       rp->c_timestamp = jiffies;
        lru_put_end(rp);
 
        rtn = RC_DROPIT;
@@ -232,7 +433,7 @@ found_entry:
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
-               rp->c_state = RC_UNUSED;
+               nfsd_reply_cache_free_locked(rp);
        }
 
        goto out;
@@ -257,11 +458,11 @@ found_entry:
 void
 nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 {
-       struct svc_cacherep *rp;
+       struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec     *resv = &rqstp->rq_res.head[0], *cachv;
        int             len;
 
-       if (!(rp = rqstp->rq_cacherep) || cache_disabled)
+       if (!rp)
                return;
 
        len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
@@ -269,7 +470,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
 
        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
-               rp->c_state = RC_UNUSED;
+               nfsd_reply_cache_free(rp);
                return;
        }
 
@@ -283,21 +484,21 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
                cachv = &rp->c_replvec;
                cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
                if (!cachv->iov_base) {
-                       spin_lock(&cache_lock);
-                       rp->c_state = RC_UNUSED;
-                       spin_unlock(&cache_lock);
+                       nfsd_reply_cache_free(rp);
                        return;
                }
                cachv->iov_len = len << 2;
                memcpy(cachv->iov_base, statp, len << 2);
                break;
+       case RC_NOCACHE:
+               nfsd_reply_cache_free(rp);
+               return;
        }
        spin_lock(&cache_lock);
        lru_put_end(rp);
        rp->c_secure = rqstp->rq_secure;
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
-       rp->c_timestamp = jiffies;
        spin_unlock(&cache_lock);
        return;
 }
index 7493428..13a21c8 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <linux/sunrpc/svcsock.h>
 #include <linux/lockd/lockd.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/gss_api.h>
 #include <linux/sunrpc/gss_krb5_enctypes.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
@@ -85,7 +85,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
 
 static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
 {
-       ino_t ino =  file->f_path.dentry->d_inode->i_ino;
+       ino_t ino =  file_inode(file)->i_ino;
        char *data;
        ssize_t rv;
 
@@ -125,11 +125,11 @@ static const struct file_operations transaction_ops = {
        .llseek         = default_llseek,
 };
 
-static int exports_open(struct inode *inode, struct file *file)
+static int exports_net_open(struct net *net, struct file *file)
 {
        int err;
        struct seq_file *seq;
-       struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        err = seq_open(file, &nfs_exports_op);
        if (err)
@@ -140,8 +140,26 @@ static int exports_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-static const struct file_operations exports_operations = {
-       .open           = exports_open,
+static int exports_proc_open(struct inode *inode, struct file *file)
+{
+       return exports_net_open(current->nsproxy->net_ns, file);
+}
+
+static const struct file_operations exports_proc_operations = {
+       .open           = exports_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+       .owner          = THIS_MODULE,
+};
+
+static int exports_nfsd_open(struct inode *inode, struct file *file)
+{
+       return exports_net_open(inode->i_sb->s_fs_info, file);
+}
+
+static const struct file_operations exports_nfsd_operations = {
+       .open           = exports_nfsd_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
@@ -220,6 +238,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
        struct sockaddr *sap = (struct sockaddr *)&address;
        size_t salen = sizeof(address);
        char *fo_path;
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
 
        /* sanity check */
        if (size == 0)
@@ -232,7 +251,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
        if (qword_get(&buf, fo_path, size) < 0)
                return -EINVAL;
 
-       if (rpc_pton(&init_net, fo_path, size, sap, salen) == 0)
+       if (rpc_pton(net, fo_path, size, sap, salen) == 0)
                return -EINVAL;
 
        return nlmsvc_unlock_all_by_ip(sap);
@@ -317,6 +336,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
        int len;
        struct auth_domain *dom;
        struct knfsd_fh fh;
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
 
        if (size == 0)
                return -EINVAL;
@@ -352,7 +372,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
        if (!dom)
                return -ENOMEM;
 
-       len = exp_rootfh(&init_net, dom, path, &fh,  maxsize);
+       len = exp_rootfh(net, dom, path, &fh,  maxsize);
        auth_domain_put(dom);
        if (len)
                return len;
@@ -396,7 +416,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
 {
        char *mesg = buf;
        int rv;
-       struct net *net = &init_net;
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
 
        if (size > 0) {
                int newthreads;
@@ -447,7 +467,7 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
        int len;
        int npools;
        int *nthreads;
-       struct net *net = &init_net;
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
 
        mutex_lock(&nfsd_mutex);
        npools = nfsd_nrpools(net);
@@ -510,7 +530,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
        unsigned minor;
        ssize_t tlen = 0;
        char *sep;
-       struct net *net = &init_net;
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        if (size>0) {
@@ -534,7 +554,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
                        else
                                num = simple_strtol(vers, &minorp, 0);
                        if (*minorp == '.') {
-                               if (num < 4)
+                               if (num != 4)
                                        return -EINVAL;
                                minor = simple_strtoul(minorp+1, NULL, 0);
                                if (minor == 0)
@@ -792,7 +812,7 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size,
 static ssize_t write_ports(struct file *file, char *buf, size_t size)
 {
        ssize_t rv;
-       struct net *net = &init_net;
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
 
        mutex_lock(&nfsd_mutex);
        rv = __write_ports(file, buf, size, net);
@@ -827,7 +847,7 @@ int nfsd_max_blksize;
 static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
 {
        char *mesg = buf;
-       struct net *net = &init_net;
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        if (size > 0) {
@@ -923,7 +943,8 @@ static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
  */
 static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
 {
-       struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn);
 }
 
@@ -939,7 +960,8 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
  */
 static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
 {
-       struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        return nfsd4_write_time(file, buf, size, &nn->nfsd4_grace, nn);
 }
 
@@ -995,7 +1017,8 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
 static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
 {
        ssize_t rv;
-       struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
+       struct net *net = file->f_dentry->d_sb->s_fs_info;
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        mutex_lock(&nfsd_mutex);
        rv = __write_recoverydir(file, buf, size, nn);
@@ -1013,7 +1036,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
 static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
 {
        static struct tree_descr nfsd_files[] = {
-               [NFSD_List] = {"exports", &exports_operations, S_IRUGO},
+               [NFSD_List] = {"exports", &exports_nfsd_operations, S_IRUGO},
                [NFSD_Export_features] = {"export_features",
                                        &export_features_operations, S_IRUGO},
                [NFSD_FO_UnlockIP] = {"unlock_ip",
@@ -1037,20 +1060,35 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
 #endif
                /* last one */ {""}
        };
-       return simple_fill_super(sb, 0x6e667364, nfsd_files);
+       struct net *net = data;
+       int ret;
+
+       ret = simple_fill_super(sb, 0x6e667364, nfsd_files);
+       if (ret)
+               return ret;
+       sb->s_fs_info = get_net(net);
+       return 0;
 }
 
 static struct dentry *nfsd_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
 {
-       return mount_single(fs_type, flags, data, nfsd_fill_super);
+       return mount_ns(fs_type, flags, current->nsproxy->net_ns, nfsd_fill_super);
+}
+
+static void nfsd_umount(struct super_block *sb)
+{
+       struct net *net = sb->s_fs_info;
+
+       kill_litter_super(sb);
+       put_net(net);
 }
 
 static struct file_system_type nfsd_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "nfsd",
        .mount          = nfsd_mount,
-       .kill_sb        = kill_litter_super,
+       .kill_sb        = nfsd_umount,
 };
 
 #ifdef CONFIG_PROC_FS
@@ -1061,7 +1099,8 @@ static int create_proc_exports_entry(void)
        entry = proc_mkdir("fs/nfs", NULL);
        if (!entry)
                return -ENOMEM;
-       entry = proc_create("exports", 0, entry, &exports_operations);
+       entry = proc_create("exports", 0, entry,
+                                &exports_proc_operations);
        if (!entry)
                return -ENOMEM;
        return 0;
index aad6d45..54c6b3d 100644 (file)
@@ -26,17 +26,13 @@ static __be32
 nfsd_return_attrs(__be32 err, struct nfsd_attrstat *resp)
 {
        if (err) return err;
-       return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
-                                   resp->fh.fh_dentry,
-                                   &resp->stat));
+       return fh_getattr(&resp->fh, &resp->stat);
 }
 static __be32
 nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp)
 {
        if (err) return err;
-       return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
-                                   resp->fh.fh_dentry,
-                                   &resp->stat));
+       return fh_getattr(&resp->fh, &resp->stat);
 }
 /*
  * Get a file's attributes
@@ -150,9 +146,7 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
                                  &resp->count);
 
        if (nfserr) return nfserr;
-       return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
-                                   resp->fh.fh_dentry,
-                                   &resp->stat));
+       return fh_getattr(&resp->fh, &resp->stat);
 }
 
 /*
index be7af50..262df5c 100644 (file)
@@ -652,7 +652,6 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
 
        /* Check whether we have this call in the cache. */
        switch (nfsd_cache_lookup(rqstp)) {
-       case RC_INTR:
        case RC_DROPIT:
                return 0;
        case RC_REPLY:
@@ -703,8 +702,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
 int nfsd_pool_stats_open(struct inode *inode, struct file *file)
 {
        int ret;
-       struct net *net = &init_net;
-       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+       struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
 
        mutex_lock(&nfsd_mutex);
        if (nn->nfsd_serv == NULL) {
@@ -721,7 +719,7 @@ int nfsd_pool_stats_open(struct inode *inode, struct file *file)
 int nfsd_pool_stats_release(struct inode *inode, struct file *file)
 {
        int ret = seq_release(inode, file);
-       struct net *net = &init_net;
+       struct net *net = inode->i_sb->s_fs_info;
 
        mutex_lock(&nfsd_mutex);
        /* this function really, really should have been called svc_put() */
index 4201ede..9c769a4 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  */
 
+#include "vfs.h"
 #include "xdr.h"
 #include "auth.h"
 
@@ -196,11 +197,9 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
 }
 
 /* Helper function for NFSv2 ACL code */
-__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
+__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat)
 {
-       struct kstat stat;
-       vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, &stat);
-       return encode_fattr(rqstp, p, fhp, &stat);
+       return encode_fattr(rqstp, p, fhp, stat);
 }
 
 /*
index 31ff1d6..2a7eb53 100644 (file)
@@ -979,7 +979,7 @@ static void kill_suid(struct dentry *dentry)
  */
 static int wait_for_concurrent_writes(struct file *file)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        static ino_t last_ino;
        static dev_t last_dev;
        int err = 0;
@@ -1070,7 +1070,7 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
        if (err)
                return err;
 
-       inode = file->f_path.dentry->d_inode;
+       inode = file_inode(file);
 
        /* Get readahead parameters */
        ra = nfsd_get_raparms(inode->i_sb->s_dev, inode->i_ino);
@@ -1957,7 +1957,7 @@ static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
        offset = *offsetp;
 
        while (1) {
-               struct inode *dir_inode = file->f_path.dentry->d_inode;
+               struct inode *dir_inode = file_inode(file);
                unsigned int reclen;
 
                cdp->err = nfserr_eof; /* will be cleared on successful read */
index 359594c..5b58941 100644 (file)
@@ -6,6 +6,7 @@
 #define LINUX_NFSD_VFS_H
 
 #include "nfsfh.h"
+#include "nfsd.h"
 
 /*
  * Flags for nfsd_permission
@@ -125,4 +126,11 @@ static inline void fh_drop_write(struct svc_fh *fh)
        }
 }
 
+static inline __be32 fh_getattr(struct svc_fh *fh, struct kstat *stat)
+{
+       struct path p = {.mnt = fh->fh_export->ex_path.mnt,
+                        .dentry = fh->fh_dentry};
+       return nfserrno(vfs_getattr(&p, stat));
+}
+
 #endif /* LINUX_NFSD_VFS_H */
index 53b1863..4f0481d 100644 (file)
@@ -167,7 +167,7 @@ int nfssvc_encode_entry(void *, const char *name,
 int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
 
 /* Helper functions for NFSv2 ACL code */
-__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp);
+__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat);
 __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp);
 
 #endif /* LINUX_NFSD_H */
index 7df980e..b6d5542 100644 (file)
@@ -136,6 +136,7 @@ struct nfsd3_accessres {
        __be32                  status;
        struct svc_fh           fh;
        __u32                   access;
+       struct kstat            stat;
 };
 
 struct nfsd3_readlinkres {
@@ -225,6 +226,7 @@ struct nfsd3_getaclres {
        int                     mask;
        struct posix_acl        *acl_access;
        struct posix_acl        *acl_default;
+       struct kstat            stat;
 };
 
 /* dummy type for release */
index 0889bfb..546f898 100644 (file)
@@ -563,7 +563,7 @@ __be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
 void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
 void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
 __be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
-                      struct dentry *dentry, __be32 *buffer, int *countp,
+                      struct dentry *dentry, __be32 **buffer, int countp,
                       u32 *bmval, struct svc_rqst *, int ignore_crossmnt);
 extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
                struct nfsd4_compound_state *,
index df1a7fb..f30b017 100644 (file)
@@ -259,7 +259,7 @@ static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
 static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
        loff_t pos = filp->f_pos;
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
index bec4af6..08fdb77 100644 (file)
@@ -67,7 +67,7 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       struct inode *inode = vma->vm_file->f_dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        struct nilfs_transaction_info ti;
        int ret = 0;
 
index f385935..b44bdb2 100644 (file)
@@ -796,7 +796,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
 
 long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        void __user *argp = (void __user *)arg;
 
        switch (cmd) {
index 1d0c0b8..9de78f0 100644 (file)
@@ -517,11 +517,11 @@ static int nilfs_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
 
        if (parent && *lenp < NILFS_FID_SIZE_CONNECTABLE) {
                *lenp = NILFS_FID_SIZE_CONNECTABLE;
-               return 255;
+               return FILEID_INVALID;
        }
        if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE) {
                *lenp = NILFS_FID_SIZE_NON_CONNECTABLE;
-               return 255;
+               return FILEID_INVALID;
        }
 
        fid->cno = root->cno;
index 08b886f..2bfe6dc 100644 (file)
@@ -174,7 +174,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
        struct dnotify_struct **prev;
        struct inode *inode;
 
-       inode = filp->f_path.dentry->d_inode;
+       inode = file_inode(filp);
        if (!S_ISDIR(inode->i_mode))
                return;
 
@@ -296,7 +296,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
        }
 
        /* dnotify only works on directories */
-       inode = filp->f_path.dentry->d_inode;
+       inode = file_inode(filp);
        if (!S_ISDIR(inode->i_mode)) {
                error = -ENOTDIR;
                goto out_err;
index 9ff4a5e..5d84442 100644 (file)
@@ -466,7 +466,7 @@ static int fanotify_find_path(int dfd, const char __user *filename,
 
                ret = -ENOTDIR;
                if ((flags & FAN_MARK_ONLYDIR) &&
-                   !(S_ISDIR(f.file->f_path.dentry->d_inode->i_mode))) {
+                   !(S_ISDIR(file_inode(f.file)->i_mode))) {
                        fdput(f);
                        goto out;
                }
index 6baadb5..4bb21d6 100644 (file)
@@ -52,7 +52,6 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
 void __fsnotify_update_child_dentry_flags(struct inode *inode)
 {
        struct dentry *alias;
-       struct hlist_node *p;
        int watched;
 
        if (!S_ISDIR(inode->i_mode))
@@ -64,7 +63,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
        spin_lock(&inode->i_lock);
        /* run all of the dentries associated with this inode.  Since this is a
         * directory, there damn well better only be one item on this list */
-       hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
                struct dentry *child;
 
                /* run all of the children of the original inode and fix their
index f31e90f..74825be 100644 (file)
 static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
 {
        struct fsnotify_mark *mark;
-       struct hlist_node *pos;
        __u32 new_mask = 0;
 
        assert_spin_locked(&inode->i_lock);
 
-       hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+       hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)
                new_mask |= mark->mask;
        inode->i_fsnotify_mask = new_mask;
 }
@@ -87,11 +86,11 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
 void fsnotify_clear_marks_by_inode(struct inode *inode)
 {
        struct fsnotify_mark *mark, *lmark;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
        LIST_HEAD(free_list);
 
        spin_lock(&inode->i_lock);
-       hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+       hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {
                list_add(&mark->i.free_i_list, &free_list);
                hlist_del_init_rcu(&mark->i.i_list);
                fsnotify_get_mark(mark);
@@ -129,11 +128,10 @@ static struct fsnotify_mark *fsnotify_find_inode_mark_locked(
                struct inode *inode)
 {
        struct fsnotify_mark *mark;
-       struct hlist_node *pos;
 
        assert_spin_locked(&inode->i_lock);
 
-       hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+       hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {
                if (mark->group == group) {
                        fsnotify_get_mark(mark);
                        return mark;
@@ -194,8 +192,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
                            struct fsnotify_group *group, struct inode *inode,
                            int allow_dups)
 {
-       struct fsnotify_mark *lmark;
-       struct hlist_node *node, *last = NULL;
+       struct fsnotify_mark *lmark, *last = NULL;
        int ret = 0;
 
        mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
@@ -214,8 +211,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
        }
 
        /* should mark be in the middle of the current list? */
-       hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
-               last = node;
+       hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) {
+               last = lmark;
 
                if ((lmark->group == group) && !allow_dups) {
                        ret = -EEXIST;
@@ -235,7 +232,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 
        BUG_ON(last == NULL);
        /* mark should be the last entry.  last is the current last entry */
-       hlist_add_after_rcu(last, &mark->i.i_list);
+       hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);
 out:
        fsnotify_recalc_inode_mask_locked(inode);
        spin_unlock(&inode->i_lock);
index 871569c..4216308 100644 (file)
@@ -197,7 +197,6 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
 {
        /* ideally the idr is empty and we won't hit the BUG in the callback */
        idr_for_each(&group->inotify_data.idr, idr_callback, group);
-       idr_remove_all(&group->inotify_data.idr);
        idr_destroy(&group->inotify_data.idr);
        atomic_dec(&group->inotify_data.user->inotify_devs);
        free_uid(group->inotify_data.user);
index 07f7a92..e0f7c12 100644 (file)
@@ -364,22 +364,20 @@ static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
 {
        int ret;
 
-       do {
-               if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
-                       return -ENOMEM;
+       idr_preload(GFP_KERNEL);
+       spin_lock(idr_lock);
 
-               spin_lock(idr_lock);
-               ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
-                                       &i_mark->wd);
+       ret = idr_alloc(idr, i_mark, *last_wd + 1, 0, GFP_NOWAIT);
+       if (ret >= 0) {
                /* we added the mark to the idr, take a reference */
-               if (!ret) {
-                       *last_wd = i_mark->wd;
-                       fsnotify_get_mark(&i_mark->fsn_mark);
-               }
-               spin_unlock(idr_lock);
-       } while (ret == -EAGAIN);
+               i_mark->wd = ret;
+               *last_wd = i_mark->wd;
+               fsnotify_get_mark(&i_mark->fsn_mark);
+       }
 
-       return ret;
+       spin_unlock(idr_lock);
+       idr_preload_end();
+       return ret < 0 ? ret : 0;
 }
 
 static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
index 4df58b8..68ca5a8 100644 (file)
 void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
 {
        struct fsnotify_mark *mark, *lmark;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
        struct mount *m = real_mount(mnt);
        LIST_HEAD(free_list);
 
        spin_lock(&mnt->mnt_root->d_lock);
-       hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
+       hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
                list_add(&mark->m.free_m_list, &free_list);
                hlist_del_init_rcu(&mark->m.m_list);
                fsnotify_get_mark(mark);
@@ -71,12 +71,11 @@ static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
 {
        struct mount *m = real_mount(mnt);
        struct fsnotify_mark *mark;
-       struct hlist_node *pos;
        __u32 new_mask = 0;
 
        assert_spin_locked(&mnt->mnt_root->d_lock);
 
-       hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
+       hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
                new_mask |= mark->mask;
        m->mnt_fsnotify_mask = new_mask;
 }
@@ -114,11 +113,10 @@ static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_
 {
        struct mount *m = real_mount(mnt);
        struct fsnotify_mark *mark;
-       struct hlist_node *pos;
 
        assert_spin_locked(&mnt->mnt_root->d_lock);
 
-       hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
+       hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
                if (mark->group == group) {
                        fsnotify_get_mark(mark);
                        return mark;
@@ -153,8 +151,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
                               int allow_dups)
 {
        struct mount *m = real_mount(mnt);
-       struct fsnotify_mark *lmark;
-       struct hlist_node *node, *last = NULL;
+       struct fsnotify_mark *lmark, *last = NULL;
        int ret = 0;
 
        mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
@@ -173,8 +170,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
        }
 
        /* should mark be in the middle of the current list? */
-       hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) {
-               last = node;
+       hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
+               last = lmark;
 
                if ((lmark->group == group) && !allow_dups) {
                        ret = -EEXIST;
@@ -194,7 +191,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 
        BUG_ON(last == NULL);
        /* mark should be the last entry.  last is the current last entry */
-       hlist_add_after_rcu(last, &mark->m.m_list);
+       hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
 out:
        fsnotify_recalc_vfsmount_mask_locked(mnt);
        spin_unlock(&mnt->mnt_root->d_lock);
index 99e3610..aa411c3 100644 (file)
@@ -1101,7 +1101,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
        s64 ia_pos, ia_start, prev_ia_pos, bmp_pos;
        loff_t fpos, i_size;
-       struct inode *bmp_vi, *vdir = filp->f_path.dentry->d_inode;
+       struct inode *bmp_vi, *vdir = file_inode(filp);
        struct super_block *sb = vdir->i_sb;
        ntfs_inode *ndir = NTFS_I(vdir);
        ntfs_volume *vol = NTFS_SB(sb);
index 9796330..20dfec7 100644 (file)
@@ -569,7 +569,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
                             int ret,
                             bool is_async)
 {
-       struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(iocb->ki_filp);
        int level;
        wait_queue_head_t *wq = ocfs2_ioend_wq(inode);
 
@@ -593,9 +593,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
        level = ocfs2_iocb_rw_locked_level(iocb);
        ocfs2_rw_unlock(inode, level);
 
+       inode_dio_done(inode);
        if (is_async)
                aio_complete(iocb, ret, 0);
-       inode_dio_done(inode);
 }
 
 /*
@@ -626,7 +626,7 @@ static ssize_t ocfs2_direct_IO(int rw,
                               unsigned long nr_segs)
 {
        struct file *file = iocb->ki_filp;
-       struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+       struct inode *inode = file_inode(file)->i_mapping->host;
 
        /*
         * Fallback to buffered I/O if we see an inode without
index 0d2bf56..aa88bd8 100644 (file)
@@ -304,28 +304,22 @@ static u8 o2net_num_from_nn(struct o2net_node *nn)
 
 static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
 {
-       int ret = 0;
-
-       do {
-               if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
-                       ret = -EAGAIN;
-                       break;
-               }
-               spin_lock(&nn->nn_lock);
-               ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
-               if (ret == 0)
-                       list_add_tail(&nsw->ns_node_item,
-                                     &nn->nn_status_list);
-               spin_unlock(&nn->nn_lock);
-       } while (ret == -EAGAIN);
+       int ret;
 
-       if (ret == 0)  {
-               init_waitqueue_head(&nsw->ns_wq);
-               nsw->ns_sys_status = O2NET_ERR_NONE;
-               nsw->ns_status = 0;
+       spin_lock(&nn->nn_lock);
+       ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
+       if (ret >= 0) {
+               nsw->ns_id = ret;
+               list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
        }
+       spin_unlock(&nn->nn_lock);
+       if (ret < 0)
+               return ret;
 
-       return ret;
+       init_waitqueue_head(&nsw->ns_wq);
+       nsw->ns_sys_status = O2NET_ERR_NONE;
+       nsw->ns_status = 0;
+       return 0;
 }
 
 static void o2net_complete_nsw_locked(struct o2net_node *nn,
index 8db4b58..ef99972 100644 (file)
@@ -169,11 +169,10 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
                                      u64 parent_blkno,
                                      int skip_unhashed)
 {
-       struct hlist_node *p;
        struct dentry *dentry;
 
        spin_lock(&inode->i_lock);
-       hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+       hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
                spin_lock(&dentry->d_lock);
                if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
                        trace_ocfs2_find_local_alias(dentry->d_name.len,
index 8fe4e28..f1e1aed 100644 (file)
@@ -67,7 +67,6 @@
 #define NAMEI_RA_CHUNKS  2
 #define NAMEI_RA_BLOCKS  4
 #define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
 
 static unsigned char ocfs2_filetype_table[] = {
        DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
@@ -2015,12 +2014,12 @@ int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv,
 int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
        int error = 0;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int lock_level = 0;
 
        trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno);
 
-       error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
+       error = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
        if (lock_level && error >= 0) {
                /* We release EX lock which used to update atime
                 * and get PR lock again to reduce contention
index 005261c..33ecbe0 100644 (file)
@@ -2020,7 +2020,7 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
                               int ignore_higher, u8 request_from, u32 flags)
 {
        struct dlm_work_item *item;
-       item = kzalloc(sizeof(*item), GFP_NOFS);
+       item = kzalloc(sizeof(*item), GFP_ATOMIC);
        if (!item)
                return -ENOMEM;
 
index 01ebfd0..eeac97b 100644 (file)
@@ -2083,7 +2083,6 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
                                              u8 dead_node, u8 new_master)
 {
        int i;
-       struct hlist_node *hash_iter;
        struct hlist_head *bucket;
        struct dlm_lock_resource *res, *next;
 
@@ -2114,7 +2113,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
         * if necessary */
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
                bucket = dlm_lockres_hash(dlm, i);
-               hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
+               hlist_for_each_entry(res, bucket, hash_node) {
                        if (!(res->state & DLM_LOCK_RES_RECOVERING))
                                continue;
 
@@ -2273,7 +2272,6 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 
 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 {
-       struct hlist_node *iter;
        struct dlm_lock_resource *res;
        int i;
        struct hlist_head *bucket;
@@ -2299,7 +2297,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
         */
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
                bucket = dlm_lockres_hash(dlm, i);
-               hlist_for_each_entry(res, iter, bucket, hash_node) {
+               hlist_for_each_entry(res, bucket, hash_node) {
                        /* always prune any $RECOVERY entries for dead nodes,
                         * otherwise hangs can occur during later recovery */
                        if (dlm_is_recovery_lock(res->lockname.name,
index 16b712d..4c5fc8d 100644 (file)
@@ -224,7 +224,7 @@ static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
 static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
 {
        int event = 0;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct dlmfs_inode_private *ip = DLMFS_I(inode);
 
        poll_wait(file, &ip->ip_lockres.l_event, wait);
@@ -245,7 +245,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
        int bytes_left;
        ssize_t readlen, got;
        char *lvb_buf;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
                inode->i_ino, count, *ppos);
@@ -293,7 +293,7 @@ static ssize_t dlmfs_file_write(struct file *filp,
        int bytes_left;
        ssize_t writelen;
        char *lvb_buf;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
                inode->i_ino, count, *ppos);
index 322216a..2965116 100644 (file)
@@ -195,11 +195,11 @@ static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len,
 
        if (parent && (len < 6)) {
                *max_len = 6;
-               type = 255;
+               type = FILEID_INVALID;
                goto bail;
        } else if (len < 3) {
                *max_len = 3;
-               type = 255;
+               type = FILEID_INVALID;
                goto bail;
        }
 
index 0a2924a..6474cb4 100644 (file)
@@ -1950,7 +1950,7 @@ out:
 int ocfs2_change_file_space(struct file *file, unsigned int cmd,
                            struct ocfs2_space_resv *sr)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int ret;
 
@@ -1978,7 +1978,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
 static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
                            loff_t len)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_space_resv sr;
        int change_size = 1;
@@ -2233,7 +2233,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
        loff_t old_size, *ppos = &iocb->ki_pos;
        u32 old_clusters;
        struct file *file = iocb->ki_filp;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int full_coherency = !(osb->s_mount_opt &
                               OCFS2_MOUNT_COHERENCY_BUFFERED);
@@ -2517,7 +2517,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
                                      unsigned int flags)
 {
        int ret = 0, lock_level = 0;
-       struct inode *inode = in->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(in);
 
        trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2527,7 +2527,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
        /*
         * See the comment in ocfs2_file_aio_read()
         */
-       ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
+       ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
@@ -2547,7 +2547,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 {
        int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
        struct file *filp = iocb->ki_filp;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2590,7 +2590,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
         * like i_size. This allows the checks down below
         * generic_file_aio_read() a chance of actually working.
         */
-       ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
+       ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
index f20edcb..752f0b2 100644 (file)
@@ -881,7 +881,7 @@ bail:
 
 long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        unsigned int flags;
        int new_clusters;
        int status;
@@ -994,7 +994,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
        bool preserve;
        struct reflink_arguments args;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ocfs2_info info;
        void __user *argp = (void __user *)arg;
 
index 47a87dd..10d66c7 100644 (file)
@@ -62,7 +62,7 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh,
                                struct page *page)
 {
        int ret = VM_FAULT_NOPAGE;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct address_space *mapping = inode->i_mapping;
        loff_t pos = page_offset(page);
        unsigned int len = PAGE_CACHE_SIZE;
@@ -131,7 +131,7 @@ out:
 static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        struct buffer_head *di_bh = NULL;
        sigset_t oldset;
        int ret;
@@ -180,13 +180,13 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
 {
        int ret = 0, lock_level = 0;
 
-       ret = ocfs2_inode_lock_atime(file->f_dentry->d_inode,
-                                   file->f_vfsmnt, &lock_level);
+       ret = ocfs2_inode_lock_atime(file_inode(file),
+                                   file->f_path.mnt, &lock_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }
-       ocfs2_inode_unlock(file->f_dentry->d_inode, lock_level);
+       ocfs2_inode_unlock(file_inode(file), lock_level);
 out:
        vma->vm_ops = &ocfs2_file_vm_ops;
        return 0;
index 6083432..9f8dcad 100644 (file)
@@ -1055,7 +1055,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
 {
        int status;
 
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ocfs2_move_extents range;
        struct ocfs2_move_extents_context *context = NULL;
 
index 934a4ac..998b17e 100644 (file)
@@ -2927,7 +2927,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                                     u32 new_cluster, u32 new_len)
 {
        int ret = 0, partial;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ocfs2_caching_info *ci = INODE_CACHE(inode);
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
@@ -3020,7 +3020,7 @@ int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
                                    u32 new_cluster, u32 new_len)
 {
        int ret = 0;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct ocfs2_caching_info *ci = INODE_CACHE(inode);
        int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
index f169da4..b7e74b5 100644 (file)
@@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
         * cluster groups will be staying in cache for the duration of
         * this operation.
         */
-       ac->ac_allow_chain_relink = 0;
+       ac->ac_disable_chain_relink = 1;
 
        /* Claim the first region */
        status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
@@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
         * Do this *after* figuring out how many bits we're taking out
         * of our target group.
         */
-       if (ac->ac_allow_chain_relink &&
+       if (!ac->ac_disable_chain_relink &&
            (prev_group_bh) &&
            (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
                status = ocfs2_relink_block_group(handle, alloc_inode,
@@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 
        victim = ocfs2_find_victim_chain(cl);
        ac->ac_chain = victim;
-       ac->ac_allow_chain_relink = 1;
 
        status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                    res, &bits_left);
@@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
         * searching each chain in order. Don't allow chain relinking
         * because we only calculate enough journal credits for one
         * relink per alloc. */
-       ac->ac_allow_chain_relink = 0;
+       ac->ac_disable_chain_relink = 1;
        for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
                if (i == victim)
                        continue;
index b8afabf..a36d0aa 100644 (file)
@@ -49,7 +49,7 @@ struct ocfs2_alloc_context {
 
        /* these are used by the chain search */
        u16    ac_chain;
-       int    ac_allow_chain_relink;
+       int    ac_disable_chain_relink;
        group_search_t *ac_group_search;
 
        u64    ac_last_group;
index f1fbb4b..66edce7 100644 (file)
@@ -57,7 +57,7 @@
 static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
 {
        struct inode *inode = page->mapping->host;
-       struct buffer_head *bh;
+       struct buffer_head *bh = NULL;
        int status = ocfs2_read_inode_block(inode, &bh);
        struct ocfs2_dinode *fe;
        const char *link;
index 0ba9ea1..2e3ea30 100644 (file)
@@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
        struct buffer_head *dir_bh = NULL;
 
        ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
-       if (!ret) {
+       if (ret) {
                mlog_errno(ret);
                goto leave;
        }
index fb5b3ff..acbaebc 100644 (file)
@@ -330,7 +330,7 @@ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
 static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir,
                u64 fsblock, int hindex)
 {
-       struct inode *dir = filp->f_dentry->d_inode;
+       struct inode *dir = file_inode(filp);
        struct buffer_head *bh;
        struct omfs_inode *oi;
        u64 self;
@@ -405,7 +405,7 @@ out:
 
 static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *dir = filp->f_dentry->d_inode;
+       struct inode *dir = file_inode(filp);
        struct buffer_head *bh;
        loff_t offset, res;
        unsigned int hchain, hindex;
index 9b33c0c..6835446 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -30,6 +30,7 @@
 #include <linux/fs_struct.h>
 #include <linux/ima.h>
 #include <linux/dnotify.h>
+#include <linux/compat.h>
 
 #include "internal.h"
 
@@ -140,6 +141,13 @@ SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
        return do_sys_truncate(path, length);
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length)
+{
+       return do_sys_truncate(path, length);
+}
+#endif
+
 static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
 {
        struct inode *inode;
@@ -195,6 +203,13 @@ SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
+{
+       return do_sys_ftruncate(fd, length, 1);
+}
+#endif
+
 /* LFS versions of truncate are only needed on 32 bit machines */
 #if BITS_PER_LONG == 32
 SYSCALL_DEFINE(truncate64)(const char __user * path, loff_t length)
@@ -228,7 +243,7 @@ SYSCALL_ALIAS(sys_ftruncate64, SyS_ftruncate64);
 
 int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        long ret;
 
        if (offset < 0 || len <= 0)
@@ -426,7 +441,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
        if (!f.file)
                goto out;
 
-       inode = f.file->f_path.dentry->d_inode;
+       inode = file_inode(f.file);
 
        error = -ENOTDIR;
        if (!S_ISDIR(inode->i_mode))
@@ -689,7 +704,7 @@ static int do_dentry_open(struct file *f,
                f->f_mode = FMODE_PATH;
 
        path_get(&f->f_path);
-       inode = f->f_path.dentry->d_inode;
+       inode = f->f_inode = f->f_path.dentry->d_inode;
        if (f->f_mode & FMODE_WRITE) {
                error = __get_file_write_access(inode, f->f_path.mnt);
                if (error)
@@ -699,7 +714,6 @@ static int do_dentry_open(struct file *f,
        }
 
        f->f_mapping = inode->i_mapping;
-       f->f_pos = 0;
        file_sb_list_add(f, inode->i_sb);
 
        if (unlikely(f->f_mode & FMODE_PATH)) {
@@ -753,6 +767,7 @@ cleanup_file:
        path_put(&f->f_path);
        f->f_path.mnt = NULL;
        f->f_path.dentry = NULL;
+       f->f_inode = NULL;
        return error;
 }
 
@@ -810,23 +825,22 @@ struct file *dentry_open(const struct path *path, int flags,
        /* We must always pass in a valid mount pointer. */
        BUG_ON(!path->mnt);
 
-       error = -ENFILE;
        f = get_empty_filp();
-       if (f == NULL)
-               return ERR_PTR(error);
-
-       f->f_flags = flags;
-       f->f_path = *path;
-       error = do_dentry_open(f, NULL, cred);
-       if (!error) {
-               error = open_check_o_direct(f);
-               if (error) {
-                       fput(f);
+       if (!IS_ERR(f)) {
+               f->f_flags = flags;
+               f->f_path = *path;
+               error = do_dentry_open(f, NULL, cred);
+               if (!error) {
+                       /* from now on we need fput() to dispose of f */
+                       error = open_check_o_direct(f);
+                       if (error) {
+                               fput(f);
+                               f = ERR_PTR(error);
+                       }
+               } else { 
+                       put_filp(f);
                        f = ERR_PTR(error);
                }
-       } else { 
-               put_filp(f);
-               f = ERR_PTR(error);
        }
        return f;
 }
index 2ad080f..ae47fa7 100644 (file)
@@ -262,7 +262,7 @@ found:
 
 static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct op_inode_info *oi = OP_I(inode);
        struct device_node *dp = oi->u.node;
        struct device_node *child;
index bd3479d..64a494c 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -361,7 +361,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
           unsigned long nr_segs, loff_t pos)
 {
        struct file *filp = iocb->ki_filp;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct pipe_inode_info *pipe;
        int do_wakeup;
        ssize_t ret;
@@ -486,7 +486,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
            unsigned long nr_segs, loff_t ppos)
 {
        struct file *filp = iocb->ki_filp;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct pipe_inode_info *pipe;
        ssize_t ret;
        int do_wakeup;
@@ -677,7 +677,7 @@ bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
 
 static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct pipe_inode_info *pipe;
        int count, buf, nrbufs;
 
@@ -705,7 +705,7 @@ static unsigned int
 pipe_poll(struct file *filp, poll_table *wait)
 {
        unsigned int mask;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct pipe_inode_info *pipe = inode->i_pipe;
        int nrbufs;
 
@@ -758,7 +758,7 @@ pipe_release(struct inode *inode, int decr, int decw)
 static int
 pipe_read_fasync(int fd, struct file *filp, int on)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int retval;
 
        mutex_lock(&inode->i_mutex);
@@ -772,7 +772,7 @@ pipe_read_fasync(int fd, struct file *filp, int on)
 static int
 pipe_write_fasync(int fd, struct file *filp, int on)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int retval;
 
        mutex_lock(&inode->i_mutex);
@@ -786,7 +786,7 @@ pipe_write_fasync(int fd, struct file *filp, int on)
 static int
 pipe_rdwr_fasync(int fd, struct file *filp, int on)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct pipe_inode_info *pipe = inode->i_pipe;
        int retval;
 
@@ -1037,13 +1037,13 @@ int create_pipe_files(struct file **res, int flags)
 
        err = -ENFILE;
        f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
-       if (!f)
+       if (IS_ERR(f))
                goto err_dentry;
 
        f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
 
        res[0] = alloc_file(&path, FMODE_READ, &read_pipefifo_fops);
-       if (!res[0])
+       if (IS_ERR(res[0]))
                goto err_file;
 
        path_get(&path);
@@ -1226,7 +1226,7 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
  */
 struct pipe_inode_info *get_pipe_info(struct file *file)
 {
-       struct inode *i = file->f_path.dentry->d_inode;
+       struct inode *i = file_inode(file);
 
        return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
 }
index 9b43ff7..69078c7 100644 (file)
@@ -73,6 +73,7 @@
 #include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
+#include <linux/printk.h>
 #include <linux/cgroup.h>
 #include <linux/cpuset.h>
 #include <linux/audit.h>
@@ -383,7 +384,7 @@ static int lstats_open(struct inode *inode, struct file *file)
 static ssize_t lstats_write(struct file *file, const char __user *buf,
                            size_t count, loff_t *offs)
 {
-       struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+       struct task_struct *task = get_proc_task(file_inode(file));
 
        if (!task)
                return -ESRCH;
@@ -602,7 +603,7 @@ static const struct inode_operations proc_def_inode_operations = {
 static ssize_t proc_info_read(struct file * file, char __user * buf,
                          size_t count, loff_t *ppos)
 {
-       struct inode * inode = file->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(file);
        unsigned long page;
        ssize_t length;
        struct task_struct *task = get_proc_task(inode);
@@ -668,7 +669,7 @@ static const struct file_operations proc_single_file_operations = {
 
 static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
 {
-       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+       struct task_struct *task = get_proc_task(file_inode(file));
        struct mm_struct *mm;
 
        if (!task)
@@ -869,7 +870,7 @@ static const struct file_operations proc_environ_operations = {
 static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
                            loff_t *ppos)
 {
-       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+       struct task_struct *task = get_proc_task(file_inode(file));
        char buffer[PROC_NUMBUF];
        int oom_adj = OOM_ADJUST_MIN;
        size_t len;
@@ -916,7 +917,7 @@ static ssize_t oom_adj_write(struct file *file, const char __user *buf,
                goto out;
        }
 
-       task = get_proc_task(file->f_path.dentry->d_inode);
+       task = get_proc_task(file_inode(file));
        if (!task) {
                err = -ESRCH;
                goto out;
@@ -952,7 +953,7 @@ static ssize_t oom_adj_write(struct file *file, const char __user *buf,
         * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
         * /proc/pid/oom_score_adj instead.
         */
-       printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+       pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
                  current->comm, task_pid_nr(current), task_pid_nr(task),
                  task_pid_nr(task));
 
@@ -976,7 +977,7 @@ static const struct file_operations proc_oom_adj_operations = {
 static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
                                        size_t count, loff_t *ppos)
 {
-       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+       struct task_struct *task = get_proc_task(file_inode(file));
        char buffer[PROC_NUMBUF];
        short oom_score_adj = OOM_SCORE_ADJ_MIN;
        unsigned long flags;
@@ -1019,7 +1020,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
                goto out;
        }
 
-       task = get_proc_task(file->f_path.dentry->d_inode);
+       task = get_proc_task(file_inode(file));
        if (!task) {
                err = -ESRCH;
                goto out;
@@ -1067,7 +1068,7 @@ static const struct file_operations proc_oom_score_adj_operations = {
 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
                                  size_t count, loff_t *ppos)
 {
-       struct inode * inode = file->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(file);
        struct task_struct *task = get_proc_task(inode);
        ssize_t length;
        char tmpbuf[TMPBUFLEN];
@@ -1084,7 +1085,7 @@ static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
                                   size_t count, loff_t *ppos)
 {
-       struct inode * inode = file->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(file);
        char *page, *tmp;
        ssize_t length;
        uid_t loginuid;
@@ -1142,7 +1143,7 @@ static const struct file_operations proc_loginuid_operations = {
 static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
                                  size_t count, loff_t *ppos)
 {
-       struct inode * inode = file->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(file);
        struct task_struct *task = get_proc_task(inode);
        ssize_t length;
        char tmpbuf[TMPBUFLEN];
@@ -1165,7 +1166,7 @@ static const struct file_operations proc_sessionid_operations = {
 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
                                      size_t count, loff_t *ppos)
 {
-       struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+       struct task_struct *task = get_proc_task(file_inode(file));
        char buffer[PROC_NUMBUF];
        size_t len;
        int make_it_fail;
@@ -1197,7 +1198,7 @@ static ssize_t proc_fault_inject_write(struct file * file,
        make_it_fail = simple_strtol(strstrip(buffer), &end, 0);
        if (*end)
                return -EINVAL;
-       task = get_proc_task(file->f_dentry->d_inode);
+       task = get_proc_task(file_inode(file));
        if (!task)
                return -ESRCH;
        task->make_it_fail = make_it_fail;
@@ -1237,7 +1238,7 @@ static ssize_t
 sched_write(struct file *file, const char __user *buf,
            size_t count, loff_t *offset)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct task_struct *p;
 
        p = get_proc_task(inode);
@@ -1288,7 +1289,7 @@ static ssize_t
 sched_autogroup_write(struct file *file, const char __user *buf,
            size_t count, loff_t *offset)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct task_struct *p;
        char buffer[PROC_NUMBUF];
        int nice;
@@ -1343,7 +1344,7 @@ static const struct file_operations proc_pid_sched_autogroup_operations = {
 static ssize_t comm_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *offset)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct task_struct *p;
        char buffer[TASK_COMM_LEN];
 
@@ -1711,7 +1712,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
                return -ECHILD;
 
        if (!capable(CAP_SYS_ADMIN)) {
-               status = -EACCES;
+               status = -EPERM;
                goto out_notask;
        }
 
@@ -1844,7 +1845,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
        struct dentry *result;
        struct mm_struct *mm;
 
-       result = ERR_PTR(-EACCES);
+       result = ERR_PTR(-EPERM);
        if (!capable(CAP_SYS_ADMIN))
                goto out;
 
@@ -1900,7 +1901,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
        ino_t ino;
        int ret;
 
-       ret = -EACCES;
+       ret = -EPERM;
        if (!capable(CAP_SYS_ADMIN))
                goto out;
 
@@ -2146,7 +2147,7 @@ out_no_task:
 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
                                  size_t count, loff_t *ppos)
 {
-       struct inode * inode = file->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(file);
        char *p = NULL;
        ssize_t length;
        struct task_struct *task = get_proc_task(inode);
@@ -2167,7 +2168,7 @@ static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
                                   size_t count, loff_t *ppos)
 {
-       struct inode * inode = file->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(file);
        char *page;
        ssize_t length;
        struct task_struct *task = get_proc_task(inode);
@@ -2256,7 +2257,7 @@ static const struct inode_operations proc_attr_dir_inode_operations = {
 static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
                                         size_t count, loff_t *ppos)
 {
-       struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+       struct task_struct *task = get_proc_task(file_inode(file));
        struct mm_struct *mm;
        char buffer[PROC_NUMBUF];
        size_t len;
@@ -2308,7 +2309,7 @@ static ssize_t proc_coredump_filter_write(struct file *file,
                goto out_no_task;
 
        ret = -ESRCH;
-       task = get_proc_task(file->f_dentry->d_inode);
+       task = get_proc_task(file_inode(file));
        if (!task)
                goto out_no_task;
 
@@ -2618,6 +2619,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
 
        name.name = buf;
        name.len = snprintf(buf, sizeof(buf), "%d", pid);
+       /* no ->d_hash() rejects on procfs */
        dentry = d_hash_and_lookup(mnt->mnt_root, &name);
        if (dentry) {
                shrink_dcache_parent(dentry);
index 76ddae8..4b3b3ff 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/printk.h>
 #include <linux/mount.h>
 #include <linux/init.h>
 #include <linux/idr.h>
@@ -42,7 +43,7 @@ static ssize_t
 __proc_file_read(struct file *file, char __user *buf, size_t nbytes,
               loff_t *ppos)
 {
-       struct inode * inode = file->f_path.dentry->d_inode;
+       struct inode * inode = file_inode(file);
        char    *page;
        ssize_t retval=0;
        int     eof=0;
@@ -132,11 +133,8 @@ __proc_file_read(struct file *file, char __user *buf, size_t nbytes,
                }
 
                if (start == NULL) {
-                       if (n > PAGE_SIZE) {
-                               printk(KERN_ERR
-                                      "proc_file_read: Apparent buffer overflow!\n");
+                       if (n > PAGE_SIZE)      /* Apparent buffer overflow */
                                n = PAGE_SIZE;
-                       }
                        n -= *ppos;
                        if (n <= 0)
                                break;
@@ -144,26 +142,19 @@ __proc_file_read(struct file *file, char __user *buf, size_t nbytes,
                                n = count;
                        start = page + *ppos;
                } else if (start < page) {
-                       if (n > PAGE_SIZE) {
-                               printk(KERN_ERR
-                                      "proc_file_read: Apparent buffer overflow!\n");
+                       if (n > PAGE_SIZE)      /* Apparent buffer overflow */
                                n = PAGE_SIZE;
-                       }
                        if (n > count) {
                                /*
                                 * Don't reduce n because doing so might
                                 * cut off part of a data block.
                                 */
-                               printk(KERN_WARNING
-                                      "proc_file_read: Read count exceeded\n");
+                               pr_warn("proc_file_read: count exceeded\n");
                        }
                } else /* start >= page */ {
                        unsigned long startoff = (unsigned long)(start - page);
-                       if (n > (PAGE_SIZE - startoff)) {
-                               printk(KERN_ERR
-                                      "proc_file_read: Apparent buffer overflow!\n");
+                       if (n > (PAGE_SIZE - startoff)) /* buffer overflow? */
                                n = PAGE_SIZE - startoff;
-                       }
                        if (n > count)
                                n = count;
                }
@@ -188,7 +179,7 @@ static ssize_t
 proc_file_read(struct file *file, char __user *buf, size_t nbytes,
               loff_t *ppos)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;
 
        spin_lock(&pde->pde_unload_lock);
@@ -209,7 +200,7 @@ static ssize_t
 proc_file_write(struct file *file, const char __user *buffer,
                size_t count, loff_t *ppos)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;
 
        if (pde->write_proc) {
@@ -412,8 +403,7 @@ static const struct dentry_operations proc_dentry_operations =
 struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
                struct dentry *dentry)
 {
-       struct inode *inode = NULL;
-       int error = -ENOENT;
+       struct inode *inode;
 
        spin_lock(&proc_subdir_lock);
        for (de = de->subdir; de ; de = de->next) {
@@ -422,22 +412,16 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
                if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
                        pde_get(de);
                        spin_unlock(&proc_subdir_lock);
-                       error = -ENOMEM;
                        inode = proc_get_inode(dir->i_sb, de);
-                       goto out_unlock;
+                       if (!inode)
+                               return ERR_PTR(-ENOMEM);
+                       d_set_d_op(dentry, &proc_dentry_operations);
+                       d_add(dentry, inode);
+                       return NULL;
                }
        }
        spin_unlock(&proc_subdir_lock);
-out_unlock:
-
-       if (inode) {
-               d_set_d_op(dentry, &proc_dentry_operations);
-               d_add(dentry, inode);
-               return NULL;
-       }
-       if (de)
-               pde_put(de);
-       return ERR_PTR(error);
+       return ERR_PTR(-ENOENT);
 }
 
 struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
@@ -460,7 +444,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
 {
        unsigned int ino;
        int i;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int ret = 0;
 
        ino = inode->i_ino;
@@ -522,7 +506,7 @@ out:
 
 int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        return proc_readdir_de(PDE(inode), filp, dirent, filldir);
 }
@@ -576,7 +560,7 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
 
        for (tmp = dir->subdir; tmp; tmp = tmp->next)
                if (strcmp(tmp->name, dp->name) == 0) {
-                       WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
+                       WARN(1, "proc_dir_entry '%s/%s' already registered\n",
                                dir->name, dp->name);
                        break;
                }
@@ -837,9 +821,9 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
        if (S_ISDIR(de->mode))
                parent->nlink--;
        de->nlink = 0;
-       WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
-                       "'%s/%s', leaking at least '%s'\n", __func__,
-                       de->parent->name, de->name, de->subdir->name);
+       WARN(de->subdir, "%s: removing non-empty directory "
+                        "'%s/%s', leaking at least '%s'\n", __func__,
+                        de->parent->name, de->name, de->subdir->name);
        pde_put(de);
 }
 EXPORT_SYMBOL(remove_proc_entry);
index 439ae68..a86aebc 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/stat.h>
 #include <linux/completion.h>
 #include <linux/poll.h>
+#include <linux/printk.h>
 #include <linux/file.h>
 #include <linux/limits.h>
 #include <linux/init.h>
@@ -144,7 +145,7 @@ void pde_users_dec(struct proc_dir_entry *pde)
 
 static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        loff_t rv = -EINVAL;
        loff_t (*llseek)(struct file *, loff_t, int);
 
@@ -179,7 +180,7 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
 
 static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;
        ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
 
@@ -201,7 +202,7 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count,
 
 static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        ssize_t rv = -EIO;
        ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
 
@@ -223,7 +224,7 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
 
 static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        unsigned int rv = DEFAULT_POLLMASK;
        unsigned int (*poll)(struct file *, struct poll_table_struct *);
 
@@ -245,7 +246,7 @@ static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *p
 
 static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        long rv = -ENOTTY;
        long (*ioctl)(struct file *, unsigned int, unsigned long);
 
@@ -268,7 +269,7 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
 #ifdef CONFIG_COMPAT
 static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        long rv = -ENOTTY;
        long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
 
@@ -291,7 +292,7 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned
 
 static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       struct proc_dir_entry *pde = PDE(file_inode(file));
        int rv = -EIO;
        int (*mmap)(struct file *, struct vm_area_struct *);
 
@@ -445,12 +446,9 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
 
 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
-       struct inode * inode;
+       struct inode *inode = iget_locked(sb, de->low_ino);
 
-       inode = iget_locked(sb, de->low_ino);
-       if (!inode)
-               return NULL;
-       if (inode->i_state & I_NEW) {
+       if (inode && (inode->i_state & I_NEW)) {
                inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
                PROC_I(inode)->pde = de;
 
@@ -482,10 +480,12 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
        } else
               pde_put(de);
        return inode;
-}                      
+}
 
 int proc_fill_super(struct super_block *s)
 {
+       struct inode *root_inode;
+
        s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
        s->s_blocksize = 1024;
        s->s_blocksize_bits = 10;
@@ -494,11 +494,17 @@ int proc_fill_super(struct super_block *s)
        s->s_time_gran = 1;
        
        pde_get(&proc_root);
-       s->s_root = d_make_root(proc_get_inode(s, &proc_root));
-       if (s->s_root)
-               return 0;
+       root_inode = proc_get_inode(s, &proc_root);
+       if (!root_inode) {
+               pr_err("proc_fill_super: get root inode failed\n");
+               return -ENOMEM;
+       }
 
-       printk("proc_read_super: get root inode failed\n");
-       pde_put(&proc_root);
-       return -ENOMEM;
+       s->s_root = d_make_root(root_inode);
+       if (!s->s_root) {
+               pr_err("proc_fill_super: allocate dentry failed\n");
+               return -ENOMEM;
+       }
+
+       return 0;
 }
index 252544c..85ff3a4 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/sched.h>
 #include <linux/proc_fs.h>
+#include <linux/binfmts.h>
 struct  ctl_table_header;
 struct  mempolicy;
 
@@ -108,7 +109,7 @@ static inline int task_dumpable(struct task_struct *task)
        if (mm)
                dumpable = get_dumpable(mm);
        task_unlock(task);
-       if (dumpable == SUID_DUMPABLE_ENABLED)
+       if (dumpable == SUID_DUMP_USER)
                return 1;
        return 0;
 }
index e96d4f1..eda6f01 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/elfcore.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
+#include <linux/printk.h>
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -619,7 +620,7 @@ static int __init proc_kcore_init(void)
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
                                      &proc_kcore_operations);
        if (!proc_root_kcore) {
-               printk(KERN_ERR "couldn't create /proc/kcore\n");
+               pr_err("couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
index b1822dd..ccfd99b 100644 (file)
@@ -45,7 +45,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
        file = region->vm_file;
 
        if (file) {
-               struct inode *inode = region->vm_file->f_path.dentry->d_inode;
+               struct inode *inode = file_inode(region->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }
index de20ec4..30b590f 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/time.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/printk.h>
 #include <linux/stat.h>
 #include <linux/string.h>
 #include <linux/of.h>
@@ -110,8 +111,8 @@ void proc_device_tree_update_prop(struct proc_dir_entry *pde,
                if (ent->data == oldprop)
                        break;
        if (ent == NULL) {
-               printk(KERN_WARNING "device-tree: property \"%s\" "
-                      " does not exist\n", oldprop->name);
+               pr_warn("device-tree: property \"%s\" does not exist\n",
+                       oldprop->name);
        } else {
                ent->data = newprop;
                ent->size = newprop->length;
@@ -153,8 +154,8 @@ static const char *fixup_name(struct device_node *np, struct proc_dir_entry *de,
 realloc:
        fixed_name = kmalloc(fixup_len, GFP_KERNEL);
        if (fixed_name == NULL) {
-               printk(KERN_ERR "device-tree: Out of memory trying to fixup "
-                               "name \"%s\"\n", name);
+               pr_err("device-tree: Out of memory trying to fixup "
+                      "name \"%s\"\n", name);
                return name;
        }
 
@@ -175,8 +176,8 @@ retry:
                goto retry;
        }
 
-       printk(KERN_WARNING "device-tree: Duplicate name in %s, "
-                       "renamed to \"%s\"\n", np->full_name, fixed_name);
+       pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+               np->full_name, fixed_name);
 
        return fixed_name;
 }
index 3131a03..b4ac657 100644 (file)
@@ -163,7 +163,7 @@ static int proc_tgid_net_readdir(struct file *filp, void *dirent,
        struct net *net;
 
        ret = -EINVAL;
-       net = get_proc_task_net(filp->f_path.dentry->d_inode);
+       net = get_proc_task_net(file_inode(filp));
        if (net != NULL) {
                ret = proc_readdir_de(net->proc_net, filp, dirent, filldir);
                put_net(net);
index 1827d88..ac05f33 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/sysctl.h>
 #include <linux/poll.h>
 #include <linux/proc_fs.h>
+#include <linux/printk.h>
 #include <linux/security.h>
 #include <linux/sched.h>
 #include <linux/namei.h>
@@ -57,7 +58,7 @@ static void sysctl_print_dir(struct ctl_dir *dir)
 {
        if (dir->header.parent)
                sysctl_print_dir(dir->header.parent);
-       printk(KERN_CONT "%s/", dir->header.ctl_table[0].procname);
+       pr_cont("%s/", dir->header.ctl_table[0].procname);
 }
 
 static int namecmp(const char *name1, int len1, const char *name2, int len2)
@@ -134,9 +135,9 @@ static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
                else if (cmp > 0)
                        p = &(*p)->rb_right;
                else {
-                       printk(KERN_ERR "sysctl duplicate entry: ");
+                       pr_err("sysctl duplicate entry: ");
                        sysctl_print_dir(head->parent);
-                       printk(KERN_CONT "/%s\n", entry->procname);
+                       pr_cont("/%s\n", entry->procname);
                        return -EEXIST;
                }
        }
@@ -478,7 +479,7 @@ out:
 static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
                size_t count, loff_t *ppos, int write)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ctl_table_header *head = grab_header(inode);
        struct ctl_table *table = PROC_I(inode)->sysctl_entry;
        ssize_t error;
@@ -542,7 +543,7 @@ static int proc_sys_open(struct inode *inode, struct file *filp)
 
 static unsigned int proc_sys_poll(struct file *filp, poll_table *wait)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct ctl_table_header *head = grab_header(inode);
        struct ctl_table *table = PROC_I(inode)->sysctl_entry;
        unsigned int ret = DEFAULT_POLLMASK;
@@ -927,9 +928,9 @@ found:
        subdir->header.nreg++;
 failed:
        if (unlikely(IS_ERR(subdir))) {
-               printk(KERN_ERR "sysctl could not get directory: ");
+               pr_err("sysctl could not get directory: ");
                sysctl_print_dir(dir);
-               printk(KERN_CONT "/%*.*s %ld\n",
+               pr_cont("/%*.*s %ld\n",
                        namelen, namelen, name, PTR_ERR(subdir));
        }
        drop_sysctl_table(&dir->header);
@@ -995,8 +996,8 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       printk(KERN_ERR "sysctl table check failed: %s/%s %pV\n",
-               path, table->procname, &vaf);
+       pr_err("sysctl table check failed: %s/%s %pV\n",
+              path, table->procname, &vaf);
 
        va_end(args);
        return -EINVAL;
@@ -1510,9 +1511,9 @@ static void put_links(struct ctl_table_header *header)
                        drop_sysctl_table(link_head);
                }
                else {
-                       printk(KERN_ERR "sysctl link missing during unregister: ");
+                       pr_err("sysctl link missing during unregister: ");
                        sysctl_print_dir(parent);
-                       printk(KERN_CONT "/%s\n", name);
+                       pr_cont("/%s\n", name);
                }
        }
 }
index ca5ce7f..3e636d8 100644 (file)
@@ -271,7 +271,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
        const char *name = NULL;
 
        if (file) {
-               struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+               struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
@@ -743,7 +743,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                return rv;
        if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
                return -EINVAL;
-       task = get_proc_task(file->f_path.dentry->d_inode);
+       task = get_proc_task(file_inode(file));
        if (!task)
                return -ESRCH;
        mm = get_task_mm(task);
@@ -1015,7 +1015,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
 static ssize_t pagemap_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
 {
-       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+       struct task_struct *task = get_proc_task(file_inode(file));
        struct mm_struct *mm;
        struct pagemapread pm;
        int ret = -ESRCH;
index 1ccfa53..56123a6 100644 (file)
@@ -149,7 +149,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
        file = vma->vm_file;
 
        if (file) {
-               struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+               struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
index 0d5071d..b870f74 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
+#include <linux/printk.h>
 #include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/crash_dump.h>
@@ -175,15 +176,15 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
        start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
        if (!curr_m)
                return -EINVAL;
-       if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
-               tsz = buflen;
-
-       /* Calculate left bytes in current memory segment. */
-       nr_bytes = (curr_m->size - (start - curr_m->paddr));
-       if (tsz > nr_bytes)
-               tsz = nr_bytes;
 
        while (buflen) {
+               tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK));
+
+               /* Calculate left bytes in current memory segment. */
+               nr_bytes = (curr_m->size - (start - curr_m->paddr));
+               if (tsz > nr_bytes)
+                       tsz = nr_bytes;
+
                tmp = read_from_oldmem(buffer, tsz, &start, 1);
                if (tmp < 0)
                        return tmp;
@@ -198,12 +199,6 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
                                                struct vmcore, list);
                        start = curr_m->paddr;
                }
-               if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
-                       tsz = buflen;
-               /* Calculate left bytes in current memory segment. */
-               nr_bytes = (curr_m->size - (start - curr_m->paddr));
-               if (tsz > nr_bytes)
-                       tsz = nr_bytes;
        }
        return acc;
 }
@@ -553,8 +548,7 @@ static int __init parse_crash_elf64_headers(void)
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
-               printk(KERN_WARNING "Warning: Core image elf header is not"
-                                       "sane\n");
+               pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }
 
@@ -609,8 +603,7 @@ static int __init parse_crash_elf32_headers(void)
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
-               printk(KERN_WARNING "Warning: Core image elf header is not"
-                                       "sane\n");
+               pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }
 
@@ -653,8 +646,7 @@ static int __init parse_crash_elf_headers(void)
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
-               printk(KERN_WARNING "Warning: Core image elf header"
-                                       " not found\n");
+               pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }
 
@@ -673,8 +665,7 @@ static int __init parse_crash_elf_headers(void)
                /* Determine vmcore size. */
                vmcore_size = get_vmcore_size_elf32(elfcorebuf);
        } else {
-               printk(KERN_WARNING "Warning: Core image elf header is not"
-                                       " sane\n");
+               pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }
        return 0;
@@ -690,7 +681,7 @@ static int __init vmcore_init(void)
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
-               printk(KERN_WARNING "Kdump: vmcore not initialized\n");
+               pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
 
index 7b03294..28ce014 100644 (file)
@@ -16,7 +16,7 @@
 
 static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        unsigned int offset;
        struct buffer_head *bh;
        struct qnx4_inode_entry *de;
index dc59735..8798d06 100644 (file)
@@ -117,7 +117,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
 
 static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *s = inode->i_sb;
        struct qnx6_sb_info *sbi = QNX6_SB(s);
        loff_t pos = filp->f_pos & (QNX6_DIR_ENTRY_SIZE - 1);
index d5378d0..8d5b438 100644 (file)
@@ -202,7 +202,7 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
                                            unsigned long pgoff, unsigned long flags)
 {
        unsigned long maxpages, lpages, nr, loop, ret;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct page **pages = NULL, **ptr, *page;
        loff_t isize;
 
index bb34af3..a698eff 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/syscalls.h>
 #include <linux/pagemap.h>
 #include <linux/splice.h>
+#include <linux/compat.h>
 #include "read_write.h"
 
 #include <asm/uaccess.h>
@@ -163,7 +164,7 @@ EXPORT_SYMBOL(no_llseek);
 
 loff_t default_llseek(struct file *file, loff_t offset, int whence)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        loff_t retval;
 
        mutex_lock(&inode->i_mutex);
@@ -247,6 +248,13 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
        return retval;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
+{
+       return sys_lseek(fd, offset, whence);
+}
+#endif
+
 #ifdef __ARCH_WANT_SYS_LLSEEK
 SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
                unsigned long, offset_low, loff_t __user *, result,
@@ -278,7 +286,6 @@ out_putf:
 }
 #endif
 
-
 /*
  * rw_verify_area doesn't like huge counts. We limit
  * them to something that fits in "int" so that others
@@ -290,7 +297,7 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
        loff_t pos;
        int retval = -EINVAL;
 
-       inode = file->f_path.dentry->d_inode;
+       inode = file_inode(file);
        if (unlikely((ssize_t) count < 0))
                return retval;
        pos = *ppos;
@@ -901,8 +908,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
        if (!(out.file->f_mode & FMODE_WRITE))
                goto fput_out;
        retval = -EINVAL;
-       in_inode = in.file->f_path.dentry->d_inode;
-       out_inode = out.file->f_path.dentry->d_inode;
+       in_inode = file_inode(in.file);
+       out_inode = file_inode(out.file);
        retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
        if (retval < 0)
                goto fput_out;
index 5e69ef5..fee38e0 100644 (file)
@@ -22,7 +22,7 @@
 
 int vfs_readdir(struct file *file, filldir_t filler, void *buf)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int res = -ENOTDIR;
        if (!file->f_op || !file->f_op->readdir)
                goto out;
index 50302d6..6165bd4 100644 (file)
@@ -268,7 +268,7 @@ static ssize_t reiserfs_file_write(struct file *file,       /* the file we are going t
                                                         * new current position before returning. */
                                   )
 {
-       struct inode *inode = file->f_path.dentry->d_inode;     // Inode of the file that we are writing to.
+       struct inode *inode = file_inode(file); // Inode of the file that we are writing to.
        /* To simplify coding at this time, we store
           locked pages in array for now */
        struct reiserfs_transaction_handle th;
index 95d7680..ea5061f 100644 (file)
@@ -1603,10 +1603,10 @@ int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
 
        if (parent && (maxlen < 5)) {
                *lenp = 5;
-               return 255;
+               return FILEID_INVALID;
        } else if (maxlen < 3) {
                *lenp = 3;
-               return 255;
+               return FILEID_INVALID;
        }
 
        data[0] = inode->i_ino;
index 0c21850..15cb5fe 100644 (file)
@@ -21,7 +21,7 @@
  */
 long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        unsigned int flags;
        int err = 0;
 
index e60e870..9cc0740 100644 (file)
@@ -281,7 +281,7 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb)
        }
 #if defined( REISERFS_USE_OIDMAPF )
        if (sb_info->oidmap.use_file && (sb_info->oidmap.mapf != NULL)) {
-               loff_t size = sb_info->oidmap.mapf->f_path.dentry->d_inode->i_size;
+               loff_t size = file_inode(sb_info->oidmap.mapf)->i_size;
                total_used += size / sizeof(reiserfs_oidinterval_d_t);
        }
 #endif
index fd7c5f6..7e8d3a8 100644 (file)
@@ -147,7 +147,7 @@ static const struct address_space_operations romfs_aops = {
  */
 static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *i = filp->f_dentry->d_inode;
+       struct inode *i = file_inode(filp);
        struct romfs_inode ri;
        unsigned long offset, maxoff;
        int j, ino, nextfh;
index f2bc3df..38bb59f 100644 (file)
@@ -308,27 +308,27 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
        mutex_lock(&m->lock);
        m->version = file->f_version;
        switch (whence) {
-               case 1:
-                       offset += file->f_pos;
-               case 0:
-                       if (offset < 0)
-                               break;
-                       retval = offset;
-                       if (offset != m->read_pos) {
-                               while ((retval=traverse(m, offset)) == -EAGAIN)
-                                       ;
-                               if (retval) {
-                                       /* with extreme prejudice... */
-                                       file->f_pos = 0;
-                                       m->read_pos = 0;
-                                       m->version = 0;
-                                       m->index = 0;
-                                       m->count = 0;
-                               } else {
-                                       m->read_pos = offset;
-                                       retval = file->f_pos = offset;
-                               }
+       case SEEK_CUR:
+               offset += file->f_pos;
+       case SEEK_SET:
+               if (offset < 0)
+                       break;
+               retval = offset;
+               if (offset != m->read_pos) {
+                       while ((retval = traverse(m, offset)) == -EAGAIN)
+                               ;
+                       if (retval) {
+                               /* with extreme prejudice... */
+                               file->f_pos = 0;
+                               m->read_pos = 0;
+                               m->version = 0;
+                               m->index = 0;
+                               m->count = 0;
+                       } else {
+                               m->read_pos = offset;
+                               retval = file->f_pos = offset;
                        }
+               }
        }
        file->f_version = m->version;
        mutex_unlock(&m->lock);
@@ -339,7 +339,7 @@ EXPORT_SYMBOL(seq_lseek);
 /**
  *     seq_release -   free the structures associated with sequential file.
  *     @file: file in question
- *     @inode: file->f_path.dentry->d_inode
+ *     @inode: its inode
  *
  *     Frees the structures associated with sequential file; can be used
  *     as ->f_op->release() if you don't have private data to destroy.
index 6909d89..718bd00 100644 (file)
@@ -569,7 +569,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
        return res;
 }
 
-static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
+ssize_t kernel_write(struct file *file, const char *buf, size_t count,
                            loff_t pos)
 {
        mm_segment_t old_fs;
@@ -578,11 +578,12 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
-       res = vfs_write(file, (const char __user *)buf, count, &pos);
+       res = vfs_write(file, (__force const char __user *)buf, count, &pos);
        set_fs(old_fs);
 
        return res;
 }
+EXPORT_SYMBOL(kernel_write);
 
 ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
                                 struct pipe_inode_info *pipe, size_t len,
@@ -1170,7 +1171,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
         * randomly drop data for eg socket -> socket splicing. Use the
         * piped splicing for that!
         */
-       i_mode = in->f_path.dentry->d_inode->i_mode;
+       i_mode = file_inode(in)->i_mode;
        if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
                return -EINVAL;
 
index b381305..57dc70e 100644 (file)
@@ -102,7 +102,7 @@ static int get_dir_index_using_offset(struct super_block *sb,
 
 static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
        u64 block = squashfs_i(inode)->start + msblk->directory_table;
        int offset = squashfs_i(inode)->offset, length, dir_count, size,
index 14f4545..04ce1ac 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -37,17 +37,17 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
 
 EXPORT_SYMBOL(generic_fillattr);
 
-int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+int vfs_getattr(struct path *path, struct kstat *stat)
 {
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = path->dentry->d_inode;
        int retval;
 
-       retval = security_inode_getattr(mnt, dentry);
+       retval = security_inode_getattr(path->mnt, path->dentry);
        if (retval)
                return retval;
 
        if (inode->i_op->getattr)
-               return inode->i_op->getattr(mnt, dentry, stat);
+               return inode->i_op->getattr(path->mnt, path->dentry, stat);
 
        generic_fillattr(inode, stat);
        return 0;
@@ -61,8 +61,7 @@ int vfs_fstat(unsigned int fd, struct kstat *stat)
        int error = -EBADF;
 
        if (f.file) {
-               error = vfs_getattr(f.file->f_path.mnt, f.file->f_path.dentry,
-                                   stat);
+               error = vfs_getattr(&f.file->f_path, stat);
                fdput(f);
        }
        return error;
@@ -89,7 +88,7 @@ retry:
        if (error)
                goto out;
 
-       error = vfs_getattr(path.mnt, path.dentry, stat);
+       error = vfs_getattr(&path, stat);
        path_put(&path);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
index 12f1237..7465d43 100644 (file)
@@ -447,14 +447,13 @@ struct super_block *sget(struct file_system_type *type,
                        void *data)
 {
        struct super_block *s = NULL;
-       struct hlist_node *node;
        struct super_block *old;
        int err;
 
 retry:
        spin_lock(&sb_lock);
        if (test) {
-               hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
+               hlist_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (!grab_super(old))
@@ -554,10 +553,9 @@ void iterate_supers_type(struct file_system_type *type,
        void (*f)(struct super_block *, void *), void *arg)
 {
        struct super_block *sb, *p = NULL;
-       struct hlist_node *node;
 
        spin_lock(&sb_lock);
-       hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
+       hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);
 
@@ -842,7 +840,7 @@ int get_anon_bdev(dev_t *p)
        else if (error)
                return -EAGAIN;
 
-       if ((dev & MAX_IDR_MASK) == (1 << MINORBITS)) {
+       if (dev == (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
index 14eefeb..2c5d663 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -332,7 +332,7 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
        if (!f.file)
                goto out;
 
-       i_mode = f.file->f_path.dentry->d_inode->i_mode;
+       i_mode = file_inode(f.file)->i_mode;
        ret = -ESPIPE;
        if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
                        !S_ISLNK(i_mode))
index 614b2b5..15c68f9 100644 (file)
@@ -70,7 +70,7 @@ static ssize_t
 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
 {
        struct bin_buffer *bb = file->private_data;
-       int size = file->f_path.dentry->d_inode->i_size;
+       int size = file_inode(file)->i_size;
        loff_t offs = *off;
        int count = min_t(size_t, bytes, PAGE_SIZE);
        char *temp;
@@ -140,7 +140,7 @@ static ssize_t write(struct file *file, const char __user *userbuf,
                     size_t bytes, loff_t *off)
 {
        struct bin_buffer *bb = file->private_data;
-       int size = file->f_path.dentry->d_inode->i_size;
+       int size = file_inode(file)->i_size;
        loff_t offs = *off;
        int count = min_t(size_t, bytes, PAGE_SIZE);
        char *temp;
@@ -461,15 +461,14 @@ const struct file_operations bin_fops = {
 void unmap_bin_file(struct sysfs_dirent *attr_sd)
 {
        struct bin_buffer *bb;
-       struct hlist_node *tmp;
 
        if (sysfs_type(attr_sd) != SYSFS_KOBJ_BIN_ATTR)
                return;
 
        mutex_lock(&sysfs_bin_lock);
 
-       hlist_for_each_entry(bb, tmp, &attr_sd->s_bin_attr.buffers, list) {
-               struct inode *inode = bb->file->f_path.dentry->d_inode;
+       hlist_for_each_entry(bb, &attr_sd->s_bin_attr.buffers, list) {
+               struct inode *inode = file_inode(bb->file);
 
                unmap_mapping_range(inode->i_mapping, 0, 0, 1);
        }
index a77c421..3799e8d 100644 (file)
@@ -68,7 +68,7 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n)
 static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
        unsigned long pos = filp->f_pos;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        unsigned offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
index 0e606b1..32b644f 100644 (file)
@@ -383,10 +383,10 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
        return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;
 }
 
-#ifdef COMPAT
+#ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
-               const struct itimerspec __user *, utmr,
-               struct itimerspec __user *, otmr)
+               const struct compat_itimerspec __user *, utmr,
+               struct compat_itimerspec __user *, otmr)
 {
        struct itimerspec new, old;
        int ret;
@@ -402,12 +402,12 @@ COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
 }
 
 COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
-               struct itimerspec __user *, otmr)
+               struct compat_itimerspec __user *, otmr)
 {
        struct itimerspec kotmr;
        int ret = do_timerfd_gettime(ufd, &kotmr);
        if (ret)
                return ret;
-       return put_compat_itimerspec(otmr, &t) ? -EFAULT: 0;
+       return put_compat_itimerspec(otmr, &kotmr) ? -EFAULT: 0;
 }
 #endif
index 12817ff..7f60e90 100644 (file)
@@ -2459,7 +2459,7 @@ error_dump:
 
 static inline int chance(unsigned int n, unsigned int out_of)
 {
-       return !!((random32() % out_of) + 1 <= n);
+       return !!((prandom_u32() % out_of) + 1 <= n);
 
 }
 
@@ -2477,13 +2477,13 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
                        if (chance(1, 2)) {
                                d->pc_delay = 1;
                                /* Fail withing 1 minute */
-                               delay = random32() % 60000;
+                               delay = prandom_u32() % 60000;
                                d->pc_timeout = jiffies;
                                d->pc_timeout += msecs_to_jiffies(delay);
                                ubifs_warn("failing after %lums", delay);
                        } else {
                                d->pc_delay = 2;
-                               delay = random32() % 10000;
+                               delay = prandom_u32() % 10000;
                                /* Fail within 10000 operations */
                                d->pc_cnt_max = delay;
                                ubifs_warn("failing after %lu calls", delay);
@@ -2563,7 +2563,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
        unsigned int from, to, ffs = chance(1, 2);
        unsigned char *p = (void *)buf;
 
-       from = random32() % (len + 1);
+       from = prandom_u32() % (len + 1);
        /* Corruption may only span one max. write unit */
        to = min(len, ALIGN(from, c->max_write_size));
 
index 8a57477..de08c92 100644 (file)
@@ -352,7 +352,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
        struct qstr nm;
        union ubifs_key key;
        struct ubifs_dent_node *dent;
-       struct inode *dir = file->f_path.dentry->d_inode;
+       struct inode *dir = file_inode(file);
        struct ubifs_info *c = dir->i_sb->s_fs_info;
 
        dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
index 4f6493c..f12189d 100644 (file)
@@ -1444,7 +1444,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
                                 struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct timespec now = ubifs_current_time(inode);
        struct ubifs_budget_req req = { .new_page = 1 };
index 1a7e2d8..648b143 100644 (file)
@@ -147,7 +147,7 @@ out_unlock:
 long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        int flags, err;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        switch (cmd) {
        case FS_IOC_GETFLAGS:
index 9daaeef..4b826ab 100644 (file)
@@ -2007,28 +2007,28 @@ static int dbg_populate_lsave(struct ubifs_info *c)
 
        if (!dbg_is_chk_gen(c))
                return 0;
-       if (random32() & 3)
+       if (prandom_u32() & 3)
                return 0;
 
        for (i = 0; i < c->lsave_cnt; i++)
                c->lsave[i] = c->main_first;
 
        list_for_each_entry(lprops, &c->empty_list, list)
-               c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
+               c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
        list_for_each_entry(lprops, &c->freeable_list, list)
-               c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
+               c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
        list_for_each_entry(lprops, &c->frdi_idx_list, list)
-               c->lsave[random32() % c->lsave_cnt] = lprops->lnum;
+               c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
 
        heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
        for (i = 0; i < heap->cnt; i++)
-               c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
+               c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
        heap = &c->lpt_heap[LPROPS_DIRTY - 1];
        for (i = 0; i < heap->cnt; i++)
-               c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
+               c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
        heap = &c->lpt_heap[LPROPS_FREE - 1];
        for (i = 0; i < heap->cnt; i++)
-               c->lsave[random32() % c->lsave_cnt] = heap->arr[i]->lnum;
+               c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
 
        return 1;
 }
index 769701c..ba32da3 100644 (file)
@@ -126,13 +126,14 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
                else if (inum > o->inum)
                        p = p->rb_right;
                else {
-                       if (o->dnext) {
+                       if (o->del) {
                                spin_unlock(&c->orphan_lock);
                                dbg_gen("deleted twice ino %lu",
                                        (unsigned long)inum);
                                return;
                        }
-                       if (o->cnext) {
+                       if (o->cmt) {
+                               o->del = 1;
                                o->dnext = c->orph_dnext;
                                c->orph_dnext = o;
                                spin_unlock(&c->orphan_lock);
@@ -172,7 +173,9 @@ int ubifs_orphan_start_commit(struct ubifs_info *c)
        last = &c->orph_cnext;
        list_for_each_entry(orphan, &c->orph_new, new_list) {
                ubifs_assert(orphan->new);
+               ubifs_assert(!orphan->cmt);
                orphan->new = 0;
+               orphan->cmt = 1;
                *last = orphan;
                last = &orphan->cnext;
        }
@@ -299,7 +302,9 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
        cnext = c->orph_cnext;
        for (i = 0; i < cnt; i++) {
                orphan = cnext;
+               ubifs_assert(orphan->cmt);
                orph->inos[i] = cpu_to_le64(orphan->inum);
+               orphan->cmt = 0;
                cnext = orphan->cnext;
                orphan->cnext = NULL;
        }
@@ -378,6 +383,7 @@ static int consolidate(struct ubifs_info *c)
                list_for_each_entry(orphan, &c->orph_list, list) {
                        if (orphan->new)
                                continue;
+                       orphan->cmt = 1;
                        *last = orphan;
                        last = &orphan->cnext;
                        cnt += 1;
@@ -442,6 +448,7 @@ static void erase_deleted(struct ubifs_info *c)
                orphan = dnext;
                dnext = orphan->dnext;
                ubifs_assert(!orphan->new);
+               ubifs_assert(orphan->del);
                rb_erase(&orphan->rb, &c->orph_tree);
                list_del(&orphan->list);
                c->tot_orphans -= 1;
@@ -531,6 +538,7 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
        rb_link_node(&orphan->rb, parent, p);
        rb_insert_color(&orphan->rb, &c->orph_tree);
        list_add_tail(&orphan->list, &c->orph_list);
+       orphan->del = 1;
        orphan->dnext = c->orph_dnext;
        c->orph_dnext = orphan;
        dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
index 523bbad..52a6559 100644 (file)
@@ -683,7 +683,7 @@ static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
                c->ilebs[c->ileb_cnt++] = lnum;
                dbg_cmt("LEB %d", lnum);
        }
-       if (dbg_is_chk_index(c) && !(random32() & 7))
+       if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
                return -ENOSPC;
        return 0;
 }
index d133c27..b2babce 100644 (file)
@@ -904,6 +904,8 @@ struct ubifs_budget_req {
  * @dnext: next orphan to delete
  * @inum: inode number
  * @new: %1 => added since the last commit, otherwise %0
+ * @cmt: %1 => commit pending, otherwise %0
+ * @del: %1 => delete pending, otherwise %0
  */
 struct ubifs_orphan {
        struct rb_node rb;
@@ -912,7 +914,9 @@ struct ubifs_orphan {
        struct ubifs_orphan *cnext;
        struct ubifs_orphan *dnext;
        ino_t inum;
-       int new;
+       unsigned new:1;
+       unsigned cmt:1;
+       unsigned del:1;
 };
 
 /**
index eb8bfe2..b3e93f5 100644 (file)
@@ -186,7 +186,7 @@ out:
 
 static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
-       struct inode *dir = filp->f_path.dentry->d_inode;
+       struct inode *dir = file_inode(filp);
        int result;
 
        if (filp->f_pos == 0) {
index 77b5953..29569dd 100644 (file)
@@ -139,7 +139,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 {
        ssize_t retval;
        struct file *file = iocb->ki_filp;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int err, pos;
        size_t count = iocb->ki_left;
        struct udf_inode_info *iinfo = UDF_I(inode);
@@ -178,7 +178,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 
 long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        long old_block, new_block;
        int result = -EINVAL;
 
@@ -204,7 +204,7 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                goto out;
        case UDF_RELOCATE_BLOCKS:
                if (!capable(CAP_SYS_ADMIN)) {
-                       result = -EACCES;
+                       result = -EPERM;
                        goto out;
                }
                if (get_user(old_block, (long __user *)arg)) {
index cbae1ed..7a12e48 100644 (file)
@@ -67,6 +67,74 @@ static void udf_update_extents(struct inode *,
                               struct extent_position *);
 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
+static void __udf_clear_extent_cache(struct inode *inode)
+{
+       struct udf_inode_info *iinfo = UDF_I(inode);
+
+       if (iinfo->cached_extent.lstart != -1) {
+               brelse(iinfo->cached_extent.epos.bh);
+               iinfo->cached_extent.lstart = -1;
+       }
+}
+
+/* Invalidate extent cache */
+static void udf_clear_extent_cache(struct inode *inode)
+{
+       struct udf_inode_info *iinfo = UDF_I(inode);
+
+       spin_lock(&iinfo->i_extent_cache_lock);
+       __udf_clear_extent_cache(inode);
+       spin_unlock(&iinfo->i_extent_cache_lock);
+}
+
+/* Return contents of extent cache */
+static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
+                                loff_t *lbcount, struct extent_position *pos)
+{
+       struct udf_inode_info *iinfo = UDF_I(inode);
+       int ret = 0;
+
+       spin_lock(&iinfo->i_extent_cache_lock);
+       if ((iinfo->cached_extent.lstart <= bcount) &&
+           (iinfo->cached_extent.lstart != -1)) {
+               /* Cache hit */
+               *lbcount = iinfo->cached_extent.lstart;
+               memcpy(pos, &iinfo->cached_extent.epos,
+                      sizeof(struct extent_position));
+               if (pos->bh)
+                       get_bh(pos->bh);
+               ret = 1;
+       }
+       spin_unlock(&iinfo->i_extent_cache_lock);
+       return ret;
+}
+
+/* Add extent to extent cache */
+static void udf_update_extent_cache(struct inode *inode, loff_t estart,
+                                   struct extent_position *pos, int next_epos)
+{
+       struct udf_inode_info *iinfo = UDF_I(inode);
+
+       spin_lock(&iinfo->i_extent_cache_lock);
+       /* Invalidate previously cached extent */
+       __udf_clear_extent_cache(inode);
+       if (pos->bh)
+               get_bh(pos->bh);
+       memcpy(&iinfo->cached_extent.epos, pos,
+              sizeof(struct extent_position));
+       iinfo->cached_extent.lstart = estart;
+       if (next_epos)
+               switch (iinfo->i_alloc_type) {
+               case ICBTAG_FLAG_AD_SHORT:
+                       iinfo->cached_extent.epos.offset -=
+                       sizeof(struct short_ad);
+                       break;
+               case ICBTAG_FLAG_AD_LONG:
+                       iinfo->cached_extent.epos.offset -=
+                       sizeof(struct long_ad);
+               }
+       spin_unlock(&iinfo->i_extent_cache_lock);
+}
 
 void udf_evict_inode(struct inode *inode)
 {
@@ -90,6 +158,7 @@ void udf_evict_inode(struct inode *inode)
        }
        kfree(iinfo->i_ext.i_data);
        iinfo->i_ext.i_data = NULL;
+       udf_clear_extent_cache(inode);
        if (want_delete) {
                udf_free_inode(inode);
        }
@@ -105,6 +174,7 @@ static void udf_write_failed(struct address_space *mapping, loff_t to)
                truncate_pagecache(inode, to, isize);
                if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
                        down_write(&iinfo->i_data_sem);
+                       udf_clear_extent_cache(inode);
                        udf_truncate_extents(inode);
                        up_write(&iinfo->i_data_sem);
                }
@@ -372,7 +442,7 @@ static int udf_get_block(struct inode *inode, sector_t block,
                iinfo->i_next_alloc_goal++;
        }
 
-
+       udf_clear_extent_cache(inode);
        phys = inode_getblk(inode, block, &err, &new);
        if (!phys)
                goto abort;
@@ -1171,6 +1241,7 @@ set_size:
        } else {
                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
                        down_write(&iinfo->i_data_sem);
+                       udf_clear_extent_cache(inode);
                        memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
                               0x00, bsize - newsize -
                               udf_file_entry_alloc_offset(inode));
@@ -1184,6 +1255,7 @@ set_size:
                if (err)
                        return err;
                down_write(&iinfo->i_data_sem);
+               udf_clear_extent_cache(inode);
                truncate_setsize(inode, newsize);
                udf_truncate_extents(inode);
                up_write(&iinfo->i_data_sem);
@@ -2156,11 +2228,12 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
        struct udf_inode_info *iinfo;
 
        iinfo = UDF_I(inode);
-       pos->offset = 0;
-       pos->block = iinfo->i_location;
-       pos->bh = NULL;
+       if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
+               pos->offset = 0;
+               pos->block = iinfo->i_location;
+               pos->bh = NULL;
+       }
        *elen = 0;
-
        do {
                etype = udf_next_aext(inode, pos, eloc, elen, 1);
                if (etype == -1) {
@@ -2170,7 +2243,8 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
                }
                lbcount += *elen;
        } while (lbcount <= bcount);
-
+       /* update extent cache */
+       udf_update_extent_cache(inode, lbcount - *elen, pos, 1);
        *offset = (bcount + *elen - lbcount) >> blocksize_bits;
 
        return etype;
index 95fee27..102c072 100644 (file)
@@ -1270,10 +1270,10 @@ static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
 
        if (parent && (len < 5)) {
                *lenp = 5;
-               return 255;
+               return FILEID_INVALID;
        } else if (len < 3) {
                *lenp = 3;
-               return 255;
+               return FILEID_INVALID;
        }
 
        *lenp = 3;
index e9be396..bc5b30a 100644 (file)
@@ -134,6 +134,8 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
        ei->i_next_alloc_goal = 0;
        ei->i_strat4096 = 0;
        init_rwsem(&ei->i_data_sem);
+       ei->cached_extent.lstart = -1;
+       spin_lock_init(&ei->i_extent_cache_lock);
 
        return &ei->vfs_inode;
 }
@@ -1021,7 +1023,6 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
        if (bitmap == NULL)
                return NULL;
 
-       bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
        bitmap->s_nr_groups = nr_groups;
        return bitmap;
 }
@@ -1079,8 +1080,6 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                if (!bitmap)
                        return 1;
                map->s_uspace.s_bitmap = bitmap;
-               bitmap->s_extLength = le32_to_cpu(
-                               phd->unallocSpaceBitmap.extLength);
                bitmap->s_extPosition = le32_to_cpu(
                                phd->unallocSpaceBitmap.extPosition);
                map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
@@ -1115,8 +1114,6 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                if (!bitmap)
                        return 1;
                map->s_fspace.s_bitmap = bitmap;
-               bitmap->s_extLength = le32_to_cpu(
-                               phd->freedSpaceBitmap.extLength);
                bitmap->s_extPosition = le32_to_cpu(
                                phd->freedSpaceBitmap.extPosition);
                map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
@@ -1866,6 +1863,8 @@ static void udf_open_lvid(struct super_block *sb)
        mark_buffer_dirty(bh);
        sbi->s_lvid_dirty = 0;
        mutex_unlock(&sbi->s_alloc_mutex);
+       /* Make opening of filesystem visible on the media immediately */
+       sync_dirty_buffer(bh);
 }
 
 static void udf_close_lvid(struct super_block *sb)
@@ -1906,6 +1905,8 @@ static void udf_close_lvid(struct super_block *sb)
        mark_buffer_dirty(bh);
        sbi->s_lvid_dirty = 0;
        mutex_unlock(&sbi->s_alloc_mutex);
+       /* Make closing of filesystem visible on the media immediately */
+       sync_dirty_buffer(bh);
 }
 
 u64 lvid_get_unique_id(struct super_block *sb)
index bb8309d..b5cd8ed 100644 (file)
@@ -1,6 +1,19 @@
 #ifndef _UDF_I_H
 #define _UDF_I_H
 
+struct extent_position {
+       struct buffer_head *bh;
+       uint32_t offset;
+       struct kernel_lb_addr block;
+};
+
+struct udf_ext_cache {
+       /* Extent position */
+       struct extent_position epos;
+       /* Start logical offset in bytes */
+       loff_t lstart;
+};
+
 /*
  * The i_data_sem and i_mutex serve for protection of allocation information
  * of a regular files and symlinks. This includes all extents belonging to
@@ -35,6 +48,9 @@ struct udf_inode_info {
                __u8            *i_data;
        } i_ext;
        struct rw_semaphore     i_data_sem;
+       struct udf_ext_cache cached_extent;
+       /* Spinlock for protecting extent cache */
+       spinlock_t i_extent_cache_lock;
        struct inode vfs_inode;
 };
 
index 5f02722..ed401e9 100644 (file)
@@ -80,10 +80,9 @@ struct udf_virtual_data {
 };
 
 struct udf_bitmap {
-       __u32                   s_extLength;
        __u32                   s_extPosition;
-       __u16                   s_nr_groups;
-       struct buffer_head      **s_block_bitmap;
+       int                     s_nr_groups;
+       struct buffer_head      *s_block_bitmap[0];
 };
 
 struct udf_part_map {
index de038da..be7dabb 100644 (file)
@@ -113,11 +113,6 @@ struct ustr {
        uint8_t u_len;
 };
 
-struct extent_position {
-       struct buffer_head *bh;
-       uint32_t offset;
-       struct kernel_lb_addr block;
-};
 
 /* super.c */
 
index dbc9099..3a75ca0 100644 (file)
@@ -433,7 +433,7 @@ static int
 ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
        loff_t pos = filp->f_pos;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct super_block *sb = inode->i_sb;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
index a8bd26b..f852b08 100644 (file)
@@ -78,14 +78,14 @@ xfs_swapext(
                goto out_put_tmp_file;
        }
 
-       if (IS_SWAPFILE(f.file->f_path.dentry->d_inode) ||
-           IS_SWAPFILE(tmp.file->f_path.dentry->d_inode)) {
+       if (IS_SWAPFILE(file_inode(f.file)) ||
+           IS_SWAPFILE(file_inode(tmp.file))) {
                error = XFS_ERROR(EINVAL);
                goto out_put_tmp_file;
        }
 
-       ip = XFS_I(f.file->f_path.dentry->d_inode);
-       tip = XFS_I(tmp.file->f_path.dentry->d_inode);
+       ip = XFS_I(file_inode(f.file));
+       tip = XFS_I(file_inode(tmp.file));
 
        if (ip->i_mount != tip->i_mount) {
                error = XFS_ERROR(EINVAL);
index a836118..c585bc6 100644 (file)
@@ -48,7 +48,7 @@ static int xfs_fileid_length(int fileid_type)
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
                return 6;
        }
-       return 255; /* invalid */
+       return FILEID_INVALID;
 }
 
 STATIC int
@@ -90,7 +90,7 @@ xfs_fs_encode_fh(
        len = xfs_fileid_length(fileid_type);
        if (*max_len < len) {
                *max_len = len;
-               return 255;
+               return FILEID_INVALID;
        }
        *max_len = len;
 
index 67284ed..f03bf1a 100644 (file)
@@ -811,7 +811,7 @@ xfs_file_fallocate(
        loff_t          offset,
        loff_t          len)
 {
-       struct inode    *inode = file->f_path.dentry->d_inode;
+       struct inode    *inode = file_inode(file);
        long            error;
        loff_t          new_size = 0;
        xfs_flock64_t   bf;
@@ -912,7 +912,7 @@ xfs_file_readdir(
        void            *dirent,
        filldir_t       filldir)
 {
-       struct inode    *inode = filp->f_path.dentry->d_inode;
+       struct inode    *inode = file_inode(filp);
        xfs_inode_t     *ip = XFS_I(inode);
        int             error;
        size_t          bufsize;
index c1c3ef8..d681e34 100644 (file)
@@ -80,7 +80,7 @@ xfs_find_handle(
                f = fdget(hreq->fd);
                if (!f.file)
                        return -EBADF;
-               inode = f.file->f_path.dentry->d_inode;
+               inode = file_inode(f.file);
        } else {
                error = user_lpath((const char __user *)hreq->path, &path);
                if (error)
@@ -168,7 +168,7 @@ xfs_handle_to_dentry(
        /*
         * Only allow handle opens under a directory.
         */
-       if (!S_ISDIR(parfilp->f_path.dentry->d_inode->i_mode))
+       if (!S_ISDIR(file_inode(parfilp)->i_mode))
                return ERR_PTR(-ENOTDIR);
 
        if (hlen != sizeof(xfs_handle_t))
@@ -1334,7 +1334,7 @@ xfs_file_ioctl(
        unsigned int            cmd,
        unsigned long           p)
 {
-       struct inode            *inode = filp->f_path.dentry->d_inode;
+       struct inode            *inode = file_inode(filp);
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        void                    __user *arg = (void __user *)p;
index 1244274..63b8fc4 100644 (file)
@@ -530,7 +530,7 @@ xfs_file_compat_ioctl(
        unsigned                cmd,
        unsigned long           p)
 {
-       struct inode            *inode = filp->f_path.dentry->d_inode;
+       struct inode            *inode = file_inode(filp);
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        void                    __user *arg = (void __user *)p;
index 96fcbb8..d1dba7c 100644 (file)
@@ -1442,9 +1442,8 @@ xlog_recover_find_tid(
        xlog_tid_t              tid)
 {
        xlog_recover_t          *trans;
-       struct hlist_node       *n;
 
-       hlist_for_each_entry(trans, n, head, r_list) {
+       hlist_for_each_entry(trans, head, r_list) {
                if (trans->r_log_tid == tid)
                        return trans;
        }
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
new file mode 100644 (file)
index 0000000..720446c
--- /dev/null
@@ -0,0 +1,72 @@
+#include <acpi/apei.h>
+#include <acpi/hed.h>
+
+/*
+ * One struct ghes is created for each generic hardware error source.
+ * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
+ * handler.
+ *
+ * estatus: memory buffer for error status block, allocated during
+ * HEST parsing.
+ */
+#define GHES_TO_CLEAR          0x0001
+#define GHES_EXITING           0x0002
+
+struct ghes {
+       struct acpi_hest_generic *generic;
+       struct acpi_hest_generic_status *estatus;
+       u64 buffer_paddr;
+       unsigned long flags;
+       union {
+               struct list_head list;
+               struct timer_list timer;
+               unsigned int irq;
+       };
+};
+
+struct ghes_estatus_node {
+       struct llist_node llnode;
+       struct acpi_hest_generic *generic;
+       struct ghes *ghes;
+};
+
+struct ghes_estatus_cache {
+       u32 estatus_len;
+       atomic_t count;
+       struct acpi_hest_generic *generic;
+       unsigned long long time_in;
+       struct rcu_head rcu;
+};
+
+enum {
+       GHES_SEV_NO = 0x0,
+       GHES_SEV_CORRECTED = 0x1,
+       GHES_SEV_RECOVERABLE = 0x2,
+       GHES_SEV_PANIC = 0x3,
+};
+
+/* From drivers/edac/ghes_edac.c */
+
+#ifdef CONFIG_EDAC_GHES
+void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
+                               struct cper_sec_mem_err *mem_err);
+
+int ghes_edac_register(struct ghes *ghes, struct device *dev);
+
+void ghes_edac_unregister(struct ghes *ghes);
+
+#else
+static inline void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
+                                      struct cper_sec_mem_err *mem_err)
+{
+}
+
+static inline int ghes_edac_register(struct ghes *ghes, struct device *dev)
+{
+       return 0;
+}
+
+static inline void ghes_edac_unregister(struct ghes *ghes)
+{
+}
+#endif
index c084767..59811df 100644 (file)
@@ -38,12 +38,15 @@ extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
        csum_partial_copy((src), (dst), (len), (sum))
 #endif
 
+#ifndef ip_fast_csum
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
 
+#ifndef csum_fold
 /*
  * Fold a partial checksum
  */
@@ -54,6 +57,7 @@ static inline __sum16 csum_fold(__wsum csum)
        sum = (sum & 0xffff) + (sum >> 16);
        return (__force __sum16)~sum;
 }
+#endif
 
 #ifndef csum_tcpudp_nofold
 /*
index b6485ca..a8ece9a 100644 (file)
@@ -76,7 +76,7 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
 /*
  * Convert cputime <-> timeval (msec)
  */
-static inline cputime_t timeval_to_cputime(struct timeval *val)
+static inline cputime_t timeval_to_cputime(const struct timeval *val)
 {
        u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
        return (__force cputime_t) ret;
index 8e260cf..ac9da00 100644 (file)
@@ -239,15 +239,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
 #ifndef CONFIG_GENERIC_IOMAP
 #define ioread8(addr)          readb(addr)
 #define ioread16(addr)         readw(addr)
-#define ioread16be(addr)       be16_to_cpu(ioread16(addr))
+#define ioread16be(addr)       __be16_to_cpu(__raw_readw(addr))
 #define ioread32(addr)         readl(addr)
-#define ioread32be(addr)       be32_to_cpu(ioread32(addr))
+#define ioread32be(addr)       __be32_to_cpu(__raw_readl(addr))
 
 #define iowrite8(v, addr)      writeb((v), (addr))
 #define iowrite16(v, addr)     writew((v), (addr))
-#define iowrite16be(v, addr)   iowrite16(be16_to_cpu(v), (addr))
+#define iowrite16be(v, addr)   __raw_writew(__cpu_to_be16(v), addr)
 #define iowrite32(v, addr)     writel((v), (addr))
-#define iowrite32be(v, addr)   iowrite32(be32_to_cpu(v), (addr))
+#define iowrite32be(v, addr)   __raw_writel(__cpu_to_be32(v), addr)
 
 #define ioread8_rep(p, dst, count) \
        insb((unsigned long) (p), (dst), (count))
@@ -346,6 +346,7 @@ extern void ioport_unmap(void __iomem *p);
 #define xlate_dev_kmem_ptr(p)  p
 #define xlate_dev_mem_ptr(p)   __va(p)
 
+#ifdef CONFIG_VIRT_TO_BUS
 #ifndef virt_to_bus
 static inline unsigned long virt_to_bus(volatile void *address)
 {
@@ -357,6 +358,7 @@ static inline void *bus_to_virt(unsigned long address)
        return (void *) address;
 }
 #endif
+#endif
 
 #ifndef memset_io
 #define memset_io(a, b, c)     memset(__io_virt(a), (b), (c))
index 9788568..c184aa8 100644 (file)
@@ -7,7 +7,6 @@
  * address space, e.g. all NOMMU machines.
  */
 #include <linux/sched.h>
-#include <linux/mm.h>
 #include <linux/string.h>
 
 #include <asm/segment.h>
@@ -32,7 +31,9 @@ static inline void set_fs(mm_segment_t fs)
 }
 #endif
 
+#ifndef segment_eq
 #define segment_eq(a, b) ((a).seg == (b).seg)
+#endif
 
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
@@ -168,12 +169,18 @@ static inline __must_check long __copy_to_user(void __user *to,
                -EFAULT;                                        \
 })
 
+#ifndef __put_user_fn
+
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
        size = __copy_to_user(ptr, x, size);
        return size ? -EFAULT : size;
 }
 
+#define __put_user_fn(sz, u, k)        __put_user_fn(sz, u, k)
+
+#endif
+
 extern int __put_user_bad(void) __attribute__((noreturn));
 
 #define __get_user(x, ptr)                                     \
@@ -224,12 +231,17 @@ extern int __put_user_bad(void) __attribute__((noreturn));
                -EFAULT;                                        \
 })
 
+#ifndef __get_user_fn
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
        size = __copy_from_user(x, ptr, size);
        return size ? -EFAULT : size;
 }
 
+#define __get_user_fn(sz, u, k)        __get_user_fn(sz, u, k)
+
+#endif
+
 extern int __get_user_bad(void) __attribute__((noreturn));
 
 #ifndef __copy_from_user_inatomic
index 257c55e..4077b5d 100644 (file)
  * but it doesn't work on all toolchains, so we just do it by hand
  */
 #ifndef cond_syscall
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#ifdef CONFIG_SYMBOL_PREFIX
+#define __SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
+#else
+#define __SYMBOL_PREFIX
+#endif
+#define cond_syscall(x) asm(".weak\t" __SYMBOL_PREFIX #x "\n\t" \
+                           ".set\t" __SYMBOL_PREFIX #x "," \
+                           __SYMBOL_PREFIX "sys_ni_syscall")
 #endif
diff --git a/include/clocksource/metag_generic.h b/include/clocksource/metag_generic.h
new file mode 100644 (file)
index 0000000..ac17e7d
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2013 Imaginaton Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __CLKSOURCE_METAG_GENERIC_H
+#define __CLKSOURCE_METAG_GENERIC_H
+
+extern int metag_generic_timer_init(void);
+
+#endif /* __CLKSOURCE_METAG_GENERIC_H */
index f46cfd7..bcbdd74 100644 (file)
@@ -485,14 +485,6 @@ static inline bool acpi_driver_match_device(struct device *dev,
 
 #endif /* !CONFIG_ACPI */
 
-#ifdef CONFIG_ACPI_NUMA
-void __init early_parse_srat(void);
-#else
-static inline void early_parse_srat(void)
-{
-}
-#endif
-
 #ifdef CONFIG_ACPI
 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
                               u32 pm1a_ctrl,  u32 pm1b_ctrl));
index e5dfc25..b708786 100644 (file)
@@ -1,7 +1,10 @@
 #ifndef LINUX_BCM47XX_WDT_H_
 #define LINUX_BCM47XX_WDT_H_
 
+#include <linux/notifier.h>
+#include <linux/timer.h>
 #include <linux/types.h>
+#include <linux/watchdog.h>
 
 
 struct bcm47xx_wdt {
@@ -10,6 +13,12 @@ struct bcm47xx_wdt {
        u32 max_timer_ms;
 
        void *driver_data;
+
+       struct watchdog_device wdd;
+       struct notifier_block notifier;
+
+       struct timer_list soft_timer;
+       atomic_t soft_ticks;
 };
 
 static inline void *bcm47xx_wdt_get_drvdata(struct bcm47xx_wdt *wdt)
index 1d002b5..8390c47 100644 (file)
@@ -528,6 +528,7 @@ struct bcma_sflash {
        u32 size;
 
        struct mtd_info *mtd;
+       void *priv;
 };
 #endif
 
index 0530b98..c3a0914 100644 (file)
@@ -111,7 +111,6 @@ extern int suid_dumpable;
 extern int setup_arg_pages(struct linux_binprm * bprm,
                           unsigned long stack_top,
                           int executable_stack);
-extern int bprm_mm_init(struct linux_binprm *bprm);
 extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc, const char *const *argv,
                               struct linux_binprm *bprm);
index f94bc83..78feda9 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/gfp.h>
 #include <linux/bsg.h>
 #include <linux/smp.h>
+#include <linux/rcupdate.h>
 
 #include <asm/scatterlist.h>
 
@@ -437,6 +438,7 @@ struct request_queue {
        /* Throttle data */
        struct throtl_data *td;
 #endif
+       struct rcu_head         rcu_head;
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
@@ -974,7 +976,6 @@ struct blk_plug {
        unsigned long magic; /* detect uninitialized use-cases */
        struct list_head list; /* requests */
        struct list_head cb_list; /* md requires an unplug callback */
-       unsigned int should_sort; /* list to be sorted before flushing? */
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
index 7c2e030..0ea61e0 100644 (file)
@@ -12,6 +12,7 @@
 
 struct blk_trace {
        int trace_state;
+       bool rq_based;
        struct rchan *rchan;
        unsigned long __percpu *sequence;
        unsigned char __percpu *msg_data;
diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h
new file mode 100644 (file)
index 0000000..22d7991
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _LINUX_BTRFS_H
+#define _LINUX_BTRFS_H
+
+#include <uapi/linux/btrfs.h>
+
+#endif /* _LINUX_BTRFS_H */
index 458f497..5afc4f9 100644 (file)
@@ -126,7 +126,6 @@ BUFFER_FNS(Write_EIO, write_io_error)
 BUFFER_FNS(Unwritten, unwritten)
 
 #define bh_offset(bh)          ((unsigned long)(bh)->b_data & ~PAGE_MASK)
-#define touch_buffer(bh)       mark_page_accessed(bh->b_page)
 
 /* If we *know* page->private refers to buffer_heads */
 #define page_buffers(page)                                     \
@@ -142,6 +141,7 @@ BUFFER_FNS(Unwritten, unwritten)
 
 void mark_buffer_dirty(struct buffer_head *bh);
 void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
+void touch_buffer(struct buffer_head *bh);
 void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
 int try_to_free_buffers(struct page *);
index dad579b..76554ce 100644 (file)
 #define CEPH_FEATURE_MONNAMES       (1<<5)
 #define CEPH_FEATURE_RECONNECT_SEQ  (1<<6)
 #define CEPH_FEATURE_DIRLAYOUTHASH  (1<<7)
-/* bits 8-17 defined by user-space; not supported yet here */
+#define CEPH_FEATURE_OBJECTLOCATOR  (1<<8)
+#define CEPH_FEATURE_PGID64         (1<<9)
+#define CEPH_FEATURE_INCSUBOSDMAP   (1<<10)
+#define CEPH_FEATURE_PGPOOL3        (1<<11)
+#define CEPH_FEATURE_OSDREPLYMUX    (1<<12)
+#define CEPH_FEATURE_OSDENC         (1<<13)
+#define CEPH_FEATURE_OMAP           (1<<14)
+#define CEPH_FEATURE_MONENC         (1<<15)
+#define CEPH_FEATURE_QUERY_T        (1<<16)
+#define CEPH_FEATURE_INDEP_PG_MAP   (1<<17)
 #define CEPH_FEATURE_CRUSH_TUNABLES (1<<18)
+#define CEPH_FEATURE_CHUNKY_SCRUB   (1<<19)
+#define CEPH_FEATURE_MON_NULLROUTE  (1<<20)
+#define CEPH_FEATURE_MON_GV         (1<<21)
+#define CEPH_FEATURE_BACKFILL_RESERVATION (1<<22)
+#define CEPH_FEATURE_MSG_AUTH      (1<<23)
+#define CEPH_FEATURE_RECOVERY_RESERVATION (1<<24)
+#define CEPH_FEATURE_CRUSH_TUNABLES2 (1<<25)
+#define CEPH_FEATURE_CREATEPOOLID   (1<<26)
+#define CEPH_FEATURE_REPLY_CREATE_INODE   (1<<27)
+#define CEPH_FEATURE_OSD_HBMSGS     (1<<28)
+#define CEPH_FEATURE_MDSENC         (1<<29)
+#define CEPH_FEATURE_OSDHASHPSPOOL  (1<<30)
 
 /*
  * Features supported.
  */
 #define CEPH_FEATURES_SUPPORTED_DEFAULT  \
-       (CEPH_FEATURE_NOSRCADDR |        \
-        CEPH_FEATURE_CRUSH_TUNABLES)
+       (CEPH_FEATURE_NOSRCADDR |               \
+        CEPH_FEATURE_PGID64 |                  \
+        CEPH_FEATURE_PGPOOL3 |                 \
+        CEPH_FEATURE_OSDENC |                  \
+        CEPH_FEATURE_CRUSH_TUNABLES |          \
+        CEPH_FEATURE_CRUSH_TUNABLES2 |         \
+        CEPH_FEATURE_REPLY_CREATE_INODE |      \
+        CEPH_FEATURE_OSDHASHPSPOOL)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT   \
-       (CEPH_FEATURE_NOSRCADDR)
+       (CEPH_FEATURE_NOSRCADDR |        \
+        CEPH_FEATURE_PGID64 |           \
+        CEPH_FEATURE_PGPOOL3 |          \
+        CEPH_FEATURE_OSDENC)
 #endif
index cf6f4d9..2ad7b86 100644 (file)
  * internal cluster protocols separately from the public,
  * client-facing protocol.
  */
-#define CEPH_OSD_PROTOCOL     8 /* cluster internal */
-#define CEPH_MDS_PROTOCOL    12 /* cluster internal */
-#define CEPH_MON_PROTOCOL     5 /* cluster internal */
 #define CEPH_OSDC_PROTOCOL   24 /* server/client */
 #define CEPH_MDSC_PROTOCOL   32 /* server/client */
 #define CEPH_MONC_PROTOCOL   15 /* server/client */
 
 
-#define CEPH_INO_ROOT  1
-#define CEPH_INO_CEPH  2        /* hidden .ceph dir */
+#define CEPH_INO_ROOT   1
+#define CEPH_INO_CEPH   2       /* hidden .ceph dir */
+#define CEPH_INO_DOTDOT 3      /* used by ceph fuse for parent (..) */
 
 /* arbitrary limit on max # of monitors (cluster of 3 is typical) */
 #define CEPH_MAX_MON   31
@@ -51,7 +49,7 @@ struct ceph_file_layout {
        __le32 fl_object_stripe_unit;  /* UNUSED.  for per-object parity, if any */
 
        /* object -> pg layout */
-       __le32 fl_unused;       /* unused; used to be preferred primary (-1) */
+       __le32 fl_unused;       /* unused; used to be preferred primary for pg (-1 for none) */
        __le32 fl_pg_pool;      /* namespace, crush ruleset, rep level */
 } __attribute__ ((packed));
 
@@ -101,6 +99,8 @@ struct ceph_dir_layout {
 #define CEPH_MSG_MON_SUBSCRIBE_ACK      16
 #define CEPH_MSG_AUTH                  17
 #define CEPH_MSG_AUTH_REPLY            18
+#define CEPH_MSG_MON_GET_VERSION        19
+#define CEPH_MSG_MON_GET_VERSION_REPLY  20
 
 /* client <-> mds */
 #define CEPH_MSG_MDS_MAP                21
@@ -220,6 +220,11 @@ struct ceph_mon_subscribe_ack {
        struct ceph_fsid fsid;
 } __attribute__ ((packed));
 
+/*
+ * mdsmap flags
+ */
+#define CEPH_MDSMAP_DOWN    (1<<0)  /* cluster deliberately down */
+
 /*
  * mds states
  *   > 0 -> in
@@ -233,6 +238,7 @@ struct ceph_mon_subscribe_ack {
 #define CEPH_MDS_STATE_CREATING    -6  /* up, creating MDS instance. */
 #define CEPH_MDS_STATE_STARTING    -7  /* up, starting previously stopped mds */
 #define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
+#define CEPH_MDS_STATE_REPLAYONCE   -9 /* up, replaying an active node's journal */
 
 #define CEPH_MDS_STATE_REPLAY       8  /* up, replaying journal. */
 #define CEPH_MDS_STATE_RESOLVE      9  /* up, disambiguating distributed
@@ -264,6 +270,7 @@ extern const char *ceph_mds_state_name(int s);
 #define CEPH_LOCK_IXATTR      2048
 #define CEPH_LOCK_IFLOCK      4096  /* advisory file locks */
 #define CEPH_LOCK_INO         8192  /* immutable inode bits; not a lock */
+#define CEPH_LOCK_IPOLICY     16384 /* policy lock on dirs. MDS internal */
 
 /* client_session ops */
 enum {
@@ -338,6 +345,12 @@ extern const char *ceph_mds_op_name(int op);
 #define CEPH_SETATTR_SIZE  32
 #define CEPH_SETATTR_CTIME 64
 
+/*
+ * Ceph setxattr request flags.
+ */
+#define CEPH_XATTR_CREATE  1
+#define CEPH_XATTR_REPLACE 2
+
 union ceph_mds_request_args {
        struct {
                __le32 mask;                 /* CEPH_CAP_* */
@@ -522,14 +535,17 @@ int ceph_flags_to_mode(int flags);
 #define CEPH_CAP_GWREXTEND  64  /* (file) client can extend EOF */
 #define CEPH_CAP_GLAZYIO   128  /* (file) client can perform lazy io */
 
+#define CEPH_CAP_SIMPLE_BITS  2
+#define CEPH_CAP_FILE_BITS    8
+
 /* per-lock shift */
 #define CEPH_CAP_SAUTH      2
 #define CEPH_CAP_SLINK      4
 #define CEPH_CAP_SXATTR     6
 #define CEPH_CAP_SFILE      8
-#define CEPH_CAP_SFLOCK    20 
+#define CEPH_CAP_SFLOCK    20
 
-#define CEPH_CAP_BITS       22
+#define CEPH_CAP_BITS      22
 
 /* composed values */
 #define CEPH_CAP_AUTH_SHARED  (CEPH_CAP_GSHARED  << CEPH_CAP_SAUTH)
index 63d0928..360d9d0 100644 (file)
@@ -52,10 +52,10 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
        return end >= *p && n <= end - *p;
 }
 
-#define ceph_decode_need(p, end, n, bad)               \
-       do {                                            \
-               if (!likely(ceph_has_room(p, end, n)))  \
-                       goto bad;                       \
+#define ceph_decode_need(p, end, n, bad)                       \
+       do {                                                    \
+               if (!likely(ceph_has_room(p, end, n)))          \
+                       goto bad;                               \
        } while (0)
 
 #define ceph_decode_64_safe(p, end, v, bad)                    \
@@ -99,8 +99,8 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
  *
  * There are two possible failures:
  *   - converting the string would require accessing memory at or
- *     beyond the "end" pointer provided (-E
- *   - memory could not be allocated for the result
+ *     beyond the "end" pointer provided (-ERANGE)
+ *   - memory could not be allocated for the result (-ENOMEM)
  */
 static inline char *ceph_extract_encoded_string(void **p, void *end,
                                                size_t *lenp, gfp_t gfp)
@@ -217,10 +217,10 @@ static inline void ceph_encode_string(void **p, void *end,
        *p += len;
 }
 
-#define ceph_encode_need(p, end, n, bad)               \
-       do {                                            \
-               if (!likely(ceph_has_room(p, end, n)))  \
-                       goto bad;                       \
+#define ceph_encode_need(p, end, n, bad)                       \
+       do {                                                    \
+               if (!likely(ceph_has_room(p, end, n)))          \
+                       goto bad;                               \
        } while (0)
 
 #define ceph_encode_64_safe(p, end, v, bad)                    \
@@ -231,12 +231,17 @@ static inline void ceph_encode_string(void **p, void *end,
 #define ceph_encode_32_safe(p, end, v, bad)                    \
        do {                                                    \
                ceph_encode_need(p, end, sizeof(u32), bad);     \
-               ceph_encode_32(p, v);                   \
+               ceph_encode_32(p, v);                           \
        } while (0)
 #define ceph_encode_16_safe(p, end, v, bad)                    \
        do {                                                    \
                ceph_encode_need(p, end, sizeof(u16), bad);     \
-               ceph_encode_16(p, v);                   \
+               ceph_encode_16(p, v);                           \
+       } while (0)
+#define ceph_encode_8_safe(p, end, v, bad)                     \
+       do {                                                    \
+               ceph_encode_need(p, end, sizeof(u8), bad);      \
+               ceph_encode_8(p, v);                            \
        } while (0)
 
 #define ceph_encode_copy_safe(p, end, pv, n, bad)              \
index 084d3c6..29818fc 100644 (file)
@@ -193,6 +193,8 @@ static inline int calc_pages_for(u64 off, u64 len)
 }
 
 /* ceph_common.c */
+extern bool libceph_compatible(void *data);
+
 extern const char *ceph_msg_type_name(int type);
 extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
 extern struct kmem_cache *ceph_inode_cachep;
@@ -220,7 +222,7 @@ extern int ceph_open_session(struct ceph_client *client);
 /* pagevec.c */
 extern void ceph_release_page_vector(struct page **pages, int num_pages);
 
-extern struct page **ceph_get_direct_page_vector(const char __user *data,
+extern struct page **ceph_get_direct_page_vector(const void __user *data,
                                                 int num_pages,
                                                 bool write_page);
 extern void ceph_put_page_vector(struct page **pages, int num_pages,
@@ -228,15 +230,15 @@ extern void ceph_put_page_vector(struct page **pages, int num_pages,
 extern void ceph_release_page_vector(struct page **pages, int num_pages);
 extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
 extern int ceph_copy_user_to_page_vector(struct page **pages,
-                                        const char __user *data,
+                                        const void __user *data,
                                         loff_t off, size_t len);
-extern int ceph_copy_to_page_vector(struct page **pages,
-                                   const char *data,
+extern void ceph_copy_to_page_vector(struct page **pages,
+                                   const void *data,
                                    loff_t off, size_t len);
-extern int ceph_copy_from_page_vector(struct page **pages,
-                                   char *data,
+extern void ceph_copy_from_page_vector(struct page **pages,
+                                   void *data,
                                    loff_t off, size_t len);
-extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data,
+extern int ceph_copy_page_vector_to_user(struct page **pages, void __user *data,
                                    loff_t off, size_t len);
 extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
 
index cb15b5d..87ed09f 100644 (file)
@@ -29,8 +29,8 @@ struct ceph_mdsmap {
 
        /* which object pools file data can be stored in */
        int m_num_data_pg_pools;
-       u32 *m_data_pg_pools;
-       u32 m_cas_pg_pool;
+       u64 *m_data_pg_pools;
+       u64 m_cas_pg_pool;
 };
 
 static inline struct ceph_entity_addr *
index 14ba5ee..60903e0 100644 (file)
@@ -83,9 +83,11 @@ struct ceph_msg {
        struct list_head list_head;
 
        struct kref kref;
+#ifdef CONFIG_BLOCK
        struct bio  *bio;               /* instead of pages/pagelist */
        struct bio  *bio_iter;          /* bio iterator */
        int bio_seg;                    /* current bio segment */
+#endif /* CONFIG_BLOCK */
        struct ceph_pagelist *trail;    /* the trailing part of the data */
        bool front_is_vmalloc;
        bool more_to_follow;
index d9b880e..1dd5d46 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/ceph/osdmap.h>
 #include <linux/ceph/messenger.h>
 #include <linux/ceph/auth.h>
+#include <linux/ceph/pagelist.h>
 
 /* 
  * Maximum object name size 
@@ -22,7 +23,6 @@ struct ceph_snap_context;
 struct ceph_osd_request;
 struct ceph_osd_client;
 struct ceph_authorizer;
-struct ceph_pagelist;
 
 /*
  * completion callback for async writepages
@@ -47,6 +47,9 @@ struct ceph_osd {
        struct list_head o_keepalive_item;
 };
 
+
+#define CEPH_OSD_MAX_OP 10
+
 /* an in-flight request */
 struct ceph_osd_request {
        u64             r_tid;              /* unique for this client */
@@ -63,9 +66,23 @@ struct ceph_osd_request {
        struct ceph_connection *r_con_filling_msg;
 
        struct ceph_msg  *r_request, *r_reply;
-       int               r_result;
        int               r_flags;     /* any additional flags for the osd */
        u32               r_sent;      /* >0 if r_request is sending/sent */
+       int               r_num_ops;
+
+       /* encoded message content */
+       struct ceph_osd_op *r_request_ops;
+       /* these are updated on each send */
+       __le32           *r_request_osdmap_epoch;
+       __le32           *r_request_flags;
+       __le64           *r_request_pool;
+       void             *r_request_pgid;
+       __le32           *r_request_attempts;
+       struct ceph_eversion *r_request_reassert_version;
+
+       int               r_result;
+       int               r_reply_op_len[CEPH_OSD_MAX_OP];
+       s32               r_reply_op_result[CEPH_OSD_MAX_OP];
        int               r_got_reply;
        int               r_linger;
 
@@ -82,6 +99,7 @@ struct ceph_osd_request {
 
        char              r_oid[MAX_OBJ_NAME_SIZE];          /* object name */
        int               r_oid_len;
+       u64               r_snapid;
        unsigned long     r_stamp;            /* send OR check time */
 
        struct ceph_file_layout r_file_layout;
@@ -95,7 +113,7 @@ struct ceph_osd_request {
        struct bio       *r_bio;              /* instead of pages */
 #endif
 
-       struct ceph_pagelist *r_trail;        /* trailing part of the data */
+       struct ceph_pagelist r_trail;         /* trailing part of the data */
 };
 
 struct ceph_osd_event {
@@ -107,7 +125,6 @@ struct ceph_osd_event {
        struct rb_node node;
        struct list_head osd_node;
        struct kref kref;
-       struct completion completion;
 };
 
 struct ceph_osd_event_work {
@@ -157,7 +174,7 @@ struct ceph_osd_client {
 
 struct ceph_osd_req_op {
        u16 op;           /* CEPH_OSD_OP_* */
-       u32 flags;        /* CEPH_OSD_FLAG_* */
+       u32 payload_len;
        union {
                struct {
                        u64 offset, length;
@@ -166,23 +183,24 @@ struct ceph_osd_req_op {
                } extent;
                struct {
                        const char *name;
-                       u32 name_len;
                        const char  *val;
+                       u32 name_len;
                        u32 value_len;
                        __u8 cmp_op;       /* CEPH_OSD_CMPXATTR_OP_* */
                        __u8 cmp_mode;     /* CEPH_OSD_CMPXATTR_MODE_* */
                } xattr;
                struct {
                        const char *class_name;
-                       __u8 class_len;
                        const char *method_name;
-                       __u8 method_len;
-                       __u8 argc;
                        const char *indata;
                        u32 indata_len;
+                       __u8 class_len;
+                       __u8 method_len;
+                       __u8 argc;
                } cls;
                struct {
-                       u64 cookie, count;
+                       u64 cookie;
+                       u64 count;
                } pgls;
                struct {
                        u64 snapid;
@@ -190,12 +208,11 @@ struct ceph_osd_req_op {
                struct {
                        u64 cookie;
                        u64 ver;
-                       __u8 flag;
                        u32 prot_ver;
                        u32 timeout;
+                       __u8 flag;
                } watch;
        };
-       u32 payload_len;
 };
 
 extern int ceph_osdc_init(struct ceph_osd_client *osdc,
@@ -207,29 +224,19 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
 extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
                                 struct ceph_msg *msg);
 
-extern int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
-                       struct ceph_file_layout *layout,
-                       u64 snapid,
-                       u64 off, u64 *plen, u64 *bno,
-                       struct ceph_osd_request *req,
-                       struct ceph_osd_req_op *op);
-
 extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
-                                              int flags,
                                               struct ceph_snap_context *snapc,
-                                              struct ceph_osd_req_op *ops,
+                                              unsigned int num_op,
                                               bool use_mempool,
-                                              gfp_t gfp_flags,
-                                              struct page **pages,
-                                              struct bio *bio);
+                                              gfp_t gfp_flags);
 
 extern void ceph_osdc_build_request(struct ceph_osd_request *req,
-                                   u64 off, u64 *plen,
+                                   u64 off, u64 len,
+                                   unsigned int num_op,
                                    struct ceph_osd_req_op *src_ops,
                                    struct ceph_snap_context *snapc,
-                                   struct timespec *mtime,
-                                   const char *oid,
-                                   int oid_len);
+                                   u64 snap_id,
+                                   struct timespec *mtime);
 
 extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
                                      struct ceph_file_layout *layout,
@@ -239,8 +246,7 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
                                      int do_sync, u32 truncate_seq,
                                      u64 truncate_size,
                                      struct timespec *mtime,
-                                     bool use_mempool, int num_reply,
-                                     int page_align);
+                                     bool use_mempool, int page_align);
 
 extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
                                         struct ceph_osd_request *req);
@@ -279,17 +285,13 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
                                u64 off, u64 len,
                                u32 truncate_seq, u64 truncate_size,
                                struct timespec *mtime,
-                               struct page **pages, int nr_pages,
-                               int flags, int do_sync, bool nofail);
+                               struct page **pages, int nr_pages);
 
 /* watch/notify events */
 extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
                                  void (*event_cb)(u64, u64, u8, void *),
-                                 int one_shot, void *data,
-                                 struct ceph_osd_event **pevent);
+                                 void *data, struct ceph_osd_event **pevent);
 extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
-extern int ceph_osdc_wait_event(struct ceph_osd_event *event,
-                               unsigned long timeout);
 extern void ceph_osdc_put_event(struct ceph_osd_event *event);
 #endif
 
index 10a417f..c819190 100644 (file)
  * The map can be updated either via an incremental map (diff) describing
  * the change between two successive epochs, or as a fully encoded map.
  */
+struct ceph_pg {
+       uint64_t pool;
+       uint32_t seed;
+};
+
+#define CEPH_POOL_FLAG_HASHPSPOOL  1
+
 struct ceph_pg_pool_info {
        struct rb_node node;
-       int id;
-       struct ceph_pg_pool v;
-       int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
+       s64 id;
+       u8 type;
+       u8 size;
+       u8 crush_ruleset;
+       u8 object_hash;
+       u32 pg_num, pgp_num;
+       int pg_num_mask, pgp_num_mask;
+       u64 flags;
        char *name;
 };
 
+struct ceph_object_locator {
+       uint64_t pool;
+       char *key;
+};
+
 struct ceph_pg_mapping {
        struct rb_node node;
        struct ceph_pg pgid;
@@ -110,15 +127,16 @@ extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
 
 /* calculate mapping of a file extent to an object */
 extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
-                                        u64 off, u64 *plen,
+                                        u64 off, u64 len,
                                         u64 *bno, u64 *oxoff, u64 *oxlen);
 
 /* calculate mapping of object to a placement group */
-extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
+extern int ceph_calc_object_layout(struct ceph_pg *pg,
                                   const char *oid,
                                   struct ceph_file_layout *fl,
                                   struct ceph_osdmap *osdmap);
-extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
+extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap,
+                              struct ceph_pg pgid,
                               int *acting);
 extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
                                struct ceph_pg pgid);
index 2c04afe..68c96a5 100644 (file)
@@ -8,14 +8,6 @@
 
 #include <linux/ceph/msgr.h>
 
-/*
- * osdmap encoding versions
- */
-#define CEPH_OSDMAP_INC_VERSION     5
-#define CEPH_OSDMAP_INC_VERSION_EXT 6
-#define CEPH_OSDMAP_VERSION         5
-#define CEPH_OSDMAP_VERSION_EXT     6
-
 /*
  * fs id
  */
@@ -64,7 +56,7 @@ struct ceph_timespec {
  * placement group.
  * we encode this into one __le64.
  */
-struct ceph_pg {
+struct ceph_pg_v1 {
        __le16 preferred; /* preferred primary osd */
        __le16 ps;        /* placement seed */
        __le32 pool;      /* object pool */
@@ -91,21 +83,6 @@ struct ceph_pg {
 
 #define CEPH_PG_TYPE_REP     1
 #define CEPH_PG_TYPE_RAID4   2
-#define CEPH_PG_POOL_VERSION 2
-struct ceph_pg_pool {
-       __u8 type;                /* CEPH_PG_TYPE_* */
-       __u8 size;                /* number of osds in each pg */
-       __u8 crush_ruleset;       /* crush placement rule */
-       __u8 object_hash;         /* hash mapping object name to ps */
-       __le32 pg_num, pgp_num;   /* number of pg's */
-       __le32 lpg_num, lpgp_num; /* number of localized pg's */
-       __le32 last_change;       /* most recent epoch changed */
-       __le64 snap_seq;          /* seq for per-pool snapshot */
-       __le32 snap_epoch;        /* epoch of last snap */
-       __le32 num_snaps;
-       __le32 num_removed_snap_intervals; /* if non-empty, NO per-pool snaps */
-       __le64 auid;               /* who owns the pg */
-} __attribute__ ((packed));
 
 /*
  * stable_mod func is used to control number of placement groups.
@@ -128,7 +105,7 @@ static inline int ceph_stable_mod(int x, int b, int bmask)
  * object layout - how a given object should be stored.
  */
 struct ceph_object_layout {
-       struct ceph_pg ol_pgid;   /* raw pg, with _full_ ps precision. */
+       struct ceph_pg_v1 ol_pgid;   /* raw pg, with _full_ ps precision. */
        __le32 ol_stripe_unit;    /* for per-object parity, if any */
 } __attribute__ ((packed));
 
@@ -145,8 +122,12 @@ struct ceph_eversion {
  */
 
 /* status bits */
-#define CEPH_OSD_EXISTS 1
-#define CEPH_OSD_UP     2
+#define CEPH_OSD_EXISTS  (1<<0)
+#define CEPH_OSD_UP      (1<<1)
+#define CEPH_OSD_AUTOOUT (1<<2)  /* osd was automatically marked out */
+#define CEPH_OSD_NEW     (1<<3)  /* osd is new, never marked in */
+
+extern const char *ceph_osd_state_name(int s);
 
 /* osd weights.  fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */
 #define CEPH_OSD_IN  0x10000
@@ -161,9 +142,25 @@ struct ceph_eversion {
 #define CEPH_OSDMAP_PAUSERD  (1<<2)  /* pause all reads */
 #define CEPH_OSDMAP_PAUSEWR  (1<<3)  /* pause all writes */
 #define CEPH_OSDMAP_PAUSEREC (1<<4)  /* pause recovery */
+#define CEPH_OSDMAP_NOUP     (1<<5)  /* block osd boot */
+#define CEPH_OSDMAP_NODOWN   (1<<6)  /* block osd mark-down/failure */
+#define CEPH_OSDMAP_NOOUT    (1<<7)  /* block osd auto mark-out */
+#define CEPH_OSDMAP_NOIN     (1<<8)  /* block osd auto mark-in */
+#define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */
+#define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */
+
+/*
+ * The error code to return when an OSD can't handle a write
+ * because it is too large.
+ */
+#define OSD_WRITETOOBIG EMSGSIZE
 
 /*
  * osd ops
+ *
+ * WARNING: do not use these op codes directly.  Use the helpers
+ * defined below instead.  In certain cases, op code behavior was
+ * redefined, resulting in special-cases in the helpers.
  */
 #define CEPH_OSD_OP_MODE       0xf000
 #define CEPH_OSD_OP_MODE_RD    0x1000
@@ -177,6 +174,7 @@ struct ceph_eversion {
 #define CEPH_OSD_OP_TYPE_ATTR  0x0300
 #define CEPH_OSD_OP_TYPE_EXEC  0x0400
 #define CEPH_OSD_OP_TYPE_PG    0x0500
+#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */
 
 enum {
        /** data **/
@@ -217,6 +215,23 @@ enum {
 
        CEPH_OSD_OP_WATCH   = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15,
 
+       /* omap */
+       CEPH_OSD_OP_OMAPGETKEYS   = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 17,
+       CEPH_OSD_OP_OMAPGETVALS   = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 18,
+       CEPH_OSD_OP_OMAPGETHEADER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 19,
+       CEPH_OSD_OP_OMAPGETVALSBYKEYS  =
+         CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 20,
+       CEPH_OSD_OP_OMAPSETVALS   = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 21,
+       CEPH_OSD_OP_OMAPSETHEADER = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 22,
+       CEPH_OSD_OP_OMAPCLEAR     = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 23,
+       CEPH_OSD_OP_OMAPRMKEYS    = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 24,
+       CEPH_OSD_OP_OMAP_CMP      = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 25,
+
+       /** multi **/
+       CEPH_OSD_OP_CLONERANGE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_MULTI | 1,
+       CEPH_OSD_OP_ASSERT_SRC_VERSION = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 2,
+       CEPH_OSD_OP_SRC_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 3,
+
        /** attrs **/
        /* read */
        CEPH_OSD_OP_GETXATTR  = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
@@ -238,6 +253,7 @@ enum {
        CEPH_OSD_OP_SCRUB_RESERVE   = CEPH_OSD_OP_MODE_SUB | 6,
        CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7,
        CEPH_OSD_OP_SCRUB_STOP      = CEPH_OSD_OP_MODE_SUB | 8,
+       CEPH_OSD_OP_SCRUB_MAP     = CEPH_OSD_OP_MODE_SUB | 9,
 
        /** lock **/
        CEPH_OSD_OP_WRLOCK    = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
@@ -248,10 +264,12 @@ enum {
        CEPH_OSD_OP_DNLOCK    = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
 
        /** exec **/
+       /* note: the RD bit here is wrong; see special-case below in helper */
        CEPH_OSD_OP_CALL    = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
 
        /** pg **/
        CEPH_OSD_OP_PGLS      = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
+       CEPH_OSD_OP_PGLS_FILTER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 2,
 };
 
 static inline int ceph_osd_op_type_lock(int op)
@@ -274,6 +292,10 @@ static inline int ceph_osd_op_type_pg(int op)
 {
        return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG;
 }
+static inline int ceph_osd_op_type_multi(int op)
+{
+       return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_MULTI;
+}
 
 static inline int ceph_osd_op_mode_subop(int op)
 {
@@ -281,11 +303,12 @@ static inline int ceph_osd_op_mode_subop(int op)
 }
 static inline int ceph_osd_op_mode_read(int op)
 {
-       return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD;
+       return (op & CEPH_OSD_OP_MODE_RD) &&
+               op != CEPH_OSD_OP_CALL;
 }
 static inline int ceph_osd_op_mode_modify(int op)
 {
-       return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR;
+       return op & CEPH_OSD_OP_MODE_WR;
 }
 
 /*
@@ -294,34 +317,38 @@ static inline int ceph_osd_op_mode_modify(int op)
  */
 #define CEPH_OSD_TMAP_HDR 'h'
 #define CEPH_OSD_TMAP_SET 's'
+#define CEPH_OSD_TMAP_CREATE 'c' /* create key */
 #define CEPH_OSD_TMAP_RM  'r'
+#define CEPH_OSD_TMAP_RMSLOPPY 'R'
 
 extern const char *ceph_osd_op_name(int op);
 
-
 /*
  * osd op flags
  *
  * An op may be READ, WRITE, or READ|WRITE.
  */
 enum {
-       CEPH_OSD_FLAG_ACK = 1,          /* want (or is) "ack" ack */
-       CEPH_OSD_FLAG_ONNVRAM = 2,      /* want (or is) "onnvram" ack */
-       CEPH_OSD_FLAG_ONDISK = 4,       /* want (or is) "ondisk" ack */
-       CEPH_OSD_FLAG_RETRY = 8,        /* resend attempt */
-       CEPH_OSD_FLAG_READ = 16,        /* op may read */
-       CEPH_OSD_FLAG_WRITE = 32,       /* op may write */
-       CEPH_OSD_FLAG_ORDERSNAP = 64,   /* EOLDSNAP if snapc is out of order */
-       CEPH_OSD_FLAG_PEERSTAT = 128,   /* msg includes osd_peer_stat */
-       CEPH_OSD_FLAG_BALANCE_READS = 256,
-       CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */
-       CEPH_OSD_FLAG_PGOP = 1024,      /* pg op, no object */
-       CEPH_OSD_FLAG_EXEC = 2048,      /* op may exec */
-       CEPH_OSD_FLAG_EXEC_PUBLIC = 4096, /* op may exec (public) */
+       CEPH_OSD_FLAG_ACK =            0x0001,  /* want (or is) "ack" ack */
+       CEPH_OSD_FLAG_ONNVRAM =        0x0002,  /* want (or is) "onnvram" ack */
+       CEPH_OSD_FLAG_ONDISK =         0x0004,  /* want (or is) "ondisk" ack */
+       CEPH_OSD_FLAG_RETRY =          0x0008,  /* resend attempt */
+       CEPH_OSD_FLAG_READ =           0x0010,  /* op may read */
+       CEPH_OSD_FLAG_WRITE =          0x0020,  /* op may write */
+       CEPH_OSD_FLAG_ORDERSNAP =      0x0040,  /* EOLDSNAP if snapc is out of order */
+       CEPH_OSD_FLAG_PEERSTAT_OLD =   0x0080,  /* DEPRECATED msg includes osd_peer_stat */
+       CEPH_OSD_FLAG_BALANCE_READS =  0x0100,
+       CEPH_OSD_FLAG_PARALLELEXEC =   0x0200,  /* execute op in parallel */
+       CEPH_OSD_FLAG_PGOP =           0x0400,  /* pg op, no object */
+       CEPH_OSD_FLAG_EXEC =           0x0800,  /* op may exec */
+       CEPH_OSD_FLAG_EXEC_PUBLIC =    0x1000,  /* DEPRECATED op may exec (public) */
+       CEPH_OSD_FLAG_LOCALIZE_READS = 0x2000,  /* read from nearby replica, if any */
+       CEPH_OSD_FLAG_RWORDERED =      0x4000,  /* order wrt concurrent reads */
 };
 
 enum {
        CEPH_OSD_OP_FLAG_EXCL = 1,      /* EXCL object create */
+       CEPH_OSD_OP_FLAG_FAILOK = 2,    /* continue despite failure */
 };
 
 #define EOLDSNAPC    ERESTART  /* ORDERSNAP flag set; writer has old snapc*/
@@ -381,48 +408,13 @@ struct ceph_osd_op {
                        __le64 ver;
                        __u8 flag;      /* 0 = unwatch, 1 = watch */
                } __attribute__ ((packed)) watch;
-};
+               struct {
+                       __le64 offset, length;
+                       __le64 src_offset;
+               } __attribute__ ((packed)) clonerange;
+       };
        __le32 payload_len;
 } __attribute__ ((packed));
 
-/*
- * osd request message header.  each request may include multiple
- * ceph_osd_op object operations.
- */
-struct ceph_osd_request_head {
-       __le32 client_inc;                 /* client incarnation */
-       struct ceph_object_layout layout;  /* pgid */
-       __le32 osdmap_epoch;               /* client's osdmap epoch */
-
-       __le32 flags;
-
-       struct ceph_timespec mtime;        /* for mutations only */
-       struct ceph_eversion reassert_version; /* if we are replaying op */
-
-       __le32 object_len;     /* length of object name */
-
-       __le64 snapid;         /* snapid to read */
-       __le64 snap_seq;       /* writer's snap context */
-       __le32 num_snaps;
-
-       __le16 num_ops;
-       struct ceph_osd_op ops[];  /* followed by ops[], obj, ticket, snaps */
-} __attribute__ ((packed));
-
-struct ceph_osd_reply_head {
-       __le32 client_inc;                /* client incarnation */
-       __le32 flags;
-       struct ceph_object_layout layout;
-       __le32 osdmap_epoch;
-       struct ceph_eversion reassert_version; /* for replaying uncommitted */
-
-       __le32 result;                    /* result code */
-
-       __le32 object_len;                /* length of object name */
-       __le32 num_ops;
-       struct ceph_osd_op ops[0];  /* ops[], object */
-} __attribute__ ((packed));
-
-
 
 #endif
index de095b0..76a87fb 100644 (file)
@@ -359,6 +359,7 @@ asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
 asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
                const struct compat_iovec __user *vec,
                unsigned long vlen, u32 pos_low, u32 pos_high);
+asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
 
 asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
                     const compat_uptr_t __user *envp);
@@ -535,6 +536,8 @@ asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
 asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
                                             struct file_handle __user *handle,
                                             int flags);
+asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
 asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
                                    compat_ulong_t __user *outp,
                                    compat_ulong_t __user *exp,
index 51494e6..33f0280 100644 (file)
@@ -77,10 +77,13 @@ static inline void init_completion(struct completion *x)
 }
 
 extern void wait_for_completion(struct completion *);
+extern void wait_for_completion_io(struct completion *);
 extern int wait_for_completion_interruptible(struct completion *x);
 extern int wait_for_completion_killable(struct completion *x);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
                                                   unsigned long timeout);
+extern unsigned long wait_for_completion_io_timeout(struct completion *x,
+                                                   unsigned long timeout);
 extern long wait_for_completion_interruptible_timeout(
        struct completion *x, unsigned long timeout);
 extern long wait_for_completion_killable_timeout(
index 25baa28..6a1101f 100644 (file)
@@ -162,6 +162,8 @@ struct crush_map {
        __u32 choose_local_fallback_tries;
        /* choose attempts before giving up */ 
        __u32 choose_total_tries;
+       /* attempt chooseleaf inner descent once; on failure retry outer descent */
+       __u32 chooseleaf_descend_once;
 };
 
 
index c1754b5..1a6bb81 100644 (file)
@@ -145,6 +145,7 @@ enum dentry_d_lock_class
 
 struct dentry_operations {
        int (*d_revalidate)(struct dentry *, unsigned int);
+       int (*d_weak_revalidate)(struct dentry *, unsigned int);
        int (*d_hash)(const struct dentry *, const struct inode *,
                        struct qstr *);
        int (*d_compare)(const struct dentry *, const struct inode *,
@@ -192,6 +193,8 @@ struct dentry_operations {
 #define DCACHE_GENOCIDE                0x0200
 #define DCACHE_SHRINK_LIST     0x0400
 
+#define DCACHE_OP_WEAK_REVALIDATE      0x0800
+
 #define DCACHE_NFSFS_RENAMED   0x1000
      /* this dentry has been "silly renamed" and has to be deleted on the last
       * dput() */
@@ -293,9 +296,9 @@ extern void d_move(struct dentry *, struct dentry *);
 extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
 
 /* appendix may either be NULL or be used for transname suffixes */
-extern struct dentry *d_lookup(struct dentry *, struct qstr *);
+extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
 extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
-extern struct dentry *__d_lookup(struct dentry *, struct qstr *);
+extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
 extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
                                const struct qstr *name,
                                unsigned *seq, struct inode *inode);
@@ -333,7 +336,6 @@ extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 extern char *__d_path(const struct path *, const struct path *, char *, int);
 extern char *d_absolute_path(const struct path *, char *, int);
 extern char *d_path(const struct path *, char *, int);
-extern char *d_path_with_unreachable(const struct path *, char *, int);
 extern char *dentry_path_raw(struct dentry *, char *, int);
 extern char *dentry_path(struct dentry *, char *, int);
 
index 3bd46f7..a975de1 100644 (file)
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
 {
 }
 #endif
index bf6afa2..1e483fa 100644 (file)
@@ -68,8 +68,8 @@ typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
 typedef int (*dm_preresume_fn) (struct dm_target *ti);
 typedef void (*dm_resume_fn) (struct dm_target *ti);
 
-typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
-                            unsigned status_flags, char *result, unsigned maxlen);
+typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+                             unsigned status_flags, char *result, unsigned maxlen);
 
 typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
 
@@ -175,6 +175,14 @@ struct target_type {
 #define DM_TARGET_IMMUTABLE            0x00000004
 #define dm_target_is_immutable(type)   ((type)->features & DM_TARGET_IMMUTABLE)
 
+/*
+ * Some targets need to be sent the same WRITE bio several times so
+ * that they can send copies of it to different devices.  This function
+ * examines any supplied bio and returns the number of copies of it the
+ * target requires.
+ */
+typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
+
 struct dm_target {
        struct dm_table *table;
        struct target_type *type;
@@ -187,26 +195,26 @@ struct dm_target {
        uint32_t max_io_len;
 
        /*
-        * A number of zero-length barrier requests that will be submitted
+        * A number of zero-length barrier bios that will be submitted
         * to the target for the purpose of flushing cache.
         *
-        * The request number can be accessed with dm_bio_get_target_request_nr.
-        * It is a responsibility of the target driver to remap these requests
+        * The bio number can be accessed with dm_bio_get_target_bio_nr.
+        * It is a responsibility of the target driver to remap these bios
         * to the real underlying devices.
         */
-       unsigned num_flush_requests;
+       unsigned num_flush_bios;
 
        /*
-        * The number of discard requests that will be submitted to the target.
-        * The request number can be accessed with dm_bio_get_target_request_nr.
+        * The number of discard bios that will be submitted to the target.
+        * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
-       unsigned num_discard_requests;
+       unsigned num_discard_bios;
 
        /*
-        * The number of WRITE SAME requests that will be submitted to the target.
-        * The request number can be accessed with dm_bio_get_target_request_nr.
+        * The number of WRITE SAME bios that will be submitted to the target.
+        * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
-       unsigned num_write_same_requests;
+       unsigned num_write_same_bios;
 
        /*
         * The minimum number of extra bytes allocated in each bio for the
@@ -214,6 +222,13 @@ struct dm_target {
         */
        unsigned per_bio_data_size;
 
+       /*
+        * If defined, this function is called to find out how many
+        * duplicate bios should be sent to the target when writing
+        * data.
+        */
+       dm_num_write_bios_fn num_write_bios;
+
        /* target specific data */
        void *private;
 
@@ -233,10 +248,10 @@ struct dm_target {
        bool discards_supported:1;
 
        /*
-        * Set if the target required discard request to be split
+        * Set if the target required discard bios to be split
         * on max_io_len boundary.
         */
-       bool split_discard_requests:1;
+       bool split_discard_bios:1;
 
        /*
         * Set if this target does not return zeroes on discarded blocks.
@@ -261,7 +276,7 @@ struct dm_target_io {
        struct dm_io *io;
        struct dm_target *ti;
        union map_info info;
-       unsigned target_request_nr;
+       unsigned target_bio_nr;
        struct bio clone;
 };
 
@@ -275,9 +290,9 @@ static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
        return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
 }
 
-static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio)
+static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
 {
-       return container_of(bio, struct dm_target_io, clone)->target_request_nr;
+       return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
 }
 
 int dm_register_target(struct target_type *t);
index 47d9d37..f486d63 100644 (file)
 
 #define DM_KCOPYD_IGNORE_ERROR 1
 
+struct dm_kcopyd_throttle {
+       unsigned throttle;
+       unsigned num_io_jobs;
+       unsigned io_period;
+       unsigned total_period;
+       unsigned last_jiffies;
+};
+
+/*
+ * kcopyd clients that want to support throttling must pass an initialised
+ * dm_kcopyd_throttle struct into dm_kcopyd_client_create().
+ * Two or more clients may share the same instance of this struct between
+ * them if they wish to be throttled as a group.
+ *
+ * This macro also creates a corresponding module parameter to configure
+ * the amount of throttling.
+ */
+#define DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(name, description) \
+static struct dm_kcopyd_throttle dm_kcopyd_throttle = { 100, 0, 0, 0, 0 }; \
+module_param_named(name, dm_kcopyd_throttle.throttle, uint, 0644); \
+MODULE_PARM_DESC(name, description)
+
 /*
  * To use kcopyd you must first create a dm_kcopyd_client object.
+ * throttle can be NULL if you don't want any throttling.
  */
 struct dm_kcopyd_client;
-struct dm_kcopyd_client *dm_kcopyd_client_create(void);
+struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
 void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
 
 /*
index 3d754a3..9978b61 100644 (file)
@@ -119,8 +119,10 @@ struct dma_buf {
        struct file *file;
        struct list_head attachments;
        const struct dma_buf_ops *ops;
-       /* mutex to serialize list manipulation and attach/detach */
+       /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
        struct mutex lock;
+       unsigned vmapping_counter;
+       void *vmap_ptr;
        void *priv;
 };
 
index f593999..91ac8da 100644 (file)
@@ -1001,6 +1001,22 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
+       __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+
+static inline struct dma_chan
+*__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn,
+                                 void *fn_param, struct device *dev,
+                                 char *name)
+{
+       struct dma_chan *chan;
+
+       chan = dma_request_slave_channel(dev, name);
+       if (chan)
+               return chan;
+
+       return __dma_request_channel(mask, fn, fn_param);
+}
 
 /* --- Helper iov-locking functions --- */
 
index 41766de..481ab23 100644 (file)
@@ -27,7 +27,6 @@
  */
 struct dw_dma_slave {
        struct device           *dma_dev;
-       const char              *bus_id;
        u32                     cfg_hi;
        u32                     cfg_lo;
        u8                      src_master;
@@ -60,9 +59,6 @@ struct dw_dma_platform_data {
        unsigned short  block_size;
        unsigned char   nr_masters;
        unsigned char   data_width[4];
-
-       struct dw_dma_slave *sd;
-       unsigned int sd_count;
 };
 
 /* bursts size */
@@ -114,6 +110,5 @@ void dw_dma_cyclic_stop(struct dma_chan *chan);
 dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
 
 dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
-bool dw_dma_generic_filter(struct dma_chan *chan, void *param);
 
 #endif /* DW_DMAC_H */
index 1b8c02b..4fd4999 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <linux/atomic.h>
 #include <linux/device.h>
-#include <linux/kobject.h>
 #include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/debugfs.h>
@@ -48,8 +47,17 @@ static inline void opstate_init(void)
        return;
 }
 
+/* Max length of a DIMM label*/
 #define EDAC_MC_LABEL_LEN      31
-#define MC_PROC_NAME_MAX_LEN   7
+
+/* Maximum size of the location string */
+#define LOCATION_SIZE 80
+
+/* Defines the maximum number of labels that can be reported */
+#define EDAC_MAX_LABELS                8
+
+/* String used to join two or more labels */
+#define OTHER_LABEL " or "
 
 /**
  * enum dev_type - describe the type of memory DRAM chips used at the stick
@@ -101,8 +109,24 @@ enum hw_event_mc_err_type {
        HW_EVENT_ERR_CORRECTED,
        HW_EVENT_ERR_UNCORRECTED,
        HW_EVENT_ERR_FATAL,
+       HW_EVENT_ERR_INFO,
 };
 
+static inline char *mc_event_error_type(const unsigned int err_type)
+{
+       switch (err_type) {
+       case HW_EVENT_ERR_CORRECTED:
+               return "Corrected";
+       case HW_EVENT_ERR_UNCORRECTED:
+               return "Uncorrected";
+       case HW_EVENT_ERR_FATAL:
+               return "Fatal";
+       default:
+       case HW_EVENT_ERR_INFO:
+               return "Info";
+       }
+}
+
 /**
  * enum mem_type - memory types. For a more detailed reference, please see
  *                     http://en.wikipedia.org/wiki/DRAM
@@ -376,6 +400,9 @@ enum scrub_type {
  * @EDAC_MC_LAYER_CHANNEL:     memory layer is named "channel"
  * @EDAC_MC_LAYER_SLOT:                memory layer is named "slot"
  * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select"
+ * @EDAC_MC_LAYER_ALL_MEM:     memory layout is unknown. All memory is mapped
+ *                             as a single memory area. This is used when
+ *                             retrieving errors from a firmware driven driver.
  *
  * This enum is used by the drivers to tell edac_mc_sysfs what name should
  * be used when describing a memory stick location.
@@ -385,6 +412,7 @@ enum edac_mc_layer_type {
        EDAC_MC_LAYER_CHANNEL,
        EDAC_MC_LAYER_SLOT,
        EDAC_MC_LAYER_CHIP_SELECT,
+       EDAC_MC_LAYER_ALL_MEM,
 };
 
 /**
@@ -551,6 +579,46 @@ struct errcount_attribute_data {
        int layer0, layer1, layer2;
 };
 
+/**
+ * edac_raw_error_desc - Raw error report structure
+ * @grain:                     minimum granularity for an error report, in bytes
+ * @error_count:               number of errors of the same type
+ * @top_layer:                 top layer of the error (layer[0])
+ * @mid_layer:                 middle layer of the error (layer[1])
+ * @low_layer:                 low layer of the error (layer[2])
+ * @page_frame_number:         page where the error happened
+ * @offset_in_page:            page offset
+ * @syndrome:                  syndrome of the error (or 0 if unknown or if
+ *                             the syndrome is not applicable)
+ * @msg:                       error message
+ * @location:                  location of the error
+ * @label:                     label of the affected DIMM(s)
+ * @other_detail:              other driver-specific detail about the error
+ * @enable_per_layer_report:   if false, the error affects all layers
+ *                             (typically, a memory controller error)
+ */
+struct edac_raw_error_desc {
+       /*
+        * NOTE: everything before grain won't be cleaned by
+        * edac_raw_error_desc_clean()
+        */
+       char location[LOCATION_SIZE];
+       char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
+       long grain;
+
+       /* the vars below and grain will be cleaned on every new error report */
+       u16 error_count;
+       int top_layer;
+       int mid_layer;
+       int low_layer;
+       unsigned long page_frame_number;
+       unsigned long offset_in_page;
+       unsigned long syndrome;
+       const char *msg;
+       const char *other_detail;
+       bool enable_per_layer_report;
+};
+
 /* MEMORY controller information structure
  */
 struct mem_ctl_info {
@@ -630,7 +698,6 @@ struct mem_ctl_info {
        const char *mod_ver;
        const char *ctl_name;
        const char *dev_name;
-       char proc_name[MC_PROC_NAME_MAX_LEN + 1];
        void *pvt_info;
        unsigned long start_time;       /* mci load start time (in jiffies) */
 
@@ -659,6 +726,12 @@ struct mem_ctl_info {
        /* work struct for this MC */
        struct delayed_work work;
 
+       /*
+        * Used to report an error - by being at the global struct
+        * makes the memory allocated by the EDAC core
+        */
+       struct edac_raw_error_desc error_desc;
+
        /* the internal state of this controller instance */
        int op_state;
 
index 1866206..acd0312 100644 (file)
@@ -2,6 +2,7 @@
 #define _LINUX_ELEVATOR_H
 
 #include <linux/percpu.h>
+#include <linux/hashtable.h>
 
 #ifdef CONFIG_BLOCK
 
@@ -96,6 +97,8 @@ struct elevator_type
        struct list_head list;
 };
 
+#define ELV_HASH_BITS 6
+
 /*
  * each queue has an elevator_queue associated with it
  */
@@ -105,8 +108,8 @@ struct elevator_queue
        void *elevator_data;
        struct kobject kobj;
        struct mutex sysfs_lock;
-       struct hlist_head *hash;
        unsigned int registered:1;
+       DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
 };
 
 /*
index 8c9048e..40a3c0e 100644 (file)
      Override in asm/elf.h as needed.  */
 # define elf_read_implies_exec(ex, have_pt_gnu_stack)  0
 #endif
+#ifndef SET_PERSONALITY
+#define SET_PERSONALITY(ex) \
+       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+#endif
 
 #if ELF_CLASS == ELFCLASS32
 
index 3c3ef19..cf5d2af 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/wait.h>
 
 /*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
  * new flags, since they might collide with O_* ones. We want
  * to re-use O_* flags that couldn't possibly have a meaning
  * from eventfd, in order to leave a free define-space for
index e70df40..043a5cf 100644 (file)
@@ -3,6 +3,7 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
+#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -48,6 +49,8 @@ extern void thaw_kernel_threads(void);
 
 static inline bool try_to_freeze(void)
 {
+       if (!(current->flags & PF_NOFREEZE))
+               debug_check_no_locks_held();
        might_sleep();
        if (likely(!freezing(current)))
                return false;
index 7d2e893..74a907b 100644 (file)
@@ -769,7 +769,7 @@ struct file {
        } f_u;
        struct path             f_path;
 #define f_dentry       f_path.dentry
-#define f_vfsmnt       f_path.mnt
+       struct inode            *f_inode;       /* cached value */
        const struct file_operations    *f_op;
 
        /*
@@ -1807,7 +1807,6 @@ struct file_system_type {
 #define FS_HAS_SUBTYPE         4
 #define FS_USERNS_MOUNT                8       /* Can be mounted by userns root */
 #define FS_USERNS_DEV_MOUNT    16 /* A userns mount does not imply MNT_NODEV */
-#define FS_REVAL_DOT           16384   /* Check the paths ".", ".." for staleness */
 #define FS_RENAME_DOES_D_MOVE  32768   /* FS will handle d_move() during rename() internally. */
        struct dentry *(*mount) (struct file_system_type *, int,
                       const char *, void *);
@@ -2217,6 +2216,11 @@ static inline bool execute_ok(struct inode *inode)
        return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
 }
 
+static inline struct inode *file_inode(struct file *f)
+{
+       return f->f_inode;
+}
+
 /*
  * get_write_access() gets write permission for a file.
  * put_write_access() releases this write permission.
@@ -2239,7 +2243,7 @@ static inline int get_write_access(struct inode *inode)
 }
 static inline int deny_write_access(struct file *file)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
 }
 static inline void put_write_access(struct inode * inode)
@@ -2249,7 +2253,7 @@ static inline void put_write_access(struct inode * inode)
 static inline void allow_write_access(struct file *file)
 {
        if (file)
-               atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
+               atomic_inc(&file_inode(file)->i_writecount);
 }
 #ifdef CONFIG_IMA
 static inline void i_readcount_dec(struct inode *inode)
@@ -2274,6 +2278,7 @@ static inline void i_readcount_inc(struct inode *inode)
 extern int do_pipe_flags(int *, int);
 
 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
+extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
 extern struct file * open_exec(const char *);
  
 /* fs/dcache.c -- generic fs support functions */
@@ -2463,7 +2468,7 @@ extern int page_symlink(struct inode *inode, const char *symname, int len);
 extern const struct inode_operations page_symlink_inode_operations;
 extern int generic_readlink(struct dentry *, char __user *, int);
 extern void generic_fillattr(struct inode *, struct kstat *);
-extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+extern int vfs_getattr(struct path *, struct kstat *);
 void __inode_add_bytes(struct inode *inode, loff_t bytes);
 void inode_add_bytes(struct inode *inode, loff_t bytes);
 void inode_sub_bytes(struct inode *inode, loff_t bytes);
index d0ae3a8..729eded 100644 (file)
@@ -17,8 +17,8 @@ struct fs_struct {
 extern struct kmem_cache *fs_cachep;
 
 extern void exit_fs(struct task_struct *);
-extern void set_fs_root(struct fs_struct *, struct path *);
-extern void set_fs_pwd(struct fs_struct *, struct path *);
+extern void set_fs_root(struct fs_struct *, const struct path *);
+extern void set_fs_pwd(struct fs_struct *, const struct path *);
 extern struct fs_struct *copy_fs_struct(struct fs_struct *);
 extern void free_fs_struct(struct fs_struct *);
 extern int unshare_fs_struct(void);
index 0fbfb46..a78680a 100644 (file)
@@ -244,7 +244,7 @@ static inline void fsnotify_open(struct file *file)
 static inline void fsnotify_close(struct file *file)
 {
        struct path *path = &file->f_path;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        fmode_t mode = file->f_mode;
        __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
 
index 29eb805..c1d6555 100644 (file)
 
 #ifdef CONFIG_PREEMPT_COUNT
 # define preemptible() (preempt_count() == 0 && !irqs_disabled())
-# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
 # define preemptible() 0
-# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
index 227c624..a9df51f 100644 (file)
@@ -115,51 +115,50 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * hash_for_each - iterate over a hashtable
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each(name, bkt, node, obj, member)                            \
-       for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-               hlist_for_each_entry(obj, node, &name[bkt], member)
+#define hash_for_each(name, bkt, obj, member)                          \
+       for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+                       (bkt)++)\
+               hlist_for_each_entry(obj, &name[bkt], member)
 
 /**
  * hash_for_each_rcu - iterate over a rcu enabled hashtable
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each_rcu(name, bkt, node, obj, member)                                \
-       for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-               hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
+#define hash_for_each_rcu(name, bkt, obj, member)                      \
+       for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+                       (bkt)++)\
+               hlist_for_each_entry_rcu(obj, &name[bkt], member)
 
 /**
  * hash_for_each_safe - iterate over a hashtable safe against removal of
  * hash entry
  * @name: hashtable to iterate
  * @bkt: integer to use as bucket loop cursor
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @tmp: a &struct used for temporary storage
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  */
-#define hash_for_each_safe(name, bkt, node, tmp, obj, member)                  \
-       for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
-               hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
+#define hash_for_each_safe(name, bkt, tmp, obj, member)                        \
+       for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+                       (bkt)++)\
+               hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
 
 /**
  * hash_for_each_possible - iterate over all possible objects hashing to the
  * same bucket
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible(name, obj, node, member, key)                   \
-       hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible(name, obj, member, key)                 \
+       hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
 
 /**
  * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
@@ -167,25 +166,24 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * in a rcu enabled hashtable
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_rcu(name, obj, node, member, key)               \
-       hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+#define hash_for_each_possible_rcu(name, obj, member, key)             \
+       hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
+               member)
 
 /**
  * hash_for_each_possible_safe - iterate over all possible objects hashing to the
  * same bucket safe against removals
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
- * @node: the &struct list_head to use as a loop cursor for each entry
  * @tmp: a &struct used for temporary storage
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_safe(name, obj, node, tmp, member, key)         \
-       hlist_for_each_entry_safe(obj, node, tmp,                               \
+#define hash_for_each_possible_safe(name, obj, tmp, member, key)       \
+       hlist_for_each_entry_safe(obj, tmp,\
                &name[hash_min(key, HASH_BITS(name))], member)
 
 
index 56fae86..0dca785 100644 (file)
@@ -121,9 +121,9 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info,
  * @device: Driver model representation of the device
  * @tx_cfg: HSI TX configuration
  * @rx_cfg: HSI RX configuration
- * @e_handler: Callback for handling port events (RX Wake High/Low)
- * @pclaimed: Keeps tracks if the clients claimed its associated HSI port
- * @nb: Notifier block for port events
+ * e_handler: Callback for handling port events (RX Wake High/Low)
+ * pclaimed: Keeps tracks if the clients claimed its associated HSI port
+ * nb: Notifier block for port events
  */
 struct hsi_client {
        struct device           device;
index eedc334..16e4e9a 100644 (file)
@@ -281,7 +281,7 @@ static inline struct hstate *hstate_inode(struct inode *i)
 
 static inline struct hstate *hstate_file(struct file *f)
 {
-       return hstate_inode(f->f_dentry->d_inode);
+       return hstate_inode(file_inode(f));
 }
 
 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
index e5eb125..a6f38b5 100644 (file)
 #include <linux/init.h>
 #include <linux/rcupdate.h>
 
-#if BITS_PER_LONG == 32
-# define IDR_BITS 5
-# define IDR_FULL 0xfffffffful
-/* We can only use two of the bits in the top level because there is
-   only one possible bit in the top level (5 bits * 7 levels = 35
-   bits, but you only use 31 bits in the id). */
-# define TOP_LEVEL_FULL (IDR_FULL >> 30)
-#elif BITS_PER_LONG == 64
-# define IDR_BITS 6
-# define IDR_FULL 0xfffffffffffffffful
-/* We can only use two of the bits in the top level because there is
-   only one possible bit in the top level (6 bits * 6 levels = 36
-   bits, but you only use 31 bits in the id). */
-# define TOP_LEVEL_FULL (IDR_FULL >> 62)
-#else
-# error "BITS_PER_LONG is not 32 or 64"
-#endif
-
+/*
+ * We want shallower trees and thus more bits covered at each layer.  8
+ * bits gives us large enough first layer for most use cases and maximum
+ * tree depth of 4.  Each idr_layer is slightly larger than 2k on 64bit and
+ * 1k on 32bit.
+ */
+#define IDR_BITS 8
 #define IDR_SIZE (1 << IDR_BITS)
 #define IDR_MASK ((1 << IDR_BITS)-1)
 
-#define MAX_IDR_SHIFT (sizeof(int)*8 - 1)
-#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-#define MAX_IDR_MASK (MAX_IDR_BIT - 1)
-
-/* Leave the possibility of an incomplete final layer */
-#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
-
-/* Number of id_layer structs to leave in free list */
-#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
-
 struct idr_layer {
-       unsigned long            bitmap; /* A zero bit means "space here" */
+       int                     prefix; /* the ID prefix of this idr_layer */
+       DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
        struct idr_layer __rcu  *ary[1<<IDR_BITS];
-       int                      count;  /* When zero, we can release it */
-       int                      layer;  /* distance from leaf */
-       struct rcu_head          rcu_head;
+       int                     count;  /* When zero, we can release it */
+       int                     layer;  /* distance from leaf */
+       struct rcu_head         rcu_head;
 };
 
 struct idr {
-       struct idr_layer __rcu *top;
-       struct idr_layer *id_free;
-       int               layers; /* only valid without concurrent changes */
-       int               id_free_cnt;
-       spinlock_t        lock;
+       struct idr_layer __rcu  *hint;  /* the last layer allocated from */
+       struct idr_layer __rcu  *top;
+       struct idr_layer        *id_free;
+       int                     layers; /* only valid w/o concurrent changes */
+       int                     id_free_cnt;
+       spinlock_t              lock;
 };
 
-#define IDR_INIT(name)                                         \
-{                                                              \
-       .top            = NULL,                                 \
-       .id_free        = NULL,                                 \
-       .layers         = 0,                                    \
-       .id_free_cnt    = 0,                                    \
-       .lock           = __SPIN_LOCK_UNLOCKED(name.lock),      \
+#define IDR_INIT(name)                                                 \
+{                                                                      \
+       .lock                   = __SPIN_LOCK_UNLOCKED(name.lock),      \
 }
 #define DEFINE_IDR(name)       struct idr name = IDR_INIT(name)
 
-/* Actions to be taken after a call to _idr_sub_alloc */
-#define IDR_NEED_TO_GROW -2
-#define IDR_NOMORE_SPACE -3
-
-#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
-
 /**
  * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
@@ -101,19 +72,90 @@ struct idr {
  * This is what we export.
  */
 
-void *idr_find(struct idr *idp, int id);
+void *idr_find_slowpath(struct idr *idp, int id);
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
-int idr_get_new(struct idr *idp, void *ptr, int *id);
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
+void idr_preload(gfp_t gfp_mask);
+int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
 int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data);
 void *idr_get_next(struct idr *idp, int *nextid);
 void *idr_replace(struct idr *idp, void *ptr, int id);
 void idr_remove(struct idr *idp, int id);
-void idr_remove_all(struct idr *idp);
+void idr_free(struct idr *idp, int id);
 void idr_destroy(struct idr *idp);
 void idr_init(struct idr *idp);
 
+/**
+ * idr_preload_end - end preload section started with idr_preload()
+ *
+ * Each idr_preload() should be matched with an invocation of this
+ * function.  See idr_preload() for details.
+ */
+static inline void idr_preload_end(void)
+{
+       preempt_enable();
+}
+
+/**
+ * idr_find - return pointer for given id
+ * @idp: idr handle
+ * @id: lookup key
+ *
+ * Return the pointer given the id it has been registered with.  A %NULL
+ * return indicates that @id is not valid or you passed %NULL in
+ * idr_get_new().
+ *
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
+ */
+static inline void *idr_find(struct idr *idr, int id)
+{
+       struct idr_layer *hint = rcu_dereference_raw(idr->hint);
+
+       if (hint && (id & ~IDR_MASK) == hint->prefix)
+               return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
+
+       return idr_find_slowpath(idr, id);
+}
+
+/**
+ * idr_get_new - allocate new idr entry
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: pointer to the allocated handle
+ *
+ * Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
+ */
+static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
+{
+       return idr_get_new_above(idp, ptr, 0, id);
+}
+
+/**
+ * idr_for_each_entry - iterate over an idr's elements of a given type
+ * @idp:     idr handle
+ * @entry:   the type * to use as cursor
+ * @id:      id entry's key
+ */
+#define idr_for_each_entry(idp, entry, id)                             \
+       for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
+            entry != NULL;                                             \
+            ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+
+void __idr_remove_all(struct idr *idp);        /* don't use */
+
+/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * If you're trying to destroy @idp, calling idr_destroy() is enough.
+ * This is going away.  Don't use.
+ */
+static inline void __deprecated idr_remove_all(struct idr *idp)
+{
+       __idr_remove_all(idp);
+}
 
 /*
  * IDA - IDR based id allocator, use when translation from id to
@@ -141,7 +183,6 @@ struct ida {
 
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
-int ida_get_new(struct ida *ida, int *p_id);
 void ida_remove(struct ida *ida, int id);
 void ida_destroy(struct ida *ida);
 void ida_init(struct ida *ida);
@@ -150,17 +191,18 @@ int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask);
 void ida_simple_remove(struct ida *ida, unsigned int id);
 
-void __init idr_init_cache(void);
-
 /**
- * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idp:     idr handle
- * @entry:   the type * to use as cursor
- * @id:      id entry's key
+ * ida_get_new - allocate new ID
+ * @ida:       idr handle
+ * @p_id:      pointer to the allocated handle
+ *
+ * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
  */
-#define idr_for_each_entry(idp, entry, id)                             \
-       for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
-            entry != NULL;                                             \
-            ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+static inline int ida_get_new(struct ida *ida, int *p_id)
+{
+       return ida_get_new_above(ida, 0, p_id);
+}
+
+void __init idr_init_cache(void);
 
 #endif /* __IDR_H__ */
index 4648d80..cfd21e3 100644 (file)
@@ -216,11 +216,10 @@ static inline struct hlist_head *team_port_index_hash(struct team *team,
 static inline struct team_port *team_get_port_by_index(struct team *team,
                                                       int port_index)
 {
-       struct hlist_node *p;
        struct team_port *port;
        struct hlist_head *head = team_port_index_hash(team, port_index);
 
-       hlist_for_each_entry(port, p, head, hlist)
+       hlist_for_each_entry(port, head, hlist)
                if (port->index == port_index)
                        return port;
        return NULL;
@@ -228,11 +227,10 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
 static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
                                                           int port_index)
 {
-       struct hlist_node *p;
        struct team_port *port;
        struct hlist_head *head = team_port_index_hash(team, port_index);
 
-       hlist_for_each_entry_rcu(port, p, head, hlist)
+       hlist_for_each_entry_rcu(port, head, hlist)
                if (port->index == port_index)
                        return port;
        return NULL;
index 1487e79..1f9f56e 100644 (file)
 
 #include <uapi/linux/ipmi.h>
 
-
-/*
- * The in-kernel interface.
- */
 #include <linux/list.h>
 #include <linux/proc_fs.h>
 
diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h
new file mode 100644 (file)
index 0000000..697af0f
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies
+ */
+
+#ifndef _LINUX_IRQCHIP_METAG_EXT_H_
+#define _LINUX_IRQCHIP_METAG_EXT_H_
+
+struct irq_data;
+struct platform_device;
+
+/* called from core irq code at init */
+int init_external_IRQ(void);
+
+/*
+ * called from SoC init_irq() callback to dynamically indicate the lack of
+ * HWMASKEXT registers.
+ */
+void meta_intc_no_mask(void);
+
+/*
+ * These allow SoCs to specialise the interrupt controller from their init_irq
+ * callbacks.
+ */
+
+extern struct irq_chip meta_intc_edge_chip;
+extern struct irq_chip meta_intc_level_chip;
+
+/* this should be called in the mask callback */
+void meta_intc_mask_irq_simple(struct irq_data *data);
+/* this should be called in the unmask callback */
+void meta_intc_unmask_irq_simple(struct irq_data *data);
+
+#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */
diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h
new file mode 100644 (file)
index 0000000..4ebdfb3
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2011 Imagination Technologies
+ */
+
+#ifndef _LINUX_IRQCHIP_METAG_H_
+#define _LINUX_IRQCHIP_METAG_H_
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_METAG_PERFCOUNTER_IRQS
+extern int init_internal_IRQ(void);
+extern int internal_irq_map(unsigned int hw);
+#else
+static inline int init_internal_IRQ(void)
+{
+       return 0;
+}
+static inline int internal_irq_map(unsigned int hw)
+{
+       return -EINVAL;
+}
+#endif
+
+#endif /* _LINUX_IRQCHIP_METAG_H_ */
index e30b663..50e5a5e 100644 (file)
@@ -20,7 +20,6 @@
 #ifndef __KERNEL__
 #include "jfs_compat.h"
 #define JBD2_DEBUG
-#define jfs_debug jbd_debug
 #else
 
 #include <linux/types.h>
@@ -57,7 +56,7 @@
  * CONFIG_JBD2_DEBUG is on.
  */
 #define JBD2_EXPENSIVE_CHECKING
-extern u8 jbd2_journal_enable_debug;
+extern ushort jbd2_journal_enable_debug;
 
 #define jbd_debug(n, f, a...)                                          \
        do {                                                            \
@@ -397,35 +396,18 @@ struct jbd2_journal_handle
        int                     h_err;
 
        /* Flags [no locking] */
-       unsigned int    h_sync:1;       /* sync-on-close */
-       unsigned int    h_jdata:1;      /* force data journaling */
-       unsigned int    h_aborted:1;    /* fatal error on handle */
-       unsigned int    h_cowing:1;     /* COWing block to snapshot */
-
-       /* Number of buffers requested by user:
-        * (before adding the COW credits factor) */
-       unsigned int    h_base_credits:14;
-
-       /* Number of buffers the user is allowed to dirty:
-        * (counts only buffers dirtied when !h_cowing) */
-       unsigned int    h_user_credits:14;
+       unsigned int    h_sync:         1;      /* sync-on-close */
+       unsigned int    h_jdata:        1;      /* force data journaling */
+       unsigned int    h_aborted:      1;      /* fatal error on handle */
+       unsigned int    h_type:         8;      /* for handle statistics */
+       unsigned int    h_line_no:      16;     /* for handle statistics */
 
+       unsigned long           h_start_jiffies;
+       unsigned int            h_requested_credits;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      h_lockdep_map;
 #endif
-
-#ifdef CONFIG_JBD2_DEBUG
-       /* COW debugging counters: */
-       unsigned int h_cow_moved; /* blocks moved to snapshot */
-       unsigned int h_cow_copied; /* blocks copied to snapshot */
-       unsigned int h_cow_ok_jh; /* blocks already COWed during current
-                                    transaction */
-       unsigned int h_cow_ok_bitmap; /* blocks not set in COW bitmap */
-       unsigned int h_cow_ok_mapped;/* blocks already mapped in snapshot */
-       unsigned int h_cow_bitmaps; /* COW bitmaps created */
-       unsigned int h_cow_excluded; /* blocks set in exclude bitmap */
-#endif
 };
 
 
@@ -580,6 +562,11 @@ struct transaction_s
         */
        unsigned long           t_start;
 
+       /*
+        * When commit was requested
+        */
+       unsigned long           t_requested;
+
        /*
         * Checkpointing stats [j_checkpoint_sem]
         */
@@ -637,6 +624,7 @@ struct transaction_s
 
 struct transaction_run_stats_s {
        unsigned long           rs_wait;
+       unsigned long           rs_request_delay;
        unsigned long           rs_running;
        unsigned long           rs_locked;
        unsigned long           rs_flushing;
@@ -649,6 +637,7 @@ struct transaction_run_stats_s {
 
 struct transaction_stats_s {
        unsigned long           ts_tid;
+       unsigned long           ts_requested;
        struct transaction_run_stats_s run;
 };
 
@@ -1086,7 +1075,8 @@ static inline handle_t *journal_current_handle(void)
  */
 
 extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
-extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask);
+extern handle_t *jbd2__journal_start(journal_t *, int nblocks, gfp_t gfp_mask,
+                                    unsigned int type, unsigned int line_no);
 extern int      jbd2_journal_restart(handle_t *, int nblocks);
 extern int      jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
 extern int      jbd2_journal_extend (handle_t *, int nblocks);
index cc6d2aa..d991cc1 100644 (file)
@@ -666,54 +666,49 @@ static inline void hlist_move_list(struct hlist_head *old,
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)
 
+#define hlist_entry_safe(ptr, type, member) \
+       (ptr) ? hlist_entry(ptr, type, member) : NULL
+
 /**
  * hlist_for_each_entry        - iterate over list of given type
- * @tpos:      the type * to use as a loop cursor.
- * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @pos:       the type * to use as a loop cursor.
  * @head:      the head for your list.
  * @member:    the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry(tpos, pos, head, member)                   \
-       for (pos = (head)->first;                                        \
-            pos &&                                                      \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-            pos = pos->next)
+#define hlist_for_each_entry(pos, head, member)                                \
+       for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+            pos;                                                       \
+            pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos:      the type * to use as a loop cursor.
- * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @pos:       the type * to use as a loop cursor.
  * @member:    the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_continue(tpos, pos, member)                \
-       for (pos = (pos)->next;                                          \
-            pos &&                                                      \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-            pos = pos->next)
+#define hlist_for_each_entry_continue(pos, member)                     \
+       for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+            pos;                                                       \
+            pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos:      the type * to use as a loop cursor.
- * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @pos:       the type * to use as a loop cursor.
  * @member:    the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_from(tpos, pos, member)                    \
-       for (; pos &&                                                    \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-            pos = pos->next)
+#define hlist_for_each_entry_from(pos, member)                         \
+       for (; pos;                                                     \
+            pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos:      the type * to use as a loop cursor.
- * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @pos:       the type * to use as a loop cursor.
  * @n:         another &struct hlist_node to use as temporary storage
  * @head:      the head for your list.
  * @member:    the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_safe(tpos, pos, n, head, member)           \
-       for (pos = (head)->first;                                        \
-            pos && ({ n = pos->next; 1; }) &&                           \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-            pos = n)
+#define hlist_for_each_entry_safe(pos, n, head, member)                \
+       for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+            pos && ({ n = pos->member.next; 1; });                     \
+            pos = hlist_entry_safe(n, typeof(*pos), member))
 
 #endif
index d0ab98f..a5199f6 100644 (file)
@@ -124,31 +124,6 @@ static inline void init_llist_head(struct llist_head *list)
             &(pos)->member != NULL;                                    \
             (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
 
-/**
- * llist_for_each_entry_safe - iterate safely against remove over some entries
- * of lock-less list of given type.
- * @pos:       the type * to use as a loop cursor.
- * @n:         another type * to use as a temporary storage.
- * @node:      the fist entry of deleted list entries.
- * @member:    the name of the llist_node with the struct.
- *
- * In general, some entries of the lock-less list can be traversed
- * safely only after being removed from list, so start with an entry
- * instead of list head. This variant allows removal of entries
- * as we iterate.
- *
- * If being used on entries deleted from lock-less list directly, the
- * traverse order is from the newest to the oldest added entry.  If
- * you want to traverse from the oldest to the newest, you must
- * reverse the order by yourself before traversing.
- */
-#define llist_for_each_entry_safe(pos, n, node, member)                \
-       for ((pos) = llist_entry((node), typeof(*(pos)), member),       \
-            (n) = (pos)->member.next;                                  \
-            &(pos)->member != NULL;                                    \
-            (pos) = llist_entry(n, typeof(*(pos)), member),            \
-            (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL)
-
 /**
  * llist_empty - tests whether a lock-less list is empty
  * @head:      the list to test
index f5a051a..dcaad79 100644 (file)
@@ -212,7 +212,8 @@ int           nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
 __be32           nlmclnt_grant(const struct sockaddr *addr,
                                const struct nlm_lock *lock);
 void             nlmclnt_recovery(struct nlm_host *);
-int              nlmclnt_reclaim(struct nlm_host *, struct file_lock *);
+int              nlmclnt_reclaim(struct nlm_host *, struct file_lock *,
+                                 struct nlm_rqst *);
 void             nlmclnt_next_cookie(struct nlm_cookie *);
 
 /*
@@ -291,7 +292,7 @@ int           nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
 
 static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
 {
-       return file->f_file->f_path.dentry->d_inode;
+       return file_inode(file->f_file);
 }
 
 static inline int __nlm_privileged_request4(const struct sockaddr *sap)
index d793497..a0848d9 100644 (file)
@@ -4,28 +4,28 @@
  *  LZO Public Kernel Interface
  *  A mini subset of the LZO real-time data compression library
  *
- *  Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *  Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
  *
  *  The full LZO package can be found at:
  *  http://www.oberhumer.com/opensource/lzo/
  *
- *  Changed for kernel use by:
+ *  Changed for Linux kernel use by:
  *  Nitin Gupta <nitingupta910@gmail.com>
  *  Richard Purdie <rpurdie@openedhand.com>
  */
 
-#define LZO1X_MEM_COMPRESS     (16384 * sizeof(unsigned char *))
-#define LZO1X_1_MEM_COMPRESS   LZO1X_MEM_COMPRESS
+#define LZO1X_1_MEM_COMPRESS   (8192 * sizeof(unsigned short))
+#define LZO1X_MEM_COMPRESS     LZO1X_1_MEM_COMPRESS
 
 #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
 
-/* This requires 'workmem' of size LZO1X_1_MEM_COMPRESS */
+/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
 int lzo1x_1_compress(const unsigned char *src, size_t src_len,
-                       unsigned char *dst, size_t *dst_len, void *wrkmem);
+                    unsigned char *dst, size_t *dst_len, void *wrkmem);
 
 /* safe decompression with overrun testing */
 int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
-                       unsigned char *dst, size_t *dst_len);
+                         unsigned char *dst, size_t *dst_len);
 
 /*
  * Return values (< 0 = Error)
@@ -40,5 +40,6 @@ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
 #define LZO_E_EOF_NOT_FOUND            (-7)
 #define LZO_E_INPUT_NOT_CONSUMED       (-8)
 #define LZO_E_NOT_YET_IMPLEMENTED      (-9)
+#define LZO_E_INVALID_ARGUMENT         (-10)
 
 #endif
index 3e5ecb2..f388203 100644 (file)
@@ -42,7 +42,6 @@ struct memblock {
 
 extern struct memblock memblock;
 extern int memblock_debug;
-extern struct movablemem_map movablemem_map;
 
 #define memblock_dbg(fmt, ...) \
        if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
@@ -61,7 +60,6 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
 void memblock_trim_memory(phys_addr_t align);
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);
 
index 2a32b16..786bf66 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/gpio.h>
 #include <linux/irqdomain.h>
+#include <linux/pwm.h>
 #include <linux/regmap.h>
 
 #define LP8788_DEV_BUCK                "lp8788-buck"
@@ -124,11 +125,6 @@ enum lp8788_bl_ramp_step {
        LP8788_RAMP_65538us,
 };
 
-enum lp8788_bl_pwm_polarity {
-       LP8788_PWM_ACTIVE_HIGH,
-       LP8788_PWM_ACTIVE_LOW,
-};
-
 enum lp8788_isink_scale {
        LP8788_ISINK_SCALE_100mA,
        LP8788_ISINK_SCALE_120mA,
@@ -228,16 +224,6 @@ struct lp8788_charger_platform_data {
                                enum lp8788_charger_event event);
 };
 
-/*
- * struct lp8788_bl_pwm_data
- * @pwm_set_intensity     : set duty of pwm
- * @pwm_get_intensity     : get current duty of pwm
- */
-struct lp8788_bl_pwm_data {
-       void (*pwm_set_intensity) (int brightness, int max_brightness);
-       int (*pwm_get_intensity) (int max_brightness);
-};
-
 /*
  * struct lp8788_backlight_platform_data
  * @name                  : backlight driver name. (default: "lcd-backlight")
@@ -248,8 +234,8 @@ struct lp8788_bl_pwm_data {
  * @rise_time             : brightness ramp up step time
  * @fall_time             : brightness ramp down step time
  * @pwm_pol               : pwm polarity setting when bl_mode is pwm based
- * @pwm_data              : platform specific pwm generation functions
- *                          only valid when bl_mode is pwm based
+ * @period_ns             : platform specific pwm period value. unit is nano.
+                           Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED
  */
 struct lp8788_backlight_platform_data {
        char *name;
@@ -259,8 +245,8 @@ struct lp8788_backlight_platform_data {
        enum lp8788_bl_full_scale_current full_scale;
        enum lp8788_bl_ramp_step rise_time;
        enum lp8788_bl_ramp_step fall_time;
-       enum lp8788_bl_pwm_polarity pwm_pol;
-       struct lp8788_bl_pwm_data pwm_data;
+       enum pwm_polarity pwm_pol;
+       unsigned int period_ns;
 };
 
 /*
index e7c3f9a..7acc9dc 100644 (file)
@@ -115,6 +115,8 @@ extern unsigned int kobjsize(const void *objp);
 # define VM_SAO                VM_ARCH_1       /* Strong Access Ordering (powerpc) */
 #elif defined(CONFIG_PARISC)
 # define VM_GROWSUP    VM_ARCH_1
+#elif defined(CONFIG_METAG)
+# define VM_GROWSUP    VM_ARCH_1
 #elif defined(CONFIG_IA64)
 # define VM_GROWSUP    VM_ARCH_1
 #elif !defined(CONFIG_MMU)
@@ -1333,24 +1335,6 @@ extern void free_bootmem_with_active_regions(int nid,
                                                unsigned long max_low_pfn);
 extern void sparse_memory_present_with_active_regions(int nid);
 
-#define MOVABLEMEM_MAP_MAX MAX_NUMNODES
-struct movablemem_entry {
-       unsigned long start_pfn;    /* start pfn of memory segment */
-       unsigned long end_pfn;      /* end pfn of memory segment (exclusive) */
-};
-
-struct movablemem_map {
-       bool acpi;      /* true if using SRAT info */
-       int nr_map;
-       struct movablemem_entry map[MOVABLEMEM_MAP_MAX];
-       nodemask_t numa_nodes_hotplug;  /* on which nodes we specify memory */
-       nodemask_t numa_nodes_kernel;   /* on which nodes kernel resides in */
-};
-
-extern void __init insert_movablemem_map(unsigned long start_pfn,
-                                        unsigned long end_pfn);
-extern int __init movablemem_map_overlap(unsigned long start_pfn,
-                                        unsigned long end_pfn);
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
index fed3def..779cf7c 100644 (file)
@@ -33,8 +33,7 @@ struct ieee1394_device_id {
        __u32 model_id;
        __u32 specifier_id;
        __u32 version;
-       kernel_ulong_t driver_data
-               __attribute__((aligned(sizeof(kernel_ulong_t))));
+       kernel_ulong_t driver_data;
 };
 
 
@@ -148,8 +147,7 @@ struct hid_device_id {
        __u16 group;
        __u32 vendor;
        __u32 product;
-       kernel_ulong_t driver_data
-               __attribute__((aligned(sizeof(kernel_ulong_t))));
+       kernel_ulong_t driver_data;
 };
 
 /* s390 CCW devices */
@@ -173,8 +171,6 @@ struct ccw_device_id {
 struct ap_device_id {
        __u16 match_flags;      /* which fields to match against */
        __u8 dev_type;          /* device type */
-       __u8 pad1;
-       __u32 pad2;
        kernel_ulong_t driver_info;
 };
 
@@ -184,13 +180,10 @@ struct ap_device_id {
 struct css_device_id {
        __u8 match_flags;
        __u8 type; /* subchannel type */
-       __u16 pad2;
-       __u32 pad3;
        kernel_ulong_t driver_data;
 };
 
-#define ACPI_ID_LEN    16 /* only 9 bytes needed here, 16 bytes are used */
-                          /* to workaround crosscompile issues */
+#define ACPI_ID_LEN    9
 
 struct acpi_device_id {
        __u8 id[ACPI_ID_LEN];
@@ -231,11 +224,7 @@ struct of_device_id
        char    name[32];
        char    type[32];
        char    compatible[128];
-#ifdef __KERNEL__
        const void *data;
-#else
-       kernel_ulong_t data;
-#endif
 };
 
 /* VIO */
@@ -260,24 +249,14 @@ struct pcmcia_device_id {
        /* for pseudo multi-function devices */
        __u8            device_no;
 
-       __u32           prod_id_hash[4]
-               __attribute__((aligned(sizeof(__u32))));
+       __u32           prod_id_hash[4];
 
        /* not matched against in kernelspace*/
-#ifdef __KERNEL__
        const char *    prod_id[4];
-#else
-       kernel_ulong_t  prod_id[4]
-               __attribute__((aligned(sizeof(kernel_ulong_t))));
-#endif
 
        /* not matched against */
        kernel_ulong_t  driver_info;
-#ifdef __KERNEL__
        char *          cisfile;
-#else
-       kernel_ulong_t  cisfile;
-#endif
 };
 
 #define PCMCIA_DEV_ID_MATCH_MANF_ID    0x0001
@@ -373,8 +352,7 @@ struct sdio_device_id {
        __u8    class;                  /* Standard interface or SDIO_ANY_ID */
        __u16   vendor;                 /* Vendor or SDIO_ANY_ID */
        __u16   device;                 /* Device ID or SDIO_ANY_ID */
-       kernel_ulong_t driver_data      /* Data private to the driver */
-               __attribute__((aligned(sizeof(kernel_ulong_t))));
+       kernel_ulong_t driver_data;     /* Data private to the driver */
 };
 
 /* SSB core, see drivers/ssb/ */
@@ -420,8 +398,7 @@ struct virtio_device_id {
  */
 struct hv_vmbus_device_id {
        __u8 guid[16];
-       kernel_ulong_t driver_data      /* Data private to the driver */
-                       __attribute__((aligned(sizeof(kernel_ulong_t))));
+       kernel_ulong_t driver_data;     /* Data private to the driver */
 };
 
 /* rpmsg */
@@ -440,8 +417,7 @@ struct rpmsg_device_id {
 
 struct i2c_device_id {
        char name[I2C_NAME_SIZE];
-       kernel_ulong_t driver_data      /* Data private to the driver */
-                       __attribute__((aligned(sizeof(kernel_ulong_t))));
+       kernel_ulong_t driver_data;     /* Data private to the driver */
 };
 
 /* spi */
@@ -451,8 +427,7 @@ struct i2c_device_id {
 
 struct spi_device_id {
        char name[SPI_NAME_SIZE];
-       kernel_ulong_t driver_data      /* Data private to the driver */
-                       __attribute__((aligned(sizeof(kernel_ulong_t))));
+       kernel_ulong_t driver_data;     /* Data private to the driver */
 };
 
 /* dmi */
@@ -484,15 +459,6 @@ struct dmi_strmatch {
        char substr[79];
 };
 
-#ifndef __KERNEL__
-struct dmi_system_id {
-       kernel_ulong_t callback;
-       kernel_ulong_t ident;
-       struct dmi_strmatch matches[4];
-       kernel_ulong_t driver_data
-                       __attribute__((aligned(sizeof(kernel_ulong_t))));
-};
-#else
 struct dmi_system_id {
        int (*callback)(const struct dmi_system_id *);
        const char *ident;
@@ -506,7 +472,6 @@ struct dmi_system_id {
  *     error: storage size of '__mod_dmi_device_table' isn't known
  */
 #define dmi_device_id dmi_system_id
-#endif
 
 #define DMI_MATCH(a, b)        { a, b }
 
@@ -515,8 +480,7 @@ struct dmi_system_id {
 
 struct platform_device_id {
        char name[PLATFORM_NAME_SIZE];
-       kernel_ulong_t driver_data
-                       __attribute__((aligned(sizeof(kernel_ulong_t))));
+       kernel_ulong_t driver_data;
 };
 
 #define MDIO_MODULE_PREFIX     "mdio:"
@@ -572,11 +536,7 @@ struct isapnp_device_id {
 struct amba_id {
        unsigned int            id;
        unsigned int            mask;
-#ifndef __KERNEL__
-       kernel_ulong_t          data;
-#else
        void                    *data;
-#endif
 };
 
 /*
index f6eb433..4b02512 100644 (file)
@@ -245,6 +245,7 @@ struct map_info {
        unsigned long pfow_base;
        unsigned long map_priv_1;
        unsigned long map_priv_2;
+       struct device_node *device_node;
        void *fldrv_priv;
        struct mtd_chip_driver *fldrv;
 };
@@ -328,7 +329,7 @@ static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word
 
 static inline map_word map_word_load(struct map_info *map, const void *ptr)
 {
-       map_word r = {{0} };
+       map_word r;
 
        if (map_bankwidth_is_1(map))
                r.x[0] = *(unsigned char *)ptr;
@@ -342,6 +343,8 @@ static inline map_word map_word_load(struct map_info *map, const void *ptr)
 #endif
        else if (map_bankwidth_is_large(map))
                memcpy(r.x, ptr, map->bankwidth);
+       else
+               BUG();
 
        return r;
 }
@@ -391,7 +394,7 @@ static inline map_word map_word_ff(struct map_info *map)
 
 static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
 {
-       map_word uninitialized_var(r);
+       map_word r;
 
        if (map_bankwidth_is_1(map))
                r.x[0] = __raw_readb(map->virt + ofs);
@@ -425,6 +428,8 @@ static inline void inline_map_write(struct map_info *map, const map_word datum,
 #endif
        else if (map_bankwidth_is_large(map))
                memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
+       else
+               BUG();
        mb();
 }
 
index 13441dd..4b993d3 100644 (file)
@@ -233,6 +233,7 @@ struct nfs4_layoutget_args {
        struct inode *inode;
        struct nfs_open_context *ctx;
        nfs4_stateid stateid;
+       unsigned long timestamp;
        struct nfs4_layoutdriver_data layout;
 };
 
index edc98de..d137218 100644 (file)
@@ -9,8 +9,8 @@ struct path {
        struct dentry *dentry;
 };
 
-extern void path_get(struct path *);
-extern void path_put(struct path *);
+extern void path_get(const struct path *);
+extern void path_put(const struct path *);
 
 static inline int path_equal(const struct path *path1, const struct path *path2)
 {
index 31717bd..f11c1c2 100644 (file)
 #define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX      0x3ce0
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB   0x402f
 #define PCI_DEVICE_ID_INTEL_5100_16    0x65f0
+#define PCI_DEVICE_ID_INTEL_5100_19    0x65f3
 #define PCI_DEVICE_ID_INTEL_5100_21    0x65f5
 #define PCI_DEVICE_ID_INTEL_5100_22    0x65f6
 #define PCI_DEVICE_ID_INTEL_5400_ERR   0x4030
index 2381c97..a089a3c 100644 (file)
@@ -176,9 +176,8 @@ pid_t pid_vnr(struct pid *pid);
 
 #define do_each_pid_task(pid, type, task)                              \
        do {                                                            \
-               struct hlist_node *pos___;                              \
                if ((pid) != NULL)                                      \
-                       hlist_for_each_entry_rcu((task), pos___,        \
+                       hlist_for_each_entry_rcu((task),                \
                                &(pid)->tasks[type], pids[type].node) {
 
                        /*
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
new file mode 100644 (file)
index 0000000..1bd5244
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * BCH Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ELM_H
+#define __ELM_H
+
+enum bch_ecc {
+       BCH4_ECC = 0,
+       BCH8_ECC,
+};
+
+/* ELM support 8 error syndrome process */
+#define ERROR_VECTOR_MAX               8
+
+#define BCH8_ECC_OOB_BYTES             13
+#define BCH4_ECC_OOB_BYTES             7
+/* RBL requires 14 byte even though BCH8 uses only 13 byte */
+#define BCH8_SIZE                      (BCH8_ECC_OOB_BYTES + 1)
+/* Uses 1 extra byte to handle erased pages */
+#define BCH4_SIZE                      (BCH4_ECC_OOB_BYTES + 1)
+
+/**
+ * struct elm_errorvec - error vector for elm
+ * @error_reported:            set true for vectors error is reported
+ * @error_uncorrectable:       number of uncorrectable errors
+ * @error_count:               number of correctable errors in the sector
+ * @error_loc:                 buffer for error location
+ *
+ */
+struct elm_errorvec {
+       bool error_reported;
+       bool error_uncorrectable;
+       int error_count;
+       int error_loc[ERROR_VECTOR_MAX];
+};
+
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+               struct elm_errorvec *err_vec);
+void elm_config(struct device *dev, enum bch_ecc bch_type);
+#endif /* __ELM_H */
index a7bdb2f..da7e627 100644 (file)
@@ -53,6 +53,8 @@ struct freq_clip_table {
  * struct exynos_tmu_platform_data
  * @threshold: basic temperature for generating interrupt
  *            25 <= threshold <= 125 [unit: degree Celsius]
+ * @threshold_falling: differntial value for setting threshold
+ *                    of temperature falling interrupt.
  * @trigger_levels: array for each interrupt levels
  *     [unit: degree Celsius]
  *     0: temperature for trigger_level0 interrupt
@@ -97,6 +99,7 @@ struct freq_clip_table {
  */
 struct exynos_tmu_platform_data {
        u8 threshold;
+       u8 threshold_falling;
        u8 trigger_levels[4];
        bool trigger_level0_en;
        bool trigger_level1_en;
index 58fdef1..d133711 100644 (file)
@@ -405,6 +405,7 @@ struct quota_module_name {
 #define INIT_QUOTA_MODULE_NAMES {\
        {QFMT_VFS_OLD, "quota_v1"},\
        {QFMT_VFS_V0, "quota_v2"},\
+       {QFMT_VFS_V1, "quota_v2"},\
        {0, NULL}}
 
 #endif /* _QUOTA_ */
index c92dd28..8089e35 100644 (file)
@@ -445,8 +445,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 
 /**
  * hlist_for_each_entry_rcu - iterate over rcu list of given type
- * @tpos:      the type * to use as a loop cursor.
- * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @pos:       the type * to use as a loop cursor.
  * @head:      the head for your list.
  * @member:    the name of the hlist_node within the struct.
  *
@@ -454,16 +453,16 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define hlist_for_each_entry_rcu(tpos, pos, head, member)              \
-       for (pos = rcu_dereference_raw(hlist_first_rcu(head));          \
-               pos &&                                                   \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-               pos = rcu_dereference_raw(hlist_next_rcu(pos)))
+#define hlist_for_each_entry_rcu(pos, head, member)                    \
+       for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
+                       typeof(*(pos)), member);                        \
+               pos;                                                    \
+               pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
+                       &(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
- * @tpos:      the type * to use as a loop cursor.
- * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @pos:       the type * to use as a loop cursor.
  * @head:      the head for your list.
  * @member:    the name of the hlist_node within the struct.
  *
@@ -471,35 +470,36 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member)            \
-       for (pos = rcu_dereference_bh((head)->first);                    \
-               pos &&                                                   \
-               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-               pos = rcu_dereference_bh(pos->next))
+#define hlist_for_each_entry_rcu_bh(pos, head, member)                 \
+       for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
+                       typeof(*(pos)), member);                        \
+               pos;                                                    \
+               pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
+                       &(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
- * @tpos:      the type * to use as a loop cursor.
- * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @pos:       the type * to use as a loop cursor.
  * @member:    the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_continue_rcu(tpos, pos, member)           \
-       for (pos = rcu_dereference((pos)->next);                        \
-            pos &&                                                     \
-            ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });  \
-            pos = rcu_dereference(pos->next))
+#define hlist_for_each_entry_continue_rcu(pos, member)                 \
+       for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+                       typeof(*(pos)), member);                        \
+            pos;                                                       \
+            pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+                       typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
- * @tpos:      the type * to use as a loop cursor.
- * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @pos:       the type * to use as a loop cursor.
  * @member:    the name of the hlist_node within the struct.
  */
-#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member)                \
-       for (pos = rcu_dereference_bh((pos)->next);                     \
-            pos &&                                                     \
-            ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });  \
-            pos = rcu_dereference_bh(pos->next))
+#define hlist_for_each_entry_continue_rcu_bh(pos, member)              \
+       for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
+                       typeof(*(pos)), member);                        \
+            pos;                                                       \
+            pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
+                       typeof(*(pos)), member))
 
 
 #endif /* __KERNEL__ */
index 4bd6c06..2d8bdae 100644 (file)
@@ -231,6 +231,41 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
  */
 #define SG_MAX_SINGLE_ALLOC            (PAGE_SIZE / sizeof(struct scatterlist))
 
+/*
+ * sg page iterator
+ *
+ * Iterates over sg entries page-by-page.  On each successful iteration,
+ * @piter->page points to the current page, @piter->sg to the sg holding this
+ * page and @piter->sg_pgoffset to the page's page offset within the sg. The
+ * iteration will stop either when a maximum number of sg entries was reached
+ * or a terminating sg (sg_last(sg) == true) was reached.
+ */
+struct sg_page_iter {
+       struct page             *page;          /* current page */
+       struct scatterlist      *sg;            /* sg holding the page */
+       unsigned int            sg_pgoffset;    /* page offset within the sg */
+
+       /* these are internal states, keep away */
+       unsigned int            __nents;        /* remaining sg entries */
+       int                     __pg_advance;   /* nr pages to advance at the
+                                                * next step */
+};
+
+bool __sg_page_iter_next(struct sg_page_iter *piter);
+void __sg_page_iter_start(struct sg_page_iter *piter,
+                         struct scatterlist *sglist, unsigned int nents,
+                         unsigned long pgoffset);
+
+/**
+ * for_each_sg_page - iterate over the pages of the given sg list
+ * @sglist:    sglist to iterate over
+ * @piter:     page iterator to hold current page, sg, sg_pgoffset
+ * @nents:     maximum number of sg entries to iterate over
+ * @pgoffset:  starting page offset
+ */
+#define for_each_sg_page(sglist, piter, nents, pgoffset)                  \
+       for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
+            __sg_page_iter_next(piter);)
 
 /*
  * Mapping sg iterator
@@ -258,11 +293,11 @@ struct sg_mapping_iter {
        void                    *addr;          /* pointer to the mapped area */
        size_t                  length;         /* length of the mapped area */
        size_t                  consumed;       /* number of consumed bytes */
+       struct sg_page_iter     piter;          /* page iterator */
 
        /* these are internal states, keep away */
-       struct scatterlist      *__sg;          /* current entry */
-       unsigned int            __nents;        /* nr of remaining entries */
-       unsigned int            __offset;       /* offset within sg */
+       unsigned int            __offset;       /* offset within page */
+       unsigned int            __remaining;    /* remaining bytes on page */
        unsigned int            __flags;
 };
 
index 0655570..d35d2b6 100644 (file)
@@ -99,7 +99,6 @@ extern int nr_threads;
 DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
-extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
@@ -347,11 +346,6 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
 
-/* get/set_dumpable() values */
-#define SUID_DUMPABLE_DISABLED 0
-#define SUID_DUMPABLE_ENABLED  1
-#define SUID_DUMPABLE_SAFE     2
-
 /* mm flags */
 /* dumpable bits */
 #define MMF_DUMPABLE      0  /* core dump is permitted */
index c65dee0..13e9296 100644 (file)
@@ -24,6 +24,9 @@ struct smpboot_thread_data;
  *                     parked (cpu offline)
  * @unpark:            Optional unpark function, called when the thread is
  *                     unparked (cpu online)
+ * @pre_unpark:                Optional unpark function, called before the thread is
+ *                     unparked (cpu online). This is not guaranteed to be
+ *                     called on the target cpu of the thread. Careful!
  * @selfparking:       Thread is not parked by the park function.
  * @thread_comm:       The base name of the thread
  */
@@ -37,6 +40,7 @@ struct smp_hotplug_thread {
        void                            (*cleanup)(unsigned int cpu, bool online);
        void                            (*park)(unsigned int cpu);
        void                            (*unpark)(unsigned int cpu);
+       void                            (*pre_unpark)(unsigned int cpu);
        bool                            selfparking;
        const char                      *thread_comm;
 };
diff --git a/include/linux/stmp3xxx_rtc_wdt.h b/include/linux/stmp3xxx_rtc_wdt.h
new file mode 100644 (file)
index 0000000..1dd12c9
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * stmp3xxx_rtc_wdt.h
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This file is released under the GPLv2.
+ */
+#ifndef __LINUX_STMP3XXX_RTC_WDT_H
+#define __LINUX_STMP3XXX_RTC_WDT_H
+
+struct stmp3xxx_wdt_pdata {
+       void (*wdt_set_timeout)(struct device *dev, u32 timeout);
+};
+
+#endif /* __LINUX_STMP3XXX_RTC_WDT_H */
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
new file mode 100644 (file)
index 0000000..07d8e53
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * linux/include/linux/sunrpc/addr.h
+ *
+ * Various routines for copying and comparing sockaddrs and for
+ * converting them to and from presentation format.
+ */
+#ifndef _LINUX_SUNRPC_ADDR_H
+#define _LINUX_SUNRPC_ADDR_H
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/ipv6.h>
+
+size_t         rpc_ntop(const struct sockaddr *, char *, const size_t);
+size_t         rpc_pton(struct net *, const char *, const size_t,
+                        struct sockaddr *, const size_t);
+char *         rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
+size_t         rpc_uaddr2sockaddr(struct net *, const char *, const size_t,
+                                  struct sockaddr *, const size_t);
+
+static inline unsigned short rpc_get_port(const struct sockaddr *sap)
+{
+       switch (sap->sa_family) {
+       case AF_INET:
+               return ntohs(((struct sockaddr_in *)sap)->sin_port);
+       case AF_INET6:
+               return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+       }
+       return 0;
+}
+
+static inline void rpc_set_port(struct sockaddr *sap,
+                               const unsigned short port)
+{
+       switch (sap->sa_family) {
+       case AF_INET:
+               ((struct sockaddr_in *)sap)->sin_port = htons(port);
+               break;
+       case AF_INET6:
+               ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
+               break;
+       }
+}
+
+#define IPV6_SCOPE_DELIMITER           '%'
+#define IPV6_SCOPE_ID_LEN              sizeof("%nnnnnnnnnn")
+
+static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
+                                  const struct sockaddr *sap2)
+{
+       const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
+       const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
+
+       return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
+}
+
+static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+                                   const struct sockaddr *src)
+{
+       const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
+       struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+
+       dsin->sin_family = ssin->sin_family;
+       dsin->sin_addr.s_addr = ssin->sin_addr.s_addr;
+       return true;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+                                  const struct sockaddr *sap2)
+{
+       const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
+       const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
+
+       if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
+               return false;
+       else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+               return sin1->sin6_scope_id == sin2->sin6_scope_id;
+
+       return true;
+}
+
+static inline bool __rpc_copy_addr6(struct sockaddr *dst,
+                                   const struct sockaddr *src)
+{
+       const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src;
+       struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
+
+       dsin6->sin6_family = ssin6->sin6_family;
+       dsin6->sin6_addr = ssin6->sin6_addr;
+       dsin6->sin6_scope_id = ssin6->sin6_scope_id;
+       return true;
+}
+#else  /* !(IS_ENABLED(CONFIG_IPV6) */
+static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+                                  const struct sockaddr *sap2)
+{
+       return false;
+}
+
+static inline bool __rpc_copy_addr6(struct sockaddr *dst,
+                                   const struct sockaddr *src)
+{
+       return false;
+}
+#endif /* !(IS_ENABLED(CONFIG_IPV6) */
+
+/**
+ * rpc_cmp_addr - compare the address portion of two sockaddrs.
+ * @sap1: first sockaddr
+ * @sap2: second sockaddr
+ *
+ * Just compares the family and address portion. Ignores port, but
+ * compares the scope if it's a link-local address.
+ *
+ * Returns true if the addrs are equal, false if they aren't.
+ */
+static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
+                               const struct sockaddr *sap2)
+{
+       if (sap1->sa_family == sap2->sa_family) {
+               switch (sap1->sa_family) {
+               case AF_INET:
+                       return __rpc_cmp_addr4(sap1, sap2);
+               case AF_INET6:
+                       return __rpc_cmp_addr6(sap1, sap2);
+               }
+       }
+       return false;
+}
+
+/**
+ * rpc_copy_addr - copy the address portion of one sockaddr to another
+ * @dst: destination sockaddr
+ * @src: source sockaddr
+ *
+ * Just copies the address portion and family. Ignores port, scope, etc.
+ * Caller is responsible for making certain that dst is large enough to hold
+ * the address in src. Returns true if address family is supported. Returns
+ * false otherwise.
+ */
+static inline bool rpc_copy_addr(struct sockaddr *dst,
+                                const struct sockaddr *src)
+{
+       switch (src->sa_family) {
+       case AF_INET:
+               return __rpc_copy_addr4(dst, src);
+       case AF_INET6:
+               return __rpc_copy_addr6(dst, src);
+       }
+       return false;
+}
+
+/**
+ * rpc_get_scope_id - return scopeid for a given sockaddr
+ * @sa: sockaddr to get scopeid from
+ *
+ * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if
+ * not an AF_INET6 address.
+ */
+static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
+{
+       if (sa->sa_family != AF_INET6)
+               return 0;
+
+       return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
+}
+
+#endif /* _LINUX_SUNRPC_ADDR_H */
index 5dc9ee4..303399b 100644 (file)
@@ -83,6 +83,10 @@ struct cache_detail {
        int                     (*cache_upcall)(struct cache_detail *,
                                                struct cache_head *);
 
+       void                    (*cache_request)(struct cache_detail *cd,
+                                                struct cache_head *ch,
+                                                char **bpp, int *blen);
+
        int                     (*cache_parse)(struct cache_detail *,
                                               char *buf, int len);
 
@@ -157,11 +161,7 @@ sunrpc_cache_update(struct cache_detail *detail,
                    struct cache_head *new, struct cache_head *old, int hash);
 
 extern int
-sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
-               void (*cache_request)(struct cache_detail *,
-                                     struct cache_head *,
-                                     char **,
-                                     int *));
+sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h);
 
 
 extern void cache_clean_deferred(void *owner);
index 34206b8..2cf4ffa 100644 (file)
@@ -160,162 +160,11 @@ void             rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
 int            rpc_protocol(struct rpc_clnt *);
 struct net *   rpc_net_ns(struct rpc_clnt *);
 size_t         rpc_max_payload(struct rpc_clnt *);
+unsigned long  rpc_get_timeout(struct rpc_clnt *clnt);
 void           rpc_force_rebind(struct rpc_clnt *);
 size_t         rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
 const char     *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
 int            rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
 
-size_t         rpc_ntop(const struct sockaddr *, char *, const size_t);
-size_t         rpc_pton(struct net *, const char *, const size_t,
-                        struct sockaddr *, const size_t);
-char *         rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
-size_t         rpc_uaddr2sockaddr(struct net *, const char *, const size_t,
-                                  struct sockaddr *, const size_t);
-
-static inline unsigned short rpc_get_port(const struct sockaddr *sap)
-{
-       switch (sap->sa_family) {
-       case AF_INET:
-               return ntohs(((struct sockaddr_in *)sap)->sin_port);
-       case AF_INET6:
-               return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
-       }
-       return 0;
-}
-
-static inline void rpc_set_port(struct sockaddr *sap,
-                               const unsigned short port)
-{
-       switch (sap->sa_family) {
-       case AF_INET:
-               ((struct sockaddr_in *)sap)->sin_port = htons(port);
-               break;
-       case AF_INET6:
-               ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
-               break;
-       }
-}
-
-#define IPV6_SCOPE_DELIMITER           '%'
-#define IPV6_SCOPE_ID_LEN              sizeof("%nnnnnnnnnn")
-
-static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
-                                  const struct sockaddr *sap2)
-{
-       const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
-       const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
-
-       return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
-}
-
-static inline bool __rpc_copy_addr4(struct sockaddr *dst,
-                                   const struct sockaddr *src)
-{
-       const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
-       struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
-
-       dsin->sin_family = ssin->sin_family;
-       dsin->sin_addr.s_addr = ssin->sin_addr.s_addr;
-       return true;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
-                                  const struct sockaddr *sap2)
-{
-       const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
-       const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
-
-       if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
-               return false;
-       else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-               return sin1->sin6_scope_id == sin2->sin6_scope_id;
-
-       return true;
-}
-
-static inline bool __rpc_copy_addr6(struct sockaddr *dst,
-                                   const struct sockaddr *src)
-{
-       const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src;
-       struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
-
-       dsin6->sin6_family = ssin6->sin6_family;
-       dsin6->sin6_addr = ssin6->sin6_addr;
-       return true;
-}
-#else  /* !(IS_ENABLED(CONFIG_IPV6) */
-static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
-                                  const struct sockaddr *sap2)
-{
-       return false;
-}
-
-static inline bool __rpc_copy_addr6(struct sockaddr *dst,
-                                   const struct sockaddr *src)
-{
-       return false;
-}
-#endif /* !(IS_ENABLED(CONFIG_IPV6) */
-
-/**
- * rpc_cmp_addr - compare the address portion of two sockaddrs.
- * @sap1: first sockaddr
- * @sap2: second sockaddr
- *
- * Just compares the family and address portion. Ignores port, scope, etc.
- * Returns true if the addrs are equal, false if they aren't.
- */
-static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
-                               const struct sockaddr *sap2)
-{
-       if (sap1->sa_family == sap2->sa_family) {
-               switch (sap1->sa_family) {
-               case AF_INET:
-                       return __rpc_cmp_addr4(sap1, sap2);
-               case AF_INET6:
-                       return __rpc_cmp_addr6(sap1, sap2);
-               }
-       }
-       return false;
-}
-
-/**
- * rpc_copy_addr - copy the address portion of one sockaddr to another
- * @dst: destination sockaddr
- * @src: source sockaddr
- *
- * Just copies the address portion and family. Ignores port, scope, etc.
- * Caller is responsible for making certain that dst is large enough to hold
- * the address in src. Returns true if address family is supported. Returns
- * false otherwise.
- */
-static inline bool rpc_copy_addr(struct sockaddr *dst,
-                                const struct sockaddr *src)
-{
-       switch (src->sa_family) {
-       case AF_INET:
-               return __rpc_copy_addr4(dst, src);
-       case AF_INET6:
-               return __rpc_copy_addr6(dst, src);
-       }
-       return false;
-}
-
-/**
- * rpc_get_scope_id - return scopeid for a given sockaddr
- * @sa: sockaddr to get scopeid from
- *
- * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if
- * not an AF_INET6 address.
- */
-static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
-{
-       if (sa->sa_family != AF_INET6)
-               return 0;
-
-       return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
-}
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_CLNT_H */
index 676ddf5..1f0216b 100644 (file)
@@ -50,6 +50,7 @@ struct svc_pool {
        unsigned int            sp_nrthreads;   /* # of threads in pool */
        struct list_head        sp_all_threads; /* all server threads */
        struct svc_pool_stats   sp_stats;       /* statistics on pool operation */
+       int                     sp_task_pending;/* has pending task */
 } ____cacheline_aligned_in_smp;
 
 /*
index 6398899..15f9204 100644 (file)
@@ -56,7 +56,7 @@ struct xdr_buf {
        struct kvec     head[1],        /* RPC header + non-page data */
                        tail[1];        /* Appended after page data */
 
-       struct page **  pages;          /* Array of contiguous pages */
+       struct page **  pages;          /* Array of pages */
        unsigned int    page_base,      /* Start of page data */
                        page_len,       /* Length of page data */
                        flags;          /* Flags for data disposition */
@@ -152,6 +152,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
 extern void xdr_shift_buf(struct xdr_buf *, size_t);
 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
 extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
 extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
index fe82022..f0bd7f9 100644 (file)
@@ -74,6 +74,8 @@ enum thermal_trend {
        THERMAL_TREND_STABLE, /* temperature is stable */
        THERMAL_TREND_RAISING, /* temperature is raising */
        THERMAL_TREND_DROPPING, /* temperature is dropping */
+       THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */
+       THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
 };
 
 /* Events supported by Thermal Netlink */
@@ -121,6 +123,7 @@ struct thermal_zone_device_ops {
        int (*set_trip_hyst) (struct thermal_zone_device *, int,
                              unsigned long);
        int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
+       int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
        int (*get_trend) (struct thermal_zone_device *, int,
                          enum thermal_trend *);
        int (*notify) (struct thermal_zone_device *, int,
@@ -163,6 +166,7 @@ struct thermal_zone_device {
        int polling_delay;
        int temperature;
        int last_temperature;
+       int emul_temperature;
        int passive;
        unsigned int forced_passive;
        const struct thermal_zone_device_ops *ops;
@@ -244,9 +248,11 @@ int thermal_register_governor(struct thermal_governor *);
 void thermal_unregister_governor(struct thermal_governor *);
 
 #ifdef CONFIG_NET
-extern int thermal_generate_netlink_event(u32 orig, enum events event);
+extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+                                               enum events event);
 #else
-static inline int thermal_generate_netlink_event(u32 orig, enum events event)
+static int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+                                               enum events event)
 {
        return 0;
 }
index cf8adb1..ff6714e 100644 (file)
@@ -78,7 +78,7 @@ struct virtio_device {
        int index;
        struct device dev;
        struct virtio_device_id id;
-       struct virtio_config_ops *config;
+       const struct virtio_config_ops *config;
        struct list_head vqs;
        /* Note that this is a Linux set_bit-style bitmap. */
        unsigned long features[1];
@@ -126,4 +126,13 @@ static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
 
 int register_virtio_driver(struct virtio_driver *drv);
 void unregister_virtio_driver(struct virtio_driver *drv);
+
+/* module_virtio_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_virtio_driver(__virtio_driver) \
+       module_driver(__virtio_driver, register_virtio_driver, \
+                       unregister_virtio_driver)
 #endif /* _LINUX_VIRTIO_H */
index 3a9df2f..2a3038e 100644 (file)
@@ -118,6 +118,13 @@ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool noway
                set_bit(WDOG_NO_WAY_OUT, &wdd->status);
 }
 
+/* Use the following function to check if a timeout value is invalid */
+static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)
+{
+       return ((wdd->max_timeout != 0) &&
+               (t < wdd->min_timeout || t > wdd->max_timeout));
+}
+
 /* Use the following functions to manipulate watchdog driver specific data */
 static inline void watchdog_set_drvdata(struct watchdog_device *wdd, void *data)
 {
@@ -130,6 +137,8 @@ static inline void *watchdog_get_drvdata(struct watchdog_device *wdd)
 }
 
 /* drivers/watchdog/watchdog_core.c */
+extern int watchdog_init_timeout(struct watchdog_device *wdd,
+                                 unsigned int timeout_parm, struct device *dev);
 extern int watchdog_register_device(struct watchdog_device *);
 extern void watchdog_unregister_device(struct watchdog_device *);
 
index b82a83a..9a9367c 100644 (file)
@@ -87,9 +87,9 @@ int inode_wait(void *);
 void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
                                                        enum wb_reason reason);
-int writeback_inodes_sb_if_idle(struct super_block *, enum wb_reason reason);
-int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr,
-                                                       enum wb_reason reason);
+int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+                                 enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
 long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
                                enum wb_reason reason);
index 5ff70f4..4c7c01a 100644 (file)
@@ -192,7 +192,7 @@ struct p9_fid {
        void *rdir;
 
        struct list_head flist;
-       struct list_head dlist; /* list of all fids attached to a dentry */
+       struct hlist_node dlist;        /* list of all fids attached to a dentry */
 };
 
 /**
index 53539ac..89ed9ac 100644 (file)
@@ -161,8 +161,8 @@ typedef struct ax25_uid_assoc {
        ax25_address            call;
 } ax25_uid_assoc;
 
-#define ax25_uid_for_each(__ax25, node, list) \
-       hlist_for_each_entry(__ax25, node, list, uid_node)
+#define ax25_uid_for_each(__ax25, list) \
+       hlist_for_each_entry(__ax25, list, uid_node)
 
 #define ax25_uid_hold(ax25) \
        atomic_inc(&((ax25)->refcount))
@@ -247,8 +247,8 @@ typedef struct ax25_cb {
 
 #define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo)
 
-#define ax25_for_each(__ax25, node, list) \
-       hlist_for_each_entry(__ax25, node, list, ax25_node)
+#define ax25_for_each(__ax25, list) \
+       hlist_for_each_entry(__ax25, list, ax25_node)
 
 #define ax25_cb_hold(__ax25) \
        atomic_inc(&((__ax25)->refcount))
index 7b2ae9d..ef83d9e 100644 (file)
@@ -94,8 +94,8 @@ static inline struct net *ib_net(struct inet_bind_bucket *ib)
        return read_pnet(&ib->ib_net);
 }
 
-#define inet_bind_bucket_for_each(tb, pos, head) \
-       hlist_for_each_entry(tb, pos, head, node)
+#define inet_bind_bucket_for_each(tb, head) \
+       hlist_for_each_entry(tb, head, node)
 
 struct inet_bind_hashbucket {
        spinlock_t              lock;
index 7d658d5..f908dfc 100644 (file)
@@ -178,11 +178,11 @@ static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
 #define inet_twsk_for_each(tw, node, head) \
        hlist_nulls_for_each_entry(tw, node, head, tw_node)
 
-#define inet_twsk_for_each_inmate(tw, node, jail) \
-       hlist_for_each_entry(tw, node, jail, tw_death_node)
+#define inet_twsk_for_each_inmate(tw, jail) \
+       hlist_for_each_entry(tw, jail, tw_death_node)
 
-#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \
-       hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
+#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
+       hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
 
 static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
 {
index f0793c1..121dcf8 100644 (file)
@@ -154,17 +154,17 @@ static __inline__ void nr_node_unlock(struct nr_node *nr_node)
        nr_node_put(nr_node);
 }
 
-#define nr_neigh_for_each(__nr_neigh, node, list) \
-       hlist_for_each_entry(__nr_neigh, node, list, neigh_node)
+#define nr_neigh_for_each(__nr_neigh, list) \
+       hlist_for_each_entry(__nr_neigh, list, neigh_node)
 
-#define nr_neigh_for_each_safe(__nr_neigh, node, node2, list) \
-       hlist_for_each_entry_safe(__nr_neigh, node, node2, list, neigh_node)
+#define nr_neigh_for_each_safe(__nr_neigh, node2, list) \
+       hlist_for_each_entry_safe(__nr_neigh, node2, list, neigh_node)
 
-#define nr_node_for_each(__nr_node, node, list) \
-       hlist_for_each_entry(__nr_node, node, list, node_node)
+#define nr_node_for_each(__nr_node, list) \
+       hlist_for_each_entry(__nr_node, list, node_node)
 
-#define nr_node_for_each_safe(__nr_node, node, node2, list) \
-       hlist_for_each_entry_safe(__nr_node, node, node2, list, node_node)
+#define nr_node_for_each_safe(__nr_node, node2, list) \
+       hlist_for_each_entry_safe(__nr_node, node2, list, node_node)
 
 
 /*********************************************************************/
index 2761c90..f10818f 100644 (file)
@@ -339,11 +339,10 @@ static inline struct Qdisc_class_common *
 qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
 {
        struct Qdisc_class_common *cl;
-       struct hlist_node *n;
        unsigned int h;
 
        h = qdisc_class_hash(id, hash->hashmask);
-       hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
+       hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
index 7fdf298..df85a0c 100644 (file)
@@ -675,8 +675,8 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
        return h & (sctp_assoc_hashsize - 1);
 }
 
-#define sctp_for_each_hentry(epb, node, head) \
-       hlist_for_each_entry(epb, node, head, node)
+#define sctp_for_each_hentry(epb, head) \
+       hlist_for_each_entry(epb, head, node)
 
 /* Is a socket of this style? */
 #define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
index a66caa2..14f6e9d 100644 (file)
@@ -606,24 +606,23 @@ static inline void sk_add_bind_node(struct sock *sk,
        hlist_add_head(&sk->sk_bind_node, list);
 }
 
-#define sk_for_each(__sk, node, list) \
-       hlist_for_each_entry(__sk, node, list, sk_node)
-#define sk_for_each_rcu(__sk, node, list) \
-       hlist_for_each_entry_rcu(__sk, node, list, sk_node)
+#define sk_for_each(__sk, list) \
+       hlist_for_each_entry(__sk, list, sk_node)
+#define sk_for_each_rcu(__sk, list) \
+       hlist_for_each_entry_rcu(__sk, list, sk_node)
 #define sk_nulls_for_each(__sk, node, list) \
        hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
 #define sk_nulls_for_each_rcu(__sk, node, list) \
        hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
-#define sk_for_each_from(__sk, node) \
-       if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
-               hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_for_each_from(__sk) \
+       hlist_for_each_entry_from(__sk, sk_node)
 #define sk_nulls_for_each_from(__sk, node) \
        if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
                hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
-#define sk_for_each_safe(__sk, node, tmp, list) \
-       hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
-#define sk_for_each_bound(__sk, node, list) \
-       hlist_for_each_entry(__sk, node, list, sk_bind_node)
+#define sk_for_each_safe(__sk, tmp, list) \
+       hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
+#define sk_for_each_bound(__sk, list) \
+       hlist_for_each_entry(__sk, list, sk_bind_node)
 
 static inline struct user_namespace *sk_user_ns(struct sock *sk)
 {
index 23f2e98..cf0694d 100644 (file)
@@ -1045,6 +1045,10 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
        if (sysctl_tcp_low_latency || !tp->ucopy.task)
                return false;
 
+       if (skb->len <= tcp_hdrlen(skb) &&
+           skb_queue_len(&tp->ucopy.prequeue) == 0)
+               return false;
+
        __skb_queue_tail(&tp->ucopy.prequeue, skb);
        tp->ucopy.memory += skb->truesize;
        if (tp->ucopy.memory > sk->sk_rcvbuf) {
index 260470e..21cdb0b 100644 (file)
@@ -78,9 +78,7 @@ TRACE_EVENT(mc_event,
 
        TP_printk("%d %s error%s:%s%s on %s (mc:%d location:%d:%d:%d address:0x%08lx grain:%d syndrome:0x%08lx%s%s)",
                  __entry->error_count,
-                 (__entry->error_type == HW_EVENT_ERR_CORRECTED) ? "Corrected" :
-                       ((__entry->error_type == HW_EVENT_ERR_FATAL) ?
-                       "Fatal" : "Uncorrected"),
+                 mc_event_error_type(__entry->error_type),
                  __entry->error_count > 1 ? "s" : "",
                  ((char *)__get_str(msg))[0] ? " " : "",
                  __get_str(msg),
index f2b9491..562ff9d 100644 (file)
@@ -1,4 +1 @@
-header-y += scsi_netlink.h
-header-y += scsi_netlink_fc.h
-header-y += scsi_bsg_fc.h
 header-y += fc/
index 5660381..e69de29 100644 (file)
@@ -1,4 +0,0 @@
-header-y += fc_els.h
-header-y += fc_fs.h
-header-y += fc_gs.h
-header-y += fc_ns.h
diff --git a/include/scsi/fc/fc_els.h b/include/scsi/fc/fc_els.h
deleted file mode 100644 (file)
index 481abbd..0000000
+++ /dev/null
@@ -1,831 +0,0 @@
-/*
- * Copyright(c) 2007 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained at www.Open-FCoE.org
- */
-
-#ifndef _FC_ELS_H_
-#define        _FC_ELS_H_
-
-#include <linux/types.h>
-
-/*
- * Fibre Channel Switch - Enhanced Link Services definitions.
- * From T11 FC-LS Rev 1.2 June 7, 2005.
- */
-
-/*
- * ELS Command codes - byte 0 of the frame payload
- */
-enum fc_els_cmd {
-       ELS_LS_RJT =    0x01,   /* ESL reject */
-       ELS_LS_ACC =    0x02,   /* ESL Accept */
-       ELS_PLOGI =     0x03,   /* N_Port login */
-       ELS_FLOGI =     0x04,   /* F_Port login */
-       ELS_LOGO =      0x05,   /* Logout */
-       ELS_ABTX =      0x06,   /* Abort exchange - obsolete */
-       ELS_RCS =       0x07,   /* read connection status */
-       ELS_RES =       0x08,   /* read exchange status block */
-       ELS_RSS =       0x09,   /* read sequence status block */
-       ELS_RSI =       0x0a,   /* read sequence initiative */
-       ELS_ESTS =      0x0b,   /* establish streaming */
-       ELS_ESTC =      0x0c,   /* estimate credit */
-       ELS_ADVC =      0x0d,   /* advise credit */
-       ELS_RTV =       0x0e,   /* read timeout value */
-       ELS_RLS =       0x0f,   /* read link error status block */
-       ELS_ECHO =      0x10,   /* echo */
-       ELS_TEST =      0x11,   /* test */
-       ELS_RRQ =       0x12,   /* reinstate recovery qualifier */
-       ELS_REC =       0x13,   /* read exchange concise */
-       ELS_SRR =       0x14,   /* sequence retransmission request */
-       ELS_PRLI =      0x20,   /* process login */
-       ELS_PRLO =      0x21,   /* process logout */
-       ELS_SCN =       0x22,   /* state change notification */
-       ELS_TPLS =      0x23,   /* test process login state */
-       ELS_TPRLO =     0x24,   /* third party process logout */
-       ELS_LCLM =      0x25,   /* login control list mgmt (obs) */
-       ELS_GAID =      0x30,   /* get alias_ID */
-       ELS_FACT =      0x31,   /* fabric activate alias_id */
-       ELS_FDACDT =    0x32,   /* fabric deactivate alias_id */
-       ELS_NACT =      0x33,   /* N-port activate alias_id */
-       ELS_NDACT =     0x34,   /* N-port deactivate alias_id */
-       ELS_QOSR =      0x40,   /* quality of service request */
-       ELS_RVCS =      0x41,   /* read virtual circuit status */
-       ELS_PDISC =     0x50,   /* discover N_port service params */
-       ELS_FDISC =     0x51,   /* discover F_port service params */
-       ELS_ADISC =     0x52,   /* discover address */
-       ELS_RNC =       0x53,   /* report node cap (obs) */
-       ELS_FARP_REQ =  0x54,   /* FC ARP request */
-       ELS_FARP_REPL = 0x55,   /* FC ARP reply */
-       ELS_RPS =       0x56,   /* read port status block */
-       ELS_RPL =       0x57,   /* read port list */
-       ELS_RPBC =      0x58,   /* read port buffer condition */
-       ELS_FAN =       0x60,   /* fabric address notification */
-       ELS_RSCN =      0x61,   /* registered state change notification */
-       ELS_SCR =       0x62,   /* state change registration */
-       ELS_RNFT =      0x63,   /* report node FC-4 types */
-       ELS_CSR =       0x68,   /* clock synch. request */
-       ELS_CSU =       0x69,   /* clock synch. update */
-       ELS_LINIT =     0x70,   /* loop initialize */
-       ELS_LSTS =      0x72,   /* loop status */
-       ELS_RNID =      0x78,   /* request node ID data */
-       ELS_RLIR =      0x79,   /* registered link incident report */
-       ELS_LIRR =      0x7a,   /* link incident record registration */
-       ELS_SRL =       0x7b,   /* scan remote loop */
-       ELS_SBRP =      0x7c,   /* set bit-error reporting params */
-       ELS_RPSC =      0x7d,   /* report speed capabilities */
-       ELS_QSA =       0x7e,   /* query security attributes */
-       ELS_EVFP =      0x7f,   /* exchange virt. fabrics params */
-       ELS_LKA =       0x80,   /* link keep-alive */
-       ELS_AUTH_ELS =  0x90,   /* authentication ELS */
-};
-
-/*
- * Initializer useful for decoding table.
- * Please keep this in sync with the above definitions.
- */
-#define        FC_ELS_CMDS_INIT {                      \
-       [ELS_LS_RJT] =  "LS_RJT",               \
-       [ELS_LS_ACC] =  "LS_ACC",               \
-       [ELS_PLOGI] =   "PLOGI",                \
-       [ELS_FLOGI] =   "FLOGI",                \
-       [ELS_LOGO] =    "LOGO",                 \
-       [ELS_ABTX] =    "ABTX",                 \
-       [ELS_RCS] =     "RCS",                  \
-       [ELS_RES] =     "RES",                  \
-       [ELS_RSS] =     "RSS",                  \
-       [ELS_RSI] =     "RSI",                  \
-       [ELS_ESTS] =    "ESTS",                 \
-       [ELS_ESTC] =    "ESTC",                 \
-       [ELS_ADVC] =    "ADVC",                 \
-       [ELS_RTV] =     "RTV",                  \
-       [ELS_RLS] =     "RLS",                  \
-       [ELS_ECHO] =    "ECHO",                 \
-       [ELS_TEST] =    "TEST",                 \
-       [ELS_RRQ] =     "RRQ",                  \
-       [ELS_REC] =     "REC",                  \
-       [ELS_SRR] =     "SRR",                  \
-       [ELS_PRLI] =    "PRLI",                 \
-       [ELS_PRLO] =    "PRLO",                 \
-       [ELS_SCN] =     "SCN",                  \
-       [ELS_TPLS] =    "TPLS",                 \
-       [ELS_TPRLO] =   "TPRLO",                \
-       [ELS_LCLM] =    "LCLM",                 \
-       [ELS_GAID] =    "GAID",                 \
-       [ELS_FACT] =    "FACT",                 \
-       [ELS_FDACDT] =  "FDACDT",               \
-       [ELS_NACT] =    "NACT",                 \
-       [ELS_NDACT] =   "NDACT",                \
-       [ELS_QOSR] =    "QOSR",                 \
-       [ELS_RVCS] =    "RVCS",                 \
-       [ELS_PDISC] =   "PDISC",                \
-       [ELS_FDISC] =   "FDISC",                \
-       [ELS_ADISC] =   "ADISC",                \
-       [ELS_RNC] =     "RNC",                  \
-       [ELS_FARP_REQ] = "FARP_REQ",            \
-       [ELS_FARP_REPL] =  "FARP_REPL",         \
-       [ELS_RPS] =     "RPS",                  \
-       [ELS_RPL] =     "RPL",                  \
-       [ELS_RPBC] =    "RPBC",                 \
-       [ELS_FAN] =     "FAN",                  \
-       [ELS_RSCN] =    "RSCN",                 \
-       [ELS_SCR] =     "SCR",                  \
-       [ELS_RNFT] =    "RNFT",                 \
-       [ELS_CSR] =     "CSR",                  \
-       [ELS_CSU] =     "CSU",                  \
-       [ELS_LINIT] =   "LINIT",                \
-       [ELS_LSTS] =    "LSTS",                 \
-       [ELS_RNID] =    "RNID",                 \
-       [ELS_RLIR] =    "RLIR",                 \
-       [ELS_LIRR] =    "LIRR",                 \
-       [ELS_SRL] =     "SRL",                  \
-       [ELS_SBRP] =    "SBRP",                 \
-       [ELS_RPSC] =    "RPSC",                 \
-       [ELS_QSA] =     "QSA",                  \
-       [ELS_EVFP] =    "EVFP",                 \
-       [ELS_LKA] =     "LKA",                  \
-       [ELS_AUTH_ELS] = "AUTH_ELS",            \
-}
-
-/*
- * LS_ACC payload.
- */
-struct fc_els_ls_acc {
-       __u8          la_cmd;           /* command code ELS_LS_ACC */
-       __u8          la_resv[3];       /* reserved */
-};
-
-/*
- * ELS reject payload.
- */
-struct fc_els_ls_rjt {
-       __u8    er_cmd;         /* command code ELS_LS_RJT */
-       __u8    er_resv[4];     /* reserved must be zero */
-       __u8    er_reason;      /* reason (enum fc_els_rjt_reason below) */
-       __u8    er_explan;      /* explanation (enum fc_els_rjt_explan below) */
-       __u8    er_vendor;      /* vendor specific code */
-};
-
-/*
- * ELS reject reason codes (er_reason).
- */
-enum fc_els_rjt_reason {
-       ELS_RJT_NONE =          0,      /* no reject - not to be sent */
-       ELS_RJT_INVAL =         0x01,   /* invalid ELS command code */
-       ELS_RJT_LOGIC =         0x03,   /* logical error */
-       ELS_RJT_BUSY =          0x05,   /* logical busy */
-       ELS_RJT_PROT =          0x07,   /* protocol error */
-       ELS_RJT_UNAB =          0x09,   /* unable to perform command request */
-       ELS_RJT_UNSUP =         0x0b,   /* command not supported */
-       ELS_RJT_INPROG =        0x0e,   /* command already in progress */
-       ELS_RJT_FIP =           0x20,   /* FIP error */
-       ELS_RJT_VENDOR =        0xff,   /* vendor specific error */
-};
-
-
-/*
- * reason code explanation (er_explan).
- */
-enum fc_els_rjt_explan {
-       ELS_EXPL_NONE =         0x00,   /* No additional explanation */
-       ELS_EXPL_SPP_OPT_ERR =  0x01,   /* service parameter error - options */
-       ELS_EXPL_SPP_ICTL_ERR = 0x03,   /* service parm error - initiator ctl */
-       ELS_EXPL_AH =           0x11,   /* invalid association header */
-       ELS_EXPL_AH_REQ =       0x13,   /* association_header required */
-       ELS_EXPL_SID =          0x15,   /* invalid originator S_ID */
-       ELS_EXPL_OXID_RXID =    0x17,   /* invalid OX_ID-RX_ID combination */
-       ELS_EXPL_INPROG =       0x19,   /* Request already in progress */
-       ELS_EXPL_PLOGI_REQD =   0x1e,   /* N_Port login required */
-       ELS_EXPL_INSUF_RES =    0x29,   /* insufficient resources */
-       ELS_EXPL_UNAB_DATA =    0x2a,   /* unable to supply requested data */
-       ELS_EXPL_UNSUPR =       0x2c,   /* Request not supported */
-       ELS_EXPL_INV_LEN =      0x2d,   /* Invalid payload length */
-       ELS_EXPL_NOT_NEIGHBOR = 0x62,   /* VN2VN_Port not in neighbor set */
-       /* TBD - above definitions incomplete */
-};
-
-/*
- * Common service parameters (N ports).
- */
-struct fc_els_csp {
-       __u8            sp_hi_ver;      /* highest version supported (obs.) */
-       __u8            sp_lo_ver;      /* highest version supported (obs.) */
-       __be16          sp_bb_cred;     /* buffer-to-buffer credits */
-       __be16          sp_features;    /* common feature flags */
-       __be16          sp_bb_data;     /* b-b state number and data field sz */
-       union {
-               struct {
-                       __be16  _sp_tot_seq; /* total concurrent sequences */
-                       __be16  _sp_rel_off; /* rel. offset by info cat */
-               } sp_plogi;
-               struct {
-                       __be32  _sp_r_a_tov; /* resource alloc. timeout msec */
-               } sp_flogi_acc;
-       } sp_u;
-       __be32          sp_e_d_tov;     /* error detect timeout value */
-};
-#define        sp_tot_seq      sp_u.sp_plogi._sp_tot_seq
-#define        sp_rel_off      sp_u.sp_plogi._sp_rel_off
-#define        sp_r_a_tov      sp_u.sp_flogi_acc._sp_r_a_tov
-
-#define        FC_SP_BB_DATA_MASK 0xfff /* mask for data field size in sp_bb_data */
-
-/*
- * Minimum and maximum values for max data field size in service parameters.
- */
-#define        FC_SP_MIN_MAX_PAYLOAD   FC_MIN_MAX_PAYLOAD
-#define        FC_SP_MAX_MAX_PAYLOAD   FC_MAX_PAYLOAD
-
-/*
- * sp_features
- */
-#define        FC_SP_FT_NPIV   0x8000  /* multiple N_Port_ID support (FLOGI) */
-#define        FC_SP_FT_CIRO   0x8000  /* continuously increasing rel off (PLOGI) */
-#define        FC_SP_FT_CLAD   0x8000  /* clean address (in FLOGI LS_ACC) */
-#define        FC_SP_FT_RAND   0x4000  /* random relative offset */
-#define        FC_SP_FT_VAL    0x2000  /* valid vendor version level */
-#define        FC_SP_FT_NPIV_ACC       0x2000  /* NPIV assignment (FLOGI LS_ACC) */
-#define        FC_SP_FT_FPORT  0x1000  /* F port (1) vs. N port (0) */
-#define        FC_SP_FT_ABB    0x0800  /* alternate BB_credit management */
-#define        FC_SP_FT_EDTR   0x0400  /* E_D_TOV Resolution is nanoseconds */
-#define        FC_SP_FT_MCAST  0x0200  /* multicast */
-#define        FC_SP_FT_BCAST  0x0100  /* broadcast */
-#define        FC_SP_FT_HUNT   0x0080  /* hunt group */
-#define        FC_SP_FT_SIMP   0x0040  /* dedicated simplex */
-#define        FC_SP_FT_SEC    0x0020  /* reserved for security */
-#define        FC_SP_FT_CSYN   0x0010  /* clock synch. supported */
-#define        FC_SP_FT_RTTOV  0x0008  /* R_T_TOV value 100 uS, else 100 mS */
-#define        FC_SP_FT_HALF   0x0004  /* dynamic half duplex */
-#define        FC_SP_FT_SEQC   0x0002  /* SEQ_CNT */
-#define        FC_SP_FT_PAYL   0x0001  /* FLOGI payload length 256, else 116 */
-
-/*
- * Class-specific service parameters.
- */
-struct fc_els_cssp {
-       __be16          cp_class;       /* class flags */
-       __be16          cp_init;        /* initiator flags */
-       __be16          cp_recip;       /* recipient flags */
-       __be16          cp_rdfs;        /* receive data field size */
-       __be16          cp_con_seq;     /* concurrent sequences */
-       __be16          cp_ee_cred;     /* N-port end-to-end credit */
-       __u8            cp_resv1;       /* reserved */
-       __u8            cp_open_seq;    /* open sequences per exchange */
-       __u8            _cp_resv2[2];   /* reserved */
-};
-
-/*
- * cp_class flags.
- */
-#define        FC_CPC_VALID    0x8000          /* class valid */
-#define        FC_CPC_IMIX     0x4000          /* intermix mode */
-#define        FC_CPC_SEQ      0x0800          /* sequential delivery */
-#define        FC_CPC_CAMP     0x0200          /* camp-on */
-#define        FC_CPC_PRI      0x0080          /* priority */
-
-/*
- * cp_init flags.
- * (TBD: not all flags defined here).
- */
-#define        FC_CPI_CSYN     0x0010          /* clock synch. capable */
-
-/*
- * cp_recip flags.
- */
-#define        FC_CPR_CSYN     0x0008          /* clock synch. capable */
-
-/*
- * NFC_ELS_FLOGI: Fabric login request.
- * NFC_ELS_PLOGI: Port login request (same format).
- */
-struct fc_els_flogi {
-       __u8            fl_cmd;         /* command */
-       __u8            _fl_resvd[3];   /* must be zero */
-       struct fc_els_csp fl_csp;       /* common service parameters */
-       __be64          fl_wwpn;        /* port name */
-       __be64          fl_wwnn;        /* node name */
-       struct fc_els_cssp fl_cssp[4];  /* class 1-4 service parameters */
-       __u8            fl_vend[16];    /* vendor version level */
-} __attribute__((__packed__));
-
-/*
- * Process login service parameter page.
- */
-struct fc_els_spp {
-       __u8            spp_type;       /* type code or common service params */
-       __u8            spp_type_ext;   /* type code extension */
-       __u8            spp_flags;
-       __u8            _spp_resvd;
-       __be32          spp_orig_pa;    /* originator process associator */
-       __be32          spp_resp_pa;    /* responder process associator */
-       __be32          spp_params;     /* service parameters */
-};
-
-/*
- * spp_flags.
- */
-#define        FC_SPP_OPA_VAL      0x80        /* originator proc. assoc. valid */
-#define        FC_SPP_RPA_VAL      0x40        /* responder proc. assoc. valid */
-#define        FC_SPP_EST_IMG_PAIR 0x20        /* establish image pair */
-#define        FC_SPP_RESP_MASK    0x0f        /* mask for response code (below) */
-
-/*
- * SPP response code in spp_flags - lower 4 bits.
- */
-enum fc_els_spp_resp {
-       FC_SPP_RESP_ACK =       1,      /* request executed */
-       FC_SPP_RESP_RES =       2,      /* unable due to lack of resources */
-       FC_SPP_RESP_INIT =      3,      /* initialization not complete */
-       FC_SPP_RESP_NO_PA =     4,      /* unknown process associator */
-       FC_SPP_RESP_CONF =      5,      /* configuration precludes image pair */
-       FC_SPP_RESP_COND =      6,      /* request completed conditionally */
-       FC_SPP_RESP_MULT =      7,      /* unable to handle multiple SPPs */
-       FC_SPP_RESP_INVL =      8,      /* SPP is invalid */
-};
-
-/*
- * ELS_RRQ - Reinstate Recovery Qualifier
- */
-struct fc_els_rrq {
-       __u8            rrq_cmd;        /* command (0x12) */
-       __u8            rrq_zero[3];    /* specified as zero - part of cmd */
-       __u8            rrq_resvd;      /* reserved */
-       __u8            rrq_s_id[3];    /* originator FID */
-       __be16          rrq_ox_id;      /* originator exchange ID */
-       __be16          rrq_rx_id;      /* responders exchange ID */
-};
-
-/*
- * ELS_REC - Read exchange concise.
- */
-struct fc_els_rec {
-       __u8            rec_cmd;        /* command (0x13) */
-       __u8            rec_zero[3];    /* specified as zero - part of cmd */
-       __u8            rec_resvd;      /* reserved */
-       __u8            rec_s_id[3];    /* originator FID */
-       __be16          rec_ox_id;      /* originator exchange ID */
-       __be16          rec_rx_id;      /* responders exchange ID */
-};
-
-/*
- * ELS_REC LS_ACC payload.
- */
-struct fc_els_rec_acc {
-       __u8            reca_cmd;       /* accept (0x02) */
-       __u8            reca_zero[3];   /* specified as zero - part of cmd */
-       __be16          reca_ox_id;     /* originator exchange ID */
-       __be16          reca_rx_id;     /* responders exchange ID */
-       __u8            reca_resvd1;    /* reserved */
-       __u8            reca_ofid[3];   /* originator FID */
-       __u8            reca_resvd2;    /* reserved */
-       __u8            reca_rfid[3];   /* responder FID */
-       __be32          reca_fc4value;  /* FC4 value */
-       __be32          reca_e_stat;    /* ESB (exchange status block) status */
-};
-
-/*
- * ELS_PRLI - Process login request and response.
- */
-struct fc_els_prli {
-       __u8            prli_cmd;       /* command */
-       __u8            prli_spp_len;   /* length of each serv. parm. page */
-       __be16          prli_len;       /* length of entire payload */
-       /* service parameter pages follow */
-};
-
-/*
- * ELS_PRLO - Process logout request and response.
- */
-struct fc_els_prlo {
-       __u8            prlo_cmd;       /* command */
-       __u8            prlo_obs;       /* obsolete, but shall be set to 10h */
-       __be16          prlo_len;       /* payload length */
-};
-
-/*
- * ELS_ADISC payload
- */
-struct fc_els_adisc {
-       __u8            adisc_cmd;
-       __u8            adisc_resv[3];
-       __u8            adisc_resv1;
-       __u8            adisc_hard_addr[3];
-       __be64          adisc_wwpn;
-       __be64          adisc_wwnn;
-       __u8            adisc_resv2;
-       __u8            adisc_port_id[3];
-} __attribute__((__packed__));
-
-/*
- * ELS_LOGO - process or fabric logout.
- */
-struct fc_els_logo {
-       __u8            fl_cmd;         /* command code */
-       __u8            fl_zero[3];     /* specified as zero - part of cmd */
-       __u8            fl_resvd;       /* reserved */
-       __u8            fl_n_port_id[3];/* N port ID */
-       __be64          fl_n_port_wwn;  /* port name */
-};
-
-/*
- * ELS_RTV - read timeout value.
- */
-struct fc_els_rtv {
-       __u8            rtv_cmd;        /* command code 0x0e */
-       __u8            rtv_zero[3];    /* specified as zero - part of cmd */
-};
-
-/*
- * LS_ACC for ELS_RTV - read timeout value.
- */
-struct fc_els_rtv_acc {
-       __u8            rtv_cmd;        /* command code 0x02 */
-       __u8            rtv_zero[3];    /* specified as zero - part of cmd */
-       __be32          rtv_r_a_tov;    /* resource allocation timeout value */
-       __be32          rtv_e_d_tov;    /* error detection timeout value */
-       __be32          rtv_toq;        /* timeout qualifier (see below) */
-};
-
-/*
- * rtv_toq bits.
- */
-#define        FC_ELS_RTV_EDRES (1 << 26)      /* E_D_TOV resolution is nS else mS */
-#define        FC_ELS_RTV_RTTOV (1 << 19)      /* R_T_TOV is 100 uS else 100 mS */
-
-/*
- * ELS_SCR - state change registration payload.
- */
-struct fc_els_scr {
-       __u8            scr_cmd;        /* command code */
-       __u8            scr_resv[6];    /* reserved */
-       __u8            scr_reg_func;   /* registration function (see below) */
-};
-
-enum fc_els_scr_func {
-       ELS_SCRF_FAB =  1,      /* fabric-detected registration */
-       ELS_SCRF_NPORT = 2,     /* Nx_Port-detected registration */
-       ELS_SCRF_FULL = 3,      /* full registration */
-       ELS_SCRF_CLEAR = 255,   /* remove any current registrations */
-};
-
-/*
- * ELS_RSCN - registered state change notification payload.
- */
-struct fc_els_rscn {
-       __u8            rscn_cmd;       /* RSCN opcode (0x61) */
-       __u8            rscn_page_len;  /* page length (4) */
-       __be16          rscn_plen;      /* payload length including this word */
-
-       /* followed by 4-byte generic affected Port_ID pages */
-};
-
-struct fc_els_rscn_page {
-       __u8            rscn_page_flags; /* event and address format */
-       __u8            rscn_fid[3];    /* fabric ID */
-};
-
-#define        ELS_RSCN_EV_QUAL_BIT    2       /* shift count for event qualifier */
-#define        ELS_RSCN_EV_QUAL_MASK   0xf     /* mask for event qualifier */
-#define        ELS_RSCN_ADDR_FMT_BIT   0       /* shift count for address format */
-#define        ELS_RSCN_ADDR_FMT_MASK  0x3     /* mask for address format */
-
-enum fc_els_rscn_ev_qual {
-       ELS_EV_QUAL_NONE = 0,           /* unspecified */
-       ELS_EV_QUAL_NS_OBJ = 1,         /* changed name server object */
-       ELS_EV_QUAL_PORT_ATTR = 2,      /* changed port attribute */
-       ELS_EV_QUAL_SERV_OBJ = 3,       /* changed service object */
-       ELS_EV_QUAL_SW_CONFIG = 4,      /* changed switch configuration */
-       ELS_EV_QUAL_REM_OBJ = 5,        /* removed object */
-};
-
-enum fc_els_rscn_addr_fmt {
-       ELS_ADDR_FMT_PORT = 0,  /* rscn_fid is a port address */
-       ELS_ADDR_FMT_AREA = 1,  /* rscn_fid is a area address */
-       ELS_ADDR_FMT_DOM = 2,   /* rscn_fid is a domain address */
-       ELS_ADDR_FMT_FAB = 3,   /* anything on fabric may have changed */
-};
-
-/*
- * ELS_RNID - request Node ID.
- */
-struct fc_els_rnid {
-       __u8            rnid_cmd;       /* RNID opcode (0x78) */
-       __u8            rnid_resv[3];   /* reserved */
-       __u8            rnid_fmt;       /* data format */
-       __u8            rnid_resv2[3];  /* reserved */
-};
-
-/*
- * Node Identification Data formats (rnid_fmt)
- */
-enum fc_els_rnid_fmt {
-       ELS_RNIDF_NONE = 0,             /* no specific identification data */
-       ELS_RNIDF_GEN = 0xdf,           /* general topology discovery format */
-};
-
-/*
- * ELS_RNID response.
- */
-struct fc_els_rnid_resp {
-       __u8            rnid_cmd;       /* response code (LS_ACC) */
-       __u8            rnid_resv[3];   /* reserved */
-       __u8            rnid_fmt;       /* data format */
-       __u8            rnid_cid_len;   /* common ID data length */
-       __u8            rnid_resv2;     /* reserved */
-       __u8            rnid_sid_len;   /* specific ID data length */
-};
-
-struct fc_els_rnid_cid {
-       __be64          rnid_wwpn;      /* N port name */
-       __be64          rnid_wwnn;      /* node name */
-};
-
-struct fc_els_rnid_gen {
-       __u8            rnid_vend_id[16]; /* vendor-unique ID */
-       __be32          rnid_atype;     /* associated type (see below) */
-       __be32          rnid_phys_port; /* physical port number */
-       __be32          rnid_att_nodes; /* number of attached nodes */
-       __u8            rnid_node_mgmt; /* node management (see below) */
-       __u8            rnid_ip_ver;    /* IP version (see below) */
-       __be16          rnid_prot_port; /* UDP / TCP port number */
-       __be32          rnid_ip_addr[4]; /* IP address */
-       __u8            rnid_resvd[2];  /* reserved */
-       __be16          rnid_vend_spec; /* vendor-specific field */
-};
-
-enum fc_els_rnid_atype {
-       ELS_RNIDA_UNK =         0x01,   /* unknown */
-       ELS_RNIDA_OTHER =       0x02,   /* none of the following */
-       ELS_RNIDA_HUB =         0x03,
-       ELS_RNIDA_SWITCH =      0x04,
-       ELS_RNIDA_GATEWAY =     0x05,
-       ELS_RNIDA_CONV =        0x06,   /* Obsolete, do not use this value */
-       ELS_RNIDA_HBA =         0x07,   /* Obsolete, do not use this value */
-       ELS_RNIDA_PROXY =       0x08,   /* Obsolete, do not use this value */
-       ELS_RNIDA_STORAGE =     0x09,
-       ELS_RNIDA_HOST =        0x0a,
-       ELS_RNIDA_SUBSYS =      0x0b,   /* storage subsystem (e.g., RAID) */
-       ELS_RNIDA_ACCESS =      0x0e,   /* access device (e.g. media changer) */
-       ELS_RNIDA_NAS =         0x11,   /* NAS server */
-       ELS_RNIDA_BRIDGE =      0x12,   /* bridge */
-       ELS_RNIDA_VIRT =        0x13,   /* virtualization device */
-       ELS_RNIDA_MF =          0xff,   /* multifunction device (bits below) */
-       ELS_RNIDA_MF_HUB =      1UL << 31,      /* hub */
-       ELS_RNIDA_MF_SW =       1UL << 30,      /* switch */
-       ELS_RNIDA_MF_GW =       1UL << 29,      /* gateway */
-       ELS_RNIDA_MF_ST =       1UL << 28,      /* storage */
-       ELS_RNIDA_MF_HOST =     1UL << 27,      /* host */
-       ELS_RNIDA_MF_SUB =      1UL << 26,      /* storage subsystem */
-       ELS_RNIDA_MF_ACC =      1UL << 25,      /* storage access dev */
-       ELS_RNIDA_MF_WDM =      1UL << 24,      /* wavelength division mux */
-       ELS_RNIDA_MF_NAS =      1UL << 23,      /* NAS server */
-       ELS_RNIDA_MF_BR =       1UL << 22,      /* bridge */
-       ELS_RNIDA_MF_VIRT =     1UL << 21,      /* virtualization device */
-};
-
-enum fc_els_rnid_mgmt {
-       ELS_RNIDM_SNMP =        0,
-       ELS_RNIDM_TELNET =      1,
-       ELS_RNIDM_HTTP =        2,
-       ELS_RNIDM_HTTPS =       3,
-       ELS_RNIDM_XML =         4,      /* HTTP + XML */
-};
-
-enum fc_els_rnid_ipver {
-       ELS_RNIDIP_NONE =       0,      /* no IP support or node mgmt. */
-       ELS_RNIDIP_V4 =         1,      /* IPv4 */
-       ELS_RNIDIP_V6 =         2,      /* IPv6 */
-};
-
-/*
- * ELS RPL - Read Port List.
- */
-struct fc_els_rpl {
-       __u8            rpl_cmd;        /* command */
-       __u8            rpl_resv[5];    /* reserved - must be zero */
-       __be16          rpl_max_size;   /* maximum response size or zero */
-       __u8            rpl_resv1;      /* reserved - must be zero */
-       __u8            rpl_index[3];   /* starting index */
-};
-
-/*
- * Port number block in RPL response.
- */
-struct fc_els_pnb {
-       __be32          pnb_phys_pn;    /* physical port number */
-       __u8            pnb_resv;       /* reserved */
-       __u8            pnb_port_id[3]; /* port ID */
-       __be64          pnb_wwpn;       /* port name */
-};
-
-/*
- * RPL LS_ACC response.
- */
-struct fc_els_rpl_resp {
-       __u8            rpl_cmd;        /* ELS_LS_ACC */
-       __u8            rpl_resv1;      /* reserved - must be zero */
-       __be16          rpl_plen;       /* payload length */
-       __u8            rpl_resv2;      /* reserved - must be zero */
-       __u8            rpl_llen[3];    /* list length */
-       __u8            rpl_resv3;      /* reserved - must be zero */
-       __u8            rpl_index[3];   /* starting index */
-       struct fc_els_pnb rpl_pnb[1];   /* variable number of PNBs */
-};
-
-/*
- * Link Error Status Block.
- */
-struct fc_els_lesb {
-       __be32          lesb_link_fail; /* link failure count */
-       __be32          lesb_sync_loss; /* loss of synchronization count */
-       __be32          lesb_sig_loss;  /* loss of signal count */
-       __be32          lesb_prim_err;  /* primitive sequence error count */
-       __be32          lesb_inv_word;  /* invalid transmission word count */
-       __be32          lesb_inv_crc;   /* invalid CRC count */
-};
-
-/*
- * ELS RPS - Read Port Status Block request.
- */
-struct fc_els_rps {
-       __u8            rps_cmd;        /* command */
-       __u8            rps_resv[2];    /* reserved - must be zero */
-       __u8            rps_flag;       /* flag - see below */
-       __be64          rps_port_spec;  /* port selection */
-};
-
-enum fc_els_rps_flag {
-       FC_ELS_RPS_DID =        0x00,   /* port identified by D_ID of req. */
-       FC_ELS_RPS_PPN =        0x01,   /* port_spec is physical port number */
-       FC_ELS_RPS_WWPN =       0x02,   /* port_spec is port WWN */
-};
-
-/*
- * ELS RPS LS_ACC response.
- */
-struct fc_els_rps_resp {
-       __u8            rps_cmd;        /* command - LS_ACC */
-       __u8            rps_resv[2];    /* reserved - must be zero */
-       __u8            rps_flag;       /* flag - see below */
-       __u8            rps_resv2[2];   /* reserved */
-       __be16          rps_status;     /* port status - see below */
-       struct fc_els_lesb rps_lesb;    /* link error status block */
-};
-
-enum fc_els_rps_resp_flag {
-       FC_ELS_RPS_LPEV =       0x01,   /* L_port extension valid */
-};
-
-enum fc_els_rps_resp_status {
-       FC_ELS_RPS_PTP =        1 << 5, /* point-to-point connection */
-       FC_ELS_RPS_LOOP =       1 << 4, /* loop mode */
-       FC_ELS_RPS_FAB =        1 << 3, /* fabric present */
-       FC_ELS_RPS_NO_SIG =     1 << 2, /* loss of signal */
-       FC_ELS_RPS_NO_SYNC =    1 << 1, /* loss of synchronization */
-       FC_ELS_RPS_RESET =      1 << 0, /* in link reset protocol */
-};
-
-/*
- * ELS LIRR - Link Incident Record Registration request.
- */
-struct fc_els_lirr {
-       __u8            lirr_cmd;       /* command */
-       __u8            lirr_resv[3];   /* reserved - must be zero */
-       __u8            lirr_func;      /* registration function */
-       __u8            lirr_fmt;       /* FC-4 type of RLIR requested */
-       __u8            lirr_resv2[2];  /* reserved - must be zero */
-};
-
-enum fc_els_lirr_func {
-       ELS_LIRR_SET_COND =     0x01,   /* set - conditionally receive */
-       ELS_LIRR_SET_UNCOND =   0x02,   /* set - unconditionally receive */
-       ELS_LIRR_CLEAR =        0xff    /* clear registration */
-};
-
-/*
- * ELS SRL - Scan Remote Loop request.
- */
-struct fc_els_srl {
-       __u8            srl_cmd;        /* command */
-       __u8            srl_resv[3];    /* reserved - must be zero */
-       __u8            srl_flag;       /* flag - see below */
-       __u8            srl_flag_param[3];      /* flag parameter */
-};
-
-enum fc_els_srl_flag {
-       FC_ELS_SRL_ALL =        0x00,   /* scan all FL ports */
-       FC_ELS_SRL_ONE =        0x01,   /* scan specified loop */
-       FC_ELS_SRL_EN_PER =     0x02,   /* enable periodic scanning (param) */
-       FC_ELS_SRL_DIS_PER =    0x03,   /* disable periodic scanning */
-};
-
-/*
- * ELS RLS - Read Link Error Status Block request.
- */
-struct fc_els_rls {
-       __u8            rls_cmd;        /* command */
-       __u8            rls_resv[4];    /* reserved - must be zero */
-       __u8            rls_port_id[3]; /* port ID */
-};
-
-/*
- * ELS RLS LS_ACC Response.
- */
-struct fc_els_rls_resp {
-       __u8            rls_cmd;        /* ELS_LS_ACC */
-       __u8            rls_resv[3];    /* reserved - must be zero */
-       struct fc_els_lesb rls_lesb;    /* link error status block */
-};
-
-/*
- * ELS RLIR - Registered Link Incident Report.
- * This is followed by the CLIR and the CLID, described below.
- */
-struct fc_els_rlir {
-       __u8            rlir_cmd;       /* command */
-       __u8            rlir_resv[3];   /* reserved - must be zero */
-       __u8            rlir_fmt;       /* format (FC4-type if type specific) */
-       __u8            rlir_clr_len;   /* common link incident record length */
-       __u8            rlir_cld_len;   /* common link incident desc. length */
-       __u8            rlir_slr_len;   /* spec. link incident record length */
-};
-
-/*
- * CLIR - Common Link Incident Record Data. - Sent via RLIR.
- */
-struct fc_els_clir {
-       __be64          clir_wwpn;      /* incident port name */
-       __be64          clir_wwnn;      /* incident port node name */
-       __u8            clir_port_type; /* incident port type */
-       __u8            clir_port_id[3];        /* incident port ID */
-
-       __be64          clir_conn_wwpn; /* connected port name */
-       __be64          clir_conn_wwnn; /* connected node name */
-       __be64          clir_fab_name;  /* fabric name */
-       __be32          clir_phys_port; /* physical port number */
-       __be32          clir_trans_id;  /* transaction ID */
-       __u8            clir_resv[3];   /* reserved */
-       __u8            clir_ts_fmt;    /* time stamp format */
-       __be64          clir_timestamp; /* time stamp */
-};
-
-/*
- * CLIR clir_ts_fmt - time stamp format values.
- */
-enum fc_els_clir_ts_fmt {
-       ELS_CLIR_TS_UNKNOWN =   0,      /* time stamp field unknown */
-       ELS_CLIR_TS_SEC_FRAC =  1,      /* time in seconds and fractions */
-       ELS_CLIR_TS_CSU =       2,      /* time in clock synch update format */
-};
-
-/*
- * Common Link Incident Descriptor - sent via RLIR.
- */
-struct fc_els_clid {
-       __u8            clid_iq;        /* incident qualifier flags */
-       __u8            clid_ic;        /* incident code */
-       __be16          clid_epai;      /* domain/area of ISL */
-};
-
-/*
- * CLID incident qualifier flags.
- */
-enum fc_els_clid_iq {
-       ELS_CLID_SWITCH =       0x20,   /* incident port is a switch node */
-       ELS_CLID_E_PORT =       0x10,   /* incident is an ISL (E) port */
-       ELS_CLID_SEV_MASK =     0x0c,   /* severity 2-bit field mask */
-       ELS_CLID_SEV_INFO =     0x00,   /* report is informational */
-       ELS_CLID_SEV_INOP =     0x08,   /* link not operational */
-       ELS_CLID_SEV_DEG =      0x04,   /* link degraded but operational */
-       ELS_CLID_LASER =        0x02,   /* subassembly is a laser */
-       ELS_CLID_FRU =          0x01,   /* format can identify a FRU */
-};
-
-/*
- * CLID incident code.
- */
-enum fc_els_clid_ic {
-       ELS_CLID_IC_IMPL =      1,      /* implicit incident */
-       ELS_CLID_IC_BER =       2,      /* bit-error-rate threshold exceeded */
-       ELS_CLID_IC_LOS =       3,      /* loss of synch or signal */
-       ELS_CLID_IC_NOS =       4,      /* non-operational primitive sequence */
-       ELS_CLID_IC_PST =       5,      /* primitive sequence timeout */
-       ELS_CLID_IC_INVAL =     6,      /* invalid primitive sequence */
-       ELS_CLID_IC_LOOP_TO =   7,      /* loop initialization time out */
-       ELS_CLID_IC_LIP =       8,      /* receiving LIP */
-};
-
-#endif /* _FC_ELS_H_ */
diff --git a/include/scsi/fc/fc_fs.h b/include/scsi/fc/fc_fs.h
deleted file mode 100644 (file)
index 50f28b1..0000000
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright(c) 2007 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained at www.Open-FCoE.org
- */
-
-#ifndef _FC_FS_H_
-#define _FC_FS_H_
-
-#include <linux/types.h>
-
-/*
- * Fibre Channel Framing and Signalling definitions.
- * From T11 FC-FS-2 Rev 0.90 - 9 August 2005.
- */
-
-/*
- * Frame header
- */
-struct fc_frame_header {
-       __u8          fh_r_ctl; /* routing control */
-       __u8          fh_d_id[3];       /* Destination ID */
-
-       __u8          fh_cs_ctl;        /* class of service control / pri */
-       __u8          fh_s_id[3];       /* Source ID */
-
-       __u8          fh_type;          /* see enum fc_fh_type below */
-       __u8          fh_f_ctl[3];      /* frame control */
-
-       __u8          fh_seq_id;        /* sequence ID */
-       __u8          fh_df_ctl;        /* data field control */
-       __be16        fh_seq_cnt;       /* sequence count */
-
-       __be16        fh_ox_id;         /* originator exchange ID */
-       __be16        fh_rx_id;         /* responder exchange ID */
-       __be32        fh_parm_offset;   /* parameter or relative offset */
-};
-
-#define FC_FRAME_HEADER_LEN 24 /* expected length of structure */
-
-#define FC_MAX_PAYLOAD  2112U          /* max payload length in bytes */
-#define FC_MIN_MAX_PAYLOAD  256U       /* lower limit on max payload */
-
-#define FC_MAX_FRAME   (FC_MAX_PAYLOAD + FC_FRAME_HEADER_LEN)
-#define FC_MIN_MAX_FRAME (FC_MIN_MAX_PAYLOAD + FC_FRAME_HEADER_LEN)
-
-/*
- * fh_r_ctl - Routing control definitions.
- */
-    /*
-     * FC-4 device_data.
-     */
-enum fc_rctl {
-       FC_RCTL_DD_UNCAT = 0x00,        /* uncategorized information */
-       FC_RCTL_DD_SOL_DATA = 0x01,     /* solicited data */
-       FC_RCTL_DD_UNSOL_CTL = 0x02,    /* unsolicited control */
-       FC_RCTL_DD_SOL_CTL = 0x03,      /* solicited control or reply */
-       FC_RCTL_DD_UNSOL_DATA = 0x04,   /* unsolicited data */
-       FC_RCTL_DD_DATA_DESC = 0x05,    /* data descriptor */
-       FC_RCTL_DD_UNSOL_CMD = 0x06,    /* unsolicited command */
-       FC_RCTL_DD_CMD_STATUS = 0x07,   /* command status */
-
-#define FC_RCTL_ILS_REQ FC_RCTL_DD_UNSOL_CTL   /* ILS request */
-#define FC_RCTL_ILS_REP FC_RCTL_DD_SOL_CTL     /* ILS reply */
-
-       /*
-        * Extended Link_Data
-        */
-       FC_RCTL_ELS_REQ = 0x22, /* extended link services request */
-       FC_RCTL_ELS_REP = 0x23, /* extended link services reply */
-       FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */
-       FC_RCTL_ELS4_REP = 0x33, /* FC-4 ELS reply */
-       /*
-        * Optional Extended Headers
-        */
-       FC_RCTL_VFTH = 0x50,    /* virtual fabric tagging header */
-       FC_RCTL_IFRH = 0x51,    /* inter-fabric routing header */
-       FC_RCTL_ENCH = 0x52,    /* encapsulation header */
-       /*
-        * Basic Link Services fh_r_ctl values.
-        */
-       FC_RCTL_BA_NOP = 0x80,  /* basic link service NOP */
-       FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */
-       FC_RCTL_BA_RMC = 0x82,  /* remove connection */
-       FC_RCTL_BA_ACC = 0x84,  /* basic accept */
-       FC_RCTL_BA_RJT = 0x85,  /* basic reject */
-       FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */
-       /*
-        * Link Control Information.
-        */
-       FC_RCTL_ACK_1 = 0xc0,   /* acknowledge_1 */
-       FC_RCTL_ACK_0 = 0xc1,   /* acknowledge_0 */
-       FC_RCTL_P_RJT = 0xc2,   /* port reject */
-       FC_RCTL_F_RJT = 0xc3,   /* fabric reject */
-       FC_RCTL_P_BSY = 0xc4,   /* port busy */
-       FC_RCTL_F_BSY = 0xc5,   /* fabric busy to data frame */
-       FC_RCTL_F_BSYL = 0xc6,  /* fabric busy to link control frame */
-       FC_RCTL_LCR = 0xc7,     /* link credit reset */
-       FC_RCTL_END = 0xc9,     /* end */
-};
-                                   /* incomplete list of definitions */
-
-/*
- * R_CTL names initializer.
- * Please keep this matching the above definitions.
- */
-#define FC_RCTL_NAMES_INIT { \
-       [FC_RCTL_DD_UNCAT] =            "uncat",                        \
-       [FC_RCTL_DD_SOL_DATA] =         "sol data",                     \
-       [FC_RCTL_DD_UNSOL_CTL] =        "unsol ctl",                    \
-       [FC_RCTL_DD_SOL_CTL] =          "sol ctl/reply",                \
-       [FC_RCTL_DD_UNSOL_DATA] =       "unsol data",                   \
-       [FC_RCTL_DD_DATA_DESC] =        "data desc",                    \
-       [FC_RCTL_DD_UNSOL_CMD] =        "unsol cmd",                    \
-       [FC_RCTL_DD_CMD_STATUS] =       "cmd status",                   \
-       [FC_RCTL_ELS_REQ] =             "ELS req",                      \
-       [FC_RCTL_ELS_REP] =             "ELS rep",                      \
-       [FC_RCTL_ELS4_REQ] =            "FC-4 ELS req",                 \
-       [FC_RCTL_ELS4_REP] =            "FC-4 ELS rep",                 \
-       [FC_RCTL_BA_NOP] =              "BLS NOP",                      \
-       [FC_RCTL_BA_ABTS] =             "BLS abort",                    \
-       [FC_RCTL_BA_RMC] =              "BLS remove connection",        \
-       [FC_RCTL_BA_ACC] =              "BLS accept",                   \
-       [FC_RCTL_BA_RJT] =              "BLS reject",                   \
-       [FC_RCTL_BA_PRMT] =             "BLS dedicated connection preempted", \
-       [FC_RCTL_ACK_1] =               "LC ACK_1",                     \
-       [FC_RCTL_ACK_0] =               "LC ACK_0",                     \
-       [FC_RCTL_P_RJT] =               "LC port reject",               \
-       [FC_RCTL_F_RJT] =               "LC fabric reject",             \
-       [FC_RCTL_P_BSY] =               "LC port busy",                 \
-       [FC_RCTL_F_BSY] =               "LC fabric busy to data frame", \
-       [FC_RCTL_F_BSYL] =              "LC fabric busy to link control frame",\
-       [FC_RCTL_LCR] =                 "LC link credit reset",         \
-       [FC_RCTL_END] =                 "LC end",                       \
-}
-
-/*
- * Well-known fabric addresses.
- */
-enum fc_well_known_fid {
-       FC_FID_NONE =           0x000000,       /* No destination */
-       FC_FID_BCAST =          0xffffff,       /* broadcast */
-       FC_FID_FLOGI =          0xfffffe,       /* fabric login */
-       FC_FID_FCTRL =          0xfffffd,       /* fabric controller */
-       FC_FID_DIR_SERV =       0xfffffc,       /* directory server */
-       FC_FID_TIME_SERV =      0xfffffb,       /* time server */
-       FC_FID_MGMT_SERV =      0xfffffa,       /* management server */
-       FC_FID_QOS =            0xfffff9,       /* QoS Facilitator */
-       FC_FID_ALIASES =        0xfffff8,       /* alias server (FC-PH2) */
-       FC_FID_SEC_KEY =        0xfffff7,       /* Security key dist. server */
-       FC_FID_CLOCK =          0xfffff6,       /* clock synch server */
-       FC_FID_MCAST_SERV =     0xfffff5,       /* multicast server */
-};
-
-#define        FC_FID_WELL_KNOWN_MAX   0xffffff /* highest well-known fabric ID */
-#define        FC_FID_WELL_KNOWN_BASE  0xfffff5 /* start of well-known fabric ID */
-
-/*
- * Other well-known addresses, outside the above contiguous range.
- */
-#define        FC_FID_DOM_MGR          0xfffc00        /* domain manager base */
-
-/*
- * Fabric ID bytes.
- */
-#define        FC_FID_DOMAIN           0
-#define        FC_FID_PORT             1
-#define        FC_FID_LINK             2
-
-/*
- * fh_type codes
- */
-enum fc_fh_type {
-       FC_TYPE_BLS =   0x00,   /* basic link service */
-       FC_TYPE_ELS =   0x01,   /* extended link service */
-       FC_TYPE_IP =    0x05,   /* IP over FC, RFC 4338 */
-       FC_TYPE_FCP =   0x08,   /* SCSI FCP */
-       FC_TYPE_CT =    0x20,   /* Fibre Channel Services (FC-CT) */
-       FC_TYPE_ILS =   0x22,   /* internal link service */
-};
-
-/*
- * FC_TYPE names initializer.
- * Please keep this matching the above definitions.
- */
-#define FC_TYPE_NAMES_INIT {                           \
-       [FC_TYPE_BLS] =         "BLS",                  \
-       [FC_TYPE_ELS] =         "ELS",                  \
-       [FC_TYPE_IP] =          "IP",                   \
-       [FC_TYPE_FCP] =         "FCP",                  \
-       [FC_TYPE_CT] =          "CT",                   \
-       [FC_TYPE_ILS] =         "ILS",                  \
-}
-
-/*
- * Exchange IDs.
- */
-#define FC_XID_UNKNOWN  0xffff /* unknown exchange ID */
-#define FC_XID_MIN     0x0     /* supported min exchange ID */
-#define FC_XID_MAX     0xfffe  /* supported max exchange ID */
-
-/*
- * fh_f_ctl - Frame control flags.
- */
-#define        FC_FC_EX_CTX    (1 << 23)       /* sent by responder to exchange */
-#define        FC_FC_SEQ_CTX   (1 << 22)       /* sent by responder to sequence */
-#define        FC_FC_FIRST_SEQ (1 << 21)       /* first sequence of this exchange */
-#define        FC_FC_LAST_SEQ  (1 << 20)       /* last sequence of this exchange */
-#define        FC_FC_END_SEQ   (1 << 19)       /* last frame of sequence */
-#define        FC_FC_END_CONN  (1 << 18)       /* end of class 1 connection pending */
-#define        FC_FC_RES_B17   (1 << 17)       /* reserved */
-#define        FC_FC_SEQ_INIT  (1 << 16)       /* transfer of sequence initiative */
-#define        FC_FC_X_ID_REASS (1 << 15)      /* exchange ID has been changed */
-#define        FC_FC_X_ID_INVAL (1 << 14)      /* exchange ID invalidated */
-
-#define        FC_FC_ACK_1     (1 << 12)       /* 13:12 = 1: ACK_1 expected */
-#define        FC_FC_ACK_N     (2 << 12)       /* 13:12 = 2: ACK_N expected */
-#define        FC_FC_ACK_0     (3 << 12)       /* 13:12 = 3: ACK_0 expected */
-
-#define        FC_FC_RES_B11   (1 << 11)       /* reserved */
-#define        FC_FC_RES_B10   (1 << 10)       /* reserved */
-#define        FC_FC_RETX_SEQ  (1 << 9)        /* retransmitted sequence */
-#define        FC_FC_UNI_TX    (1 << 8)        /* unidirectional transmit (class 1) */
-#define        FC_FC_CONT_SEQ(i) ((i) << 6)
-#define        FC_FC_ABT_SEQ(i) ((i) << 4)
-#define        FC_FC_REL_OFF   (1 << 3)        /* parameter is relative offset */
-#define        FC_FC_RES2      (1 << 2)        /* reserved */
-#define        FC_FC_FILL(i)   ((i) & 3)       /* 1:0: bytes of trailing fill */
-
-/*
- * BA_ACC payload.
- */
-struct fc_ba_acc {
-       __u8            ba_seq_id_val;  /* SEQ_ID validity */
-#define FC_BA_SEQ_ID_VAL 0x80
-       __u8            ba_seq_id;      /* SEQ_ID of seq last deliverable */
-       __u8            ba_resvd[2];    /* reserved */
-       __be16          ba_ox_id;       /* OX_ID for aborted seq or exch */
-       __be16          ba_rx_id;       /* RX_ID for aborted seq or exch */
-       __be16          ba_low_seq_cnt; /* low SEQ_CNT of aborted seq */
-       __be16          ba_high_seq_cnt; /* high SEQ_CNT of aborted seq */
-};
-
-/*
- * BA_RJT: Basic Reject payload.
- */
-struct fc_ba_rjt {
-       __u8            br_resvd;       /* reserved */
-       __u8            br_reason;      /* reason code */
-       __u8            br_explan;      /* reason explanation */
-       __u8            br_vendor;      /* vendor unique code */
-};
-
-/*
- * BA_RJT reason codes.
- * From FS-2.
- */
-enum fc_ba_rjt_reason {
-       FC_BA_RJT_NONE =        0,      /* in software this means no reject */
-       FC_BA_RJT_INVL_CMD =    0x01,   /* invalid command code */
-       FC_BA_RJT_LOG_ERR =     0x03,   /* logical error */
-       FC_BA_RJT_LOG_BUSY =    0x05,   /* logical busy */
-       FC_BA_RJT_PROTO_ERR =   0x07,   /* protocol error */
-       FC_BA_RJT_UNABLE =      0x09,   /* unable to perform request */
-       FC_BA_RJT_VENDOR =      0xff,   /* vendor-specific (see br_vendor) */
-};
-
-/*
- * BA_RJT reason code explanations.
- */
-enum fc_ba_rjt_explan {
-       FC_BA_RJT_EXP_NONE =    0x00,   /* no additional expanation */
-       FC_BA_RJT_INV_XID =     0x03,   /* invalid OX_ID-RX_ID combination */
-       FC_BA_RJT_ABT =         0x05,   /* sequence aborted, no seq info */
-};
-
-/*
- * P_RJT or F_RJT: Port Reject or Fabric Reject parameter field.
- */
-struct fc_pf_rjt {
-       __u8            rj_action;      /* reserved */
-       __u8            rj_reason;      /* reason code */
-       __u8            rj_resvd;       /* reserved */
-       __u8            rj_vendor;      /* vendor unique code */
-};
-
-/*
- * P_RJT and F_RJT reject reason codes.
- */
-enum fc_pf_rjt_reason {
-       FC_RJT_NONE =           0,      /* non-reject (reserved by standard) */
-       FC_RJT_INVL_DID =       0x01,   /* invalid destination ID */
-       FC_RJT_INVL_SID =       0x02,   /* invalid source ID */
-       FC_RJT_P_UNAV_T =       0x03,   /* port unavailable, temporary */
-       FC_RJT_P_UNAV =         0x04,   /* port unavailable, permanent */
-       FC_RJT_CLS_UNSUP =      0x05,   /* class not supported */
-       FC_RJT_DEL_USAGE =      0x06,   /* delimiter usage error */
-       FC_RJT_TYPE_UNSUP =     0x07,   /* type not supported */
-       FC_RJT_LINK_CTL =       0x08,   /* invalid link control */
-       FC_RJT_R_CTL =          0x09,   /* invalid R_CTL field */
-       FC_RJT_F_CTL =          0x0a,   /* invalid F_CTL field */
-       FC_RJT_OX_ID =          0x0b,   /* invalid originator exchange ID */
-       FC_RJT_RX_ID =          0x0c,   /* invalid responder exchange ID */
-       FC_RJT_SEQ_ID =         0x0d,   /* invalid sequence ID */
-       FC_RJT_DF_CTL =         0x0e,   /* invalid DF_CTL field */
-       FC_RJT_SEQ_CNT =        0x0f,   /* invalid SEQ_CNT field */
-       FC_RJT_PARAM =          0x10,   /* invalid parameter field */
-       FC_RJT_EXCH_ERR =       0x11,   /* exchange error */
-       FC_RJT_PROTO =          0x12,   /* protocol error */
-       FC_RJT_LEN =            0x13,   /* incorrect length */
-       FC_RJT_UNEXP_ACK =      0x14,   /* unexpected ACK */
-       FC_RJT_FAB_CLASS =      0x15,   /* class unsupported by fabric entity */
-       FC_RJT_LOGI_REQ =       0x16,   /* login required */
-       FC_RJT_SEQ_XS =         0x17,   /* excessive sequences attempted */
-       FC_RJT_EXCH_EST =       0x18,   /* unable to establish exchange */
-       FC_RJT_FAB_UNAV =       0x1a,   /* fabric unavailable */
-       FC_RJT_VC_ID =          0x1b,   /* invalid VC_ID (class 4) */
-       FC_RJT_CS_CTL =         0x1c,   /* invalid CS_CTL field */
-       FC_RJT_INSUF_RES =      0x1d,   /* insuff. resources for VC (Class 4) */
-       FC_RJT_INVL_CLS =       0x1f,   /* invalid class of service */
-       FC_RJT_PREEMT_RJT =     0x20,   /* preemption request rejected */
-       FC_RJT_PREEMT_DIS =     0x21,   /* preemption not enabled */
-       FC_RJT_MCAST_ERR =      0x22,   /* multicast error */
-       FC_RJT_MCAST_ET =       0x23,   /* multicast error terminate */
-       FC_RJT_PRLI_REQ =       0x24,   /* process login required */
-       FC_RJT_INVL_ATT =       0x25,   /* invalid attachment */
-       FC_RJT_VENDOR =         0xff,   /* vendor specific reject */
-};
-
-/* default timeout values */
-
-#define FC_DEF_E_D_TOV 2000UL
-#define FC_DEF_R_A_TOV 10000UL
-
-#endif /* _FC_FS_H_ */
diff --git a/include/scsi/fc/fc_gs.h b/include/scsi/fc/fc_gs.h
deleted file mode 100644 (file)
index a37346d..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright(c) 2007 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained at www.Open-FCoE.org
- */
-
-#ifndef _FC_GS_H_
-#define        _FC_GS_H_
-
-#include <linux/types.h>
-
-/*
- * Fibre Channel Services - Common Transport.
- * From T11.org FC-GS-2 Rev 5.3 November 1998.
- */
-
-struct fc_ct_hdr {
-       __u8            ct_rev;         /* revision */
-       __u8            ct_in_id[3];    /* N_Port ID of original requestor */
-       __u8            ct_fs_type;     /* type of fibre channel service */
-       __u8            ct_fs_subtype;  /* subtype */
-       __u8            ct_options;
-       __u8            _ct_resvd1;
-       __be16          ct_cmd;         /* command / response code */
-       __be16          ct_mr_size;     /* maximum / residual size */
-       __u8            _ct_resvd2;
-       __u8            ct_reason;      /* reject reason */
-       __u8            ct_explan;      /* reason code explanation */
-       __u8            ct_vendor;      /* vendor unique data */
-};
-
-#define        FC_CT_HDR_LEN   16      /* expected sizeof (struct fc_ct_hdr) */
-
-enum fc_ct_rev {
-       FC_CT_REV = 1           /* common transport revision */
-};
-
-/*
- * ct_fs_type values.
- */
-enum fc_ct_fs_type {
-       FC_FST_ALIAS =  0xf8,   /* alias service */
-       FC_FST_MGMT =   0xfa,   /* management service */
-       FC_FST_TIME =   0xfb,   /* time service */
-       FC_FST_DIR =    0xfc,   /* directory service */
-};
-
-/*
- * ct_cmd: Command / response codes
- */
-enum fc_ct_cmd {
-       FC_FS_RJT =     0x8001, /* reject */
-       FC_FS_ACC =     0x8002, /* accept */
-};
-
-/*
- * FS_RJT reason codes.
- */
-enum fc_ct_reason {
-       FC_FS_RJT_CMD =         0x01,   /* invalid command code */
-       FC_FS_RJT_VER =         0x02,   /* invalid version level */
-       FC_FS_RJT_LOG =         0x03,   /* logical error */
-       FC_FS_RJT_IUSIZ =       0x04,   /* invalid IU size */
-       FC_FS_RJT_BSY =         0x05,   /* logical busy */
-       FC_FS_RJT_PROTO =       0x07,   /* protocol error */
-       FC_FS_RJT_UNABL =       0x09,   /* unable to perform command request */
-       FC_FS_RJT_UNSUP =       0x0b,   /* command not supported */
-};
-
-/*
- * FS_RJT reason code explanations.
- */
-enum fc_ct_explan {
-       FC_FS_EXP_NONE =        0x00,   /* no additional explanation */
-       FC_FS_EXP_PID =         0x01,   /* port ID not registered */
-       FC_FS_EXP_PNAM =        0x02,   /* port name not registered */
-       FC_FS_EXP_NNAM =        0x03,   /* node name not registered */
-       FC_FS_EXP_COS =         0x04,   /* class of service not registered */
-       FC_FS_EXP_FTNR =        0x07,   /* FC-4 types not registered */
-       /* definitions not complete */
-};
-
-#endif /* _FC_GS_H_ */
diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h
deleted file mode 100644 (file)
index f7751d5..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright(c) 2007 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained at www.Open-FCoE.org
- */
-
-#ifndef _FC_NS_H_
-#define        _FC_NS_H_
-
-#include <linux/types.h>
-
-/*
- * Fibre Channel Services - Name Service (dNS)
- * From T11.org FC-GS-2 Rev 5.3 November 1998.
- */
-
-/*
- * Common-transport sub-type for Name Server.
- */
-#define        FC_NS_SUBTYPE       2   /* fs_ct_hdr.ct_fs_subtype */
-
-/*
- * Name server Requests.
- * Note:  this is an incomplete list, some unused requests are omitted.
- */
-enum fc_ns_req {
-       FC_NS_GA_NXT =  0x0100,         /* get all next */
-       FC_NS_GI_A =    0x0101,         /* get identifiers - scope */
-       FC_NS_GPN_ID =  0x0112,         /* get port name by ID */
-       FC_NS_GNN_ID =  0x0113,         /* get node name by ID */
-       FC_NS_GSPN_ID = 0x0118,         /* get symbolic port name */
-       FC_NS_GID_PN =  0x0121,         /* get ID for port name */
-       FC_NS_GID_NN =  0x0131,         /* get IDs for node name */
-       FC_NS_GID_FT =  0x0171,         /* get IDs by FC4 type */
-       FC_NS_GPN_FT =  0x0172,         /* get port names by FC4 type */
-       FC_NS_GID_PT =  0x01a1,         /* get IDs by port type */
-       FC_NS_RPN_ID =  0x0212,         /* reg port name for ID */
-       FC_NS_RNN_ID =  0x0213,         /* reg node name for ID */
-       FC_NS_RFT_ID =  0x0217,         /* reg FC4 type for ID */
-       FC_NS_RSPN_ID = 0x0218,         /* reg symbolic port name */
-       FC_NS_RFF_ID =  0x021f,         /* reg FC4 Features for ID */
-       FC_NS_RSNN_NN = 0x0239,         /* reg symbolic node name */
-};
-
-/*
- * Port type values.
- */
-enum fc_ns_pt {
-       FC_NS_UNID_PORT = 0x00, /* unidentified */
-       FC_NS_N_PORT =  0x01,   /* N port */
-       FC_NS_NL_PORT = 0x02,   /* NL port */
-       FC_NS_FNL_PORT = 0x03,  /* F/NL port */
-       FC_NS_NX_PORT = 0x7f,   /* Nx port */
-       FC_NS_F_PORT =  0x81,   /* F port */
-       FC_NS_FL_PORT = 0x82,   /* FL port */
-       FC_NS_E_PORT =  0x84,   /* E port */
-       FC_NS_B_PORT =  0x85,   /* B port */
-};
-
-/*
- * Port type object.
- */
-struct fc_ns_pt_obj {
-       __u8            pt_type;
-};
-
-/*
- * Port ID object
- */
-struct fc_ns_fid {
-       __u8            fp_flags;       /* flags for responses only */
-       __u8            fp_fid[3];
-};
-
-/*
- * fp_flags in port ID object, for responses only.
- */
-#define        FC_NS_FID_LAST  0x80            /* last object */
-
-/*
- * FC4-types object.
- */
-#define        FC_NS_TYPES     256     /* number of possible FC-4 types */
-#define        FC_NS_BPW       32      /* bits per word in bitmap */
-
-struct fc_ns_fts {
-       __be32  ff_type_map[FC_NS_TYPES / FC_NS_BPW]; /* bitmap of FC-4 types */
-};
-
-/*
- * FC4-features object.
- */
-struct fc_ns_ff        {
-       __be32  fd_feat[FC_NS_TYPES * 4 / FC_NS_BPW]; /* 4-bits per FC-type */
-};
-
-/*
- * GID_PT request.
- */
-struct fc_ns_gid_pt {
-       __u8            fn_pt_type;
-       __u8            fn_domain_id_scope;
-       __u8            fn_area_id_scope;
-       __u8            fn_resvd;
-};
-
-/*
- * GID_FT or GPN_FT request.
- */
-struct fc_ns_gid_ft {
-       __u8            fn_resvd;
-       __u8            fn_domain_id_scope;
-       __u8            fn_area_id_scope;
-       __u8            fn_fc4_type;
-};
-
-/*
- * GPN_FT response.
- */
-struct fc_gpn_ft_resp {
-       __u8            fp_flags;       /* see fp_flags definitions above */
-       __u8            fp_fid[3];      /* port ID */
-       __be32          fp_resvd;
-       __be64          fp_wwpn;        /* port name */
-};
-
-/*
- * GID_PN request
- */
-struct fc_ns_gid_pn {
-       __be64     fn_wwpn;    /* port name */
-};
-
-/*
- * GID_PN response or GSPN_ID request
- */
-struct fc_gid_pn_resp {
-       __u8      fp_resvd;
-       __u8      fp_fid[3];     /* port ID */
-};
-
-/*
- * GSPN_ID response
- */
-struct fc_gspn_resp {
-       __u8    fp_name_len;
-       char    fp_name[];
-};
-
-/*
- * RFT_ID request - register FC-4 types for ID.
- */
-struct fc_ns_rft_id {
-       struct fc_ns_fid fr_fid;        /* port ID object */
-       struct fc_ns_fts fr_fts;        /* FC-4 types object */
-};
-
-/*
- * RPN_ID request - register port name for ID.
- * RNN_ID request - register node name for ID.
- */
-struct fc_ns_rn_id {
-       struct fc_ns_fid fr_fid;        /* port ID object */
-       __be64          fr_wwn;         /* node name or port name */
-} __attribute__((__packed__));
-
-/*
- * RSNN_NN request - register symbolic node name
- */
-struct fc_ns_rsnn {
-       __be64          fr_wwn;         /* node name */
-       __u8            fr_name_len;
-       char            fr_name[];
-} __attribute__((__packed__));
-
-/*
- * RSPN_ID request - register symbolic port name
- */
-struct fc_ns_rspn {
-       struct fc_ns_fid fr_fid;        /* port ID object */
-       __u8            fr_name_len;
-       char            fr_name[];
-} __attribute__((__packed__));
-
-/*
- * RFF_ID request - register FC-4 Features for ID.
- */
-struct fc_ns_rff_id {
-       struct fc_ns_fid fr_fid;        /* port ID object */
-       __u8            fr_resvd[2];
-       __u8            fr_feat;        /* FC-4 Feature bits */
-       __u8            fr_type;        /* FC-4 type */
-} __attribute__((__packed__));
-
-#endif /* _FC_NS_H_ */
index 604cb9b..7e23148 100644 (file)
@@ -34,7 +34,8 @@ struct fcoe_sysfs_function_template {
        void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
        void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
        void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
-       void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+       void (*set_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+       int  (*set_fcoe_ctlr_enabled)(struct fcoe_ctlr_device *);
        void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
        void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
 };
@@ -48,6 +49,12 @@ enum fip_conn_type {
        FIP_CONN_TYPE_VN2VN,
 };
 
+enum ctlr_enabled_state {
+       FCOE_CTLR_ENABLED,
+       FCOE_CTLR_DISABLED,
+       FCOE_CTLR_UNUSED,
+};
+
 struct fcoe_ctlr_device {
        u32                             id;
 
@@ -64,6 +71,8 @@ struct fcoe_ctlr_device {
        int                             fcf_dev_loss_tmo;
        enum fip_conn_type              mode;
 
+       enum ctlr_enabled_state         enabled;
+
        /* expected in host order for displaying */
        struct fcoe_fc_els_lesb         lesb;
 };
index 8742d85..4427393 100644 (file)
@@ -260,6 +260,9 @@ void __fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb,
                     struct net_device *netdev);
 void fcoe_wwn_to_str(u64 wwn, char *buf, int len);
 int fcoe_validate_vport_create(struct fc_vport *vport);
+int fcoe_link_speed_update(struct fc_lport *);
+void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
 
 /**
  * is_fip_mode() - returns true if FIP mode selected.
@@ -289,8 +292,11 @@ static inline bool is_fip_mode(struct fcoe_ctlr *fip)
  * @attached:  whether this transport is already attached
  * @list:      list linkage to all attached transports
  * @match:     handler to allow the transport driver to match up a given netdev
+ * @alloc:      handler to allocate per-instance FCoE structures
+ *             (no discovery or login)
  * @create:    handler to sysfs entry of create for FCoE instances
- * @destroy:   handler to sysfs entry of destroy for FCoE instances
+ * @destroy:    handler to delete per-instance FCoE structures
+ *             (frees all memory)
  * @enable:    handler to sysfs entry of enable for FCoE instances
  * @disable:   handler to sysfs entry of disable for FCoE instances
  */
@@ -299,6 +305,7 @@ struct fcoe_transport {
        bool attached;
        struct list_head list;
        bool (*match) (struct net_device *device);
+       int (*alloc) (struct net_device *device);
        int (*create) (struct net_device *device, enum fip_state fip_mode);
        int (*destroy) (struct net_device *device);
        int (*enable) (struct net_device *device);
@@ -347,7 +354,20 @@ struct fcoe_port {
        struct timer_list     timer;
        struct work_struct    destroy_work;
        u8                    data_src_addr[ETH_ALEN];
+       struct net_device * (*get_netdev)(const struct fc_lport *lport);
 };
+
+/**
+ * fcoe_get_netdev() - Return the net device associated with a local port
+ * @lport: The local port to get the net device from
+ */
+static inline struct net_device *fcoe_get_netdev(const struct fc_lport *lport)
+{
+       struct fcoe_port *port = ((struct fcoe_port *)lport_priv(lport));
+
+       return (port->get_netdev) ? port->get_netdev(lport) : NULL;
+}
+
 void fcoe_clean_pending_queue(struct fc_lport *);
 void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb);
 void fcoe_queue_timer(ulong lport);
@@ -356,7 +376,7 @@ int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
 
 /* FCoE Sysfs helpers */
 void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *);
 
 /**
  * struct netdev_list
@@ -372,4 +392,12 @@ struct fcoe_netdev_mapping {
 int fcoe_transport_attach(struct fcoe_transport *ft);
 int fcoe_transport_detach(struct fcoe_transport *ft);
 
+/* sysfs store handler for ctrl_control interface */
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+                              const char *buf, size_t count);
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+                               const char *buf, size_t count);
+
 #endif /* _LIBFCOE_H */
+
+
diff --git a/include/scsi/scsi_bsg_fc.h b/include/scsi/scsi_bsg_fc.h
deleted file mode 100644 (file)
index 3031b90..0000000
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- *  FC Transport BSG Interface
- *
- *  Copyright (C) 2008   James Smart, Emulex Corporation
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#ifndef SCSI_BSG_FC_H
-#define SCSI_BSG_FC_H
-
-/*
- * This file intended to be included by both kernel and user space
- */
-
-/*
- * FC Transport SGIO v4 BSG Message Support
- */
-
-/* Default BSG request timeout (in seconds) */
-#define FC_DEFAULT_BSG_TIMEOUT         (10 * HZ)
-
-
-/*
- * Request Message Codes supported by the FC Transport
- */
-
-/* define the class masks for the message codes */
-#define FC_BSG_CLS_MASK                0xF0000000      /* find object class */
-#define FC_BSG_HST_MASK                0x80000000      /* fc host class */
-#define FC_BSG_RPT_MASK                0x40000000      /* fc rport class */
-
-       /* fc_host Message Codes */
-#define FC_BSG_HST_ADD_RPORT           (FC_BSG_HST_MASK | 0x00000001)
-#define FC_BSG_HST_DEL_RPORT           (FC_BSG_HST_MASK | 0x00000002)
-#define FC_BSG_HST_ELS_NOLOGIN         (FC_BSG_HST_MASK | 0x00000003)
-#define FC_BSG_HST_CT                  (FC_BSG_HST_MASK | 0x00000004)
-#define FC_BSG_HST_VENDOR              (FC_BSG_HST_MASK | 0x000000FF)
-
-       /* fc_rport Message Codes */
-#define FC_BSG_RPT_ELS                 (FC_BSG_RPT_MASK | 0x00000001)
-#define FC_BSG_RPT_CT                  (FC_BSG_RPT_MASK | 0x00000002)
-
-
-
-/*
- * FC Address Identifiers in Message Structures :
- *
- *   Whenever a command payload contains a FC Address Identifier
- *   (aka port_id), the value is effectively in big-endian
- *   order, thus the array elements are decoded as follows:
- *     element [0] is bits 23:16 of the FC Address Identifier
- *     element [1] is bits 15:8 of the FC Address Identifier
- *     element [2] is bits 7:0 of the FC Address Identifier
- */
-
-
-/*
- * FC Host Messages
- */
-
-/* FC_BSG_HST_ADDR_PORT : */
-
-/* Request:
- * This message requests the FC host to login to the remote port
- * at the specified N_Port_Id.  The remote port is to be enumerated
- * with the transport upon completion of the login.
- */
-struct fc_bsg_host_add_rport {
-       uint8_t         reserved;
-
-       /* FC Address Identier of the remote port to login to */
-       uint8_t         port_id[3];
-};
-
-/* Response:
- * There is no additional response data - fc_bsg_reply->result is sufficient
- */
-
-
-/* FC_BSG_HST_DEL_RPORT : */
-
-/* Request:
- * This message requests the FC host to remove an enumerated
- * remote port and to terminate the login to it.
- *
- * Note: The driver is free to reject this request if it desires to
- * remain logged in with the remote port.
- */
-struct fc_bsg_host_del_rport {
-       uint8_t         reserved;
-
-       /* FC Address Identier of the remote port to logout of */
-       uint8_t         port_id[3];
-};
-
-/* Response:
- * There is no additional response data - fc_bsg_reply->result is sufficient
- */
-
-
-/* FC_BSG_HST_ELS_NOLOGIN : */
-
-/* Request:
- * This message requests the FC_Host to send an ELS to a specific
- * N_Port_ID. The host does not need to log into the remote port,
- * nor does it need to enumerate the rport for further traffic
- * (although, the FC host is free to do so if it desires).
- */
-struct fc_bsg_host_els {
-       /*
-        * ELS Command Code being sent (must be the same as byte 0
-        * of the payload)
-        */
-       uint8_t         command_code;
-
-       /* FC Address Identier of the remote port to send the ELS to */
-       uint8_t         port_id[3];
-};
-
-/* Response:
- */
-/* fc_bsg_ctels_reply->status values */
-#define FC_CTELS_STATUS_OK     0x00000000
-#define FC_CTELS_STATUS_REJECT 0x00000001
-#define FC_CTELS_STATUS_P_RJT  0x00000002
-#define FC_CTELS_STATUS_F_RJT  0x00000003
-#define FC_CTELS_STATUS_P_BSY  0x00000004
-#define FC_CTELS_STATUS_F_BSY  0x00000006
-struct fc_bsg_ctels_reply {
-       /*
-        * Note: An ELS LS_RJT may be reported in 2 ways:
-        *  a) A status of FC_CTELS_STATUS_OK is returned. The caller
-        *     is to look into the ELS receive payload to determine
-        *     LS_ACC or LS_RJT (by contents of word 0). The reject
-        *     data will be in word 1.
-        *  b) A status of FC_CTELS_STATUS_REJECT is returned, The
-        *     rjt_data field will contain valid data.
-        *
-        * Note: ELS LS_ACC is determined by an FC_CTELS_STATUS_OK, and
-        *   the receive payload word 0 indicates LS_ACC
-        *   (e.g. value is 0x02xxxxxx).
-        *
-        * Note: Similarly, a CT Reject may be reported in 2 ways:
-        *  a) A status of FC_CTELS_STATUS_OK is returned. The caller
-        *     is to look into the CT receive payload to determine
-        *     Accept or Reject (by contents of word 2). The reject
-        *     data will be in word 3.
-        *  b) A status of FC_CTELS_STATUS_REJECT is returned, The
-        *     rjt_data field will contain valid data.
-        *
-        * Note: x_RJT/BSY status will indicae that the rjt_data field
-        *   is valid and contains the reason/explanation values.
-        */
-       uint32_t        status;         /* See FC_CTELS_STATUS_xxx */
-
-       /* valid if status is not FC_CTELS_STATUS_OK */
-       struct  {
-               uint8_t action;         /* fragment_id for CT REJECT */
-               uint8_t reason_code;
-               uint8_t reason_explanation;
-               uint8_t vendor_unique;
-       } rjt_data;
-};
-
-
-/* FC_BSG_HST_CT : */
-
-/* Request:
- * This message requests that a CT Request be performed with the
- * indicated N_Port_ID. The driver is responsible for logging in with
- * the fabric and/or N_Port_ID, etc as per FC rules. This request does
- * not mandate that the driver must enumerate the destination in the
- * transport. The driver is allowed to decide whether to enumerate it,
- * and whether to tear it down after the request.
- */
-struct fc_bsg_host_ct {
-       uint8_t         reserved;
-
-       /* FC Address Identier of the remote port to send the ELS to */
-       uint8_t         port_id[3];
-
-       /*
-        * We need words 0-2 of the generic preamble for the LLD's
-        */
-       uint32_t        preamble_word0; /* revision & IN_ID */
-       uint32_t        preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
-       uint32_t        preamble_word2; /* Cmd Code, Max Size */
-
-};
-/* Response:
- *
- * The reply structure is an fc_bsg_ctels_reply structure
- */
-
-
-/* FC_BSG_HST_VENDOR : */
-
-/* Request:
- * Note: When specifying vendor_id, be sure to read the Vendor Type and ID
- *   formatting requirements specified in scsi_netlink.h
- */
-struct fc_bsg_host_vendor {
-       /*
-        * Identifies the vendor that the message is formatted for. This
-        * should be the recipient of the message.
-        */
-       uint64_t vendor_id;
-
-       /* start of vendor command area */
-       uint32_t vendor_cmd[0];
-};
-
-/* Response:
- */
-struct fc_bsg_host_vendor_reply {
-       /* start of vendor response area */
-       uint32_t vendor_rsp[0];
-};
-
-
-
-/*
- * FC Remote Port Messages
- */
-
-/* FC_BSG_RPT_ELS : */
-
-/* Request:
- * This message requests that an ELS be performed with the rport.
- */
-struct fc_bsg_rport_els {
-       /*
-        * ELS Command Code being sent (must be the same as
-        * byte 0 of the payload)
-        */
-       uint8_t els_code;
-};
-
-/* Response:
- *
- * The reply structure is an fc_bsg_ctels_reply structure
- */
-
-
-/* FC_BSG_RPT_CT : */
-
-/* Request:
- * This message requests that a CT Request be performed with the rport.
- */
-struct fc_bsg_rport_ct {
-       /*
-        * We need words 0-2 of the generic preamble for the LLD's
-        */
-       uint32_t        preamble_word0; /* revision & IN_ID */
-       uint32_t        preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
-       uint32_t        preamble_word2; /* Cmd Code, Max Size */
-};
-/* Response:
- *
- * The reply structure is an fc_bsg_ctels_reply structure
- */
-
-
-
-
-/* request (CDB) structure of the sg_io_v4 */
-struct fc_bsg_request {
-       uint32_t msgcode;
-       union {
-               struct fc_bsg_host_add_rport    h_addrport;
-               struct fc_bsg_host_del_rport    h_delrport;
-               struct fc_bsg_host_els          h_els;
-               struct fc_bsg_host_ct           h_ct;
-               struct fc_bsg_host_vendor       h_vendor;
-
-               struct fc_bsg_rport_els         r_els;
-               struct fc_bsg_rport_ct          r_ct;
-       } rqst_data;
-} __attribute__((packed));
-
-
-/* response (request sense data) structure of the sg_io_v4 */
-struct fc_bsg_reply {
-       /*
-        * The completion result. Result exists in two forms:
-        *  if negative, it is an -Exxx system errno value. There will
-        *    be no further reply information supplied.
-        *  else, it's the 4-byte scsi error result, with driver, host,
-        *    msg and status fields. The per-msgcode reply structure
-        *    will contain valid data.
-        */
-       uint32_t result;
-
-       /* If there was reply_payload, how much was recevied ? */
-       uint32_t reply_payload_rcv_len;
-
-       union {
-               struct fc_bsg_host_vendor_reply         vendor_reply;
-
-               struct fc_bsg_ctels_reply               ctels_reply;
-       } reply_data;
-};
-
-
-#endif /* SCSI_BSG_FC_H */
-
index 4908480..2b6956e 100644 (file)
@@ -873,7 +873,7 @@ static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsign
                                       SHOST_DIF_TYPE2_PROTECTION,
                                       SHOST_DIF_TYPE3_PROTECTION };
 
-       if (target_type > SHOST_DIF_TYPE3_PROTECTION)
+       if (target_type >= ARRAY_SIZE(cap))
                return 0;
 
        return shost->prot_capabilities & cap[target_type] ? target_type : 0;
@@ -887,7 +887,7 @@ static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsign
                                       SHOST_DIX_TYPE2_PROTECTION,
                                       SHOST_DIX_TYPE3_PROTECTION };
 
-       if (target_type > SHOST_DIX_TYPE3_PROTECTION)
+       if (target_type >= ARRAY_SIZE(cap))
                return 0;
 
        return shost->prot_capabilities & cap[target_type];
diff --git a/include/scsi/scsi_netlink.h b/include/scsi/scsi_netlink.h
deleted file mode 100644 (file)
index 62b4eda..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- *  SCSI Transport Netlink Interface
- *    Used for the posting of outbound SCSI transport events
- *
- *  Copyright (C) 2006   James Smart, Emulex Corporation
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-#ifndef SCSI_NETLINK_H
-#define SCSI_NETLINK_H
-
-#include <linux/netlink.h>
-#include <linux/types.h>
-
-/*
- * This file intended to be included by both kernel and user space
- */
-
-/* Single Netlink Message type to send all SCSI Transport messages */
-#define SCSI_TRANSPORT_MSG             NLMSG_MIN_TYPE + 1
-
-/* SCSI Transport Broadcast Groups */
-       /* leaving groups 0 and 1 unassigned */
-#define SCSI_NL_GRP_FC_EVENTS          (1<<2)          /* Group 2 */
-#define SCSI_NL_GRP_CNT                        3
-
-
-/* SCSI_TRANSPORT_MSG event message header */
-struct scsi_nl_hdr {
-       uint8_t version;
-       uint8_t transport;
-       uint16_t magic;
-       uint16_t msgtype;
-       uint16_t msglen;
-} __attribute__((aligned(sizeof(uint64_t))));
-
-/* scsi_nl_hdr->version value */
-#define SCSI_NL_VERSION                                1
-
-/* scsi_nl_hdr->magic value */
-#define SCSI_NL_MAGIC                          0xA1B2
-
-/* scsi_nl_hdr->transport value */
-#define SCSI_NL_TRANSPORT                      0
-#define SCSI_NL_TRANSPORT_FC                   1
-#define SCSI_NL_MAX_TRANSPORTS                 2
-
-/* Transport-based scsi_nl_hdr->msgtype values are defined in each transport */
-
-/*
- * GENERIC SCSI scsi_nl_hdr->msgtype Values
- */
-       /* kernel -> user */
-#define SCSI_NL_SHOST_VENDOR                   0x0001
-       /* user -> kernel */
-/* SCSI_NL_SHOST_VENDOR msgtype is kernel->user and user->kernel */
-
-
-/*
- * Message Structures :
- */
-
-/* macro to round up message lengths to 8byte boundary */
-#define SCSI_NL_MSGALIGN(len)          (((len) + 7) & ~7)
-
-
-/*
- * SCSI HOST Vendor Unique messages :
- *   SCSI_NL_SHOST_VENDOR
- *
- * Note: The Vendor Unique message payload will begin directly after
- *      this structure, with the length of the payload per vmsg_datalen.
- *
- * Note: When specifying vendor_id, be sure to read the Vendor Type and ID
- *   formatting requirements specified below
- */
-struct scsi_nl_host_vendor_msg {
-       struct scsi_nl_hdr snlh;                /* must be 1st element ! */
-       uint64_t vendor_id;
-       uint16_t host_no;
-       uint16_t vmsg_datalen;
-} __attribute__((aligned(sizeof(uint64_t))));
-
-
-/*
- * Vendor ID:
- *   If transports post vendor-unique events, they must pass a well-known
- *   32-bit vendor identifier. This identifier consists of 8 bits indicating
- *   the "type" of identifier contained, and 24 bits of id data.
- *
- *   Identifiers for each type:
- *    PCI :  ID data is the 16 bit PCI Registered Vendor ID
- */
-#define SCSI_NL_VID_TYPE_SHIFT         56
-#define SCSI_NL_VID_TYPE_MASK          ((__u64)0xFF << SCSI_NL_VID_TYPE_SHIFT)
-#define SCSI_NL_VID_TYPE_PCI           ((__u64)0x01 << SCSI_NL_VID_TYPE_SHIFT)
-#define SCSI_NL_VID_ID_MASK            (~ SCSI_NL_VID_TYPE_MASK)
-
-
-#define INIT_SCSI_NL_HDR(hdr, t, mtype, mlen)                  \
-       {                                                       \
-       (hdr)->version = SCSI_NL_VERSION;                       \
-       (hdr)->transport = t;                                   \
-       (hdr)->magic = SCSI_NL_MAGIC;                           \
-       (hdr)->msgtype = mtype;                                 \
-       (hdr)->msglen = mlen;                                   \
-       }
-
-#endif /* SCSI_NETLINK_H */
-
diff --git a/include/scsi/scsi_netlink_fc.h b/include/scsi/scsi_netlink_fc.h
deleted file mode 100644 (file)
index cbf76e4..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- *  FC Transport Netlink Interface
- *
- *  Copyright (C) 2006   James Smart, Emulex Corporation
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-#ifndef SCSI_NETLINK_FC_H
-#define SCSI_NETLINK_FC_H
-
-#include <scsi/scsi_netlink.h>
-
-/*
- * This file intended to be included by both kernel and user space
- */
-
-/*
- * FC Transport Message Types
- */
-       /* kernel -> user */
-#define FC_NL_ASYNC_EVENT                      0x0100
-       /* user -> kernel */
-/* none */
-
-
-/*
- * Message Structures :
- */
-
-/* macro to round up message lengths to 8byte boundary */
-#define FC_NL_MSGALIGN(len)            (((len) + 7) & ~7)
-
-
-/*
- * FC Transport Broadcast Event Message :
- *   FC_NL_ASYNC_EVENT
- *
- * Note: if Vendor Unique message, &event_data will be  start of
- *      vendor unique payload, and the length of the payload is
- *       per event_datalen
- *
- * Note: When specifying vendor_id, be sure to read the Vendor Type and ID
- *   formatting requirements specified in scsi_netlink.h
- */
-struct fc_nl_event {
-       struct scsi_nl_hdr snlh;                /* must be 1st element ! */
-       uint64_t seconds;
-       uint64_t vendor_id;
-       uint16_t host_no;
-       uint16_t event_datalen;
-       uint32_t event_num;
-       uint32_t event_code;
-       uint32_t event_data;
-} __attribute__((aligned(sizeof(uint64_t))));
-
-
-#endif /* SCSI_NETLINK_FC_H */
-
diff --git a/include/sound/aess.h b/include/sound/aess.h
new file mode 100644 (file)
index 0000000..cee0d09
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * AESS IP block reset
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#ifndef __SOUND_AESS_H__
+#define __SOUND_AESS_H__
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+/*
+ * AESS_AUTO_GATING_ENABLE_OFFSET: offset in bytes of the AESS IP
+ *     block's AESS_AUTO_GATING_ENABLE__1 register from the IP block's
+ *     base address
+ */
+#define AESS_AUTO_GATING_ENABLE_OFFSET                 0x07c
+
+/* Register bitfields in the AESS_AUTO_GATING_ENABLE__1 register */
+#define AESS_AUTO_GATING_ENABLE_SHIFT                  0
+
+/**
+ * aess_enable_autogating - enable AESS internal autogating
+ * @oh: struct omap_hwmod *
+ *
+ * Enable internal autogating on the AESS.  This allows the AESS to
+ * indicate that it is idle to the OMAP PRCM.  Returns 0.
+ */
+static inline void aess_enable_autogating(void __iomem *base)
+{
+       u32 v;
+
+       /* Set AESS_AUTO_GATING_ENABLE__1.ENABLE to allow idle entry */
+       v = 1 << AESS_AUTO_GATING_ENABLE_SHIFT;
+       writel(v, base + AESS_AUTO_GATING_ENABLE_OFFSET);
+}
+
+#endif /* __SOUND_AESS_H__ */
index 05c5e61..9961726 100644 (file)
@@ -6,10 +6,61 @@
 
 #include <linux/blktrace_api.h>
 #include <linux/blkdev.h>
+#include <linux/buffer_head.h>
 #include <linux/tracepoint.h>
 
 #define RWBS_LEN       8
 
+DECLARE_EVENT_CLASS(block_buffer,
+
+       TP_PROTO(struct buffer_head *bh),
+
+       TP_ARGS(bh),
+
+       TP_STRUCT__entry (
+               __field(  dev_t,        dev                     )
+               __field(  sector_t,     sector                  )
+               __field(  size_t,       size                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = bh->b_bdev->bd_dev;
+               __entry->sector         = bh->b_blocknr;
+               __entry->size           = bh->b_size;
+       ),
+
+       TP_printk("%d,%d sector=%llu size=%zu",
+               MAJOR(__entry->dev), MINOR(__entry->dev),
+               (unsigned long long)__entry->sector, __entry->size
+       )
+);
+
+/**
+ * block_touch_buffer - mark a buffer accessed
+ * @bh: buffer_head being touched
+ *
+ * Called from touch_buffer().
+ */
+DEFINE_EVENT(block_buffer, block_touch_buffer,
+
+       TP_PROTO(struct buffer_head *bh),
+
+       TP_ARGS(bh)
+);
+
+/**
+ * block_dirty_buffer - mark a buffer dirty
+ * @bh: buffer_head being dirtied
+ *
+ * Called from mark_buffer_dirty().
+ */
+DEFINE_EVENT(block_buffer, block_dirty_buffer,
+
+       TP_PROTO(struct buffer_head *bh),
+
+       TP_ARGS(bh)
+);
+
 DECLARE_EVENT_CLASS(block_rq_with_error,
 
        TP_PROTO(struct request_queue *q, struct request *rq),
@@ -206,7 +257,6 @@ TRACE_EVENT(block_bio_bounce,
 
 /**
  * block_bio_complete - completed all work on the block operation
- * @q: queue holding the block operation
  * @bio: block operation completed
  * @error: io error value
  *
@@ -215,9 +265,9 @@ TRACE_EVENT(block_bio_bounce,
  */
 TRACE_EVENT(block_bio_complete,
 
-       TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+       TP_PROTO(struct bio *bio, int error),
 
-       TP_ARGS(q, bio, error),
+       TP_ARGS(bio, error),
 
        TP_STRUCT__entry(
                __field( dev_t,         dev             )
@@ -228,7 +278,8 @@ TRACE_EVENT(block_bio_complete,
        ),
 
        TP_fast_assign(
-               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->dev            = bio->bi_bdev ?
+                                         bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio->bi_size >> 9;
                __entry->error          = error;
@@ -241,11 +292,11 @@ TRACE_EVENT(block_bio_complete,
                  __entry->nr_sector, __entry->error)
 );
 
-DECLARE_EVENT_CLASS(block_bio,
+DECLARE_EVENT_CLASS(block_bio_merge,
 
-       TP_PROTO(struct request_queue *q, struct bio *bio),
+       TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
 
-       TP_ARGS(q, bio),
+       TP_ARGS(q, rq, bio),
 
        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
@@ -272,31 +323,33 @@ DECLARE_EVENT_CLASS(block_bio,
 /**
  * block_bio_backmerge - merging block operation to the end of an existing operation
  * @q: queue holding operation
+ * @rq: request bio is being merged into
  * @bio: new block operation to merge
  *
  * Merging block request @bio to the end of an existing block request
  * in queue @q.
  */
-DEFINE_EVENT(block_bio, block_bio_backmerge,
+DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
 
-       TP_PROTO(struct request_queue *q, struct bio *bio),
+       TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
 
-       TP_ARGS(q, bio)
+       TP_ARGS(q, rq, bio)
 );
 
 /**
  * block_bio_frontmerge - merging block operation to the beginning of an existing operation
  * @q: queue holding operation
+ * @rq: request bio is being merged into
  * @bio: new block operation to merge
  *
  * Merging block IO operation @bio to the beginning of an existing block
  * operation in queue @q.
  */
-DEFINE_EVENT(block_bio, block_bio_frontmerge,
+DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
 
-       TP_PROTO(struct request_queue *q, struct bio *bio),
+       TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
 
-       TP_ARGS(q, bio)
+       TP_ARGS(q, rq, bio)
 );
 
 /**
@@ -306,11 +359,32 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge,
  *
  * About to place the block IO operation @bio into queue @q.
  */
-DEFINE_EVENT(block_bio, block_bio_queue,
+TRACE_EVENT(block_bio_queue,
 
        TP_PROTO(struct request_queue *q, struct bio *bio),
 
-       TP_ARGS(q, bio)
+       TP_ARGS(q, bio),
+
+       TP_STRUCT__entry(
+               __field( dev_t,         dev                     )
+               __field( sector_t,      sector                  )
+               __field( unsigned int,  nr_sector               )
+               __array( char,          rwbs,   RWBS_LEN        )
+               __array( char,          comm,   TASK_COMM_LEN   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = bio->bi_bdev->bd_dev;
+               __entry->sector         = bio->bi_sector;
+               __entry->nr_sector      = bio->bi_size >> 9;
+               blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+               memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+       ),
+
+       TP_printk("%d,%d %s %llu + %u [%s]",
+                 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+                 (unsigned long long)__entry->sector,
+                 __entry->nr_sector, __entry->comm)
 );
 
 DECLARE_EVENT_CLASS(block_get_rq,
index 7e8c36b..4ee4710 100644 (file)
@@ -1324,6 +1324,31 @@ TRACE_EVENT(ext4_fallocate_exit,
                  __entry->ret)
 );
 
+TRACE_EVENT(ext4_punch_hole,
+       TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
+
+       TP_ARGS(inode, offset, len),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, offset                  )
+               __field(        loff_t, len                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->offset = offset;
+               __entry->len    = len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu offset %lld len %lld",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->offset, __entry->len)
+);
+
 TRACE_EVENT(ext4_unlink_enter,
        TP_PROTO(struct inode *parent, struct dentry *dentry),
 
@@ -2068,103 +2093,210 @@ TRACE_EVENT(ext4_ext_remove_space_done,
 );
 
 TRACE_EVENT(ext4_es_insert_extent,
-       TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t len),
+       TP_PROTO(struct inode *inode, struct extent_status *es),
 
-       TP_ARGS(inode, start, len),
+       TP_ARGS(inode, es),
 
        TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
-               __field(        ino_t,  ino                     )
-               __field(        loff_t, start                   )
-               __field(        loff_t, len                     )
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        ext4_lblk_t,    len             )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        unsigned long long, status      )
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
-               __entry->start  = start;
-               __entry->len    = len;
+               __entry->lblk   = es->es_lblk;
+               __entry->len    = es->es_len;
+               __entry->pblk   = ext4_es_pblock(es);
+               __entry->status = ext4_es_status(es);
        ),
 
-       TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
+       TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->start, __entry->len)
+                 __entry->lblk, __entry->len,
+                 __entry->pblk, __entry->status)
 );
 
 TRACE_EVENT(ext4_es_remove_extent,
-       TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t len),
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len),
 
-       TP_ARGS(inode, start, len),
+       TP_ARGS(inode, lblk, len),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(        ino_t,  ino                     )
-               __field(        loff_t, start                   )
+               __field(        loff_t, lblk                    )
                __field(        loff_t, len                     )
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
-               __entry->start  = start;
+               __entry->lblk   = lblk;
                __entry->len    = len;
        ),
 
        TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->start, __entry->len)
+                 __entry->lblk, __entry->len)
 );
 
-TRACE_EVENT(ext4_es_find_extent_enter,
-       TP_PROTO(struct inode *inode, ext4_lblk_t start),
+TRACE_EVENT(ext4_es_find_delayed_extent_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
 
-       TP_ARGS(inode, start),
+       TP_ARGS(inode, lblk),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev             )
                __field(        ino_t,          ino             )
-               __field(        ext4_lblk_t,    start           )
+               __field(        ext4_lblk_t,    lblk            )
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
-               __entry->start  = start;
+               __entry->lblk   = lblk;
        ),
 
-       TP_printk("dev %d,%d ino %lu start %u",
+       TP_printk("dev %d,%d ino %lu lblk %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, __entry->start)
+                 (unsigned long) __entry->ino, __entry->lblk)
 );
 
-TRACE_EVENT(ext4_es_find_extent_exit,
-       TP_PROTO(struct inode *inode, struct extent_status *es,
-                ext4_lblk_t ret),
+TRACE_EVENT(ext4_es_find_delayed_extent_exit,
+       TP_PROTO(struct inode *inode, struct extent_status *es),
 
-       TP_ARGS(inode, es, ret),
+       TP_ARGS(inode, es),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev             )
                __field(        ino_t,          ino             )
-               __field(        ext4_lblk_t,    start           )
+               __field(        ext4_lblk_t,    lblk            )
                __field(        ext4_lblk_t,    len             )
-               __field(        ext4_lblk_t,    ret             )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        unsigned long long, status      )
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
-               __entry->start  = es->start;
-               __entry->len    = es->len;
-               __entry->ret    = ret;
+               __entry->lblk   = es->es_lblk;
+               __entry->len    = es->es_len;
+               __entry->pblk   = ext4_es_pblock(es);
+               __entry->status = ext4_es_status(es);
        ),
 
-       TP_printk("dev %d,%d ino %lu es [%u/%u) ret %u",
+       TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %llx",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
-                 __entry->start, __entry->len, __entry->ret)
+                 __entry->lblk, __entry->len,
+                 __entry->pblk, __entry->status)
+);
+
+TRACE_EVENT(ext4_es_lookup_extent_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
+
+       TP_ARGS(inode, lblk),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    lblk            )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->lblk   = lblk;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->lblk)
+);
+
+TRACE_EVENT(ext4_es_lookup_extent_exit,
+       TP_PROTO(struct inode *inode, struct extent_status *es,
+                int found),
+
+       TP_ARGS(inode, es, found),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        ext4_lblk_t,    len             )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        unsigned long long,     status  )
+               __field(        int,            found           )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->lblk   = es->es_lblk;
+               __entry->len    = es->es_len;
+               __entry->pblk   = ext4_es_pblock(es);
+               __entry->status = ext4_es_status(es);
+               __entry->found  = found;
+       ),
+
+       TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %llx",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->found,
+                 __entry->lblk, __entry->len,
+                 __entry->found ? __entry->pblk : 0,
+                 __entry->found ? __entry->status : 0)
+);
+
+TRACE_EVENT(ext4_es_shrink_enter,
+       TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
+
+       TP_ARGS(sb, nr_to_scan, cache_cnt),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        int,    nr_to_scan              )
+               __field(        int,    cache_cnt               )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = sb->s_dev;
+               __entry->nr_to_scan     = nr_to_scan;
+               __entry->cache_cnt      = cache_cnt;
+       ),
+
+       TP_printk("dev %d,%d nr_to_scan %d cache_cnt %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->nr_to_scan, __entry->cache_cnt)
+);
+
+TRACE_EVENT(ext4_es_shrink_exit,
+       TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt),
+
+       TP_ARGS(sb, shrunk_nr, cache_cnt),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        int,    shrunk_nr               )
+               __field(        int,    cache_cnt               )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = sb->s_dev;
+               __entry->shrunk_nr      = shrunk_nr;
+               __entry->cache_cnt      = cache_cnt;
+       ),
+
+       TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->shrunk_nr, __entry->cache_cnt)
 );
 
 #endif /* _TRACE_EXT4_H */
index 127993d..070df49 100644 (file)
@@ -132,6 +132,104 @@ TRACE_EVENT(jbd2_submit_inode_data,
                  (unsigned long) __entry->ino)
 );
 
+TRACE_EVENT(jbd2_handle_start,
+       TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+                unsigned int line_no, int requested_blocks),
+
+       TP_ARGS(dev, tid, type, line_no, requested_blocks),
+
+       TP_STRUCT__entry(
+               __field(                dev_t,  dev             )
+               __field(        unsigned long,  tid             )
+               __field(         unsigned int,  type            )
+               __field(         unsigned int,  line_no         )
+               __field(                  int,  requested_blocks)
+       ),
+
+       TP_fast_assign(
+               __entry->dev              = dev;
+               __entry->tid              = tid;
+               __entry->type             = type;
+               __entry->line_no          = line_no;
+               __entry->requested_blocks = requested_blocks;
+       ),
+
+       TP_printk("dev %d,%d tid %lu type %u line_no %u "
+                 "requested_blocks %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+                 __entry->type, __entry->line_no, __entry->requested_blocks)
+);
+
+TRACE_EVENT(jbd2_handle_extend,
+       TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+                unsigned int line_no, int buffer_credits,
+                int requested_blocks),
+
+       TP_ARGS(dev, tid, type, line_no, buffer_credits, requested_blocks),
+
+       TP_STRUCT__entry(
+               __field(                dev_t,  dev             )
+               __field(        unsigned long,  tid             )
+               __field(         unsigned int,  type            )
+               __field(         unsigned int,  line_no         )
+               __field(                  int,  buffer_credits  )
+               __field(                  int,  requested_blocks)
+       ),
+
+       TP_fast_assign(
+               __entry->dev              = dev;
+               __entry->tid              = tid;
+               __entry->type             = type;
+               __entry->line_no          = line_no;
+               __entry->buffer_credits   = buffer_credits;
+               __entry->requested_blocks = requested_blocks;
+       ),
+
+       TP_printk("dev %d,%d tid %lu type %u line_no %u "
+                 "buffer_credits %d requested_blocks %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+                 __entry->type, __entry->line_no, __entry->buffer_credits,
+                 __entry->requested_blocks)
+);
+
+TRACE_EVENT(jbd2_handle_stats,
+       TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+                unsigned int line_no, int interval, int sync,
+                int requested_blocks, int dirtied_blocks),
+
+       TP_ARGS(dev, tid, type, line_no, interval, sync,
+               requested_blocks, dirtied_blocks),
+
+       TP_STRUCT__entry(
+               __field(                dev_t,  dev             )
+               __field(        unsigned long,  tid             )
+               __field(         unsigned int,  type            )
+               __field(         unsigned int,  line_no         )
+               __field(                  int,  interval        )
+               __field(                  int,  sync            )
+               __field(                  int,  requested_blocks)
+               __field(                  int,  dirtied_blocks  )
+       ),
+
+       TP_fast_assign(
+               __entry->dev              = dev;
+               __entry->tid              = tid;
+               __entry->type             = type;
+               __entry->line_no          = line_no;
+               __entry->interval         = interval;
+               __entry->sync             = sync;
+               __entry->requested_blocks = requested_blocks;
+               __entry->dirtied_blocks   = dirtied_blocks;
+       ),
+
+       TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d "
+                 "sync %d requested_blocks %d dirtied_blocks %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+                 __entry->type, __entry->line_no, __entry->interval,
+                 __entry->sync, __entry->requested_blocks,
+                 __entry->dirtied_blocks)
+);
+
 TRACE_EVENT(jbd2_run_stats,
        TP_PROTO(dev_t dev, unsigned long tid,
                 struct transaction_run_stats_s *stats),
@@ -142,6 +240,7 @@ TRACE_EVENT(jbd2_run_stats,
                __field(                dev_t,  dev             )
                __field(        unsigned long,  tid             )
                __field(        unsigned long,  wait            )
+               __field(        unsigned long,  request_delay   )
                __field(        unsigned long,  running         )
                __field(        unsigned long,  locked          )
                __field(        unsigned long,  flushing        )
@@ -155,6 +254,7 @@ TRACE_EVENT(jbd2_run_stats,
                __entry->dev            = dev;
                __entry->tid            = tid;
                __entry->wait           = stats->rs_wait;
+               __entry->request_delay  = stats->rs_request_delay;
                __entry->running        = stats->rs_running;
                __entry->locked         = stats->rs_locked;
                __entry->flushing       = stats->rs_flushing;
@@ -164,10 +264,12 @@ TRACE_EVENT(jbd2_run_stats,
                __entry->blocks_logged  = stats->rs_blocks_logged;
        ),
 
-       TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u "
-                 "logging %u handle_count %u blocks %u blocks_logged %u",
+       TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u "
+                 "locked %u flushing %u logging %u handle_count %u "
+                 "blocks %u blocks_logged %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
                  jiffies_to_msecs(__entry->wait),
+                 jiffies_to_msecs(__entry->request_delay),
                  jiffies_to_msecs(__entry->running),
                  jiffies_to_msecs(__entry->locked),
                  jiffies_to_msecs(__entry->flushing),
index b453d92..6a16fd2 100644 (file)
 
 struct wb_writeback_work;
 
+TRACE_EVENT(writeback_dirty_page,
+
+       TP_PROTO(struct page *page, struct address_space *mapping),
+
+       TP_ARGS(page, mapping),
+
+       TP_STRUCT__entry (
+               __array(char, name, 32)
+               __field(unsigned long, ino)
+               __field(pgoff_t, index)
+       ),
+
+       TP_fast_assign(
+               strncpy(__entry->name,
+                       mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
+               __entry->ino = mapping ? mapping->host->i_ino : 0;
+               __entry->index = page->index;
+       ),
+
+       TP_printk("bdi %s: ino=%lu index=%lu",
+               __entry->name,
+               __entry->ino,
+               __entry->index
+       )
+);
+
+DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
+
+       TP_PROTO(struct inode *inode, int flags),
+
+       TP_ARGS(inode, flags),
+
+       TP_STRUCT__entry (
+               __array(char, name, 32)
+               __field(unsigned long, ino)
+               __field(unsigned long, flags)
+       ),
+
+       TP_fast_assign(
+               struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+
+               /* may be called for files on pseudo FSes w/ unregistered bdi */
+               strncpy(__entry->name,
+                       bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+               __entry->ino            = inode->i_ino;
+               __entry->flags          = flags;
+       ),
+
+       TP_printk("bdi %s: ino=%lu flags=%s",
+               __entry->name,
+               __entry->ino,
+               show_inode_state(__entry->flags)
+       )
+);
+
+DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
+
+       TP_PROTO(struct inode *inode, int flags),
+
+       TP_ARGS(inode, flags)
+);
+
+DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
+
+       TP_PROTO(struct inode *inode, int flags),
+
+       TP_ARGS(inode, flags)
+);
+
+DECLARE_EVENT_CLASS(writeback_write_inode_template,
+
+       TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+       TP_ARGS(inode, wbc),
+
+       TP_STRUCT__entry (
+               __array(char, name, 32)
+               __field(unsigned long, ino)
+               __field(int, sync_mode)
+       ),
+
+       TP_fast_assign(
+               strncpy(__entry->name,
+                       dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+               __entry->ino            = inode->i_ino;
+               __entry->sync_mode      = wbc->sync_mode;
+       ),
+
+       TP_printk("bdi %s: ino=%lu sync_mode=%d",
+               __entry->name,
+               __entry->ino,
+               __entry->sync_mode
+       )
+);
+
+DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,
+
+       TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+       TP_ARGS(inode, wbc)
+);
+
+DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
+
+       TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+       TP_ARGS(inode, wbc)
+);
+
 DECLARE_EVENT_CLASS(writeback_work_class,
        TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
        TP_ARGS(bdi, work),
@@ -479,6 +588,13 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
        )
 );
 
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
+       TP_PROTO(struct inode *inode,
+                struct writeback_control *wbc,
+                unsigned long nr_to_write),
+       TP_ARGS(inode, wbc, nr_to_write)
+);
+
 DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
index 4e67194..5c8a1d2 100644 (file)
@@ -68,6 +68,7 @@ header-y += blkpg.h
 header-y += blktrace_api.h
 header-y += bpqether.h
 header-y += bsg.h
+header-y += btrfs.h
 header-y += can.h
 header-y += capability.h
 header-y += capi.h
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
new file mode 100644 (file)
index 0000000..fa3a5f9
--- /dev/null
@@ -0,0 +1,514 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef _UAPI_LINUX_BTRFS_H
+#define _UAPI_LINUX_BTRFS_H
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define BTRFS_IOCTL_MAGIC 0x94
+#define BTRFS_VOL_NAME_MAX 255
+
+/* this should be 4k */
+#define BTRFS_PATH_NAME_MAX 4087
+struct btrfs_ioctl_vol_args {
+       __s64 fd;
+       char name[BTRFS_PATH_NAME_MAX + 1];
+};
+
+#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+
+#define BTRFS_SUBVOL_CREATE_ASYNC      (1ULL << 0)
+#define BTRFS_SUBVOL_RDONLY            (1ULL << 1)
+#define BTRFS_SUBVOL_QGROUP_INHERIT    (1ULL << 2)
+#define BTRFS_FSID_SIZE 16
+#define BTRFS_UUID_SIZE 16
+
+#define BTRFS_QGROUP_INHERIT_SET_LIMITS        (1ULL << 0)
+
+struct btrfs_qgroup_limit {
+       __u64   flags;
+       __u64   max_rfer;
+       __u64   max_excl;
+       __u64   rsv_rfer;
+       __u64   rsv_excl;
+};
+
+struct btrfs_qgroup_inherit {
+       __u64   flags;
+       __u64   num_qgroups;
+       __u64   num_ref_copies;
+       __u64   num_excl_copies;
+       struct btrfs_qgroup_limit lim;
+       __u64   qgroups[0];
+};
+
+struct btrfs_ioctl_qgroup_limit_args {
+       __u64   qgroupid;
+       struct btrfs_qgroup_limit lim;
+};
+
+#define BTRFS_SUBVOL_NAME_MAX 4039
+struct btrfs_ioctl_vol_args_v2 {
+       __s64 fd;
+       __u64 transid;
+       __u64 flags;
+       union {
+               struct {
+                       __u64 size;
+                       struct btrfs_qgroup_inherit __user *qgroup_inherit;
+               };
+               __u64 unused[4];
+       };
+       char name[BTRFS_SUBVOL_NAME_MAX + 1];
+};
+
+/*
+ * structure to report errors and progress to userspace, either as a
+ * result of a finished scrub, a canceled scrub or a progress inquiry
+ */
+struct btrfs_scrub_progress {
+       __u64 data_extents_scrubbed;    /* # of data extents scrubbed */
+       __u64 tree_extents_scrubbed;    /* # of tree extents scrubbed */
+       __u64 data_bytes_scrubbed;      /* # of data bytes scrubbed */
+       __u64 tree_bytes_scrubbed;      /* # of tree bytes scrubbed */
+       __u64 read_errors;              /* # of read errors encountered (EIO) */
+       __u64 csum_errors;              /* # of failed csum checks */
+       __u64 verify_errors;            /* # of occurences, where the metadata
+                                        * of a tree block did not match the
+                                        * expected values, like generation or
+                                        * logical */
+       __u64 no_csum;                  /* # of 4k data block for which no csum
+                                        * is present, probably the result of
+                                        * data written with nodatasum */
+       __u64 csum_discards;            /* # of csum for which no data was found
+                                        * in the extent tree. */
+       __u64 super_errors;             /* # of bad super blocks encountered */
+       __u64 malloc_errors;            /* # of internal kmalloc errors. These
+                                        * will likely cause an incomplete
+                                        * scrub */
+       __u64 uncorrectable_errors;     /* # of errors where either no intact
+                                        * copy was found or the writeback
+                                        * failed */
+       __u64 corrected_errors;         /* # of errors corrected */
+       __u64 last_physical;            /* last physical address scrubbed. In
+                                        * case a scrub was aborted, this can
+                                        * be used to restart the scrub */
+       __u64 unverified_errors;        /* # of occurences where a read for a
+                                        * full (64k) bio failed, but the re-
+                                        * check succeeded for each 4k piece.
+                                        * Intermittent error. */
+};
+
+#define BTRFS_SCRUB_READONLY   1
+struct btrfs_ioctl_scrub_args {
+       __u64 devid;                            /* in */
+       __u64 start;                            /* in */
+       __u64 end;                              /* in */
+       __u64 flags;                            /* in */
+       struct btrfs_scrub_progress progress;   /* out */
+       /* pad to 1k */
+       __u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8];
+};
+
+#define BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS   0
+#define BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID    1
+struct btrfs_ioctl_dev_replace_start_params {
+       __u64 srcdevid; /* in, if 0, use srcdev_name instead */
+       __u64 cont_reading_from_srcdev_mode;    /* in, see #define
+                                                * above */
+       __u8 srcdev_name[BTRFS_DEVICE_PATH_NAME_MAX + 1];       /* in */
+       __u8 tgtdev_name[BTRFS_DEVICE_PATH_NAME_MAX + 1];       /* in */
+};
+
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED    0
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED          1
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED         2
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED         3
+#define BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED                4
+struct btrfs_ioctl_dev_replace_status_params {
+       __u64 replace_state;    /* out, see #define above */
+       __u64 progress_1000;    /* out, 0 <= x <= 1000 */
+       __u64 time_started;     /* out, seconds since 1-Jan-1970 */
+       __u64 time_stopped;     /* out, seconds since 1-Jan-1970 */
+       __u64 num_write_errors; /* out */
+       __u64 num_uncorrectable_read_errors;    /* out */
+};
+
+#define BTRFS_IOCTL_DEV_REPLACE_CMD_START                      0
+#define BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS                     1
+#define BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL                     2
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR                        0
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED             1
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED         2
+struct btrfs_ioctl_dev_replace_args {
+       __u64 cmd;      /* in */
+       __u64 result;   /* out */
+
+       union {
+               struct btrfs_ioctl_dev_replace_start_params start;
+               struct btrfs_ioctl_dev_replace_status_params status;
+       };      /* in/out */
+
+       __u64 spare[64];
+};
+
+struct btrfs_ioctl_dev_info_args {
+       __u64 devid;                            /* in/out */
+       __u8 uuid[BTRFS_UUID_SIZE];             /* in/out */
+       __u64 bytes_used;                       /* out */
+       __u64 total_bytes;                      /* out */
+       __u64 unused[379];                      /* pad to 4k */
+       __u8 path[BTRFS_DEVICE_PATH_NAME_MAX];  /* out */
+};
+
+struct btrfs_ioctl_fs_info_args {
+       __u64 max_id;                           /* out */
+       __u64 num_devices;                      /* out */
+       __u8 fsid[BTRFS_FSID_SIZE];             /* out */
+       __u64 reserved[124];                    /* pad to 1k */
+};
+
+/* balance control ioctl modes */
+#define BTRFS_BALANCE_CTL_PAUSE                1
+#define BTRFS_BALANCE_CTL_CANCEL       2
+
+/*
+ * this is packed, because it should be exactly the same as its disk
+ * byte order counterpart (struct btrfs_disk_balance_args)
+ */
+struct btrfs_balance_args {
+       __u64 profiles;
+       __u64 usage;
+       __u64 devid;
+       __u64 pstart;
+       __u64 pend;
+       __u64 vstart;
+       __u64 vend;
+
+       __u64 target;
+
+       __u64 flags;
+
+       __u64 unused[8];
+} __attribute__ ((__packed__));
+
+/* report balance progress to userspace */
+struct btrfs_balance_progress {
+       __u64 expected;         /* estimated # of chunks that will be
+                                * relocated to fulfill the request */
+       __u64 considered;       /* # of chunks we have considered so far */
+       __u64 completed;        /* # of chunks relocated so far */
+};
+
+#define BTRFS_BALANCE_STATE_RUNNING    (1ULL << 0)
+#define BTRFS_BALANCE_STATE_PAUSE_REQ  (1ULL << 1)
+#define BTRFS_BALANCE_STATE_CANCEL_REQ (1ULL << 2)
+
+struct btrfs_ioctl_balance_args {
+       __u64 flags;                            /* in/out */
+       __u64 state;                            /* out */
+
+       struct btrfs_balance_args data;         /* in/out */
+       struct btrfs_balance_args meta;         /* in/out */
+       struct btrfs_balance_args sys;          /* in/out */
+
+       struct btrfs_balance_progress stat;     /* out */
+
+       __u64 unused[72];                       /* pad to 1k */
+};
+
+#define BTRFS_INO_LOOKUP_PATH_MAX 4080
+struct btrfs_ioctl_ino_lookup_args {
+       __u64 treeid;
+       __u64 objectid;
+       char name[BTRFS_INO_LOOKUP_PATH_MAX];
+};
+
+struct btrfs_ioctl_search_key {
+       /* which root are we searching.  0 is the tree of tree roots */
+       __u64 tree_id;
+
+       /* keys returned will be >= min and <= max */
+       __u64 min_objectid;
+       __u64 max_objectid;
+
+       /* keys returned will be >= min and <= max */
+       __u64 min_offset;
+       __u64 max_offset;
+
+       /* max and min transids to search for */
+       __u64 min_transid;
+       __u64 max_transid;
+
+       /* keys returned will be >= min and <= max */
+       __u32 min_type;
+       __u32 max_type;
+
+       /*
+        * how many items did userland ask for, and how many are we
+        * returning
+        */
+       __u32 nr_items;
+
+       /* align to 64 bits */
+       __u32 unused;
+
+       /* some extra for later */
+       __u64 unused1;
+       __u64 unused2;
+       __u64 unused3;
+       __u64 unused4;
+};
+
+struct btrfs_ioctl_search_header {
+       __u64 transid;
+       __u64 objectid;
+       __u64 offset;
+       __u32 type;
+       __u32 len;
+};
+
+#define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key))
+/*
+ * the buf is an array of search headers where
+ * each header is followed by the actual item
+ * the type field is expanded to 32 bits for alignment
+ */
+struct btrfs_ioctl_search_args {
+       struct btrfs_ioctl_search_key key;
+       char buf[BTRFS_SEARCH_ARGS_BUFSIZE];
+};
+
+struct btrfs_ioctl_clone_range_args {
+  __s64 src_fd;
+  __u64 src_offset, src_length;
+  __u64 dest_offset;
+};
+
+/* flags for the defrag range ioctl */
+#define BTRFS_DEFRAG_RANGE_COMPRESS 1
+#define BTRFS_DEFRAG_RANGE_START_IO 2
+
+struct btrfs_ioctl_space_info {
+       __u64 flags;
+       __u64 total_bytes;
+       __u64 used_bytes;
+};
+
+struct btrfs_ioctl_space_args {
+       __u64 space_slots;
+       __u64 total_spaces;
+       struct btrfs_ioctl_space_info spaces[0];
+};
+
+struct btrfs_data_container {
+       __u32   bytes_left;     /* out -- bytes not needed to deliver output */
+       __u32   bytes_missing;  /* out -- additional bytes needed for result */
+       __u32   elem_cnt;       /* out */
+       __u32   elem_missed;    /* out */
+       __u64   val[0];         /* out */
+};
+
+struct btrfs_ioctl_ino_path_args {
+       __u64                           inum;           /* in */
+       __u64                           size;           /* in */
+       __u64                           reserved[4];
+       /* struct btrfs_data_container  *fspath;           out */
+       __u64                           fspath;         /* out */
+};
+
+struct btrfs_ioctl_logical_ino_args {
+       __u64                           logical;        /* in */
+       __u64                           size;           /* in */
+       __u64                           reserved[4];
+       /* struct btrfs_data_container  *inodes;        out   */
+       __u64                           inodes;
+};
+
+enum btrfs_dev_stat_values {
+       /* disk I/O failure stats */
+       BTRFS_DEV_STAT_WRITE_ERRS, /* EIO or EREMOTEIO from lower layers */
+       BTRFS_DEV_STAT_READ_ERRS, /* EIO or EREMOTEIO from lower layers */
+       BTRFS_DEV_STAT_FLUSH_ERRS, /* EIO or EREMOTEIO from lower layers */
+
+       /* stats for indirect indications for I/O failures */
+       BTRFS_DEV_STAT_CORRUPTION_ERRS, /* checksum error, bytenr error or
+                                        * contents is illegal: this is an
+                                        * indication that the block was damaged
+                                        * during read or write, or written to
+                                        * wrong location or read from wrong
+                                        * location */
+       BTRFS_DEV_STAT_GENERATION_ERRS, /* an indication that blocks have not
+                                        * been written */
+
+       BTRFS_DEV_STAT_VALUES_MAX
+};
+
+/* Reset statistics after reading; needs SYS_ADMIN capability */
+#define        BTRFS_DEV_STATS_RESET           (1ULL << 0)
+
+struct btrfs_ioctl_get_dev_stats {
+       __u64 devid;                            /* in */
+       __u64 nr_items;                         /* in/out */
+       __u64 flags;                            /* in/out */
+
+       /* out values: */
+       __u64 values[BTRFS_DEV_STAT_VALUES_MAX];
+
+       __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
+};
+
+#define BTRFS_QUOTA_CTL_ENABLE 1
+#define BTRFS_QUOTA_CTL_DISABLE        2
+#define BTRFS_QUOTA_CTL_RESCAN 3
+struct btrfs_ioctl_quota_ctl_args {
+       __u64 cmd;
+       __u64 status;
+};
+
+struct btrfs_ioctl_qgroup_assign_args {
+       __u64 assign;
+       __u64 src;
+       __u64 dst;
+};
+
+struct btrfs_ioctl_qgroup_create_args {
+       __u64 create;
+       __u64 qgroupid;
+};
+struct btrfs_ioctl_timespec {
+       __u64 sec;
+       __u32 nsec;
+};
+
+struct btrfs_ioctl_received_subvol_args {
+       char    uuid[BTRFS_UUID_SIZE];  /* in */
+       __u64   stransid;               /* in */
+       __u64   rtransid;               /* out */
+       struct btrfs_ioctl_timespec stime; /* in */
+       struct btrfs_ioctl_timespec rtime; /* out */
+       __u64   flags;                  /* in */
+       __u64   reserved[16];           /* in */
+};
+
+/*
+ * Caller doesn't want file data in the send stream, even if the
+ * search of clone sources doesn't find an extent. UPDATE_EXTENT
+ * commands will be sent instead of WRITE commands.
+ */
+#define BTRFS_SEND_FLAG_NO_FILE_DATA     0x1
+
+struct btrfs_ioctl_send_args {
+       __s64 send_fd;                  /* in */
+       __u64 clone_sources_count;      /* in */
+       __u64 __user *clone_sources;    /* in */
+       __u64 parent_root;              /* in */
+       __u64 flags;                    /* in */
+       __u64 reserved[4];              /* in */
+};
+
+#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
+                                  struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
+                                  struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_RESIZE _IOW(BTRFS_IOCTL_MAGIC, 3, \
+                                  struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_SCAN_DEV _IOW(BTRFS_IOCTL_MAGIC, 4, \
+                                  struct btrfs_ioctl_vol_args)
+/* trans start and trans end are dangerous, and only for
+ * use by applications that know how to avoid the
+ * resulting deadlocks
+ */
+#define BTRFS_IOC_TRANS_START  _IO(BTRFS_IOCTL_MAGIC, 6)
+#define BTRFS_IOC_TRANS_END    _IO(BTRFS_IOCTL_MAGIC, 7)
+#define BTRFS_IOC_SYNC         _IO(BTRFS_IOCTL_MAGIC, 8)
+
+#define BTRFS_IOC_CLONE        _IOW(BTRFS_IOCTL_MAGIC, 9, int)
+#define BTRFS_IOC_ADD_DEV _IOW(BTRFS_IOCTL_MAGIC, 10, \
+                                  struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_RM_DEV _IOW(BTRFS_IOCTL_MAGIC, 11, \
+                                  struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_BALANCE _IOW(BTRFS_IOCTL_MAGIC, 12, \
+                                  struct btrfs_ioctl_vol_args)
+
+#define BTRFS_IOC_CLONE_RANGE _IOW(BTRFS_IOCTL_MAGIC, 13, \
+                                 struct btrfs_ioctl_clone_range_args)
+
+#define BTRFS_IOC_SUBVOL_CREATE _IOW(BTRFS_IOCTL_MAGIC, 14, \
+                                  struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \
+                               struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_DEFRAG_RANGE _IOW(BTRFS_IOCTL_MAGIC, 16, \
+                               struct btrfs_ioctl_defrag_range_args)
+#define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \
+                                  struct btrfs_ioctl_search_args)
+#define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \
+                                  struct btrfs_ioctl_ino_lookup_args)
+#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64)
+#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \
+                                   struct btrfs_ioctl_space_args)
+#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64)
+#define BTRFS_IOC_WAIT_SYNC  _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
+#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
+                                  struct btrfs_ioctl_vol_args_v2)
+#define BTRFS_IOC_SUBVOL_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 24, \
+                                  struct btrfs_ioctl_vol_args_v2)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
+#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
+                             struct btrfs_ioctl_scrub_args)
+#define BTRFS_IOC_SCRUB_CANCEL _IO(BTRFS_IOCTL_MAGIC, 28)
+#define BTRFS_IOC_SCRUB_PROGRESS _IOWR(BTRFS_IOCTL_MAGIC, 29, \
+                                      struct btrfs_ioctl_scrub_args)
+#define BTRFS_IOC_DEV_INFO _IOWR(BTRFS_IOCTL_MAGIC, 30, \
+                                struct btrfs_ioctl_dev_info_args)
+#define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
+                              struct btrfs_ioctl_fs_info_args)
+#define BTRFS_IOC_BALANCE_V2 _IOWR(BTRFS_IOCTL_MAGIC, 32, \
+                                  struct btrfs_ioctl_balance_args)
+#define BTRFS_IOC_BALANCE_CTL _IOW(BTRFS_IOCTL_MAGIC, 33, int)
+#define BTRFS_IOC_BALANCE_PROGRESS _IOR(BTRFS_IOCTL_MAGIC, 34, \
+                                       struct btrfs_ioctl_balance_args)
+#define BTRFS_IOC_INO_PATHS _IOWR(BTRFS_IOCTL_MAGIC, 35, \
+                                       struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
+                                       struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_SET_RECEIVED_SUBVOL _IOWR(BTRFS_IOCTL_MAGIC, 37, \
+                               struct btrfs_ioctl_received_subvol_args)
+#define BTRFS_IOC_SEND _IOW(BTRFS_IOCTL_MAGIC, 38, struct btrfs_ioctl_send_args)
+#define BTRFS_IOC_DEVICES_READY _IOR(BTRFS_IOCTL_MAGIC, 39, \
+                                    struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_QUOTA_CTL _IOWR(BTRFS_IOCTL_MAGIC, 40, \
+                              struct btrfs_ioctl_quota_ctl_args)
+#define BTRFS_IOC_QGROUP_ASSIGN _IOW(BTRFS_IOCTL_MAGIC, 41, \
+                              struct btrfs_ioctl_qgroup_assign_args)
+#define BTRFS_IOC_QGROUP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 42, \
+                              struct btrfs_ioctl_qgroup_create_args)
+#define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
+                              struct btrfs_ioctl_qgroup_limit_args)
+#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \
+                                  char[BTRFS_LABEL_SIZE])
+#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \
+                                  char[BTRFS_LABEL_SIZE])
+#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
+                                     struct btrfs_ioctl_get_dev_stats)
+#define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \
+                                   struct btrfs_ioctl_dev_replace_args)
+
+#endif /* _UAPI_LINUX_BTRFS_H */
index 539b179..7e75b6f 100644 (file)
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY    _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR       4
-#define DM_VERSION_MINOR       23
-#define DM_VERSION_PATCHLEVEL  1
-#define DM_VERSION_EXTRA       "-ioctl (2012-12-18)"
+#define DM_VERSION_MINOR       24
+#define DM_VERSION_PATCHLEVEL  0
+#define DM_VERSION_EXTRA       "-ioctl (2013-01-15)"
 
 /* Status bits */
 #define DM_READONLY_FLAG       (1 << 0) /* In/Out */
@@ -336,4 +336,9 @@ enum {
  */
 #define DM_SECURE_DATA_FLAG            (1 << 15) /* In */
 
+/*
+ * If set, a message generated output data.
+ */
+#define DM_DATA_OUT_FLAG               (1 << 16) /* Out */
+
 #endif                         /* _LINUX_DM_IOCTL_H */
index 900b948..8072d35 100644 (file)
@@ -395,6 +395,8 @@ typedef struct elf64_shdr {
 #define NT_ARM_TLS     0x401           /* ARM TLS register */
 #define NT_ARM_HW_BREAK        0x402           /* ARM hardware breakpoint registers */
 #define NT_ARM_HW_WATCH        0x403           /* ARM hardware watchpoint registers */
+#define NT_METAG_CBUF  0x500           /* Metag catch buffer registers */
+#define NT_METAG_RPIPE 0x501           /* Metag read pipeline state */
 
 
 /* Note header in a PT_NOTE section */
index 33fbc99..7b26a62 100644 (file)
  * if it becomes full and it is queried once a second to see if
  * anything is in it.  Incoming commands to the driver will get
  * delivered as commands.
- *
- * This driver provides two main interfaces: one for in-kernel
- * applications and another for userland applications.  The
- * capabilities are basically the same for both interface, although
- * the interfaces are somewhat different.  The stuff in the
- * #ifdef __KERNEL__ below is the in-kernel interface.  The userland
- * interface is defined later in the file.  */
-
-
+ */
 
 /*
  * This is an overlay for all the address types, so it's easy to
index 996719f..f055e58 100644 (file)
@@ -87,6 +87,8 @@
 #define IS_FSINFO(x)   (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \
                         && le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2)
 
+#define FAT_STATE_DIRTY 0x01
+
 struct __fat_dirent {
        long            d_ino;
        __kernel_off_t  d_off;
@@ -120,14 +122,34 @@ struct fat_boot_sector {
        __le32  hidden;         /* hidden sectors (unused) */
        __le32  total_sect;     /* number of sectors (if sectors == 0) */
 
-       /* The following fields are only used by FAT32 */
-       __le32  fat32_length;   /* sectors/FAT */
-       __le16  flags;          /* bit 8: fat mirroring, low 4: active fat */
-       __u8    version[2];     /* major, minor filesystem version */
-       __le32  root_cluster;   /* first cluster in root directory */
-       __le16  info_sector;    /* filesystem info sector */
-       __le16  backup_boot;    /* backup boot sector */
-       __le16  reserved2[6];   /* Unused */
+       union {
+               struct {
+                       /*  Extended BPB Fields for FAT16 */
+                       __u8    drive_number;   /* Physical drive number */
+                       __u8    state;          /* undocumented, but used
+                                                  for mount state. */
+                       /* other fields are not added here */
+               } fat16;
+
+               struct {
+                       /* only used by FAT32 */
+                       __le32  length;         /* sectors/FAT */
+                       __le16  flags;          /* bit 8: fat mirroring,
+                                                  low 4: active fat */
+                       __u8    version[2];     /* major, minor filesystem
+                                                  version */
+                       __le32  root_cluster;   /* first cluster in
+                                                  root directory */
+                       __le16  info_sector;    /* filesystem info sector */
+                       __le16  backup_boot;    /* backup boot sector */
+                       __le16  reserved2[6];   /* Unused */
+                       /* Extended BPB Fields for FAT32 */
+                       __u8    drive_number;   /* Physical drive number */
+                       __u8    state;          /* undocumented, but used
+                                                  for mount state. */
+                       /* other fields are not added here */
+               } fat32;
+       };
 };
 
 struct fat_boot_fsinfo {
index dfb5144..4f52549 100644 (file)
@@ -33,13 +33,14 @@ enum {
        NBD_CMD_READ = 0,
        NBD_CMD_WRITE = 1,
        NBD_CMD_DISC = 2,
-       /* there is a gap here to match userspace */
+       NBD_CMD_FLUSH = 3,
        NBD_CMD_TRIM = 4
 };
 
 /* values for flags field */
 #define NBD_FLAG_HAS_FLAGS    (1 << 0) /* nbd-server supports flags */
 #define NBD_FLAG_READ_ONLY    (1 << 1) /* device is read-only */
+#define NBD_FLAG_SEND_FLUSH   (1 << 2) /* can flush writeback cache */
 /* there is a gap here to match userspace */
 #define NBD_FLAG_SEND_TRIM    (1 << 5) /* send trim/discard */
 
index 4758d1b..4f41f30 100644 (file)
@@ -303,6 +303,15 @@ enum {
        VFIO_PCI_BAR5_REGION_INDEX,
        VFIO_PCI_ROM_REGION_INDEX,
        VFIO_PCI_CONFIG_REGION_INDEX,
+       /*
+        * Expose VGA regions defined for PCI base class 03, subclass 00.
+        * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
+        * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
+        * range is found at its identity mapped offset from the region
+        * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
+        * between described ranges are unimplemented.
+        */
+       VFIO_PCI_VGA_REGION_INDEX,
        VFIO_PCI_NUM_REGIONS
 };
 
index 26607bd..e4629b9 100644 (file)
 
 /* Namespaces */
 #define XATTR_OS2_PREFIX "os2."
-#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)
+#define XATTR_OS2_PREFIX_LEN (sizeof(XATTR_OS2_PREFIX) - 1)
+
+#define XATTR_MAC_OSX_PREFIX "osx."
+#define XATTR_MAC_OSX_PREFIX_LEN (sizeof(XATTR_MAC_OSX_PREFIX) - 1)
 
 #define XATTR_SECURITY_PREFIX  "security."
-#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1)
+#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
 
 #define XATTR_SYSTEM_PREFIX "system."
-#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)
+#define XATTR_SYSTEM_PREFIX_LEN (sizeof(XATTR_SYSTEM_PREFIX) - 1)
 
 #define XATTR_TRUSTED_PREFIX "trusted."
-#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1)
+#define XATTR_TRUSTED_PREFIX_LEN (sizeof(XATTR_TRUSTED_PREFIX) - 1)
 
 #define XATTR_USER_PREFIX "user."
-#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
+#define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)
 
 /* Security namespace */
 #define XATTR_EVM_SUFFIX "evm"
index 29a87dd..75746d5 100644 (file)
@@ -1,2 +1,5 @@
 # UAPI Header export list
 header-y += fc/
+header-y += scsi_bsg_fc.h
+header-y += scsi_netlink.h
+header-y += scsi_netlink_fc.h
index aafaa5a..5ead9fa 100644 (file)
@@ -1 +1,5 @@
 # UAPI Header export list
+header-y += fc_els.h
+header-y += fc_fs.h
+header-y += fc_gs.h
+header-y += fc_ns.h
diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
new file mode 100644 (file)
index 0000000..481abbd
--- /dev/null
@@ -0,0 +1,831 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_ELS_H_
+#define        _FC_ELS_H_
+
+#include <linux/types.h>
+
+/*
+ * Fibre Channel Switch - Enhanced Link Services definitions.
+ * From T11 FC-LS Rev 1.2 June 7, 2005.
+ */
+
+/*
+ * ELS Command codes - byte 0 of the frame payload
+ */
+enum fc_els_cmd {
+       ELS_LS_RJT =    0x01,   /* ELS reject */
+       ELS_LS_ACC =    0x02,   /* ELS Accept */
+       ELS_PLOGI =     0x03,   /* N_Port login */
+       ELS_FLOGI =     0x04,   /* F_Port login */
+       ELS_LOGO =      0x05,   /* Logout */
+       ELS_ABTX =      0x06,   /* Abort exchange - obsolete */
+       ELS_RCS =       0x07,   /* read connection status */
+       ELS_RES =       0x08,   /* read exchange status block */
+       ELS_RSS =       0x09,   /* read sequence status block */
+       ELS_RSI =       0x0a,   /* read sequence initiative */
+       ELS_ESTS =      0x0b,   /* establish streaming */
+       ELS_ESTC =      0x0c,   /* estimate credit */
+       ELS_ADVC =      0x0d,   /* advise credit */
+       ELS_RTV =       0x0e,   /* read timeout value */
+       ELS_RLS =       0x0f,   /* read link error status block */
+       ELS_ECHO =      0x10,   /* echo */
+       ELS_TEST =      0x11,   /* test */
+       ELS_RRQ =       0x12,   /* reinstate recovery qualifier */
+       ELS_REC =       0x13,   /* read exchange concise */
+       ELS_SRR =       0x14,   /* sequence retransmission request */
+       ELS_PRLI =      0x20,   /* process login */
+       ELS_PRLO =      0x21,   /* process logout */
+       ELS_SCN =       0x22,   /* state change notification */
+       ELS_TPLS =      0x23,   /* test process login state */
+       ELS_TPRLO =     0x24,   /* third party process logout */
+       ELS_LCLM =      0x25,   /* login control list mgmt (obs) */
+       ELS_GAID =      0x30,   /* get alias_ID */
+       ELS_FACT =      0x31,   /* fabric activate alias_id */
+       ELS_FDACDT =    0x32,   /* fabric deactivate alias_id */
+       ELS_NACT =      0x33,   /* N-port activate alias_id */
+       ELS_NDACT =     0x34,   /* N-port deactivate alias_id */
+       ELS_QOSR =      0x40,   /* quality of service request */
+       ELS_RVCS =      0x41,   /* read virtual circuit status */
+       ELS_PDISC =     0x50,   /* discover N_port service params */
+       ELS_FDISC =     0x51,   /* discover F_port service params */
+       ELS_ADISC =     0x52,   /* discover address */
+       ELS_RNC =       0x53,   /* report node cap (obs) */
+       ELS_FARP_REQ =  0x54,   /* FC ARP request */
+       ELS_FARP_REPL = 0x55,   /* FC ARP reply */
+       ELS_RPS =       0x56,   /* read port status block */
+       ELS_RPL =       0x57,   /* read port list */
+       ELS_RPBC =      0x58,   /* read port buffer condition */
+       ELS_FAN =       0x60,   /* fabric address notification */
+       ELS_RSCN =      0x61,   /* registered state change notification */
+       ELS_SCR =       0x62,   /* state change registration */
+       ELS_RNFT =      0x63,   /* report node FC-4 types */
+       ELS_CSR =       0x68,   /* clock synch. request */
+       ELS_CSU =       0x69,   /* clock synch. update */
+       ELS_LINIT =     0x70,   /* loop initialize */
+       ELS_LSTS =      0x72,   /* loop status */
+       ELS_RNID =      0x78,   /* request node ID data */
+       ELS_RLIR =      0x79,   /* registered link incident report */
+       ELS_LIRR =      0x7a,   /* link incident record registration */
+       ELS_SRL =       0x7b,   /* scan remote loop */
+       ELS_SBRP =      0x7c,   /* set bit-error reporting params */
+       ELS_RPSC =      0x7d,   /* report speed capabilities */
+       ELS_QSA =       0x7e,   /* query security attributes */
+       ELS_EVFP =      0x7f,   /* exchange virt. fabrics params */
+       ELS_LKA =       0x80,   /* link keep-alive */
+       ELS_AUTH_ELS =  0x90,   /* authentication ELS */
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define        FC_ELS_CMDS_INIT {                      \
+       [ELS_LS_RJT] =  "LS_RJT",               \
+       [ELS_LS_ACC] =  "LS_ACC",               \
+       [ELS_PLOGI] =   "PLOGI",                \
+       [ELS_FLOGI] =   "FLOGI",                \
+       [ELS_LOGO] =    "LOGO",                 \
+       [ELS_ABTX] =    "ABTX",                 \
+       [ELS_RCS] =     "RCS",                  \
+       [ELS_RES] =     "RES",                  \
+       [ELS_RSS] =     "RSS",                  \
+       [ELS_RSI] =     "RSI",                  \
+       [ELS_ESTS] =    "ESTS",                 \
+       [ELS_ESTC] =    "ESTC",                 \
+       [ELS_ADVC] =    "ADVC",                 \
+       [ELS_RTV] =     "RTV",                  \
+       [ELS_RLS] =     "RLS",                  \
+       [ELS_ECHO] =    "ECHO",                 \
+       [ELS_TEST] =    "TEST",                 \
+       [ELS_RRQ] =     "RRQ",                  \
+       [ELS_REC] =     "REC",                  \
+       [ELS_SRR] =     "SRR",                  \
+       [ELS_PRLI] =    "PRLI",                 \
+       [ELS_PRLO] =    "PRLO",                 \
+       [ELS_SCN] =     "SCN",                  \
+       [ELS_TPLS] =    "TPLS",                 \
+       [ELS_TPRLO] =   "TPRLO",                \
+       [ELS_LCLM] =    "LCLM",                 \
+       [ELS_GAID] =    "GAID",                 \
+       [ELS_FACT] =    "FACT",                 \
+       [ELS_FDACDT] =  "FDACDT",               \
+       [ELS_NACT] =    "NACT",                 \
+       [ELS_NDACT] =   "NDACT",                \
+       [ELS_QOSR] =    "QOSR",                 \
+       [ELS_RVCS] =    "RVCS",                 \
+       [ELS_PDISC] =   "PDISC",                \
+       [ELS_FDISC] =   "FDISC",                \
+       [ELS_ADISC] =   "ADISC",                \
+       [ELS_RNC] =     "RNC",                  \
+       [ELS_FARP_REQ] = "FARP_REQ",            \
+       [ELS_FARP_REPL] =  "FARP_REPL",         \
+       [ELS_RPS] =     "RPS",                  \
+       [ELS_RPL] =     "RPL",                  \
+       [ELS_RPBC] =    "RPBC",                 \
+       [ELS_FAN] =     "FAN",                  \
+       [ELS_RSCN] =    "RSCN",                 \
+       [ELS_SCR] =     "SCR",                  \
+       [ELS_RNFT] =    "RNFT",                 \
+       [ELS_CSR] =     "CSR",                  \
+       [ELS_CSU] =     "CSU",                  \
+       [ELS_LINIT] =   "LINIT",                \
+       [ELS_LSTS] =    "LSTS",                 \
+       [ELS_RNID] =    "RNID",                 \
+       [ELS_RLIR] =    "RLIR",                 \
+       [ELS_LIRR] =    "LIRR",                 \
+       [ELS_SRL] =     "SRL",                  \
+       [ELS_SBRP] =    "SBRP",                 \
+       [ELS_RPSC] =    "RPSC",                 \
+       [ELS_QSA] =     "QSA",                  \
+       [ELS_EVFP] =    "EVFP",                 \
+       [ELS_LKA] =     "LKA",                  \
+       [ELS_AUTH_ELS] = "AUTH_ELS",            \
+}
+
+/*
+ * LS_ACC payload.
+ */
+struct fc_els_ls_acc {
+       __u8          la_cmd;           /* command code ELS_LS_ACC */
+       __u8          la_resv[3];       /* reserved */
+};
+
+/*
+ * ELS reject payload.
+ */
+struct fc_els_ls_rjt {
+       __u8    er_cmd;         /* command code ELS_LS_RJT */
+       __u8    er_resv[4];     /* reserved must be zero */
+       __u8    er_reason;      /* reason (enum fc_els_rjt_reason below) */
+       __u8    er_explan;      /* explanation (enum fc_els_rjt_explan below) */
+       __u8    er_vendor;      /* vendor specific code */
+};
+
+/*
+ * ELS reject reason codes (er_reason).
+ */
+enum fc_els_rjt_reason {
+       ELS_RJT_NONE =          0,      /* no reject - not to be sent */
+       ELS_RJT_INVAL =         0x01,   /* invalid ELS command code */
+       ELS_RJT_LOGIC =         0x03,   /* logical error */
+       ELS_RJT_BUSY =          0x05,   /* logical busy */
+       ELS_RJT_PROT =          0x07,   /* protocol error */
+       ELS_RJT_UNAB =          0x09,   /* unable to perform command request */
+       ELS_RJT_UNSUP =         0x0b,   /* command not supported */
+       ELS_RJT_INPROG =        0x0e,   /* command already in progress */
+       ELS_RJT_FIP =           0x20,   /* FIP error */
+       ELS_RJT_VENDOR =        0xff,   /* vendor specific error */
+};
+
+
+/*
+ * reason code explanation (er_explan).
+ */
+enum fc_els_rjt_explan {
+       ELS_EXPL_NONE =         0x00,   /* No additional explanation */
+       ELS_EXPL_SPP_OPT_ERR =  0x01,   /* service parameter error - options */
+       ELS_EXPL_SPP_ICTL_ERR = 0x03,   /* service parm error - initiator ctl */
+       ELS_EXPL_AH =           0x11,   /* invalid association header */
+       ELS_EXPL_AH_REQ =       0x13,   /* association_header required */
+       ELS_EXPL_SID =          0x15,   /* invalid originator S_ID */
+       ELS_EXPL_OXID_RXID =    0x17,   /* invalid OX_ID-RX_ID combination */
+       ELS_EXPL_INPROG =       0x19,   /* Request already in progress */
+       ELS_EXPL_PLOGI_REQD =   0x1e,   /* N_Port login required */
+       ELS_EXPL_INSUF_RES =    0x29,   /* insufficient resources */
+       ELS_EXPL_UNAB_DATA =    0x2a,   /* unable to supply requested data */
+       ELS_EXPL_UNSUPR =       0x2c,   /* Request not supported */
+       ELS_EXPL_INV_LEN =      0x2d,   /* Invalid payload length */
+       ELS_EXPL_NOT_NEIGHBOR = 0x62,   /* VN2VN_Port not in neighbor set */
+       /* TBD - above definitions incomplete */
+};
+
+/*
+ * Common service parameters (N ports).
+ */
+struct fc_els_csp {
+       __u8            sp_hi_ver;      /* highest version supported (obs.) */
+       __u8            sp_lo_ver;      /* highest version supported (obs.) */
+       __be16          sp_bb_cred;     /* buffer-to-buffer credits */
+       __be16          sp_features;    /* common feature flags */
+       __be16          sp_bb_data;     /* b-b state number and data field sz */
+       union {
+               struct {
+                       __be16  _sp_tot_seq; /* total concurrent sequences */
+                       __be16  _sp_rel_off; /* rel. offset by info cat */
+               } sp_plogi;
+               struct {
+                       __be32  _sp_r_a_tov; /* resource alloc. timeout msec */
+               } sp_flogi_acc;
+       } sp_u;
+       __be32          sp_e_d_tov;     /* error detect timeout value */
+};
+#define        sp_tot_seq      sp_u.sp_plogi._sp_tot_seq
+#define        sp_rel_off      sp_u.sp_plogi._sp_rel_off
+#define        sp_r_a_tov      sp_u.sp_flogi_acc._sp_r_a_tov
+
+#define        FC_SP_BB_DATA_MASK 0xfff /* mask for data field size in sp_bb_data */
+
+/*
+ * Minimum and maximum values for max data field size in service parameters.
+ */
+#define        FC_SP_MIN_MAX_PAYLOAD   FC_MIN_MAX_PAYLOAD
+#define        FC_SP_MAX_MAX_PAYLOAD   FC_MAX_PAYLOAD
+
+/*
+ * sp_features
+ */
+#define        FC_SP_FT_NPIV   0x8000  /* multiple N_Port_ID support (FLOGI) */
+#define        FC_SP_FT_CIRO   0x8000  /* continuously increasing rel off (PLOGI) */
+#define        FC_SP_FT_CLAD   0x8000  /* clean address (in FLOGI LS_ACC) */
+#define        FC_SP_FT_RAND   0x4000  /* random relative offset */
+#define        FC_SP_FT_VAL    0x2000  /* valid vendor version level */
+#define        FC_SP_FT_NPIV_ACC       0x2000  /* NPIV assignment (FLOGI LS_ACC) */
+#define        FC_SP_FT_FPORT  0x1000  /* F port (1) vs. N port (0) */
+#define        FC_SP_FT_ABB    0x0800  /* alternate BB_credit management */
+#define        FC_SP_FT_EDTR   0x0400  /* E_D_TOV Resolution is nanoseconds */
+#define        FC_SP_FT_MCAST  0x0200  /* multicast */
+#define        FC_SP_FT_BCAST  0x0100  /* broadcast */
+#define        FC_SP_FT_HUNT   0x0080  /* hunt group */
+#define        FC_SP_FT_SIMP   0x0040  /* dedicated simplex */
+#define        FC_SP_FT_SEC    0x0020  /* reserved for security */
+#define        FC_SP_FT_CSYN   0x0010  /* clock synch. supported */
+#define        FC_SP_FT_RTTOV  0x0008  /* R_T_TOV value 100 uS, else 100 mS */
+#define        FC_SP_FT_HALF   0x0004  /* dynamic half duplex */
+#define        FC_SP_FT_SEQC   0x0002  /* SEQ_CNT */
+#define        FC_SP_FT_PAYL   0x0001  /* FLOGI payload length 256, else 116 */
+
+/*
+ * Class-specific service parameters.
+ */
+struct fc_els_cssp {
+       __be16          cp_class;       /* class flags */
+       __be16          cp_init;        /* initiator flags */
+       __be16          cp_recip;       /* recipient flags */
+       __be16          cp_rdfs;        /* receive data field size */
+       __be16          cp_con_seq;     /* concurrent sequences */
+       __be16          cp_ee_cred;     /* N-port end-to-end credit */
+       __u8            cp_resv1;       /* reserved */
+       __u8            cp_open_seq;    /* open sequences per exchange */
+       __u8            _cp_resv2[2];   /* reserved */
+};
+
+/*
+ * cp_class flags.
+ */
+#define        FC_CPC_VALID    0x8000          /* class valid */
+#define        FC_CPC_IMIX     0x4000          /* intermix mode */
+#define        FC_CPC_SEQ      0x0800          /* sequential delivery */
+#define        FC_CPC_CAMP     0x0200          /* camp-on */
+#define        FC_CPC_PRI      0x0080          /* priority */
+
+/*
+ * cp_init flags.
+ * (TBD: not all flags defined here).
+ */
+#define        FC_CPI_CSYN     0x0010          /* clock synch. capable */
+
+/*
+ * cp_recip flags.
+ */
+#define        FC_CPR_CSYN     0x0008          /* clock synch. capable */
+
+/*
+ * NFC_ELS_FLOGI: Fabric login request.
+ * NFC_ELS_PLOGI: Port login request (same format).
+ */
+struct fc_els_flogi {
+       __u8            fl_cmd;         /* command */
+       __u8            _fl_resvd[3];   /* must be zero */
+       struct fc_els_csp fl_csp;       /* common service parameters */
+       __be64          fl_wwpn;        /* port name */
+       __be64          fl_wwnn;        /* node name */
+       struct fc_els_cssp fl_cssp[4];  /* class 1-4 service parameters */
+       __u8            fl_vend[16];    /* vendor version level */
+} __attribute__((__packed__));
+
+/*
+ * Process login service parameter page.
+ */
+struct fc_els_spp {
+       __u8            spp_type;       /* type code or common service params */
+       __u8            spp_type_ext;   /* type code extension */
+       __u8            spp_flags;
+       __u8            _spp_resvd;
+       __be32          spp_orig_pa;    /* originator process associator */
+       __be32          spp_resp_pa;    /* responder process associator */
+       __be32          spp_params;     /* service parameters */
+};
+
+/*
+ * spp_flags.
+ */
+#define        FC_SPP_OPA_VAL      0x80        /* originator proc. assoc. valid */
+#define        FC_SPP_RPA_VAL      0x40        /* responder proc. assoc. valid */
+#define        FC_SPP_EST_IMG_PAIR 0x20        /* establish image pair */
+#define        FC_SPP_RESP_MASK    0x0f        /* mask for response code (below) */
+
+/*
+ * SPP response code in spp_flags - lower 4 bits.
+ */
+enum fc_els_spp_resp {
+       FC_SPP_RESP_ACK =       1,      /* request executed */
+       FC_SPP_RESP_RES =       2,      /* unable due to lack of resources */
+       FC_SPP_RESP_INIT =      3,      /* initialization not complete */
+       FC_SPP_RESP_NO_PA =     4,      /* unknown process associator */
+       FC_SPP_RESP_CONF =      5,      /* configuration precludes image pair */
+       FC_SPP_RESP_COND =      6,      /* request completed conditionally */
+       FC_SPP_RESP_MULT =      7,      /* unable to handle multiple SPPs */
+       FC_SPP_RESP_INVL =      8,      /* SPP is invalid */
+};
+
+/*
+ * ELS_RRQ - Reinstate Recovery Qualifier
+ */
+struct fc_els_rrq {
+       __u8            rrq_cmd;        /* command (0x12) */
+       __u8            rrq_zero[3];    /* specified as zero - part of cmd */
+       __u8            rrq_resvd;      /* reserved */
+       __u8            rrq_s_id[3];    /* originator FID */
+       __be16          rrq_ox_id;      /* originator exchange ID */
+       __be16          rrq_rx_id;      /* responders exchange ID */
+};
+
+/*
+ * ELS_REC - Read exchange concise.
+ */
+struct fc_els_rec {
+       __u8            rec_cmd;        /* command (0x13) */
+       __u8            rec_zero[3];    /* specified as zero - part of cmd */
+       __u8            rec_resvd;      /* reserved */
+       __u8            rec_s_id[3];    /* originator FID */
+       __be16          rec_ox_id;      /* originator exchange ID */
+       __be16          rec_rx_id;      /* responders exchange ID */
+};
+
+/*
+ * ELS_REC LS_ACC payload.
+ */
+struct fc_els_rec_acc {
+       __u8            reca_cmd;       /* accept (0x02) */
+       __u8            reca_zero[3];   /* specified as zero - part of cmd */
+       __be16          reca_ox_id;     /* originator exchange ID */
+       __be16          reca_rx_id;     /* responders exchange ID */
+       __u8            reca_resvd1;    /* reserved */
+       __u8            reca_ofid[3];   /* originator FID */
+       __u8            reca_resvd2;    /* reserved */
+       __u8            reca_rfid[3];   /* responder FID */
+       __be32          reca_fc4value;  /* FC4 value */
+       __be32          reca_e_stat;    /* ESB (exchange status block) status */
+};
+
+/*
+ * ELS_PRLI - Process login request and response.
+ */
+struct fc_els_prli {
+       __u8            prli_cmd;       /* command */
+       __u8            prli_spp_len;   /* length of each serv. parm. page */
+       __be16          prli_len;       /* length of entire payload */
+       /* service parameter pages follow */
+};
+
+/*
+ * ELS_PRLO - Process logout request and response.
+ */
+struct fc_els_prlo {
+       __u8            prlo_cmd;       /* command */
+       __u8            prlo_obs;       /* obsolete, but shall be set to 10h */
+       __be16          prlo_len;       /* payload length */
+};
+
+/*
+ * ELS_ADISC payload
+ */
+struct fc_els_adisc {
+       __u8            adisc_cmd;
+       __u8            adisc_resv[3];
+       __u8            adisc_resv1;
+       __u8            adisc_hard_addr[3];
+       __be64          adisc_wwpn;
+       __be64          adisc_wwnn;
+       __u8            adisc_resv2;
+       __u8            adisc_port_id[3];
+} __attribute__((__packed__));
+
+/*
+ * ELS_LOGO - process or fabric logout.
+ */
+struct fc_els_logo {
+       __u8            fl_cmd;         /* command code */
+       __u8            fl_zero[3];     /* specified as zero - part of cmd */
+       __u8            fl_resvd;       /* reserved */
+       __u8            fl_n_port_id[3];/* N port ID */
+       __be64          fl_n_port_wwn;  /* port name */
+};
+
+/*
+ * ELS_RTV - read timeout value.
+ */
+struct fc_els_rtv {
+       __u8            rtv_cmd;        /* command code 0x0e */
+       __u8            rtv_zero[3];    /* specified as zero - part of cmd */
+};
+
+/*
+ * LS_ACC for ELS_RTV - read timeout value.
+ */
+struct fc_els_rtv_acc {
+       __u8            rtv_cmd;        /* command code 0x02 */
+       __u8            rtv_zero[3];    /* specified as zero - part of cmd */
+       __be32          rtv_r_a_tov;    /* resource allocation timeout value */
+       __be32          rtv_e_d_tov;    /* error detection timeout value */
+       __be32          rtv_toq;        /* timeout qualifier (see below) */
+};
+
+/*
+ * rtv_toq bits.
+ */
+#define        FC_ELS_RTV_EDRES (1 << 26)      /* E_D_TOV resolution is nS else mS */
+#define        FC_ELS_RTV_RTTOV (1 << 19)      /* R_T_TOV is 100 uS else 100 mS */
+
+/*
+ * ELS_SCR - state change registration payload.
+ */
+struct fc_els_scr {
+       __u8            scr_cmd;        /* command code */
+       __u8            scr_resv[6];    /* reserved */
+       __u8            scr_reg_func;   /* registration function (see below) */
+};
+
+enum fc_els_scr_func {
+       ELS_SCRF_FAB =  1,      /* fabric-detected registration */
+       ELS_SCRF_NPORT = 2,     /* Nx_Port-detected registration */
+       ELS_SCRF_FULL = 3,      /* full registration */
+       ELS_SCRF_CLEAR = 255,   /* remove any current registrations */
+};
+
+/*
+ * ELS_RSCN - registered state change notification payload.
+ */
+struct fc_els_rscn {
+       __u8            rscn_cmd;       /* RSCN opcode (0x61) */
+       __u8            rscn_page_len;  /* page length (4) */
+       __be16          rscn_plen;      /* payload length including this word */
+
+       /* followed by 4-byte generic affected Port_ID pages */
+};
+
+struct fc_els_rscn_page {
+       __u8            rscn_page_flags; /* event and address format */
+       __u8            rscn_fid[3];    /* fabric ID */
+};
+
+#define        ELS_RSCN_EV_QUAL_BIT    2       /* shift count for event qualifier */
+#define        ELS_RSCN_EV_QUAL_MASK   0xf     /* mask for event qualifier */
+#define        ELS_RSCN_ADDR_FMT_BIT   0       /* shift count for address format */
+#define        ELS_RSCN_ADDR_FMT_MASK  0x3     /* mask for address format */
+
+enum fc_els_rscn_ev_qual {
+       ELS_EV_QUAL_NONE = 0,           /* unspecified */
+       ELS_EV_QUAL_NS_OBJ = 1,         /* changed name server object */
+       ELS_EV_QUAL_PORT_ATTR = 2,      /* changed port attribute */
+       ELS_EV_QUAL_SERV_OBJ = 3,       /* changed service object */
+       ELS_EV_QUAL_SW_CONFIG = 4,      /* changed switch configuration */
+       ELS_EV_QUAL_REM_OBJ = 5,        /* removed object */
+};
+
+enum fc_els_rscn_addr_fmt {
+       ELS_ADDR_FMT_PORT = 0,  /* rscn_fid is a port address */
+       ELS_ADDR_FMT_AREA = 1,  /* rscn_fid is an area address */
+       ELS_ADDR_FMT_DOM = 2,   /* rscn_fid is a domain address */
+       ELS_ADDR_FMT_FAB = 3,   /* anything on fabric may have changed */
+};
+
+/*
+ * ELS_RNID - request Node ID.
+ */
+struct fc_els_rnid {
+       __u8            rnid_cmd;       /* RNID opcode (0x78) */
+       __u8            rnid_resv[3];   /* reserved */
+       __u8            rnid_fmt;       /* data format */
+       __u8            rnid_resv2[3];  /* reserved */
+};
+
+/*
+ * Node Identification Data formats (rnid_fmt)
+ */
+enum fc_els_rnid_fmt {
+       ELS_RNIDF_NONE = 0,             /* no specific identification data */
+       ELS_RNIDF_GEN = 0xdf,           /* general topology discovery format */
+};
+
+/*
+ * ELS_RNID response.
+ */
+struct fc_els_rnid_resp {
+       __u8            rnid_cmd;       /* response code (LS_ACC) */
+       __u8            rnid_resv[3];   /* reserved */
+       __u8            rnid_fmt;       /* data format */
+       __u8            rnid_cid_len;   /* common ID data length */
+       __u8            rnid_resv2;     /* reserved */
+       __u8            rnid_sid_len;   /* specific ID data length */
+};
+
+struct fc_els_rnid_cid {
+       __be64          rnid_wwpn;      /* N port name */
+       __be64          rnid_wwnn;      /* node name */
+};
+
+struct fc_els_rnid_gen {
+       __u8            rnid_vend_id[16]; /* vendor-unique ID */
+       __be32          rnid_atype;     /* associated type (see below) */
+       __be32          rnid_phys_port; /* physical port number */
+       __be32          rnid_att_nodes; /* number of attached nodes */
+       __u8            rnid_node_mgmt; /* node management (see below) */
+       __u8            rnid_ip_ver;    /* IP version (see below) */
+       __be16          rnid_prot_port; /* UDP / TCP port number */
+       __be32          rnid_ip_addr[4]; /* IP address */
+       __u8            rnid_resvd[2];  /* reserved */
+       __be16          rnid_vend_spec; /* vendor-specific field */
+};
+
+enum fc_els_rnid_atype {
+       ELS_RNIDA_UNK =         0x01,   /* unknown */
+       ELS_RNIDA_OTHER =       0x02,   /* none of the following */
+       ELS_RNIDA_HUB =         0x03,
+       ELS_RNIDA_SWITCH =      0x04,
+       ELS_RNIDA_GATEWAY =     0x05,
+       ELS_RNIDA_CONV =        0x06,   /* Obsolete, do not use this value */
+       ELS_RNIDA_HBA =         0x07,   /* Obsolete, do not use this value */
+       ELS_RNIDA_PROXY =       0x08,   /* Obsolete, do not use this value */
+       ELS_RNIDA_STORAGE =     0x09,
+       ELS_RNIDA_HOST =        0x0a,
+       ELS_RNIDA_SUBSYS =      0x0b,   /* storage subsystem (e.g., RAID) */
+       ELS_RNIDA_ACCESS =      0x0e,   /* access device (e.g. media changer) */
+       ELS_RNIDA_NAS =         0x11,   /* NAS server */
+       ELS_RNIDA_BRIDGE =      0x12,   /* bridge */
+       ELS_RNIDA_VIRT =        0x13,   /* virtualization device */
+       ELS_RNIDA_MF =          0xff,   /* multifunction device (bits below) */
+       ELS_RNIDA_MF_HUB =      1UL << 31,      /* hub */
+       ELS_RNIDA_MF_SW =       1UL << 30,      /* switch */
+       ELS_RNIDA_MF_GW =       1UL << 29,      /* gateway */
+       ELS_RNIDA_MF_ST =       1UL << 28,      /* storage */
+       ELS_RNIDA_MF_HOST =     1UL << 27,      /* host */
+       ELS_RNIDA_MF_SUB =      1UL << 26,      /* storage subsystem */
+       ELS_RNIDA_MF_ACC =      1UL << 25,      /* storage access dev */
+       ELS_RNIDA_MF_WDM =      1UL << 24,      /* wavelength division mux */
+       ELS_RNIDA_MF_NAS =      1UL << 23,      /* NAS server */
+       ELS_RNIDA_MF_BR =       1UL << 22,      /* bridge */
+       ELS_RNIDA_MF_VIRT =     1UL << 21,      /* virtualization device */
+};
+
+enum fc_els_rnid_mgmt {
+       ELS_RNIDM_SNMP =        0,
+       ELS_RNIDM_TELNET =      1,
+       ELS_RNIDM_HTTP =        2,
+       ELS_RNIDM_HTTPS =       3,
+       ELS_RNIDM_XML =         4,      /* HTTP + XML */
+};
+
+enum fc_els_rnid_ipver {
+       ELS_RNIDIP_NONE =       0,      /* no IP support or node mgmt. */
+       ELS_RNIDIP_V4 =         1,      /* IPv4 */
+       ELS_RNIDIP_V6 =         2,      /* IPv6 */
+};
+
+/*
+ * ELS RPL - Read Port List.
+ */
+struct fc_els_rpl {
+       __u8            rpl_cmd;        /* command */
+       __u8            rpl_resv[5];    /* reserved - must be zero */
+       __be16          rpl_max_size;   /* maximum response size or zero */
+       __u8            rpl_resv1;      /* reserved - must be zero */
+       __u8            rpl_index[3];   /* starting index */
+};
+
+/*
+ * Port number block in RPL response.
+ */
+struct fc_els_pnb {
+       __be32          pnb_phys_pn;    /* physical port number */
+       __u8            pnb_resv;       /* reserved */
+       __u8            pnb_port_id[3]; /* port ID */
+       __be64          pnb_wwpn;       /* port name */
+};
+
+/*
+ * RPL LS_ACC response.
+ */
+struct fc_els_rpl_resp {
+       __u8            rpl_cmd;        /* ELS_LS_ACC */
+       __u8            rpl_resv1;      /* reserved - must be zero */
+       __be16          rpl_plen;       /* payload length */
+       __u8            rpl_resv2;      /* reserved - must be zero */
+       __u8            rpl_llen[3];    /* list length */
+       __u8            rpl_resv3;      /* reserved - must be zero */
+       __u8            rpl_index[3];   /* starting index */
+       /*
+        * Trailing variable-length array using the pre-C99 [1] idiom
+        * (UAPI: layout must not change).  The entry count is presumably
+        * derived from rpl_llen above - confirm against FC-LS.
+        */
+       struct fc_els_pnb rpl_pnb[1];   /* variable number of PNBs */
+};
+
+/*
+ * Link Error Status Block.
+ */
+struct fc_els_lesb {
+       __be32          lesb_link_fail; /* link failure count */
+       __be32          lesb_sync_loss; /* loss of synchronization count */
+       __be32          lesb_sig_loss;  /* loss of signal count */
+       __be32          lesb_prim_err;  /* primitive sequence error count */
+       __be32          lesb_inv_word;  /* invalid transmission word count */
+       __be32          lesb_inv_crc;   /* invalid CRC count */
+};
+
+/*
+ * ELS RPS - Read Port Status Block request.
+ */
+struct fc_els_rps {
+       __u8            rps_cmd;        /* command */
+       __u8            rps_resv[2];    /* reserved - must be zero */
+       __u8            rps_flag;       /* flag - see below */
+       __be64          rps_port_spec;  /* port selection */
+};
+
+enum fc_els_rps_flag {
+       FC_ELS_RPS_DID =        0x00,   /* port identified by D_ID of req. */
+       FC_ELS_RPS_PPN =        0x01,   /* port_spec is physical port number */
+       FC_ELS_RPS_WWPN =       0x02,   /* port_spec is port WWN */
+};
+
+/*
+ * ELS RPS LS_ACC response.
+ */
+struct fc_els_rps_resp {
+       __u8            rps_cmd;        /* command - LS_ACC */
+       __u8            rps_resv[2];    /* reserved - must be zero */
+       __u8            rps_flag;       /* flag - see below */
+       __u8            rps_resv2[2];   /* reserved */
+       __be16          rps_status;     /* port status - see below */
+       struct fc_els_lesb rps_lesb;    /* link error status block */
+};
+
+enum fc_els_rps_resp_flag {
+       FC_ELS_RPS_LPEV =       0x01,   /* L_port extension valid */
+};
+
+enum fc_els_rps_resp_status {
+       FC_ELS_RPS_PTP =        1 << 5, /* point-to-point connection */
+       FC_ELS_RPS_LOOP =       1 << 4, /* loop mode */
+       FC_ELS_RPS_FAB =        1 << 3, /* fabric present */
+       FC_ELS_RPS_NO_SIG =     1 << 2, /* loss of signal */
+       FC_ELS_RPS_NO_SYNC =    1 << 1, /* loss of synchronization */
+       FC_ELS_RPS_RESET =      1 << 0, /* in link reset protocol */
+};
+
+/*
+ * ELS LIRR - Link Incident Record Registration request.
+ */
+struct fc_els_lirr {
+       __u8            lirr_cmd;       /* command */
+       __u8            lirr_resv[3];   /* reserved - must be zero */
+       __u8            lirr_func;      /* registration function */
+       __u8            lirr_fmt;       /* FC-4 type of RLIR requested */
+       __u8            lirr_resv2[2];  /* reserved - must be zero */
+};
+
+enum fc_els_lirr_func {
+       ELS_LIRR_SET_COND =     0x01,   /* set - conditionally receive */
+       ELS_LIRR_SET_UNCOND =   0x02,   /* set - unconditionally receive */
+       ELS_LIRR_CLEAR =        0xff    /* clear registration */
+};
+
+/*
+ * ELS SRL - Scan Remote Loop request.
+ */
+struct fc_els_srl {
+       __u8            srl_cmd;        /* command */
+       __u8            srl_resv[3];    /* reserved - must be zero */
+       __u8            srl_flag;       /* flag - see below */
+       __u8            srl_flag_param[3];      /* flag parameter */
+};
+
+enum fc_els_srl_flag {
+       FC_ELS_SRL_ALL =        0x00,   /* scan all FL ports */
+       FC_ELS_SRL_ONE =        0x01,   /* scan specified loop */
+       FC_ELS_SRL_EN_PER =     0x02,   /* enable periodic scanning (param) */
+       FC_ELS_SRL_DIS_PER =    0x03,   /* disable periodic scanning */
+};
+
+/*
+ * ELS RLS - Read Link Error Status Block request.
+ */
+struct fc_els_rls {
+       __u8            rls_cmd;        /* command */
+       __u8            rls_resv[4];    /* reserved - must be zero */
+       __u8            rls_port_id[3]; /* port ID */
+};
+
+/*
+ * ELS RLS LS_ACC Response.
+ */
+struct fc_els_rls_resp {
+       __u8            rls_cmd;        /* ELS_LS_ACC */
+       __u8            rls_resv[3];    /* reserved - must be zero */
+       struct fc_els_lesb rls_lesb;    /* link error status block */
+};
+
+/*
+ * ELS RLIR - Registered Link Incident Report.
+ * This is followed by the CLIR and the CLID, described below.
+ */
+struct fc_els_rlir {
+       __u8            rlir_cmd;       /* command */
+       __u8            rlir_resv[3];   /* reserved - must be zero */
+       __u8            rlir_fmt;       /* format (FC4-type if type specific) */
+       __u8            rlir_clr_len;   /* common link incident record length */
+       __u8            rlir_cld_len;   /* common link incident desc. length */
+       __u8            rlir_slr_len;   /* spec. link incident record length */
+};
+
+/*
+ * CLIR - Common Link Incident Record Data. - Sent via RLIR.
+ */
+struct fc_els_clir {
+       __be64          clir_wwpn;      /* incident port name */
+       __be64          clir_wwnn;      /* incident port node name */
+       __u8            clir_port_type; /* incident port type */
+       __u8            clir_port_id[3];        /* incident port ID */
+
+       __be64          clir_conn_wwpn; /* connected port name */
+       __be64          clir_conn_wwnn; /* connected node name */
+       __be64          clir_fab_name;  /* fabric name */
+       __be32          clir_phys_port; /* physical port number */
+       __be32          clir_trans_id;  /* transaction ID */
+       __u8            clir_resv[3];   /* reserved */
+       __u8            clir_ts_fmt;    /* time stamp format */
+       __be64          clir_timestamp; /* time stamp */
+};
+
+/*
+ * CLIR clir_ts_fmt - time stamp format values.
+ */
+enum fc_els_clir_ts_fmt {
+       ELS_CLIR_TS_UNKNOWN =   0,      /* time stamp field unknown */
+       ELS_CLIR_TS_SEC_FRAC =  1,      /* time in seconds and fractions */
+       ELS_CLIR_TS_CSU =       2,      /* time in clock synch update format */
+};
+
+/*
+ * Common Link Incident Descriptor - sent via RLIR.
+ */
+struct fc_els_clid {
+       __u8            clid_iq;        /* incident qualifier flags */
+       __u8            clid_ic;        /* incident code */
+       __be16          clid_epai;      /* domain/area of ISL */
+};
+
+/*
+ * CLID incident qualifier flags.
+ */
+enum fc_els_clid_iq {
+       ELS_CLID_SWITCH =       0x20,   /* incident port is a switch node */
+       ELS_CLID_E_PORT =       0x10,   /* incident is an ISL (E) port */
+       ELS_CLID_SEV_MASK =     0x0c,   /* severity 2-bit field mask */
+       ELS_CLID_SEV_INFO =     0x00,   /* report is informational */
+       ELS_CLID_SEV_INOP =     0x08,   /* link not operational */
+       ELS_CLID_SEV_DEG =      0x04,   /* link degraded but operational */
+       ELS_CLID_LASER =        0x02,   /* subassembly is a laser */
+       ELS_CLID_FRU =          0x01,   /* format can identify a FRU */
+};
+
+/*
+ * CLID incident code.
+ */
+enum fc_els_clid_ic {
+       ELS_CLID_IC_IMPL =      1,      /* implicit incident */
+       ELS_CLID_IC_BER =       2,      /* bit-error-rate threshold exceeded */
+       ELS_CLID_IC_LOS =       3,      /* loss of synch or signal */
+       ELS_CLID_IC_NOS =       4,      /* non-operational primitive sequence */
+       ELS_CLID_IC_PST =       5,      /* primitive sequence timeout */
+       ELS_CLID_IC_INVAL =     6,      /* invalid primitive sequence */
+       ELS_CLID_IC_LOOP_TO =   7,      /* loop initialization time out */
+       ELS_CLID_IC_LIP =       8,      /* receiving LIP */
+};
+
+#endif /* _FC_ELS_H_ */
diff --git a/include/uapi/scsi/fc/fc_fs.h b/include/uapi/scsi/fc/fc_fs.h
new file mode 100644 (file)
index 0000000..50f28b1
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FS_H_
+#define _FC_FS_H_
+
+#include <linux/types.h>
+
+/*
+ * Fibre Channel Framing and Signalling definitions.
+ * From T11 FC-FS-2 Rev 0.90 - 9 August 2005.
+ */
+
+/*
+ * Frame header
+ */
+struct fc_frame_header {
+       __u8          fh_r_ctl; /* routing control */
+       __u8          fh_d_id[3];       /* Destination ID */
+
+       __u8          fh_cs_ctl;        /* class of service control / pri */
+       __u8          fh_s_id[3];       /* Source ID */
+
+       __u8          fh_type;          /* see enum fc_fh_type below */
+       __u8          fh_f_ctl[3];      /* frame control */
+
+       __u8          fh_seq_id;        /* sequence ID */
+       __u8          fh_df_ctl;        /* data field control */
+       __be16        fh_seq_cnt;       /* sequence count */
+
+       __be16        fh_ox_id;         /* originator exchange ID */
+       __be16        fh_rx_id;         /* responder exchange ID */
+       __be32        fh_parm_offset;   /* parameter or relative offset */
+};
+
+#define FC_FRAME_HEADER_LEN 24 /* expected length of structure */
+
+#define FC_MAX_PAYLOAD  2112U          /* max payload length in bytes */
+#define FC_MIN_MAX_PAYLOAD  256U       /* lower limit on max payload */
+
+#define FC_MAX_FRAME   (FC_MAX_PAYLOAD + FC_FRAME_HEADER_LEN)
+#define FC_MIN_MAX_FRAME (FC_MIN_MAX_PAYLOAD + FC_FRAME_HEADER_LEN)
+
+/*
+ * fh_r_ctl - Routing control definitions.
+ */
+    /*
+     * FC-4 device_data.
+     */
+enum fc_rctl {
+       FC_RCTL_DD_UNCAT = 0x00,        /* uncategorized information */
+       FC_RCTL_DD_SOL_DATA = 0x01,     /* solicited data */
+       FC_RCTL_DD_UNSOL_CTL = 0x02,    /* unsolicited control */
+       FC_RCTL_DD_SOL_CTL = 0x03,      /* solicited control or reply */
+       FC_RCTL_DD_UNSOL_DATA = 0x04,   /* unsolicited data */
+       FC_RCTL_DD_DATA_DESC = 0x05,    /* data descriptor */
+       FC_RCTL_DD_UNSOL_CMD = 0x06,    /* unsolicited command */
+       FC_RCTL_DD_CMD_STATUS = 0x07,   /* command status */
+
+#define FC_RCTL_ILS_REQ FC_RCTL_DD_UNSOL_CTL   /* ILS request */
+#define FC_RCTL_ILS_REP FC_RCTL_DD_SOL_CTL     /* ILS reply */
+
+       /*
+        * Extended Link_Data
+        */
+       FC_RCTL_ELS_REQ = 0x22, /* extended link services request */
+       FC_RCTL_ELS_REP = 0x23, /* extended link services reply */
+       FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */
+       FC_RCTL_ELS4_REP = 0x33, /* FC-4 ELS reply */
+       /*
+        * Optional Extended Headers
+        */
+       FC_RCTL_VFTH = 0x50,    /* virtual fabric tagging header */
+       FC_RCTL_IFRH = 0x51,    /* inter-fabric routing header */
+       FC_RCTL_ENCH = 0x52,    /* encapsulation header */
+       /*
+        * Basic Link Services fh_r_ctl values.
+        */
+       FC_RCTL_BA_NOP = 0x80,  /* basic link service NOP */
+       FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */
+       FC_RCTL_BA_RMC = 0x82,  /* remove connection */
+       FC_RCTL_BA_ACC = 0x84,  /* basic accept */
+       FC_RCTL_BA_RJT = 0x85,  /* basic reject */
+       FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */
+       /*
+        * Link Control Information.
+        */
+       FC_RCTL_ACK_1 = 0xc0,   /* acknowledge_1 */
+       FC_RCTL_ACK_0 = 0xc1,   /* acknowledge_0 */
+       FC_RCTL_P_RJT = 0xc2,   /* port reject */
+       FC_RCTL_F_RJT = 0xc3,   /* fabric reject */
+       FC_RCTL_P_BSY = 0xc4,   /* port busy */
+       FC_RCTL_F_BSY = 0xc5,   /* fabric busy to data frame */
+       FC_RCTL_F_BSYL = 0xc6,  /* fabric busy to link control frame */
+       FC_RCTL_LCR = 0xc7,     /* link credit reset */
+       FC_RCTL_END = 0xc9,     /* end */
+};
+                                   /* incomplete list of definitions */
+
+/*
+ * R_CTL names initializer.
+ * Please keep this matching the above definitions.
+ */
+#define FC_RCTL_NAMES_INIT { \
+       [FC_RCTL_DD_UNCAT] =            "uncat",                        \
+       [FC_RCTL_DD_SOL_DATA] =         "sol data",                     \
+       [FC_RCTL_DD_UNSOL_CTL] =        "unsol ctl",                    \
+       [FC_RCTL_DD_SOL_CTL] =          "sol ctl/reply",                \
+       [FC_RCTL_DD_UNSOL_DATA] =       "unsol data",                   \
+       [FC_RCTL_DD_DATA_DESC] =        "data desc",                    \
+       [FC_RCTL_DD_UNSOL_CMD] =        "unsol cmd",                    \
+       [FC_RCTL_DD_CMD_STATUS] =       "cmd status",                   \
+       [FC_RCTL_ELS_REQ] =             "ELS req",                      \
+       [FC_RCTL_ELS_REP] =             "ELS rep",                      \
+       [FC_RCTL_ELS4_REQ] =            "FC-4 ELS req",                 \
+       [FC_RCTL_ELS4_REP] =            "FC-4 ELS rep",                 \
+       [FC_RCTL_BA_NOP] =              "BLS NOP",                      \
+       [FC_RCTL_BA_ABTS] =             "BLS abort",                    \
+       [FC_RCTL_BA_RMC] =              "BLS remove connection",        \
+       [FC_RCTL_BA_ACC] =              "BLS accept",                   \
+       [FC_RCTL_BA_RJT] =              "BLS reject",                   \
+       [FC_RCTL_BA_PRMT] =             "BLS dedicated connection preempted", \
+       [FC_RCTL_ACK_1] =               "LC ACK_1",                     \
+       [FC_RCTL_ACK_0] =               "LC ACK_0",                     \
+       [FC_RCTL_P_RJT] =               "LC port reject",               \
+       [FC_RCTL_F_RJT] =               "LC fabric reject",             \
+       [FC_RCTL_P_BSY] =               "LC port busy",                 \
+       [FC_RCTL_F_BSY] =               "LC fabric busy to data frame", \
+       [FC_RCTL_F_BSYL] =              "LC fabric busy to link control frame",\
+       [FC_RCTL_LCR] =                 "LC link credit reset",         \
+       [FC_RCTL_END] =                 "LC end",                       \
+}
+
+/*
+ * Well-known fabric addresses.
+ */
+enum fc_well_known_fid {
+       FC_FID_NONE =           0x000000,       /* No destination */
+       FC_FID_BCAST =          0xffffff,       /* broadcast */
+       FC_FID_FLOGI =          0xfffffe,       /* fabric login */
+       FC_FID_FCTRL =          0xfffffd,       /* fabric controller */
+       FC_FID_DIR_SERV =       0xfffffc,       /* directory server */
+       FC_FID_TIME_SERV =      0xfffffb,       /* time server */
+       FC_FID_MGMT_SERV =      0xfffffa,       /* management server */
+       FC_FID_QOS =            0xfffff9,       /* QoS Facilitator */
+       FC_FID_ALIASES =        0xfffff8,       /* alias server (FC-PH2) */
+       FC_FID_SEC_KEY =        0xfffff7,       /* Security key dist. server */
+       FC_FID_CLOCK =          0xfffff6,       /* clock synch server */
+       FC_FID_MCAST_SERV =     0xfffff5,       /* multicast server */
+};
+
+#define        FC_FID_WELL_KNOWN_MAX   0xffffff /* highest well-known fabric ID */
+#define        FC_FID_WELL_KNOWN_BASE  0xfffff5 /* start of well-known fabric ID */
+
+/*
+ * Other well-known addresses, outside the above contiguous range.
+ */
+#define        FC_FID_DOM_MGR          0xfffc00        /* domain manager base */
+
+/*
+ * Fabric ID bytes.
+ */
+#define        FC_FID_DOMAIN           0
+#define        FC_FID_PORT             1
+#define        FC_FID_LINK             2
+
+/*
+ * fh_type codes
+ */
+enum fc_fh_type {
+       FC_TYPE_BLS =   0x00,   /* basic link service */
+       FC_TYPE_ELS =   0x01,   /* extended link service */
+       FC_TYPE_IP =    0x05,   /* IP over FC, RFC 4338 */
+       FC_TYPE_FCP =   0x08,   /* SCSI FCP */
+       FC_TYPE_CT =    0x20,   /* Fibre Channel Services (FC-CT) */
+       FC_TYPE_ILS =   0x22,   /* internal link service */
+};
+
+/*
+ * FC_TYPE names initializer.
+ * Please keep this matching the above definitions.
+ */
+#define FC_TYPE_NAMES_INIT {                           \
+       [FC_TYPE_BLS] =         "BLS",                  \
+       [FC_TYPE_ELS] =         "ELS",                  \
+       [FC_TYPE_IP] =          "IP",                   \
+       [FC_TYPE_FCP] =         "FCP",                  \
+       [FC_TYPE_CT] =          "CT",                   \
+       [FC_TYPE_ILS] =         "ILS",                  \
+}
+
+/*
+ * Exchange IDs.
+ */
+#define FC_XID_UNKNOWN  0xffff /* unknown exchange ID */
+#define FC_XID_MIN     0x0     /* supported min exchange ID */
+#define FC_XID_MAX     0xfffe  /* supported max exchange ID */
+
+/*
+ * fh_f_ctl - Frame control flags.
+ */
+#define        FC_FC_EX_CTX    (1 << 23)       /* sent by responder to exchange */
+#define        FC_FC_SEQ_CTX   (1 << 22)       /* sent by responder to sequence */
+#define        FC_FC_FIRST_SEQ (1 << 21)       /* first sequence of this exchange */
+#define        FC_FC_LAST_SEQ  (1 << 20)       /* last sequence of this exchange */
+#define        FC_FC_END_SEQ   (1 << 19)       /* last frame of sequence */
+#define        FC_FC_END_CONN  (1 << 18)       /* end of class 1 connection pending */
+#define        FC_FC_RES_B17   (1 << 17)       /* reserved */
+#define        FC_FC_SEQ_INIT  (1 << 16)       /* transfer of sequence initiative */
+#define        FC_FC_X_ID_REASS (1 << 15)      /* exchange ID has been changed */
+#define        FC_FC_X_ID_INVAL (1 << 14)      /* exchange ID invalidated */
+
+#define        FC_FC_ACK_1     (1 << 12)       /* 13:12 = 1: ACK_1 expected */
+#define        FC_FC_ACK_N     (2 << 12)       /* 13:12 = 2: ACK_N expected */
+#define        FC_FC_ACK_0     (3 << 12)       /* 13:12 = 3: ACK_0 expected */
+
+#define        FC_FC_RES_B11   (1 << 11)       /* reserved */
+#define        FC_FC_RES_B10   (1 << 10)       /* reserved */
+#define        FC_FC_RETX_SEQ  (1 << 9)        /* retransmitted sequence */
+#define        FC_FC_UNI_TX    (1 << 8)        /* unidirectional transmit (class 1) */
+#define        FC_FC_CONT_SEQ(i) ((i) << 6)
+#define        FC_FC_ABT_SEQ(i) ((i) << 4)
+#define        FC_FC_REL_OFF   (1 << 3)        /* parameter is relative offset */
+#define        FC_FC_RES2      (1 << 2)        /* reserved */
+#define        FC_FC_FILL(i)   ((i) & 3)       /* 1:0: bytes of trailing fill */
+
+/*
+ * BA_ACC payload.
+ */
+struct fc_ba_acc {
+       __u8            ba_seq_id_val;  /* SEQ_ID validity */
+#define FC_BA_SEQ_ID_VAL 0x80
+       __u8            ba_seq_id;      /* SEQ_ID of seq last deliverable */
+       __u8            ba_resvd[2];    /* reserved */
+       __be16          ba_ox_id;       /* OX_ID for aborted seq or exch */
+       __be16          ba_rx_id;       /* RX_ID for aborted seq or exch */
+       __be16          ba_low_seq_cnt; /* low SEQ_CNT of aborted seq */
+       __be16          ba_high_seq_cnt; /* high SEQ_CNT of aborted seq */
+};
+
+/*
+ * BA_RJT: Basic Reject payload.
+ */
+struct fc_ba_rjt {
+       __u8            br_resvd;       /* reserved */
+       __u8            br_reason;      /* reason code */
+       __u8            br_explan;      /* reason explanation */
+       __u8            br_vendor;      /* vendor unique code */
+};
+
+/*
+ * BA_RJT reason codes.
+ * From FS-2.
+ */
+enum fc_ba_rjt_reason {
+       FC_BA_RJT_NONE =        0,      /* in software this means no reject */
+       FC_BA_RJT_INVL_CMD =    0x01,   /* invalid command code */
+       FC_BA_RJT_LOG_ERR =     0x03,   /* logical error */
+       FC_BA_RJT_LOG_BUSY =    0x05,   /* logical busy */
+       FC_BA_RJT_PROTO_ERR =   0x07,   /* protocol error */
+       FC_BA_RJT_UNABLE =      0x09,   /* unable to perform request */
+       FC_BA_RJT_VENDOR =      0xff,   /* vendor-specific (see br_vendor) */
+};
+
+/*
+ * BA_RJT reason code explanations.
+ */
+enum fc_ba_rjt_explan {
+       FC_BA_RJT_EXP_NONE =    0x00,   /* no additional explanation */
+       FC_BA_RJT_INV_XID =     0x03,   /* invalid OX_ID-RX_ID combination */
+       FC_BA_RJT_ABT =         0x05,   /* sequence aborted, no seq info */
+};
+
+/*
+ * P_RJT or F_RJT: Port Reject or Fabric Reject parameter field.
+ */
+struct fc_pf_rjt {
+       /*
+        * NOTE(review): field name suggests an action code but the
+        * comment marks it reserved - confirm against FC-FS.
+        */
+       __u8            rj_action;      /* reserved */
+       __u8            rj_reason;      /* reason code */
+       __u8            rj_resvd;       /* reserved */
+       __u8            rj_vendor;      /* vendor unique code */
+};
+
+/*
+ * P_RJT and F_RJT reject reason codes.
+ */
+enum fc_pf_rjt_reason {
+       FC_RJT_NONE =           0,      /* non-reject (reserved by standard) */
+       FC_RJT_INVL_DID =       0x01,   /* invalid destination ID */
+       FC_RJT_INVL_SID =       0x02,   /* invalid source ID */
+       FC_RJT_P_UNAV_T =       0x03,   /* port unavailable, temporary */
+       FC_RJT_P_UNAV =         0x04,   /* port unavailable, permanent */
+       FC_RJT_CLS_UNSUP =      0x05,   /* class not supported */
+       FC_RJT_DEL_USAGE =      0x06,   /* delimiter usage error */
+       FC_RJT_TYPE_UNSUP =     0x07,   /* type not supported */
+       FC_RJT_LINK_CTL =       0x08,   /* invalid link control */
+       FC_RJT_R_CTL =          0x09,   /* invalid R_CTL field */
+       FC_RJT_F_CTL =          0x0a,   /* invalid F_CTL field */
+       FC_RJT_OX_ID =          0x0b,   /* invalid originator exchange ID */
+       FC_RJT_RX_ID =          0x0c,   /* invalid responder exchange ID */
+       FC_RJT_SEQ_ID =         0x0d,   /* invalid sequence ID */
+       FC_RJT_DF_CTL =         0x0e,   /* invalid DF_CTL field */
+       FC_RJT_SEQ_CNT =        0x0f,   /* invalid SEQ_CNT field */
+       FC_RJT_PARAM =          0x10,   /* invalid parameter field */
+       FC_RJT_EXCH_ERR =       0x11,   /* exchange error */
+       FC_RJT_PROTO =          0x12,   /* protocol error */
+       FC_RJT_LEN =            0x13,   /* incorrect length */
+       FC_RJT_UNEXP_ACK =      0x14,   /* unexpected ACK */
+       FC_RJT_FAB_CLASS =      0x15,   /* class unsupported by fabric entity */
+       FC_RJT_LOGI_REQ =       0x16,   /* login required */
+       FC_RJT_SEQ_XS =         0x17,   /* excessive sequences attempted */
+       FC_RJT_EXCH_EST =       0x18,   /* unable to establish exchange */
+       FC_RJT_FAB_UNAV =       0x1a,   /* fabric unavailable */
+       FC_RJT_VC_ID =          0x1b,   /* invalid VC_ID (class 4) */
+       FC_RJT_CS_CTL =         0x1c,   /* invalid CS_CTL field */
+       FC_RJT_INSUF_RES =      0x1d,   /* insuff. resources for VC (Class 4) */
+       FC_RJT_INVL_CLS =       0x1f,   /* invalid class of service */
+       FC_RJT_PREEMT_RJT =     0x20,   /* preemption request rejected */
+       FC_RJT_PREEMT_DIS =     0x21,   /* preemption not enabled */
+       FC_RJT_MCAST_ERR =      0x22,   /* multicast error */
+       FC_RJT_MCAST_ET =       0x23,   /* multicast error terminate */
+       FC_RJT_PRLI_REQ =       0x24,   /* process login required */
+       FC_RJT_INVL_ATT =       0x25,   /* invalid attachment */
+       FC_RJT_VENDOR =         0xff,   /* vendor specific reject */
+};
+
+/* default timeout values */
+
+#define FC_DEF_E_D_TOV 2000UL
+#define FC_DEF_R_A_TOV 10000UL
+
+#endif /* _FC_FS_H_ */
diff --git a/include/uapi/scsi/fc/fc_gs.h b/include/uapi/scsi/fc/fc_gs.h
new file mode 100644 (file)
index 0000000..a37346d
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_GS_H_
+#define        _FC_GS_H_
+
+#include <linux/types.h>
+
+/*
+ * Fibre Channel Services - Common Transport.
+ * From T11.org FC-GS-2 Rev 5.3 November 1998.
+ */
+
+struct fc_ct_hdr {
+       __u8            ct_rev;         /* revision */
+       __u8            ct_in_id[3];    /* N_Port ID of original requestor */
+       __u8            ct_fs_type;     /* type of fibre channel service */
+       __u8            ct_fs_subtype;  /* subtype */
+       __u8            ct_options;
+       __u8            _ct_resvd1;
+       __be16          ct_cmd;         /* command / response code */
+       __be16          ct_mr_size;     /* maximum / residual size */
+       __u8            _ct_resvd2;
+       __u8            ct_reason;      /* reject reason */
+       __u8            ct_explan;      /* reason code explanation */
+       __u8            ct_vendor;      /* vendor unique data */
+};
+
+#define        FC_CT_HDR_LEN   16      /* expected sizeof (struct fc_ct_hdr) */
+
+enum fc_ct_rev {
+       FC_CT_REV = 1           /* common transport revision */
+};
+
+/*
+ * ct_fs_type values.
+ */
+enum fc_ct_fs_type {
+       FC_FST_ALIAS =  0xf8,   /* alias service */
+       FC_FST_MGMT =   0xfa,   /* management service */
+       FC_FST_TIME =   0xfb,   /* time service */
+       FC_FST_DIR =    0xfc,   /* directory service */
+};
+
+/*
+ * ct_cmd: Command / response codes
+ */
+enum fc_ct_cmd {
+       FC_FS_RJT =     0x8001, /* reject */
+       FC_FS_ACC =     0x8002, /* accept */
+};
+
+/*
+ * FS_RJT reason codes.
+ */
+enum fc_ct_reason {
+       FC_FS_RJT_CMD =         0x01,   /* invalid command code */
+       FC_FS_RJT_VER =         0x02,   /* invalid version level */
+       FC_FS_RJT_LOG =         0x03,   /* logical error */
+       FC_FS_RJT_IUSIZ =       0x04,   /* invalid IU size */
+       FC_FS_RJT_BSY =         0x05,   /* logical busy */
+       FC_FS_RJT_PROTO =       0x07,   /* protocol error */
+       FC_FS_RJT_UNABL =       0x09,   /* unable to perform command request */
+       FC_FS_RJT_UNSUP =       0x0b,   /* command not supported */
+};
+
+/*
+ * FS_RJT reason code explanations.
+ */
+enum fc_ct_explan {
+       FC_FS_EXP_NONE =        0x00,   /* no additional explanation */
+       FC_FS_EXP_PID =         0x01,   /* port ID not registered */
+       FC_FS_EXP_PNAM =        0x02,   /* port name not registered */
+       FC_FS_EXP_NNAM =        0x03,   /* node name not registered */
+       FC_FS_EXP_COS =         0x04,   /* class of service not registered */
+       FC_FS_EXP_FTNR =        0x07,   /* FC-4 types not registered */
+       /* definitions not complete */
+};
+
+#endif /* _FC_GS_H_ */
diff --git a/include/uapi/scsi/fc/fc_ns.h b/include/uapi/scsi/fc/fc_ns.h
new file mode 100644 (file)
index 0000000..f7751d5
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_NS_H_
+#define        _FC_NS_H_
+
+#include <linux/types.h>
+
+/*
+ * Fibre Channel Services - Name Service (dNS)
+ * From T11.org FC-GS-2 Rev 5.3 November 1998.
+ */
+
+/*
+ * Common-transport sub-type for Name Server.
+ */
+#define        FC_NS_SUBTYPE       2   /* fs_ct_hdr.ct_fs_subtype */
+
+/*
+ * Name server Requests.
+ * Note:  this is an incomplete list, some unused requests are omitted.
+ */
+enum fc_ns_req {
+       FC_NS_GA_NXT =  0x0100,         /* get all next */
+       FC_NS_GI_A =    0x0101,         /* get identifiers - scope */
+       FC_NS_GPN_ID =  0x0112,         /* get port name by ID */
+       FC_NS_GNN_ID =  0x0113,         /* get node name by ID */
+       FC_NS_GSPN_ID = 0x0118,         /* get symbolic port name */
+       FC_NS_GID_PN =  0x0121,         /* get ID for port name */
+       FC_NS_GID_NN =  0x0131,         /* get IDs for node name */
+       FC_NS_GID_FT =  0x0171,         /* get IDs by FC4 type */
+       FC_NS_GPN_FT =  0x0172,         /* get port names by FC4 type */
+       FC_NS_GID_PT =  0x01a1,         /* get IDs by port type */
+       FC_NS_RPN_ID =  0x0212,         /* reg port name for ID */
+       FC_NS_RNN_ID =  0x0213,         /* reg node name for ID */
+       FC_NS_RFT_ID =  0x0217,         /* reg FC4 type for ID */
+       FC_NS_RSPN_ID = 0x0218,         /* reg symbolic port name */
+       FC_NS_RFF_ID =  0x021f,         /* reg FC4 Features for ID */
+       FC_NS_RSNN_NN = 0x0239,         /* reg symbolic node name */
+};
+
+/*
+ * Port type values.
+ */
+enum fc_ns_pt {
+       FC_NS_UNID_PORT = 0x00, /* unidentified */
+       FC_NS_N_PORT =  0x01,   /* N port */
+       FC_NS_NL_PORT = 0x02,   /* NL port */
+       FC_NS_FNL_PORT = 0x03,  /* F/NL port */
+       FC_NS_NX_PORT = 0x7f,   /* Nx port */
+       FC_NS_F_PORT =  0x81,   /* F port */
+       FC_NS_FL_PORT = 0x82,   /* FL port */
+       FC_NS_E_PORT =  0x84,   /* E port */
+       FC_NS_B_PORT =  0x85,   /* B port */
+};
+
+/*
+ * Port type object.
+ */
+struct fc_ns_pt_obj {
+       __u8            pt_type;
+};
+
+/*
+ * Port ID object
+ */
+struct fc_ns_fid {
+       __u8            fp_flags;       /* flags for responses only */
+       __u8            fp_fid[3];
+};
+
+/*
+ * fp_flags in port ID object, for responses only.
+ */
+#define        FC_NS_FID_LAST  0x80            /* last object */
+
+/*
+ * FC4-types object.
+ */
+#define        FC_NS_TYPES     256     /* number of possible FC-4 types */
+#define        FC_NS_BPW       32      /* bits per word in bitmap */
+
+struct fc_ns_fts {
+       __be32  ff_type_map[FC_NS_TYPES / FC_NS_BPW]; /* bitmap of FC-4 types */
+};
+
+/*
+ * FC4-features object.
+ */
+struct fc_ns_ff        {
+       __be32  fd_feat[FC_NS_TYPES * 4 / FC_NS_BPW]; /* 4-bits per FC-type */
+};
+
+/*
+ * GID_PT request.
+ */
+struct fc_ns_gid_pt {
+       __u8            fn_pt_type;
+       __u8            fn_domain_id_scope;
+       __u8            fn_area_id_scope;
+       __u8            fn_resvd;
+};
+
+/*
+ * GID_FT or GPN_FT request.
+ */
+struct fc_ns_gid_ft {
+       __u8            fn_resvd;
+       __u8            fn_domain_id_scope;
+       __u8            fn_area_id_scope;
+       __u8            fn_fc4_type;
+};
+
+/*
+ * GPN_FT response.
+ */
+struct fc_gpn_ft_resp {
+       __u8            fp_flags;       /* see fp_flags definitions above */
+       __u8            fp_fid[3];      /* port ID */
+       __be32          fp_resvd;
+       __be64          fp_wwpn;        /* port name */
+};
+
+/*
+ * GID_PN request
+ */
+struct fc_ns_gid_pn {
+       __be64     fn_wwpn;    /* port name */
+};
+
+/*
+ * GID_PN response or GSPN_ID request
+ */
+struct fc_gid_pn_resp {
+       __u8      fp_resvd;
+       __u8      fp_fid[3];     /* port ID */
+};
+
+/*
+ * GSPN_ID response
+ */
+struct fc_gspn_resp {
+       __u8    fp_name_len;
+       char    fp_name[];
+};
+
+/*
+ * RFT_ID request - register FC-4 types for ID.
+ */
+struct fc_ns_rft_id {
+       struct fc_ns_fid fr_fid;        /* port ID object */
+       struct fc_ns_fts fr_fts;        /* FC-4 types object */
+};
+
+/*
+ * RPN_ID request - register port name for ID.
+ * RNN_ID request - register node name for ID.
+ */
+struct fc_ns_rn_id {
+       struct fc_ns_fid fr_fid;        /* port ID object */
+       __be64          fr_wwn;         /* node name or port name */
+} __attribute__((__packed__));
+
+/*
+ * RSNN_NN request - register symbolic node name
+ */
+struct fc_ns_rsnn {
+       __be64          fr_wwn;         /* node name */
+       __u8            fr_name_len;
+       char            fr_name[];
+} __attribute__((__packed__));
+
+/*
+ * RSPN_ID request - register symbolic port name
+ */
+struct fc_ns_rspn {
+       struct fc_ns_fid fr_fid;        /* port ID object */
+       __u8            fr_name_len;
+       char            fr_name[];
+} __attribute__((__packed__));
+
+/*
+ * RFF_ID request - register FC-4 Features for ID.
+ */
+struct fc_ns_rff_id {
+       struct fc_ns_fid fr_fid;        /* port ID object */
+       __u8            fr_resvd[2];
+       __u8            fr_feat;        /* FC-4 Feature bits */
+       __u8            fr_type;        /* FC-4 type */
+} __attribute__((__packed__));
+
+#endif /* _FC_NS_H_ */
diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
new file mode 100644 (file)
index 0000000..3031b90
--- /dev/null
@@ -0,0 +1,320 @@
+/*
+ *  FC Transport BSG Interface
+ *
+ *  Copyright (C) 2008   James Smart, Emulex Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef SCSI_BSG_FC_H
+#define SCSI_BSG_FC_H
+
+/*
+ * This file intended to be included by both kernel and user space
+ */
+
+/*
+ * FC Transport SGIO v4 BSG Message Support
+ */
+
+/* Default BSG request timeout (in seconds) */
+#define FC_DEFAULT_BSG_TIMEOUT         (10 * HZ)
+
+
+/*
+ * Request Message Codes supported by the FC Transport
+ */
+
+/* define the class masks for the message codes */
+#define FC_BSG_CLS_MASK                0xF0000000      /* find object class */
+#define FC_BSG_HST_MASK                0x80000000      /* fc host class */
+#define FC_BSG_RPT_MASK                0x40000000      /* fc rport class */
+
+       /* fc_host Message Codes */
+#define FC_BSG_HST_ADD_RPORT           (FC_BSG_HST_MASK | 0x00000001)
+#define FC_BSG_HST_DEL_RPORT           (FC_BSG_HST_MASK | 0x00000002)
+#define FC_BSG_HST_ELS_NOLOGIN         (FC_BSG_HST_MASK | 0x00000003)
+#define FC_BSG_HST_CT                  (FC_BSG_HST_MASK | 0x00000004)
+#define FC_BSG_HST_VENDOR              (FC_BSG_HST_MASK | 0x000000FF)
+
+       /* fc_rport Message Codes */
+#define FC_BSG_RPT_ELS                 (FC_BSG_RPT_MASK | 0x00000001)
+#define FC_BSG_RPT_CT                  (FC_BSG_RPT_MASK | 0x00000002)
+
+
+
+/*
+ * FC Address Identifiers in Message Structures :
+ *
+ *   Whenever a command payload contains a FC Address Identifier
+ *   (aka port_id), the value is effectively in big-endian
+ *   order, thus the array elements are decoded as follows:
+ *     element [0] is bits 23:16 of the FC Address Identifier
+ *     element [1] is bits 15:8 of the FC Address Identifier
+ *     element [2] is bits 7:0 of the FC Address Identifier
+ */
+
+
+/*
+ * FC Host Messages
+ */
+
+/* FC_BSG_HST_ADDR_PORT : */
+
+/* Request:
+ * This message requests the FC host to login to the remote port
+ * at the specified N_Port_Id.  The remote port is to be enumerated
+ * with the transport upon completion of the login.
+ */
+struct fc_bsg_host_add_rport {
+       uint8_t         reserved;
+
+       /* FC Address Identier of the remote port to login to */
+       uint8_t         port_id[3];
+};
+
+/* Response:
+ * There is no additional response data - fc_bsg_reply->result is sufficient
+ */
+
+
+/* FC_BSG_HST_DEL_RPORT : */
+
+/* Request:
+ * This message requests the FC host to remove an enumerated
+ * remote port and to terminate the login to it.
+ *
+ * Note: The driver is free to reject this request if it desires to
+ * remain logged in with the remote port.
+ */
+struct fc_bsg_host_del_rport {
+       uint8_t         reserved;
+
+       /* FC Address Identier of the remote port to logout of */
+       uint8_t         port_id[3];
+};
+
+/* Response:
+ * There is no additional response data - fc_bsg_reply->result is sufficient
+ */
+
+
+/* FC_BSG_HST_ELS_NOLOGIN : */
+
+/* Request:
+ * This message requests the FC_Host to send an ELS to a specific
+ * N_Port_ID. The host does not need to log into the remote port,
+ * nor does it need to enumerate the rport for further traffic
+ * (although, the FC host is free to do so if it desires).
+ */
+struct fc_bsg_host_els {
+       /*
+        * ELS Command Code being sent (must be the same as byte 0
+        * of the payload)
+        */
+       uint8_t         command_code;
+
+       /* FC Address Identier of the remote port to send the ELS to */
+       uint8_t         port_id[3];
+};
+
+/* Response:
+ */
+/* fc_bsg_ctels_reply->status values */
+#define FC_CTELS_STATUS_OK     0x00000000
+#define FC_CTELS_STATUS_REJECT 0x00000001
+#define FC_CTELS_STATUS_P_RJT  0x00000002
+#define FC_CTELS_STATUS_F_RJT  0x00000003
+#define FC_CTELS_STATUS_P_BSY  0x00000004
+#define FC_CTELS_STATUS_F_BSY  0x00000006
+struct fc_bsg_ctels_reply {
+       /*
+        * Note: An ELS LS_RJT may be reported in 2 ways:
+        *  a) A status of FC_CTELS_STATUS_OK is returned. The caller
+        *     is to look into the ELS receive payload to determine
+        *     LS_ACC or LS_RJT (by contents of word 0). The reject
+        *     data will be in word 1.
+        *  b) A status of FC_CTELS_STATUS_REJECT is returned, The
+        *     rjt_data field will contain valid data.
+        *
+        * Note: ELS LS_ACC is determined by an FC_CTELS_STATUS_OK, and
+        *   the receive payload word 0 indicates LS_ACC
+        *   (e.g. value is 0x02xxxxxx).
+        *
+        * Note: Similarly, a CT Reject may be reported in 2 ways:
+        *  a) A status of FC_CTELS_STATUS_OK is returned. The caller
+        *     is to look into the CT receive payload to determine
+        *     Accept or Reject (by contents of word 2). The reject
+        *     data will be in word 3.
+        *  b) A status of FC_CTELS_STATUS_REJECT is returned, The
+        *     rjt_data field will contain valid data.
+        *
+        * Note: x_RJT/BSY status will indicae that the rjt_data field
+        *   is valid and contains the reason/explanation values.
+        */
+       uint32_t        status;         /* See FC_CTELS_STATUS_xxx */
+
+       /* valid if status is not FC_CTELS_STATUS_OK */
+       struct  {
+               uint8_t action;         /* fragment_id for CT REJECT */
+               uint8_t reason_code;
+               uint8_t reason_explanation;
+               uint8_t vendor_unique;
+       } rjt_data;
+};
+
+
+/* FC_BSG_HST_CT : */
+
+/* Request:
+ * This message requests that a CT Request be performed with the
+ * indicated N_Port_ID. The driver is responsible for logging in with
+ * the fabric and/or N_Port_ID, etc as per FC rules. This request does
+ * not mandate that the driver must enumerate the destination in the
+ * transport. The driver is allowed to decide whether to enumerate it,
+ * and whether to tear it down after the request.
+ */
+struct fc_bsg_host_ct {
+       uint8_t         reserved;
+
+       /* FC Address Identier of the remote port to send the ELS to */
+       uint8_t         port_id[3];
+
+       /*
+        * We need words 0-2 of the generic preamble for the LLD's
+        */
+       uint32_t        preamble_word0; /* revision & IN_ID */
+       uint32_t        preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
+       uint32_t        preamble_word2; /* Cmd Code, Max Size */
+
+};
+/* Response:
+ *
+ * The reply structure is an fc_bsg_ctels_reply structure
+ */
+
+
+/* FC_BSG_HST_VENDOR : */
+
+/* Request:
+ * Note: When specifying vendor_id, be sure to read the Vendor Type and ID
+ *   formatting requirements specified in scsi_netlink.h
+ */
+struct fc_bsg_host_vendor {
+       /*
+        * Identifies the vendor that the message is formatted for. This
+        * should be the recipient of the message.
+        */
+       uint64_t vendor_id;
+
+       /* start of vendor command area */
+       uint32_t vendor_cmd[0];
+};
+
+/* Response:
+ */
+struct fc_bsg_host_vendor_reply {
+       /* start of vendor response area */
+       uint32_t vendor_rsp[0];
+};
+
+
+
+/*
+ * FC Remote Port Messages
+ */
+
+/* FC_BSG_RPT_ELS : */
+
+/* Request:
+ * This message requests that an ELS be performed with the rport.
+ */
+struct fc_bsg_rport_els {
+       /*
+        * ELS Command Code being sent (must be the same as
+        * byte 0 of the payload)
+        */
+       uint8_t els_code;
+};
+
+/* Response:
+ *
+ * The reply structure is an fc_bsg_ctels_reply structure
+ */
+
+
+/* FC_BSG_RPT_CT : */
+
+/* Request:
+ * This message requests that a CT Request be performed with the rport.
+ */
+struct fc_bsg_rport_ct {
+       /*
+        * We need words 0-2 of the generic preamble for the LLD's
+        */
+       uint32_t        preamble_word0; /* revision & IN_ID */
+       uint32_t        preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
+       uint32_t        preamble_word2; /* Cmd Code, Max Size */
+};
+/* Response:
+ *
+ * The reply structure is an fc_bsg_ctels_reply structure
+ */
+
+
+
+
+/* request (CDB) structure of the sg_io_v4 */
+struct fc_bsg_request {
+       uint32_t msgcode;
+       union {
+               struct fc_bsg_host_add_rport    h_addrport;
+               struct fc_bsg_host_del_rport    h_delrport;
+               struct fc_bsg_host_els          h_els;
+               struct fc_bsg_host_ct           h_ct;
+               struct fc_bsg_host_vendor       h_vendor;
+
+               struct fc_bsg_rport_els         r_els;
+               struct fc_bsg_rport_ct          r_ct;
+       } rqst_data;
+} __attribute__((packed));
+
+
+/* response (request sense data) structure of the sg_io_v4 */
+struct fc_bsg_reply {
+       /*
+        * The completion result. Result exists in two forms:
+        *  if negative, it is an -Exxx system errno value. There will
+        *    be no further reply information supplied.
+        *  else, it's the 4-byte scsi error result, with driver, host,
+        *    msg and status fields. The per-msgcode reply structure
+        *    will contain valid data.
+        */
+       uint32_t result;
+
+       /* If there was reply_payload, how much was recevied ? */
+       uint32_t reply_payload_rcv_len;
+
+       union {
+               struct fc_bsg_host_vendor_reply         vendor_reply;
+
+               struct fc_bsg_ctels_reply               ctels_reply;
+       } reply_data;
+};
+
+
+#endif /* SCSI_BSG_FC_H */
+
diff --git a/include/uapi/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h
new file mode 100644 (file)
index 0000000..62b4eda
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ *  SCSI Transport Netlink Interface
+ *    Used for the posting of outbound SCSI transport events
+ *
+ *  Copyright (C) 2006   James Smart, Emulex Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#ifndef SCSI_NETLINK_H
+#define SCSI_NETLINK_H
+
+#include <linux/netlink.h>
+#include <linux/types.h>
+
+/*
+ * This file intended to be included by both kernel and user space
+ */
+
+/* Single Netlink Message type to send all SCSI Transport messages */
+#define SCSI_TRANSPORT_MSG             NLMSG_MIN_TYPE + 1
+
+/* SCSI Transport Broadcast Groups */
+       /* leaving groups 0 and 1 unassigned */
+#define SCSI_NL_GRP_FC_EVENTS          (1<<2)          /* Group 2 */
+#define SCSI_NL_GRP_CNT                        3
+
+
+/* SCSI_TRANSPORT_MSG event message header */
+struct scsi_nl_hdr {
+       uint8_t version;
+       uint8_t transport;
+       uint16_t magic;
+       uint16_t msgtype;
+       uint16_t msglen;
+} __attribute__((aligned(sizeof(uint64_t))));
+
+/* scsi_nl_hdr->version value */
+#define SCSI_NL_VERSION                                1
+
+/* scsi_nl_hdr->magic value */
+#define SCSI_NL_MAGIC                          0xA1B2
+
+/* scsi_nl_hdr->transport value */
+#define SCSI_NL_TRANSPORT                      0
+#define SCSI_NL_TRANSPORT_FC                   1
+#define SCSI_NL_MAX_TRANSPORTS                 2
+
+/* Transport-based scsi_nl_hdr->msgtype values are defined in each transport */
+
+/*
+ * GENERIC SCSI scsi_nl_hdr->msgtype Values
+ */
+       /* kernel -> user */
+#define SCSI_NL_SHOST_VENDOR                   0x0001
+       /* user -> kernel */
+/* SCSI_NL_SHOST_VENDOR msgtype is kernel->user and user->kernel */
+
+
+/*
+ * Message Structures :
+ */
+
+/* macro to round up message lengths to 8byte boundary */
+#define SCSI_NL_MSGALIGN(len)          (((len) + 7) & ~7)
+
+
+/*
+ * SCSI HOST Vendor Unique messages :
+ *   SCSI_NL_SHOST_VENDOR
+ *
+ * Note: The Vendor Unique message payload will begin directly after
+ *      this structure, with the length of the payload per vmsg_datalen.
+ *
+ * Note: When specifying vendor_id, be sure to read the Vendor Type and ID
+ *   formatting requirements specified below
+ */
+struct scsi_nl_host_vendor_msg {
+       struct scsi_nl_hdr snlh;                /* must be 1st element ! */
+       uint64_t vendor_id;
+       uint16_t host_no;
+       uint16_t vmsg_datalen;
+} __attribute__((aligned(sizeof(uint64_t))));
+
+
+/*
+ * Vendor ID:
+ *   If transports post vendor-unique events, they must pass a well-known
+ *   32-bit vendor identifier. This identifier consists of 8 bits indicating
+ *   the "type" of identifier contained, and 24 bits of id data.
+ *
+ *   Identifiers for each type:
+ *    PCI :  ID data is the 16 bit PCI Registered Vendor ID
+ */
+#define SCSI_NL_VID_TYPE_SHIFT         56
+#define SCSI_NL_VID_TYPE_MASK          ((__u64)0xFF << SCSI_NL_VID_TYPE_SHIFT)
+#define SCSI_NL_VID_TYPE_PCI           ((__u64)0x01 << SCSI_NL_VID_TYPE_SHIFT)
+#define SCSI_NL_VID_ID_MASK            (~ SCSI_NL_VID_TYPE_MASK)
+
+
+#define INIT_SCSI_NL_HDR(hdr, t, mtype, mlen)                  \
+       {                                                       \
+       (hdr)->version = SCSI_NL_VERSION;                       \
+       (hdr)->transport = t;                                   \
+       (hdr)->magic = SCSI_NL_MAGIC;                           \
+       (hdr)->msgtype = mtype;                                 \
+       (hdr)->msglen = mlen;                                   \
+       }
+
+#endif /* SCSI_NETLINK_H */
+
diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
new file mode 100644 (file)
index 0000000..cbf76e4
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ *  FC Transport Netlink Interface
+ *
+ *  Copyright (C) 2006   James Smart, Emulex Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#ifndef SCSI_NETLINK_FC_H
+#define SCSI_NETLINK_FC_H
+
+#include <scsi/scsi_netlink.h>
+
+/*
+ * This file intended to be included by both kernel and user space
+ */
+
+/*
+ * FC Transport Message Types
+ */
+       /* kernel -> user */
+#define FC_NL_ASYNC_EVENT                      0x0100
+       /* user -> kernel */
+/* none */
+
+
+/*
+ * Message Structures :
+ */
+
+/* macro to round up message lengths to 8byte boundary */
+#define FC_NL_MSGALIGN(len)            (((len) + 7) & ~7)
+
+
+/*
+ * FC Transport Broadcast Event Message :
+ *   FC_NL_ASYNC_EVENT
+ *
+ * Note: if Vendor Unique message, &event_data will be  start of
+ *      vendor unique payload, and the length of the payload is
+ *       per event_datalen
+ *
+ * Note: When specifying vendor_id, be sure to read the Vendor Type and ID
+ *   formatting requirements specified in scsi_netlink.h
+ */
+struct fc_nl_event {
+       struct scsi_nl_hdr snlh;                /* must be 1st element ! */
+       uint64_t seconds;
+       uint64_t vendor_id;
+       uint16_t host_no;
+       uint16_t event_datalen;
+       uint32_t event_num;
+       uint32_t event_code;
+       uint32_t event_data;
+} __attribute__((aligned(sizeof(uint64_t))));
+
+
+#endif /* SCSI_NETLINK_FC_H */
+
index aafaa5a..ac7203b 100644 (file)
@@ -1 +1,4 @@
 # UAPI Header export list
+header-y += edid.h
+header-y += sisfb.h
+header-y += uvesafb.h
diff --git a/include/uapi/video/edid.h b/include/uapi/video/edid.h
new file mode 100644 (file)
index 0000000..8c0f032
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _UAPI__linux_video_edid_h__
+#define _UAPI__linux_video_edid_h__
+
+struct edid_info {
+       unsigned char dummy[128];
+};
+
+
+#endif /* _UAPI__linux_video_edid_h__ */
diff --git a/include/uapi/video/sisfb.h b/include/uapi/video/sisfb.h
new file mode 100644 (file)
index 0000000..9250b22
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * sisfb.h - definitions for the SiS framebuffer driver
+ *
+ * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the named License,
+ * or any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#ifndef _UAPI_LINUX_SISFB_H_
+#define _UAPI_LINUX_SISFB_H_
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+
+/**********************************************/
+/*                   PUBLIC                   */
+/**********************************************/
+
+/* vbflags, public (others in sis.h) */
+#define CRT2_DEFAULT           0x00000001
+#define CRT2_LCD               0x00000002
+#define CRT2_TV                        0x00000004
+#define CRT2_VGA               0x00000008
+#define TV_NTSC                        0x00000010
+#define TV_PAL                 0x00000020
+#define TV_HIVISION            0x00000040
+#define TV_YPBPR               0x00000080
+#define TV_AVIDEO              0x00000100
+#define TV_SVIDEO              0x00000200
+#define TV_SCART               0x00000400
+#define TV_PALM                        0x00001000
+#define TV_PALN                        0x00002000
+#define TV_NTSCJ               0x00001000
+#define TV_CHSCART             0x00008000
+#define TV_CHYPBPR525I         0x00010000
+#define CRT1_VGA               0x00000000
+#define CRT1_LCDA              0x00020000
+#define VGA2_CONNECTED          0x00040000
+#define VB_DISPTYPE_CRT1       0x00080000      /* CRT1 connected and used */
+#define VB_SINGLE_MODE         0x20000000      /* CRT1 or CRT2; determined by DISPTYPE_CRTx */
+#define VB_MIRROR_MODE         0x40000000      /* CRT1 + CRT2 identical (mirror mode) */
+#define VB_DUALVIEW_MODE       0x80000000      /* CRT1 + CRT2 independent (dual head mode) */
+
+/* Aliases: */
+#define CRT2_ENABLE            (CRT2_LCD | CRT2_TV | CRT2_VGA)
+#define TV_STANDARD            (TV_NTSC | TV_PAL | TV_PALM | TV_PALN | TV_NTSCJ)
+#define TV_INTERFACE           (TV_AVIDEO|TV_SVIDEO|TV_SCART|TV_HIVISION|TV_YPBPR|TV_CHSCART|TV_CHYPBPR525I)
+
+/* Only if TV_YPBPR is set: */
+#define TV_YPBPR525I           TV_NTSC
+#define TV_YPBPR525P           TV_PAL
+#define TV_YPBPR750P           TV_PALM
+#define TV_YPBPR1080I          TV_PALN
+#define TV_YPBPRALL            (TV_YPBPR525I | TV_YPBPR525P | TV_YPBPR750P | TV_YPBPR1080I)
+
+#define VB_DISPTYPE_DISP2      CRT2_ENABLE
+#define VB_DISPTYPE_CRT2       CRT2_ENABLE
+#define VB_DISPTYPE_DISP1      VB_DISPTYPE_CRT1
+#define VB_DISPMODE_SINGLE     VB_SINGLE_MODE
+#define VB_DISPMODE_MIRROR     VB_MIRROR_MODE
+#define VB_DISPMODE_DUAL       VB_DUALVIEW_MODE
+#define VB_DISPLAY_MODE                (SINGLE_MODE | MIRROR_MODE | DUALVIEW_MODE)
+
+/* Structure argument for SISFB_GET_INFO ioctl  */
+struct sisfb_info {
+       __u32   sisfb_id;               /* for identifying sisfb */
+#ifndef SISFB_ID
+#define SISFB_ID         0x53495346    /* Identify myself with 'SISF' */
+#endif
+       __u32   chip_id;                /* PCI-ID of detected chip */
+       __u32   memory;                 /* total video memory in KB */
+       __u32   heapstart;              /* heap start offset in KB */
+       __u8    fbvidmode;              /* current sisfb mode */
+
+       __u8    sisfb_version;
+       __u8    sisfb_revision;
+       __u8    sisfb_patchlevel;
+
+       __u8    sisfb_caps;             /* sisfb capabilities */
+
+       __u32   sisfb_tqlen;            /* turbo queue length (in KB) */
+
+       __u32   sisfb_pcibus;           /* The card's PCI ID */
+       __u32   sisfb_pcislot;
+       __u32   sisfb_pcifunc;
+
+       __u8    sisfb_lcdpdc;           /* PanelDelayCompensation */
+
+       __u8    sisfb_lcda;             /* Detected status of LCDA for low res/text modes */
+
+       __u32   sisfb_vbflags;
+       __u32   sisfb_currentvbflags;
+
+       __u32   sisfb_scalelcd;
+       __u32   sisfb_specialtiming;
+
+       __u8    sisfb_haveemi;
+       __u8    sisfb_emi30,sisfb_emi31,sisfb_emi32,sisfb_emi33;
+       __u8    sisfb_haveemilcd;
+
+       __u8    sisfb_lcdpdca;          /* PanelDelayCompensation for LCD-via-CRT1 */
+
+       __u16   sisfb_tvxpos, sisfb_tvypos;     /* Warning: Values + 32 ! */
+
+       __u32   sisfb_heapsize;         /* heap size (in KB) */
+       __u32   sisfb_videooffset;      /* Offset of viewport in video memory (in bytes) */
+
+       __u32   sisfb_curfstn;          /* currently running FSTN/DSTN mode */
+       __u32   sisfb_curdstn;
+
+       __u16   sisfb_pci_vendor;       /* PCI vendor (SiS or XGI) */
+
+       __u32   sisfb_vbflags2;         /* ivideo->vbflags2 */
+
+       __u8    sisfb_can_post;         /* sisfb can POST this card */
+       __u8    sisfb_card_posted;      /* card is POSTED */
+       __u8    sisfb_was_boot_device;  /* This card was the boot video device (ie is primary) */
+
+       __u8    reserved[183];          /* for future use */
+};
+
+#define SISFB_CMD_GETVBFLAGS   0x55AA0001      /* no arg; result[1] = vbflags */
+#define SISFB_CMD_SWITCHCRT1   0x55AA0010      /* arg[0]: 99 = query, 0 = off, 1 = on */
+/* more to come */
+
+#define SISFB_CMD_ERR_OK       0x80000000      /* command succeeded */
+#define SISFB_CMD_ERR_LOCKED   0x80000001      /* sisfb is locked */
+#define SISFB_CMD_ERR_EARLY    0x80000002      /* request before sisfb took over gfx system */
+#define SISFB_CMD_ERR_NOVB     0x80000003      /* No video bridge */
+#define SISFB_CMD_ERR_NOCRT2   0x80000004      /* can't change CRT1 status, CRT2 disabled */
+/* more to come */
+#define SISFB_CMD_ERR_UNKNOWN   0x8000ffff     /* Unknown command */
+#define SISFB_CMD_ERR_OTHER    0x80010000      /* Other error */
+
+/* Argument for SISFB_CMD ioctl */
+struct sisfb_cmd {
+       __u32  sisfb_cmd;
+       __u32  sisfb_arg[16];
+       __u32  sisfb_result[4];
+};
+
+/* Additional IOCTLs for communication sisfb <> X driver                */
+/* If changing this, vgatypes.h must also be changed (for X driver)    */
+
+/* ioctl for identifying and giving some info (esp. memory heap start) */
+#define SISFB_GET_INFO_SIZE    _IOR(0xF3,0x00,__u32)
+#define SISFB_GET_INFO         _IOR(0xF3,0x01,struct sisfb_info)
+
+/* ioctrl to get current vertical retrace status */
+#define SISFB_GET_VBRSTATUS    _IOR(0xF3,0x02,__u32)
+
+/* ioctl to enable/disable panning auto-maximize (like nomax parameter) */
+#define SISFB_GET_AUTOMAXIMIZE _IOR(0xF3,0x03,__u32)
+#define SISFB_SET_AUTOMAXIMIZE _IOW(0xF3,0x03,__u32)
+
+/* ioctls to relocate TV output (x=D[31:16], y=D[15:0], + 32)*/
+#define SISFB_GET_TVPOSOFFSET  _IOR(0xF3,0x04,__u32)
+#define SISFB_SET_TVPOSOFFSET  _IOW(0xF3,0x04,__u32)
+
+/* ioctl for internal sisfb commands (sisfbctrl) */
+#define SISFB_COMMAND          _IOWR(0xF3,0x05,struct sisfb_cmd)
+
+/* ioctl for locking sisfb (no register access during lock) */
+/* As of now, only used to avoid register access during
+ * the ioctls listed above.
+ */
+#define SISFB_SET_LOCK         _IOW(0xF3,0x06,__u32)
+
+/* ioctls 0xF3 up to 0x3F reserved for sisfb */
+
+/****************************************************************/
+/* The following are deprecated and should not be used anymore: */
+/****************************************************************/
+/* ioctl for identifying and giving some info (esp. memory heap start) */
+#define SISFB_GET_INFO_OLD        _IOR('n',0xF8,__u32)
+/* ioctrl to get current vertical retrace status */
+#define SISFB_GET_VBRSTATUS_OLD           _IOR('n',0xF9,__u32)
+/* ioctl to enable/disable panning auto-maximize (like nomax parameter) */
+#define SISFB_GET_AUTOMAXIMIZE_OLD _IOR('n',0xFA,__u32)
+#define SISFB_SET_AUTOMAXIMIZE_OLD _IOW('n',0xFA,__u32)
+/****************************************************************/
+/*               End of deprecated ioctl numbers                */
+/****************************************************************/
+
+/* For fb memory manager (FBIO_ALLOC, FBIO_FREE) */
+struct sis_memreq {
+       __u32   offset;
+       __u32   size;
+};
+
+/**********************************************/
+/*                  PRIVATE                   */
+/*         (for IN-KERNEL usage only)         */
+/**********************************************/
+
+
+#endif /* _UAPI_LINUX_SISFB_H_ */
diff --git a/include/uapi/video/uvesafb.h b/include/uapi/video/uvesafb.h
new file mode 100644 (file)
index 0000000..cee063d
--- /dev/null
@@ -0,0 +1,60 @@
+#ifndef _UAPI_UVESAFB_H
+#define _UAPI_UVESAFB_H
+
+#include <linux/types.h>
+
+struct v86_regs {
+       __u32 ebx;
+       __u32 ecx;
+       __u32 edx;
+       __u32 esi;
+       __u32 edi;
+       __u32 ebp;
+       __u32 eax;
+       __u32 eip;
+       __u32 eflags;
+       __u32 esp;
+       __u16 cs;
+       __u16 ss;
+       __u16 es;
+       __u16 ds;
+       __u16 fs;
+       __u16 gs;
+};
+
+/* Task flags */
+#define TF_VBEIB       0x01
+#define TF_BUF_ESDI    0x02
+#define TF_BUF_ESBX    0x04
+#define TF_BUF_RET     0x08
+#define TF_EXIT                0x10
+
+struct uvesafb_task {
+       __u8 flags;
+       int buf_len;
+       struct v86_regs regs;
+};
+
+/* Constants for the capabilities field
+ * in vbe_ib */
+#define VBE_CAP_CAN_SWITCH_DAC 0x01
+#define VBE_CAP_VGACOMPAT      0x02
+
+/* The VBE Info Block */
+struct vbe_ib {
+       char  vbe_signature[4];
+       __u16 vbe_version;
+       __u32 oem_string_ptr;
+       __u32 capabilities;
+       __u32 mode_list_ptr;
+       __u16 total_memory;
+       __u16 oem_software_rev;
+       __u32 oem_vendor_name_ptr;
+       __u32 oem_product_name_ptr;
+       __u32 oem_product_rev_ptr;
+       __u8  reserved[222];
+       char  oem_data[256];
+       char  misc_data[512];
+} __attribute__ ((packed));
+
+#endif /* _UAPI_UVESAFB_H */
index ad3e622..e69de29 100644 (file)
@@ -1,3 +0,0 @@
-header-y += edid.h
-header-y += sisfb.h
-header-y += uvesafb.h
index c5f1987..0cb8b2a 100644 (file)
@@ -1,14 +1,9 @@
 #ifndef __linux_video_edid_h__
 #define __linux_video_edid_h__
 
-struct edid_info {
-       unsigned char dummy[128];
-};
+#include <uapi/video/edid.h>
 
-#ifdef __KERNEL__
 #ifdef CONFIG_X86
 extern struct edid_info edid_info;
 #endif
-#endif
-
 #endif /* __linux_video_edid_h__ */
index 6dc5df9..6ddff93 100644 (file)
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
  */
-
 #ifndef _LINUX_SISFB_H_
 #define _LINUX_SISFB_H_
 
-#include <linux/types.h>
-#include <asm/ioctl.h>
-
-/**********************************************/
-/*                   PUBLIC                   */
-/**********************************************/
-
-/* vbflags, public (others in sis.h) */
-#define CRT2_DEFAULT           0x00000001
-#define CRT2_LCD               0x00000002
-#define CRT2_TV                        0x00000004
-#define CRT2_VGA               0x00000008
-#define TV_NTSC                        0x00000010
-#define TV_PAL                 0x00000020
-#define TV_HIVISION            0x00000040
-#define TV_YPBPR               0x00000080
-#define TV_AVIDEO              0x00000100
-#define TV_SVIDEO              0x00000200
-#define TV_SCART               0x00000400
-#define TV_PALM                        0x00001000
-#define TV_PALN                        0x00002000
-#define TV_NTSCJ               0x00001000
-#define TV_CHSCART             0x00008000
-#define TV_CHYPBPR525I         0x00010000
-#define CRT1_VGA               0x00000000
-#define CRT1_LCDA              0x00020000
-#define VGA2_CONNECTED          0x00040000
-#define VB_DISPTYPE_CRT1       0x00080000      /* CRT1 connected and used */
-#define VB_SINGLE_MODE         0x20000000      /* CRT1 or CRT2; determined by DISPTYPE_CRTx */
-#define VB_MIRROR_MODE         0x40000000      /* CRT1 + CRT2 identical (mirror mode) */
-#define VB_DUALVIEW_MODE       0x80000000      /* CRT1 + CRT2 independent (dual head mode) */
-
-/* Aliases: */
-#define CRT2_ENABLE            (CRT2_LCD | CRT2_TV | CRT2_VGA)
-#define TV_STANDARD            (TV_NTSC | TV_PAL | TV_PALM | TV_PALN | TV_NTSCJ)
-#define TV_INTERFACE           (TV_AVIDEO|TV_SVIDEO|TV_SCART|TV_HIVISION|TV_YPBPR|TV_CHSCART|TV_CHYPBPR525I)
-
-/* Only if TV_YPBPR is set: */
-#define TV_YPBPR525I           TV_NTSC
-#define TV_YPBPR525P           TV_PAL
-#define TV_YPBPR750P           TV_PALM
-#define TV_YPBPR1080I          TV_PALN
-#define TV_YPBPRALL            (TV_YPBPR525I | TV_YPBPR525P | TV_YPBPR750P | TV_YPBPR1080I)
-
-#define VB_DISPTYPE_DISP2      CRT2_ENABLE
-#define VB_DISPTYPE_CRT2       CRT2_ENABLE
-#define VB_DISPTYPE_DISP1      VB_DISPTYPE_CRT1
-#define VB_DISPMODE_SINGLE     VB_SINGLE_MODE
-#define VB_DISPMODE_MIRROR     VB_MIRROR_MODE
-#define VB_DISPMODE_DUAL       VB_DUALVIEW_MODE
-#define VB_DISPLAY_MODE                (SINGLE_MODE | MIRROR_MODE | DUALVIEW_MODE)
-
-/* Structure argument for SISFB_GET_INFO ioctl  */
-struct sisfb_info {
-       __u32   sisfb_id;               /* for identifying sisfb */
-#ifndef SISFB_ID
-#define SISFB_ID         0x53495346    /* Identify myself with 'SISF' */
-#endif
-       __u32   chip_id;                /* PCI-ID of detected chip */
-       __u32   memory;                 /* total video memory in KB */
-       __u32   heapstart;              /* heap start offset in KB */
-       __u8    fbvidmode;              /* current sisfb mode */
-
-       __u8    sisfb_version;
-       __u8    sisfb_revision;
-       __u8    sisfb_patchlevel;
-
-       __u8    sisfb_caps;             /* sisfb capabilities */
-
-       __u32   sisfb_tqlen;            /* turbo queue length (in KB) */
-
-       __u32   sisfb_pcibus;           /* The card's PCI ID */
-       __u32   sisfb_pcislot;
-       __u32   sisfb_pcifunc;
-
-       __u8    sisfb_lcdpdc;           /* PanelDelayCompensation */
-
-       __u8    sisfb_lcda;             /* Detected status of LCDA for low res/text modes */
-
-       __u32   sisfb_vbflags;
-       __u32   sisfb_currentvbflags;
-
-       __u32   sisfb_scalelcd;
-       __u32   sisfb_specialtiming;
-
-       __u8    sisfb_haveemi;
-       __u8    sisfb_emi30,sisfb_emi31,sisfb_emi32,sisfb_emi33;
-       __u8    sisfb_haveemilcd;
-
-       __u8    sisfb_lcdpdca;          /* PanelDelayCompensation for LCD-via-CRT1 */
-
-       __u16   sisfb_tvxpos, sisfb_tvypos;     /* Warning: Values + 32 ! */
-
-       __u32   sisfb_heapsize;         /* heap size (in KB) */
-       __u32   sisfb_videooffset;      /* Offset of viewport in video memory (in bytes) */
-
-       __u32   sisfb_curfstn;          /* currently running FSTN/DSTN mode */
-       __u32   sisfb_curdstn;
-
-       __u16   sisfb_pci_vendor;       /* PCI vendor (SiS or XGI) */
-
-       __u32   sisfb_vbflags2;         /* ivideo->vbflags2 */
-
-       __u8    sisfb_can_post;         /* sisfb can POST this card */
-       __u8    sisfb_card_posted;      /* card is POSTED */
-       __u8    sisfb_was_boot_device;  /* This card was the boot video device (ie is primary) */
-
-       __u8    reserved[183];          /* for future use */
-};
-
-#define SISFB_CMD_GETVBFLAGS   0x55AA0001      /* no arg; result[1] = vbflags */
-#define SISFB_CMD_SWITCHCRT1   0x55AA0010      /* arg[0]: 99 = query, 0 = off, 1 = on */
-/* more to come */
-
-#define SISFB_CMD_ERR_OK       0x80000000      /* command succeeded */
-#define SISFB_CMD_ERR_LOCKED   0x80000001      /* sisfb is locked */
-#define SISFB_CMD_ERR_EARLY    0x80000002      /* request before sisfb took over gfx system */
-#define SISFB_CMD_ERR_NOVB     0x80000003      /* No video bridge */
-#define SISFB_CMD_ERR_NOCRT2   0x80000004      /* can't change CRT1 status, CRT2 disabled */
-/* more to come */
-#define SISFB_CMD_ERR_UNKNOWN   0x8000ffff     /* Unknown command */
-#define SISFB_CMD_ERR_OTHER    0x80010000      /* Other error */
-
-/* Argument for SISFB_CMD ioctl */
-struct sisfb_cmd {
-       __u32  sisfb_cmd;
-       __u32  sisfb_arg[16];
-       __u32  sisfb_result[4];
-};
-
-/* Additional IOCTLs for communication sisfb <> X driver                */
-/* If changing this, vgatypes.h must also be changed (for X driver)    */
-
-/* ioctl for identifying and giving some info (esp. memory heap start) */
-#define SISFB_GET_INFO_SIZE    _IOR(0xF3,0x00,__u32)
-#define SISFB_GET_INFO         _IOR(0xF3,0x01,struct sisfb_info)
-
-/* ioctrl to get current vertical retrace status */
-#define SISFB_GET_VBRSTATUS    _IOR(0xF3,0x02,__u32)
-
-/* ioctl to enable/disable panning auto-maximize (like nomax parameter) */
-#define SISFB_GET_AUTOMAXIMIZE _IOR(0xF3,0x03,__u32)
-#define SISFB_SET_AUTOMAXIMIZE _IOW(0xF3,0x03,__u32)
-
-/* ioctls to relocate TV output (x=D[31:16], y=D[15:0], + 32)*/
-#define SISFB_GET_TVPOSOFFSET  _IOR(0xF3,0x04,__u32)
-#define SISFB_SET_TVPOSOFFSET  _IOW(0xF3,0x04,__u32)
-
-/* ioctl for internal sisfb commands (sisfbctrl) */
-#define SISFB_COMMAND          _IOWR(0xF3,0x05,struct sisfb_cmd)
-
-/* ioctl for locking sisfb (no register access during lock) */
-/* As of now, only used to avoid register access during
- * the ioctls listed above.
- */
-#define SISFB_SET_LOCK         _IOW(0xF3,0x06,__u32)
-
-/* ioctls 0xF3 up to 0x3F reserved for sisfb */
-
-/****************************************************************/
-/* The following are deprecated and should not be used anymore: */
-/****************************************************************/
-/* ioctl for identifying and giving some info (esp. memory heap start) */
-#define SISFB_GET_INFO_OLD        _IOR('n',0xF8,__u32)
-/* ioctrl to get current vertical retrace status */
-#define SISFB_GET_VBRSTATUS_OLD           _IOR('n',0xF9,__u32)
-/* ioctl to enable/disable panning auto-maximize (like nomax parameter) */
-#define SISFB_GET_AUTOMAXIMIZE_OLD _IOR('n',0xFA,__u32)
-#define SISFB_SET_AUTOMAXIMIZE_OLD _IOW('n',0xFA,__u32)
-/****************************************************************/
-/*               End of deprecated ioctl numbers                */
-/****************************************************************/
-
-/* For fb memory manager (FBIO_ALLOC, FBIO_FREE) */
-struct sis_memreq {
-       __u32   offset;
-       __u32   size;
-};
-
-/**********************************************/
-/*                  PRIVATE                   */
-/*         (for IN-KERNEL usage only)         */
-/**********************************************/
-
-#ifdef __KERNEL__
 
 #include <linux/pci.h>
+#include <uapi/video/sisfb.h>
 
 #define        UNKNOWN_VGA  0
 #define        SIS_300_VGA  1
@@ -220,5 +35,3 @@ extern void sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req);
 extern void sis_free(u32 base);
 extern void sis_free_new(struct pci_dev *pdev, u32 base);
 #endif
-
-#endif
index 0993a22..1a91850 100644 (file)
@@ -1,63 +1,8 @@
 #ifndef _UVESAFB_H
 #define _UVESAFB_H
 
-#include <linux/types.h>
-
-struct v86_regs {
-       __u32 ebx;
-       __u32 ecx;
-       __u32 edx;
-       __u32 esi;
-       __u32 edi;
-       __u32 ebp;
-       __u32 eax;
-       __u32 eip;
-       __u32 eflags;
-       __u32 esp;
-       __u16 cs;
-       __u16 ss;
-       __u16 es;
-       __u16 ds;
-       __u16 fs;
-       __u16 gs;
-};
-
-/* Task flags */
-#define TF_VBEIB       0x01
-#define TF_BUF_ESDI    0x02
-#define TF_BUF_ESBX    0x04
-#define TF_BUF_RET     0x08
-#define TF_EXIT                0x10
-
-struct uvesafb_task {
-       __u8 flags;
-       int buf_len;
-       struct v86_regs regs;
-};
-
-/* Constants for the capabilities field
- * in vbe_ib */
-#define VBE_CAP_CAN_SWITCH_DAC 0x01
-#define VBE_CAP_VGACOMPAT      0x02
-
-/* The VBE Info Block */
-struct vbe_ib {
-       char  vbe_signature[4];
-       __u16 vbe_version;
-       __u32 oem_string_ptr;
-       __u32 capabilities;
-       __u32 mode_list_ptr;
-       __u16 total_memory;
-       __u16 oem_software_rev;
-       __u32 oem_vendor_name_ptr;
-       __u32 oem_product_name_ptr;
-       __u32 oem_product_rev_ptr;
-       __u8  reserved[222];
-       char  oem_data[256];
-       char  misc_data[512];
-} __attribute__ ((packed));
+#include <uapi/video/uvesafb.h>
 
-#ifdef __KERNEL__
 
 /* VBE CRTC Info Block */
 struct vbe_crtc_ib {
@@ -191,5 +136,4 @@ struct uvesafb_par {
        struct vbe_crtc_ib crtc;
 };
 
-#endif /* __KERNEL__ */
 #endif /* _UVESAFB_H */
index 0a5e80f..22616cd 100644 (file)
@@ -1230,6 +1230,14 @@ config SYSCTL_ARCH_UNALIGN_NO_WARN
          Allows arch to define/use @no_unaligned_warning to possibly warn
          about unaligned access emulation going on under the hood.
 
+config SYSCTL_ARCH_UNALIGN_ALLOW
+       bool
+       help
+         Enable support for /proc/sys/kernel/unaligned-trap
+         Allows arches to define/use @unaligned_enabled to runtime toggle
+         the unaligned access emulation.
+         see arch/parisc/kernel/unaligned.c for reference
+
 config KALLSYMS
         bool "Load all symbols for debugging/ksymoops" if EXPERT
         default y
index 023c986..e5c4f60 100644 (file)
@@ -477,7 +477,7 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
 static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
 {
-       struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
+       struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        char buffer[FILENT_SIZE];
        ssize_t ret;
 
@@ -498,13 +498,13 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
        if (ret <= 0)
                return ret;
 
-       filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
+       file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME;
        return ret;
 }
 
 static int mqueue_flush_file(struct file *filp, fl_owner_t id)
 {
-       struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
+       struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 
        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
@@ -516,7 +516,7 @@ static int mqueue_flush_file(struct file *filp, fl_owner_t id)
 
 static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
 {
-       struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
+       struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        int retval = 0;
 
        poll_wait(filp, &info->wait_q, poll_tab);
@@ -973,7 +973,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                goto out;
        }
 
-       inode = f.file->f_path.dentry->d_inode;
+       inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
@@ -1089,7 +1089,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                goto out;
        }
 
-       inode = f.file->f_path.dentry->d_inode;
+       inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
@@ -1249,7 +1249,7 @@ retry:
                goto out;
        }
 
-       inode = f.file->f_path.dentry->d_inode;
+       inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
@@ -1323,7 +1323,7 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                goto out;
        }
 
-       inode = f.file->f_path.dentry->d_inode;
+       inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
index be3ec9a..cb858df 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -193,7 +193,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
-               user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
+               user_shm_unlock(file_inode(shp->shm_file)->i_size,
                                                shp->mlock_user);
        fput (shp->shm_file);
        security_shm_free(shp);
@@ -529,7 +529,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
-       file->f_dentry->d_inode->i_ino = shp->shm_perm.id;
+       file_inode(file)->i_ino = shp->shm_perm.id;
 
        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
@@ -678,7 +678,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
 {
        struct inode *inode;
 
-       inode = shp->shm_file->f_path.dentry->d_inode;
+       inode = file_inode(shp->shm_file);
 
        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
@@ -1042,7 +1042,8 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
                          is_file_hugepages(shp->shm_file) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
-       if (!file)
+       err = PTR_ERR(file);
+       if (IS_ERR(file))
                goto out_free;
 
        file->private_data = sfd;
@@ -1175,7 +1176,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
 
 
-                       size = vma->vm_file->f_path.dentry->d_inode->i_size;
+                       size = file_inode(vma->vm_file)->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
index 74e1d9c..464a8ab 100644 (file)
@@ -252,7 +252,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 {
        kuid_t euid;
        kgid_t egid;
-       int id, err;
+       int id;
        int next_id = ids->next_id;
 
        if (size > IPCMNI)
@@ -261,17 +261,21 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
        if (ids->in_use >= size)
                return -ENOSPC;
 
+       idr_preload(GFP_KERNEL);
+
        spin_lock_init(&new->lock);
        new->deleted = 0;
        rcu_read_lock();
        spin_lock(&new->lock);
 
-       err = idr_get_new_above(&ids->ipcs_idr, new,
-                               (next_id < 0) ? 0 : ipcid_to_idx(next_id), &id);
-       if (err) {
+       id = idr_alloc(&ids->ipcs_idr, new,
+                      (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
+                      GFP_NOWAIT);
+       idr_preload_end();
+       if (id < 0) {
                spin_unlock(&new->lock);
                rcu_read_unlock();
-               return err;
+               return id;
        }
 
        ids->in_use++;
@@ -307,19 +311,10 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
                struct ipc_ops *ops, struct ipc_params *params)
 {
        int err;
-retry:
-       err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
-
-       if (!err)
-               return -ENOMEM;
 
        down_write(&ids->rw_mutex);
        err = ops->getnew(ns, params);
        up_write(&ids->rw_mutex);
-
-       if (err == -EAGAIN)
-               goto retry;
-
        return err;
 }
 
@@ -376,8 +371,6 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
        struct kern_ipc_perm *ipcp;
        int flg = params->flg;
        int err;
-retry:
-       err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
 
        /*
         * Take the lock as a writer since we are potentially going to add
@@ -389,8 +382,6 @@ retry:
                /* key not used */
                if (!(flg & IPC_CREAT))
                        err = -ENOENT;
-               else if (!err)
-                       err = -ENOMEM;
                else
                        err = ops->getnew(ns, params);
        } else {
@@ -413,9 +404,6 @@ retry:
        }
        up_write(&ids->rw_mutex);
 
-       if (err == -EAGAIN)
-               goto retry;
-
        return err;
 }
 
index eceac38..bbde5f1 100644 (file)
@@ -7,7 +7,7 @@ obj-y     = fork.o exec_domain.o panic.o printk.o \
            sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
            signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
            rcupdate.o extable.o params.o posix-timers.o \
-           kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+           kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o cred.o \
            async.o range.o groups.o lglock.o smpboot.o
@@ -25,9 +25,7 @@ endif
 obj-y += sched/
 obj-y += power/
 
-ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
-obj-$(CONFIG_X86) += kcmp.o
-endif
+obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -127,11 +125,19 @@ $(obj)/config_data.h: $(obj)/config_data.gz FORCE
 
 $(obj)/time.o: $(obj)/timeconst.h
 
-quiet_cmd_timeconst  = TIMEC   $@
-      cmd_timeconst  = $(PERL) $< $(CONFIG_HZ) > $@
+quiet_cmd_hzfile = HZFILE  $@
+      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
+
+targets += hz.bc
+$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
+       $(call if_changed,hzfile)
+
+quiet_cmd_bc  = BC      $@
+      cmd_bc  = bc -q $(filter-out FORCE,$^) > $@
+
 targets += timeconst.h
-$(obj)/timeconst.h: $(src)/timeconst.pl FORCE
-       $(call if_changed,timeconst)
+$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
+       $(call if_changed,bc)
 
 ifeq ($(CONFIG_MODULE_SIG),y)
 #
index e8b1627..b9bd7f0 100644 (file)
@@ -205,7 +205,7 @@ static int acct_on(struct filename *pathname)
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       if (!S_ISREG(file->f_path.dentry->d_inode->i_mode)) {
+       if (!S_ISREG(file_inode(file)->i_mode)) {
                filp_close(file, NULL);
                return -EACCES;
        }
index b5c6432..a32f943 100644 (file)
@@ -554,7 +554,6 @@ static struct css_set *find_existing_css_set(
 {
        int i;
        struct cgroupfs_root *root = cgrp->root;
-       struct hlist_node *node;
        struct css_set *cg;
        unsigned long key;
 
@@ -577,7 +576,7 @@ static struct css_set *find_existing_css_set(
        }
 
        key = css_set_hash(template);
-       hash_for_each_possible(css_set_table, cg, node, hlist, key) {
+       hash_for_each_possible(css_set_table, cg, hlist, key) {
                if (!compare_css_sets(cg, oldcg, cgrp, template))
                        continue;
 
@@ -1611,7 +1610,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                struct cgroupfs_root *existing_root;
                const struct cred *cred;
                int i;
-               struct hlist_node *node;
                struct css_set *cg;
 
                BUG_ON(sb->s_root != NULL);
@@ -1666,7 +1664,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                /* Link the top cgroup in this hierarchy into all
                 * the css_set objects */
                write_lock(&css_set_lock);
-               hash_for_each(css_set_table, i, node, cg, hlist)
+               hash_for_each(css_set_table, i, cg, hlist)
                        link_css_set(&tmp_cg_links, cg, root_cgrp);
                write_unlock(&css_set_lock);
 
@@ -2645,7 +2643,7 @@ static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, un
  */
 static inline struct cftype *__file_cft(struct file *file)
 {
-       if (file->f_dentry->d_inode->i_fop != &cgroup_file_operations)
+       if (file_inode(file)->i_fop != &cgroup_file_operations)
                return ERR_PTR(-EINVAL);
        return __d_cft(file->f_dentry);
 }
@@ -3902,7 +3900,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
 
        /* the process need read permission on control file */
        /* AV: shouldn't we check that it's been opened for read instead? */
-       ret = inode_permission(cfile->f_path.dentry->d_inode, MAY_READ);
+       ret = inode_permission(file_inode(cfile), MAY_READ);
        if (ret < 0)
                goto fail;
 
@@ -4493,7 +4491,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 {
        struct cgroup_subsys_state *css;
        int i, ret;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        struct css_set *cg;
        unsigned long key;
 
@@ -4561,7 +4559,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
         * this is all done under the css_set_lock.
         */
        write_lock(&css_set_lock);
-       hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
+       hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
                /* skip entries that we already rehashed */
                if (cg->subsys[ss->subsys_id])
                        continue;
@@ -4571,7 +4569,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
                cg->subsys[ss->subsys_id] = css;
                /* recompute hash and restore entry */
                key = css_set_hash(cg->subsys);
-               hash_add(css_set_table, node, key);
+               hash_add(css_set_table, &cg->hlist, key);
        }
        write_unlock(&css_set_lock);
 
@@ -4618,10 +4616,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
        offline_css(ss, dummytop);
        ss->active = 0;
 
-       if (ss->use_id) {
-               idr_remove_all(&ss->idr);
+       if (ss->use_id)
                idr_destroy(&ss->idr);
-       }
 
        /* deassign the subsys_id */
        subsys[ss->subsys_id] = NULL;
@@ -5322,7 +5318,7 @@ EXPORT_SYMBOL_GPL(free_css_id);
 static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
 {
        struct css_id *newid;
-       int myid, error, size;
+       int ret, size;
 
        BUG_ON(!ss->use_id);
 
@@ -5330,35 +5326,24 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
        newid = kzalloc(size, GFP_KERNEL);
        if (!newid)
                return ERR_PTR(-ENOMEM);
-       /* get id */
-       if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
-               error = -ENOMEM;
-               goto err_out;
-       }
+
+       idr_preload(GFP_KERNEL);
        spin_lock(&ss->id_lock);
        /* Don't use 0. allocates an ID of 1-65535 */
-       error = idr_get_new_above(&ss->idr, newid, 1, &myid);
+       ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
        spin_unlock(&ss->id_lock);
+       idr_preload_end();
 
        /* Returns error when there are no free spaces for new ID.*/
-       if (error) {
-               error = -ENOSPC;
+       if (ret < 0)
                goto err_out;
-       }
-       if (myid > CSS_ID_MAX)
-               goto remove_idr;
 
-       newid->id = myid;
+       newid->id = ret;
        newid->depth = depth;
        return newid;
-remove_idr:
-       error = -ENOSPC;
-       spin_lock(&ss->id_lock);
-       idr_remove(&ss->idr, myid);
-       spin_unlock(&ss->id_lock);
 err_out:
        kfree(newid);
-       return ERR_PTR(error);
+       return ERR_PTR(ret);
 
 }
 
@@ -5489,7 +5474,7 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
        struct inode *inode;
        struct cgroup_subsys_state *css;
 
-       inode = f->f_dentry->d_inode;
+       inode = file_inode(f);
        /* check in cgroup filesystem dir */
        if (inode->i_op != &cgroup_dir_inode_operations)
                return ERR_PTR(-EBADF);
index 3494c28..2235967 100644 (file)
@@ -72,6 +72,8 @@ extern int dbg_kdb_mode;
 #ifdef CONFIG_KGDB_KDB
 extern int kdb_stub(struct kgdb_state *ks);
 extern int kdb_parse(const char *cmdstr);
+extern int kdb_common_init_state(struct kgdb_state *ks);
+extern int kdb_common_deinit_state(void);
 #else /* ! CONFIG_KGDB_KDB */
 static inline int kdb_stub(struct kgdb_state *ks)
 {
index 38573f3..19d9a57 100644 (file)
@@ -783,7 +783,10 @@ static void gdb_cmd_query(struct kgdb_state *ks)
                        len = len / 2;
                        remcom_out_buffer[len++] = 0;
 
+                       kdb_common_init_state(ks);
                        kdb_parse(remcom_out_buffer);
+                       kdb_common_deinit_state();
+
                        strcpy(remcom_out_buffer, "OK");
                }
                break;
index 8418c2f..70a5046 100644 (file)
@@ -486,11 +486,9 @@ static int kdb_bc(int argc, const char **argv)
 /*
  * kdb_ss
  *
- *     Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch)
- *     commands.
+ *     Process the 'ss' (Single Step) command.
  *
  *     ss
- *     ssb
  *
  * Parameters:
  *     argc    Argument count
@@ -498,35 +496,23 @@ static int kdb_bc(int argc, const char **argv)
  * Outputs:
  *     None.
  * Returns:
- *     KDB_CMD_SS[B] for success, a kdb error if failure.
+ *     KDB_CMD_SS for success, a kdb error if failure.
  * Locking:
  *     None.
  * Remarks:
  *
  *     Set the arch specific option to trigger a debug trap after the next
  *     instruction.
- *
- *     For 'ssb', set the trace flag in the debug trap handler
- *     after printing the current insn and return directly without
- *     invoking the kdb command processor, until a branch instruction
- *     is encountered.
  */
 
 static int kdb_ss(int argc, const char **argv)
 {
-       int ssb = 0;
-
-       ssb = (strcmp(argv[0], "ssb") == 0);
        if (argc != 0)
                return KDB_ARGCOUNT;
        /*
         * Set trace flag and go.
         */
        KDB_STATE_SET(DOING_SS);
-       if (ssb) {
-               KDB_STATE_SET(DOING_SSB);
-               return KDB_CMD_SSB;
-       }
        return KDB_CMD_SS;
 }
 
@@ -561,8 +547,6 @@ void __init kdb_initbptab(void)
 
        kdb_register_repeat("ss", kdb_ss, "",
                "Single Step", 1, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("ssb", kdb_ss, "",
-               "Single step to branch/call", 0, KDB_REPEAT_NO_ARGS);
        /*
         * Architecture dependent initialization.
         */
index be7b33b..328d18e 100644 (file)
@@ -34,6 +34,22 @@ EXPORT_SYMBOL_GPL(kdb_poll_idx);
 
 static struct kgdb_state *kdb_ks;
 
+int kdb_common_init_state(struct kgdb_state *ks)
+{
+       kdb_initial_cpu = atomic_read(&kgdb_active);
+       kdb_current_task = kgdb_info[ks->cpu].task;
+       kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+       return 0;
+}
+
+int kdb_common_deinit_state(void)
+{
+       kdb_initial_cpu = -1;
+       kdb_current_task = NULL;
+       kdb_current_regs = NULL;
+       return 0;
+}
+
 int kdb_stub(struct kgdb_state *ks)
 {
        int error = 0;
@@ -94,13 +110,10 @@ int kdb_stub(struct kgdb_state *ks)
        }
        /* Set initial kdb state variables */
        KDB_STATE_CLEAR(KGDB_TRANS);
-       kdb_initial_cpu = atomic_read(&kgdb_active);
-       kdb_current_task = kgdb_info[ks->cpu].task;
-       kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+       kdb_common_init_state(ks);
        /* Remove any breakpoints as needed by kdb and clear single step */
        kdb_bp_remove();
        KDB_STATE_CLEAR(DOING_SS);
-       KDB_STATE_CLEAR(DOING_SSB);
        KDB_STATE_SET(PAGER);
        /* zero out any offline cpu data */
        for_each_present_cpu(i) {
@@ -125,9 +138,7 @@ int kdb_stub(struct kgdb_state *ks)
         * Upon exit from the kdb main loop setup break points and restart
         * the system based on the requested continue state
         */
-       kdb_initial_cpu = -1;
-       kdb_current_task = NULL;
-       kdb_current_regs = NULL;
+       kdb_common_deinit_state();
        KDB_STATE_CLEAR(PAGER);
        kdbnearsym_cleanup();
        if (error == KDB_CMD_KGDB) {
index 8875254..00eb8f7 100644 (file)
@@ -124,7 +124,7 @@ static kdbmsg_t kdbmsgs[] = {
 };
 #undef KDBMSG
 
-static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
+static const int __nkdb_err = ARRAY_SIZE(kdbmsgs);
 
 
 /*
@@ -175,7 +175,7 @@ static char *__env[] = {
  (char *)0,
 };
 
-static const int __nenv = (sizeof(__env) / sizeof(char *));
+static const int __nenv = ARRAY_SIZE(__env);
 
 struct task_struct *kdb_curr_task(int cpu)
 {
@@ -681,34 +681,50 @@ static int kdb_defcmd(int argc, const char **argv)
        }
        if (argc != 3)
                return KDB_ARGCOUNT;
-       defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
-                            GFP_KDB);
-       if (!defcmd_set) {
-               kdb_printf("Could not allocate new defcmd_set entry for %s\n",
-                          argv[1]);
-               defcmd_set = save_defcmd_set;
+       if (in_dbg_master()) {
+               kdb_printf("Command only available during kdb_init()\n");
                return KDB_NOTIMP;
        }
+       defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
+                            GFP_KDB);
+       if (!defcmd_set)
+               goto fail_defcmd;
        memcpy(defcmd_set, save_defcmd_set,
               defcmd_set_count * sizeof(*defcmd_set));
-       kfree(save_defcmd_set);
        s = defcmd_set + defcmd_set_count;
        memset(s, 0, sizeof(*s));
        s->usable = 1;
        s->name = kdb_strdup(argv[1], GFP_KDB);
+       if (!s->name)
+               goto fail_name;
        s->usage = kdb_strdup(argv[2], GFP_KDB);
+       if (!s->usage)
+               goto fail_usage;
        s->help = kdb_strdup(argv[3], GFP_KDB);
+       if (!s->help)
+               goto fail_help;
        if (s->usage[0] == '"') {
-               strcpy(s->usage, s->usage+1);
+               strcpy(s->usage, argv[2]+1);
                s->usage[strlen(s->usage)-1] = '\0';
        }
        if (s->help[0] == '"') {
-               strcpy(s->help, s->help+1);
+               strcpy(s->help, argv[3]+1);
                s->help[strlen(s->help)-1] = '\0';
        }
        ++defcmd_set_count;
        defcmd_in_progress = 1;
+       kfree(save_defcmd_set);
        return 0;
+fail_help:
+       kfree(s->usage);
+fail_usage:
+       kfree(s->name);
+fail_name:
+       kfree(defcmd_set);
+fail_defcmd:
+       kdb_printf("Could not allocate new defcmd_set entry for %s\n", argv[1]);
+       defcmd_set = save_defcmd_set;
+       return KDB_NOTIMP;
 }
 
 /*
@@ -1112,7 +1128,6 @@ void kdb_set_current_task(struct task_struct *p)
  *     KDB_CMD_GO      User typed 'go'.
  *     KDB_CMD_CPU     User switched to another cpu.
  *     KDB_CMD_SS      Single step.
- *     KDB_CMD_SSB     Single step until branch.
  */
 static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                     kdb_dbtrap_t db_result)
@@ -1151,14 +1166,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                        kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
                                   instruction_pointer(regs));
                        break;
-               case KDB_DB_SSB:
-                       /*
-                        * In the midst of ssb command. Just return.
-                        */
-                       KDB_DEBUG_STATE("kdb_local 3", reason);
-                       return KDB_CMD_SSB;     /* Continue with SSB command */
-
-                       break;
                case KDB_DB_SS:
                        break;
                case KDB_DB_SSBPT:
@@ -1281,7 +1288,6 @@ do_full_getstr:
                if (diag == KDB_CMD_GO
                 || diag == KDB_CMD_CPU
                 || diag == KDB_CMD_SS
-                || diag == KDB_CMD_SSB
                 || diag == KDB_CMD_KGDB)
                        break;
 
@@ -1368,12 +1374,6 @@ int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
                        break;
                }
 
-               if (result == KDB_CMD_SSB) {
-                       KDB_STATE_SET(DOING_SS);
-                       KDB_STATE_SET(DOING_SSB);
-                       break;
-               }
-
                if (result == KDB_CMD_KGDB) {
                        if (!KDB_STATE(DOING_KGDB))
                                kdb_printf("Entering please attach debugger "
@@ -2350,69 +2350,6 @@ static int kdb_pid(int argc, const char **argv)
        return 0;
 }
 
-/*
- * kdb_ll - This function implements the 'll' command which follows a
- *     linked list and executes an arbitrary command for each
- *     element.
- */
-static int kdb_ll(int argc, const char **argv)
-{
-       int diag = 0;
-       unsigned long addr;
-       long offset = 0;
-       unsigned long va;
-       unsigned long linkoffset;
-       int nextarg;
-       const char *command;
-
-       if (argc != 3)
-               return KDB_ARGCOUNT;
-
-       nextarg = 1;
-       diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
-       if (diag)
-               return diag;
-
-       diag = kdbgetularg(argv[2], &linkoffset);
-       if (diag)
-               return diag;
-
-       /*
-        * Using the starting address as
-        * the first element in the list, and assuming that
-        * the list ends with a null pointer.
-        */
-
-       va = addr;
-       command = kdb_strdup(argv[3], GFP_KDB);
-       if (!command) {
-               kdb_printf("%s: cannot duplicate command\n", __func__);
-               return 0;
-       }
-       /* Recursive use of kdb_parse, do not use argv after this point */
-       argv = NULL;
-
-       while (va) {
-               char buf[80];
-
-               if (KDB_FLAG(CMD_INTERRUPT))
-                       goto out;
-
-               sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
-               diag = kdb_parse(buf);
-               if (diag)
-                       goto out;
-
-               addr = va + linkoffset;
-               if (kdb_getword(&va, addr, sizeof(va)))
-                       goto out;
-       }
-
-out:
-       kfree(command);
-       return diag;
-}
-
 static int kdb_kgdb(int argc, const char **argv)
 {
        return KDB_CMD_KGDB;
@@ -2430,11 +2367,15 @@ static int kdb_help(int argc, const char **argv)
        kdb_printf("-----------------------------"
                   "-----------------------------\n");
        for_each_kdbcmd(kt, i) {
-               if (kt->cmd_name)
-                       kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
-                                  kt->cmd_usage, kt->cmd_help);
+               char *space = "";
                if (KDB_FLAG(CMD_INTERRUPT))
                        return 0;
+               if (!kt->cmd_name)
+                       continue;
+               if (strlen(kt->cmd_usage) > 20)
+                       space = "\n                                    ";
+               kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
+                          kt->cmd_usage, space, kt->cmd_help);
        }
        return 0;
 }
@@ -2739,7 +2680,7 @@ int kdb_register_repeat(char *cmd,
                          (kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new));
                        kfree(kdb_commands);
                }
-               memset(new + kdb_max_commands, 0,
+               memset(new + kdb_max_commands - KDB_BASE_CMD_MAX, 0,
                       kdb_command_extend * sizeof(*new));
                kdb_commands = new;
                kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX;
@@ -2843,15 +2784,13 @@ static void __init kdb_inittab(void)
          "Stack traceback", 1, KDB_REPEAT_NONE);
        kdb_register_repeat("btp", kdb_bt, "<pid>",
          "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
-         "Display stack all processes", 0, KDB_REPEAT_NONE);
+       kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+         "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
        kdb_register_repeat("btc", kdb_bt, "",
          "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
        kdb_register_repeat("btt", kdb_bt, "<vaddr>",
          "Backtrace process given its struct task address", 0,
                            KDB_REPEAT_NONE);
-       kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
-         "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
        kdb_register_repeat("env", kdb_env, "",
          "Show environment variables", 0, KDB_REPEAT_NONE);
        kdb_register_repeat("set", kdb_set, "",
index 392ec6a..7afd3c8 100644 (file)
@@ -19,7 +19,6 @@
 #define KDB_CMD_GO     (-1001)
 #define KDB_CMD_CPU    (-1002)
 #define KDB_CMD_SS     (-1003)
-#define KDB_CMD_SSB    (-1004)
 #define KDB_CMD_KGDB (-1005)
 
 /* Internal debug flags */
@@ -125,8 +124,6 @@ extern int kdb_state;
                                                 * kdb control */
 #define KDB_STATE_HOLD_CPU     0x00000010      /* Hold this cpu inside kdb */
 #define KDB_STATE_DOING_SS     0x00000020      /* Doing ss command */
-#define KDB_STATE_DOING_SSB    0x00000040      /* Doing ssb command,
-                                                * DOING_SS is also set */
 #define KDB_STATE_SSBPT                0x00000080      /* Install breakpoint
                                                 * after one ss, independent of
                                                 * DOING_SS */
@@ -191,7 +188,6 @@ extern void kdb_bp_remove(void);
 typedef enum {
        KDB_DB_BPT,     /* Breakpoint */
        KDB_DB_SS,      /* Single-step trap */
-       KDB_DB_SSB,     /* Single step to branch */
        KDB_DB_SSBPT,   /* Single step over breakpoint */
        KDB_DB_NOBPT    /* Spurious breakpoint */
 } kdb_dbtrap_t;
index 5c75791..b0cd865 100644 (file)
@@ -3691,7 +3691,7 @@ unlock:
 
 static int perf_fasync(int fd, struct file *filp, int on)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct perf_event *event = filp->private_data;
        int retval;
 
@@ -5126,7 +5126,6 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 {
        struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
        struct perf_event *event;
-       struct hlist_node *node;
        struct hlist_head *head;
 
        rcu_read_lock();
@@ -5134,7 +5133,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
        if (!head)
                goto end;
 
-       hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+       hlist_for_each_entry_rcu(event, head, hlist_entry) {
                if (perf_swevent_match(event, type, event_id, data, regs))
                        perf_swevent_event(event, nr, data, regs);
        }
@@ -5419,7 +5418,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 {
        struct perf_sample_data data;
        struct perf_event *event;
-       struct hlist_node *node;
 
        struct perf_raw_record raw = {
                .size = entry_size,
@@ -5429,7 +5427,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
        perf_sample_data_init(&data, addr, 0);
        data.raw = &raw;
 
-       hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+       hlist_for_each_entry_rcu(event, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
                        perf_swevent_event(event, count, &data, regs);
        }
@@ -5965,13 +5963,9 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
        pmu->name = name;
 
        if (type < 0) {
-               int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
-               if (!err)
-                       goto free_pdc;
-
-               err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
-               if (err) {
-                       ret = err;
+               type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
+               if (type < 0) {
+                       ret = type;
                        goto free_pdc;
                }
        }
index 7dd2040..51e485c 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/file.h>
 #include <linux/fdtable.h>
+#include <linux/freezer.h>
 #include <linux/binfmts.h>
 #include <linux/nsproxy.h>
 #include <linux/pid_namespace.h>
@@ -31,7 +32,6 @@
 #include <linux/mempolicy.h>
 #include <linux/taskstats_kern.h>
 #include <linux/delayacct.h>
-#include <linux/freezer.h>
 #include <linux/cgroup.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
@@ -485,7 +485,7 @@ static void exit_mm(struct task_struct * tsk)
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
-                       schedule();
+                       freezable_schedule();
                }
                __set_task_state(tsk, TASK_RUNNING);
                down_read(&mm->mmap_sem);
@@ -835,7 +835,7 @@ void do_exit(long code)
        /*
         * Make sure we are holding no locks:
         */
-       debug_check_no_locks_held(tsk);
+       debug_check_no_locks_held();
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
index 4133876..8d932b1 100644 (file)
@@ -413,7 +413,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
-                       struct inode *inode = file->f_path.dentry->d_inode;
+                       struct inode *inode = file_inode(file);
                        struct address_space *mapping = file->f_mapping;
 
                        get_file(file);
@@ -1861,10 +1861,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
                        exit_sem(current);
                }
 
-               if (new_nsproxy) {
+               if (new_nsproxy)
                        switch_task_namespaces(current, new_nsproxy);
-                       new_nsproxy = NULL;
-               }
 
                task_lock(current);
 
@@ -1894,9 +1892,6 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
                }
        }
 
-       if (new_nsproxy)
-               put_nsproxy(new_nsproxy);
-
 bad_unshare_cleanup_cred:
        if (new_cred)
                put_cred(new_cred);
index fbc07a2..f0090a9 100644 (file)
@@ -226,7 +226,7 @@ static void drop_futex_key_refs(union futex_key *key)
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
  *
- * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
+ * For shared mappings, it's (page->index, file_inode(vma->vm_file),
  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
  * We can usually work out the index without swapping in the page.
  *
index 4bd4faa..397db02 100644 (file)
@@ -76,7 +76,7 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
 static ssize_t write_irq_affinity(int type, struct file *file,
                const char __user *buffer, size_t count, loff_t *pos)
 {
-       unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
+       unsigned int irq = (int)(long)PDE(file_inode(file))->data;
        cpumask_var_t new_value;
        int err;
 
index 2436ffc..bddd3d7 100644 (file)
@@ -229,6 +229,8 @@ out:
 
 }
 
+static void kimage_free_page_list(struct list_head *list);
+
 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                                unsigned long nr_segments,
                                struct kexec_segment __user *segments)
@@ -242,8 +244,6 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
        if (result)
                goto out;
 
-       *rimage = image;
-
        /*
         * Find a location for the control code buffer, and add it
         * the vector of segments so that it's pages will also be
@@ -254,22 +254,22 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
-               goto out;
+               goto out_free;
        }
 
        image->swap_page = kimage_alloc_control_pages(image, 0);
        if (!image->swap_page) {
                printk(KERN_ERR "Could not allocate swap buffer\n");
-               goto out;
+               goto out_free;
        }
 
-       result = 0;
- out:
-       if (result == 0)
-               *rimage = image;
-       else
-               kfree(image);
+       *rimage = image;
+       return 0;
 
+out_free:
+       kimage_free_page_list(&image->control_pages);
+       kfree(image);
+out:
        return result;
 }
 
@@ -316,7 +316,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
-                       goto out;
+                       goto out_free;
        }
 
        /*
@@ -329,16 +329,15 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
-               goto out;
+               goto out_free;
        }
 
-       result = 0;
-out:
-       if (result == 0)
-               *rimage = image;
-       else
-               kfree(image);
+       *rimage = image;
+       return 0;
 
+out_free:
+       kfree(image);
+out:
        return result;
 }
 
@@ -503,8 +502,6 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 
                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                        break;
-               if (hole_end > crashk_res.end)
-                       break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;
@@ -1514,6 +1511,8 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_OFFSET(page, _count);
        VMCOREINFO_OFFSET(page, mapping);
        VMCOREINFO_OFFSET(page, lru);
+       VMCOREINFO_OFFSET(page, _mapcount);
+       VMCOREINFO_OFFSET(page, private);
        VMCOREINFO_OFFSET(pglist_data, node_zones);
        VMCOREINFO_OFFSET(pglist_data, nr_zones);
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1536,6 +1535,11 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_NUMBER(PG_lru);
        VMCOREINFO_NUMBER(PG_private);
        VMCOREINFO_NUMBER(PG_swapcache);
+       VMCOREINFO_NUMBER(PG_slab);
+#ifdef CONFIG_MEMORY_FAILURE
+       VMCOREINFO_NUMBER(PG_hwpoison);
+#endif
+       VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
 
        arch_crash_save_vmcoreinfo();
        update_vmcoreinfo_note();
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
deleted file mode 100644 (file)
index 59dcf5b..0000000
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
- * A generic kernel FIFO implementation
- *
- * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/log2.h>
-#include <linux/uaccess.h>
-#include <linux/kfifo.h>
-
-/*
- * internal helper to calculate the unused elements in a fifo
- */
-static inline unsigned int kfifo_unused(struct __kfifo *fifo)
-{
-       return (fifo->mask + 1) - (fifo->in - fifo->out);
-}
-
-int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
-               size_t esize, gfp_t gfp_mask)
-{
-       /*
-        * round down to the next power of 2, since our 'let the indices
-        * wrap' technique works only in this case.
-        */
-       if (!is_power_of_2(size))
-               size = rounddown_pow_of_two(size);
-
-       fifo->in = 0;
-       fifo->out = 0;
-       fifo->esize = esize;
-
-       if (size < 2) {
-               fifo->data = NULL;
-               fifo->mask = 0;
-               return -EINVAL;
-       }
-
-       fifo->data = kmalloc(size * esize, gfp_mask);
-
-       if (!fifo->data) {
-               fifo->mask = 0;
-               return -ENOMEM;
-       }
-       fifo->mask = size - 1;
-
-       return 0;
-}
-EXPORT_SYMBOL(__kfifo_alloc);
-
-void __kfifo_free(struct __kfifo *fifo)
-{
-       kfree(fifo->data);
-       fifo->in = 0;
-       fifo->out = 0;
-       fifo->esize = 0;
-       fifo->data = NULL;
-       fifo->mask = 0;
-}
-EXPORT_SYMBOL(__kfifo_free);
-
-int __kfifo_init(struct __kfifo *fifo, void *buffer,
-               unsigned int size, size_t esize)
-{
-       size /= esize;
-
-       if (!is_power_of_2(size))
-               size = rounddown_pow_of_two(size);
-
-       fifo->in = 0;
-       fifo->out = 0;
-       fifo->esize = esize;
-       fifo->data = buffer;
-
-       if (size < 2) {
-               fifo->mask = 0;
-               return -EINVAL;
-       }
-       fifo->mask = size - 1;
-
-       return 0;
-}
-EXPORT_SYMBOL(__kfifo_init);
-
-static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
-               unsigned int len, unsigned int off)
-{
-       unsigned int size = fifo->mask + 1;
-       unsigned int esize = fifo->esize;
-       unsigned int l;
-
-       off &= fifo->mask;
-       if (esize != 1) {
-               off *= esize;
-               size *= esize;
-               len *= esize;
-       }
-       l = min(len, size - off);
-
-       memcpy(fifo->data + off, src, l);
-       memcpy(fifo->data, src + l, len - l);
-       /*
-        * make sure that the data in the fifo is up to date before
-        * incrementing the fifo->in index counter
-        */
-       smp_wmb();
-}
-
-unsigned int __kfifo_in(struct __kfifo *fifo,
-               const void *buf, unsigned int len)
-{
-       unsigned int l;
-
-       l = kfifo_unused(fifo);
-       if (len > l)
-               len = l;
-
-       kfifo_copy_in(fifo, buf, len, fifo->in);
-       fifo->in += len;
-       return len;
-}
-EXPORT_SYMBOL(__kfifo_in);
-
-static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
-               unsigned int len, unsigned int off)
-{
-       unsigned int size = fifo->mask + 1;
-       unsigned int esize = fifo->esize;
-       unsigned int l;
-
-       off &= fifo->mask;
-       if (esize != 1) {
-               off *= esize;
-               size *= esize;
-               len *= esize;
-       }
-       l = min(len, size - off);
-
-       memcpy(dst, fifo->data + off, l);
-       memcpy(dst + l, fifo->data, len - l);
-       /*
-        * make sure that the data is copied before
-        * incrementing the fifo->out index counter
-        */
-       smp_wmb();
-}
-
-unsigned int __kfifo_out_peek(struct __kfifo *fifo,
-               void *buf, unsigned int len)
-{
-       unsigned int l;
-
-       l = fifo->in - fifo->out;
-       if (len > l)
-               len = l;
-
-       kfifo_copy_out(fifo, buf, len, fifo->out);
-       return len;
-}
-EXPORT_SYMBOL(__kfifo_out_peek);
-
-unsigned int __kfifo_out(struct __kfifo *fifo,
-               void *buf, unsigned int len)
-{
-       len = __kfifo_out_peek(fifo, buf, len);
-       fifo->out += len;
-       return len;
-}
-EXPORT_SYMBOL(__kfifo_out);
-
-static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
-       const void __user *from, unsigned int len, unsigned int off,
-       unsigned int *copied)
-{
-       unsigned int size = fifo->mask + 1;
-       unsigned int esize = fifo->esize;
-       unsigned int l;
-       unsigned long ret;
-
-       off &= fifo->mask;
-       if (esize != 1) {
-               off *= esize;
-               size *= esize;
-               len *= esize;
-       }
-       l = min(len, size - off);
-
-       ret = copy_from_user(fifo->data + off, from, l);
-       if (unlikely(ret))
-               ret = DIV_ROUND_UP(ret + len - l, esize);
-       else {
-               ret = copy_from_user(fifo->data, from + l, len - l);
-               if (unlikely(ret))
-                       ret = DIV_ROUND_UP(ret, esize);
-       }
-       /*
-        * make sure that the data in the fifo is up to date before
-        * incrementing the fifo->in index counter
-        */
-       smp_wmb();
-       *copied = len - ret;
-       /* return the number of elements which are not copied */
-       return ret;
-}
-
-int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
-               unsigned long len, unsigned int *copied)
-{
-       unsigned int l;
-       unsigned long ret;
-       unsigned int esize = fifo->esize;
-       int err;
-
-       if (esize != 1)
-               len /= esize;
-
-       l = kfifo_unused(fifo);
-       if (len > l)
-               len = l;
-
-       ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
-       if (unlikely(ret)) {
-               len -= ret;
-               err = -EFAULT;
-       } else
-               err = 0;
-       fifo->in += len;
-       return err;
-}
-EXPORT_SYMBOL(__kfifo_from_user);
-
-static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
-               unsigned int len, unsigned int off, unsigned int *copied)
-{
-       unsigned int l;
-       unsigned long ret;
-       unsigned int size = fifo->mask + 1;
-       unsigned int esize = fifo->esize;
-
-       off &= fifo->mask;
-       if (esize != 1) {
-               off *= esize;
-               size *= esize;
-               len *= esize;
-       }
-       l = min(len, size - off);
-
-       ret = copy_to_user(to, fifo->data + off, l);
-       if (unlikely(ret))
-               ret = DIV_ROUND_UP(ret + len - l, esize);
-       else {
-               ret = copy_to_user(to + l, fifo->data, len - l);
-               if (unlikely(ret))
-                       ret = DIV_ROUND_UP(ret, esize);
-       }
-       /*
-        * make sure that the data is copied before
-        * incrementing the fifo->out index counter
-        */
-       smp_wmb();
-       *copied = len - ret;
-       /* return the number of elements which are not copied */
-       return ret;
-}
-
-int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
-               unsigned long len, unsigned int *copied)
-{
-       unsigned int l;
-       unsigned long ret;
-       unsigned int esize = fifo->esize;
-       int err;
-
-       if (esize != 1)
-               len /= esize;
-
-       l = fifo->in - fifo->out;
-       if (len > l)
-               len = l;
-       ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
-       if (unlikely(ret)) {
-               len -= ret;
-               err = -EFAULT;
-       } else
-               err = 0;
-       fifo->out += len;
-       return err;
-}
-EXPORT_SYMBOL(__kfifo_to_user);
-
-static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
-               int nents, unsigned int len)
-{
-       int n;
-       unsigned int l;
-       unsigned int off;
-       struct page *page;
-
-       if (!nents)
-               return 0;
-
-       if (!len)
-               return 0;
-
-       n = 0;
-       page = virt_to_page(buf);
-       off = offset_in_page(buf);
-       l = 0;
-
-       while (len >= l + PAGE_SIZE - off) {
-               struct page *npage;
-
-               l += PAGE_SIZE;
-               buf += PAGE_SIZE;
-               npage = virt_to_page(buf);
-               if (page_to_phys(page) != page_to_phys(npage) - l) {
-                       sg_set_page(sgl, page, l - off, off);
-                       sgl = sg_next(sgl);
-                       if (++n == nents || sgl == NULL)
-                               return n;
-                       page = npage;
-                       len -= l - off;
-                       l = off = 0;
-               }
-       }
-       sg_set_page(sgl, page, len, off);
-       return n + 1;
-}
-
-static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
-               int nents, unsigned int len, unsigned int off)
-{
-       unsigned int size = fifo->mask + 1;
-       unsigned int esize = fifo->esize;
-       unsigned int l;
-       unsigned int n;
-
-       off &= fifo->mask;
-       if (esize != 1) {
-               off *= esize;
-               size *= esize;
-               len *= esize;
-       }
-       l = min(len, size - off);
-
-       n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
-       n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
-
-       return n;
-}
-
-unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
-               struct scatterlist *sgl, int nents, unsigned int len)
-{
-       unsigned int l;
-
-       l = kfifo_unused(fifo);
-       if (len > l)
-               len = l;
-
-       return setup_sgl(fifo, sgl, nents, len, fifo->in);
-}
-EXPORT_SYMBOL(__kfifo_dma_in_prepare);
-
-unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
-               struct scatterlist *sgl, int nents, unsigned int len)
-{
-       unsigned int l;
-
-       l = fifo->in - fifo->out;
-       if (len > l)
-               len = l;
-
-       return setup_sgl(fifo, sgl, nents, len, fifo->out);
-}
-EXPORT_SYMBOL(__kfifo_dma_out_prepare);
-
-unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
-{
-       unsigned int max = (1 << (recsize << 3)) - 1;
-
-       if (len > max)
-               return max;
-       return len;
-}
-EXPORT_SYMBOL(__kfifo_max_r);
-
-#define        __KFIFO_PEEK(data, out, mask) \
-       ((data)[(out) & (mask)])
-/*
- * __kfifo_peek_n internal helper function for determinate the length of
- * the next record in the fifo
- */
-static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
-{
-       unsigned int l;
-       unsigned int mask = fifo->mask;
-       unsigned char *data = fifo->data;
-
-       l = __KFIFO_PEEK(data, fifo->out, mask);
-
-       if (--recsize)
-               l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
-
-       return l;
-}
-
-#define        __KFIFO_POKE(data, in, mask, val) \
-       ( \
-       (data)[(in) & (mask)] = (unsigned char)(val) \
-       )
-
-/*
- * __kfifo_poke_n internal helper function for storeing the length of
- * the record into the fifo
- */
-static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
-{
-       unsigned int mask = fifo->mask;
-       unsigned char *data = fifo->data;
-
-       __KFIFO_POKE(data, fifo->in, mask, n);
-
-       if (recsize > 1)
-               __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
-}
-
-unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
-{
-       return __kfifo_peek_n(fifo, recsize);
-}
-EXPORT_SYMBOL(__kfifo_len_r);
-
-unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
-               unsigned int len, size_t recsize)
-{
-       if (len + recsize > kfifo_unused(fifo))
-               return 0;
-
-       __kfifo_poke_n(fifo, len, recsize);
-
-       kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
-       fifo->in += len + recsize;
-       return len;
-}
-EXPORT_SYMBOL(__kfifo_in_r);
-
-static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
-       void *buf, unsigned int len, size_t recsize, unsigned int *n)
-{
-       *n = __kfifo_peek_n(fifo, recsize);
-
-       if (len > *n)
-               len = *n;
-
-       kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
-       return len;
-}
-
-unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
-               unsigned int len, size_t recsize)
-{
-       unsigned int n;
-
-       if (fifo->in == fifo->out)
-               return 0;
-
-       return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
-}
-EXPORT_SYMBOL(__kfifo_out_peek_r);
-
-unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
-               unsigned int len, size_t recsize)
-{
-       unsigned int n;
-
-       if (fifo->in == fifo->out)
-               return 0;
-
-       len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
-       fifo->out += n + recsize;
-       return len;
-}
-EXPORT_SYMBOL(__kfifo_out_r);
-
-void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
-{
-       unsigned int n;
-
-       n = __kfifo_peek_n(fifo, recsize);
-       fifo->out += n + recsize;
-}
-EXPORT_SYMBOL(__kfifo_skip_r);
-
-int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
-       unsigned long len, unsigned int *copied, size_t recsize)
-{
-       unsigned long ret;
-
-       len = __kfifo_max_r(len, recsize);
-
-       if (len + recsize > kfifo_unused(fifo)) {
-               *copied = 0;
-               return 0;
-       }
-
-       __kfifo_poke_n(fifo, len, recsize);
-
-       ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
-       if (unlikely(ret)) {
-               *copied = 0;
-               return -EFAULT;
-       }
-       fifo->in += len + recsize;
-       return 0;
-}
-EXPORT_SYMBOL(__kfifo_from_user_r);
-
-int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
-       unsigned long len, unsigned int *copied, size_t recsize)
-{
-       unsigned long ret;
-       unsigned int n;
-
-       if (fifo->in == fifo->out) {
-               *copied = 0;
-               return 0;
-       }
-
-       n = __kfifo_peek_n(fifo, recsize);
-       if (len > n)
-               len = n;
-
-       ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
-       if (unlikely(ret)) {
-               *copied = 0;
-               return -EFAULT;
-       }
-       fifo->out += n + recsize;
-       return 0;
-}
-EXPORT_SYMBOL(__kfifo_to_user_r);
-
-unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
-       struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
-{
-       if (!nents)
-               BUG();
-
-       len = __kfifo_max_r(len, recsize);
-
-       if (len + recsize > kfifo_unused(fifo))
-               return 0;
-
-       return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
-}
-EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
-
-void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
-       unsigned int len, size_t recsize)
-{
-       len = __kfifo_max_r(len, recsize);
-       __kfifo_poke_n(fifo, len, recsize);
-       fifo->in += len + recsize;
-}
-EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
-
-unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
-       struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
-{
-       if (!nents)
-               BUG();
-
-       len = __kfifo_max_r(len, recsize);
-
-       if (len + recsize > fifo->in - fifo->out)
-               return 0;
-
-       return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
-}
-EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
-
-void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
-{
-       unsigned int len;
-
-       len = __kfifo_peek_n(fifo, recsize);
-       fifo->out += len + recsize;
-}
-EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
index 550294d..e35be53 100644 (file)
@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct kprobe *p;
 
        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-       hlist_for_each_entry_rcu(p, node, head, hlist) {
+       hlist_for_each_entry_rcu(p, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
@@ -799,7 +798,6 @@ out:
 static void __kprobes optimize_all_kprobes(void)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;
 
@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
        kprobes_allow_optimization = true;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, node, head, hlist)
+               hlist_for_each_entry_rcu(p, head, hlist)
                        if (!kprobe_disabled(p))
                                optimize_kprobe(p);
        }
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
 static void __kprobes unoptimize_all_kprobes(void)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;
 
@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
        kprobes_allow_optimization = false;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, node, head, hlist) {
+               hlist_for_each_entry_rcu(p, head, hlist) {
                        if (!kprobe_disabled(p))
                                unoptimize_kprobe(p, false);
                }
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        unsigned long hash, flags = 0;
 
        if (unlikely(!kprobes_initialized))
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        kretprobe_table_unlock(hash, &flags);
-       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 static inline void free_rp_inst(struct kretprobe *rp)
 {
        struct kretprobe_instance *ri;
-       struct hlist_node *pos, *next;
+       struct hlist_node *next;
 
-       hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
+       hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 {
        unsigned long flags, hash;
        struct kretprobe_instance *ri;
-       struct hlist_node *pos, *next;
+       struct hlist_node *next;
        struct hlist_head *head;
 
        /* No race here */
        for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
                kretprobe_table_lock(hash, &flags);
                head = &kretprobe_inst_table[hash];
-               hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
+               hlist_for_each_entry_safe(ri, next, head, hlist) {
                        if (ri->rp == rp)
                                ri->rp = NULL;
                }
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 {
        struct module *mod = data;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;
        int checkcore = (val == MODULE_STATE_GOING);
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, node, head, hlist)
+               hlist_for_each_entry_rcu(p, head, hlist)
                        if (within_module_init((unsigned long)p->addr, mod) ||
                            (checkcore &&
                             within_module_core((unsigned long)p->addr, mod))) {
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
 static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 
        head = &kprobe_table[i];
        preempt_disable();
-       hlist_for_each_entry_rcu(p, node, head, hlist) {
+       hlist_for_each_entry_rcu(p, head, hlist) {
                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
                                        &offset, &modname, namebuf);
                if (kprobe_aggrprobe(p)) {
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
 static void __kprobes arm_all_kprobes(void)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;
 
@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
        /* Arming kprobes doesn't optimize kprobe itself */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, node, head, hlist)
+               hlist_for_each_entry_rcu(p, head, hlist)
                        if (!kprobe_disabled(p))
                                arm_kprobe(p);
        }
@@ -2265,7 +2259,6 @@ already_enabled:
 static void __kprobes disarm_all_kprobes(void)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;
 
@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)
 
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, node, head, hlist) {
+               hlist_for_each_entry_rcu(p, head, hlist) {
                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
                                disarm_kprobe(p, false);
                }
index 8a0efac..259db20 100644 (file)
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(struct task_struct *curr)
+static void print_held_locks_bug(void)
 {
        if (!debug_locks_off())
                return;
@@ -4097,22 +4097,21 @@ static void print_held_locks_bug(struct task_struct *curr)
 
        printk("\n");
        printk("=====================================\n");
-       printk("[ BUG: lock held at task exit time! ]\n");
+       printk("[ BUG: %s/%d still has locks held! ]\n",
+              current->comm, task_pid_nr(current));
        print_kernel_ident();
        printk("-------------------------------------\n");
-       printk("%s/%d is exiting with locks still held!\n",
-               curr->comm, task_pid_nr(curr));
-       lockdep_print_held_locks(curr);
-
+       lockdep_print_held_locks(current);
        printk("\nstack backtrace:\n");
        dump_stack();
 }
 
-void debug_check_no_locks_held(struct task_struct *task)
+void debug_check_no_locks_held(void)
 {
-       if (unlikely(task->lockdep_depth > 0))
-               print_held_locks_bug(task);
+       if (unlikely(current->lockdep_depth > 0))
+               print_held_locks_bug();
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
index 921bed4..0925c9a 100644 (file)
@@ -2541,7 +2541,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
        if (err)
                goto out;
 
-       err = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);
+       err = vfs_getattr(&file->f_path, &stat);
        if (err)
                goto out;
 
index b781e66..afc0456 100644 (file)
@@ -250,7 +250,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, nstype)
                return PTR_ERR(file);
 
        err = -EINVAL;
-       ei = PROC_I(file->f_dentry->d_inode);
+       ei = PROC_I(file_inode(file));
        ops = ei->ns_ops;
        if (nstype && (ops->type != nstype))
                goto out;
index f2c6a68..047dc62 100644 (file)
@@ -350,10 +350,9 @@ void disable_pid_allocation(struct pid_namespace *ns)
 
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
-       struct hlist_node *elem;
        struct upid *pnr;
 
-       hlist_for_each_entry_rcu(pnr, elem,
+       hlist_for_each_entry_rcu(pnr,
                        &pid_hash[pid_hashfn(nr, ns)], pid_chain)
                if (pnr->nr == nr && pnr->ns == ns)
                        return container_of(pnr, struct pid,
index 10349d5..6edbb2c 100644 (file)
@@ -552,24 +552,22 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
                return -EAGAIN;
 
        spin_lock_init(&new_timer->it_lock);
- retry:
-       if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
-               error = -EAGAIN;
-               goto out;
-       }
+
+       idr_preload(GFP_KERNEL);
        spin_lock_irq(&idr_lock);
-       error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
+       error = idr_alloc(&posix_timers_id, new_timer, 0, 0, GFP_NOWAIT);
        spin_unlock_irq(&idr_lock);
-       if (error) {
-               if (error == -EAGAIN)
-                       goto retry;
+       idr_preload_end();
+       if (error < 0) {
                /*
                 * Weird looking, but we return EAGAIN if the IDR is
                 * full (proper POSIX return value for this)
                 */
-               error = -EAGAIN;
+               if (error == -ENOSPC)
+                       error = -EAGAIN;
                goto out;
        }
+       new_timer_id = error;
 
        it_id_set = IT_ID_SET;
        new_timer->it_id = (timer_t) new_timer_id;
@@ -639,6 +637,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
 {
        struct k_itimer *timr;
 
+       /*
+        * timer_t could be any type >= int and we want to make sure any
+        * @timer_id outside positive int range fails lookup.
+        */
+       if ((unsigned long long)timer_id > INT_MAX)
+               return NULL;
+
        rcu_read_lock();
        timr = idr_find(&posix_timers_id, (int)timer_id);
        if (timr) {
index e8cd202..01ab081 100644 (file)
@@ -1139,7 +1139,7 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
        if (!desc->count)
                return 0;
 
-       mutex_lock(&filp->f_path.dentry->d_inode->i_mutex);
+       mutex_lock(&file_inode(filp)->i_mutex);
        do {
                if (!relay_file_read_avail(buf, *ppos))
                        break;
@@ -1159,7 +1159,7 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
                        *ppos = relay_file_read_end_pos(buf, read_start, ret);
                }
        } while (desc->count && ret);
-       mutex_unlock(&filp->f_path.dentry->d_inode->i_mutex);
+       mutex_unlock(&file_inode(filp)->i_mutex);
 
        return desc->written;
 }
index fc9103e..7f12624 100644 (file)
@@ -1752,9 +1752,8 @@ EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
        struct preempt_notifier *notifier;
-       struct hlist_node *node;
 
-       hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+       hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
                notifier->ops->sched_in(notifier, raw_smp_processor_id());
 }
 
@@ -1763,9 +1762,8 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
                                 struct task_struct *next)
 {
        struct preempt_notifier *notifier;
-       struct hlist_node *node;
 
-       hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+       hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
                notifier->ops->sched_out(notifier, next);
 }
 
@@ -1979,11 +1977,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
 }
 
 /*
- * nr_running, nr_uninterruptible and nr_context_switches:
+ * nr_running and nr_context_switches:
  *
  * externally visible scheduler statistics: current number of runnable
- * threads, current number of uninterruptible-sleeping threads, total
- * number of context switches performed since bootup.
+ * threads, total number of context switches performed since bootup.
  */
 unsigned long nr_running(void)
 {
@@ -1995,23 +1992,6 @@ unsigned long nr_running(void)
        return sum;
 }
 
-unsigned long nr_uninterruptible(void)
-{
-       unsigned long i, sum = 0;
-
-       for_each_possible_cpu(i)
-               sum += cpu_rq(i)->nr_uninterruptible;
-
-       /*
-        * Since we read the counters lockless, it might be slightly
-        * inaccurate. Do not allow it to go below zero though:
-        */
-       if (unlikely((long)sum < 0))
-               sum = 0;
-
-       return sum;
-}
-
 unsigned long long nr_context_switches(void)
 {
        int i;
@@ -3278,7 +3258,8 @@ void complete_all(struct completion *x)
 EXPORT_SYMBOL(complete_all);
 
 static inline long __sched
-do_wait_for_common(struct completion *x, long timeout, int state)
+do_wait_for_common(struct completion *x,
+                  long (*action)(long), long timeout, int state)
 {
        if (!x->done) {
                DECLARE_WAITQUEUE(wait, current);
@@ -3291,7 +3272,7 @@ do_wait_for_common(struct completion *x, long timeout, int state)
                        }
                        __set_current_state(state);
                        spin_unlock_irq(&x->wait.lock);
-                       timeout = schedule_timeout(timeout);
+                       timeout = action(timeout);
                        spin_lock_irq(&x->wait.lock);
                } while (!x->done && timeout);
                __remove_wait_queue(&x->wait, &wait);
@@ -3302,17 +3283,30 @@ do_wait_for_common(struct completion *x, long timeout, int state)
        return timeout ?: 1;
 }
 
-static long __sched
-wait_for_common(struct completion *x, long timeout, int state)
+static inline long __sched
+__wait_for_common(struct completion *x,
+                 long (*action)(long), long timeout, int state)
 {
        might_sleep();
 
        spin_lock_irq(&x->wait.lock);
-       timeout = do_wait_for_common(x, timeout, state);
+       timeout = do_wait_for_common(x, action, timeout, state);
        spin_unlock_irq(&x->wait.lock);
        return timeout;
 }
 
+static long __sched
+wait_for_common(struct completion *x, long timeout, int state)
+{
+       return __wait_for_common(x, schedule_timeout, timeout, state);
+}
+
+static long __sched
+wait_for_common_io(struct completion *x, long timeout, int state)
+{
+       return __wait_for_common(x, io_schedule_timeout, timeout, state);
+}
+
 /**
  * wait_for_completion: - waits for completion of a task
  * @x:  holds the state of this particular completion
@@ -3348,6 +3342,39 @@ wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 }
 EXPORT_SYMBOL(wait_for_completion_timeout);
 
+/**
+ * wait_for_completion_io: - waits for completion of a task
+ * @x:  holds the state of this particular completion
+ *
+ * This waits to be signaled for completion of a specific task. It is NOT
+ * interruptible and there is no timeout. The caller is accounted as waiting
+ * for IO.
+ */
+void __sched wait_for_completion_io(struct completion *x)
+{
+       wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_for_completion_io);
+
+/**
+ * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
+ * @x:  holds the state of this particular completion
+ * @timeout:  timeout value in jiffies
+ *
+ * This waits for either a completion of a specific task to be signaled or for a
+ * specified timeout to expire. The timeout is in jiffies. It is not
+ * interruptible. The caller is accounted as waiting for IO.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
+ */
+unsigned long __sched
+wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
+{
+       return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_for_completion_io_timeout);
+
 /**
  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
  * @x:  holds the state of this particular completion
index 9857329..ed12cbb 100644 (file)
@@ -604,7 +604,7 @@ static unsigned long long vtime_delta(struct task_struct *tsk)
 {
        unsigned long long clock;
 
-       clock = sched_clock();
+       clock = local_clock();
        if (clock < tsk->vtime_snap)
                return 0;
 
index 557e7b5..75024a6 100644 (file)
@@ -262,11 +262,11 @@ static void print_cpu(struct seq_file *m, int cpu)
        {
                unsigned int freq = cpu_khz ? : 1;
 
-               SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
+               SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
 #else
-       SEQ_printf(m, "\ncpu#%d\n", cpu);
+       SEQ_printf(m, "cpu#%d\n", cpu);
 #endif
 
 #define P(x)                                                           \
@@ -323,6 +323,7 @@ do {                                                                        \
        print_rq(m, rq, cpu);
        rcu_read_unlock();
        spin_unlock_irqrestore(&sched_debug_lock, flags);
+       SEQ_printf(m, "\n");
 }
 
 static const char *sched_tunable_scaling_names[] = {
@@ -331,11 +332,10 @@ static const char *sched_tunable_scaling_names[] = {
        "linear"
 };
 
-static int sched_debug_show(struct seq_file *m, void *v)
+static void sched_debug_header(struct seq_file *m)
 {
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;
-       int cpu;
 
        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
@@ -377,33 +377,101 @@ static int sched_debug_show(struct seq_file *m, void *v)
 #undef PN
 #undef P
 
-       SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
+       SEQ_printf(m, "  .%-40s: %d (%s)\n",
+               "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
+       SEQ_printf(m, "\n");
+}
 
-       for_each_online_cpu(cpu)
-               print_cpu(m, cpu);
+static int sched_debug_show(struct seq_file *m, void *v)
+{
+       int cpu = (unsigned long)(v - 2);
 
-       SEQ_printf(m, "\n");
+       if (cpu != -1)
+               print_cpu(m, cpu);
+       else
+               sched_debug_header(m);
 
        return 0;
 }
 
 void sysrq_sched_debug_show(void)
 {
-       sched_debug_show(NULL, NULL);
+       int cpu;
+
+       sched_debug_header(NULL);
+       for_each_online_cpu(cpu)
+               print_cpu(NULL, cpu);
+
+}
+
+/*
+ * This itererator needs some explanation.
+ * It returns 1 for the header position.
+ * This means 2 is cpu 0.
+ * In a hotplugged system some cpus, including cpu 0, may be missing so we have
+ * to use cpumask_* to iterate over the cpus.
+ */
+static void *sched_debug_start(struct seq_file *file, loff_t *offset)
+{
+       unsigned long n = *offset;
+
+       if (n == 0)
+               return (void *) 1;
+
+       n--;
+
+       if (n > 0)
+               n = cpumask_next(n - 1, cpu_online_mask);
+       else
+               n = cpumask_first(cpu_online_mask);
+
+       *offset = n + 1;
+
+       if (n < nr_cpu_ids)
+               return (void *)(unsigned long)(n + 2);
+       return NULL;
+}
+
+static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
+{
+       (*offset)++;
+       return sched_debug_start(file, offset);
+}
+
+static void sched_debug_stop(struct seq_file *file, void *data)
+{
+}
+
+static const struct seq_operations sched_debug_sops = {
+       .start = sched_debug_start,
+       .next = sched_debug_next,
+       .stop = sched_debug_stop,
+       .show = sched_debug_show,
+};
+
+static int sched_debug_release(struct inode *inode, struct file *file)
+{
+       seq_release(inode, file);
+
+       return 0;
 }
 
 static int sched_debug_open(struct inode *inode, struct file *filp)
 {
-       return single_open(filp, sched_debug_show, NULL);
+       int ret = 0;
+
+       ret = seq_open(filp, &sched_debug_sops);
+
+       return ret;
 }
 
 static const struct file_operations sched_debug_fops = {
        .open           = sched_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = single_release,
+       .release        = sched_debug_release,
 };
 
 static int __init init_sched_debug_procfs(void)
index 903ffa9..e036eda 100644 (file)
@@ -21,14 +21,17 @@ static int show_schedstat(struct seq_file *seq, void *v)
        if (mask_str == NULL)
                return -ENOMEM;
 
-       seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
-       seq_printf(seq, "timestamp %lu\n", jiffies);
-       for_each_online_cpu(cpu) {
-               struct rq *rq = cpu_rq(cpu);
+       if (v == (void *)1) {
+               seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
+               seq_printf(seq, "timestamp %lu\n", jiffies);
+       } else {
+               struct rq *rq;
 #ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcount = 0;
 #endif
+               cpu = (unsigned long)(v - 2);
+               rq = cpu_rq(cpu);
 
                /* runqueue-specific stats */
                seq_printf(seq,
@@ -77,30 +80,66 @@ static int show_schedstat(struct seq_file *seq, void *v)
        return 0;
 }
 
-static int schedstat_open(struct inode *inode, struct file *file)
+/*
+ * This itererator needs some explanation.
+ * It returns 1 for the header position.
+ * This means 2 is cpu 0.
+ * In a hotplugged system some cpus, including cpu 0, may be missing so we have
+ * to use cpumask_* to iterate over the cpus.
+ */
+static void *schedstat_start(struct seq_file *file, loff_t *offset)
 {
-       unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
-       char *buf = kmalloc(size, GFP_KERNEL);
-       struct seq_file *m;
-       int res;
+       unsigned long n = *offset;
 
-       if (!buf)
-               return -ENOMEM;
-       res = single_open(file, show_schedstat, NULL);
-       if (!res) {
-               m = file->private_data;
-               m->buf = buf;
-               m->size = size;
-       } else
-               kfree(buf);
-       return res;
+       if (n == 0)
+               return (void *) 1;
+
+       n--;
+
+       if (n > 0)
+               n = cpumask_next(n - 1, cpu_online_mask);
+       else
+               n = cpumask_first(cpu_online_mask);
+
+       *offset = n + 1;
+
+       if (n < nr_cpu_ids)
+               return (void *)(unsigned long)(n + 2);
+       return NULL;
+}
+
+static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
+{
+       (*offset)++;
+       return schedstat_start(file, offset);
+}
+
+static void schedstat_stop(struct seq_file *file, void *data)
+{
+}
+
+static const struct seq_operations schedstat_sops = {
+       .start = schedstat_start,
+       .next  = schedstat_next,
+       .stop  = schedstat_stop,
+       .show  = show_schedstat,
+};
+
+static int schedstat_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &schedstat_sops);
 }
 
+static int schedstat_release(struct inode *inode, struct file *file)
+{
+       return 0;
+};
+
 static const struct file_operations proc_schedstat_operations = {
        .open    = schedstat_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = single_release,
+       .release = schedstat_release,
 };
 
 static int __init proc_schedstat_init(void)
index 2a7ae29..2ec870a 100644 (file)
@@ -1157,11 +1157,11 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 static void print_fatal_signal(int signr)
 {
        struct pt_regs *regs = signal_pt_regs();
-       printk("%s/%d: potentially unexpected fatal signal %d.\n",
+       printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
                current->comm, task_pid_nr(current), signr);
 
 #if defined(__i386__) && !defined(__arch_um__)
-       printk("code at %08lx: ", regs->ip);
+       printk(KERN_INFO "code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
@@ -1169,11 +1169,11 @@ static void print_fatal_signal(int signr)
 
                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
                                break;
-                       printk("%02x ", insn);
+                       printk(KERN_CONT "%02x ", insn);
                }
        }
+       printk(KERN_CONT "\n");
 #endif
-       printk("\n");
        preempt_disable();
        show_regs(regs);
        preempt_enable();
@@ -2653,7 +2653,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
        if (oset) {
                compat_sigset_t old32;
                sigset_to_compat(&old32, &old_set);
-               if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
+               if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
                        return -EFAULT;
        }
        return 0;
@@ -2996,7 +2996,8 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
        /* Not even root can pretend to send signals from the kernel.
         * Nor can they impersonate a kill()/tgkill(), which adds source info.
         */
-       if (info->si_code >= 0 || info->si_code == SI_TKILL) {
+       if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+           (task_pid_vnr(current) != pid)) {
                /* We used to allow any < 0 si_code */
                WARN_ON_ONCE(info->si_code < 0);
                return -EPERM;
@@ -3045,7 +3046,8 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
        /* Not even root can pretend to send signals from the kernel.
         * Nor can they impersonate a kill()/tgkill(), which adds source info.
         */
-       if (info->si_code >= 0 || info->si_code == SI_TKILL) {
+       if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
+           (task_pid_vnr(current) != pid)) {
                /* We used to allow any < 0 si_code */
                WARN_ON_ONCE(info->si_code < 0);
                return -EPERM;
index d4abac2..25d3d8b 100644 (file)
@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
                        continue;
                }
 
-               BUG_ON(td->cpu != smp_processor_id());
+               //BUG_ON(td->cpu != smp_processor_id());
 
                /* Check for state change setup */
                switch (td->status) {
@@ -209,6 +209,8 @@ static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cp
 {
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 
+       if (ht->pre_unpark)
+               ht->pre_unpark(cpu);
        kthread_unpark(tsk);
 }
 
index b4d252f..14d7758 100644 (file)
@@ -323,18 +323,10 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-       if (!force_irqthreads) {
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+       if (!force_irqthreads)
                __do_softirq();
-#else
-               do_softirq();
-#endif
-       } else {
-               __local_bh_disable((unsigned long)__builtin_return_address(0),
-                               SOFTIRQ_OFFSET);
+       else
                wakeup_softirqd();
-               __local_bh_enable(SOFTIRQ_OFFSET);
-       }
 }
 
 /*
@@ -342,9 +334,15 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
+#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
+       local_irq_disable();
+#else
+       WARN_ON_ONCE(!irqs_disabled());
+#endif
+
        account_irq_exit_time(current);
        trace_hardirq_exit();
-       sub_preempt_count(IRQ_EXIT_OFFSET);
+       sub_preempt_count(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
 
@@ -354,7 +352,6 @@ void irq_exit(void)
                tick_nohz_irq_exit();
 #endif
        rcu_irq_exit();
-       sched_preempt_enable_no_resched();
 }
 
 /*
index 95d178c..c09f295 100644 (file)
@@ -336,7 +336,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
        .create                 = cpu_stop_create,
        .setup                  = cpu_stop_unpark,
        .park                   = cpu_stop_park,
-       .unpark                 = cpu_stop_unpark,
+       .pre_unpark             = cpu_stop_unpark,
        .selfparking            = true,
 };
 
index 2e18d33..81f5644 100644 (file)
@@ -1794,14 +1794,14 @@ SYSCALL_DEFINE1(umask, int, mask)
 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 {
        struct fd exe;
-       struct dentry *dentry;
+       struct inode *inode;
        int err;
 
        exe = fdget(fd);
        if (!exe.file)
                return -EBADF;
 
-       dentry = exe.file->f_path.dentry;
+       inode = file_inode(exe.file);
 
        /*
         * Because the original mm->exe_file points to executable file, make
@@ -1809,11 +1809,11 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
         * overall picture.
         */
        err = -EACCES;
-       if (!S_ISREG(dentry->d_inode->i_mode)   ||
+       if (!S_ISREG(inode->i_mode)     ||
            exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
                goto exit;
 
-       err = inode_permission(dentry->d_inode, MAY_EXEC);
+       err = inode_permission(inode, MAY_EXEC);
        if (err)
                goto exit;
 
@@ -2185,11 +2185,6 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
 
 char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
 
-static void argv_cleanup(struct subprocess_info *info)
-{
-       argv_free(info->argv);
-}
-
 static int __orderly_poweroff(void)
 {
        int argc;
@@ -2209,9 +2204,8 @@ static int __orderly_poweroff(void)
        }
 
        ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
-                                     NULL, argv_cleanup, NULL);
-       if (ret == -ENOMEM)
-               argv_free(argv);
+                                     NULL, NULL, NULL);
+       argv_free(argv);
 
        return ret;
 }
index d8df00e..afc1dc6 100644 (file)
@@ -157,6 +157,9 @@ extern int sysctl_tsb_ratio;
 
 #ifdef __hppa__
 extern int pwrsw_enabled;
+#endif
+
+#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
 extern int unaligned_enabled;
 #endif
 
@@ -555,6 +558,8 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+#endif
+#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
        {
                .procname       = "unaligned-trap",
                .data           = &unaligned_enabled,
@@ -2095,7 +2100,7 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
 static void validate_coredump_safety(void)
 {
 #ifdef CONFIG_COREDUMP
-       if (suid_dumpable == SUID_DUMPABLE_SAFE &&
+       if (suid_dumpable == SUID_DUMP_ROOT &&
            core_pattern[0] != '/' && core_pattern[0] != '|') {
                printk(KERN_WARNING "Unsafe core_pattern used with "\
                        "suid_dumpable=2. Pipe handler or fully qualified "\
index b669ca1..ebf7235 100644 (file)
@@ -970,7 +970,6 @@ out:
 static ssize_t bin_intvec(struct file *file,
        void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
 {
-       mm_segment_t old_fs = get_fs();
        ssize_t copied = 0;
        char *buffer;
        ssize_t result;
@@ -983,13 +982,10 @@ static ssize_t bin_intvec(struct file *file,
        if (oldval && oldlen) {
                unsigned __user *vec = oldval;
                size_t length = oldlen / sizeof(*vec);
-               loff_t pos = 0;
                char *str, *end;
                int i;
 
-               set_fs(KERNEL_DS);
-               result = vfs_read(file, buffer, BUFSZ - 1, &pos);
-               set_fs(old_fs);
+               result = kernel_read(file, 0, buffer, BUFSZ - 1);
                if (result < 0)
                        goto out_kfree;
 
@@ -1016,7 +1012,6 @@ static ssize_t bin_intvec(struct file *file,
        if (newval && newlen) {
                unsigned __user *vec = newval;
                size_t length = newlen / sizeof(*vec);
-               loff_t pos = 0;
                char *str, *end;
                int i;
 
@@ -1032,9 +1027,7 @@ static ssize_t bin_intvec(struct file *file,
                        str += snprintf(str, end - str, "%lu\t", value);
                }
 
-               set_fs(KERNEL_DS);
-               result = vfs_write(file, buffer, str - buffer, &pos);
-               set_fs(old_fs);
+               result = kernel_write(file, buffer, str - buffer, 0);
                if (result < 0)
                        goto out_kfree;
        }
@@ -1048,7 +1041,6 @@ out:
 static ssize_t bin_ulongvec(struct file *file,
        void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
 {
-       mm_segment_t old_fs = get_fs();
        ssize_t copied = 0;
        char *buffer;
        ssize_t result;
@@ -1061,13 +1053,10 @@ static ssize_t bin_ulongvec(struct file *file,
        if (oldval && oldlen) {
                unsigned long __user *vec = oldval;
                size_t length = oldlen / sizeof(*vec);
-               loff_t pos = 0;
                char *str, *end;
                int i;
 
-               set_fs(KERNEL_DS);
-               result = vfs_read(file, buffer, BUFSZ - 1, &pos);
-               set_fs(old_fs);
+               result = kernel_read(file, 0, buffer, BUFSZ - 1);
                if (result < 0)
                        goto out_kfree;
 
@@ -1094,7 +1083,6 @@ static ssize_t bin_ulongvec(struct file *file,
        if (newval && newlen) {
                unsigned long __user *vec = newval;
                size_t length = newlen / sizeof(*vec);
-               loff_t pos = 0;
                char *str, *end;
                int i;
 
@@ -1110,9 +1098,7 @@ static ssize_t bin_ulongvec(struct file *file,
                        str += snprintf(str, end - str, "%lu\t", value);
                }
 
-               set_fs(KERNEL_DS);
-               result = vfs_write(file, buffer, str - buffer, &pos);
-               set_fs(old_fs);
+               result = kernel_write(file, buffer, str - buffer, 0);
                if (result < 0)
                        goto out_kfree;
        }
@@ -1126,19 +1112,15 @@ out:
 static ssize_t bin_uuid(struct file *file,
        void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
 {
-       mm_segment_t old_fs = get_fs();
        ssize_t result, copied = 0;
 
        /* Only supports reads */
        if (oldval && oldlen) {
-               loff_t pos = 0;
                char buf[40], *str = buf;
                unsigned char uuid[16];
                int i;
 
-               set_fs(KERNEL_DS);
-               result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
-               set_fs(old_fs);
+               result = kernel_read(file, 0, buf, sizeof(buf) - 1);
                if (result < 0)
                        goto out;
 
@@ -1174,18 +1156,14 @@ out:
 static ssize_t bin_dn_node_address(struct file *file,
        void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
 {
-       mm_segment_t old_fs = get_fs();
        ssize_t result, copied = 0;
 
        if (oldval && oldlen) {
-               loff_t pos = 0;
                char buf[15], *nodep;
                unsigned long area, node;
                __le16 dnaddr;
 
-               set_fs(KERNEL_DS);
-               result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
-               set_fs(old_fs);
+               result = kernel_read(file, 0, buf, sizeof(buf) - 1);
                if (result < 0)
                        goto out;
 
@@ -1193,9 +1171,10 @@ static ssize_t bin_dn_node_address(struct file *file,
 
                /* Convert the decnet address to binary */
                result = -EIO;
-               nodep = strchr(buf, '.') + 1;
+               nodep = strchr(buf, '.');
                if (!nodep)
                        goto out;
+               ++nodep;
 
                area = simple_strtoul(buf, NULL, 10);
                node = simple_strtoul(nodep, NULL, 10);
@@ -1214,7 +1193,6 @@ static ssize_t bin_dn_node_address(struct file *file,
        }
 
        if (newval && newlen) {
-               loff_t pos = 0;
                __le16 dnaddr;
                char buf[15];
                int len;
@@ -1231,9 +1209,7 @@ static ssize_t bin_dn_node_address(struct file *file,
                                le16_to_cpu(dnaddr) >> 10,
                                le16_to_cpu(dnaddr) & 0x3ff);
 
-               set_fs(KERNEL_DS);
-               result = vfs_write(file, buf, len, &pos);
-               set_fs(old_fs);
+               result = kernel_write(file, buf, len, 0);
                if (result < 0)
                        goto out;
        }
index 314b9ee..a19a399 100644 (file)
@@ -554,6 +554,7 @@ void tick_nohz_idle_enter(void)
 
        local_irq_enable();
 }
+EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
 
 /**
  * tick_nohz_irq_exit - update next tick event from interrupt exit
@@ -685,6 +686,7 @@ void tick_nohz_idle_exit(void)
 
        local_irq_enable();
 }
+EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
 
 static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
 {
diff --git a/kernel/timeconst.bc b/kernel/timeconst.bc
new file mode 100644 (file)
index 0000000..511bdf2
--- /dev/null
@@ -0,0 +1,108 @@
+scale=0
+
+define gcd(a,b) {
+       auto t;
+       while (b) {
+               t = b;
+               b = a % b;
+               a = t;
+       }
+       return a;
+}
+
+/* Division by reciprocal multiplication. */
+define fmul(b,n,d) {
+       return (2^b*n+d-1)/d;
+}
+
+/* Adjustment factor when a ceiling value is used.  Use as:
+   (imul * n) + (fmulxx * n + fadjxx) >> xx) */
+define fadj(b,n,d) {
+       auto v;
+       d = d/gcd(n,d);
+       v = 2^b*(d-1)/d;
+       return v;
+}
+
+/* Compute the appropriate mul/adj values as well as a shift count,
+   which brings the mul value into the range 2^b-1 <= x < 2^b.  Such
+   a shift value will be correct in the signed integer range and off
+   by at most one in the upper half of the unsigned range. */
+define fmuls(b,n,d) {
+       auto s, m;
+       for (s = 0; 1; s++) {
+               m = fmul(s,n,d);
+               if (m >= 2^(b-1))
+                       return s;
+       }
+       return 0;
+}
+
+define timeconst(hz) {
+       print "/* Automatically generated by kernel/timeconst.bc */\n"
+       print "/* Time conversion constants for HZ == ", hz, " */\n"
+       print "\n"
+
+       print "#ifndef KERNEL_TIMECONST_H\n"
+       print "#define KERNEL_TIMECONST_H\n\n"
+
+       print "#include <linux/param.h>\n"
+       print "#include <linux/types.h>\n\n"
+
+       print "#if HZ != ", hz, "\n"
+       print "#error \qkernel/timeconst.h has the wrong HZ value!\q\n"
+       print "#endif\n\n"
+
+       if (hz < 2) {
+               print "#error Totally bogus HZ value!\n"
+       } else {
+               s=fmuls(32,1000,hz)
+               obase=16
+               print "#define HZ_TO_MSEC_MUL32\tU64_C(0x", fmul(s,1000,hz), ")\n"
+               print "#define HZ_TO_MSEC_ADJ32\tU64_C(0x", fadj(s,1000,hz), ")\n"
+               obase=10
+               print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
+
+               s=fmuls(32,hz,1000)
+               obase=16
+               print "#define MSEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000), ")\n"
+               print "#define MSEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000), ")\n"
+               obase=10
+               print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
+
+               obase=10
+               cd=gcd(hz,1000)
+               print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
+               print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
+               print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+               print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
+               print "\n"
+
+               s=fmuls(32,1000000,hz)
+               obase=16
+               print "#define HZ_TO_USEC_MUL32\tU64_C(0x", fmul(s,1000000,hz), ")\n"
+               print "#define HZ_TO_USEC_ADJ32\tU64_C(0x", fadj(s,1000000,hz), ")\n"
+               obase=10
+               print "#define HZ_TO_USEC_SHR32\t", s, "\n"
+
+               s=fmuls(32,hz,1000000)
+               obase=16
+               print "#define USEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000000), ")\n"
+               print "#define USEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000000), ")\n"
+               obase=10
+               print "#define USEC_TO_HZ_SHR32\t", s, "\n"
+
+               obase=10
+               cd=gcd(hz,1000000)
+               print "#define HZ_TO_USEC_NUM\t\t", 1000000/cd, "\n"
+               print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
+               print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+               print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+               print "\n"
+
+               print "#endif /* KERNEL_TIMECONST_H */\n"
+       }
+       halt
+}
+
+timeconst(hz)
diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
deleted file mode 100644 (file)
index 3f42652..0000000
+++ /dev/null
@@ -1,376 +0,0 @@
-#!/usr/bin/perl
-# -----------------------------------------------------------------------
-#
-#   Copyright 2007-2008 rPath, Inc. - All Rights Reserved
-#
-#   This file is part of the Linux kernel, and is made available under
-#   the terms of the GNU General Public License version 2 or (at your
-#   option) any later version; incorporated herein by reference.
-#
-# -----------------------------------------------------------------------
-#
-
-#
-# Usage: timeconst.pl HZ > timeconst.h
-#
-
-# Precomputed values for systems without Math::BigInt
-# Generated by:
-# timeconst.pl --can 24 32 48 64 100 122 128 200 250 256 300 512 1000 1024 1200
-%canned_values = (
-       24 => [
-               '0xa6aaaaab','0x2aaaaaa',26,
-               125,3,
-               '0xc49ba5e4','0x1fbe76c8b4',37,
-               3,125,
-               '0xa2c2aaab','0xaaaa',16,
-               125000,3,
-               '0xc9539b89','0x7fffbce4217d',47,
-               3,125000,
-       ], 32 => [
-               '0xfa000000','0x6000000',27,
-               125,4,
-               '0x83126e98','0xfdf3b645a',36,
-               4,125,
-               '0xf4240000','0x0',17,
-               31250,1,
-               '0x8637bd06','0x3fff79c842fa',46,
-               1,31250,
-       ], 48 => [
-               '0xa6aaaaab','0x6aaaaaa',27,
-               125,6,
-               '0xc49ba5e4','0xfdf3b645a',36,
-               6,125,
-               '0xa2c2aaab','0x15555',17,
-               62500,3,
-               '0xc9539b89','0x3fffbce4217d',46,
-               3,62500,
-       ], 64 => [
-               '0xfa000000','0xe000000',28,
-               125,8,
-               '0x83126e98','0x7ef9db22d',35,
-               8,125,
-               '0xf4240000','0x0',18,
-               15625,1,
-               '0x8637bd06','0x1fff79c842fa',45,
-               1,15625,
-       ], 100 => [
-               '0xa0000000','0x0',28,
-               10,1,
-               '0xcccccccd','0x733333333',35,
-               1,10,
-               '0x9c400000','0x0',18,
-               10000,1,
-               '0xd1b71759','0x1fff2e48e8a7',45,
-               1,10000,
-       ], 122 => [
-               '0x8325c53f','0xfbcda3a',28,
-               500,61,
-               '0xf9db22d1','0x7fbe76c8b',35,
-               61,500,
-               '0x8012e2a0','0x3ef36',18,
-               500000,61,
-               '0xffda4053','0x1ffffbce4217',45,
-               61,500000,
-       ], 128 => [
-               '0xfa000000','0x1e000000',29,
-               125,16,
-               '0x83126e98','0x3f7ced916',34,
-               16,125,
-               '0xf4240000','0x40000',19,
-               15625,2,
-               '0x8637bd06','0xfffbce4217d',44,
-               2,15625,
-       ], 200 => [
-               '0xa0000000','0x0',29,
-               5,1,
-               '0xcccccccd','0x333333333',34,
-               1,5,
-               '0x9c400000','0x0',19,
-               5000,1,
-               '0xd1b71759','0xfff2e48e8a7',44,
-               1,5000,
-       ], 250 => [
-               '0x80000000','0x0',29,
-               4,1,
-               '0x80000000','0x180000000',33,
-               1,4,
-               '0xfa000000','0x0',20,
-               4000,1,
-               '0x83126e98','0x7ff7ced9168',43,
-               1,4000,
-       ], 256 => [
-               '0xfa000000','0x3e000000',30,
-               125,32,
-               '0x83126e98','0x1fbe76c8b',33,
-               32,125,
-               '0xf4240000','0xc0000',20,
-               15625,4,
-               '0x8637bd06','0x7ffde7210be',43,
-               4,15625,
-       ], 300 => [
-               '0xd5555556','0x2aaaaaaa',30,
-               10,3,
-               '0x9999999a','0x1cccccccc',33,
-               3,10,
-               '0xd0555556','0xaaaaa',20,
-               10000,3,
-               '0x9d495183','0x7ffcb923a29',43,
-               3,10000,
-       ], 512 => [
-               '0xfa000000','0x7e000000',31,
-               125,64,
-               '0x83126e98','0xfdf3b645',32,
-               64,125,
-               '0xf4240000','0x1c0000',21,
-               15625,8,
-               '0x8637bd06','0x3ffef39085f',42,
-               8,15625,
-       ], 1000 => [
-               '0x80000000','0x0',31,
-               1,1,
-               '0x80000000','0x0',31,
-               1,1,
-               '0xfa000000','0x0',22,
-               1000,1,
-               '0x83126e98','0x1ff7ced9168',41,
-               1,1000,
-       ], 1024 => [
-               '0xfa000000','0xfe000000',32,
-               125,128,
-               '0x83126e98','0x7ef9db22',31,
-               128,125,
-               '0xf4240000','0x3c0000',22,
-               15625,16,
-               '0x8637bd06','0x1fff79c842f',41,
-               16,15625,
-       ], 1200 => [
-               '0xd5555556','0xd5555555',32,
-               5,6,
-               '0x9999999a','0x66666666',31,
-               6,5,
-               '0xd0555556','0x2aaaaa',22,
-               2500,3,
-               '0x9d495183','0x1ffcb923a29',41,
-               3,2500,
-       ]
-);
-
-$has_bigint = eval 'use Math::BigInt qw(bgcd); 1;';
-
-sub bint($)
-{
-       my($x) = @_;
-       return Math::BigInt->new($x);
-}
-
-#
-# Constants for division by reciprocal multiplication.
-# (bits, numerator, denominator)
-#
-sub fmul($$$)
-{
-       my ($b,$n,$d) = @_;
-
-       $n = bint($n);
-       $d = bint($d);
-
-       return scalar (($n << $b)+$d-bint(1))/$d;
-}
-
-sub fadj($$$)
-{
-       my($b,$n,$d) = @_;
-
-       $n = bint($n);
-       $d = bint($d);
-
-       $d = $d/bgcd($n, $d);
-       return scalar (($d-bint(1)) << $b)/$d;
-}
-
-sub fmuls($$$) {
-       my($b,$n,$d) = @_;
-       my($s,$m);
-       my($thres) = bint(1) << ($b-1);
-
-       $n = bint($n);
-       $d = bint($d);
-
-       for ($s = 0; 1; $s++) {
-               $m = fmul($s,$n,$d);
-               return $s if ($m >= $thres);
-       }
-       return 0;
-}
-
-# Generate a hex value if the result fits in 64 bits;
-# otherwise skip.
-sub bignum_hex($) {
-       my($x) = @_;
-       my $s = $x->as_hex();
-
-       return (length($s) > 18) ? undef : $s;
-}
-
-# Provides mul, adj, and shr factors for a specific
-# (bit, time, hz) combination
-sub muladj($$$) {
-       my($b, $t, $hz) = @_;
-       my $s = fmuls($b, $t, $hz);
-       my $m = fmul($s, $t, $hz);
-       my $a = fadj($s, $t, $hz);
-       return (bignum_hex($m), bignum_hex($a), $s);
-}
-
-# Provides numerator, denominator values
-sub numden($$) {
-       my($n, $d) = @_;
-       my $g = bgcd($n, $d);
-       return ($n/$g, $d/$g);
-}
-
-# All values for a specific (time, hz) combo
-sub conversions($$) {
-       my ($t, $hz) = @_;
-       my @val = ();
-
-       # HZ_TO_xx
-       push(@val, muladj(32, $t, $hz));
-       push(@val, numden($t, $hz));
-
-       # xx_TO_HZ
-       push(@val, muladj(32, $hz, $t));
-       push(@val, numden($hz, $t));
-
-       return @val;
-}
-
-sub compute_values($) {
-       my($hz) = @_;
-       my @val = ();
-       my $s, $m, $a, $g;
-
-       if (!$has_bigint) {
-               die "$0: HZ == $hz not canned and ".
-                   "Math::BigInt not available\n";
-       }
-
-       # MSEC conversions
-       push(@val, conversions(1000, $hz));
-
-       # USEC conversions
-       push(@val, conversions(1000000, $hz));
-
-       return @val;
-}
-
-sub outputval($$)
-{
-       my($name, $val) = @_;
-       my $csuf;
-
-       if (defined($val)) {
-           if ($name !~ /SHR/) {
-               $val = "U64_C($val)";
-           }
-           printf "#define %-23s %s\n", $name.$csuf, $val.$csuf;
-       }
-}
-
-sub output($@)
-{
-       my($hz, @val) = @_;
-       my $pfx, $bit, $suf, $s, $m, $a;
-
-       print "/* Automatically generated by kernel/timeconst.pl */\n";
-       print "/* Conversion constants for HZ == $hz */\n";
-       print "\n";
-       print "#ifndef KERNEL_TIMECONST_H\n";
-       print "#define KERNEL_TIMECONST_H\n";
-       print "\n";
-
-       print "#include <linux/param.h>\n";
-       print "#include <linux/types.h>\n";
-
-       print "\n";
-       print "#if HZ != $hz\n";
-       print "#error \"kernel/timeconst.h has the wrong HZ value!\"\n";
-       print "#endif\n";
-       print "\n";
-
-       foreach $pfx ('HZ_TO_MSEC','MSEC_TO_HZ',
-                     'HZ_TO_USEC','USEC_TO_HZ') {
-               foreach $bit (32) {
-                       foreach $suf ('MUL', 'ADJ', 'SHR') {
-                               outputval("${pfx}_$suf$bit", shift(@val));
-                       }
-               }
-               foreach $suf ('NUM', 'DEN') {
-                       outputval("${pfx}_$suf", shift(@val));
-               }
-       }
-
-       print "\n";
-       print "#endif /* KERNEL_TIMECONST_H */\n";
-}
-
-# Pretty-print Perl values
-sub perlvals(@) {
-       my $v;
-       my @l = ();
-
-       foreach $v (@_) {
-               if (!defined($v)) {
-                       push(@l, 'undef');
-               } elsif ($v =~ /^0x/) {
-                       push(@l, "\'".$v."\'");
-               } else {
-                       push(@l, $v.'');
-               }
-       }
-       return join(',', @l);
-}
-
-($hz) = @ARGV;
-
-# Use this to generate the %canned_values structure
-if ($hz eq '--can') {
-       shift(@ARGV);
-       @hzlist = sort {$a <=> $b} (@ARGV);
-
-       print "# Precomputed values for systems without Math::BigInt\n";
-       print "# Generated by:\n";
-       print "# timeconst.pl --can ", join(' ', @hzlist), "\n";
-       print "\%canned_values = (\n";
-       my $pf = "\t";
-       foreach $hz (@hzlist) {
-               my @values = compute_values($hz);
-               print "$pf$hz => [\n";
-               while (scalar(@values)) {
-                       my $bit;
-                       foreach $bit (32) {
-                               my $m = shift(@values);
-                               my $a = shift(@values);
-                               my $s = shift(@values);
-                               print "\t\t", perlvals($m,$a,$s), ",\n";
-                       }
-                       my $n = shift(@values);
-                       my $d = shift(@values);
-                       print "\t\t", perlvals($n,$d), ",\n";
-               }
-               print "\t]";
-               $pf = ', ';
-       }
-       print "\n);\n";
-} else {
-       $hz += 0;                       # Force to number
-       if ($hz < 1) {
-               die "Usage: $0 HZ\n";
-       }
-
-       $cv = $canned_values{$hz};
-       @val = defined($cv) ? @$cv : compute_values($hz);
-       output($hz, @val);
-}
-exit 0;
index 71259e2..9e5b8c2 100644 (file)
@@ -739,6 +739,12 @@ static void blk_add_trace_rq_complete(void *ignore,
                                      struct request_queue *q,
                                      struct request *rq)
 {
+       struct blk_trace *bt = q->blk_trace;
+
+       /* if control ever passes through here, it's a request based driver */
+       if (unlikely(bt && !bt->rq_based))
+               bt->rq_based = true;
+
        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
 }
 
@@ -774,15 +780,30 @@ static void blk_add_trace_bio_bounce(void *ignore,
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
-static void blk_add_trace_bio_complete(void *ignore,
-                                      struct request_queue *q, struct bio *bio,
-                                      int error)
+static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error)
 {
+       struct request_queue *q;
+       struct blk_trace *bt;
+
+       if (!bio->bi_bdev)
+               return;
+
+       q = bdev_get_queue(bio->bi_bdev);
+       bt = q->blk_trace;
+
+       /*
+        * Request based drivers will generate both rq and bio completions.
+        * Ignore bio ones.
+        */
+       if (likely(!bt) || bt->rq_based)
+               return;
+
        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
                                        struct request_queue *q,
+                                       struct request *rq,
                                        struct bio *bio)
 {
        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
@@ -790,6 +811,7 @@ static void blk_add_trace_bio_backmerge(void *ignore,
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
                                         struct request_queue *q,
+                                        struct request *rq,
                                         struct bio *bio)
 {
        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
index ce8c3d6..ab25b88 100644 (file)
@@ -762,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 {
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
-       struct hlist_node *n;
        unsigned long key;
 
        key = hash_long(ip, ftrace_profile_bits);
@@ -771,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
        if (hlist_empty(hhd))
                return NULL;
 
-       hlist_for_each_entry_rcu(rec, n, hhd, node) {
+       hlist_for_each_entry_rcu(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }
@@ -1133,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;
-       struct hlist_node *n;
 
        if (ftrace_hash_empty(hash))
                return NULL;
@@ -1145,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
        hhd = &hash->buckets[key];
 
-       hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+       hlist_for_each_entry_rcu(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
@@ -1202,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
 static void ftrace_hash_clear(struct ftrace_hash *hash)
 {
        struct hlist_head *hhd;
-       struct hlist_node *tp, *tn;
+       struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;
@@ -1212,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
 
        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
-               hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+               hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
@@ -1275,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 {
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
-       struct hlist_node *tp;
        int size;
        int ret;
        int i;
@@ -1290,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 
        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
-               hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+               hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
@@ -1316,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
        struct ftrace_func_entry *entry;
-       struct hlist_node *tp, *tn;
+       struct hlist_node *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
@@ -1362,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
-               hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+               hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        if (bits > 0)
                                key = hash_long(entry->ip, bits);
                        else
@@ -2901,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 {
        struct ftrace_func_probe *entry;
        struct hlist_head *hhd;
-       struct hlist_node *n;
        unsigned long key;
 
        key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2917,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
         * on the hash. rcu_read_lock is too dangerous here.
         */
        preempt_disable_notrace();
-       hlist_for_each_entry_rcu(entry, n, hhd, node) {
+       hlist_for_each_entry_rcu(entry, hhd, node) {
                if (entry->ip == ip)
                        entry->ops->func(ip, parent_ip, &entry->data);
        }
@@ -3068,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                  void *data, int flags)
 {
        struct ftrace_func_probe *entry;
-       struct hlist_node *n, *tmp;
+       struct hlist_node *tmp;
        char str[KSYM_SYMBOL_LEN];
        int type = MATCH_FULL;
        int i, len = 0;
@@ -3091,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
 
-               hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+               hlist_for_each_entry_safe(entry, tmp, hhd, node) {
 
                        /* break up if statements for readability */
                        if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
@@ -3996,37 +3992,51 @@ static void ftrace_init_module(struct module *mod,
        ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify(struct notifier_block *self,
-                               unsigned long val, void *data)
+static int ftrace_module_notify_enter(struct notifier_block *self,
+                                     unsigned long val, void *data)
 {
        struct module *mod = data;
 
-       switch (val) {
-       case MODULE_STATE_COMING:
+       if (val == MODULE_STATE_COMING)
                ftrace_init_module(mod, mod->ftrace_callsites,
                                   mod->ftrace_callsites +
                                   mod->num_ftrace_callsites);
-               break;
-       case MODULE_STATE_GOING:
+       return 0;
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+                                    unsigned long val, void *data)
+{
+       struct module *mod = data;
+
+       if (val == MODULE_STATE_GOING)
                ftrace_release_mod(mod);
-               break;
-       }
 
        return 0;
 }
 #else
-static int ftrace_module_notify(struct notifier_block *self,
-                               unsigned long val, void *data)
+static int ftrace_module_notify_enter(struct notifier_block *self,
+                                     unsigned long val, void *data)
+{
+       return 0;
+}
+static int ftrace_module_notify_exit(struct notifier_block *self,
+                                    unsigned long val, void *data)
 {
        return 0;
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_nb = {
-       .notifier_call = ftrace_module_notify,
+struct notifier_block ftrace_module_enter_nb = {
+       .notifier_call = ftrace_module_notify_enter,
        .priority = INT_MAX,    /* Run before anything that can use kprobes */
 };
 
+struct notifier_block ftrace_module_exit_nb = {
+       .notifier_call = ftrace_module_notify_exit,
+       .priority = INT_MIN,    /* Run after anything that can remove kprobes */
+};
+
 extern unsigned long __start_mcount_loc[];
 extern unsigned long __stop_mcount_loc[];
 
@@ -4058,9 +4068,13 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
-       ret = register_module_notifier(&ftrace_module_nb);
+       ret = register_module_notifier(&ftrace_module_enter_nb);
+       if (ret)
+               pr_warning("Failed to register trace ftrace module enter notifier\n");
+
+       ret = register_module_notifier(&ftrace_module_exit_nb);
        if (ret)
-               pr_warning("Failed to register trace ftrace module notifier\n");
+               pr_warning("Failed to register trace ftrace module exit notifier\n");
 
        set_ftrace_early_filters();
 
index 7244acd..6989df2 100644 (file)
@@ -178,7 +178,7 @@ void tracing_off_permanent(void)
 #define RB_MAX_SMALL_DATA      (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE       8U      /* two 32bit words */
 
-#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 # define RB_FORCE_8BYTE_ALIGNMENT      0
 # define RB_ARCH_ALIGNMENT             RB_ALIGNMENT
 #else
@@ -186,6 +186,8 @@ void tracing_off_permanent(void)
 # define RB_ARCH_ALIGNMENT             8U
 #endif
 
+#define RB_ALIGN_DATA          __aligned(RB_ARCH_ALIGNMENT)
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
@@ -334,7 +336,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
-       unsigned char    data[];        /* data of buffer page */
+       unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
 };
 
 /*
index 194d796..697e88d 100644 (file)
@@ -739,12 +739,11 @@ static int task_state_char(unsigned long state)
 struct trace_event *ftrace_find_event(int type)
 {
        struct trace_event *event;
-       struct hlist_node *n;
        unsigned key;
 
        key = type & (EVENT_HASHSIZE - 1);
 
-       hlist_for_each_entry(event, n, &event_hash[key], node) {
+       hlist_for_each_entry(event, &event_hash[key], node) {
                if (event->type == type)
                        return event;
        }
index 5329e13..7a809e3 100644 (file)
@@ -1,5 +1,6 @@
 #include <trace/syscall.h>
 #include <trace/events/syscalls.h>
+#include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>      /* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
@@ -47,6 +48,38 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
 }
 #endif
 
+#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+/*
+ * Some architectures that allow for 32bit applications
+ * to run on a 64bit kernel, do not map the syscalls for
+ * the 32bit tasks the same as they do for 64bit tasks.
+ *
+ *     *cough*x86*cough*
+ *
+ * In such a case, instead of reporting the wrong syscalls,
+ * simply ignore them.
+ *
+ * For an arch to ignore the compat syscalls it needs to
+ * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
+ * define the function arch_trace_is_compat_syscall() to let
+ * the tracing system know that it should ignore it.
+ */
+static int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+       if (unlikely(arch_trace_is_compat_syscall(regs)))
+               return -1;
+
+       return syscall_get_nr(task, regs);
+}
+#else
+static inline int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+       return syscall_get_nr(task, regs);
+}
+#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
+
 static __init struct syscall_metadata *
 find_syscall_meta(unsigned long syscall)
 {
@@ -276,10 +309,10 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
-       int size;
        int syscall_nr;
+       int size;
 
-       syscall_nr = syscall_get_nr(current, regs);
+       syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_enter_syscalls))
@@ -313,7 +346,7 @@ static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        struct ring_buffer *buffer;
        int syscall_nr;
 
-       syscall_nr = syscall_get_nr(current, regs);
+       syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_exit_syscalls))
@@ -502,7 +535,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        int rctx;
        int size;
 
-       syscall_nr = syscall_get_nr(current, regs);
+       syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
@@ -578,7 +611,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        int rctx;
        int size;
 
-       syscall_nr = syscall_get_nr(current, regs);
+       syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0)
                return;
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
index d96ba22..0c05a45 100644 (file)
@@ -192,12 +192,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
 static struct tracepoint_entry *get_tracepoint(const char *name)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct tracepoint_entry *e;
        u32 hash = jhash(name, strlen(name), 0);
 
        head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-       hlist_for_each_entry(e, node, head, hlist) {
+       hlist_for_each_entry(e, head, hlist) {
                if (!strcmp(name, e->name))
                        return e;
        }
@@ -211,13 +210,12 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
 static struct tracepoint_entry *add_tracepoint(const char *name)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct tracepoint_entry *e;
        size_t name_len = strlen(name) + 1;
        u32 hash = jhash(name, name_len-1, 0);
 
        head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-       hlist_for_each_entry(e, node, head, hlist) {
+       hlist_for_each_entry(e, head, hlist) {
                if (!strcmp(name, e->name)) {
                        printk(KERN_NOTICE
                                "tracepoint %s busy\n", name);
index 1744bb8..394f70b 100644 (file)
@@ -34,11 +34,11 @@ EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
 void fire_user_return_notifiers(void)
 {
        struct user_return_notifier *urn;
-       struct hlist_node *tmp1, *tmp2;
+       struct hlist_node *tmp2;
        struct hlist_head *head;
 
        head = &get_cpu_var(return_notifier_list);
-       hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
+       hlist_for_each_entry_safe(urn, tmp2, head, link)
                urn->on_user_return(urn);
        put_cpu_var(return_notifier_list);
 }
index 57ebfd4..e81978e 100644 (file)
@@ -105,9 +105,8 @@ static void uid_hash_remove(struct user_struct *up)
 static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 {
        struct user_struct *user;
-       struct hlist_node *h;
 
-       hlist_for_each_entry(user, h, hashent, uidhash_node) {
+       hlist_for_each_entry(user, hashent, uidhash_node) {
                if (uid_eq(user->uid, uid)) {
                        atomic_inc(&user->__count);
                        return user;
index 08b197e..a47fc5d 100644 (file)
@@ -30,7 +30,7 @@ static struct uts_namespace *create_uts_ns(void)
 /*
  * Clone a new ns copying an original utsname, setting refcount to 1
  * @old_ns: namespace to clone
- * Return NULL on error (failure to kmalloc), new ns otherwise
+ * Return ERR_PTR(-ENOMEM) on error (failure to kmalloc), new ns otherwise
  */
 static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
                                          struct uts_namespace *old_ns)
index 63da38c..4f69f9a 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/sysctl.h>
 #include <linux/wait.h>
 
+#ifdef CONFIG_PROC_SYSCTL
+
 static void *get_uts(ctl_table *table, int write)
 {
        char *which = table->data;
@@ -38,7 +40,6 @@ static void put_uts(ctl_table *table, int write, void *which)
                up_write(&uts_sem);
 }
 
-#ifdef CONFIG_PROC_SYSCTL
 /*
  *     Special case of dostring for the UTS structure. This has locks
  *     to observe. Should this be in kernel/sys.c ????
index f4feaca..81f2457 100644 (file)
@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
        for ((pool) = &std_worker_pools(cpu)[0];                        \
             (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
 
-#define for_each_busy_worker(worker, i, pos, pool)                     \
-       hash_for_each(pool->busy_hash, i, pos, worker, hentry)
+#define for_each_busy_worker(worker, i, pool)                          \
+       hash_for_each(pool->busy_hash, i, worker, hentry)
 
 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
                                unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
                                                 struct work_struct *work)
 {
        struct worker *worker;
-       struct hlist_node *tmp;
 
-       hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
+       hash_for_each_possible(pool->busy_hash, worker, hentry,
                               (unsigned long)work)
                if (worker->current_work == work &&
                    worker->current_func == work->func)
@@ -1626,7 +1625,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 static void rebind_workers(struct worker_pool *pool)
 {
        struct worker *worker, *n;
-       struct hlist_node *pos;
        int i;
 
        lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1646,7 @@ static void rebind_workers(struct worker_pool *pool)
        }
 
        /* rebind busy workers */
-       for_each_busy_worker(worker, i, pos, pool) {
+       for_each_busy_worker(worker, i, pool) {
                struct work_struct *rebind_work = &worker->rebind_work;
                struct workqueue_struct *wq;
 
@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
        int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
-       struct hlist_node *pos;
        int i;
 
        for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
                list_for_each_entry(worker, &pool->idle_list, entry)
                        worker->flags |= WORKER_UNBOUND;
 
-               for_each_busy_worker(worker, i, pos, pool)
+               for_each_busy_worker(worker, i, pool)
                        worker->flags |= WORKER_UNBOUND;
 
                pool->flags |= POOL_DISASSOCIATED;
index e4a7f80..28be08c 100644 (file)
@@ -674,7 +674,7 @@ config STACKTRACE
 
 config DEBUG_STACK_USAGE
        bool "Stack utilization instrumentation"
-       depends on DEBUG_KERNEL && !IA64 && !PARISC
+       depends on DEBUG_KERNEL && !IA64 && !PARISC && !METAG
        help
          Enables the display of the minimum amount of free stack which each
          task has ever had available in the sysrq-T and sysrq-P debug output.
@@ -855,7 +855,7 @@ config FRAME_POINTER
        bool "Compile the kernel with frame pointers"
        depends on DEBUG_KERNEL && \
                (CRIS || M68K || FRV || UML || \
-                AVR32 || SUPERH || BLACKFIN || MN10300) || \
+                AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \
                ARCH_WANT_FRAME_POINTERS
        default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
        help
index dbb58ae..140e878 100644 (file)
@@ -80,4 +80,22 @@ config KDB_KEYBOARD
        help
          KDB can use a PS/2 type keyboard for an input device
 
+config KDB_CONTINUE_CATASTROPHIC
+       int "KDB: continue after catastrophic errors"
+       depends on KGDB_KDB
+       default "0"
+       help
+         This integer controls the behaviour of kdb when the kernel gets a
+         catastrophic error, i.e. for a panic or oops.
+         When KDB is active and a catastrophic error occurs, nothing extra
+         will happen until you type 'go'.
+         CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time
+         you type 'go', you will be warned by kdb. The second time you type
+         'go', KDB tries to continue. No guarantees that the
+         kernel is still usable in this situation.
+         CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue.
+         No guarantees that the kernel is still usable in this situation.
+         CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot.
+         If you are not sure, say 0.
+
 endif # KGDB
index 02ed6c0..d7946ff 100644 (file)
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
         string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-        bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o
+        bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
index 12dceb2..129775e 100644 (file)
@@ -102,6 +102,7 @@ out:
 }
 #endif
 
+#ifndef ip_fast_csum
 /*
  *     This is a version of ip_compute_csum() optimized for IP headers,
  *     which always checksum on 4 octet boundaries.
@@ -111,6 +112,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
        return (__force __sum16)~do_csum(iph, ihl*4);
 }
 EXPORT_SYMBOL(ip_fast_csum);
+#endif
 
 /*
  * computes the checksum of a memory block at buff, length len,
index d11808c..37061ed 100644 (file)
@@ -109,11 +109,10 @@ static void fill_pool(void)
  */
 static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 {
-       struct hlist_node *node;
        struct debug_obj *obj;
        int cnt = 0;
 
-       hlist_for_each_entry(obj, node, &b->list, node) {
+       hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
@@ -213,7 +212,7 @@ static void free_object(struct debug_obj *obj)
 static void debug_objects_oom(void)
 {
        struct debug_bucket *db = obj_hash;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
@@ -227,7 +226,7 @@ static void debug_objects_oom(void)
                raw_spin_unlock_irqrestore(&db->lock, flags);
 
                /* Now free them */
-               hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+               hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
@@ -658,7 +657,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj_descr *descr;
        enum debug_obj_state state;
@@ -678,7 +677,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
-               hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+               hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
@@ -702,7 +701,7 @@ repeat:
                raw_spin_unlock_irqrestore(&db->lock, flags);
 
                /* Now free them */
-               hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+               hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
@@ -1013,7 +1012,7 @@ void __init debug_objects_early_init(void)
 static int __init debug_objects_replace_static_objects(void)
 {
        struct debug_bucket *db = obj_hash;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;
@@ -1033,7 +1032,7 @@ static int __init debug_objects_replace_static_objects(void)
        local_irq_disable();
 
        /* Remove the statically allocated objects from the pool */
-       hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+       hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);
@@ -1042,7 +1041,7 @@ static int __init debug_objects_replace_static_objects(void)
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);
 
-               hlist_for_each_entry(obj, node, &objects, node) {
+               hlist_for_each_entry(obj, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
@@ -1057,7 +1056,7 @@ static int __init debug_objects_replace_static_objects(void)
               obj_pool_used);
        return 0;
 free:
-       hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+       hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
index 4531294..960183d 100644 (file)
@@ -31,7 +31,7 @@
  */
 
 #ifdef STATIC
-#include "lzo/lzo1x_decompress.c"
+#include "lzo/lzo1x_decompress_safe.c"
 #else
 #include <linux/decompress/unlzo.h>
 #endif
index 88ad759..8235331 100644 (file)
@@ -227,6 +227,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
                               devm_ioport_map_match, (void *)addr));
 }
 EXPORT_SYMBOL(devm_ioport_unmap);
+#endif /* CONFIG_HAS_IOPORT */
 
 #ifdef CONFIG_PCI
 /*
@@ -432,4 +433,3 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
 }
 EXPORT_SYMBOL(pcim_iounmap_regions);
 #endif /* CONFIG_PCI */
-#endif /* CONFIG_HAS_IOPORT */
index 6482390..73f4d53 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
 #include <linux/string.h>
 #include <linux/idr.h>
 #include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+
+#define MAX_IDR_SHIFT          (sizeof(int) * 8 - 1)
+#define MAX_IDR_BIT            (1U << MAX_IDR_SHIFT)
+
+/* Leave the possibility of an incomplete final layer */
+#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
+
+/* Number of id_layer structs to leave in free list */
+#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
 
 static struct kmem_cache *idr_layer_cache;
+static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
+static DEFINE_PER_CPU(int, idr_preload_cnt);
 static DEFINE_SPINLOCK(simple_ida_lock);
 
+/* the maximum ID which can be allocated given idr->layers */
+static int idr_max(int layers)
+{
+       int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
+
+       return (1 << bits) - 1;
+}
+
+/*
+ * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
+ * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
+ * so on.
+ */
+static int idr_layer_prefix_mask(int layer)
+{
+       return ~idr_max(layer + 1);
+}
+
 static struct idr_layer *get_from_free_list(struct idr *idp)
 {
        struct idr_layer *p;
@@ -54,6 +85,50 @@ static struct idr_layer *get_from_free_list(struct idr *idp)
        return(p);
 }
 
+/**
+ * idr_layer_alloc - allocate a new idr_layer
+ * @gfp_mask: allocation mask
+ * @layer_idr: optional idr to allocate from
+ *
+ * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
+ * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
+ * an idr_layer from @idr->id_free.
+ *
+ * @layer_idr is to maintain backward compatibility with the old alloc
+ * interface - idr_pre_get() and idr_get_new*() - and will be removed
+ * together with per-pool preload buffer.
+ */
+static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
+{
+       struct idr_layer *new;
+
+       /* this is the old path, bypass to get_from_free_list() */
+       if (layer_idr)
+               return get_from_free_list(layer_idr);
+
+       /* try to allocate directly from kmem_cache */
+       new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+       if (new)
+               return new;
+
+       /*
+        * Try to fetch one from the per-cpu preload buffer if in process
+        * context.  See idr_preload() for details.
+        */
+       if (in_interrupt())
+               return NULL;
+
+       preempt_disable();
+       new = __this_cpu_read(idr_preload_head);
+       if (new) {
+               __this_cpu_write(idr_preload_head, new->ary[0]);
+               __this_cpu_dec(idr_preload_cnt);
+               new->ary[0] = NULL;
+       }
+       preempt_enable();
+       return new;
+}
+
 static void idr_layer_rcu_free(struct rcu_head *head)
 {
        struct idr_layer *layer;
@@ -62,8 +137,10 @@ static void idr_layer_rcu_free(struct rcu_head *head)
        kmem_cache_free(idr_layer_cache, layer);
 }
 
-static inline void free_layer(struct idr_layer *p)
+static inline void free_layer(struct idr *idr, struct idr_layer *p)
 {
+       if (idr->hint && idr->hint == p)
+               RCU_INIT_POINTER(idr->hint, NULL);
        call_rcu(&p->rcu_head, idr_layer_rcu_free);
 }
 
@@ -92,18 +169,18 @@ static void idr_mark_full(struct idr_layer **pa, int id)
        struct idr_layer *p = pa[0];
        int l = 0;
 
-       __set_bit(id & IDR_MASK, &p->bitmap);
+       __set_bit(id & IDR_MASK, p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
-       while (p->bitmap == IDR_FULL) {
+       while (bitmap_full(p->bitmap, IDR_SIZE)) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
-               __set_bit((id & IDR_MASK), &p->bitmap);
+               __set_bit((id & IDR_MASK), p->bitmap);
        }
 }
 
@@ -133,12 +210,29 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(idr_pre_get);
 
-static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
+/**
+ * sub_alloc - try to allocate an id without growing the tree depth
+ * @idp: idr handle
+ * @starting_id: id to start search at
+ * @id: pointer to the allocated handle
+ * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
+ * @gfp_mask: allocation mask for idr_layer_alloc()
+ * @layer_idr: optional idr passed to idr_layer_alloc()
+ *
+ * Allocate an id in range [@starting_id, INT_MAX] from @idp without
+ * growing its depth.  Returns
+ *
+ *  the allocated id >= 0 if successful,
+ *  -EAGAIN if the tree needs to grow for allocation to succeed,
+ *  -ENOSPC if the id space is exhausted,
+ *  -ENOMEM if more idr_layers need to be allocated.
+ */
+static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
+                    gfp_t gfp_mask, struct idr *layer_idr)
 {
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
-       unsigned long bm;
 
        id = *starting_id;
  restart:
@@ -150,8 +244,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
-               bm = ~p->bitmap;
-               m = find_next_bit(&bm, IDR_SIZE, n);
+               m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
@@ -161,7 +254,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
                        /* if already at the top layer, we need to grow */
                        if (id >= 1 << (idp->layers * IDR_BITS)) {
                                *starting_id = id;
-                               return IDR_NEED_TO_GROW;
+                               return -EAGAIN;
                        }
                        p = pa[l];
                        BUG_ON(!p);
@@ -180,17 +273,18 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_IDR_BIT) || (id < 0))
-                       return IDR_NOMORE_SPACE;
+                       return -ENOSPC;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
-                       new = get_from_free_list(idp);
+                       new = idr_layer_alloc(gfp_mask, layer_idr);
                        if (!new)
-                               return -1;
+                               return -ENOMEM;
                        new->layer = l-1;
+                       new->prefix = id & idr_layer_prefix_mask(new->layer);
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
@@ -203,7 +297,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 }
 
 static int idr_get_empty_slot(struct idr *idp, int starting_id,
-                             struct idr_layer **pa)
+                             struct idr_layer **pa, gfp_t gfp_mask,
+                             struct idr *layer_idr)
 {
        struct idr_layer *p, *new;
        int layers, v, id;
@@ -214,8 +309,8 @@ build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
-               if (!(p = get_from_free_list(idp)))
-                       return -1;
+               if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
+                       return -ENOMEM;
                p->layer = 0;
                layers = 1;
        }
@@ -223,7 +318,7 @@ build_up:
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
-       while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+       while (id > idr_max(layers)) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
@@ -231,9 +326,10 @@ build_up:
                         * upwards.
                         */
                        p->layer++;
+                       WARN_ON_ONCE(p->prefix);
                        continue;
                }
-               if (!(new = get_from_free_list(idp))) {
+               if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
@@ -242,45 +338,42 @@ build_up:
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
-                               new->bitmap = new->count = 0;
+                               new->count = 0;
+                               bitmap_clear(new->bitmap, 0, IDR_SIZE);
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
-                       return -1;
+                       return -ENOMEM;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
-               if (p->bitmap == IDR_FULL)
-                       __set_bit(0, &new->bitmap);
+               new->prefix = id & idr_layer_prefix_mask(new->layer);
+               if (bitmap_full(p->bitmap, IDR_SIZE))
+                       __set_bit(0, new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
-       v = sub_alloc(idp, &id, pa);
-       if (v == IDR_NEED_TO_GROW)
+       v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
+       if (v == -EAGAIN)
                goto build_up;
        return(v);
 }
 
-static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+/*
+ * @id and @pa are from a successful allocation from idr_get_empty_slot().
+ * Install the user pointer @ptr and mark the slot full.
+ */
+static void idr_fill_slot(struct idr *idr, void *ptr, int id,
+                         struct idr_layer **pa)
 {
-       struct idr_layer *pa[MAX_IDR_LEVEL];
-       int id;
-
-       id = idr_get_empty_slot(idp, starting_id, pa);
-       if (id >= 0) {
-               /*
-                * Successfully found an empty slot.  Install the user
-                * pointer and mark the slot full.
-                */
-               rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
-                               (struct idr_layer *)ptr);
-               pa[0]->count++;
-               idr_mark_full(pa, id);
-       }
+       /* update hint used for lookup, cleared from free_layer() */
+       rcu_assign_pointer(idr->hint, pa[0]);
 
-       return id;
+       rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
+       pa[0]->count++;
+       idr_mark_full(pa, id);
 }
 
 /**
@@ -303,49 +396,124 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
  */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
+       struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        int rv;
 
-       rv = idr_get_new_above_int(idp, ptr, starting_id);
-       /*
-        * This is a cheap hack until the IDR code can be fixed to
-        * return proper error values.
-        */
+       rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
        if (rv < 0)
-               return _idr_rc_to_errno(rv);
+               return rv == -ENOMEM ? -EAGAIN : rv;
+
+       idr_fill_slot(idp, ptr, rv, pa);
        *id = rv;
        return 0;
 }
 EXPORT_SYMBOL(idr_get_new_above);
 
 /**
- * idr_get_new - allocate new idr entry
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @id: pointer to the allocated handle
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
  *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
+ * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
+ * process context and each idr_preload() invocation should be matched with
+ * idr_preload_end().  Note that preemption is disabled while preloaded.
  *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
+ * The first idr_alloc() in the preloaded section can be treated as if it
+ * were invoked with @gfp_mask used for preloading.  This allows using more
+ * permissive allocation masks for idrs protected by spinlocks.
+ *
+ * For example, if idr_alloc() below fails, the failure can be treated as
+ * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
+ *
+ *     idr_preload(GFP_KERNEL);
+ *     spin_lock(lock);
+ *
+ *     id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
  *
- * @id returns a value in the range %0 ... %0x7fffffff
+ *     spin_unlock(lock);
+ *     idr_preload_end();
+ *     if (id < 0)
+ *             error;
  */
-int idr_get_new(struct idr *idp, void *ptr, int *id)
+void idr_preload(gfp_t gfp_mask)
 {
-       int rv;
+       /*
+        * Consuming preload buffer from non-process context breaks preload
+        * allocation guarantee.  Disallow usage from those contexts.
+        */
+       WARN_ON_ONCE(in_interrupt());
+       might_sleep_if(gfp_mask & __GFP_WAIT);
+
+       preempt_disable();
 
-       rv = idr_get_new_above_int(idp, ptr, 0);
        /*
-        * This is a cheap hack until the IDR code can be fixed to
-        * return proper error values.
+        * idr_alloc() is likely to succeed w/o full idr_layer buffer and
+        * return value from idr_alloc() needs to be checked for failure
+        * anyway.  Silently give up if allocation fails.  The caller can
+        * treat failures from idr_alloc() as if idr_alloc() were called
+        * with @gfp_mask which should be enough.
         */
-       if (rv < 0)
-               return _idr_rc_to_errno(rv);
-       *id = rv;
-       return 0;
+       while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
+               struct idr_layer *new;
+
+               preempt_enable();
+               new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+               preempt_disable();
+               if (!new)
+                       break;
+
+               /* link the new one to per-cpu preload list */
+               new->ary[0] = __this_cpu_read(idr_preload_head);
+               __this_cpu_write(idr_preload_head, new);
+               __this_cpu_inc(idr_preload_cnt);
+       }
 }
-EXPORT_SYMBOL(idr_get_new);
+EXPORT_SYMBOL(idr_preload);
+
+/**
+ * idr_alloc - allocate new idr entry
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
+ * available in the specified range, returns -ENOSPC.  On memory allocation
+ * failure, returns -ENOMEM.
+ *
+ * Note that @end is treated as max when <= 0.  This is to always allow
+ * using @start + N as @end as long as N is inside integer range.
+ *
+ * The user is responsible for exclusively synchronizing all operations
+ * which may modify @idr.  However, read-only accesses such as idr_find()
+ * or iteration can be performed under RCU read lock provided the user
+ * destroys @ptr in RCU-safe way after removal from idr.
+ */
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+       int max = end > 0 ? end - 1 : INT_MAX;  /* inclusive upper limit */
+       struct idr_layer *pa[MAX_IDR_LEVEL + 1];
+       int id;
+
+       might_sleep_if(gfp_mask & __GFP_WAIT);
+
+       /* sanity checks */
+       if (WARN_ON_ONCE(start < 0))
+               return -EINVAL;
+       if (unlikely(max < start))
+               return -ENOSPC;
+
+       /* allocate id */
+       id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
+       if (unlikely(id < 0))
+               return id;
+       if (unlikely(id > max))
+               return -ENOSPC;
+
+       idr_fill_slot(idr, ptr, id, pa);
+       return id;
+}
+EXPORT_SYMBOL_GPL(idr_alloc);
 
 static void idr_remove_warning(int id)
 {
@@ -357,7 +525,7 @@ static void idr_remove_warning(int id)
 static void sub_remove(struct idr *idp, int shift, int id)
 {
        struct idr_layer *p = idp->top;
-       struct idr_layer **pa[MAX_IDR_LEVEL];
+       struct idr_layer **pa[MAX_IDR_LEVEL + 1];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;
@@ -367,26 +535,26 @@ static void sub_remove(struct idr *idp, int shift, int id)
 
        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
-               __clear_bit(n, &p->bitmap);
+               __clear_bit(n, p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
-       if (likely(p != NULL && test_bit(n, &p->bitmap))){
-               __clear_bit(n, &p->bitmap);
+       if (likely(p != NULL && test_bit(n, p->bitmap))) {
+               __clear_bit(n, p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while(*paa && ! --((**paa)->count)){
                        if (to_free)
-                               free_layer(to_free);
+                               free_layer(idp, to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
-                       free_layer(to_free);
+                       free_layer(idp, to_free);
        } else
                idr_remove_warning(id);
 }
@@ -401,8 +569,9 @@ void idr_remove(struct idr *idp, int id)
        struct idr_layer *p;
        struct idr_layer *to_free;
 
-       /* Mask off upper bits we don't use for the search. */
-       id &= MAX_IDR_MASK;
+       /* see comment in idr_find_slowpath() */
+       if (WARN_ON_ONCE(id < 0))
+               return;
 
        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -417,8 +586,9 @@ void idr_remove(struct idr *idp, int id)
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
-               to_free->bitmap = to_free->count = 0;
-               free_layer(to_free);
+               to_free->count = 0;
+               bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
+               free_layer(idp, to_free);
        }
        while (idp->id_free_cnt >= MAX_IDR_FREE) {
                p = get_from_free_list(idp);
@@ -433,34 +603,21 @@ void idr_remove(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_remove);
 
-/**
- * idr_remove_all - remove all ids from the given idr tree
- * @idp: idr handle
- *
- * idr_destroy() only frees up unused, cached idp_layers, but this
- * function will remove all id mappings and leave all idp_layers
- * unused.
- *
- * A typical clean-up sequence for objects stored in an idr tree will
- * use idr_for_each() to free all objects, if necessay, then
- * idr_remove_all() to remove all ids, and idr_destroy() to free
- * up the cached idr_layers.
- */
-void idr_remove_all(struct idr *idp)
+void __idr_remove_all(struct idr *idp)
 {
        int n, id, max;
        int bt_mask;
        struct idr_layer *p;
-       struct idr_layer *pa[MAX_IDR_LEVEL];
+       struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];
 
        n = idp->layers * IDR_BITS;
        p = idp->top;
        rcu_assign_pointer(idp->top, NULL);
-       max = 1 << n;
+       max = idr_max(idp->layers);
 
        id = 0;
-       while (id < max) {
+       while (id >= 0 && id <= max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
@@ -472,21 +629,32 @@ void idr_remove_all(struct idr *idp)
                /* Get the highest bit that the above add changed from 0->1. */
                while (n < fls(id ^ bt_mask)) {
                        if (p)
-                               free_layer(p);
+                               free_layer(idp, p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->layers = 0;
 }
-EXPORT_SYMBOL(idr_remove_all);
+EXPORT_SYMBOL(__idr_remove_all);
 
 /**
  * idr_destroy - release all cached layers within an idr tree
  * @idp: idr handle
+ *
+ * Free all id mappings and all idp_layers.  After this function, @idp is
+ * completely unused and can be freed / recycled.  The caller is
+ * responsible for ensuring that no one else accesses @idp during or after
+ * idr_destroy().
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will use
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
+ * free up the id mappings and cached idr_layers.
  */
 void idr_destroy(struct idr *idp)
 {
+       __idr_remove_all(idp);
+
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
@@ -494,32 +662,28 @@ void idr_destroy(struct idr *idp)
 }
 EXPORT_SYMBOL(idr_destroy);
 
-/**
- * idr_find - return pointer for given id
- * @idp: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with.  A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
- */
-void *idr_find(struct idr *idp, int id)
+void *idr_find_slowpath(struct idr *idp, int id)
 {
        int n;
        struct idr_layer *p;
 
+       /*
+        * If @id is negative, idr_find() used to ignore the sign bit and
+        * performed lookup with the rest of bits, which is weird and can
+        * lead to very obscure bugs.  We're now returning NULL for all
+        * negative IDs but just in case somebody was depending on the sign
+        * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
+        * be detected and fixed.  WARN_ON_ONCE() can later be removed.
+        */
+       if (WARN_ON_ONCE(id < 0))
+               return NULL;
+
        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;
 
-       /* Mask off upper bits we don't use for the search. */
-       id &= MAX_IDR_MASK;
-
-       if (id >= (1 << n))
+       if (id > idr_max(p->layer + 1))
                return NULL;
        BUG_ON(n == 0);
 
@@ -530,7 +694,7 @@ void *idr_find(struct idr *idp, int id)
        }
        return((void *)p);
 }
-EXPORT_SYMBOL(idr_find);
+EXPORT_SYMBOL(idr_find_slowpath);
 
 /**
  * idr_for_each - iterate through all stored pointers
@@ -555,15 +719,15 @@ int idr_for_each(struct idr *idp,
 {
        int n, id, max, error = 0;
        struct idr_layer *p;
-       struct idr_layer *pa[MAX_IDR_LEVEL];
+       struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];
 
        n = idp->layers * IDR_BITS;
        p = rcu_dereference_raw(idp->top);
-       max = 1 << n;
+       max = idr_max(idp->layers);
 
        id = 0;
-       while (id < max) {
+       while (id >= 0 && id <= max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
@@ -601,7 +765,7 @@ EXPORT_SYMBOL(idr_for_each);
  */
 void *idr_get_next(struct idr *idp, int *nextidp)
 {
-       struct idr_layer *p, *pa[MAX_IDR_LEVEL];
+       struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
        struct idr_layer **paa = &pa[0];
        int id = *nextidp;
        int n, max;
@@ -611,9 +775,9 @@ void *idr_get_next(struct idr *idp, int *nextidp)
        if (!p)
                return NULL;
        n = (p->layer + 1) * IDR_BITS;
-       max = 1 << n;
+       max = idr_max(p->layer + 1);
 
-       while (id < max) {
+       while (id >= 0 && id <= max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
@@ -625,7 +789,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
                        return p;
                }
 
-               id += 1 << n;
+               /*
+                * Proceed to the next layer at the current level.  Unlike
+                * idr_for_each(), @id isn't guaranteed to be aligned to
+                * layer boundary at this point and adding 1 << n may
+                * incorrectly skip IDs.  Make sure we jump to the
+                * beginning of the next layer using round_up().
+                */
+               id = round_up(id + 1, 1 << n);
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
@@ -653,14 +824,16 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
        int n;
        struct idr_layer *p, *old_p;
 
+       /* see comment in idr_find_slowpath() */
+       if (WARN_ON_ONCE(id < 0))
+               return ERR_PTR(-EINVAL);
+
        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);
 
        n = (p->layer+1) * IDR_BITS;
 
-       id &= MAX_IDR_MASK;
-
        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);
 
@@ -671,7 +844,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
        }
 
        n = id & IDR_MASK;
-       if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+       if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
                return ERR_PTR(-ENOENT);
 
        old_p = p->ary[n];
@@ -780,7 +953,7 @@ EXPORT_SYMBOL(ida_pre_get);
  */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
-       struct idr_layer *pa[MAX_IDR_LEVEL];
+       struct idr_layer *pa[MAX_IDR_LEVEL + 1];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -789,9 +962,9 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 
  restart:
        /* get vacant slot */
-       t = idr_get_empty_slot(&ida->idr, idr_id, pa);
+       t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
        if (t < 0)
-               return _idr_rc_to_errno(t);
+               return t == -ENOMEM ? -EAGAIN : t;
 
        if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
                return -ENOSPC;
@@ -851,25 +1024,6 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 }
 EXPORT_SYMBOL(ida_get_new_above);
 
-/**
- * ida_get_new - allocate new ID
- * @ida:       idr handle
- * @p_id:      pointer to the allocated handle
- *
- * Allocate new ID.  It should be called with any required locks.
- *
- * If memory is required, it will return %-EAGAIN, you should unlock
- * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return %-ENOSPC.
- *
- * @p_id returns a value in the range %0 ... %0x7fffffff.
- */
-int ida_get_new(struct ida *ida, int *p_id)
-{
-       return ida_get_new_above(ida, 0, p_id);
-}
-EXPORT_SYMBOL(ida_get_new);
-
 /**
  * ida_remove - remove the given ID
  * @ida:       ida handle
@@ -887,7 +1041,7 @@ void ida_remove(struct ida *ida, int id)
        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
-               __clear_bit(n, &p->bitmap);
+               __clear_bit(n, p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }
@@ -896,7 +1050,7 @@ void ida_remove(struct ida *ida, int id)
                goto err;
 
        n = idr_id & IDR_MASK;
-       __clear_bit(n, &p->bitmap);
+       __clear_bit(n, p->bitmap);
 
        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
@@ -905,7 +1059,7 @@ void ida_remove(struct ida *ida, int id)
        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
-               __set_bit(n, &p->bitmap);       /* to please idr_remove() */
+               __set_bit(n, p->bitmap);        /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }
diff --git a/lib/kfifo.c b/lib/kfifo.c
new file mode 100644 (file)
index 0000000..7b7f830
--- /dev/null
@@ -0,0 +1,607 @@
+/*
+ * A generic kernel FIFO implementation
+ *
+ * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/uaccess.h>
+#include <linux/kfifo.h>
+
+/*
+ * internal helper to calculate the unused elements in a fifo
+ */
+static inline unsigned int kfifo_unused(struct __kfifo *fifo)
+{
+       return (fifo->mask + 1) - (fifo->in - fifo->out);
+}
+
+int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+               size_t esize, gfp_t gfp_mask)
+{
+       /*
+        * round down to the next power of 2, since our 'let the indices
+        * wrap' technique works only in this case.
+        */
+       size = roundup_pow_of_two(size);
+
+       fifo->in = 0;
+       fifo->out = 0;
+       fifo->esize = esize;
+
+       if (size < 2) {
+               fifo->data = NULL;
+               fifo->mask = 0;
+               return -EINVAL;
+       }
+
+       fifo->data = kmalloc(size * esize, gfp_mask);
+
+       if (!fifo->data) {
+               fifo->mask = 0;
+               return -ENOMEM;
+       }
+       fifo->mask = size - 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(__kfifo_alloc);
+
+void __kfifo_free(struct __kfifo *fifo)
+{
+       kfree(fifo->data);
+       fifo->in = 0;
+       fifo->out = 0;
+       fifo->esize = 0;
+       fifo->data = NULL;
+       fifo->mask = 0;
+}
+EXPORT_SYMBOL(__kfifo_free);
+
+int __kfifo_init(struct __kfifo *fifo, void *buffer,
+               unsigned int size, size_t esize)
+{
+       size /= esize;
+
+       size = roundup_pow_of_two(size);
+
+       fifo->in = 0;
+       fifo->out = 0;
+       fifo->esize = esize;
+       fifo->data = buffer;
+
+       if (size < 2) {
+               fifo->mask = 0;
+               return -EINVAL;
+       }
+       fifo->mask = size - 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(__kfifo_init);
+
+static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
+               unsigned int len, unsigned int off)
+{
+       unsigned int size = fifo->mask + 1;
+       unsigned int esize = fifo->esize;
+       unsigned int l;
+
+       off &= fifo->mask;
+       if (esize != 1) {
+               off *= esize;
+               size *= esize;
+               len *= esize;
+       }
+       l = min(len, size - off);
+
+       memcpy(fifo->data + off, src, l);
+       memcpy(fifo->data, src + l, len - l);
+       /*
+        * make sure that the data in the fifo is up to date before
+        * incrementing the fifo->in index counter
+        */
+       smp_wmb();
+}
+
+unsigned int __kfifo_in(struct __kfifo *fifo,
+               const void *buf, unsigned int len)
+{
+       unsigned int l;
+
+       l = kfifo_unused(fifo);
+       if (len > l)
+               len = l;
+
+       kfifo_copy_in(fifo, buf, len, fifo->in);
+       fifo->in += len;
+       return len;
+}
+EXPORT_SYMBOL(__kfifo_in);
+
+static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
+               unsigned int len, unsigned int off)
+{
+       unsigned int size = fifo->mask + 1;
+       unsigned int esize = fifo->esize;
+       unsigned int l;
+
+       off &= fifo->mask;
+       if (esize != 1) {
+               off *= esize;
+               size *= esize;
+               len *= esize;
+       }
+       l = min(len, size - off);
+
+       memcpy(dst, fifo->data + off, l);
+       memcpy(dst + l, fifo->data, len - l);
+       /*
+        * make sure that the data is copied before
+        * incrementing the fifo->out index counter
+        */
+       smp_wmb();
+}
+
+unsigned int __kfifo_out_peek(struct __kfifo *fifo,
+               void *buf, unsigned int len)
+{
+       unsigned int l;
+
+       l = fifo->in - fifo->out;
+       if (len > l)
+               len = l;
+
+       kfifo_copy_out(fifo, buf, len, fifo->out);
+       return len;
+}
+EXPORT_SYMBOL(__kfifo_out_peek);
+
+unsigned int __kfifo_out(struct __kfifo *fifo,
+               void *buf, unsigned int len)
+{
+       len = __kfifo_out_peek(fifo, buf, len);
+       fifo->out += len;
+       return len;
+}
+EXPORT_SYMBOL(__kfifo_out);
+
+static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
+       const void __user *from, unsigned int len, unsigned int off,
+       unsigned int *copied)
+{
+       unsigned int size = fifo->mask + 1;
+       unsigned int esize = fifo->esize;
+       unsigned int l;
+       unsigned long ret;
+
+       off &= fifo->mask;
+       if (esize != 1) {
+               off *= esize;
+               size *= esize;
+               len *= esize;
+       }
+       l = min(len, size - off);
+
+       ret = copy_from_user(fifo->data + off, from, l);
+       if (unlikely(ret))
+               ret = DIV_ROUND_UP(ret + len - l, esize);
+       else {
+               ret = copy_from_user(fifo->data, from + l, len - l);
+               if (unlikely(ret))
+                       ret = DIV_ROUND_UP(ret, esize);
+       }
+       /*
+        * make sure that the data in the fifo is up to date before
+        * incrementing the fifo->in index counter
+        */
+       smp_wmb();
+       *copied = len - ret;
+       /* return the number of elements which are not copied */
+       return ret;
+}
+
+int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
+               unsigned long len, unsigned int *copied)
+{
+       unsigned int l;
+       unsigned long ret;
+       unsigned int esize = fifo->esize;
+       int err;
+
+       if (esize != 1)
+               len /= esize;
+
+       l = kfifo_unused(fifo);
+       if (len > l)
+               len = l;
+
+       ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
+       if (unlikely(ret)) {
+               len -= ret;
+               err = -EFAULT;
+       } else
+               err = 0;
+       fifo->in += len;
+       return err;
+}
+EXPORT_SYMBOL(__kfifo_from_user);
+
+static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
+               unsigned int len, unsigned int off, unsigned int *copied)
+{
+       unsigned int l;
+       unsigned long ret;
+       unsigned int size = fifo->mask + 1;
+       unsigned int esize = fifo->esize;
+
+       off &= fifo->mask;
+       if (esize != 1) {
+               off *= esize;
+               size *= esize;
+               len *= esize;
+       }
+       l = min(len, size - off);
+
+       ret = copy_to_user(to, fifo->data + off, l);
+       if (unlikely(ret))
+               ret = DIV_ROUND_UP(ret + len - l, esize);
+       else {
+               ret = copy_to_user(to + l, fifo->data, len - l);
+               if (unlikely(ret))
+                       ret = DIV_ROUND_UP(ret, esize);
+       }
+       /*
+        * make sure that the data is copied before
+        * incrementing the fifo->out index counter
+        */
+       smp_wmb();
+       *copied = len - ret;
+       /* return the number of elements which are not copied */
+       return ret;
+}
+
+int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
+               unsigned long len, unsigned int *copied)
+{
+       unsigned int l;
+       unsigned long ret;
+       unsigned int esize = fifo->esize;
+       int err;
+
+       if (esize != 1)
+               len /= esize;
+
+       l = fifo->in - fifo->out;
+       if (len > l)
+               len = l;
+       ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
+       if (unlikely(ret)) {
+               len -= ret;
+               err = -EFAULT;
+       } else
+               err = 0;
+       fifo->out += len;
+       return err;
+}
+EXPORT_SYMBOL(__kfifo_to_user);
+
+static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
+               int nents, unsigned int len)
+{
+       int n;
+       unsigned int l;
+       unsigned int off;
+       struct page *page;
+
+       if (!nents)
+               return 0;
+
+       if (!len)
+               return 0;
+
+       n = 0;
+       page = virt_to_page(buf);
+       off = offset_in_page(buf);
+       l = 0;
+
+       while (len >= l + PAGE_SIZE - off) {
+               struct page *npage;
+
+               l += PAGE_SIZE;
+               buf += PAGE_SIZE;
+               npage = virt_to_page(buf);
+               if (page_to_phys(page) != page_to_phys(npage) - l) {
+                       sg_set_page(sgl, page, l - off, off);
+                       sgl = sg_next(sgl);
+                       if (++n == nents || sgl == NULL)
+                               return n;
+                       page = npage;
+                       len -= l - off;
+                       l = off = 0;
+               }
+       }
+       sg_set_page(sgl, page, len, off);
+       return n + 1;
+}
+
+static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
+               int nents, unsigned int len, unsigned int off)
+{
+       unsigned int size = fifo->mask + 1;
+       unsigned int esize = fifo->esize;
+       unsigned int l;
+       unsigned int n;
+
+       off &= fifo->mask;
+       if (esize != 1) {
+               off *= esize;
+               size *= esize;
+               len *= esize;
+       }
+       l = min(len, size - off);
+
+       n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
+       n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+
+       return n;
+}
+
+unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
+               struct scatterlist *sgl, int nents, unsigned int len)
+{
+       unsigned int l;
+
+       l = kfifo_unused(fifo);
+       if (len > l)
+               len = l;
+
+       return setup_sgl(fifo, sgl, nents, len, fifo->in);
+}
+EXPORT_SYMBOL(__kfifo_dma_in_prepare);
+
+unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
+               struct scatterlist *sgl, int nents, unsigned int len)
+{
+       unsigned int l;
+
+       l = fifo->in - fifo->out;
+       if (len > l)
+               len = l;
+
+       return setup_sgl(fifo, sgl, nents, len, fifo->out);
+}
+EXPORT_SYMBOL(__kfifo_dma_out_prepare);
+
+unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
+{
+       unsigned int max = (1 << (recsize << 3)) - 1;
+
+       if (len > max)
+               return max;
+       return len;
+}
+EXPORT_SYMBOL(__kfifo_max_r);
+
+#define        __KFIFO_PEEK(data, out, mask) \
+       ((data)[(out) & (mask)])
+/*
+ * __kfifo_peek_n internal helper function to determine the length of
+ * the next record in the fifo
+ */
+static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
+{
+       unsigned int l;
+       unsigned int mask = fifo->mask;
+       unsigned char *data = fifo->data;
+
+       l = __KFIFO_PEEK(data, fifo->out, mask);
+
+       if (--recsize)
+               l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
+
+       return l;
+}
+
+#define        __KFIFO_POKE(data, in, mask, val) \
+       ( \
+       (data)[(in) & (mask)] = (unsigned char)(val) \
+       )
+
+/*
+ * __kfifo_poke_n internal helper function for storing the length of
+ * the record into the fifo
+ */
+static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
+{
+       unsigned int mask = fifo->mask;
+       unsigned char *data = fifo->data;
+
+       __KFIFO_POKE(data, fifo->in, mask, n);
+
+       if (recsize > 1)
+               __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
+}
+
+unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
+{
+       return __kfifo_peek_n(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_len_r);
+
+unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
+               unsigned int len, size_t recsize)
+{
+       if (len + recsize > kfifo_unused(fifo))
+               return 0;
+
+       __kfifo_poke_n(fifo, len, recsize);
+
+       kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
+       fifo->in += len + recsize;
+       return len;
+}
+EXPORT_SYMBOL(__kfifo_in_r);
+
+static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
+       void *buf, unsigned int len, size_t recsize, unsigned int *n)
+{
+       *n = __kfifo_peek_n(fifo, recsize);
+
+       if (len > *n)
+               len = *n;
+
+       kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
+       return len;
+}
+
+unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
+               unsigned int len, size_t recsize)
+{
+       unsigned int n;
+
+       if (fifo->in == fifo->out)
+               return 0;
+
+       return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+}
+EXPORT_SYMBOL(__kfifo_out_peek_r);
+
+unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
+               unsigned int len, size_t recsize)
+{
+       unsigned int n;
+
+       if (fifo->in == fifo->out)
+               return 0;
+
+       len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+       fifo->out += n + recsize;
+       return len;
+}
+EXPORT_SYMBOL(__kfifo_out_r);
+
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+       unsigned int n;
+
+       n = __kfifo_peek_n(fifo, recsize);
+       fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
+int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
+       unsigned long len, unsigned int *copied, size_t recsize)
+{
+       unsigned long ret;
+
+       len = __kfifo_max_r(len, recsize);
+
+       if (len + recsize > kfifo_unused(fifo)) {
+               *copied = 0;
+               return 0;
+       }
+
+       __kfifo_poke_n(fifo, len, recsize);
+
+       ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
+       if (unlikely(ret)) {
+               *copied = 0;
+               return -EFAULT;
+       }
+       fifo->in += len + recsize;
+       return 0;
+}
+EXPORT_SYMBOL(__kfifo_from_user_r);
+
+int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
+       unsigned long len, unsigned int *copied, size_t recsize)
+{
+       unsigned long ret;
+       unsigned int n;
+
+       if (fifo->in == fifo->out) {
+               *copied = 0;
+               return 0;
+       }
+
+       n = __kfifo_peek_n(fifo, recsize);
+       if (len > n)
+               len = n;
+
+       ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
+       if (unlikely(ret)) {
+               *copied = 0;
+               return -EFAULT;
+       }
+       fifo->out += n + recsize;
+       return 0;
+}
+EXPORT_SYMBOL(__kfifo_to_user_r);
+
+unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
+       struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+{
+       if (!nents)
+               BUG();
+
+       len = __kfifo_max_r(len, recsize);
+
+       if (len + recsize > kfifo_unused(fifo))
+               return 0;
+
+       return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
+}
+EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
+
+void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
+       unsigned int len, size_t recsize)
+{
+       len = __kfifo_max_r(len, recsize);
+       __kfifo_poke_n(fifo, len, recsize);
+       fifo->in += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
+
+unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
+       struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+{
+       if (!nents)
+               BUG();
+
+       len = __kfifo_max_r(len, recsize);
+
+       if (len + recsize > fifo->in - fifo->out)
+               return 0;
+
+       return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
+}
+EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
+
+void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
+{
+       unsigned int len;
+
+       len = __kfifo_peek_n(fifo, recsize);
+       fifo->out += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
index d71d894..8335d39 100644 (file)
@@ -262,12 +262,11 @@ static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
 static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
                bool include_changing)
 {
-       struct hlist_node *n;
        struct lc_element *e;
 
        BUG_ON(!lc);
        BUG_ON(!lc->nr_elements);
-       hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
+       hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
                /* "about to be changed" elements, pending transaction commit,
                 * are hashed by their "new number". "Normal" elements have
                 * lc_number == lc_new_number. */
index e764116..f0f7d7c 100644 (file)
@@ -1,5 +1,5 @@
 lzo_compress-objs := lzo1x_compress.o
-lzo_decompress-objs := lzo1x_decompress.o
+lzo_decompress-objs := lzo1x_decompress_safe.o
 
 obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o
index a604099..236eb21 100644 (file)
 /*
- *  LZO1X Compressor from MiniLZO
+ *  LZO1X Compressor from LZO
  *
- *  Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *  Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
  *
  *  The full LZO package can be found at:
  *  http://www.oberhumer.com/opensource/lzo/
  *
- *  Changed for kernel use by:
+ *  Changed for Linux kernel use by:
  *  Nitin Gupta <nitingupta910@gmail.com>
  *  Richard Purdie <rpurdie@openedhand.com>
  */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/lzo.h>
 #include <asm/unaligned.h>
+#include <linux/lzo.h>
 #include "lzodefs.h"
 
 static noinline size_t
-_lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
-               unsigned char *out, size_t *out_len, void *wrkmem)
+lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+                   unsigned char *out, size_t *out_len,
+                   size_t ti, void *wrkmem)
 {
+       const unsigned char *ip;
+       unsigned char *op;
        const unsigned char * const in_end = in + in_len;
-       const unsigned char * const ip_end = in + in_len - M2_MAX_LEN - 5;
-       const unsigned char ** const dict = wrkmem;
-       const unsigned char *ip = in, *ii = ip;
-       const unsigned char *end, *m, *m_pos;
-       size_t m_off, m_len, dindex;
-       unsigned char *op = out;
+       const unsigned char * const ip_end = in + in_len - 20;
+       const unsigned char *ii;
+       lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
 
-       ip += 4;
+       op = out;
+       ip = in;
+       ii = ip;
+       ip += ti < 4 ? 4 - ti : 0;
 
        for (;;) {
-               dindex = ((size_t)(0x21 * DX3(ip, 5, 5, 6)) >> 5) & D_MASK;
-               m_pos = dict[dindex];
-
-               if (m_pos < in)
-                       goto literal;
-
-               if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET))
-                       goto literal;
-
-               m_off = ip - m_pos;
-               if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3])
-                       goto try_match;
-
-               dindex = (dindex & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f);
-               m_pos = dict[dindex];
-
-               if (m_pos < in)
-                       goto literal;
-
-               if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET))
-                       goto literal;
-
-               m_off = ip - m_pos;
-               if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3])
-                       goto try_match;
-
-               goto literal;
-
-try_match:
-               if (get_unaligned((const unsigned short *)m_pos)
-                               == get_unaligned((const unsigned short *)ip)) {
-                       if (likely(m_pos[2] == ip[2]))
-                                       goto match;
-               }
-
+               const unsigned char *m_pos;
+               size_t t, m_len, m_off;
+               u32 dv;
 literal:
-               dict[dindex] = ip;
-               ++ip;
+               ip += 1 + ((ip - ii) >> 5);
+next:
                if (unlikely(ip >= ip_end))
                        break;
-               continue;
-
-match:
-               dict[dindex] = ip;
-               if (ip != ii) {
-                       size_t t = ip - ii;
+               dv = get_unaligned_le32(ip);
+               t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
+               m_pos = in + dict[t];
+               dict[t] = (lzo_dict_t) (ip - in);
+               if (unlikely(dv != get_unaligned_le32(m_pos)))
+                       goto literal;
 
+               ii -= ti;
+               ti = 0;
+               t = ip - ii;
+               if (t != 0) {
                        if (t <= 3) {
                                op[-2] |= t;
-                       } else if (t <= 18) {
+                               COPY4(op, ii);
+                               op += t;
+                       } else if (t <= 16) {
                                *op++ = (t - 3);
+                               COPY8(op, ii);
+                               COPY8(op + 8, ii + 8);
+                               op += t;
                        } else {
-                               size_t tt = t - 18;
-
-                               *op++ = 0;
-                               while (tt > 255) {
-                                       tt -= 255;
+                               if (t <= 18) {
+                                       *op++ = (t - 3);
+                               } else {
+                                       size_t tt = t - 18;
                                        *op++ = 0;
+                                       while (unlikely(tt > 255)) {
+                                               tt -= 255;
+                                               *op++ = 0;
+                                       }
+                                       *op++ = tt;
                                }
-                               *op++ = tt;
+                               do {
+                                       COPY8(op, ii);
+                                       COPY8(op + 8, ii + 8);
+                                       op += 16;
+                                       ii += 16;
+                                       t -= 16;
+                               } while (t >= 16);
+                               if (t > 0) do {
+                                       *op++ = *ii++;
+                               } while (--t > 0);
                        }
-                       do {
-                               *op++ = *ii++;
-                       } while (--t > 0);
                }
 
-               ip += 3;
-               if (m_pos[3] != *ip++ || m_pos[4] != *ip++
-                               || m_pos[5] != *ip++ || m_pos[6] != *ip++
-                               || m_pos[7] != *ip++ || m_pos[8] != *ip++) {
-                       --ip;
-                       m_len = ip - ii;
+               m_len = 4;
+               {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
+               u64 v;
+               v = get_unaligned((const u64 *) (ip + m_len)) ^
+                   get_unaligned((const u64 *) (m_pos + m_len));
+               if (unlikely(v == 0)) {
+                       do {
+                               m_len += 8;
+                               v = get_unaligned((const u64 *) (ip + m_len)) ^
+                                   get_unaligned((const u64 *) (m_pos + m_len));
+                               if (unlikely(ip + m_len >= ip_end))
+                                       goto m_len_done;
+                       } while (v == 0);
+               }
+#  if defined(__LITTLE_ENDIAN)
+               m_len += (unsigned) __builtin_ctzll(v) / 8;
+#  elif defined(__BIG_ENDIAN)
+               m_len += (unsigned) __builtin_clzll(v) / 8;
+#  else
+#    error "missing endian definition"
+#  endif
+#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32)
+               u32 v;
+               v = get_unaligned((const u32 *) (ip + m_len)) ^
+                   get_unaligned((const u32 *) (m_pos + m_len));
+               if (unlikely(v == 0)) {
+                       do {
+                               m_len += 4;
+                               v = get_unaligned((const u32 *) (ip + m_len)) ^
+                                   get_unaligned((const u32 *) (m_pos + m_len));
+                               if (v != 0)
+                                       break;
+                               m_len += 4;
+                               v = get_unaligned((const u32 *) (ip + m_len)) ^
+                                   get_unaligned((const u32 *) (m_pos + m_len));
+                               if (unlikely(ip + m_len >= ip_end))
+                                       goto m_len_done;
+                       } while (v == 0);
+               }
+#  if defined(__LITTLE_ENDIAN)
+               m_len += (unsigned) __builtin_ctz(v) / 8;
+#  elif defined(__BIG_ENDIAN)
+               m_len += (unsigned) __builtin_clz(v) / 8;
+#  else
+#    error "missing endian definition"
+#  endif
+#else
+               if (unlikely(ip[m_len] == m_pos[m_len])) {
+                       do {
+                               m_len += 1;
+                               if (ip[m_len] != m_pos[m_len])
+                                       break;
+                               m_len += 1;
+                               if (ip[m_len] != m_pos[m_len])
+                                       break;
+                               m_len += 1;
+                               if (ip[m_len] != m_pos[m_len])
+                                       break;
+                               m_len += 1;
+                               if (ip[m_len] != m_pos[m_len])
+                                       break;
+                               m_len += 1;
+                               if (ip[m_len] != m_pos[m_len])
+                                       break;
+                               m_len += 1;
+                               if (ip[m_len] != m_pos[m_len])
+                                       break;
+                               m_len += 1;
+                               if (ip[m_len] != m_pos[m_len])
+                                       break;
+                               m_len += 1;
+                               if (unlikely(ip + m_len >= ip_end))
+                                       goto m_len_done;
+                       } while (ip[m_len] == m_pos[m_len]);
+               }
+#endif
+               }
+m_len_done:
 
-                       if (m_off <= M2_MAX_OFFSET) {
-                               m_off -= 1;
-                               *op++ = (((m_len - 1) << 5)
-                                               | ((m_off & 7) << 2));
-                               *op++ = (m_off >> 3);
-                       } else if (m_off <= M3_MAX_OFFSET) {
-                               m_off -= 1;
+               m_off = ip - m_pos;
+               ip += m_len;
+               ii = ip;
+               if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
+                       m_off -= 1;
+                       *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
+                       *op++ = (m_off >> 3);
+               } else if (m_off <= M3_MAX_OFFSET) {
+                       m_off -= 1;
+                       if (m_len <= M3_MAX_LEN)
                                *op++ = (M3_MARKER | (m_len - 2));
-                               goto m3_m4_offset;
-                       } else {
-                               m_off -= 0x4000;
-
-                               *op++ = (M4_MARKER | ((m_off & 0x4000) >> 11)
-                                               | (m_len - 2));
-                               goto m3_m4_offset;
+                       else {
+                               m_len -= M3_MAX_LEN;
+                               *op++ = M3_MARKER | 0;
+                               while (unlikely(m_len > 255)) {
+                                       m_len -= 255;
+                                       *op++ = 0;
+                               }
+                               *op++ = (m_len);
                        }
+                       *op++ = (m_off << 2);
+                       *op++ = (m_off >> 6);
                } else {
-                       end = in_end;
-                       m = m_pos + M2_MAX_LEN + 1;
-
-                       while (ip < end && *m == *ip) {
-                               m++;
-                               ip++;
-                       }
-                       m_len = ip - ii;
-
-                       if (m_off <= M3_MAX_OFFSET) {
-                               m_off -= 1;
-                               if (m_len <= 33) {
-                                       *op++ = (M3_MARKER | (m_len - 2));
-                               } else {
-                                       m_len -= 33;
-                                       *op++ = M3_MARKER | 0;
-                                       goto m3_m4_len;
-                               }
-                       } else {
-                               m_off -= 0x4000;
-                               if (m_len <= M4_MAX_LEN) {
-                                       *op++ = (M4_MARKER
-                                               | ((m_off & 0x4000) >> 11)
+                       m_off -= 0x4000;
+                       if (m_len <= M4_MAX_LEN)
+                               *op++ = (M4_MARKER | ((m_off >> 11) & 8)
                                                | (m_len - 2));
-                               } else {
-                                       m_len -= M4_MAX_LEN;
-                                       *op++ = (M4_MARKER
-                                               | ((m_off & 0x4000) >> 11));
-m3_m4_len:
-                                       while (m_len > 255) {
-                                               m_len -= 255;
-                                               *op++ = 0;
-                                       }
-
-                                       *op++ = (m_len);
+                       else {
+                               m_len -= M4_MAX_LEN;
+                               *op++ = (M4_MARKER | ((m_off >> 11) & 8));
+                               while (unlikely(m_len > 255)) {
+                                       m_len -= 255;
+                                       *op++ = 0;
                                }
+                               *op++ = (m_len);
                        }
-m3_m4_offset:
-                       *op++ = ((m_off & 63) << 2);
+                       *op++ = (m_off << 2);
                        *op++ = (m_off >> 6);
                }
-
-               ii = ip;
-               if (unlikely(ip >= ip_end))
-                       break;
+               goto next;
        }
-
        *out_len = op - out;
-       return in_end - ii;
+       return in_end - (ii - ti);
 }
 
-int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out,
-                       size_t *out_len, void *wrkmem)
+int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+                    unsigned char *out, size_t *out_len,
+                    void *wrkmem)
 {
-       const unsigned char *ii;
+       const unsigned char *ip = in;
        unsigned char *op = out;
-       size_t t;
+       size_t l = in_len;
+       size_t t = 0;
 
-       if (unlikely(in_len <= M2_MAX_LEN + 5)) {
-               t = in_len;
-       } else {
-               t = _lzo1x_1_do_compress(in, in_len, op, out_len, wrkmem);
+       while (l > 20) {
+               size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1);
+               uintptr_t ll_end = (uintptr_t) ip + ll;
+               if ((ll_end + ((t + ll) >> 5)) <= ll_end)
+                       break;
+               BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
+               memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
+               t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem);
+               ip += ll;
                op += *out_len;
+               l  -= ll;
        }
+       t += l;
 
        if (t > 0) {
-               ii = in + in_len - t;
+               const unsigned char *ii = in + in_len - t;
 
                if (op == out && t <= 238) {
                        *op++ = (17 + t);
@@ -198,16 +247,21 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out,
                        *op++ = (t - 3);
                } else {
                        size_t tt = t - 18;
-
                        *op++ = 0;
                        while (tt > 255) {
                                tt -= 255;
                                *op++ = 0;
                        }
-
                        *op++ = tt;
                }
-               do {
+               if (t >= 16) do {
+                       COPY8(op, ii);
+                       COPY8(op + 8, ii + 8);
+                       op += 16;
+                       ii += 16;
+                       t -= 16;
+               } while (t >= 16);
+               if (t > 0) do {
                        *op++ = *ii++;
                } while (--t > 0);
        }
@@ -223,4 +277,3 @@ EXPORT_SYMBOL_GPL(lzo1x_1_compress);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LZO1X-1 Compressor");
-
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
deleted file mode 100644 (file)
index f2fd098..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- *  LZO1X Decompressor from MiniLZO
- *
- *  Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
- *
- *  The full LZO package can be found at:
- *  http://www.oberhumer.com/opensource/lzo/
- *
- *  Changed for kernel use by:
- *  Nitin Gupta <nitingupta910@gmail.com>
- *  Richard Purdie <rpurdie@openedhand.com>
- */
-
-#ifndef STATIC
-#include <linux/module.h>
-#include <linux/kernel.h>
-#endif
-
-#include <asm/unaligned.h>
-#include <linux/lzo.h>
-#include "lzodefs.h"
-
-#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x))
-#define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x))
-#define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op)
-
-#define COPY4(dst, src)        \
-               put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
-
-int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
-                       unsigned char *out, size_t *out_len)
-{
-       const unsigned char * const ip_end = in + in_len;
-       unsigned char * const op_end = out + *out_len;
-       const unsigned char *ip = in, *m_pos;
-       unsigned char *op = out;
-       size_t t;
-
-       *out_len = 0;
-
-       if (*ip > 17) {
-               t = *ip++ - 17;
-               if (t < 4)
-                       goto match_next;
-               if (HAVE_OP(t, op_end, op))
-                       goto output_overrun;
-               if (HAVE_IP(t + 1, ip_end, ip))
-                       goto input_overrun;
-               do {
-                       *op++ = *ip++;
-               } while (--t > 0);
-               goto first_literal_run;
-       }
-
-       while ((ip < ip_end)) {
-               t = *ip++;
-               if (t >= 16)
-                       goto match;
-               if (t == 0) {
-                       if (HAVE_IP(1, ip_end, ip))
-                               goto input_overrun;
-                       while (*ip == 0) {
-                               t += 255;
-                               ip++;
-                               if (HAVE_IP(1, ip_end, ip))
-                                       goto input_overrun;
-                       }
-                       t += 15 + *ip++;
-               }
-               if (HAVE_OP(t + 3, op_end, op))
-                       goto output_overrun;
-               if (HAVE_IP(t + 4, ip_end, ip))
-                       goto input_overrun;
-
-               COPY4(op, ip);
-               op += 4;
-               ip += 4;
-               if (--t > 0) {
-                       if (t >= 4) {
-                               do {
-                                       COPY4(op, ip);
-                                       op += 4;
-                                       ip += 4;
-                                       t -= 4;
-                               } while (t >= 4);
-                               if (t > 0) {
-                                       do {
-                                               *op++ = *ip++;
-                                       } while (--t > 0);
-                               }
-                       } else {
-                               do {
-                                       *op++ = *ip++;
-                               } while (--t > 0);
-                       }
-               }
-
-first_literal_run:
-               t = *ip++;
-               if (t >= 16)
-                       goto match;
-               m_pos = op - (1 + M2_MAX_OFFSET);
-               m_pos -= t >> 2;
-               m_pos -= *ip++ << 2;
-
-               if (HAVE_LB(m_pos, out, op))
-                       goto lookbehind_overrun;
-
-               if (HAVE_OP(3, op_end, op))
-                       goto output_overrun;
-               *op++ = *m_pos++;
-               *op++ = *m_pos++;
-               *op++ = *m_pos;
-
-               goto match_done;
-
-               do {
-match:
-                       if (t >= 64) {
-                               m_pos = op - 1;
-                               m_pos -= (t >> 2) & 7;
-                               m_pos -= *ip++ << 3;
-                               t = (t >> 5) - 1;
-                               if (HAVE_LB(m_pos, out, op))
-                                       goto lookbehind_overrun;
-                               if (HAVE_OP(t + 3 - 1, op_end, op))
-                                       goto output_overrun;
-                               goto copy_match;
-                       } else if (t >= 32) {
-                               t &= 31;
-                               if (t == 0) {
-                                       if (HAVE_IP(1, ip_end, ip))
-                                               goto input_overrun;
-                                       while (*ip == 0) {
-                                               t += 255;
-                                               ip++;
-                                               if (HAVE_IP(1, ip_end, ip))
-                                                       goto input_overrun;
-                                       }
-                                       t += 31 + *ip++;
-                               }
-                               m_pos = op - 1;
-                               m_pos -= get_unaligned_le16(ip) >> 2;
-                               ip += 2;
-                       } else if (t >= 16) {
-                               m_pos = op;
-                               m_pos -= (t & 8) << 11;
-
-                               t &= 7;
-                               if (t == 0) {
-                                       if (HAVE_IP(1, ip_end, ip))
-                                               goto input_overrun;
-                                       while (*ip == 0) {
-                                               t += 255;
-                                               ip++;
-                                               if (HAVE_IP(1, ip_end, ip))
-                                                       goto input_overrun;
-                                       }
-                                       t += 7 + *ip++;
-                               }
-                               m_pos -= get_unaligned_le16(ip) >> 2;
-                               ip += 2;
-                               if (m_pos == op)
-                                       goto eof_found;
-                               m_pos -= 0x4000;
-                       } else {
-                               m_pos = op - 1;
-                               m_pos -= t >> 2;
-                               m_pos -= *ip++ << 2;
-
-                               if (HAVE_LB(m_pos, out, op))
-                                       goto lookbehind_overrun;
-                               if (HAVE_OP(2, op_end, op))
-                                       goto output_overrun;
-
-                               *op++ = *m_pos++;
-                               *op++ = *m_pos;
-                               goto match_done;
-                       }
-
-                       if (HAVE_LB(m_pos, out, op))
-                               goto lookbehind_overrun;
-                       if (HAVE_OP(t + 3 - 1, op_end, op))
-                               goto output_overrun;
-
-                       if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) {
-                               COPY4(op, m_pos);
-                               op += 4;
-                               m_pos += 4;
-                               t -= 4 - (3 - 1);
-                               do {
-                                       COPY4(op, m_pos);
-                                       op += 4;
-                                       m_pos += 4;
-                                       t -= 4;
-                               } while (t >= 4);
-                               if (t > 0)
-                                       do {
-                                               *op++ = *m_pos++;
-                                       } while (--t > 0);
-                       } else {
-copy_match:
-                               *op++ = *m_pos++;
-                               *op++ = *m_pos++;
-                               do {
-                                       *op++ = *m_pos++;
-                               } while (--t > 0);
-                       }
-match_done:
-                       t = ip[-2] & 3;
-                       if (t == 0)
-                               break;
-match_next:
-                       if (HAVE_OP(t, op_end, op))
-                               goto output_overrun;
-                       if (HAVE_IP(t + 1, ip_end, ip))
-                               goto input_overrun;
-
-                       *op++ = *ip++;
-                       if (t > 1) {
-                               *op++ = *ip++;
-                               if (t > 2)
-                                       *op++ = *ip++;
-                       }
-
-                       t = *ip++;
-               } while (ip < ip_end);
-       }
-
-       *out_len = op - out;
-       return LZO_E_EOF_NOT_FOUND;
-
-eof_found:
-       *out_len = op - out;
-       return (ip == ip_end ? LZO_E_OK :
-               (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN));
-input_overrun:
-       *out_len = op - out;
-       return LZO_E_INPUT_OVERRUN;
-
-output_overrun:
-       *out_len = op - out;
-       return LZO_E_OUTPUT_OVERRUN;
-
-lookbehind_overrun:
-       *out_len = op - out;
-       return LZO_E_LOOKBEHIND_OVERRUN;
-}
-#ifndef STATIC
-EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LZO1X Decompressor");
-
-#endif
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
new file mode 100644 (file)
index 0000000..569985d
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ *  LZO1X Decompressor from LZO
+ *
+ *  Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *
+ *  The full LZO package can be found at:
+ *  http://www.oberhumer.com/opensource/lzo/
+ *
+ *  Changed for Linux kernel use by:
+ *  Nitin Gupta <nitingupta910@gmail.com>
+ *  Richard Purdie <rpurdie@openedhand.com>
+ */
+
+#ifndef STATIC
+#include <linux/module.h>
+#include <linux/kernel.h>
+#endif
+#include <asm/unaligned.h>
+#include <linux/lzo.h>
+#include "lzodefs.h"
+
+#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
+
+int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+                         unsigned char *out, size_t *out_len)
+{
+       unsigned char *op;
+       const unsigned char *ip;
+       size_t t, next;
+       size_t state = 0;
+       const unsigned char *m_pos;
+       const unsigned char * const ip_end = in + in_len;
+       unsigned char * const op_end = out + *out_len;
+
+       op = out;
+       ip = in;
+
+       if (unlikely(in_len < 3))
+               goto input_overrun;
+       if (*ip > 17) {
+               t = *ip++ - 17;
+               if (t < 4) {
+                       next = t;
+                       goto match_next;
+               }
+               goto copy_literal_run;
+       }
+
+       for (;;) {
+               t = *ip++;
+               if (t < 16) {
+                       if (likely(state == 0)) {
+                               if (unlikely(t == 0)) {
+                                       while (unlikely(*ip == 0)) {
+                                               t += 255;
+                                               ip++;
+                                               NEED_IP(1);
+                                       }
+                                       t += 15 + *ip++;
+                               }
+                               t += 3;
+copy_literal_run:
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+                               if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+                                       const unsigned char *ie = ip + t;
+                                       unsigned char *oe = op + t;
+                                       do {
+                                               COPY8(op, ip);
+                                               op += 8;
+                                               ip += 8;
+                                               COPY8(op, ip);
+                                               op += 8;
+                                               ip += 8;
+                                       } while (ip < ie);
+                                       ip = ie;
+                                       op = oe;
+                               } else
+#endif
+                               {
+                                       NEED_OP(t);
+                                       NEED_IP(t + 3);
+                                       do {
+                                               *op++ = *ip++;
+                                       } while (--t > 0);
+                               }
+                               state = 4;
+                               continue;
+                       } else if (state != 4) {
+                               next = t & 3;
+                               m_pos = op - 1;
+                               m_pos -= t >> 2;
+                               m_pos -= *ip++ << 2;
+                               TEST_LB(m_pos);
+                               NEED_OP(2);
+                               op[0] = m_pos[0];
+                               op[1] = m_pos[1];
+                               op += 2;
+                               goto match_next;
+                       } else {
+                               next = t & 3;
+                               m_pos = op - (1 + M2_MAX_OFFSET);
+                               m_pos -= t >> 2;
+                               m_pos -= *ip++ << 2;
+                               t = 3;
+                       }
+               } else if (t >= 64) {
+                       next = t & 3;
+                       m_pos = op - 1;
+                       m_pos -= (t >> 2) & 7;
+                       m_pos -= *ip++ << 3;
+                       t = (t >> 5) - 1 + (3 - 1);
+               } else if (t >= 32) {
+                       t = (t & 31) + (3 - 1);
+                       if (unlikely(t == 2)) {
+                               while (unlikely(*ip == 0)) {
+                                       t += 255;
+                                       ip++;
+                                       NEED_IP(1);
+                               }
+                               t += 31 + *ip++;
+                               NEED_IP(2);
+                       }
+                       m_pos = op - 1;
+                       next = get_unaligned_le16(ip);
+                       ip += 2;
+                       m_pos -= next >> 2;
+                       next &= 3;
+               } else {
+                       m_pos = op;
+                       m_pos -= (t & 8) << 11;
+                       t = (t & 7) + (3 - 1);
+                       if (unlikely(t == 2)) {
+                               while (unlikely(*ip == 0)) {
+                                       t += 255;
+                                       ip++;
+                                       NEED_IP(1);
+                               }
+                               t += 7 + *ip++;
+                               NEED_IP(2);
+                       }
+                       next = get_unaligned_le16(ip);
+                       ip += 2;
+                       m_pos -= next >> 2;
+                       next &= 3;
+                       if (m_pos == op)
+                               goto eof_found;
+                       m_pos -= 0x4000;
+               }
+               TEST_LB(m_pos);
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+               if (op - m_pos >= 8) {
+                       unsigned char *oe = op + t;
+                       if (likely(HAVE_OP(t + 15))) {
+                               do {
+                                       COPY8(op, m_pos);
+                                       op += 8;
+                                       m_pos += 8;
+                                       COPY8(op, m_pos);
+                                       op += 8;
+                                       m_pos += 8;
+                               } while (op < oe);
+                               op = oe;
+                               if (HAVE_IP(6)) {
+                                       state = next;
+                                       COPY4(op, ip);
+                                       op += next;
+                                       ip += next;
+                                       continue;
+                               }
+                       } else {
+                               NEED_OP(t);
+                               do {
+                                       *op++ = *m_pos++;
+                               } while (op < oe);
+                       }
+               } else
+#endif
+               {
+                       unsigned char *oe = op + t;
+                       NEED_OP(t);
+                       op[0] = m_pos[0];
+                       op[1] = m_pos[1];
+                       op += 2;
+                       m_pos += 2;
+                       do {
+                               *op++ = *m_pos++;
+                       } while (op < oe);
+               }
+match_next:
+               state = next;
+               t = next;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+               if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+                       COPY4(op, ip);
+                       op += t;
+                       ip += t;
+               } else
+#endif
+               {
+                       NEED_IP(t + 3);
+                       NEED_OP(t);
+                       while (t > 0) {
+                               *op++ = *ip++;
+                               t--;
+                       }
+               }
+       }
+
+eof_found:
+       *out_len = op - out;
+       return (t != 3       ? LZO_E_ERROR :
+               ip == ip_end ? LZO_E_OK :
+               ip <  ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN);
+
+input_overrun:
+       *out_len = op - out;
+       return LZO_E_INPUT_OVERRUN;
+
+output_overrun:
+       *out_len = op - out;
+       return LZO_E_OUTPUT_OVERRUN;
+
+lookbehind_overrun:
+       *out_len = op - out;
+       return LZO_E_LOOKBEHIND_OVERRUN;
+}
+#ifndef STATIC
+EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZO1X Decompressor");
+
+#endif
index b6d482c..6710b83 100644 (file)
@@ -1,19 +1,37 @@
 /*
  *  lzodefs.h -- architecture, OS and compiler specific defines
  *
- *  Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *  Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
  *
  *  The full LZO package can be found at:
  *  http://www.oberhumer.com/opensource/lzo/
  *
- *  Changed for kernel use by:
+ *  Changed for Linux kernel use by:
  *  Nitin Gupta <nitingupta910@gmail.com>
  *  Richard Purdie <rpurdie@openedhand.com>
  */
 
-#define LZO_VERSION            0x2020
-#define LZO_VERSION_STRING     "2.02"
-#define LZO_VERSION_DATE       "Oct 17 2005"
+
+#define COPY4(dst, src)        \
+               put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
+#if defined(__x86_64__)
+#define COPY8(dst, src)        \
+               put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst))
+#else
+#define COPY8(dst, src)        \
+               COPY4(dst, src); COPY4((dst) + 4, (src) + 4)
+#endif
+
+#if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+#error "conflicting endian definitions"
+#elif defined(__x86_64__)
+#define LZO_USE_CTZ64  1
+#define LZO_USE_CTZ32  1
+#elif defined(__i386__) || defined(__powerpc__)
+#define LZO_USE_CTZ32  1
+#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5)
+#define LZO_USE_CTZ32  1
+#endif
 
 #define M1_MAX_OFFSET  0x0400
 #define M2_MAX_OFFSET  0x0800
 #define M3_MARKER      32
 #define M4_MARKER      16
 
-#define D_BITS         14
-#define D_MASK         ((1u << D_BITS) - 1)
+#define lzo_dict_t      unsigned short
+#define D_BITS         13
+#define D_SIZE         (1u << D_BITS)
+#define D_MASK         (D_SIZE - 1)
 #define D_HIGH         ((D_MASK >> 1) + 1)
-
-#define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \
-                                                       << (s1)) ^ (p)[0])
-#define DX3(p, s1, s2, s3)     ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0])
index 7874b01..b83c144 100644 (file)
@@ -394,6 +394,44 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
 }
 EXPORT_SYMBOL(sg_alloc_table_from_pages);
 
+void __sg_page_iter_start(struct sg_page_iter *piter,
+                         struct scatterlist *sglist, unsigned int nents,
+                         unsigned long pgoffset)
+{
+       piter->__pg_advance = 0;
+       piter->__nents = nents;
+
+       piter->page = NULL;
+       piter->sg = sglist;
+       piter->sg_pgoffset = pgoffset;
+}
+EXPORT_SYMBOL(__sg_page_iter_start);
+
+static int sg_page_count(struct scatterlist *sg)
+{
+       return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_next(struct sg_page_iter *piter)
+{
+       if (!piter->__nents || !piter->sg)
+               return false;
+
+       piter->sg_pgoffset += piter->__pg_advance;
+       piter->__pg_advance = 1;
+
+       while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
+               piter->sg_pgoffset -= sg_page_count(piter->sg);
+               piter->sg = sg_next(piter->sg);
+               if (!--piter->__nents || !piter->sg)
+                       return false;
+       }
+       piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+
+       return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_next);
+
 /**
  * sg_miter_start - start mapping iteration over a sg list
  * @miter: sg mapping iter to be started
@@ -411,9 +449,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
 {
        memset(miter, 0, sizeof(struct sg_mapping_iter));
 
-       miter->__sg = sgl;
-       miter->__nents = nents;
-       miter->__offset = 0;
+       __sg_page_iter_start(&miter->piter, sgl, nents, 0);
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
 }
@@ -438,36 +474,35 @@ EXPORT_SYMBOL(sg_miter_start);
  */
 bool sg_miter_next(struct sg_mapping_iter *miter)
 {
-       unsigned int off, len;
-
-       /* check for end and drop resources from the last iteration */
-       if (!miter->__nents)
-               return false;
-
        sg_miter_stop(miter);
 
-       /* get to the next sg if necessary.  __offset is adjusted by stop */
-       while (miter->__offset == miter->__sg->length) {
-               if (--miter->__nents) {
-                       miter->__sg = sg_next(miter->__sg);
-                       miter->__offset = 0;
-               } else
+       /*
+        * Get to the next page if necessary.
+        * __remaining, __offset is adjusted by sg_miter_stop
+        */
+       if (!miter->__remaining) {
+               struct scatterlist *sg;
+               unsigned long pgoffset;
+
+               if (!__sg_page_iter_next(&miter->piter))
                        return false;
-       }
 
-       /* map the next page */
-       off = miter->__sg->offset + miter->__offset;
-       len = miter->__sg->length - miter->__offset;
+               sg = miter->piter.sg;
+               pgoffset = miter->piter.sg_pgoffset;
 
-       miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
-       off &= ~PAGE_MASK;
-       miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
-       miter->consumed = miter->length;
+               miter->__offset = pgoffset ? 0 : sg->offset;
+               miter->__remaining = sg->offset + sg->length -
+                               (pgoffset << PAGE_SHIFT) - miter->__offset;
+               miter->__remaining = min_t(unsigned long, miter->__remaining,
+                                          PAGE_SIZE - miter->__offset);
+       }
+       miter->page = miter->piter.page;
+       miter->consumed = miter->length = miter->__remaining;
 
        if (miter->__flags & SG_MITER_ATOMIC)
-               miter->addr = kmap_atomic(miter->page) + off;
+               miter->addr = kmap_atomic(miter->page) + miter->__offset;
        else
-               miter->addr = kmap(miter->page) + off;
+               miter->addr = kmap(miter->page) + miter->__offset;
 
        return true;
 }
@@ -494,6 +529,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;
+               miter->__remaining -= miter->consumed;
 
                if (miter->__flags & SG_MITER_TO_SG)
                        flush_kernel_dcache_page(miter->page);
index 2c7aea7..ae55c1e 100644 (file)
@@ -287,7 +287,7 @@ config NR_QUICK
 
 config VIRT_TO_BUS
        def_bool y
-       depends on !ARCH_NO_VIRT_TO_BUS
+       depends on HAVE_VIRT_TO_BUS
 
 config MMU_NOTIFIER
        bool
index 32e6f41..d76ba74 100644 (file)
@@ -89,7 +89,7 @@ static int cleancache_get_key(struct inode *inode,
                fhfn = sb->s_export_op->encode_fh;
                if  (fhfn) {
                        len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
-                       if (len <= 0 || len == 255)
+                       if (len <= FILEID_ROOT || len == FILEID_INVALID)
                                return -1;
                        if (maxlen > CLEANCACHE_KEY_MAX)
                                return -1;
index 909ec55..7e09268 100644 (file)
@@ -39,7 +39,7 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
        if (!f.file)
                return -EBADF;
 
-       if (S_ISFIFO(f.file->f_path.dentry->d_inode->i_mode)) {
+       if (S_ISFIFO(file_inode(f.file)->i_mode)) {
                ret = -ESPIPE;
                goto out;
        }
index c610076..e1979fd 100644 (file)
@@ -1711,7 +1711,7 @@ EXPORT_SYMBOL(filemap_fault);
 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        int ret = VM_FAULT_LOCKED;
 
        sb_start_pagefault(inode->i_sb);
index bfa142e..e2f7f5a 100644 (file)
@@ -1906,9 +1906,8 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
        struct mm_slot *mm_slot;
-       struct hlist_node *node;
 
-       hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm)
+       hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                if (mm == mm_slot->mm)
                        return mm_slot;
 
index cdb64e4..0a0be33 100644 (file)
@@ -127,7 +127,7 @@ static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
 
 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
 {
-       return subpool_inode(vma->vm_file->f_dentry->d_inode);
+       return subpool_inode(file_inode(vma->vm_file));
 }
 
 /*
@@ -2479,7 +2479,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
        address = address & huge_page_mask(h);
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
-       mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+       mapping = file_inode(vma->vm_file)->i_mapping;
 
        /*
         * Take the mapping lock for the duration of the table walk. As
index 1c0c4cc..8562de0 100644 (file)
@@ -195,7 +195,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
-extern void munlock_vma_page(struct page *page);
+extern unsigned int munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked().  This can be useful in a situation where
index 83dd5fb..c8d7f31 100644 (file)
@@ -436,7 +436,7 @@ static int get_object(struct kmemleak_object *object)
  */
 static void free_object_rcu(struct rcu_head *rcu)
 {
-       struct hlist_node *elem, *tmp;
+       struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);
@@ -445,8 +445,8 @@ static void free_object_rcu(struct rcu_head *rcu)
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
-       hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
-               hlist_del(elem);
+       hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
+               hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        kmem_cache_free(object_cache, object);
@@ -1177,7 +1177,6 @@ static void scan_block(void *_start, void *_end,
 static void scan_object(struct kmemleak_object *object)
 {
        struct kmemleak_scan_area *area;
-       struct hlist_node *elem;
        unsigned long flags;
 
        /*
@@ -1205,7 +1204,7 @@ static void scan_object(struct kmemleak_object *object)
                        spin_lock_irqsave(&object->lock, flags);
                }
        } else
-               hlist_for_each_entry(area, elem, &object->area_list, node)
+               hlist_for_each_entry(area, &object->area_list, node)
                        scan_block((void *)area->start,
                                   (void *)(area->start + area->size),
                                   object, 0);
index ab2ba9a..85bfd4c 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -320,10 +320,9 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
-       struct hlist_node *node;
        struct mm_slot *slot;
 
-       hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm)
+       hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
                if (slot->mm == mm)
                        return slot;
 
@@ -496,9 +495,8 @@ static inline int get_kpfn_nid(unsigned long kpfn)
 static void remove_node_from_stable_tree(struct stable_node *stable_node)
 {
        struct rmap_item *rmap_item;
-       struct hlist_node *hlist;
 
-       hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+       hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                if (rmap_item->hlist.next)
                        ksm_pages_sharing--;
                else
@@ -1898,7 +1896,6 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
 {
        struct stable_node *stable_node;
        struct rmap_item *rmap_item;
-       struct hlist_node *hlist;
        unsigned int mapcount = page_mapcount(page);
        int referenced = 0;
        int search_new_forks = 0;
@@ -1910,7 +1907,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
        if (!stable_node)
                return 0;
 again:
-       hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+       hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
@@ -1952,7 +1949,6 @@ out:
 int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 {
        struct stable_node *stable_node;
-       struct hlist_node *hlist;
        struct rmap_item *rmap_item;
        int ret = SWAP_AGAIN;
        int search_new_forks = 0;
@@ -1964,7 +1960,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
        if (!stable_node)
                return SWAP_FAIL;
 again:
-       hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+       hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
@@ -2005,7 +2001,6 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
                  struct vm_area_struct *, unsigned long, void *), void *arg)
 {
        struct stable_node *stable_node;
-       struct hlist_node *hlist;
        struct rmap_item *rmap_item;
        int ret = SWAP_AGAIN;
        int search_new_forks = 0;
@@ -2017,7 +2012,7 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
        if (!stable_node)
                return ret;
 again:
-       hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+       hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
index 1bcd9b9..b8d9147 100644 (file)
@@ -92,58 +92,9 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
- * If we have CONFIG_HAVE_MEMBLOCK_NODE_MAP defined, we need to check if the
- * memory we found if not in hotpluggable ranges.
- *
  * RETURNS:
  * Found address on success, %0 on failure.
  */
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
-                                       phys_addr_t end, phys_addr_t size,
-                                       phys_addr_t align, int nid)
-{
-       phys_addr_t this_start, this_end, cand;
-       u64 i;
-       int curr = movablemem_map.nr_map - 1;
-
-       /* pump up @end */
-       if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
-               end = memblock.current_limit;
-
-       /* avoid allocating the first page */
-       start = max_t(phys_addr_t, start, PAGE_SIZE);
-       end = max(start, end);
-
-       for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
-               this_start = clamp(this_start, start, end);
-               this_end = clamp(this_end, start, end);
-
-restart:
-               if (this_end <= this_start || this_end < size)
-                       continue;
-
-               for (; curr >= 0; curr--) {
-                       if ((movablemem_map.map[curr].start_pfn << PAGE_SHIFT)
-                           < this_end)
-                               break;
-               }
-
-               cand = round_down(this_end - size, align);
-               if (curr >= 0 &&
-                   cand < movablemem_map.map[curr].end_pfn << PAGE_SHIFT) {
-                       this_end = movablemem_map.map[curr].start_pfn
-                                  << PAGE_SHIFT;
-                       goto restart;
-               }
-
-               if (cand >= this_start)
-                       return cand;
-       }
-
-       return 0;
-}
-#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align, int nid)
@@ -172,7 +123,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
        }
        return 0;
 }
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 /**
  * memblock_find_in_range - find free area in given range
index e6638f5..1c5e33f 100644 (file)
@@ -102,13 +102,16 @@ void mlock_vma_page(struct page *page)
  * can't isolate the page, we leave it for putback_lru_page() and vmscan
  * [page_referenced()/try_to_unmap()] to deal with.
  */
-void munlock_vma_page(struct page *page)
+unsigned int munlock_vma_page(struct page *page)
 {
+       unsigned int page_mask = 0;
+
        BUG_ON(!PageLocked(page));
 
        if (TestClearPageMlocked(page)) {
-               mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                   -hpage_nr_pages(page));
+               unsigned int nr_pages = hpage_nr_pages(page);
+               mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+               page_mask = nr_pages - 1;
                if (!isolate_lru_page(page)) {
                        int ret = SWAP_AGAIN;
 
@@ -141,6 +144,8 @@ void munlock_vma_page(struct page *page)
                                count_vm_event(UNEVICTABLE_PGMUNLOCKED);
                }
        }
+
+       return page_mask;
 }
 
 /**
@@ -159,7 +164,6 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *nonblocking)
 {
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long addr = start;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
 
@@ -189,7 +193,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
-       return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+       return __get_user_pages(current, mm, start, nr_pages, gup_flags,
                                NULL, NULL, nonblocking);
 }
 
@@ -226,13 +230,12 @@ static int __mlock_posix_error_return(long retval)
 void munlock_vma_pages_range(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
 {
-       unsigned long addr;
-
-       lru_add_drain();
        vma->vm_flags &= ~VM_LOCKED;
 
-       for (addr = start; addr < end; addr += PAGE_SIZE) {
+       while (start < end) {
                struct page *page;
+               unsigned int page_mask, page_increm;
+
                /*
                 * Although FOLL_DUMP is intended for get_dump_page(),
                 * it just so happens that its special treatment of the
@@ -240,13 +243,22 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                 * suits munlock very well (and if somehow an abnormal page
                 * has sneaked into the range, we won't oops here: great).
                 */
-               page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+               page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
+                                       &page_mask);
                if (page && !IS_ERR(page)) {
                        lock_page(page);
-                       munlock_vma_page(page);
+                       lru_add_drain();
+                       /*
+                        * Any THP page found by follow_page_mask() may have
+                        * gotten split before reaching munlock_vma_page(),
+                        * so we need to recompute the page_mask here.
+                        */
+                       page_mask = munlock_vma_page(page);
                        unlock_page(page);
                        put_page(page);
                }
+               page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+               start += page_increm * PAGE_SIZE;
                cond_resched();
        }
 }
index 318e121..2664a47 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -203,7 +203,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
 {
        if (vma->vm_flags & VM_DENYWRITE)
-               atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
+               atomic_inc(&file_inode(file)->i_writecount);
        if (vma->vm_flags & VM_SHARED)
                mapping->i_mmap_writable--;
 
@@ -576,7 +576,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
                struct address_space *mapping = file->f_mapping;
 
                if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
+                       atomic_dec(&file_inode(file)->i_writecount);
                if (vma->vm_flags & VM_SHARED)
                        mapping->i_mmap_writable++;
 
@@ -1229,7 +1229,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        return -EAGAIN;
        }
 
-       inode = file ? file->f_path.dentry->d_inode : NULL;
+       inode = file ? file_inode(file) : NULL;
 
        if (file) {
                switch (flags & MAP_TYPE) {
@@ -1431,7 +1431,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        int error;
        struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;
-       struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
+       struct inode *inode =  file ? file_inode(file) : NULL;
 
        /* Clear old maps */
        error = -ENOMEM;
@@ -2185,9 +2185,28 @@ int expand_downwards(struct vm_area_struct *vma,
        return error;
 }
 
+/*
+ * Note how expand_stack() refuses to expand the stack all the way to
+ * abut the next virtual mapping, *unless* that mapping itself is also
+ * a stack mapping. We want to leave room for a guard page, after all
+ * (the guard page itself is not added here, that is done by the
+ * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
+ */
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *next;
+
+       address &= PAGE_MASK;
+       next = vma->vm_next;
+       if (next && next->vm_start == address + PAGE_SIZE) {
+               if (!(next->vm_flags & VM_GROWSUP))
+                       return -ENOMEM;
+       }
        return expand_upwards(vma, address);
 }
 
@@ -2209,6 +2228,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *prev;
+
+       address &= PAGE_MASK;
+       prev = vma->vm_prev;
+       if (prev && prev->vm_end == address) {
+               if (!(prev->vm_flags & VM_GROWSDOWN))
+                       return -ENOMEM;
+       }
        return expand_downwards(vma, address);
 }
 
index 2175fb0..be04122 100644 (file)
@@ -95,11 +95,10 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                        unsigned long address)
 {
        struct mmu_notifier *mn;
-       struct hlist_node *n;
        int young = 0, id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->clear_flush_young)
                        young |= mn->ops->clear_flush_young(mn, mm, address);
        }
@@ -112,11 +111,10 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
                              unsigned long address)
 {
        struct mmu_notifier *mn;
-       struct hlist_node *n;
        int young = 0, id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->test_young) {
                        young = mn->ops->test_young(mn, mm, address);
                        if (young)
@@ -132,11 +130,10 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                               pte_t pte)
 {
        struct mmu_notifier *mn;
-       struct hlist_node *n;
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->change_pte)
                        mn->ops->change_pte(mn, mm, address, pte);
        }
@@ -147,11 +144,10 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
                                          unsigned long address)
 {
        struct mmu_notifier *mn;
-       struct hlist_node *n;
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_page)
                        mn->ops->invalidate_page(mn, mm, address);
        }
@@ -162,11 +158,10 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
        struct mmu_notifier *mn;
-       struct hlist_node *n;
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_start)
                        mn->ops->invalidate_range_start(mn, mm, start, end);
        }
@@ -178,11 +173,10 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
 {
        struct mmu_notifier *mn;
-       struct hlist_node *n;
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_end)
                        mn->ops->invalidate_range_end(mn, mm, start, end);
        }
index da0d210..e193280 100644 (file)
@@ -943,7 +943,7 @@ static int validate_mmap_request(struct file *file,
                 */
                mapping = file->f_mapping;
                if (!mapping)
-                       mapping = file->f_path.dentry->d_inode->i_mapping;
+                       mapping = file_inode(file)->i_mapping;
 
                capabilities = 0;
                if (mapping && mapping->backing_dev_info)
@@ -952,7 +952,7 @@ static int validate_mmap_request(struct file *file,
                if (!capabilities) {
                        /* no explicit capabilities set, so assume some
                         * defaults */
-                       switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
+                       switch (file_inode(file)->i_mode & S_IFMT) {
                        case S_IFREG:
                        case S_IFBLK:
                                capabilities = BDI_CAP_MAP_COPY;
@@ -987,11 +987,11 @@ static int validate_mmap_request(struct file *file,
                            !(file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (IS_APPEND(file->f_path.dentry->d_inode) &&
+                       if (IS_APPEND(file_inode(file)) &&
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       if (locks_verify_locked(file->f_path.dentry->d_inode))
+                       if (locks_verify_locked(file_inode(file)))
                                return -EAGAIN;
 
                        if (!(capabilities & BDI_CAP_MAP_DIRECT))
@@ -1327,8 +1327,8 @@ unsigned long do_mmap_pgoff(struct file *file,
                                continue;
 
                        /* search for overlapping mappings on the same file */
-                       if (pregion->vm_file->f_path.dentry->d_inode !=
-                           file->f_path.dentry->d_inode)
+                       if (file_inode(pregion->vm_file) !=
+                           file_inode(file))
                                continue;
 
                        if (pregion->vm_pgoff >= pgend)
index cdc377c..efe6814 100644 (file)
@@ -696,7 +696,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
         *     => fast response on large errors; small oscillation near setpoint
         */
        setpoint = (freerun + limit) / 2;
-       x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
+       x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
                    limit - setpoint + 1);
        pos_ratio = x;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -1986,6 +1986,8 @@ int __set_page_dirty_no_writeback(struct page *page)
  */
 void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
+       trace_writeback_dirty_page(page, mapping);
+
        if (mapping_cap_account_dirty(mapping)) {
                __inc_zone_page_state(page, NR_FILE_DIRTY);
                __inc_zone_page_state(page, NR_DIRTIED);
index 0dade3f..8fcced7 100644 (file)
@@ -202,18 +202,11 @@ static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-/* Movable memory ranges, will also be used by memblock subsystem. */
-struct movablemem_map movablemem_map = {
-       .acpi = false,
-       .nr_map = 0,
-};
-
 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 static unsigned long __initdata required_kernelcore;
 static unsigned long __initdata required_movablecore;
 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
-static unsigned long __meminitdata zone_movable_limit[MAX_NUMNODES];
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
@@ -4412,77 +4405,6 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
-/**
- * sanitize_zone_movable_limit - Sanitize the zone_movable_limit array.
- *
- * zone_movable_limit is initialized as 0. This function will try to get
- * the first ZONE_MOVABLE pfn of each node from movablemem_map, and
- * assigne them to zone_movable_limit.
- * zone_movable_limit[nid] == 0 means no limit for the node.
- *
- * Note: Each range is represented as [start_pfn, end_pfn)
- */
-static void __meminit sanitize_zone_movable_limit(void)
-{
-       int map_pos = 0, i, nid;
-       unsigned long start_pfn, end_pfn;
-
-       if (!movablemem_map.nr_map)
-               return;
-
-       /* Iterate all ranges from minimum to maximum */
-       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
-               /*
-                * If we have found lowest pfn of ZONE_MOVABLE of the node
-                * specified by user, just go on to check next range.
-                */
-               if (zone_movable_limit[nid])
-                       continue;
-
-#ifdef CONFIG_ZONE_DMA
-               /* Skip DMA memory. */
-               if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA])
-                       start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA];
-#endif
-
-#ifdef CONFIG_ZONE_DMA32
-               /* Skip DMA32 memory. */
-               if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA32])
-                       start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA32];
-#endif
-
-#ifdef CONFIG_HIGHMEM
-               /* Skip lowmem if ZONE_MOVABLE is highmem. */
-               if (zone_movable_is_highmem() &&
-                   start_pfn < arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])
-                       start_pfn = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
-#endif
-
-               if (start_pfn >= end_pfn)
-                       continue;
-
-               while (map_pos < movablemem_map.nr_map) {
-                       if (end_pfn <= movablemem_map.map[map_pos].start_pfn)
-                               break;
-
-                       if (start_pfn >= movablemem_map.map[map_pos].end_pfn) {
-                               map_pos++;
-                               continue;
-                       }
-
-                       /*
-                        * The start_pfn of ZONE_MOVABLE is either the minimum
-                        * pfn specified by movablemem_map, or 0, which means
-                        * the node has no ZONE_MOVABLE.
-                        */
-                       zone_movable_limit[nid] = max(start_pfn,
-                                       movablemem_map.map[map_pos].start_pfn);
-
-                       break;
-               }
-       }
-}
-
 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
                                        unsigned long zone_type,
@@ -4500,6 +4422,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
 
        return zholes_size[zone_type];
 }
+
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
@@ -4941,19 +4864,12 @@ static void __init find_zone_movable_pfns_for_nodes(void)
                required_kernelcore = max(required_kernelcore, corepages);
        }
 
-       /*
-        * If neither kernelcore/movablecore nor movablemem_map is specified,
-        * there is no ZONE_MOVABLE. But if movablemem_map is specified, the
-        * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
-        */
-       if (!required_kernelcore) {
-               if (movablemem_map.nr_map)
-                       memcpy(zone_movable_pfn, zone_movable_limit,
-                               sizeof(zone_movable_pfn));
+       /* If kernelcore was not specified, there is no ZONE_MOVABLE */
+       if (!required_kernelcore)
                goto out;
-       }
 
        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
+       find_usable_zone_for_movable();
        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
 
 restart:
@@ -4981,24 +4897,10 @@ restart:
                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
                        unsigned long size_pages;
 
-                       /*
-                        * Find more memory for kernelcore in
-                        * [zone_movable_pfn[nid], zone_movable_limit[nid]).
-                        */
                        start_pfn = max(start_pfn, zone_movable_pfn[nid]);
                        if (start_pfn >= end_pfn)
                                continue;
 
-                       if (zone_movable_limit[nid]) {
-                               end_pfn = min(end_pfn, zone_movable_limit[nid]);
-                               /* No range left for kernelcore in this node */
-                               if (start_pfn >= end_pfn) {
-                                       zone_movable_pfn[nid] =
-                                                       zone_movable_limit[nid];
-                                       break;
-                               }
-                       }
-
                        /* Account for what is only usable for kernelcore */
                        if (start_pfn < usable_startpfn) {
                                unsigned long kernel_pages;
@@ -5058,12 +4960,12 @@ restart:
        if (usable_nodes && required_kernelcore > usable_nodes)
                goto restart;
 
-out:
        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                zone_movable_pfn[nid] =
                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
 
+out:
        /* restore the node_state */
        node_states[N_MEMORY] = saved_node_state;
 }
@@ -5126,8 +5028,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 
        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
-       find_usable_zone_for_movable();
-       sanitize_zone_movable_limit();
        find_zone_movable_pfns_for_nodes();
 
        /* Print out the zone ranges */
@@ -5211,181 +5111,6 @@ static int __init cmdline_parse_movablecore(char *p)
 early_param("kernelcore", cmdline_parse_kernelcore);
 early_param("movablecore", cmdline_parse_movablecore);
 
-/**
- * movablemem_map_overlap() - Check if a range overlaps movablemem_map.map[].
- * @start_pfn: start pfn of the range to be checked
- * @end_pfn:   end pfn of the range to be checked (exclusive)
- *
- * This function checks if a given memory range [start_pfn, end_pfn) overlaps
- * the movablemem_map.map[] array.
- *
- * Return: index of the first overlapped element in movablemem_map.map[]
- *         or -1 if they don't overlap each other.
- */
-int __init movablemem_map_overlap(unsigned long start_pfn,
-                                  unsigned long end_pfn)
-{
-       int overlap;
-
-       if (!movablemem_map.nr_map)
-               return -1;
-
-       for (overlap = 0; overlap < movablemem_map.nr_map; overlap++)
-               if (start_pfn < movablemem_map.map[overlap].end_pfn)
-                       break;
-
-       if (overlap == movablemem_map.nr_map ||
-           end_pfn <= movablemem_map.map[overlap].start_pfn)
-               return -1;
-
-       return overlap;
-}
-
-/**
- * insert_movablemem_map - Insert a memory range in to movablemem_map.map.
- * @start_pfn: start pfn of the range
- * @end_pfn:   end pfn of the range
- *
- * This function will also merge the overlapped ranges, and sort the array
- * by start_pfn in monotonic increasing order.
- */
-void __init insert_movablemem_map(unsigned long start_pfn,
-                                 unsigned long end_pfn)
-{
-       int pos, overlap;
-
-       /*
-        * pos will be at the 1st overlapped range, or the position
-        * where the element should be inserted.
-        */
-       for (pos = 0; pos < movablemem_map.nr_map; pos++)
-               if (start_pfn <= movablemem_map.map[pos].end_pfn)
-                       break;
-
-       /* If there is no overlapped range, just insert the element. */
-       if (pos == movablemem_map.nr_map ||
-           end_pfn < movablemem_map.map[pos].start_pfn) {
-               /*
-                * If pos is not the end of array, we need to move all
-                * the rest elements backward.
-                */
-               if (pos < movablemem_map.nr_map)
-                       memmove(&movablemem_map.map[pos+1],
-                               &movablemem_map.map[pos],
-                               sizeof(struct movablemem_entry) *
-                               (movablemem_map.nr_map - pos));
-               movablemem_map.map[pos].start_pfn = start_pfn;
-               movablemem_map.map[pos].end_pfn = end_pfn;
-               movablemem_map.nr_map++;
-               return;
-       }
-
-       /* overlap will be at the last overlapped range */
-       for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
-               if (end_pfn < movablemem_map.map[overlap].start_pfn)
-                       break;
-
-       /*
-        * If there are more ranges overlapped, we need to merge them,
-        * and move the rest elements forward.
-        */
-       overlap--;
-       movablemem_map.map[pos].start_pfn = min(start_pfn,
-                                       movablemem_map.map[pos].start_pfn);
-       movablemem_map.map[pos].end_pfn = max(end_pfn,
-                                       movablemem_map.map[overlap].end_pfn);
-
-       if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
-               memmove(&movablemem_map.map[pos+1],
-                       &movablemem_map.map[overlap+1],
-                       sizeof(struct movablemem_entry) *
-                       (movablemem_map.nr_map - overlap - 1));
-
-       movablemem_map.nr_map -= overlap - pos;
-}
-
-/**
- * movablemem_map_add_region - Add a memory range into movablemem_map.
- * @start:     physical start address of range
- * @end:       physical end address of range
- *
- * This function transform the physical address into pfn, and then add the
- * range into movablemem_map by calling insert_movablemem_map().
- */
-static void __init movablemem_map_add_region(u64 start, u64 size)
-{
-       unsigned long start_pfn, end_pfn;
-
-       /* In case size == 0 or start + size overflows */
-       if (start + size <= start)
-               return;
-
-       if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
-               pr_err("movablemem_map: too many entries;"
-                       " ignoring [mem %#010llx-%#010llx]\n",
-                       (unsigned long long) start,
-                       (unsigned long long) (start + size - 1));
-               return;
-       }
-
-       start_pfn = PFN_DOWN(start);
-       end_pfn = PFN_UP(start + size);
-       insert_movablemem_map(start_pfn, end_pfn);
-}
-
-/*
- * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
- * @p: The boot option of the following format:
- *     movablemem_map=nn[KMG]@ss[KMG]
- *
- * This option sets the memory range [ss, ss+nn) to be used as movable memory.
- *
- * Return: 0 on success or -EINVAL on failure.
- */
-static int __init cmdline_parse_movablemem_map(char *p)
-{
-       char *oldp;
-       u64 start_at, mem_size;
-
-       if (!p)
-               goto err;
-
-       if (!strcmp(p, "acpi"))
-               movablemem_map.acpi = true;
-
-       /*
-        * If user decide to use info from BIOS, all the other user specified
-        * ranges will be ingored.
-        */
-       if (movablemem_map.acpi) {
-               if (movablemem_map.nr_map) {
-                       memset(movablemem_map.map, 0,
-                               sizeof(struct movablemem_entry)
-                               * movablemem_map.nr_map);
-                       movablemem_map.nr_map = 0;
-               }
-               return 0;
-       }
-
-       oldp = p;
-       mem_size = memparse(p, &p);
-       if (p == oldp)
-               goto err;
-
-       if (*p == '@') {
-               oldp = ++p;
-               start_at = memparse(p, &p);
-               if (p == oldp || *p != '\0')
-                       goto err;
-
-               movablemem_map_add_region(start_at, mem_size);
-               return 0;
-       }
-err:
-       return -EINVAL;
-}
-early_param("movablemem_map", cmdline_parse_movablemem_map);
-
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 /**
index 39de1d6..1c44af7 100644 (file)
@@ -1294,7 +1294,7 @@ unlock:
 
 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        int error;
        int ret = VM_FAULT_LOCKED;
 
@@ -1312,14 +1312,14 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 #ifdef CONFIG_NUMA
 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
 {
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
 }
 
 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
                                          unsigned long addr)
 {
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        pgoff_t index;
 
        index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -1329,7 +1329,7 @@ static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
 
 int shmem_lock(struct file *file, int lock, struct user_struct *user)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        int retval = -ENOMEM;
 
@@ -1464,7 +1464,7 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 
 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        unsigned long offset;
@@ -1807,7 +1807,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                                         loff_t len)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_falloc shmem_falloc;
        pgoff_t start, index, end;
@@ -2350,7 +2350,7 @@ static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
 {
        if (*len < 3) {
                *len = 3;
-               return 255;
+               return FILEID_INVALID;
        }
 
        if (inode_unhashed(inode)) {
@@ -2879,6 +2879,16 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
 /* common code */
 
+static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+       return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
+                               dentry->d_name.name);
+}
+
+static struct dentry_operations anon_ops = {
+       .d_dname = shmem_dname
+};
+
 /**
  * shmem_file_setup - get an unlinked file living in tmpfs
  * @name: name for dentry (to be seen in /proc/<pid>/maps
@@ -2887,15 +2897,14 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
  */
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
 {
-       int error;
-       struct file *file;
+       struct file *res;
        struct inode *inode;
        struct path path;
-       struct dentry *root;
+       struct super_block *sb;
        struct qstr this;
 
        if (IS_ERR(shm_mnt))
-               return (void *)shm_mnt;
+               return ERR_CAST(shm_mnt);
 
        if (size < 0 || size > MAX_LFS_FILESIZE)
                return ERR_PTR(-EINVAL);
@@ -2903,18 +2912,19 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
        if (shmem_acct_size(flags, size))
                return ERR_PTR(-ENOMEM);
 
-       error = -ENOMEM;
+       res = ERR_PTR(-ENOMEM);
        this.name = name;
        this.len = strlen(name);
        this.hash = 0; /* will go */
-       root = shm_mnt->mnt_root;
-       path.dentry = d_alloc(root, &this);
+       sb = shm_mnt->mnt_sb;
+       path.dentry = d_alloc_pseudo(sb, &this);
        if (!path.dentry)
                goto put_memory;
+       d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(shm_mnt);
 
-       error = -ENOSPC;
-       inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
+       res = ERR_PTR(-ENOSPC);
+       inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
        if (!inode)
                goto put_dentry;
 
@@ -2922,24 +2932,23 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
        inode->i_size = size;
        clear_nlink(inode);     /* It is unlinked */
 #ifndef CONFIG_MMU
-       error = ramfs_nommu_expand_for_mapping(inode, size);
-       if (error)
+       res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
+       if (IS_ERR(res))
                goto put_dentry;
 #endif
 
-       error = -ENFILE;
-       file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
+       res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                  &shmem_file_operations);
-       if (!file)
+       if (IS_ERR(res))
                goto put_dentry;
 
-       return file;
+       return res;
 
 put_dentry:
        path_put(&path);
 put_memory:
        shmem_unacct_size(flags, size);
-       return ERR_PTR(error);
+       return res;
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
index c72c648..a1f7772 100644 (file)
@@ -1774,7 +1774,7 @@ static int swap_show(struct seq_file *swap, void *v)
        len = seq_path(swap, &file->f_path, " \t\n\\");
        seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
                        len < 40 ? 40 - len : 1, " ",
-                       S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
+                       S_ISBLK(file_inode(file)->i_mode) ?
                                "partition" : "file\t",
                        si->pages << (PAGE_SHIFT - 10),
                        si->inuse_pages << (PAGE_SHIFT - 10),
index 2ab2de7..126fd0d 100644 (file)
@@ -221,15 +221,13 @@ EXPORT_SYMBOL(p9_error_init);
 int p9_errstr2errno(char *errstr, int len)
 {
        int errno;
-       struct hlist_node *p;
        struct errormap *c;
        int bucket;
 
        errno = 0;
-       p = NULL;
        c = NULL;
        bucket = jhash(errstr, len, 0) % ERRHASHSZ;
-       hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
+       hlist_for_each_entry(c, &hash_errmap[bucket], list) {
                if (c->namelen == len && !memcmp(c->name, errstr, len)) {
                        errno = c->val;
                        break;
index de2e950..74dea37 100644 (file)
@@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = {
        .create = p9_virtio_create,
        .close = p9_virtio_close,
        .request = p9_virtio_request,
-       .zc_request = p9_virtio_zc_request,
+       //.zc_request = p9_virtio_zc_request,
        .cancel = p9_virtio_cancel,
        /*
         * We leave one entry for input and one entry for response
index 6ceeeb3..59f278e 100644 (file)
@@ -87,23 +87,18 @@ EXPORT_SYMBOL(p9_idpool_destroy);
 
 int p9_idpool_get(struct p9_idpool *p)
 {
-       int i = 0;
-       int error;
+       int i;
        unsigned long flags;
 
-retry:
-       if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
-               return -1;
-
+       idr_preload(GFP_NOFS);
        spin_lock_irqsave(&p->lock, flags);
 
        /* no need to store exactly p, we just need something non-null */
-       error = idr_get_new(&p->pool, p, &i);
-       spin_unlock_irqrestore(&p->lock, flags);
+       i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);
 
-       if (error == -EAGAIN)
-               goto retry;
-       else if (error)
+       spin_unlock_irqrestore(&p->lock, flags);
+       idr_preload_end();
+       if (i < 0)
                return -1;
 
        p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
index 3347529..4a141e3 100644 (file)
@@ -93,10 +93,9 @@ static struct sock *atalk_search_socket(struct sockaddr_at *to,
                                        struct atalk_iface *atif)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        read_lock_bh(&atalk_sockets_lock);
-       sk_for_each(s, node, &atalk_sockets) {
+       sk_for_each(s, &atalk_sockets) {
                struct atalk_sock *at = at_sk(s);
 
                if (to->sat_port != at->src_port)
@@ -141,11 +140,10 @@ static struct sock *atalk_find_or_insert_socket(struct sock *sk,
                                                struct sockaddr_at *sat)
 {
        struct sock *s;
-       struct hlist_node *node;
        struct atalk_sock *at;
 
        write_lock_bh(&atalk_sockets_lock);
-       sk_for_each(s, node, &atalk_sockets) {
+       sk_for_each(s, &atalk_sockets) {
                at = at_sk(s);
 
                if (at->src_net == sat->sat_addr.s_net &&
@@ -1084,9 +1082,8 @@ static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
             sat->sat_port < ATPORT_LAST;
             sat->sat_port++) {
                struct sock *s;
-               struct hlist_node *node;
 
-               sk_for_each(s, node, &atalk_sockets) {
+               sk_for_each(s, &atalk_sockets) {
                        struct atalk_sock *at = at_sk(s);
 
                        if (at->src_net == sat->sat_addr.s_net &&
index 806fc0a..7b49100 100644 (file)
@@ -270,11 +270,11 @@ void atm_dev_release_vccs(struct atm_dev *dev)
        write_lock_irq(&vcc_sklist_lock);
        for (i = 0; i < VCC_HTABLE_SIZE; i++) {
                struct hlist_head *head = &vcc_hash[i];
-               struct hlist_node *node, *tmp;
+               struct hlist_node *tmp;
                struct sock *s;
                struct atm_vcc *vcc;
 
-               sk_for_each_safe(s, node, tmp, head) {
+               sk_for_each_safe(s, tmp, head) {
                        vcc = atm_sk(s);
                        if (vcc->dev == dev) {
                                vcc_release_async(vcc, -EPIPE);
@@ -317,11 +317,10 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
 static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
 {
        struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
-       struct hlist_node *node;
        struct sock *s;
        struct atm_vcc *walk;
 
-       sk_for_each(s, node, head) {
+       sk_for_each(s, head) {
                walk = atm_sk(s);
                if (walk->dev != vcc->dev)
                        continue;
index 2e3d942..f23916b 100644 (file)
@@ -842,7 +842,9 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl,
                --*l;
        }
 
-       hlist_for_each_entry_from(tmp, e, next) {
+       tmp = container_of(e, struct lec_arp_table, next);
+
+       hlist_for_each_entry_from(tmp, next) {
                if (--*l < 0)
                        break;
        }
@@ -1307,7 +1309,6 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
 static int
 lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
 {
-       struct hlist_node *node;
        struct lec_arp_table *entry;
        int i, remove_vcc = 1;
 
@@ -1326,7 +1327,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
                 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
                 */
                for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-                       hlist_for_each_entry(entry, node,
+                       hlist_for_each_entry(entry,
                                             &priv->lec_arp_tables[i], next) {
                                if (memcmp(to_remove->atm_addr,
                                           entry->atm_addr, ATM_ESA_LEN) == 0) {
@@ -1364,14 +1365,13 @@ static const char *get_status_string(unsigned char st)
 
 static void dump_arp_table(struct lec_priv *priv)
 {
-       struct hlist_node *node;
        struct lec_arp_table *rulla;
        char buf[256];
        int i, j, offset;
 
        pr_info("Dump %p:\n", priv);
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-               hlist_for_each_entry(rulla, node,
+               hlist_for_each_entry(rulla,
                                     &priv->lec_arp_tables[i], next) {
                        offset = 0;
                        offset += sprintf(buf, "%d: %p\n", i, rulla);
@@ -1403,7 +1403,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
        if (!hlist_empty(&priv->lec_no_forward))
                pr_info("No forward\n");
-       hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
+       hlist_for_each_entry(rulla, &priv->lec_no_forward, next) {
                offset = 0;
                offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
                offset += sprintf(buf + offset, " Atm:");
@@ -1428,7 +1428,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
        if (!hlist_empty(&priv->lec_arp_empty_ones))
                pr_info("Empty ones\n");
-       hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
+       hlist_for_each_entry(rulla, &priv->lec_arp_empty_ones, next) {
                offset = 0;
                offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
                offset += sprintf(buf + offset, " Atm:");
@@ -1453,7 +1453,7 @@ static void dump_arp_table(struct lec_priv *priv)
 
        if (!hlist_empty(&priv->mcast_fwds))
                pr_info("Multicast Forward VCCs\n");
-       hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
+       hlist_for_each_entry(rulla, &priv->mcast_fwds, next) {
                offset = 0;
                offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
                offset += sprintf(buf + offset, " Atm:");
@@ -1487,7 +1487,7 @@ static void dump_arp_table(struct lec_priv *priv)
 static void lec_arp_destroy(struct lec_priv *priv)
 {
        unsigned long flags;
-       struct hlist_node *node, *next;
+       struct hlist_node *next;
        struct lec_arp_table *entry;
        int i;
 
@@ -1499,7 +1499,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
 
        spin_lock_irqsave(&priv->lec_arp_lock, flags);
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-               hlist_for_each_entry_safe(entry, node, next,
+               hlist_for_each_entry_safe(entry, next,
                                          &priv->lec_arp_tables[i], next) {
                        lec_arp_remove(priv, entry);
                        lec_arp_put(entry);
@@ -1507,7 +1507,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
                INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
        }
 
-       hlist_for_each_entry_safe(entry, node, next,
+       hlist_for_each_entry_safe(entry, next,
                                  &priv->lec_arp_empty_ones, next) {
                del_timer_sync(&entry->timer);
                lec_arp_clear_vccs(entry);
@@ -1516,7 +1516,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
        }
        INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
 
-       hlist_for_each_entry_safe(entry, node, next,
+       hlist_for_each_entry_safe(entry, next,
                                  &priv->lec_no_forward, next) {
                del_timer_sync(&entry->timer);
                lec_arp_clear_vccs(entry);
@@ -1525,7 +1525,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
        }
        INIT_HLIST_HEAD(&priv->lec_no_forward);
 
-       hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+       hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
                /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
                lec_arp_clear_vccs(entry);
                hlist_del(&entry->next);
@@ -1542,14 +1542,13 @@ static void lec_arp_destroy(struct lec_priv *priv)
 static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
                                          const unsigned char *mac_addr)
 {
-       struct hlist_node *node;
        struct hlist_head *head;
        struct lec_arp_table *entry;
 
        pr_debug("%pM\n", mac_addr);
 
        head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
-       hlist_for_each_entry(entry, node, head, next) {
+       hlist_for_each_entry(entry, head, next) {
                if (ether_addr_equal(mac_addr, entry->mac_addr))
                        return entry;
        }
@@ -1686,7 +1685,7 @@ static void lec_arp_check_expire(struct work_struct *work)
        unsigned long flags;
        struct lec_priv *priv =
                container_of(work, struct lec_priv, lec_arp_work.work);
-       struct hlist_node *node, *next;
+       struct hlist_node *next;
        struct lec_arp_table *entry;
        unsigned long now;
        int i;
@@ -1696,7 +1695,7 @@ static void lec_arp_check_expire(struct work_struct *work)
 restart:
        spin_lock_irqsave(&priv->lec_arp_lock, flags);
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-               hlist_for_each_entry_safe(entry, node, next,
+               hlist_for_each_entry_safe(entry, next,
                                          &priv->lec_arp_tables[i], next) {
                        if (__lec_arp_check_expire(entry, now, priv)) {
                                struct sk_buff *skb;
@@ -1823,14 +1822,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
                unsigned long permanent)
 {
        unsigned long flags;
-       struct hlist_node *node, *next;
+       struct hlist_node *next;
        struct lec_arp_table *entry;
        int i;
 
        pr_debug("\n");
        spin_lock_irqsave(&priv->lec_arp_lock, flags);
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-               hlist_for_each_entry_safe(entry, node, next,
+               hlist_for_each_entry_safe(entry, next,
                                          &priv->lec_arp_tables[i], next) {
                        if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
                            (permanent ||
@@ -1855,7 +1854,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
               unsigned int targetless_le_arp)
 {
        unsigned long flags;
-       struct hlist_node *node, *next;
+       struct hlist_node *next;
        struct lec_arp_table *entry, *tmp;
        int i;
 
@@ -1870,7 +1869,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
                                 * we have no entry in the cache. 7.1.30
                                 */
        if (!hlist_empty(&priv->lec_arp_empty_ones)) {
-               hlist_for_each_entry_safe(entry, node, next,
+               hlist_for_each_entry_safe(entry, next,
                                          &priv->lec_arp_empty_ones, next) {
                        if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
                                hlist_del(&entry->next);
@@ -1915,7 +1914,7 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
        memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
        del_timer(&entry->timer);
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-               hlist_for_each_entry(tmp, node,
+               hlist_for_each_entry(tmp,
                                     &priv->lec_arp_tables[i], next) {
                        if (entry != tmp &&
                            !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
@@ -1956,7 +1955,6 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
              void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb))
 {
        unsigned long flags;
-       struct hlist_node *node;
        struct lec_arp_table *entry;
        int i, found_entry = 0;
 
@@ -2026,7 +2024,7 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
                 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
                 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-               hlist_for_each_entry(entry, node,
+               hlist_for_each_entry(entry,
                                     &priv->lec_arp_tables[i], next) {
                        if (memcmp
                            (ioc_data->atm_addr, entry->atm_addr,
@@ -2103,7 +2101,6 @@ out:
 static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
 {
        unsigned long flags;
-       struct hlist_node *node;
        struct lec_arp_table *entry;
        int i;
 
@@ -2111,7 +2108,7 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
 restart:
        spin_lock_irqsave(&priv->lec_arp_lock, flags);
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-               hlist_for_each_entry(entry, node,
+               hlist_for_each_entry(entry,
                                     &priv->lec_arp_tables[i], next) {
                        if (entry->flush_tran_id == tran_id &&
                            entry->status == ESI_FLUSH_PENDING) {
@@ -2140,13 +2137,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
                      const unsigned char *atm_addr, unsigned long tran_id)
 {
        unsigned long flags;
-       struct hlist_node *node;
        struct lec_arp_table *entry;
        int i;
 
        spin_lock_irqsave(&priv->lec_arp_lock, flags);
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
-               hlist_for_each_entry(entry, node,
+               hlist_for_each_entry(entry,
                                     &priv->lec_arp_tables[i], next) {
                        if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
                                entry->flush_tran_id = tran_id;
@@ -2198,7 +2194,7 @@ out:
 static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
 {
        unsigned long flags;
-       struct hlist_node *node, *next;
+       struct hlist_node *next;
        struct lec_arp_table *entry;
        int i;
 
@@ -2208,7 +2204,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
        spin_lock_irqsave(&priv->lec_arp_lock, flags);
 
        for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
-               hlist_for_each_entry_safe(entry, node, next,
+               hlist_for_each_entry_safe(entry, next,
                                          &priv->lec_arp_tables[i], next) {
                        if (vcc == entry->vcc) {
                                lec_arp_remove(priv, entry);
@@ -2219,7 +2215,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
                }
        }
 
-       hlist_for_each_entry_safe(entry, node, next,
+       hlist_for_each_entry_safe(entry, next,
                                  &priv->lec_arp_empty_ones, next) {
                if (entry->vcc == vcc) {
                        lec_arp_clear_vccs(entry);
@@ -2229,7 +2225,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
                }
        }
 
-       hlist_for_each_entry_safe(entry, node, next,
+       hlist_for_each_entry_safe(entry, next,
                                  &priv->lec_no_forward, next) {
                if (entry->recv_vcc == vcc) {
                        lec_arp_clear_vccs(entry);
@@ -2239,7 +2235,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
                }
        }
 
-       hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) {
+       hlist_for_each_entry_safe(entry, next, &priv->mcast_fwds, next) {
                if (entry->recv_vcc == vcc) {
                        lec_arp_clear_vccs(entry);
                        /* No timer, LANEv2 7.1.20 and 2.3.5.3 */
@@ -2257,13 +2253,13 @@ lec_arp_check_empties(struct lec_priv *priv,
                      struct atm_vcc *vcc, struct sk_buff *skb)
 {
        unsigned long flags;
-       struct hlist_node *node, *next;
+       struct hlist_node *next;
        struct lec_arp_table *entry, *tmp;
        struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
        unsigned char *src = hdr->h_source;
 
        spin_lock_irqsave(&priv->lec_arp_lock, flags);
-       hlist_for_each_entry_safe(entry, node, next,
+       hlist_for_each_entry_safe(entry, next,
                                  &priv->lec_arp_empty_ones, next) {
                if (vcc == entry->vcc) {
                        del_timer(&entry->timer);
index b4e7534..6ac35ff 100644 (file)
@@ -385,7 +385,7 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
        page = get_zeroed_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;
-       dev = PDE(file->f_path.dentry->d_inode)->data;
+       dev = PDE(file_inode(file))->data;
        if (!dev->ops->proc_read)
                length = -EINVAL;
        else {
index 86767ca..4176887 100644 (file)
@@ -217,7 +217,6 @@ static void purge_vcc(struct atm_vcc *vcc)
 
 static void sigd_close(struct atm_vcc *vcc)
 {
-       struct hlist_node *node;
        struct sock *s;
        int i;
 
@@ -231,7 +230,7 @@ static void sigd_close(struct atm_vcc *vcc)
        for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
                struct hlist_head *head = &vcc_hash[i];
 
-               sk_for_each(s, node, head) {
+               sk_for_each(s, head) {
                        vcc = atm_sk(s);
 
                        purge_vcc(vcc);
index 69a06c4..7b11f8b 100644 (file)
@@ -81,14 +81,13 @@ static void ax25_kill_by_device(struct net_device *dev)
 {
        ax25_dev *ax25_dev;
        ax25_cb *s;
-       struct hlist_node *node;
 
        if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                return;
 
        spin_lock_bh(&ax25_list_lock);
 again:
-       ax25_for_each(s, node, &ax25_list) {
+       ax25_for_each(s, &ax25_list) {
                if (s->ax25_dev == ax25_dev) {
                        s->ax25_dev = NULL;
                        spin_unlock_bh(&ax25_list_lock);
@@ -158,10 +157,9 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
        struct net_device *dev, int type)
 {
        ax25_cb *s;
-       struct hlist_node *node;
 
        spin_lock(&ax25_list_lock);
-       ax25_for_each(s, node, &ax25_list) {
+       ax25_for_each(s, &ax25_list) {
                if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
                        continue;
                if (s->sk && !ax25cmp(&s->source_addr, addr) &&
@@ -187,10 +185,9 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
 {
        struct sock *sk = NULL;
        ax25_cb *s;
-       struct hlist_node *node;
 
        spin_lock(&ax25_list_lock);
-       ax25_for_each(s, node, &ax25_list) {
+       ax25_for_each(s, &ax25_list) {
                if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
                    !ax25cmp(&s->dest_addr, dest_addr) &&
                    s->sk->sk_type == type) {
@@ -213,10 +210,9 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
        ax25_digi *digi, struct net_device *dev)
 {
        ax25_cb *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&ax25_list_lock);
-       ax25_for_each(s, node, &ax25_list) {
+       ax25_for_each(s, &ax25_list) {
                if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
                        continue;
                if (s->ax25_dev == NULL)
@@ -248,10 +244,9 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
 {
        ax25_cb *s;
        struct sk_buff *copy;
-       struct hlist_node *node;
 
        spin_lock(&ax25_list_lock);
-       ax25_for_each(s, node, &ax25_list) {
+       ax25_for_each(s, &ax25_list) {
                if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
                    s->sk->sk_type == SOCK_RAW &&
                    s->sk->sk_protocol == proto &&
index 5ea7fd3..e05bd57 100644 (file)
@@ -39,7 +39,6 @@ void ax25_ds_nr_error_recovery(ax25_cb *ax25)
 void ax25_ds_enquiry_response(ax25_cb *ax25)
 {
        ax25_cb *ax25o;
-       struct hlist_node *node;
 
        /* Please note that neither DK4EG's nor DG2FEF's
         * DAMA spec mention the following behaviour as seen
@@ -80,7 +79,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
        ax25_ds_set_timer(ax25->ax25_dev);
 
        spin_lock(&ax25_list_lock);
-       ax25_for_each(ax25o, node, &ax25_list) {
+       ax25_for_each(ax25o, &ax25_list) {
                if (ax25o == ax25)
                        continue;
 
@@ -159,10 +158,9 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
 {
        ax25_cb *ax25;
        int res = 0;
-       struct hlist_node *node;
 
        spin_lock(&ax25_list_lock);
-       ax25_for_each(ax25, node, &ax25_list)
+       ax25_for_each(ax25, &ax25_list)
                if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
                        res = 1;
                        break;
index 993c439..951cd57 100644 (file)
@@ -70,7 +70,6 @@ static void ax25_ds_timeout(unsigned long arg)
 {
        ax25_dev *ax25_dev = (struct ax25_dev *) arg;
        ax25_cb *ax25;
-       struct hlist_node *node;
 
        if (ax25_dev == NULL || !ax25_dev->dama.slave)
                return;                 /* Yikes! */
@@ -81,7 +80,7 @@ static void ax25_ds_timeout(unsigned long arg)
        }
 
        spin_lock(&ax25_list_lock);
-       ax25_for_each(ax25, node, &ax25_list) {
+       ax25_for_each(ax25, &ax25_list) {
                if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
                        continue;
 
index 7d5f24b..7f16e8a 100644 (file)
@@ -193,10 +193,9 @@ int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
 void ax25_link_failed(ax25_cb *ax25, int reason)
 {
        struct ax25_linkfail *lf;
-       struct hlist_node *node;
 
        spin_lock_bh(&linkfail_lock);
-       hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
+       hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
                lf->func(ax25, reason);
        spin_unlock_bh(&linkfail_lock);
 }
index 957999e..71c4bad 100644 (file)
@@ -54,10 +54,9 @@ EXPORT_SYMBOL(ax25_uid_policy);
 ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
 {
        ax25_uid_assoc *ax25_uid, *res = NULL;
-       struct hlist_node *node;
 
        read_lock(&ax25_uid_lock);
-       ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+       ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
                if (uid_eq(ax25_uid->uid, uid)) {
                        ax25_uid_hold(ax25_uid);
                        res = ax25_uid;
@@ -74,7 +73,6 @@ EXPORT_SYMBOL(ax25_findbyuid);
 int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 {
        ax25_uid_assoc *ax25_uid;
-       struct hlist_node *node;
        ax25_uid_assoc *user;
        unsigned long res;
 
@@ -82,7 +80,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
        case SIOCAX25GETUID:
                res = -ENOENT;
                read_lock(&ax25_uid_lock);
-               ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+               ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
                        if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
                                res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
                                break;
@@ -126,7 +124,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
 
                ax25_uid = NULL;
                write_lock(&ax25_uid_lock);
-               ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+               ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
                        if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
                                break;
                }
@@ -212,11 +210,10 @@ const struct file_operations ax25_uid_fops = {
 void __exit ax25_uid_free(void)
 {
        ax25_uid_assoc *ax25_uid;
-       struct hlist_node *node;
 
        write_lock(&ax25_uid_lock);
 again:
-       ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
+       ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
                hlist_del_init(&ax25_uid->uid_node);
                ax25_uid_put(ax25_uid);
                goto again;
index 72fe1bb..a0b253e 100644 (file)
@@ -487,7 +487,6 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
         */
        struct batadv_forw_packet *forw_packet_aggr = NULL;
        struct batadv_forw_packet *forw_packet_pos = NULL;
-       struct hlist_node *tmp_node;
        struct batadv_ogm_packet *batadv_ogm_packet;
        bool direct_link;
        unsigned long max_aggregation_jiffies;
@@ -500,7 +499,7 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        /* own packets are not to be aggregated */
        if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
-               hlist_for_each_entry(forw_packet_pos, tmp_node,
+               hlist_for_each_entry(forw_packet_pos,
                                     &bat_priv->forw_bat_list, list) {
                        if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
                                                        bat_priv, packet_len,
@@ -655,7 +654,6 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
        struct batadv_neigh_node *router = NULL;
        struct batadv_orig_node *orig_node_tmp;
-       struct hlist_node *node;
        int if_num;
        uint8_t sum_orig, sum_neigh;
        uint8_t *neigh_addr;
@@ -665,7 +663,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                   "update_originator(): Searching and updating originator entry of received packet\n");
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+       hlist_for_each_entry_rcu(tmp_neigh_node,
                                 &orig_node->neigh_list, list) {
                neigh_addr = tmp_neigh_node->addr;
                if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
@@ -801,7 +799,6 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 {
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
-       struct hlist_node *node;
        uint8_t total_count;
        uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
        unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
@@ -810,7 +807,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 
        /* find corresponding one hop neighbor */
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+       hlist_for_each_entry_rcu(tmp_neigh_node,
                                 &orig_neigh_node->neigh_list, list) {
                if (!batadv_compare_eth(tmp_neigh_node->addr,
                                        orig_neigh_node->orig))
@@ -920,7 +917,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct batadv_orig_node *orig_node;
        struct batadv_neigh_node *tmp_neigh_node;
-       struct hlist_node *node;
        int is_duplicate = 0;
        int32_t seq_diff;
        int need_update = 0;
@@ -943,7 +939,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                goto out;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+       hlist_for_each_entry_rcu(tmp_neigh_node,
                                 &orig_node->neigh_list, list) {
                is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
                                                orig_node->last_real_seqno,
index 30f4652..6a4f728 100644 (file)
@@ -144,7 +144,6 @@ static struct batadv_bla_claim
 {
        struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_bla_claim *claim;
        struct batadv_bla_claim *claim_tmp = NULL;
        int index;
@@ -156,7 +155,7 @@ static struct batadv_bla_claim
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+       hlist_for_each_entry_rcu(claim, head, hash_entry) {
                if (!batadv_compare_claim(&claim->hash_entry, data))
                        continue;
 
@@ -185,7 +184,6 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
 {
        struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_bla_backbone_gw search_entry, *backbone_gw;
        struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
        int index;
@@ -200,7 +198,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+       hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
                                                &search_entry))
                        continue;
@@ -221,7 +219,7 @@ static void
 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 {
        struct batadv_hashtable *hash;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        struct batadv_bla_claim *claim;
        int i;
@@ -236,13 +234,13 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(claim, node, node_tmp,
+               hlist_for_each_entry_safe(claim, node_tmp,
                                          head, hash_entry) {
                        if (claim->backbone_gw != backbone_gw)
                                continue;
 
                        batadv_claim_free_ref(claim);
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&claim->hash_entry);
                }
                spin_unlock_bh(list_lock);
        }
@@ -460,7 +458,6 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
                                      struct batadv_hard_iface *primary_if,
                                      short vid)
 {
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
        struct batadv_bla_claim *claim;
@@ -481,7 +478,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(claim, head, hash_entry) {
                        /* only own claims are interesting */
                        if (claim->backbone_gw != backbone_gw)
                                continue;
@@ -958,7 +955,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
        spinlock_t *list_lock;  /* protects write access to the hash lists */
@@ -973,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+               hlist_for_each_entry_safe(backbone_gw, node_tmp,
                                          head, hash_entry) {
                        if (now)
                                goto purge_now;
@@ -992,7 +989,7 @@ purge_now:
 
                        batadv_bla_del_backbone_claims(backbone_gw);
 
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&backbone_gw->hash_entry);
                        batadv_backbone_gw_free_ref(backbone_gw);
                }
                spin_unlock_bh(list_lock);
@@ -1013,7 +1010,6 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
                                    int now)
 {
        struct batadv_bla_claim *claim;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
        int i;
@@ -1026,7 +1022,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(claim, head, hash_entry) {
                        if (now)
                                goto purge_now;
                        if (!batadv_compare_eth(claim->backbone_gw->orig,
@@ -1062,7 +1058,6 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *oldif)
 {
        struct batadv_bla_backbone_gw *backbone_gw;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
        __be16 group;
@@ -1086,7 +1081,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                        /* own orig still holds the old value. */
                        if (!batadv_compare_eth(backbone_gw->orig,
                                                oldif->net_dev->dev_addr))
@@ -1112,7 +1107,6 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
        struct batadv_priv_bla *priv_bla;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_hashtable *hash;
@@ -1140,7 +1134,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                        if (!batadv_compare_eth(backbone_gw->orig,
                                                primary_if->net_dev->dev_addr))
                                continue;
@@ -1322,7 +1316,6 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 {
        struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_bla_backbone_gw *backbone_gw;
        int i;
 
@@ -1336,7 +1329,7 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                        if (batadv_compare_eth(backbone_gw->orig, orig)) {
                                rcu_read_unlock();
                                return 1;
@@ -1607,7 +1600,6 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct batadv_bla_claim *claim;
        struct batadv_hard_iface *primary_if;
-       struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        bool is_own;
@@ -1628,7 +1620,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(claim, head, hash_entry) {
                        is_own = batadv_compare_eth(claim->backbone_gw->orig,
                                                    primary_addr);
                        seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n",
@@ -1652,7 +1644,6 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_hard_iface *primary_if;
-       struct hlist_node *node;
        struct hlist_head *head;
        int secs, msecs;
        uint32_t i;
@@ -1674,7 +1665,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                        msecs = jiffies_to_msecs(jiffies -
                                                 backbone_gw->lasttime);
                        secs = msecs / 1000;
index 761a590..d54188a 100644 (file)
@@ -83,7 +83,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
 {
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_dat_entry *dat_entry;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        uint32_t i;
 
@@ -95,7 +95,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
                list_lock = &bat_priv->dat.hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(dat_entry, node, node_tmp, head,
+               hlist_for_each_entry_safe(dat_entry, node_tmp, head,
                                          hash_entry) {
                        /* if an helper function has been passed as parameter,
                         * ask it if the entry has to be purged or not
@@ -103,7 +103,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
                        if (to_purge && !to_purge(dat_entry))
                                continue;
 
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&dat_entry->hash_entry);
                        batadv_dat_entry_free_ref(dat_entry);
                }
                spin_unlock_bh(list_lock);
@@ -235,7 +235,6 @@ static struct batadv_dat_entry *
 batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
        struct batadv_hashtable *hash = bat_priv->dat.hash;
        uint32_t index;
@@ -247,7 +246,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+       hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
                if (dat_entry->ip != ip)
                        continue;
 
@@ -465,7 +464,6 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
        batadv_dat_addr_t max = 0, tmp_max = 0;
        struct batadv_orig_node *orig_node, *max_orig_node = NULL;
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node;
        struct hlist_head *head;
        int i;
 
@@ -481,7 +479,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        /* the dht space is a ring and addresses are unsigned */
                        tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
                                  ip_key;
@@ -686,7 +684,6 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_hashtable *hash = bat_priv->dat.hash;
        struct batadv_dat_entry *dat_entry;
        struct batadv_hard_iface *primary_if;
-       struct hlist_node *node;
        struct hlist_head *head;
        unsigned long last_seen_jiffies;
        int last_seen_msecs, last_seen_secs, last_seen_mins;
@@ -704,7 +701,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
                        last_seen_jiffies = jiffies - dat_entry->last_update;
                        last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
                        last_seen_mins = last_seen_msecs / 60000;
index 074107f..34f99a4 100644 (file)
@@ -114,7 +114,6 @@ static struct batadv_gw_node *
 batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 {
        struct batadv_neigh_node *router;
-       struct hlist_node *node;
        struct batadv_gw_node *gw_node, *curr_gw = NULL;
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
        uint32_t gw_divisor;
@@ -127,7 +126,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
        gw_divisor *= 64;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+       hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
                        continue;
 
@@ -344,7 +343,6 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node,
                           uint8_t new_gwflags)
 {
-       struct hlist_node *node;
        struct batadv_gw_node *gw_node, *curr_gw;
 
        /* Note: We don't need a NULL check here, since curr_gw never gets
@@ -355,7 +353,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+       hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
                if (gw_node->orig_node != orig_node)
                        continue;
 
@@ -403,7 +401,7 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 void batadv_gw_node_purge(struct batadv_priv *bat_priv)
 {
        struct batadv_gw_node *gw_node, *curr_gw;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
        int do_deselect = 0;
 
@@ -411,7 +409,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
 
        spin_lock_bh(&bat_priv->gw.list_lock);
 
-       hlist_for_each_entry_safe(gw_node, node, node_tmp,
+       hlist_for_each_entry_safe(gw_node, node_tmp,
                                  &bat_priv->gw.list, list) {
                if (((!gw_node->deleted) ||
                     (time_before(jiffies, gw_node->deleted + timeout))) &&
@@ -476,7 +474,6 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;
        struct batadv_gw_node *gw_node;
-       struct hlist_node *node;
        int gw_count = 0;
 
        primary_if = batadv_seq_print_text_primary_if_get(seq);
@@ -490,7 +487,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
                   primary_if->net_dev->dev_addr, net_dev->name);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
+       hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
                        continue;
 
index 21fe698..0488d70 100644 (file)
@@ -345,9 +345,8 @@ void batadv_recv_handler_unregister(uint8_t packet_type)
 static struct batadv_algo_ops *batadv_algo_get(char *name)
 {
        struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
-       struct hlist_node *node;
 
-       hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
+       hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
                if (strcmp(bat_algo_ops_tmp->name, name) != 0)
                        continue;
 
@@ -411,11 +410,10 @@ out:
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct batadv_algo_ops *bat_algo_ops;
-       struct hlist_node *node;
 
        seq_printf(seq, "Available routing algorithms:\n");
 
-       hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
+       hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
                seq_printf(seq, "%s\n", bat_algo_ops->name);
        }
 
index 457ea44..96fb80b 100644 (file)
@@ -118,7 +118,7 @@ out:
 
 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 {
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
        struct batadv_orig_node *orig_node;
 
@@ -134,7 +134,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
        }
 
        /* for all neighbors towards this originator ... */
-       hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+       hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                hlist_del_rcu(&neigh_node->list);
                batadv_neigh_node_free_ref(neigh_node);
@@ -161,7 +161,7 @@ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
 void batadv_originator_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
@@ -179,9 +179,9 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(orig_node, node, node_tmp,
+               hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&orig_node->hash_entry);
                        batadv_orig_node_free_ref(orig_node);
                }
                spin_unlock_bh(list_lock);
@@ -274,7 +274,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig_node,
                            struct batadv_neigh_node **best_neigh_node)
 {
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        bool neigh_purged = false;
        unsigned long last_seen;
@@ -285,7 +285,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
        spin_lock_bh(&orig_node->neigh_list_lock);
 
        /* for all neighbors towards this originator ... */
-       hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+       hlist_for_each_entry_safe(neigh_node, node_tmp,
                                  &orig_node->neigh_list, list) {
                last_seen = neigh_node->last_seen;
                if_incoming = neigh_node->if_incoming;
@@ -348,7 +348,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct batadv_orig_node *orig_node;
@@ -363,13 +363,13 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(orig_node, node, node_tmp,
+               hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
                                if (orig_node->gw_flags)
                                        batadv_gw_node_delete(bat_priv,
                                                              orig_node);
-                               hlist_del_rcu(node);
+                               hlist_del_rcu(&orig_node->hash_entry);
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }
@@ -408,7 +408,6 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        struct batadv_hard_iface *primary_if;
        struct batadv_orig_node *orig_node;
@@ -434,7 +433,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        neigh_node = batadv_orig_node_get_router(orig_node);
                        if (!neigh_node)
                                continue;
@@ -453,7 +452,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
                                   neigh_node->addr,
                                   neigh_node->if_incoming->net_dev->name);
 
-                       hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
+                       hlist_for_each_entry_rcu(neigh_node_tmp,
                                                 &orig_node->neigh_list, list) {
                                seq_printf(seq, " %pM (%3i)",
                                           neigh_node_tmp->addr,
@@ -511,7 +510,6 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        uint32_t i;
@@ -524,7 +522,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
                        ret = batadv_orig_node_add_if(orig_node, max_if_num);
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);
@@ -595,7 +593,6 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_hard_iface *hard_iface_tmp;
        struct batadv_orig_node *orig_node;
@@ -609,7 +606,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
                        ret = batadv_orig_node_del_if(orig_node, max_if_num,
                                                      hard_iface->if_num);
index 286bf74..7df48fa 100644 (file)
@@ -68,7 +68,6 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
        int index;
 
@@ -79,7 +78,7 @@ batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+       hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                if (!batadv_compare_eth(orig_node, data))
                        continue;
 
index 60ba03f..5ee21ce 100644 (file)
@@ -37,7 +37,6 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        unsigned long *word;
@@ -49,7 +48,7 @@ void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
                        word_index = hard_iface->if_num * BATADV_NUM_WORDS;
                        word = &(orig_node->bcast_own[word_index]);
@@ -146,7 +145,6 @@ out:
 void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
                                  struct batadv_neigh_node *neigh_node)
 {
-       struct hlist_node *node;
        struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
        uint8_t interference_candidate = 0;
 
@@ -169,7 +167,7 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
         * interface. If we do, we won't select this candidate because of
         * possible interference.
         */
-       hlist_for_each_entry_rcu(tmp_neigh_node, node,
+       hlist_for_each_entry_rcu(tmp_neigh_node,
                                 &orig_node->neigh_list, list) {
                if (tmp_neigh_node == neigh_node)
                        continue;
index 80ca65f..a67cffd 100644 (file)
@@ -316,7 +316,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface)
 {
        struct batadv_forw_packet *forw_packet;
-       struct hlist_node *tmp_node, *safe_tmp_node;
+       struct hlist_node *safe_tmp_node;
        bool pending;
 
        if (hard_iface)
@@ -329,7 +329,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
-       hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+       hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
@@ -355,7 +355,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
-       hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
+       hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
index d44672f..98a66a0 100644 (file)
@@ -56,7 +56,6 @@ static struct batadv_tt_common_entry *
 batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
        uint32_t index;
@@ -68,7 +67,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
+       hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) {
                if (!batadv_compare_eth(tt_common_entry, data))
                        continue;
 
@@ -257,7 +256,6 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct batadv_tt_local_entry *tt_local;
        struct batadv_tt_global_entry *tt_global;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_tt_orig_list_entry *orig_entry;
        int hash_added;
        bool roamed_back = false;
@@ -339,7 +337,7 @@ check_roaming:
                /* These node are probably going to update their tt table */
                head = &tt_global->orig_list;
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+               hlist_for_each_entry_rcu(orig_entry, head, list) {
                        batadv_send_roam_adv(bat_priv, tt_global->common.addr,
                                             orig_entry->orig_node);
                }
@@ -470,7 +468,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
        struct batadv_hard_iface *primary_if;
-       struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        int last_seen_secs;
@@ -494,7 +491,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_common_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry,
                                         head, hash_entry) {
                        tt_local = container_of(tt_common_entry,
                                                struct batadv_tt_local_entry,
@@ -605,9 +602,9 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        struct batadv_tt_common_entry *tt_common_entry;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
 
-       hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+       hlist_for_each_entry_safe(tt_common_entry, node_tmp, head,
                                  hash_entry) {
                tt_local_entry = container_of(tt_common_entry,
                                              struct batadv_tt_local_entry,
@@ -651,7 +648,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        uint32_t i;
 
@@ -665,9 +662,9 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node_tmp,
                                          head, hash_entry) {
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&tt_common_entry->hash_entry);
                        tt_local = container_of(tt_common_entry,
                                                struct batadv_tt_local_entry,
                                                common);
@@ -724,11 +721,10 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
 {
        struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
        const struct hlist_head *head;
-       struct hlist_node *node;
 
        rcu_read_lock();
        head = &entry->orig_list;
-       hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+       hlist_for_each_entry_rcu(tmp_orig_entry, head, list) {
                if (tmp_orig_entry->orig_node != orig_node)
                        continue;
                if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
@@ -940,12 +936,11 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
 {
        struct batadv_neigh_node *router = NULL;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
        int best_tq = 0;
 
        head = &tt_global_entry->orig_list;
-       hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+       hlist_for_each_entry_rcu(orig_entry, head, list) {
                router = batadv_orig_node_get_router(orig_entry->orig_node);
                if (!router)
                        continue;
@@ -973,7 +968,6 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
                             struct seq_file *seq)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
        struct batadv_tt_common_entry *tt_common_entry;
        uint16_t flags;
@@ -997,7 +991,7 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
 
        head = &tt_global_entry->orig_list;
 
-       hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+       hlist_for_each_entry_rcu(orig_entry, head, list) {
                if (best_entry == orig_entry)
                        continue;
 
@@ -1020,7 +1014,6 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global;
        struct batadv_hard_iface *primary_if;
-       struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
 
@@ -1039,7 +1032,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_common_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry,
                                         head, hash_entry) {
                        tt_global = container_of(tt_common_entry,
                                                 struct batadv_tt_global_entry,
@@ -1059,13 +1052,13 @@ static void
 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
 {
        struct hlist_head *head;
-       struct hlist_node *node, *safe;
+       struct hlist_node *safe;
        struct batadv_tt_orig_list_entry *orig_entry;
 
        spin_lock_bh(&tt_global_entry->list_lock);
        head = &tt_global_entry->orig_list;
-       hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
-               hlist_del_rcu(node);
+       hlist_for_each_entry_safe(orig_entry, safe, head, list) {
+               hlist_del_rcu(&orig_entry->list);
                batadv_tt_orig_list_entry_free_ref(orig_entry);
        }
        spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1078,18 +1071,18 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
                                const char *message)
 {
        struct hlist_head *head;
-       struct hlist_node *node, *safe;
+       struct hlist_node *safe;
        struct batadv_tt_orig_list_entry *orig_entry;
 
        spin_lock_bh(&tt_global_entry->list_lock);
        head = &tt_global_entry->orig_list;
-       hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+       hlist_for_each_entry_safe(orig_entry, safe, head, list) {
                if (orig_entry->orig_node == orig_node) {
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
                                   "Deleting %pM from global tt entry %pM: %s\n",
                                   orig_node->orig,
                                   tt_global_entry->common.addr, message);
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&orig_entry->list);
                        batadv_tt_orig_list_entry_free_ref(orig_entry);
                }
        }
@@ -1108,7 +1101,6 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
 {
        bool last_entry = true;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_tt_orig_list_entry *orig_entry;
 
        /* no local entry exists, case 1:
@@ -1117,7 +1109,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
 
        rcu_read_lock();
        head = &tt_global_entry->orig_list;
-       hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+       hlist_for_each_entry_rcu(orig_entry, head, list) {
                if (orig_entry->orig_node != orig_node) {
                        last_entry = false;
                        break;
@@ -1202,7 +1194,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        struct batadv_tt_common_entry *tt_common_entry;
        uint32_t i;
        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
-       struct hlist_node *node, *safe;
+       struct hlist_node *safe;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
 
@@ -1214,7 +1206,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_common_entry, node, safe,
+               hlist_for_each_entry_safe(tt_common_entry, safe,
                                          head, hash_entry) {
                        tt_global = container_of(tt_common_entry,
                                                 struct batadv_tt_global_entry,
@@ -1227,7 +1219,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                                batadv_dbg(BATADV_DBG_TT, bat_priv,
                                           "Deleting global tt entry %pM: %s\n",
                                           tt_global->common.addr, message);
-                               hlist_del_rcu(node);
+                               hlist_del_rcu(&tt_common_entry->hash_entry);
                                batadv_tt_global_entry_free_ref(tt_global);
                        }
                }
@@ -1262,7 +1254,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
 {
        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct hlist_head *head;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;
        char *msg = NULL;
@@ -1274,7 +1266,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+               hlist_for_each_entry_safe(tt_common, node_tmp, head,
                                          hash_entry) {
                        tt_global = container_of(tt_common,
                                                 struct batadv_tt_global_entry,
@@ -1287,7 +1279,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
                                   "Deleting global tt entry (%pM): %s\n",
                                   tt_global->common.addr, msg);
 
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&tt_common->hash_entry);
 
                        batadv_tt_global_entry_free_ref(tt_global);
                }
@@ -1301,7 +1293,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        uint32_t i;
 
@@ -1315,9 +1307,9 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node_tmp,
                                          head, hash_entry) {
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&tt_common_entry->hash_entry);
                        tt_global = container_of(tt_common_entry,
                                                 struct batadv_tt_global_entry,
                                                 common);
@@ -1397,7 +1389,6 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_global_entry *tt_global;
-       struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        int j;
@@ -1406,7 +1397,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
                        tt_global = container_of(tt_common,
                                                 struct batadv_tt_global_entry,
                                                 common);
@@ -1449,7 +1440,6 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
        uint16_t total = 0, total_one;
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
-       struct hlist_node *node;
        struct hlist_head *head;
        uint32_t i;
        int j;
@@ -1458,7 +1448,7 @@ static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
                        /* not yet committed clients have not to be taken into
                         * account while computing the CRC
                         */
@@ -1597,7 +1587,6 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_query_packet *tt_response;
        struct batadv_tt_change *tt_change;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct sk_buff *skb = NULL;
        uint16_t tt_tot, tt_count;
@@ -1627,7 +1616,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_rcu(tt_common_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry,
                                         head, hash_entry) {
                        if (tt_count == tt_tot)
                                break;
@@ -2307,7 +2296,6 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
        uint32_t i;
        uint16_t changed_num = 0;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_tt_common_entry *tt_common_entry;
 
        if (!hash)
@@ -2317,7 +2305,7 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_common_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry,
                                         head, hash_entry) {
                        if (enable) {
                                if ((tt_common_entry->flags & flags) == flags)
@@ -2342,7 +2330,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;
@@ -2355,7 +2343,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+               hlist_for_each_entry_safe(tt_common, node_tmp, head,
                                          hash_entry) {
                        if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
                                continue;
@@ -2365,7 +2353,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                   tt_common->addr);
 
                        atomic_dec(&bat_priv->tt.local_entry_num);
-                       hlist_del_rcu(node);
+                       hlist_del_rcu(&tt_common->hash_entry);
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
                                                common);
index 22d2785..c053244 100644 (file)
@@ -97,7 +97,6 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
        struct batadv_hashtable *hash = bat_priv->vis.hash;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
        uint32_t index;
 
@@ -108,8 +107,8 @@ batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
-               if (!batadv_vis_info_cmp(node, data))
+       hlist_for_each_entry_rcu(vis_info, head, hash_entry) {
+               if (!batadv_vis_info_cmp(&vis_info->hash_entry, data))
                        continue;
 
                vis_info_tmp = vis_info;
@@ -128,9 +127,8 @@ static void batadv_vis_data_insert_interface(const uint8_t *interface,
                                             bool primary)
 {
        struct batadv_vis_if_list_entry *entry;
-       struct hlist_node *pos;
 
-       hlist_for_each_entry(entry, pos, if_list, list) {
+       hlist_for_each_entry(entry, if_list, list) {
                if (batadv_compare_eth(entry->addr, interface))
                        return;
        }
@@ -148,9 +146,8 @@ static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
                                          const struct hlist_head *if_list)
 {
        struct batadv_vis_if_list_entry *entry;
-       struct hlist_node *pos;
 
-       hlist_for_each_entry(entry, pos, if_list, list) {
+       hlist_for_each_entry(entry, if_list, list) {
                if (entry->primary)
                        seq_printf(seq, "PRIMARY, ");
                else
@@ -198,9 +195,8 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
 {
        int i;
        struct batadv_vis_if_list_entry *entry;
-       struct hlist_node *pos;
 
-       hlist_for_each_entry(entry, pos, list, list) {
+       hlist_for_each_entry(entry, list, list) {
                seq_printf(seq, "%pM,", entry->addr);
 
                for (i = 0; i < packet->entries; i++)
@@ -218,17 +214,16 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
 static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
                                             const struct hlist_head *head)
 {
-       struct hlist_node *node;
        struct batadv_vis_info *info;
        struct batadv_vis_packet *packet;
        uint8_t *entries_pos;
        struct batadv_vis_info_entry *entries;
        struct batadv_vis_if_list_entry *entry;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
 
        HLIST_HEAD(vis_if_list);
 
-       hlist_for_each_entry_rcu(info, node, head, hash_entry) {
+       hlist_for_each_entry_rcu(info, head, hash_entry) {
                packet = (struct batadv_vis_packet *)info->skb_packet->data;
                entries_pos = (uint8_t *)packet + sizeof(*packet);
                entries = (struct batadv_vis_info_entry *)entries_pos;
@@ -240,7 +235,7 @@ static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
                batadv_vis_data_read_entries(seq, &vis_if_list, packet,
                                             entries);
 
-               hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+               hlist_for_each_entry_safe(entry, n, &vis_if_list, list) {
                        hlist_del(&entry->list);
                        kfree(entry);
                }
@@ -519,7 +514,6 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct batadv_neigh_node *router;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        struct batadv_vis_packet *packet;
@@ -532,7 +526,7 @@ static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        router = batadv_orig_node_get_router(orig_node);
                        if (!router)
                                continue;
@@ -571,7 +565,6 @@ static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
 static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        struct batadv_neigh_node *router;
@@ -605,7 +598,7 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        router = batadv_orig_node_get_router(orig_node);
                        if (!router)
                                continue;
@@ -644,7 +637,7 @@ next:
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_common_entry, node, head,
+               hlist_for_each_entry_rcu(tt_common_entry, head,
                                         hash_entry) {
                        packet_pos = skb_put(info->skb_packet, sizeof(*entry));
                        entry = (struct batadv_vis_info_entry *)packet_pos;
@@ -673,14 +666,14 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
 {
        uint32_t i;
        struct batadv_hashtable *hash = bat_priv->vis.hash;
-       struct hlist_node *node, *node_tmp;
+       struct hlist_node *node_tmp;
        struct hlist_head *head;
        struct batadv_vis_info *info;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_safe(info, node, node_tmp,
+               hlist_for_each_entry_safe(info, node_tmp,
                                          head, hash_entry) {
                        /* never purge own data. */
                        if (info == bat_priv->vis.my_info)
@@ -688,7 +681,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
 
                        if (batadv_has_timed_out(info->first_seen,
                                                 BATADV_VIS_TIMEOUT)) {
-                               hlist_del(node);
+                               hlist_del(&info->hash_entry);
                                batadv_send_list_del(info);
                                kref_put(&info->refcount, batadv_free_info);
                        }
@@ -700,7 +693,6 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
                                        struct batadv_vis_info *info)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        struct batadv_vis_packet *packet;
@@ -715,7 +707,7 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        /* if it's a vis server and reachable, send it. */
                        if (!(orig_node->flags & BATADV_VIS_SERVER))
                                continue;
index 07f0739..6a93614 100644 (file)
@@ -70,14 +70,13 @@ static struct bt_sock_list hci_sk_list = {
 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct sock *sk;
-       struct hlist_node *node;
        struct sk_buff *skb_copy = NULL;
 
        BT_DBG("hdev %p len %d", hdev, skb->len);
 
        read_lock(&hci_sk_list.lock);
 
-       sk_for_each(sk, node, &hci_sk_list.head) {
+       sk_for_each(sk, &hci_sk_list.head) {
                struct hci_filter *flt;
                struct sk_buff *nskb;
 
@@ -142,13 +141,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
 {
        struct sock *sk;
-       struct hlist_node *node;
 
        BT_DBG("len %d", skb->len);
 
        read_lock(&hci_sk_list.lock);
 
-       sk_for_each(sk, node, &hci_sk_list.head) {
+       sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;
 
                /* Skip the original socket */
@@ -176,7 +174,6 @@ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct sock *sk;
-       struct hlist_node *node;
        struct sk_buff *skb_copy = NULL;
        __le16 opcode;
 
@@ -210,7 +207,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 
        read_lock(&hci_sk_list.lock);
 
-       sk_for_each(sk, node, &hci_sk_list.head) {
+       sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;
 
                if (sk->sk_state != BT_BOUND)
@@ -251,13 +248,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 static void send_monitor_event(struct sk_buff *skb)
 {
        struct sock *sk;
-       struct hlist_node *node;
 
        BT_DBG("len %d", skb->len);
 
        read_lock(&hci_sk_list.lock);
 
-       sk_for_each(sk, node, &hci_sk_list.head) {
+       sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;
 
                if (sk->sk_state != BT_BOUND)
@@ -393,11 +389,10 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 
        if (event == HCI_DEV_UNREG) {
                struct sock *sk;
-               struct hlist_node *node;
 
                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
-               sk_for_each(sk, node, &hci_sk_list.head) {
+               sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
index ce3f665..c23bae8 100644 (file)
@@ -107,15 +107,14 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
 {
        struct sock *sk = NULL;
-       struct hlist_node *node;
 
-       sk_for_each(sk, node, &rfcomm_sk_list.head) {
+       sk_for_each(sk, &rfcomm_sk_list.head) {
                if (rfcomm_pi(sk)->channel == channel &&
                                !bacmp(&bt_sk(sk)->src, src))
                        break;
        }
 
-       return node ? sk : NULL;
+       return sk ? sk : NULL;
 }
 
 /* Find socket with channel and source bdaddr.
@@ -124,11 +123,10 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
 static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
 {
        struct sock *sk = NULL, *sk1 = NULL;
-       struct hlist_node *node;
 
        read_lock(&rfcomm_sk_list.lock);
 
-       sk_for_each(sk, node, &rfcomm_sk_list.head) {
+       sk_for_each(sk, &rfcomm_sk_list.head) {
                if (state && sk->sk_state != state)
                        continue;
 
@@ -145,7 +143,7 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *
 
        read_unlock(&rfcomm_sk_list.lock);
 
-       return node ? sk : sk1;
+       return sk ? sk : sk1;
 }
 
 static void rfcomm_sock_destruct(struct sock *sk)
@@ -970,11 +968,10 @@ done:
 static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
 {
        struct sock *sk;
-       struct hlist_node *node;
 
        read_lock(&rfcomm_sk_list.lock);
 
-       sk_for_each(sk, node, &rfcomm_sk_list.head) {
+       sk_for_each(sk, &rfcomm_sk_list.head) {
                seq_printf(f, "%pMR %pMR %d %d\n",
                           &bt_sk(sk)->src, &bt_sk(sk)->dst,
                           sk->sk_state, rfcomm_pi(sk)->channel);
index b5178d6..79d87d8 100644 (file)
@@ -259,10 +259,9 @@ drop:
 /* -------- Socket interface ---------- */
 static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
 {
-       struct hlist_node *node;
        struct sock *sk;
 
-       sk_for_each(sk, node, &sco_sk_list.head) {
+       sk_for_each(sk, &sco_sk_list.head) {
                if (sk->sk_state != BT_LISTEN)
                        continue;
 
@@ -279,11 +278,10 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
 static struct sock *sco_get_sock_listen(bdaddr_t *src)
 {
        struct sock *sk = NULL, *sk1 = NULL;
-       struct hlist_node *node;
 
        read_lock(&sco_sk_list.lock);
 
-       sk_for_each(sk, node, &sco_sk_list.head) {
+       sk_for_each(sk, &sco_sk_list.head) {
                if (sk->sk_state != BT_LISTEN)
                        continue;
 
@@ -298,7 +296,7 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
 
        read_unlock(&sco_sk_list.lock);
 
-       return node ? sk : sk1;
+       return sk ? sk : sk1;
 }
 
 static void sco_sock_destruct(struct sock *sk)
@@ -951,14 +949,13 @@ static void sco_conn_ready(struct sco_conn *conn)
 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
 {
        struct sock *sk;
-       struct hlist_node *node;
        int lm = 0;
 
        BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
 
        /* Find listening sockets */
        read_lock(&sco_sk_list.lock);
-       sk_for_each(sk, node, &sco_sk_list.head) {
+       sk_for_each(sk, &sco_sk_list.head) {
                if (sk->sk_state != BT_LISTEN)
                        continue;
 
@@ -1018,11 +1015,10 @@ drop:
 static int sco_debugfs_show(struct seq_file *f, void *p)
 {
        struct sock *sk;
-       struct hlist_node *node;
 
        read_lock(&sco_sk_list.lock);
 
-       sk_for_each(sk, node, &sco_sk_list.head) {
+       sk_for_each(sk, &sco_sk_list.head) {
                seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
                           &bt_sk(sk)->dst, sk->sk_state);
        }
index 8117900..b0812c9 100644 (file)
@@ -181,9 +181,9 @@ void br_fdb_cleanup(unsigned long _data)
        spin_lock(&br->hash_lock);
        for (i = 0; i < BR_HASH_SIZE; i++) {
                struct net_bridge_fdb_entry *f;
-               struct hlist_node *h, *n;
+               struct hlist_node *n;
 
-               hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
+               hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
                        unsigned long this_timer;
                        if (f->is_static)
                                continue;
@@ -207,8 +207,8 @@ void br_fdb_flush(struct net_bridge *br)
        spin_lock_bh(&br->hash_lock);
        for (i = 0; i < BR_HASH_SIZE; i++) {
                struct net_bridge_fdb_entry *f;
-               struct hlist_node *h, *n;
-               hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
+               struct hlist_node *n;
+               hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
                        if (!f->is_static)
                                fdb_delete(br, f);
                }
@@ -266,10 +266,9 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
                                          const unsigned char *addr,
                                          __u16 vid)
 {
-       struct hlist_node *h;
        struct net_bridge_fdb_entry *fdb;
 
-       hlist_for_each_entry_rcu(fdb, h,
+       hlist_for_each_entry_rcu(fdb,
                                &br->hash[br_mac_hash(addr, vid)], hlist) {
                if (ether_addr_equal(fdb->addr.addr, addr) &&
                    fdb->vlan_id == vid) {
@@ -315,14 +314,13 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 {
        struct __fdb_entry *fe = buf;
        int i, num = 0;
-       struct hlist_node *h;
        struct net_bridge_fdb_entry *f;
 
        memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
 
        rcu_read_lock();
        for (i = 0; i < BR_HASH_SIZE; i++) {
-               hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+               hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
                        if (num >= maxnum)
                                goto out;
 
@@ -363,10 +361,9 @@ static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
                                             const unsigned char *addr,
                                             __u16 vid)
 {
-       struct hlist_node *h;
        struct net_bridge_fdb_entry *fdb;
 
-       hlist_for_each_entry(fdb, h, head, hlist) {
+       hlist_for_each_entry(fdb, head, hlist) {
                if (ether_addr_equal(fdb->addr.addr, addr) &&
                    fdb->vlan_id == vid)
                        return fdb;
@@ -378,10 +375,9 @@ static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
                                                 const unsigned char *addr,
                                                 __u16 vid)
 {
-       struct hlist_node *h;
        struct net_bridge_fdb_entry *fdb;
 
-       hlist_for_each_entry_rcu(fdb, h, head, hlist) {
+       hlist_for_each_entry_rcu(fdb, head, hlist) {
                if (ether_addr_equal(fdb->addr.addr, addr) &&
                    fdb->vlan_id == vid)
                        return fdb;
@@ -593,10 +589,9 @@ int br_fdb_dump(struct sk_buff *skb,
                goto out;
 
        for (i = 0; i < BR_HASH_SIZE; i++) {
-               struct hlist_node *h;
                struct net_bridge_fdb_entry *f;
 
-               hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+               hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
                        if (idx < cb->args[0])
                                goto skip;
 
index 38991e0..9f97b85 100644 (file)
@@ -18,7 +18,6 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 {
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p;
-       struct hlist_node *n;
        struct nlattr *nest;
 
        if (!br->multicast_router || hlist_empty(&br->router_list))
@@ -28,7 +27,7 @@ static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
        if (nest == NULL)
                return -EMSGSIZE;
 
-       hlist_for_each_entry_rcu(p, n, &br->router_list, rlist) {
+       hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
                if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
                        goto fail;
        }
@@ -61,12 +60,11 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                return -EMSGSIZE;
 
        for (i = 0; i < mdb->max; i++) {
-               struct hlist_node *h;
                struct net_bridge_mdb_entry *mp;
                struct net_bridge_port_group *p, **pp;
                struct net_bridge_port *port;
 
-               hlist_for_each_entry_rcu(mp, h, &mdb->mhash[i], hlist[mdb->ver]) {
+               hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
                        if (idx < s_idx)
                                goto skip;
 
index 7d886b0..10e6fce 100644 (file)
@@ -86,9 +86,8 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
        struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
 {
        struct net_bridge_mdb_entry *mp;
-       struct hlist_node *p;
 
-       hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+       hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
                if (br_ip_equal(&mp->addr, dst))
                        return mp;
        }
@@ -178,13 +177,12 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
                       int elasticity)
 {
        struct net_bridge_mdb_entry *mp;
-       struct hlist_node *p;
        int maxlen;
        int len;
        int i;
 
        for (i = 0; i < old->max; i++)
-               hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
+               hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
                        hlist_add_head(&mp->hlist[new->ver],
                                       &new->mhash[br_ip_hash(new, &mp->addr)]);
 
@@ -194,7 +192,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
        maxlen = 0;
        for (i = 0; i < new->max; i++) {
                len = 0;
-               hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
+               hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
                        len++;
                if (len > maxlen)
                        maxlen = len;
@@ -510,14 +508,13 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
 {
        struct net_bridge_mdb_htable *mdb;
        struct net_bridge_mdb_entry *mp;
-       struct hlist_node *p;
        unsigned int count = 0;
        unsigned int max;
        int elasticity;
        int err;
 
        mdb = rcu_dereference_protected(br->mdb, 1);
-       hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+       hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
                count++;
                if (unlikely(br_ip_equal(group, &mp->addr)))
                        return mp;
@@ -882,10 +879,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 {
        struct net_bridge *br = port->br;
        struct net_bridge_port_group *pg;
-       struct hlist_node *p, *n;
+       struct hlist_node *n;
 
        spin_lock(&br->multicast_lock);
-       hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
+       hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
                br_multicast_del_pg(br, pg);
 
        if (!hlist_unhashed(&port->rlist))
@@ -1025,12 +1022,12 @@ static void br_multicast_add_router(struct net_bridge *br,
                                    struct net_bridge_port *port)
 {
        struct net_bridge_port *p;
-       struct hlist_node *n, *slot = NULL;
+       struct hlist_node *slot = NULL;
 
-       hlist_for_each_entry(p, n, &br->router_list, rlist) {
+       hlist_for_each_entry(p, &br->router_list, rlist) {
                if ((unsigned long) port >= (unsigned long) p)
                        break;
-               slot = n;
+               slot = &p->rlist;
        }
 
        if (slot)
@@ -1653,7 +1650,7 @@ void br_multicast_stop(struct net_bridge *br)
 {
        struct net_bridge_mdb_htable *mdb;
        struct net_bridge_mdb_entry *mp;
-       struct hlist_node *p, *n;
+       struct hlist_node *n;
        u32 ver;
        int i;
 
@@ -1670,7 +1667,7 @@ void br_multicast_stop(struct net_bridge *br)
 
        ver = mdb->ver;
        for (i = 0; i < mdb->max; i++) {
-               hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
+               hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
                                          hlist[ver]) {
                        del_timer(&mp->timer);
                        call_rcu_bh(&mp->rcu, br_multicast_free_group);
index 1ae1d9c..21760f0 100644 (file)
@@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        return NULL;
 }
 
-void caif_flow_cb(struct sk_buff *skb)
+static void caif_flow_cb(struct sk_buff *skb)
 {
        struct caif_device_entry *caifd;
        void (*dtor)(struct sk_buff *skb) = NULL;
index 3ebc8cb..ef8ebaa 100644 (file)
@@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                layr->up->ctrlcmd(layr->up, ctrl, layr->id);
 }
 
-struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
-                                       u8 braddr[ETH_ALEN])
+static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+                                     u8 braddr[ETH_ALEN])
 {
        struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
 
index ddac1ee..c48e522 100644 (file)
@@ -516,7 +516,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
 {
        struct receiver *r = NULL;
        struct hlist_head *rl;
-       struct hlist_node *next;
        struct dev_rcv_lists *d;
 
        if (dev && dev->type != ARPHRD_CAN)
@@ -540,7 +539,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
         * been registered before.
         */
 
-       hlist_for_each_entry_rcu(r, next, rl, list) {
+       hlist_for_each_entry_rcu(r, rl, list) {
                if (r->can_id == can_id && r->mask == mask &&
                    r->func == func && r->data == data)
                        break;
@@ -552,7 +551,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
         * will be NULL, while r will point to the last item of the list.
         */
 
-       if (!next) {
+       if (!r) {
                printk(KERN_ERR "BUG: receive list entry not found for "
                       "dev %s, id %03X, mask %03X\n",
                       DNAME(dev), can_id, mask);
@@ -590,7 +589,6 @@ static inline void deliver(struct sk_buff *skb, struct receiver *r)
 static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
 {
        struct receiver *r;
-       struct hlist_node *n;
        int matches = 0;
        struct can_frame *cf = (struct can_frame *)skb->data;
        canid_t can_id = cf->can_id;
@@ -600,7 +598,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
 
        if (can_id & CAN_ERR_FLAG) {
                /* check for error message frame entries only */
-               hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
+               hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
                        if (can_id & r->mask) {
                                deliver(skb, r);
                                matches++;
@@ -610,13 +608,13 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
        }
 
        /* check for unfiltered entries */
-       hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
+       hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
                deliver(skb, r);
                matches++;
        }
 
        /* check for can_id/mask entries */
-       hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
+       hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
                if ((can_id & r->mask) == r->can_id) {
                        deliver(skb, r);
                        matches++;
@@ -624,7 +622,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
        }
 
        /* check for inverted can_id/mask entries */
-       hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
+       hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
                if ((can_id & r->mask) != r->can_id) {
                        deliver(skb, r);
                        matches++;
@@ -636,7 +634,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
                return matches;
 
        if (can_id & CAN_EFF_FLAG) {
-               hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
+               hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) {
                        if (r->can_id == can_id) {
                                deliver(skb, r);
                                matches++;
@@ -644,7 +642,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
                }
        } else {
                can_id &= CAN_SFF_MASK;
-               hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
+               hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
                        deliver(skb, r);
                        matches++;
                }
index c185fcd..2d117dc 100644 (file)
@@ -457,11 +457,11 @@ static int cgw_notifier(struct notifier_block *nb,
        if (msg == NETDEV_UNREGISTER) {
 
                struct cgw_job *gwj = NULL;
-               struct hlist_node *n, *nx;
+               struct hlist_node *nx;
 
                ASSERT_RTNL();
 
-               hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+               hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
 
                        if (gwj->src.dev == dev || gwj->dst.dev == dev) {
                                hlist_del(&gwj->list);
@@ -575,12 +575,11 @@ cancel:
 static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct cgw_job *gwj = NULL;
-       struct hlist_node *n;
        int idx = 0;
        int s_idx = cb->args[0];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) {
+       hlist_for_each_entry_rcu(gwj, &cgw_list, list) {
                if (idx < s_idx)
                        goto cont;
 
@@ -858,11 +857,11 @@ out:
 static void cgw_remove_all_jobs(void)
 {
        struct cgw_job *gwj = NULL;
-       struct hlist_node *n, *nx;
+       struct hlist_node *nx;
 
        ASSERT_RTNL();
 
-       hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+       hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
                hlist_del(&gwj->list);
                cgw_unregister_filter(gwj);
                kfree(gwj);
@@ -872,7 +871,7 @@ static void cgw_remove_all_jobs(void)
 static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
 {
        struct cgw_job *gwj = NULL;
-       struct hlist_node *n, *nx;
+       struct hlist_node *nx;
        struct rtcanmsg *r;
        struct cf_mod mod;
        struct can_can_gw ccgw;
@@ -907,7 +906,7 @@ static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
        ASSERT_RTNL();
 
        /* remove only the first matching entry */
-       hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+       hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
 
                if (gwj->flags != r->flags)
                        continue;
index 4973358..1ab8c88 100644 (file)
@@ -195,9 +195,8 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
                              struct net_device *dev)
 {
        struct receiver *r;
-       struct hlist_node *n;
 
-       hlist_for_each_entry_rcu(r, n, rx_list, list) {
+       hlist_for_each_entry_rcu(r, rx_list, list) {
                char *fmt = (r->can_id & CAN_EFF_FLAG)?
                        "   %-5s  %08x  %08x  %pK  %pK  %8ld  %s\n" :
                        "   %-5s     %03x    %08x  %pK  %pK  %8ld  %s\n";
index 1deb29a..e65e6e4 100644 (file)
 #include "crypto.h"
 
 
+/*
+ * Module compatibility interface.  For now it doesn't do anything,
+ * but its existence signals a certain level of functionality.
+ *
+ * The data buffer is used to pass information both to and from
+ * libceph.  The return value indicates whether libceph determines
+ * it is compatible with the caller (from another kernel module),
+ * given the provided data.
+ *
+ * The data pointer can be null.
+ */
+bool libceph_compatible(void *data)
+{
+       return true;
+}
+EXPORT_SYMBOL(libceph_compatible);
 
 /*
  * find filename portion of a path (/foo/bar/baz -> baz)
@@ -590,10 +606,8 @@ static int __init init_ceph_lib(void)
        if (ret < 0)
                goto out_crypto;
 
-       pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
-               CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
-               CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
-               CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
+       pr_info("loaded (mon/osd proto %d/%d)\n",
+               CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL);
 
        return 0;
 
index 3fbda04..1348df9 100644 (file)
@@ -21,9 +21,15 @@ const char *ceph_osd_op_name(int op)
        switch (op) {
        case CEPH_OSD_OP_READ: return "read";
        case CEPH_OSD_OP_STAT: return "stat";
+       case CEPH_OSD_OP_MAPEXT: return "mapext";
+       case CEPH_OSD_OP_SPARSE_READ: return "sparse-read";
+       case CEPH_OSD_OP_NOTIFY: return "notify";
+       case CEPH_OSD_OP_NOTIFY_ACK: return "notify-ack";
+       case CEPH_OSD_OP_ASSERT_VER: return "assert-version";
 
        case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
 
+       case CEPH_OSD_OP_CREATE: return "create";
        case CEPH_OSD_OP_WRITE: return "write";
        case CEPH_OSD_OP_DELETE: return "delete";
        case CEPH_OSD_OP_TRUNCATE: return "truncate";
@@ -39,6 +45,11 @@ const char *ceph_osd_op_name(int op)
        case CEPH_OSD_OP_TMAPUP: return "tmapup";
        case CEPH_OSD_OP_TMAPGET: return "tmapget";
        case CEPH_OSD_OP_TMAPPUT: return "tmapput";
+       case CEPH_OSD_OP_WATCH: return "watch";
+
+       case CEPH_OSD_OP_CLONERANGE: return "clonerange";
+       case CEPH_OSD_OP_ASSERT_SRC_VERSION: return "assert-src-version";
+       case CEPH_OSD_OP_SRC_CMPXATTR: return "src-cmpxattr";
 
        case CEPH_OSD_OP_GETXATTR: return "getxattr";
        case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
@@ -53,6 +64,10 @@ const char *ceph_osd_op_name(int op)
        case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
        case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
        case CEPH_OSD_OP_SCRUB: return "scrub";
+       case CEPH_OSD_OP_SCRUB_RESERVE: return "scrub-reserve";
+       case CEPH_OSD_OP_SCRUB_UNRESERVE: return "scrub-unreserve";
+       case CEPH_OSD_OP_SCRUB_STOP: return "scrub-stop";
+       case CEPH_OSD_OP_SCRUB_MAP: return "scrub-map";
 
        case CEPH_OSD_OP_WRLOCK: return "wrlock";
        case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
@@ -64,10 +79,34 @@ const char *ceph_osd_op_name(int op)
        case CEPH_OSD_OP_CALL: return "call";
 
        case CEPH_OSD_OP_PGLS: return "pgls";
+       case CEPH_OSD_OP_PGLS_FILTER: return "pgls-filter";
+       case CEPH_OSD_OP_OMAPGETKEYS: return "omap-get-keys";
+       case CEPH_OSD_OP_OMAPGETVALS: return "omap-get-vals";
+       case CEPH_OSD_OP_OMAPGETHEADER: return "omap-get-header";
+       case CEPH_OSD_OP_OMAPGETVALSBYKEYS: return "omap-get-vals-by-keys";
+       case CEPH_OSD_OP_OMAPSETVALS: return "omap-set-vals";
+       case CEPH_OSD_OP_OMAPSETHEADER: return "omap-set-header";
+       case CEPH_OSD_OP_OMAPCLEAR: return "omap-clear";
+       case CEPH_OSD_OP_OMAPRMKEYS: return "omap-rm-keys";
        }
        return "???";
 }
 
+const char *ceph_osd_state_name(int s)
+{
+       switch (s) {
+       case CEPH_OSD_EXISTS:
+               return "exists";
+       case CEPH_OSD_UP:
+               return "up";
+       case CEPH_OSD_AUTOOUT:
+               return "autoout";
+       case CEPH_OSD_NEW:
+               return "new";
+       default:
+               return "???";
+       }
+}
 
 const char *ceph_pool_op_name(int op)
 {
index 35fce75..cbd06a9 100644 (file)
@@ -287,6 +287,7 @@ static int is_out(const struct crush_map *map, const __u32 *weight, int item, in
  * @outpos: our position in that vector
  * @firstn: true if choosing "first n" items, false if choosing "indep"
  * @recurse_to_leaf: true if we want one device under each item of given type
+ * @descend_once: true if we should only try one descent before giving up
  * @out2: second output vector for leaf items (if @recurse_to_leaf)
  */
 static int crush_choose(const struct crush_map *map,
@@ -295,7 +296,7 @@ static int crush_choose(const struct crush_map *map,
                        int x, int numrep, int type,
                        int *out, int outpos,
                        int firstn, int recurse_to_leaf,
-                       int *out2)
+                       int descend_once, int *out2)
 {
        int rep;
        unsigned int ftotal, flocal;
@@ -391,7 +392,7 @@ static int crush_choose(const struct crush_map *map,
                                }
 
                                reject = 0;
-                               if (recurse_to_leaf) {
+                               if (!collide && recurse_to_leaf) {
                                        if (item < 0) {
                                                if (crush_choose(map,
                                                         map->buckets[-1-item],
@@ -399,6 +400,7 @@ static int crush_choose(const struct crush_map *map,
                                                         x, outpos+1, 0,
                                                         out2, outpos,
                                                         firstn, 0,
+                                                        map->chooseleaf_descend_once,
                                                         NULL) <= outpos)
                                                        /* didn't get leaf */
                                                        reject = 1;
@@ -422,7 +424,10 @@ reject:
                                        ftotal++;
                                        flocal++;
 
-                                       if (collide && flocal <= map->choose_local_tries)
+                                       if (reject && descend_once)
+                                               /* let outer call try again */
+                                               skip_rep = 1;
+                                       else if (collide && flocal <= map->choose_local_tries)
                                                /* retry locally a few times */
                                                retry_bucket = 1;
                                        else if (map->choose_local_fallback_tries > 0 &&
@@ -485,6 +490,7 @@ int crush_do_rule(const struct crush_map *map,
        int i, j;
        int numrep;
        int firstn;
+       const int descend_once = 0;
 
        if ((__u32)ruleno >= map->max_rules) {
                dprintk(" bad ruleno %d\n", ruleno);
@@ -544,7 +550,8 @@ int crush_do_rule(const struct crush_map *map,
                                                      curstep->arg2,
                                                      o+osize, j,
                                                      firstn,
-                                                     recurse_to_leaf, c+osize);
+                                                     recurse_to_leaf,
+                                                     descend_once, c+osize);
                        }
 
                        if (recurse_to_leaf)
index af14cb4..6e7a236 100644 (file)
@@ -423,7 +423,8 @@ int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
        }
 }
 
-int ceph_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+static int ceph_key_instantiate(struct key *key,
+                               struct key_preparsed_payload *prep)
 {
        struct ceph_crypto_key *ckey;
        size_t datalen = prep->datalen;
@@ -458,12 +459,12 @@ err:
        return ret;
 }
 
-int ceph_key_match(const struct key *key, const void *description)
+static int ceph_key_match(const struct key *key, const void *description)
 {
        return strcmp(key->description, description) == 0;
 }
 
-void ceph_key_destroy(struct key *key) {
+static void ceph_key_destroy(struct key *key) {
        struct ceph_crypto_key *ckey = key->payload.data;
 
        ceph_crypto_key_destroy(ckey);
index 38b5dc1..00d051f 100644 (file)
@@ -66,9 +66,9 @@ static int osdmap_show(struct seq_file *s, void *p)
        for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
                struct ceph_pg_pool_info *pool =
                        rb_entry(n, struct ceph_pg_pool_info, node);
-               seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
-                          pool->id, pool->v.pg_num, pool->pg_num_mask,
-                          pool->v.lpg_num, pool->lpg_num_mask);
+               seq_printf(s, "pg_pool %llu pg_num %d / %d\n",
+                          (unsigned long long)pool->id, pool->pg_num,
+                          pool->pg_num_mask);
        }
        for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
                struct ceph_entity_addr *addr =
@@ -123,26 +123,16 @@ static int osdc_show(struct seq_file *s, void *pp)
        mutex_lock(&osdc->request_mutex);
        for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
                struct ceph_osd_request *req;
-               struct ceph_osd_request_head *head;
-               struct ceph_osd_op *op;
-               int num_ops;
-               int opcode, olen;
+               int opcode;
                int i;
 
                req = rb_entry(p, struct ceph_osd_request, r_node);
 
-               seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
+               seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid,
                           req->r_osd ? req->r_osd->o_osd : -1,
-                          le32_to_cpu(req->r_pgid.pool),
-                          le16_to_cpu(req->r_pgid.ps));
+                          req->r_pgid.pool, req->r_pgid.seed);
 
-               head = req->r_request->front.iov_base;
-               op = (void *)(head + 1);
-
-               num_ops = le16_to_cpu(head->num_ops);
-               olen = le32_to_cpu(head->object_len);
-               seq_printf(s, "%.*s", olen,
-                          (const char *)(head->ops + num_ops));
+               seq_printf(s, "%.*s", req->r_oid_len, req->r_oid);
 
                if (req->r_reassert_version.epoch)
                        seq_printf(s, "\t%u'%llu",
@@ -151,10 +141,9 @@ static int osdc_show(struct seq_file *s, void *pp)
                else
                        seq_printf(s, "\t");
 
-               for (i = 0; i < num_ops; i++) {
-                       opcode = le16_to_cpu(op->op);
+               for (i = 0; i < req->r_num_ops; i++) {
+                       opcode = le16_to_cpu(req->r_request_ops[i].op);
                        seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
-                       op++;
                }
 
                seq_printf(s, "\n");
index 5ccf87e..2c0669f 100644 (file)
@@ -9,8 +9,9 @@
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
+#ifdef CONFIG_BLOCK
 #include <linux/bio.h>
-#include <linux/blkdev.h>
+#endif /* CONFIG_BLOCK */
 #include <linux/dns_resolver.h>
 #include <net/tcp.h>
 
 #define CON_FLAG_SOCK_CLOSED      3  /* socket state changed to closed */
 #define CON_FLAG_BACKOFF           4  /* need to retry queuing delayed work */
 
+static bool con_flag_valid(unsigned long con_flag)
+{
+       switch (con_flag) {
+       case CON_FLAG_LOSSYTX:
+       case CON_FLAG_KEEPALIVE_PENDING:
+       case CON_FLAG_WRITE_PENDING:
+       case CON_FLAG_SOCK_CLOSED:
+       case CON_FLAG_BACKOFF:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
+{
+       BUG_ON(!con_flag_valid(con_flag));
+
+       clear_bit(con_flag, &con->flags);
+}
+
+static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
+{
+       BUG_ON(!con_flag_valid(con_flag));
+
+       set_bit(con_flag, &con->flags);
+}
+
+static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
+{
+       BUG_ON(!con_flag_valid(con_flag));
+
+       return test_bit(con_flag, &con->flags);
+}
+
+static bool con_flag_test_and_clear(struct ceph_connection *con,
+                                       unsigned long con_flag)
+{
+       BUG_ON(!con_flag_valid(con_flag));
+
+       return test_and_clear_bit(con_flag, &con->flags);
+}
+
+static bool con_flag_test_and_set(struct ceph_connection *con,
+                                       unsigned long con_flag)
+{
+       BUG_ON(!con_flag_valid(con_flag));
+
+       return test_and_set_bit(con_flag, &con->flags);
+}
+
 /* static tag bytes (protocol control messages) */
 static char tag_msg = CEPH_MSGR_TAG_MSG;
 static char tag_ack = CEPH_MSGR_TAG_ACK;
@@ -114,7 +166,7 @@ static struct lock_class_key socket_class;
 
 static void queue_con(struct ceph_connection *con);
 static void con_work(struct work_struct *);
-static void ceph_fault(struct ceph_connection *con);
+static void con_fault(struct ceph_connection *con);
 
 /*
  * Nicely render a sockaddr as a string.  An array of formatted
@@ -171,7 +223,7 @@ static void encode_my_addr(struct ceph_messenger *msgr)
  */
 static struct workqueue_struct *ceph_msgr_wq;
 
-void _ceph_msgr_exit(void)
+static void _ceph_msgr_exit(void)
 {
        if (ceph_msgr_wq) {
                destroy_workqueue(ceph_msgr_wq);
@@ -308,7 +360,7 @@ static void ceph_sock_write_space(struct sock *sk)
         * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
         * and net/core/stream.c:sk_stream_write_space().
         */
-       if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
+       if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                        dout("%s %p queueing write work\n", __func__, con);
                        clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
@@ -333,7 +385,7 @@ static void ceph_sock_state_change(struct sock *sk)
        case TCP_CLOSE_WAIT:
                dout("%s TCP_CLOSE_WAIT\n", __func__);
                con_sock_state_closing(con);
-               set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
+               con_flag_set(con, CON_FLAG_SOCK_CLOSED);
                queue_con(con);
                break;
        case TCP_ESTABLISHED:
@@ -474,7 +526,7 @@ static int con_close_socket(struct ceph_connection *con)
         * received a socket close event before we had the chance to
         * shut the socket down.
         */
-       clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
+       con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
 
        con_sock_state_closed(con);
        return rc;
@@ -538,11 +590,10 @@ void ceph_con_close(struct ceph_connection *con)
             ceph_pr_addr(&con->peer_addr.in_addr));
        con->state = CON_STATE_CLOSED;
 
-       clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
-       clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
-       clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
-       clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
-       clear_bit(CON_FLAG_BACKOFF, &con->flags);
+       con_flag_clear(con, CON_FLAG_LOSSYTX);  /* so we retry next connect */
+       con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
+       con_flag_clear(con, CON_FLAG_WRITE_PENDING);
+       con_flag_clear(con, CON_FLAG_BACKOFF);
 
        reset_connection(con);
        con->peer_global_seq = 0;
@@ -798,7 +849,7 @@ static void prepare_write_message(struct ceph_connection *con)
                /* no, queue up footer too and be done */
                prepare_write_message_footer(con);
 
-       set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+       con_flag_set(con, CON_FLAG_WRITE_PENDING);
 }
 
 /*
@@ -819,7 +870,7 @@ static void prepare_write_ack(struct ceph_connection *con)
                                &con->out_temp_ack);
 
        con->out_more = 1;  /* more will follow.. eventually.. */
-       set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+       con_flag_set(con, CON_FLAG_WRITE_PENDING);
 }
 
 /*
@@ -830,7 +881,7 @@ static void prepare_write_keepalive(struct ceph_connection *con)
        dout("prepare_write_keepalive %p\n", con);
        con_out_kvec_reset(con);
        con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
-       set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+       con_flag_set(con, CON_FLAG_WRITE_PENDING);
 }
 
 /*
@@ -873,7 +924,7 @@ static void prepare_write_banner(struct ceph_connection *con)
                                        &con->msgr->my_enc_addr);
 
        con->out_more = 0;
-       set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+       con_flag_set(con, CON_FLAG_WRITE_PENDING);
 }
 
 static int prepare_write_connect(struct ceph_connection *con)
@@ -923,7 +974,7 @@ static int prepare_write_connect(struct ceph_connection *con)
                                        auth->authorizer_buf);
 
        con->out_more = 0;
-       set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+       con_flag_set(con, CON_FLAG_WRITE_PENDING);
 
        return 0;
 }
@@ -1643,7 +1694,7 @@ static int process_connect(struct ceph_connection *con)
                        le32_to_cpu(con->in_reply.connect_seq));
 
                if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
-                       set_bit(CON_FLAG_LOSSYTX, &con->flags);
+                       con_flag_set(con, CON_FLAG_LOSSYTX);
 
                con->delay = 0;      /* reset backoff memory */
 
@@ -2080,15 +2131,14 @@ do_next:
                        prepare_write_ack(con);
                        goto more;
                }
-               if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
-                                      &con->flags)) {
+               if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
                        prepare_write_keepalive(con);
                        goto more;
                }
        }
 
        /* Nothing to do! */
-       clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+       con_flag_clear(con, CON_FLAG_WRITE_PENDING);
        dout("try_write nothing else to write.\n");
        ret = 0;
 out:
@@ -2268,7 +2318,7 @@ static void queue_con(struct ceph_connection *con)
 
 static bool con_sock_closed(struct ceph_connection *con)
 {
-       if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
+       if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
                return false;
 
 #define CASE(x)                                                                \
@@ -2295,6 +2345,41 @@ static bool con_sock_closed(struct ceph_connection *con)
        return true;
 }
 
+static bool con_backoff(struct ceph_connection *con)
+{
+       int ret;
+
+       if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
+               return false;
+
+       ret = queue_con_delay(con, round_jiffies_relative(con->delay));
+       if (ret) {
+               dout("%s: con %p FAILED to back off %lu\n", __func__,
+                       con, con->delay);
+               BUG_ON(ret == -ENOENT);
+               con_flag_set(con, CON_FLAG_BACKOFF);
+       }
+
+       return true;
+}
+
+/* Finish fault handling; con->mutex must *not* be held here */
+
+static void con_fault_finish(struct ceph_connection *con)
+{
+       /*
+        * in case we faulted due to authentication, invalidate our
+        * current tickets so that we can get new ones.
+        */
+       if (con->auth_retry && con->ops->invalidate_authorizer) {
+               dout("calling invalidate_authorizer()\n");
+               con->ops->invalidate_authorizer(con);
+       }
+
+       if (con->ops->fault)
+               con->ops->fault(con);
+}
+
 /*
  * Do some work on a connection.  Drop a connection ref when we're done.
  */
@@ -2302,73 +2387,68 @@ static void con_work(struct work_struct *work)
 {
        struct ceph_connection *con = container_of(work, struct ceph_connection,
                                                   work.work);
-       int ret;
+       bool fault;
 
        mutex_lock(&con->mutex);
-restart:
-       if (con_sock_closed(con))
-               goto fault;
+       while (true) {
+               int ret;
 
-       if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
-               dout("con_work %p backing off\n", con);
-               ret = queue_con_delay(con, round_jiffies_relative(con->delay));
-               if (ret) {
-                       dout("con_work %p FAILED to back off %lu\n", con,
-                            con->delay);
-                       BUG_ON(ret == -ENOENT);
-                       set_bit(CON_FLAG_BACKOFF, &con->flags);
+               if ((fault = con_sock_closed(con))) {
+                       dout("%s: con %p SOCK_CLOSED\n", __func__, con);
+                       break;
+               }
+               if (con_backoff(con)) {
+                       dout("%s: con %p BACKOFF\n", __func__, con);
+                       break;
+               }
+               if (con->state == CON_STATE_STANDBY) {
+                       dout("%s: con %p STANDBY\n", __func__, con);
+                       break;
+               }
+               if (con->state == CON_STATE_CLOSED) {
+                       dout("%s: con %p CLOSED\n", __func__, con);
+                       BUG_ON(con->sock);
+                       break;
+               }
+               if (con->state == CON_STATE_PREOPEN) {
+                       dout("%s: con %p PREOPEN\n", __func__, con);
+                       BUG_ON(con->sock);
                }
-               goto done;
-       }
 
-       if (con->state == CON_STATE_STANDBY) {
-               dout("con_work %p STANDBY\n", con);
-               goto done;
-       }
-       if (con->state == CON_STATE_CLOSED) {
-               dout("con_work %p CLOSED\n", con);
-               BUG_ON(con->sock);
-               goto done;
-       }
-       if (con->state == CON_STATE_PREOPEN) {
-               dout("con_work OPENING\n");
-               BUG_ON(con->sock);
-       }
+               ret = try_read(con);
+               if (ret < 0) {
+                       if (ret == -EAGAIN)
+                               continue;
+                       con->error_msg = "socket error on read";
+                       fault = true;
+                       break;
+               }
 
-       ret = try_read(con);
-       if (ret == -EAGAIN)
-               goto restart;
-       if (ret < 0) {
-               con->error_msg = "socket error on read";
-               goto fault;
-       }
+               ret = try_write(con);
+               if (ret < 0) {
+                       if (ret == -EAGAIN)
+                               continue;
+                       con->error_msg = "socket error on write";
+                       fault = true;
+               }
 
-       ret = try_write(con);
-       if (ret == -EAGAIN)
-               goto restart;
-       if (ret < 0) {
-               con->error_msg = "socket error on write";
-               goto fault;
+               break;  /* If we make it to here, we're done */
        }
-
-done:
+       if (fault)
+               con_fault(con);
        mutex_unlock(&con->mutex);
-done_unlocked:
-       con->ops->put(con);
-       return;
 
-fault:
-       ceph_fault(con);     /* error/fault path */
-       goto done_unlocked;
-}
+       if (fault)
+               con_fault_finish(con);
 
+       con->ops->put(con);
+}
 
 /*
  * Generic error/fault handler.  A retry mechanism is used with
  * exponential backoff
  */
-static void ceph_fault(struct ceph_connection *con)
-       __releases(con->mutex)
+static void con_fault(struct ceph_connection *con)
 {
        pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
               ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
@@ -2381,10 +2461,10 @@ static void ceph_fault(struct ceph_connection *con)
 
        con_close_socket(con);
 
-       if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
+       if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
                dout("fault on LOSSYTX channel, marking CLOSED\n");
                con->state = CON_STATE_CLOSED;
-               goto out_unlock;
+               return;
        }
 
        if (con->in_msg) {
@@ -2401,9 +2481,9 @@ static void ceph_fault(struct ceph_connection *con)
        /* If there are no messages queued or keepalive pending, place
         * the connection in a STANDBY state */
        if (list_empty(&con->out_queue) &&
-           !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
+           !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
                dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
-               clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+               con_flag_clear(con, CON_FLAG_WRITE_PENDING);
                con->state = CON_STATE_STANDBY;
        } else {
                /* retry after a delay. */
@@ -2412,23 +2492,9 @@ static void ceph_fault(struct ceph_connection *con)
                        con->delay = BASE_DELAY_INTERVAL;
                else if (con->delay < MAX_DELAY_INTERVAL)
                        con->delay *= 2;
-               set_bit(CON_FLAG_BACKOFF, &con->flags);
+               con_flag_set(con, CON_FLAG_BACKOFF);
                queue_con(con);
        }
-
-out_unlock:
-       mutex_unlock(&con->mutex);
-       /*
-        * in case we faulted due to authentication, invalidate our
-        * current tickets so that we can get new ones.
-        */
-       if (con->auth_retry && con->ops->invalidate_authorizer) {
-               dout("calling invalidate_authorizer()\n");
-               con->ops->invalidate_authorizer(con);
-       }
-
-       if (con->ops->fault)
-               con->ops->fault(con);
 }
 
 
@@ -2469,8 +2535,8 @@ static void clear_standby(struct ceph_connection *con)
                dout("clear_standby %p and ++connect_seq\n", con);
                con->state = CON_STATE_PREOPEN;
                con->connect_seq++;
-               WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags));
-               WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags));
+               WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
+               WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
        }
 }
 
@@ -2511,7 +2577,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
 
        /* if there wasn't anything waiting to send before, queue
         * new work */
-       if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
+       if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
                queue_con(con);
 }
 EXPORT_SYMBOL(ceph_con_send);
@@ -2600,8 +2666,8 @@ void ceph_con_keepalive(struct ceph_connection *con)
        mutex_lock(&con->mutex);
        clear_standby(con);
        mutex_unlock(&con->mutex);
-       if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
-           test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
+       if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
+           con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
                queue_con(con);
 }
 EXPORT_SYMBOL(ceph_con_keepalive);
@@ -2651,9 +2717,11 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
        m->page_alignment = 0;
        m->pages = NULL;
        m->pagelist = NULL;
+#ifdef CONFIG_BLOCK
        m->bio = NULL;
        m->bio_iter = NULL;
        m->bio_seg = 0;
+#endif /* CONFIG_BLOCK */
        m->trail = NULL;
 
        /* front */
index 812eb3b..aef5b10 100644 (file)
@@ -697,7 +697,7 @@ int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
                            u32 pool, u64 snapid)
 {
        return do_poolop(monc,  POOL_OP_CREATE_UNMANAGED_SNAP,
-                                  pool, snapid, 0, 0);
+                                  pool, snapid, NULL, 0);
 
 }
 
index eb9a444..d730dd4 100644 (file)
@@ -23,7 +23,7 @@
 
 static const struct ceph_connection_operations osd_con_ops;
 
-static void send_queued(struct ceph_osd_client *osdc);
+static void __send_queued(struct ceph_osd_client *osdc);
 static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
 static void __register_request(struct ceph_osd_client *osdc,
                               struct ceph_osd_request *req);
@@ -32,64 +32,12 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
 static void __send_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req);
 
-static int op_needs_trail(int op)
-{
-       switch (op) {
-       case CEPH_OSD_OP_GETXATTR:
-       case CEPH_OSD_OP_SETXATTR:
-       case CEPH_OSD_OP_CMPXATTR:
-       case CEPH_OSD_OP_CALL:
-       case CEPH_OSD_OP_NOTIFY:
-               return 1;
-       default:
-               return 0;
-       }
-}
-
 static int op_has_extent(int op)
 {
        return (op == CEPH_OSD_OP_READ ||
                op == CEPH_OSD_OP_WRITE);
 }
 
-int ceph_calc_raw_layout(struct ceph_osd_client *osdc,
-                       struct ceph_file_layout *layout,
-                       u64 snapid,
-                       u64 off, u64 *plen, u64 *bno,
-                       struct ceph_osd_request *req,
-                       struct ceph_osd_req_op *op)
-{
-       struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
-       u64 orig_len = *plen;
-       u64 objoff, objlen;    /* extent in object */
-       int r;
-
-       reqhead->snapid = cpu_to_le64(snapid);
-
-       /* object extent? */
-       r = ceph_calc_file_object_mapping(layout, off, plen, bno,
-                                         &objoff, &objlen);
-       if (r < 0)
-               return r;
-       if (*plen < orig_len)
-               dout(" skipping last %llu, final file extent %llu~%llu\n",
-                    orig_len - *plen, off, *plen);
-
-       if (op_has_extent(op->op)) {
-               op->extent.offset = objoff;
-               op->extent.length = objlen;
-       }
-       req->r_num_pages = calc_pages_for(off, *plen);
-       req->r_page_alignment = off & ~PAGE_MASK;
-       if (op->op == CEPH_OSD_OP_WRITE)
-               op->payload_len = *plen;
-
-       dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
-            *bno, objoff, objlen, req->r_num_pages);
-       return 0;
-}
-EXPORT_SYMBOL(ceph_calc_raw_layout);
-
 /*
  * Implement client access to distributed object storage cluster.
  *
@@ -115,20 +63,48 @@ EXPORT_SYMBOL(ceph_calc_raw_layout);
  *
  * fill osd op in request message.
  */
-static int calc_layout(struct ceph_osd_client *osdc,
-                      struct ceph_vino vino,
+static int calc_layout(struct ceph_vino vino,
                       struct ceph_file_layout *layout,
                       u64 off, u64 *plen,
                       struct ceph_osd_request *req,
                       struct ceph_osd_req_op *op)
 {
-       u64 bno;
+       u64 orig_len = *plen;
+       u64 bno = 0;
+       u64 objoff = 0;
+       u64 objlen = 0;
        int r;
 
-       r = ceph_calc_raw_layout(osdc, layout, vino.snap, off,
-                                plen, &bno, req, op);
+       /* object extent? */
+       r = ceph_calc_file_object_mapping(layout, off, orig_len, &bno,
+                                         &objoff, &objlen);
        if (r < 0)
                return r;
+       if (objlen < orig_len) {
+               *plen = objlen;
+               dout(" skipping last %llu, final file extent %llu~%llu\n",
+                    orig_len - *plen, off, *plen);
+       }
+
+       if (op_has_extent(op->op)) {
+               u32 osize = le32_to_cpu(layout->fl_object_size);
+               op->extent.offset = objoff;
+               op->extent.length = objlen;
+               if (op->extent.truncate_size <= off - objoff) {
+                       op->extent.truncate_size = 0;
+               } else {
+                       op->extent.truncate_size -= off - objoff;
+                       if (op->extent.truncate_size > osize)
+                               op->extent.truncate_size = osize;
+               }
+       }
+       req->r_num_pages = calc_pages_for(off, *plen);
+       req->r_page_alignment = off & ~PAGE_MASK;
+       if (op->op == CEPH_OSD_OP_WRITE)
+               op->payload_len = *plen;
+
+       dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
+            bno, objoff, objlen, req->r_num_pages);
 
        snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
        req->r_oid_len = strlen(req->r_oid);
@@ -148,25 +124,19 @@ void ceph_osdc_release_request(struct kref *kref)
        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_con_filling_msg) {
-               dout("%s revoking pages %p from con %p\n", __func__,
-                    req->r_pages, req->r_con_filling_msg);
+               dout("%s revoking msg %p from con %p\n", __func__,
+                    req->r_reply, req->r_con_filling_msg);
                ceph_msg_revoke_incoming(req->r_reply);
                req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
+               req->r_con_filling_msg = NULL;
        }
        if (req->r_reply)
                ceph_msg_put(req->r_reply);
        if (req->r_own_pages)
                ceph_release_page_vector(req->r_pages,
                                         req->r_num_pages);
-#ifdef CONFIG_BLOCK
-       if (req->r_bio)
-               bio_put(req->r_bio);
-#endif
        ceph_put_snap_context(req->r_snapc);
-       if (req->r_trail) {
-               ceph_pagelist_release(req->r_trail);
-               kfree(req->r_trail);
-       }
+       ceph_pagelist_release(&req->r_trail);
        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else
@@ -174,37 +144,25 @@ void ceph_osdc_release_request(struct kref *kref)
 }
 EXPORT_SYMBOL(ceph_osdc_release_request);
 
-static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
-{
-       int i = 0;
-
-       if (needs_trail)
-               *needs_trail = 0;
-       while (ops[i].op) {
-               if (needs_trail && op_needs_trail(ops[i].op))
-                       *needs_trail = 1;
-               i++;
-       }
-
-       return i;
-}
-
 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
-                                              int flags,
                                               struct ceph_snap_context *snapc,
-                                              struct ceph_osd_req_op *ops,
+                                              unsigned int num_ops,
                                               bool use_mempool,
-                                              gfp_t gfp_flags,
-                                              struct page **pages,
-                                              struct bio *bio)
+                                              gfp_t gfp_flags)
 {
        struct ceph_osd_request *req;
        struct ceph_msg *msg;
-       int needs_trail;
-       int num_op = get_num_ops(ops, &needs_trail);
-       size_t msg_size = sizeof(struct ceph_osd_request_head);
-
-       msg_size += num_op*sizeof(struct ceph_osd_op);
+       size_t msg_size;
+
+       msg_size = 4 + 4 + 8 + 8 + 4+8;
+       msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
+       msg_size += 1 + 8 + 4 + 4;     /* pg_t */
+       msg_size += 4 + MAX_OBJ_NAME_SIZE;
+       msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
+       msg_size += 8;  /* snapid */
+       msg_size += 8;  /* snap_seq */
+       msg_size += 8 * (snapc ? snapc->num_snaps : 0);  /* snaps */
+       msg_size += 4;
 
        if (use_mempool) {
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
@@ -228,10 +186,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
        INIT_LIST_HEAD(&req->r_req_lru_item);
        INIT_LIST_HEAD(&req->r_osd_item);
 
-       req->r_flags = flags;
-
-       WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
-
        /* create reply message */
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
@@ -244,20 +198,9 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
        }
        req->r_reply = msg;
 
-       /* allocate space for the trailing data */
-       if (needs_trail) {
-               req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
-               if (!req->r_trail) {
-                       ceph_osdc_put_request(req);
-                       return NULL;
-               }
-               ceph_pagelist_init(req->r_trail);
-       }
+       ceph_pagelist_init(&req->r_trail);
 
        /* create request message; allow space for oid */
-       msg_size += MAX_OBJ_NAME_SIZE;
-       if (snapc)
-               msg_size += sizeof(u64) * snapc->num_snaps;
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
@@ -270,13 +213,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
        memset(msg->front.iov_base, 0, msg->front.iov_len);
 
        req->r_request = msg;
-       req->r_pages = pages;
-#ifdef CONFIG_BLOCK
-       if (bio) {
-               req->r_bio = bio;
-               bio_get(req->r_bio);
-       }
-#endif
 
        return req;
 }
@@ -289,6 +225,8 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
        dst->op = cpu_to_le16(src->op);
 
        switch (src->op) {
+       case CEPH_OSD_OP_STAT:
+               break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
                dst->extent.offset =
@@ -300,52 +238,20 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
-
-       case CEPH_OSD_OP_GETXATTR:
-       case CEPH_OSD_OP_SETXATTR:
-       case CEPH_OSD_OP_CMPXATTR:
-               BUG_ON(!req->r_trail);
-
-               dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
-               dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
-               dst->xattr.cmp_op = src->xattr.cmp_op;
-               dst->xattr.cmp_mode = src->xattr.cmp_mode;
-               ceph_pagelist_append(req->r_trail, src->xattr.name,
-                                    src->xattr.name_len);
-               ceph_pagelist_append(req->r_trail, src->xattr.val,
-                                    src->xattr.value_len);
-               break;
        case CEPH_OSD_OP_CALL:
-               BUG_ON(!req->r_trail);
-
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
 
-               ceph_pagelist_append(req->r_trail, src->cls.class_name,
+               ceph_pagelist_append(&req->r_trail, src->cls.class_name,
                                     src->cls.class_len);
-               ceph_pagelist_append(req->r_trail, src->cls.method_name,
+               ceph_pagelist_append(&req->r_trail, src->cls.method_name,
                                     src->cls.method_len);
-               ceph_pagelist_append(req->r_trail, src->cls.indata,
+               ceph_pagelist_append(&req->r_trail, src->cls.indata,
                                     src->cls.indata_len);
                break;
-       case CEPH_OSD_OP_ROLLBACK:
-               dst->snap.snapid = cpu_to_le64(src->snap.snapid);
-               break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
-       case CEPH_OSD_OP_NOTIFY:
-               {
-                       __le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
-                       __le32 timeout = cpu_to_le32(src->watch.timeout);
-
-                       BUG_ON(!req->r_trail);
-
-                       ceph_pagelist_append(req->r_trail,
-                                               &prot_ver, sizeof(prot_ver));
-                       ceph_pagelist_append(req->r_trail,
-                                               &timeout, sizeof(timeout));
-               }
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
@@ -356,6 +262,64 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
                pr_err("unrecognized osd opcode %d\n", dst->op);
                WARN_ON(1);
                break;
+       case CEPH_OSD_OP_MAPEXT:
+       case CEPH_OSD_OP_MASKTRUNC:
+       case CEPH_OSD_OP_SPARSE_READ:
+       case CEPH_OSD_OP_NOTIFY:
+       case CEPH_OSD_OP_ASSERT_VER:
+       case CEPH_OSD_OP_WRITEFULL:
+       case CEPH_OSD_OP_TRUNCATE:
+       case CEPH_OSD_OP_ZERO:
+       case CEPH_OSD_OP_DELETE:
+       case CEPH_OSD_OP_APPEND:
+       case CEPH_OSD_OP_SETTRUNC:
+       case CEPH_OSD_OP_TRIMTRUNC:
+       case CEPH_OSD_OP_TMAPUP:
+       case CEPH_OSD_OP_TMAPPUT:
+       case CEPH_OSD_OP_TMAPGET:
+       case CEPH_OSD_OP_CREATE:
+       case CEPH_OSD_OP_ROLLBACK:
+       case CEPH_OSD_OP_OMAPGETKEYS:
+       case CEPH_OSD_OP_OMAPGETVALS:
+       case CEPH_OSD_OP_OMAPGETHEADER:
+       case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
+       case CEPH_OSD_OP_MODE_RD:
+       case CEPH_OSD_OP_OMAPSETVALS:
+       case CEPH_OSD_OP_OMAPSETHEADER:
+       case CEPH_OSD_OP_OMAPCLEAR:
+       case CEPH_OSD_OP_OMAPRMKEYS:
+       case CEPH_OSD_OP_OMAP_CMP:
+       case CEPH_OSD_OP_CLONERANGE:
+       case CEPH_OSD_OP_ASSERT_SRC_VERSION:
+       case CEPH_OSD_OP_SRC_CMPXATTR:
+       case CEPH_OSD_OP_GETXATTR:
+       case CEPH_OSD_OP_GETXATTRS:
+       case CEPH_OSD_OP_CMPXATTR:
+       case CEPH_OSD_OP_SETXATTR:
+       case CEPH_OSD_OP_SETXATTRS:
+       case CEPH_OSD_OP_RESETXATTRS:
+       case CEPH_OSD_OP_RMXATTR:
+       case CEPH_OSD_OP_PULL:
+       case CEPH_OSD_OP_PUSH:
+       case CEPH_OSD_OP_BALANCEREADS:
+       case CEPH_OSD_OP_UNBALANCEREADS:
+       case CEPH_OSD_OP_SCRUB:
+       case CEPH_OSD_OP_SCRUB_RESERVE:
+       case CEPH_OSD_OP_SCRUB_UNRESERVE:
+       case CEPH_OSD_OP_SCRUB_STOP:
+       case CEPH_OSD_OP_SCRUB_MAP:
+       case CEPH_OSD_OP_WRLOCK:
+       case CEPH_OSD_OP_WRUNLOCK:
+       case CEPH_OSD_OP_RDLOCK:
+       case CEPH_OSD_OP_RDUNLOCK:
+       case CEPH_OSD_OP_UPLOCK:
+       case CEPH_OSD_OP_DNLOCK:
+       case CEPH_OSD_OP_PGLS:
+       case CEPH_OSD_OP_PGLS_FILTER:
+               pr_err("unsupported osd opcode %s\n",
+                       ceph_osd_op_name(dst->op));
+               WARN_ON(1);
+               break;
        }
        dst->payload_len = cpu_to_le32(src->payload_len);
 }
@@ -365,75 +329,95 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
  *
  */
 void ceph_osdc_build_request(struct ceph_osd_request *req,
-                            u64 off, u64 *plen,
+                            u64 off, u64 len, unsigned int num_ops,
                             struct ceph_osd_req_op *src_ops,
-                            struct ceph_snap_context *snapc,
-                            struct timespec *mtime,
-                            const char *oid,
-                            int oid_len)
+                            struct ceph_snap_context *snapc, u64 snap_id,
+                            struct timespec *mtime)
 {
        struct ceph_msg *msg = req->r_request;
-       struct ceph_osd_request_head *head;
        struct ceph_osd_req_op *src_op;
-       struct ceph_osd_op *op;
        void *p;
-       int num_op = get_num_ops(src_ops, NULL);
-       size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
+       size_t msg_size;
        int flags = req->r_flags;
-       u64 data_len = 0;
+       u64 data_len;
        int i;
 
-       head = msg->front.iov_base;
-       op = (void *)(head + 1);
-       p = (void *)(op + num_op);
-
+       req->r_num_ops = num_ops;
+       req->r_snapid = snap_id;
        req->r_snapc = ceph_get_snap_context(snapc);
 
-       head->client_inc = cpu_to_le32(1); /* always, for now. */
-       head->flags = cpu_to_le32(flags);
-       if (flags & CEPH_OSD_FLAG_WRITE)
-               ceph_encode_timespec(&head->mtime, mtime);
-       head->num_ops = cpu_to_le16(num_op);
-
-
-       /* fill in oid */
-       head->object_len = cpu_to_le32(oid_len);
-       memcpy(p, oid, oid_len);
-       p += oid_len;
+       /* encode request */
+       msg->hdr.version = cpu_to_le16(4);
 
+       p = msg->front.iov_base;
+       ceph_encode_32(&p, 1);   /* client_inc  is always 1 */
+       req->r_request_osdmap_epoch = p;
+       p += 4;
+       req->r_request_flags = p;
+       p += 4;
+       if (req->r_flags & CEPH_OSD_FLAG_WRITE)
+               ceph_encode_timespec(p, mtime);
+       p += sizeof(struct ceph_timespec);
+       req->r_request_reassert_version = p;
+       p += sizeof(struct ceph_eversion); /* will get filled in */
+
+       /* oloc */
+       ceph_encode_8(&p, 4);
+       ceph_encode_8(&p, 4);
+       ceph_encode_32(&p, 8 + 4 + 4);
+       req->r_request_pool = p;
+       p += 8;
+       ceph_encode_32(&p, -1);  /* preferred */
+       ceph_encode_32(&p, 0);   /* key len */
+
+       ceph_encode_8(&p, 1);
+       req->r_request_pgid = p;
+       p += 8 + 4;
+       ceph_encode_32(&p, -1);  /* preferred */
+
+       /* oid */
+       ceph_encode_32(&p, req->r_oid_len);
+       memcpy(p, req->r_oid, req->r_oid_len);
+       dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
+       p += req->r_oid_len;
+
+       /* ops */
+       ceph_encode_16(&p, num_ops);
        src_op = src_ops;
-       while (src_op->op) {
-               osd_req_encode_op(req, op, src_op);
-               src_op++;
-               op++;
+       req->r_request_ops = p;
+       for (i = 0; i < num_ops; i++, src_op++) {
+               osd_req_encode_op(req, p, src_op);
+               p += sizeof(struct ceph_osd_op);
        }
 
-       if (req->r_trail)
-               data_len += req->r_trail->length;
-
-       if (snapc) {
-               head->snap_seq = cpu_to_le64(snapc->seq);
-               head->num_snaps = cpu_to_le32(snapc->num_snaps);
+       /* snaps */
+       ceph_encode_64(&p, req->r_snapid);
+       ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
+       ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
+       if (req->r_snapc) {
                for (i = 0; i < snapc->num_snaps; i++) {
-                       put_unaligned_le64(snapc->snaps[i], p);
-                       p += sizeof(u64);
+                       ceph_encode_64(&p, req->r_snapc->snaps[i]);
                }
        }
 
+       req->r_request_attempts = p;
+       p += 4;
+
+       data_len = req->r_trail.length;
        if (flags & CEPH_OSD_FLAG_WRITE) {
                req->r_request->hdr.data_off = cpu_to_le16(off);
-               req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
-       } else if (data_len) {
-               req->r_request->hdr.data_off = 0;
-               req->r_request->hdr.data_len = cpu_to_le32(data_len);
+               data_len += len;
        }
-
+       req->r_request->hdr.data_len = cpu_to_le32(data_len);
        req->r_request->page_alignment = req->r_page_alignment;
 
        BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
        msg_size = p - msg->front.iov_base;
        msg->front.iov_len = msg_size;
        msg->hdr.front_len = cpu_to_le32(msg_size);
+
+       dout("build_request msg_size was %d num_ops %d\n", (int)msg_size,
+            num_ops);
        return;
 }
 EXPORT_SYMBOL(ceph_osdc_build_request);
@@ -459,34 +443,33 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               struct timespec *mtime,
-                                              bool use_mempool, int num_reply,
+                                              bool use_mempool,
                                               int page_align)
 {
-       struct ceph_osd_req_op ops[3];
+       struct ceph_osd_req_op ops[2];
        struct ceph_osd_request *req;
+       unsigned int num_op = 1;
        int r;
 
+       memset(&ops, 0, sizeof ops);
+
        ops[0].op = opcode;
        ops[0].extent.truncate_seq = truncate_seq;
        ops[0].extent.truncate_size = truncate_size;
-       ops[0].payload_len = 0;
 
        if (do_sync) {
                ops[1].op = CEPH_OSD_OP_STARTSYNC;
-               ops[1].payload_len = 0;
-               ops[2].op = 0;
-       } else
-               ops[1].op = 0;
-
-       req = ceph_osdc_alloc_request(osdc, flags,
-                                        snapc, ops,
-                                        use_mempool,
-                                        GFP_NOFS, NULL, NULL);
+               num_op++;
+       }
+
+       req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
+                                       GFP_NOFS);
        if (!req)
                return ERR_PTR(-ENOMEM);
+       req->r_flags = flags;
 
        /* calculate max write size */
-       r = calc_layout(osdc, vino, layout, off, plen, req, ops);
+       r = calc_layout(vino, layout, off, plen, req, ops);
        if (r < 0)
                return ERR_PTR(r);
        req->r_file_layout = *layout;  /* keep a copy */
@@ -496,10 +479,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
        req->r_num_pages = calc_pages_for(page_align, *plen);
        req->r_page_alignment = page_align;
 
-       ceph_osdc_build_request(req, off, plen, ops,
-                               snapc,
-                               mtime,
-                               req->r_oid, req->r_oid_len);
+       ceph_osdc_build_request(req, off, *plen, num_op, ops,
+                               snapc, vino.snap, mtime);
 
        return req;
 }
@@ -623,8 +604,8 @@ static void osd_reset(struct ceph_connection *con)
        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        __kick_osd_requests(osdc, osd);
+       __send_queued(osdc);
        mutex_unlock(&osdc->request_mutex);
-       send_queued(osdc);
        up_read(&osdc->map_sem);
 }
 
@@ -739,31 +720,35 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
  */
 static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
-       struct ceph_osd_request *req;
-       int ret = 0;
+       struct ceph_entity_addr *peer_addr;
 
        dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
        if (list_empty(&osd->o_requests) &&
            list_empty(&osd->o_linger_requests)) {
                __remove_osd(osdc, osd);
-               ret = -ENODEV;
-       } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
-                         &osd->o_con.peer_addr,
-                         sizeof(osd->o_con.peer_addr)) == 0 &&
-                  !ceph_con_opened(&osd->o_con)) {
+
+               return -ENODEV;
+       }
+
+       peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
+       if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
+                       !ceph_con_opened(&osd->o_con)) {
+               struct ceph_osd_request *req;
+
                dout(" osd addr hasn't changed and connection never opened,"
                     " letting msgr retry");
                /* touch each r_stamp for handle_timeout()'s benfit */
                list_for_each_entry(req, &osd->o_requests, r_osd_item)
                        req->r_stamp = jiffies;
-               ret = -EAGAIN;
-       } else {
-               ceph_con_close(&osd->o_con);
-               ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
-                             &osdc->osdmap->osd_addr[osd->o_osd]);
-               osd->o_incarnation++;
+
+               return -EAGAIN;
        }
-       return ret;
+
+       ceph_con_close(&osd->o_con);
+       ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
+       osd->o_incarnation++;
+
+       return 0;
 }
 
 static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
@@ -961,20 +946,18 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
 static int __map_request(struct ceph_osd_client *osdc,
                         struct ceph_osd_request *req, int force_resend)
 {
-       struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
        struct ceph_pg pgid;
        int acting[CEPH_PG_MAX_SIZE];
        int o = -1, num = 0;
        int err;
 
        dout("map_request %p tid %lld\n", req, req->r_tid);
-       err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
+       err = ceph_calc_object_layout(&pgid, req->r_oid,
                                      &req->r_file_layout, osdc->osdmap);
        if (err) {
                list_move(&req->r_req_lru_item, &osdc->req_notarget);
                return err;
        }
-       pgid = reqhead->layout.ol_pgid;
        req->r_pgid = pgid;
 
        err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
@@ -991,8 +974,8 @@ static int __map_request(struct ceph_osd_client *osdc,
            (req->r_osd == NULL && o == -1))
                return 0;  /* no change */
 
-       dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
-            req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
+       dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
+            req->r_tid, pgid.pool, pgid.seed, o,
             req->r_osd ? req->r_osd->o_osd : -1);
 
        /* record full pg acting set */
@@ -1041,15 +1024,22 @@ out:
 static void __send_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
 {
-       struct ceph_osd_request_head *reqhead;
-
-       dout("send_request %p tid %llu to osd%d flags %d\n",
-            req, req->r_tid, req->r_osd->o_osd, req->r_flags);
+       void *p;
 
-       reqhead = req->r_request->front.iov_base;
-       reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
-       reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
-       reqhead->reassert_version = req->r_reassert_version;
+       dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
+            req, req->r_tid, req->r_osd->o_osd, req->r_flags,
+            (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);
+
+       /* fill in message content that changes each time we send it */
+       put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
+       put_unaligned_le32(req->r_flags, req->r_request_flags);
+       put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
+       p = req->r_request_pgid;
+       ceph_encode_64(&p, req->r_pgid.pool);
+       ceph_encode_32(&p, req->r_pgid.seed);
+       put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
+       memcpy(req->r_request_reassert_version, &req->r_reassert_version,
+              sizeof(req->r_reassert_version));
 
        req->r_stamp = jiffies;
        list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
@@ -1062,16 +1052,13 @@ static void __send_request(struct ceph_osd_client *osdc,
 /*
  * Send any requests in the queue (req_unsent).
  */
-static void send_queued(struct ceph_osd_client *osdc)
+static void __send_queued(struct ceph_osd_client *osdc)
 {
        struct ceph_osd_request *req, *tmp;
 
-       dout("send_queued\n");
-       mutex_lock(&osdc->request_mutex);
-       list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) {
+       dout("__send_queued\n");
+       list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
                __send_request(osdc, req);
-       }
-       mutex_unlock(&osdc->request_mutex);
 }
 
 /*
@@ -1123,8 +1110,8 @@ static void handle_timeout(struct work_struct *work)
        }
 
        __schedule_osd_timeout(osdc);
+       __send_queued(osdc);
        mutex_unlock(&osdc->request_mutex);
-       send_queued(osdc);
        up_read(&osdc->map_sem);
 }
 
@@ -1152,6 +1139,26 @@ static void complete_request(struct ceph_osd_request *req)
        complete_all(&req->r_safe_completion);  /* fsync waiter */
 }
 
+static int __decode_pgid(void **p, void *end, struct ceph_pg *pgid)
+{
+       __u8 v;
+
+       ceph_decode_need(p, end, 1 + 8 + 4 + 4, bad);
+       v = ceph_decode_8(p);
+       if (v > 1) {
+               pr_warning("do not understand pg encoding %d > 1", v);
+               return -EINVAL;
+       }
+       pgid->pool = ceph_decode_64(p);
+       pgid->seed = ceph_decode_32(p);
+       *p += 4;
+       return 0;
+
+bad:
+       pr_warning("incomplete pg encoding");
+       return -EINVAL;
+}
+
 /*
  * handle osd op reply.  either call the callback if it is specified,
  * or do the completion to wake up the waiting thread.
@@ -1159,22 +1166,42 @@ static void complete_request(struct ceph_osd_request *req)
 static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
                         struct ceph_connection *con)
 {
-       struct ceph_osd_reply_head *rhead = msg->front.iov_base;
+       void *p, *end;
        struct ceph_osd_request *req;
        u64 tid;
-       int numops, object_len, flags;
+       int object_len;
+       int numops, payload_len, flags;
        s32 result;
+       s32 retry_attempt;
+       struct ceph_pg pg;
+       int err;
+       u32 reassert_epoch;
+       u64 reassert_version;
+       u32 osdmap_epoch;
+       int i;
 
        tid = le64_to_cpu(msg->hdr.tid);
-       if (msg->front.iov_len < sizeof(*rhead))
-               goto bad;
-       numops = le32_to_cpu(rhead->num_ops);
-       object_len = le32_to_cpu(rhead->object_len);
-       result = le32_to_cpu(rhead->result);
-       if (msg->front.iov_len != sizeof(*rhead) + object_len +
-           numops * sizeof(struct ceph_osd_op))
+       dout("handle_reply %p tid %llu\n", msg, tid);
+
+       p = msg->front.iov_base;
+       end = p + msg->front.iov_len;
+
+       ceph_decode_need(&p, end, 4, bad);
+       object_len = ceph_decode_32(&p);
+       ceph_decode_need(&p, end, object_len, bad);
+       p += object_len;
+
+       err = __decode_pgid(&p, end, &pg);
+       if (err)
                goto bad;
-       dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
+
+       ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
+       flags = ceph_decode_64(&p);
+       result = ceph_decode_32(&p);
+       reassert_epoch = ceph_decode_32(&p);
+       reassert_version = ceph_decode_64(&p);
+       osdmap_epoch = ceph_decode_32(&p);
+
        /* lookup */
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
@@ -1184,7 +1211,38 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
                return;
        }
        ceph_osdc_get_request(req);
-       flags = le32_to_cpu(rhead->flags);
+
+       dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
+            req, result);
+
+       ceph_decode_need(&p, end, 4, bad);
+       numops = ceph_decode_32(&p);
+       if (numops > CEPH_OSD_MAX_OP)
+               goto bad_put;
+       if (numops != req->r_num_ops)
+               goto bad_put;
+       payload_len = 0;
+       ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
+       for (i = 0; i < numops; i++) {
+               struct ceph_osd_op *op = p;
+               int len;
+
+               len = le32_to_cpu(op->payload_len);
+               req->r_reply_op_len[i] = len;
+               dout(" op %d has %d bytes\n", i, len);
+               payload_len += len;
+               p += sizeof(*op);
+       }
+       if (payload_len != le32_to_cpu(msg->hdr.data_len)) {
+               pr_warning("sum of op payload lens %d != data_len %d",
+                          payload_len, le32_to_cpu(msg->hdr.data_len));
+               goto bad_put;
+       }
+
+       ceph_decode_need(&p, end, 4 + numops * 4, bad);
+       retry_attempt = ceph_decode_32(&p);
+       for (i = 0; i < numops; i++)
+               req->r_reply_op_result[i] = ceph_decode_32(&p);
 
        /*
         * if this connection filled our message, drop our reference now, to
@@ -1199,7 +1257,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
        if (!req->r_got_reply) {
                unsigned int bytes;
 
-               req->r_result = le32_to_cpu(rhead->result);
+               req->r_result = result;
                bytes = le32_to_cpu(msg->hdr.data_len);
                dout("handle_reply result %d bytes %d\n", req->r_result,
                     bytes);
@@ -1207,7 +1265,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
                        req->r_result = bytes;
 
                /* in case this is a write and we need to replay, */
-               req->r_reassert_version = rhead->reassert_version;
+               req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
+               req->r_reassert_version.version = cpu_to_le64(reassert_version);
 
                req->r_got_reply = 1;
        } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
@@ -1242,10 +1301,11 @@ done:
        ceph_osdc_put_request(req);
        return;
 
+bad_put:
+       ceph_osdc_put_request(req);
 bad:
-       pr_err("corrupt osd_op_reply got %d %d expected %d\n",
-              (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
-              (int)sizeof(*rhead));
+       pr_err("corrupt osd_op_reply got %d %d\n",
+              (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
        ceph_msg_dump(msg);
 }
 
@@ -1462,7 +1522,9 @@ done:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
                ceph_monc_request_next_osdmap(&osdc->client->monc);
 
-       send_queued(osdc);
+       mutex_lock(&osdc->request_mutex);
+       __send_queued(osdc);
+       mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
        wake_up_all(&osdc->client->auth_wq);
        return;
@@ -1556,8 +1618,7 @@ static void __remove_event(struct ceph_osd_event *event)
 
 int ceph_osdc_create_event(struct ceph_osd_client *osdc,
                           void (*event_cb)(u64, u64, u8, void *),
-                          int one_shot, void *data,
-                          struct ceph_osd_event **pevent)
+                          void *data, struct ceph_osd_event **pevent)
 {
        struct ceph_osd_event *event;
 
@@ -1567,14 +1628,13 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
 
        dout("create_event %p\n", event);
        event->cb = event_cb;
-       event->one_shot = one_shot;
+       event->one_shot = 0;
        event->data = data;
        event->osdc = osdc;
        INIT_LIST_HEAD(&event->osd_node);
        RB_CLEAR_NODE(&event->node);
        kref_init(&event->kref);   /* one ref for us */
        kref_get(&event->kref);    /* one ref for the caller */
-       init_completion(&event->completion);
 
        spin_lock(&osdc->event_lock);
        event->cookie = ++osdc->event_count;
@@ -1610,7 +1670,6 @@ static void do_event_work(struct work_struct *work)
 
        dout("do_event_work completing %p\n", event);
        event->cb(ver, notify_id, opcode, event->data);
-       complete(&event->completion);
        dout("do_event_work completed %p\n", event);
        ceph_osdc_put_event(event);
        kfree(event_work);
@@ -1620,7 +1679,8 @@ static void do_event_work(struct work_struct *work)
 /*
  * Process osd watch notifications
  */
-void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+static void handle_watch_notify(struct ceph_osd_client *osdc,
+                               struct ceph_msg *msg)
 {
        void *p, *end;
        u8 proto_ver;
@@ -1641,9 +1701,8 @@ void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
        spin_lock(&osdc->event_lock);
        event = __find_event(osdc, cookie);
        if (event) {
+               BUG_ON(event->one_shot);
                get_event(event);
-               if (event->one_shot)
-                       __remove_event(event);
        }
        spin_unlock(&osdc->event_lock);
        dout("handle_watch_notify cookie %lld ver %lld event %p\n",
@@ -1668,7 +1727,6 @@ void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
        return;
 
 done_err:
-       complete(&event->completion);
        ceph_osdc_put_event(event);
        return;
 
@@ -1677,21 +1735,6 @@ bad:
        return;
 }
 
-int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout)
-{
-       int err;
-
-       dout("wait_event %p\n", event);
-       err = wait_for_completion_interruptible_timeout(&event->completion,
-                                                       timeout * HZ);
-       ceph_osdc_put_event(event);
-       if (err > 0)
-               err = 0;
-       dout("wait_event %p returns %d\n", event, err);
-       return err;
-}
-EXPORT_SYMBOL(ceph_osdc_wait_event);
-
 /*
  * Register request, send initial attempt.
  */
@@ -1706,7 +1749,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 #ifdef CONFIG_BLOCK
        req->r_request->bio = req->r_bio;
 #endif
-       req->r_request->trail = req->r_trail;
+       req->r_request->trail = &req->r_trail;
 
        register_request(osdc, req);
 
@@ -1865,7 +1908,6 @@ out_mempool:
 out:
        return err;
 }
-EXPORT_SYMBOL(ceph_osdc_init);
 
 void ceph_osdc_stop(struct ceph_osd_client *osdc)
 {
@@ -1882,7 +1924,6 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
 }
-EXPORT_SYMBOL(ceph_osdc_stop);
 
 /*
  * Read some contiguous pages.  If we cross a stripe boundary, shorten
@@ -1902,7 +1943,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
        req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, 0, truncate_seq, truncate_size, NULL,
-                                   false, 1, page_align);
+                                   false, page_align);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -1931,8 +1972,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
                         u64 off, u64 len,
                         u32 truncate_seq, u64 truncate_size,
                         struct timespec *mtime,
-                        struct page **pages, int num_pages,
-                        int flags, int do_sync, bool nofail)
+                        struct page **pages, int num_pages)
 {
        struct ceph_osd_request *req;
        int rc = 0;
@@ -1941,11 +1981,10 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
        BUG_ON(vino.snap != CEPH_NOSNAP);
        req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
                                    CEPH_OSD_OP_WRITE,
-                                   flags | CEPH_OSD_FLAG_ONDISK |
-                                           CEPH_OSD_FLAG_WRITE,
-                                   snapc, do_sync,
+                                   CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
+                                   snapc, 0,
                                    truncate_seq, truncate_size, mtime,
-                                   nofail, 1, page_align);
+                                   true, page_align);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -1954,7 +1993,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
        dout("writepages %llu~%llu (%d pages)\n", off, len,
             req->r_num_pages);
 
-       rc = ceph_osdc_start_request(osdc, req, nofail);
+       rc = ceph_osdc_start_request(osdc, req, true);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);
 
@@ -2047,7 +2086,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
        if (data_len > 0) {
                int want = calc_pages_for(req->r_page_alignment, data_len);
 
-               if (unlikely(req->r_num_pages < want)) {
+               if (req->r_pages && unlikely(req->r_num_pages < want)) {
                        pr_warning("tid %lld reply has %d bytes %d pages, we"
                                   " had only %d pages ready\n", tid, data_len,
                                   want, req->r_num_pages);
index de73214..69bc4bf 100644 (file)
 
 char *ceph_osdmap_state_str(char *str, int len, int state)
 {
-       int flag = 0;
-
        if (!len)
-               goto done;
-
-       *str = '\0';
-       if (state) {
-               if (state & CEPH_OSD_EXISTS) {
-                       snprintf(str, len, "exists");
-                       flag = 1;
-               }
-               if (state & CEPH_OSD_UP) {
-                       snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
-                                "up");
-                       flag = 1;
-               }
-       } else {
+               return str;
+
+       if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
+               snprintf(str, len, "exists, up");
+       else if (state & CEPH_OSD_EXISTS)
+               snprintf(str, len, "exists");
+       else if (state & CEPH_OSD_UP)
+               snprintf(str, len, "up");
+       else
                snprintf(str, len, "doesn't exist");
-       }
-done:
+
        return str;
 }
 
@@ -53,13 +45,8 @@ static int calc_bits_of(unsigned int t)
  */
 static void calc_pg_masks(struct ceph_pg_pool_info *pi)
 {
-       pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
-       pi->pgp_num_mask =
-               (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
-       pi->lpg_num_mask =
-               (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
-       pi->lpgp_num_mask =
-               (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
+       pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
+       pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
 }
 
 /*
@@ -170,6 +157,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
         c->choose_local_tries = 2;
         c->choose_local_fallback_tries = 5;
         c->choose_total_tries = 19;
+       c->chooseleaf_descend_once = 0;
 
        ceph_decode_need(p, end, 4*sizeof(u32), bad);
        magic = ceph_decode_32(p);
@@ -336,6 +324,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
         dout("crush decode tunable choose_total_tries = %d",
              c->choose_total_tries);
 
+       ceph_decode_need(p, end, sizeof(u32), done);
+       c->chooseleaf_descend_once = ceph_decode_32(p);
+       dout("crush decode tunable chooseleaf_descend_once = %d",
+            c->chooseleaf_descend_once);
+
 done:
        dout("crush_decode success\n");
        return c;
@@ -354,12 +347,13 @@ bad:
  */
 static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
 {
-       u64 a = *(u64 *)&l;
-       u64 b = *(u64 *)&r;
-
-       if (a < b)
+       if (l.pool < r.pool)
+               return -1;
+       if (l.pool > r.pool)
+               return 1;
+       if (l.seed < r.seed)
                return -1;
-       if (a > b)
+       if (l.seed > r.seed)
                return 1;
        return 0;
 }
@@ -405,8 +399,8 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
                } else if (c > 0) {
                        n = n->rb_right;
                } else {
-                       dout("__lookup_pg_mapping %llx got %p\n",
-                            *(u64 *)&pgid, pg);
+                       dout("__lookup_pg_mapping %lld.%x got %p\n",
+                            pgid.pool, pgid.seed, pg);
                        return pg;
                }
        }
@@ -418,12 +412,13 @@ static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
        struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
 
        if (pg) {
-               dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
+               dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
+                    pg);
                rb_erase(&pg->node, root);
                kfree(pg);
                return 0;
        }
-       dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
+       dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
        return -ENOENT;
 }
 
@@ -452,7 +447,7 @@ static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
        return 0;
 }
 
-static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
+static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
 {
        struct ceph_pg_pool_info *pi;
        struct rb_node *n = root->rb_node;
@@ -508,24 +503,57 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
 
 static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
 {
-       unsigned int n, m;
+       u8 ev, cv;
+       unsigned len, num;
+       void *pool_end;
+
+       ceph_decode_need(p, end, 2 + 4, bad);
+       ev = ceph_decode_8(p);  /* encoding version */
+       cv = ceph_decode_8(p); /* compat version */
+       if (ev < 5) {
+               pr_warning("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
+               return -EINVAL;
+       }
+       if (cv > 7) {
+               pr_warning("got v %d cv %d > 7 of ceph_pg_pool\n", ev, cv);
+               return -EINVAL;
+       }
+       len = ceph_decode_32(p);
+       ceph_decode_need(p, end, len, bad);
+       pool_end = *p + len;
 
-       ceph_decode_copy(p, &pi->v, sizeof(pi->v));
-       calc_pg_masks(pi);
+       pi->type = ceph_decode_8(p);
+       pi->size = ceph_decode_8(p);
+       pi->crush_ruleset = ceph_decode_8(p);
+       pi->object_hash = ceph_decode_8(p);
+
+       pi->pg_num = ceph_decode_32(p);
+       pi->pgp_num = ceph_decode_32(p);
+
+       *p += 4 + 4;  /* skip lpg* */
+       *p += 4;      /* skip last_change */
+       *p += 8 + 4;  /* skip snap_seq, snap_epoch */
 
-       /* num_snaps * snap_info_t */
-       n = le32_to_cpu(pi->v.num_snaps);
-       while (n--) {
-               ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
-                                sizeof(struct ceph_timespec), bad);
-               *p += sizeof(u64) +       /* key */
-                       1 + sizeof(u64) + /* u8, snapid */
-                       sizeof(struct ceph_timespec);
-               m = ceph_decode_32(p);    /* snap name */
-               *p += m;
+       /* skip snaps */
+       num = ceph_decode_32(p);
+       while (num--) {
+               *p += 8;  /* snapid key */
+               *p += 1 + 1; /* versions */
+               len = ceph_decode_32(p);
+               *p += len;
        }
 
-       *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
+       /* skip removed snaps */
+       num = ceph_decode_32(p);
+       *p += num * (8 + 8);
+
+       *p += 8;  /* skip auid */
+       pi->flags = ceph_decode_64(p);
+
+       /* ignore the rest */
+
+       *p = pool_end;
+       calc_pg_masks(pi);
        return 0;
 
 bad:
@@ -535,14 +563,15 @@ bad:
 static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
 {
        struct ceph_pg_pool_info *pi;
-       u32 num, len, pool;
+       u32 num, len;
+       u64 pool;
 
        ceph_decode_32_safe(p, end, num, bad);
        dout(" %d pool names\n", num);
        while (num--) {
-               ceph_decode_32_safe(p, end, pool, bad);
+               ceph_decode_64_safe(p, end, pool, bad);
                ceph_decode_32_safe(p, end, len, bad);
-               dout("  pool %d len %d\n", pool, len);
+               dout("  pool %llu len %d\n", pool, len);
                ceph_decode_need(p, end, len, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (pi) {
@@ -633,7 +662,6 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
        struct ceph_osdmap *map;
        u16 version;
        u32 len, max, i;
-       u8 ev;
        int err = -EINVAL;
        void *start = *p;
        struct ceph_pg_pool_info *pi;
@@ -646,9 +674,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
        map->pg_temp = RB_ROOT;
 
        ceph_decode_16_safe(p, end, version, bad);
-       if (version > CEPH_OSDMAP_VERSION) {
-               pr_warning("got unknown v %d > %d of osdmap\n", version,
-                          CEPH_OSDMAP_VERSION);
+       if (version > 6) {
+               pr_warning("got unknown v %d > 6 of osdmap\n", version);
+               goto bad;
+       }
+       if (version < 6) {
+               pr_warning("got old v %d < 6 of osdmap\n", version);
                goto bad;
        }
 
@@ -660,20 +691,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 
        ceph_decode_32_safe(p, end, max, bad);
        while (max--) {
-               ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
+               ceph_decode_need(p, end, 8 + 2, bad);
                err = -ENOMEM;
                pi = kzalloc(sizeof(*pi), GFP_NOFS);
                if (!pi)
                        goto bad;
-               pi->id = ceph_decode_32(p);
-               err = -EINVAL;
-               ev = ceph_decode_8(p); /* encoding version */
-               if (ev > CEPH_PG_POOL_VERSION) {
-                       pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
-                                  ev, CEPH_PG_POOL_VERSION);
-                       kfree(pi);
-                       goto bad;
-               }
+               pi->id = ceph_decode_64(p);
                err = __decode_pool(p, end, pi);
                if (err < 0) {
                        kfree(pi);
@@ -682,12 +705,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
                __insert_pg_pool(&map->pg_pools, pi);
        }
 
-       if (version >= 5) {
-               err = __decode_pool_names(p, end, map);
-               if (err < 0) {
-                       dout("fail to decode pool names");
-                       goto bad;
-               }
+       err = __decode_pool_names(p, end, map);
+       if (err < 0) {
+               dout("fail to decode pool names");
+               goto bad;
        }
 
        ceph_decode_32_safe(p, end, map->pool_max, bad);
@@ -724,10 +745,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
        for (i = 0; i < len; i++) {
                int n, j;
                struct ceph_pg pgid;
+               struct ceph_pg_v1 pgid_v1;
                struct ceph_pg_mapping *pg;
 
                ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
-               ceph_decode_copy(p, &pgid, sizeof(pgid));
+               ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
+               pgid.pool = le32_to_cpu(pgid_v1.pool);
+               pgid.seed = le16_to_cpu(pgid_v1.ps);
                n = ceph_decode_32(p);
                err = -EINVAL;
                if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
@@ -745,7 +769,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
                err = __insert_pg_mapping(pg, &map->pg_temp);
                if (err)
                        goto bad;
-               dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
+               dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed,
+                    len);
        }
 
        /* crush */
@@ -784,16 +809,17 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
        struct ceph_fsid fsid;
        u32 epoch = 0;
        struct ceph_timespec modified;
-       u32 len, pool;
-       __s32 new_pool_max, new_flags, max;
+       s32 len;
+       u64 pool;
+       __s64 new_pool_max;
+       __s32 new_flags, max;
        void *start = *p;
        int err = -EINVAL;
        u16 version;
 
        ceph_decode_16_safe(p, end, version, bad);
-       if (version > CEPH_OSDMAP_INC_VERSION) {
-               pr_warning("got unknown v %d > %d of inc osdmap\n", version,
-                          CEPH_OSDMAP_INC_VERSION);
+       if (version > 6) {
+               pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6);
                goto bad;
        }
 
@@ -803,7 +829,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
        epoch = ceph_decode_32(p);
        BUG_ON(epoch != map->epoch+1);
        ceph_decode_copy(p, &modified, sizeof(modified));
-       new_pool_max = ceph_decode_32(p);
+       new_pool_max = ceph_decode_64(p);
        new_flags = ceph_decode_32(p);
 
        /* full map? */
@@ -853,18 +879,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
        /* new_pool */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
-               __u8 ev;
                struct ceph_pg_pool_info *pi;
 
-               ceph_decode_32_safe(p, end, pool, bad);
-               ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
-               ev = ceph_decode_8(p);  /* encoding version */
-               if (ev > CEPH_PG_POOL_VERSION) {
-                       pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
-                                  ev, CEPH_PG_POOL_VERSION);
-                       err = -EINVAL;
-                       goto bad;
-               }
+               ceph_decode_64_safe(p, end, pool, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (!pi) {
                        pi = kzalloc(sizeof(*pi), GFP_NOFS);
@@ -890,7 +907,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
        while (len--) {
                struct ceph_pg_pool_info *pi;
 
-               ceph_decode_32_safe(p, end, pool, bad);
+               ceph_decode_64_safe(p, end, pool, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (pi)
                        __remove_pg_pool(&map->pg_pools, pi);
@@ -946,10 +963,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
        while (len--) {
                struct ceph_pg_mapping *pg;
                int j;
+               struct ceph_pg_v1 pgid_v1;
                struct ceph_pg pgid;
                u32 pglen;
                ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
-               ceph_decode_copy(p, &pgid, sizeof(pgid));
+               ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
+               pgid.pool = le32_to_cpu(pgid_v1.pool);
+               pgid.seed = le16_to_cpu(pgid_v1.ps);
                pglen = ceph_decode_32(p);
 
                if (pglen) {
@@ -975,8 +995,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                                kfree(pg);
                                goto bad;
                        }
-                       dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
-                            pglen);
+                       dout(" added pg_temp %lld.%x len %d\n", pgid.pool,
+                            pgid.seed, pglen);
                } else {
                        /* remove */
                        __remove_pg_mapping(&map->pg_temp, pgid);
@@ -1010,7 +1030,7 @@ bad:
  * pass a stride back to the caller.
  */
 int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
-                                  u64 off, u64 *plen,
+                                  u64 off, u64 len,
                                   u64 *ono,
                                   u64 *oxoff, u64 *oxlen)
 {
@@ -1021,7 +1041,7 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
        u32 su_per_object;
        u64 t, su_offset;
 
-       dout("mapping %llu~%llu  osize %u fl_su %u\n", off, *plen,
+       dout("mapping %llu~%llu  osize %u fl_su %u\n", off, len,
             osize, su);
        if (su == 0 || sc == 0)
                goto invalid;
@@ -1054,11 +1074,10 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
 
        /*
         * Calculate the length of the extent being written to the selected
-        * object. This is the minimum of the full length requested (plen) or
+        * object. This is the minimum of the full length requested (len) or
         * the remainder of the current stripe being written to.
         */
-       *oxlen = min_t(u64, *plen, su - su_offset);
-       *plen = *oxlen;
+       *oxlen = min_t(u64, len, su - su_offset);
 
        dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
        return 0;
@@ -1076,33 +1095,24 @@ EXPORT_SYMBOL(ceph_calc_file_object_mapping);
  * calculate an object layout (i.e. pgid) from an oid,
  * file_layout, and osdmap
  */
-int ceph_calc_object_layout(struct ceph_object_layout *ol,
+int ceph_calc_object_layout(struct ceph_pg *pg,
                            const char *oid,
                            struct ceph_file_layout *fl,
                            struct ceph_osdmap *osdmap)
 {
        unsigned int num, num_mask;
-       struct ceph_pg pgid;
-       int poolid = le32_to_cpu(fl->fl_pg_pool);
        struct ceph_pg_pool_info *pool;
-       unsigned int ps;
 
        BUG_ON(!osdmap);
-
-       pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+       pg->pool = le32_to_cpu(fl->fl_pg_pool);
+       pool = __lookup_pg_pool(&osdmap->pg_pools, pg->pool);
        if (!pool)
                return -EIO;
-       ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
-       num = le32_to_cpu(pool->v.pg_num);
+       pg->seed = ceph_str_hash(pool->object_hash, oid, strlen(oid));
+       num = pool->pg_num;
        num_mask = pool->pg_num_mask;
 
-       pgid.ps = cpu_to_le16(ps);
-       pgid.preferred = cpu_to_le16(-1);
-       pgid.pool = fl->fl_pg_pool;
-       dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
-
-       ol->ol_pgid = pgid;
-       ol->ol_stripe_unit = fl->fl_object_stripe_unit;
+       dout("calc_object_layout '%s' pgid %lld.%x\n", oid, pg->pool, pg->seed);
        return 0;
 }
 EXPORT_SYMBOL(ceph_calc_object_layout);
@@ -1117,19 +1127,16 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        int ruleno;
-       unsigned int poolid, ps, pps, t, r;
-
-       poolid = le32_to_cpu(pgid.pool);
-       ps = le16_to_cpu(pgid.ps);
+       int r;
+       u32 pps;
 
-       pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
+       pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
        if (!pool)
                return NULL;
 
        /* pg_temp? */
-       t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
-                           pool->pgp_num_mask);
-       pgid.ps = cpu_to_le16(t);
+       pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
+                                   pool->pgp_num_mask);
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
                *num = pg->len;
@@ -1137,26 +1144,39 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
        }
 
        /* crush */
-       ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
-                                pool->v.type, pool->v.size);
+       ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
+                                pool->type, pool->size);
        if (ruleno < 0) {
-               pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
-                      poolid, pool->v.crush_ruleset, pool->v.type,
-                      pool->v.size);
+               pr_err("no crush rule pool %lld ruleset %d type %d size %d\n",
+                      pgid.pool, pool->crush_ruleset, pool->type,
+                      pool->size);
                return NULL;
        }
 
-       pps = ceph_stable_mod(ps,
-                             le32_to_cpu(pool->v.pgp_num),
-                             pool->pgp_num_mask);
-       pps += poolid;
+       if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
+               /* hash pool id and seed sothat pool PGs do not overlap */
+               pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
+                                    ceph_stable_mod(pgid.seed, pool->pgp_num,
+                                                    pool->pgp_num_mask),
+                                    pgid.pool);
+       } else {
+               /*
+                * legacy ehavior: add ps and pool together.  this is
+                * not a great approach because the PGs from each pool
+                * will overlap on top of each other: 0.5 == 1.4 ==
+                * 2.3 == ...
+                */
+               pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
+                                     pool->pgp_num_mask) +
+                       (unsigned)pgid.pool;
+       }
        r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
-                         min_t(int, pool->v.size, *num),
+                         min_t(int, pool->size, *num),
                          osdmap->osd_weight);
        if (r < 0) {
-               pr_err("error %d from crush rule: pool %d ruleset %d type %d"
-                      " size %d\n", r, poolid, pool->v.crush_ruleset,
-                      pool->v.type, pool->v.size);
+               pr_err("error %d from crush rule: pool %lld ruleset %d type %d"
+                      " size %d\n", r, pgid.pool, pool->crush_ruleset,
+                      pool->type, pool->size);
                return NULL;
        }
        *num = r;
index cd9c21d..815a224 100644 (file)
@@ -12,7 +12,7 @@
 /*
  * build a vector of user pages
  */
-struct page **ceph_get_direct_page_vector(const char __user *data,
+struct page **ceph_get_direct_page_vector(const void __user *data,
                                          int num_pages, bool write_page)
 {
        struct page **pages;
@@ -93,7 +93,7 @@ EXPORT_SYMBOL(ceph_alloc_page_vector);
  * copy user data into a page vector
  */
 int ceph_copy_user_to_page_vector(struct page **pages,
-                                        const char __user *data,
+                                        const void __user *data,
                                         loff_t off, size_t len)
 {
        int i = 0;
@@ -118,17 +118,17 @@ int ceph_copy_user_to_page_vector(struct page **pages,
 }
 EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
 
-int ceph_copy_to_page_vector(struct page **pages,
-                                   const char *data,
+void ceph_copy_to_page_vector(struct page **pages,
+                                   const void *data,
                                    loff_t off, size_t len)
 {
        int i = 0;
        size_t po = off & ~PAGE_CACHE_MASK;
        size_t left = len;
-       size_t l;
 
        while (left > 0) {
-               l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+               size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+
                memcpy(page_address(pages[i]) + po, data, l);
                data += l;
                left -= l;
@@ -138,21 +138,20 @@ int ceph_copy_to_page_vector(struct page **pages,
                        i++;
                }
        }
-       return len;
 }
 EXPORT_SYMBOL(ceph_copy_to_page_vector);
 
-int ceph_copy_from_page_vector(struct page **pages,
-                                   char *data,
+void ceph_copy_from_page_vector(struct page **pages,
+                                   void *data,
                                    loff_t off, size_t len)
 {
        int i = 0;
        size_t po = off & ~PAGE_CACHE_MASK;
        size_t left = len;
-       size_t l;
 
        while (left > 0) {
-               l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+               size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+
                memcpy(data, page_address(pages[i]) + po, l);
                data += l;
                left -= l;
@@ -162,7 +161,6 @@ int ceph_copy_from_page_vector(struct page **pages,
                        i++;
                }
        }
-       return len;
 }
 EXPORT_SYMBOL(ceph_copy_from_page_vector);
 
@@ -170,7 +168,7 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector);
  * copy user data from a page vector into a user pointer
  */
 int ceph_copy_page_vector_to_user(struct page **pages,
-                                        char __user *data,
+                                        void __user *data,
                                         loff_t off, size_t len)
 {
        int i = 0;
index 18d8b5a..8f152f9 100644 (file)
@@ -658,11 +658,10 @@ __setup("netdev=", netdev_boot_setup);
 
 struct net_device *__dev_get_by_name(struct net *net, const char *name)
 {
-       struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);
 
-       hlist_for_each_entry(dev, p, head, name_hlist)
+       hlist_for_each_entry(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;
 
@@ -684,11 +683,10 @@ EXPORT_SYMBOL(__dev_get_by_name);
 
 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
 {
-       struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);
 
-       hlist_for_each_entry_rcu(dev, p, head, name_hlist)
+       hlist_for_each_entry_rcu(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;
 
@@ -735,11 +733,10 @@ EXPORT_SYMBOL(dev_get_by_name);
 
 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
 {
-       struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);
 
-       hlist_for_each_entry(dev, p, head, index_hlist)
+       hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;
 
@@ -760,11 +757,10 @@ EXPORT_SYMBOL(__dev_get_by_index);
 
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
 {
-       struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);
 
-       hlist_for_each_entry_rcu(dev, p, head, index_hlist)
+       hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;
 
@@ -4107,7 +4103,7 @@ static void net_rx_action(struct softirq_action *h)
                 * Allow this to run for 2 jiffies since which will allow
                 * an average latency of 1.5/HZ.
                 */
-               if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
+               if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
                        goto softnet_break;
 
                local_irq_enable();
@@ -4784,7 +4780,7 @@ EXPORT_SYMBOL(dev_set_mac_address);
 /**
  *     dev_change_carrier - Change device carrier
  *     @dev: device
- *     @new_carries: new value
+ *     @new_carrier: new value
  *
  *     Change device carrier
  */
index 43f7495..c56ea6f 100644 (file)
@@ -132,14 +132,14 @@ static void __flow_cache_shrink(struct flow_cache *fc,
                                int shrink_to)
 {
        struct flow_cache_entry *fle;
-       struct hlist_node *entry, *tmp;
+       struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
 
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;
 
-               hlist_for_each_entry_safe(fle, entry, tmp,
+               hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle)) {
@@ -211,7 +211,6 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
        struct flow_cache *fc = &flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
-       struct hlist_node *entry;
        struct flow_cache_object *flo;
        size_t keysize;
        unsigned int hash;
@@ -235,7 +234,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                flow_new_hash_rnd(fc, fcp);
 
        hash = flow_hash_code(fc, fcp, key, keysize);
-       hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
+       hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
                if (tfle->net == net &&
                    tfle->family == family &&
                    tfle->dir == dir &&
@@ -301,13 +300,13 @@ static void flow_cache_flush_tasklet(unsigned long data)
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
-       struct hlist_node *entry, *tmp;
+       struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
 
        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
-               hlist_for_each_entry_safe(fle, entry, tmp,
+               hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle))
                                continue;
index 0f6bb6f..3174f19 100644 (file)
@@ -16,12 +16,11 @@ static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff
 {
        struct net *net = seq_file_net(seq);
        struct net_device *dev;
-       struct hlist_node *p;
        struct hlist_head *h;
        unsigned int count = 0, offset = get_offset(*pos);
 
        h = &net->dev_name_head[get_bucket(*pos)];
-       hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
+       hlist_for_each_entry_rcu(dev, h, name_hlist) {
                if (++count == offset)
                        return dev;
        }
index 8acce01..80e271d 100644 (file)
@@ -344,7 +344,7 @@ struct net *get_net_ns_by_fd(int fd)
        if (IS_ERR(file))
                return ERR_CAST(file);
 
-       ei = PROC_I(file->f_dentry->d_inode);
+       ei = PROC_I(file_inode(file));
        if (ei->ns_ops == &netns_operations)
                net = get_net(ei->ns);
        else
index d8aa20f..b376410 100644 (file)
@@ -1060,7 +1060,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        int idx = 0, s_idx;
        struct net_device *dev;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct nlattr *tb[IFLA_MAX+1];
        u32 ext_filter_mask = 0;
 
@@ -1080,7 +1079,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
-               hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+               hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
                        if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
index c4a2def..c21f200 100644 (file)
@@ -175,12 +175,11 @@ static struct hlist_head *dn_find_list(struct sock *sk)
 static int check_port(__le16 port)
 {
        struct sock *sk;
-       struct hlist_node *node;
 
        if (port == 0)
                return -1;
 
-       sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
+       sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
                struct dn_scp *scp = DN_SK(sk);
                if (scp->addrloc == port)
                        return -1;
@@ -374,11 +373,10 @@ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn,
 struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
 {
        struct hlist_head *list = listen_hash(addr);
-       struct hlist_node *node;
        struct sock *sk;
 
        read_lock(&dn_hash_lock);
-       sk_for_each(sk, node, list) {
+       sk_for_each(sk, list) {
                struct dn_scp *scp = DN_SK(sk);
                if (sk->sk_state != TCP_LISTEN)
                        continue;
@@ -414,11 +412,10 @@ struct sock *dn_find_by_skb(struct sk_buff *skb)
 {
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        struct sock *sk;
-       struct hlist_node *node;
        struct dn_scp *scp;
 
        read_lock(&dn_hash_lock);
-       sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
+       sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
                scp = DN_SK(sk);
                if (cb->src != dn_saddr2dn(&scp->peer))
                        continue;
index f968c1b..6c2445b 100644 (file)
@@ -483,7 +483,6 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
        unsigned int h, s_h;
        unsigned int e = 0, s_e;
        struct dn_fib_table *tb;
-       struct hlist_node *node;
        int dumped = 0;
 
        if (!net_eq(net, &init_net))
@@ -498,7 +497,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
                e = 0;
-               hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) {
+               hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) {
                        if (e < s_e)
                                goto next;
                        if (dumped)
@@ -828,7 +827,6 @@ out:
 struct dn_fib_table *dn_fib_get_table(u32 n, int create)
 {
        struct dn_fib_table *t;
-       struct hlist_node *node;
        unsigned int h;
 
        if (n < RT_TABLE_MIN)
@@ -839,7 +837,7 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create)
 
        h = n & (DN_FIB_TABLE_HASHSZ - 1);
        rcu_read_lock();
-       hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) {
+       hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) {
                if (t->n == n) {
                        rcu_read_unlock();
                        return t;
@@ -885,11 +883,10 @@ void dn_fib_flush(void)
 {
        int flushed = 0;
        struct dn_fib_table *tb;
-       struct hlist_node *node;
        unsigned int h;
 
        for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
-               hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
+               hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist)
                        flushed += tb->flush(tb);
        }
 
@@ -908,12 +905,12 @@ void __init dn_fib_table_init(void)
 void __exit dn_fib_table_cleanup(void)
 {
        struct dn_fib_table *t;
-       struct hlist_node *node, *next;
+       struct hlist_node *next;
        unsigned int h;
 
        write_lock(&dn_fib_tables_lock);
        for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
-               hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
+               hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h],
                                          hlist) {
                        hlist_del(&t->hlist);
                        kfree(t);
index 1670561..e0da175 100644 (file)
@@ -350,7 +350,6 @@ static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id,
 int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
 {
        struct sock *sk, *prev = NULL;
-       struct hlist_node *node;
        int ret = NET_RX_SUCCESS;
        u16 pan_id, short_addr;
 
@@ -361,7 +360,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
        short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
 
        read_lock(&dgram_lock);
-       sk_for_each(sk, node, &dgram_head) {
+       sk_for_each(sk, &dgram_head) {
                if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr,
                                        dgram_sk(sk))) {
                        if (prev) {
index 50e8239..41f538b 100644 (file)
@@ -221,10 +221,9 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
 {
        struct sock *sk;
-       struct hlist_node *node;
 
        read_lock(&raw_lock);
-       sk_for_each(sk, node, &raw_head) {
+       sk_for_each(sk, &raw_head) {
                bh_lock_sock(sk);
                if (!sk->sk_bound_dev_if ||
                    sk->sk_bound_dev_if == dev->ifindex) {
index 5281314..f678507 100644 (file)
@@ -139,10 +139,9 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
        u32 hash = inet_addr_hash(net, addr);
        struct net_device *result = NULL;
        struct in_ifaddr *ifa;
-       struct hlist_node *node;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+       hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
                if (ifa->ifa_local == addr) {
                        struct net_device *dev = ifa->ifa_dev->dev;
 
@@ -588,7 +587,6 @@ static void check_lifetime(struct work_struct *work)
 {
        unsigned long now, next, next_sec, next_sched;
        struct in_ifaddr *ifa;
-       struct hlist_node *node;
        int i;
 
        now = jiffies;
@@ -596,8 +594,7 @@ static void check_lifetime(struct work_struct *work)
 
        rcu_read_lock();
        for (i = 0; i < IN4_ADDR_HSIZE; i++) {
-               hlist_for_each_entry_rcu(ifa, node,
-                                        &inet_addr_lst[i], hash) {
+               hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
                        unsigned long age;
 
                        if (ifa->ifa_flags & IFA_F_PERMANENT)
@@ -1493,7 +1490,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
        struct in_device *in_dev;
        struct in_ifaddr *ifa;
        struct hlist_head *head;
-       struct hlist_node *node;
 
        s_h = cb->args[0];
        s_idx = idx = cb->args[1];
@@ -1503,7 +1499,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                idx = 0;
                head = &net->dev_index_head[h];
                rcu_read_lock();
-               hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+               hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
                        if (h > s_h || idx > s_idx)
index 99f00d3..eb4bb12 100644 (file)
@@ -112,7 +112,6 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
 struct fib_table *fib_get_table(struct net *net, u32 id)
 {
        struct fib_table *tb;
-       struct hlist_node *node;
        struct hlist_head *head;
        unsigned int h;
 
@@ -122,7 +121,7 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
 
        rcu_read_lock();
        head = &net->ipv4.fib_table_hash[h];
-       hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+       hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                if (tb->tb_id == id) {
                        rcu_read_unlock();
                        return tb;
@@ -137,13 +136,12 @@ static void fib_flush(struct net *net)
 {
        int flushed = 0;
        struct fib_table *tb;
-       struct hlist_node *node;
        struct hlist_head *head;
        unsigned int h;
 
        for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                head = &net->ipv4.fib_table_hash[h];
-               hlist_for_each_entry(tb, node, head, tb_hlist)
+               hlist_for_each_entry(tb, head, tb_hlist)
                        flushed += fib_table_flush(tb);
        }
 
@@ -656,7 +654,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        unsigned int h, s_h;
        unsigned int e = 0, s_e;
        struct fib_table *tb;
-       struct hlist_node *node;
        struct hlist_head *head;
        int dumped = 0;
 
@@ -670,7 +667,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
                e = 0;
                head = &net->ipv4.fib_table_hash[h];
-               hlist_for_each_entry(tb, node, head, tb_hlist) {
+               hlist_for_each_entry(tb, head, tb_hlist) {
                        if (e < s_e)
                                goto next;
                        if (dumped)
@@ -1117,11 +1114,11 @@ static void ip_fib_net_exit(struct net *net)
        for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
                struct fib_table *tb;
                struct hlist_head *head;
-               struct hlist_node *node, *tmp;
+               struct hlist_node *tmp;
 
                head = &net->ipv4.fib_table_hash[i];
-               hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
-                       hlist_del(node);
+               hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
+                       hlist_del(&tb->tb_hlist);
                        fib_table_flush(tb);
                        fib_free_table(tb);
                }
index 4797a80..8f6cb7a 100644 (file)
@@ -298,14 +298,13 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 static struct fib_info *fib_find_info(const struct fib_info *nfi)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct fib_info *fi;
        unsigned int hash;
 
        hash = fib_info_hashfn(nfi);
        head = &fib_info_hash[hash];
 
-       hlist_for_each_entry(fi, node, head, fib_hash) {
+       hlist_for_each_entry(fi, head, fib_hash) {
                if (!net_eq(fi->fib_net, nfi->fib_net))
                        continue;
                if (fi->fib_nhs != nfi->fib_nhs)
@@ -331,7 +330,6 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
 int ip_fib_check_default(__be32 gw, struct net_device *dev)
 {
        struct hlist_head *head;
-       struct hlist_node *node;
        struct fib_nh *nh;
        unsigned int hash;
 
@@ -339,7 +337,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev)
 
        hash = fib_devindex_hashfn(dev->ifindex);
        head = &fib_info_devhash[hash];
-       hlist_for_each_entry(nh, node, head, nh_hash) {
+       hlist_for_each_entry(nh, head, nh_hash) {
                if (nh->nh_dev == dev &&
                    nh->nh_gw == gw &&
                    !(nh->nh_flags & RTNH_F_DEAD)) {
@@ -721,10 +719,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
 
        for (i = 0; i < old_size; i++) {
                struct hlist_head *head = &fib_info_hash[i];
-               struct hlist_node *node, *n;
+               struct hlist_node *n;
                struct fib_info *fi;
 
-               hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
+               hlist_for_each_entry_safe(fi, n, head, fib_hash) {
                        struct hlist_head *dest;
                        unsigned int new_hash;
 
@@ -739,10 +737,10 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
 
        for (i = 0; i < old_size; i++) {
                struct hlist_head *lhead = &fib_info_laddrhash[i];
-               struct hlist_node *node, *n;
+               struct hlist_node *n;
                struct fib_info *fi;
 
-               hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
+               hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
                        struct hlist_head *ldest;
                        unsigned int new_hash;
 
@@ -1096,13 +1094,12 @@ int fib_sync_down_addr(struct net *net, __be32 local)
        int ret = 0;
        unsigned int hash = fib_laddr_hashfn(local);
        struct hlist_head *head = &fib_info_laddrhash[hash];
-       struct hlist_node *node;
        struct fib_info *fi;
 
        if (fib_info_laddrhash == NULL || local == 0)
                return 0;
 
-       hlist_for_each_entry(fi, node, head, fib_lhash) {
+       hlist_for_each_entry(fi, head, fib_lhash) {
                if (!net_eq(fi->fib_net, net))
                        continue;
                if (fi->fib_prefsrc == local) {
@@ -1120,13 +1117,12 @@ int fib_sync_down_dev(struct net_device *dev, int force)
        struct fib_info *prev_fi = NULL;
        unsigned int hash = fib_devindex_hashfn(dev->ifindex);
        struct hlist_head *head = &fib_info_devhash[hash];
-       struct hlist_node *node;
        struct fib_nh *nh;
 
        if (force)
                scope = -1;
 
-       hlist_for_each_entry(nh, node, head, nh_hash) {
+       hlist_for_each_entry(nh, head, nh_hash) {
                struct fib_info *fi = nh->nh_parent;
                int dead;
 
@@ -1232,7 +1228,6 @@ int fib_sync_up(struct net_device *dev)
        struct fib_info *prev_fi;
        unsigned int hash;
        struct hlist_head *head;
-       struct hlist_node *node;
        struct fib_nh *nh;
        int ret;
 
@@ -1244,7 +1239,7 @@ int fib_sync_up(struct net_device *dev)
        head = &fib_info_devhash[hash];
        ret = 0;
 
-       hlist_for_each_entry(nh, node, head, nh_hash) {
+       hlist_for_each_entry(nh, head, nh_hash) {
                struct fib_info *fi = nh->nh_parent;
                int alive;
 
index 61e03da..ff06b75 100644 (file)
@@ -920,10 +920,9 @@ nomem:
 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
 {
        struct hlist_head *head = &l->list;
-       struct hlist_node *node;
        struct leaf_info *li;
 
-       hlist_for_each_entry_rcu(li, node, head, hlist)
+       hlist_for_each_entry_rcu(li, head, hlist)
                if (li->plen == plen)
                        return li;
 
@@ -943,12 +942,11 @@ static inline struct list_head *get_fa_head(struct leaf *l, int plen)
 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
 {
        struct leaf_info *li = NULL, *last = NULL;
-       struct hlist_node *node;
 
        if (hlist_empty(head)) {
                hlist_add_head_rcu(&new->hlist, head);
        } else {
-               hlist_for_each_entry(li, node, head, hlist) {
+               hlist_for_each_entry(li, head, hlist) {
                        if (new->plen > li->plen)
                                break;
 
@@ -1354,9 +1352,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 {
        struct leaf_info *li;
        struct hlist_head *hhead = &l->list;
-       struct hlist_node *node;
 
-       hlist_for_each_entry_rcu(li, node, hhead, hlist) {
+       hlist_for_each_entry_rcu(li, hhead, hlist) {
                struct fib_alias *fa;
 
                if (l->key != (key & li->mask_plen))
@@ -1740,10 +1737,10 @@ static int trie_flush_leaf(struct leaf *l)
 {
        int found = 0;
        struct hlist_head *lih = &l->list;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        struct leaf_info *li = NULL;
 
-       hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
+       hlist_for_each_entry_safe(li, tmp, lih, hlist) {
                found += trie_flush_list(&li->falh);
 
                if (list_empty(&li->falh)) {
@@ -1895,14 +1892,13 @@ static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
                        struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct leaf_info *li;
-       struct hlist_node *node;
        int i, s_i;
 
        s_i = cb->args[4];
        i = 0;
 
        /* rcu_read_lock is hold by caller */
-       hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+       hlist_for_each_entry_rcu(li, &l->list, hlist) {
                if (i < s_i) {
                        i++;
                        continue;
@@ -2092,14 +2088,13 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
                if (IS_LEAF(n)) {
                        struct leaf *l = (struct leaf *)n;
                        struct leaf_info *li;
-                       struct hlist_node *tmp;
 
                        s->leaves++;
                        s->totdepth += iter.depth;
                        if (iter.depth > s->maxdepth)
                                s->maxdepth = iter.depth;
 
-                       hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
+                       hlist_for_each_entry_rcu(li, &l->list, hlist)
                                ++s->prefixes;
                } else {
                        const struct tnode *tn = (const struct tnode *) n;
@@ -2200,10 +2195,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 
        for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-               struct hlist_node *node;
                struct fib_table *tb;
 
-               hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+               hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                        struct trie *t = (struct trie *) tb->tb_data;
                        struct trie_stat stat;
 
@@ -2245,10 +2239,9 @@ static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 
        for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-               struct hlist_node *node;
                struct fib_table *tb;
 
-               hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
+               hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                        struct rt_trie_node *n;
 
                        for (n = fib_trie_get_first(iter,
@@ -2298,7 +2291,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        /* new hash chain */
        while (++h < FIB_TABLE_HASHSZ) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[h];
-               hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
+               hlist_for_each_entry_rcu(tb, head, tb_hlist) {
                        n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
                        if (n)
                                goto found;
@@ -2381,13 +2374,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
        } else {
                struct leaf *l = (struct leaf *) n;
                struct leaf_info *li;
-               struct hlist_node *node;
                __be32 val = htonl(l->key);
 
                seq_indent(seq, iter->depth);
                seq_printf(seq, "  |-- %pI4\n", &val);
 
-               hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+               hlist_for_each_entry_rcu(li, &l->list, hlist) {
                        struct fib_alias *fa;
 
                        list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2532,7 +2524,6 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
 {
        struct leaf *l = v;
        struct leaf_info *li;
-       struct hlist_node *node;
 
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
@@ -2541,7 +2532,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
                return 0;
        }
 
-       hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
+       hlist_for_each_entry_rcu(li, &l->list, hlist) {
                struct fib_alias *fa;
                __be32 mask, prefix;
 
index 11cb497..7d1874b 100644 (file)
@@ -57,7 +57,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
                           const struct inet_bind_bucket *tb, bool relax)
 {
        struct sock *sk2;
-       struct hlist_node *node;
        int reuse = sk->sk_reuse;
        int reuseport = sk->sk_reuseport;
        kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -69,7 +68,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
         * one this bucket belongs to.
         */
 
-       sk_for_each_bound(sk2, node, &tb->owners) {
+       sk_for_each_bound(sk2, &tb->owners) {
                if (sk != sk2 &&
                    !inet_v6_ipv6only(sk2) &&
                    (!sk->sk_bound_dev_if ||
@@ -95,7 +94,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
                        }
                }
        }
-       return node != NULL;
+       return sk2 != NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
 
@@ -106,7 +105,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_bind_hashbucket *head;
-       struct hlist_node *node;
        struct inet_bind_bucket *tb;
        int ret, attempts = 5;
        struct net *net = sock_net(sk);
@@ -129,7 +127,7 @@ again:
                        head = &hashinfo->bhash[inet_bhashfn(net, rover,
                                        hashinfo->bhash_size)];
                        spin_lock(&head->lock);
-                       inet_bind_bucket_for_each(tb, node, &head->chain)
+                       inet_bind_bucket_for_each(tb, &head->chain)
                                if (net_eq(ib_net(tb), net) && tb->port == rover) {
                                        if (((tb->fastreuse > 0 &&
                                              sk->sk_reuse &&
@@ -183,7 +181,7 @@ have_snum:
                head = &hashinfo->bhash[inet_bhashfn(net, snum,
                                hashinfo->bhash_size)];
                spin_lock(&head->lock);
-               inet_bind_bucket_for_each(tb, node, &head->chain)
+               inet_bind_bucket_for_each(tb, &head->chain)
                        if (net_eq(ib_net(tb), net) && tb->port == snum)
                                goto tb_found;
        }
index 2e453bd..245ae07 100644 (file)
@@ -33,9 +33,9 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
        get_random_bytes(&f->rnd, sizeof(u32));
        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_queue *q;
-               struct hlist_node *p, *n;
+               struct hlist_node *n;
 
-               hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
+               hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
                        unsigned int hval = f->hashfn(q);
 
                        if (hval != i) {
@@ -203,7 +203,6 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 {
        struct inet_frag_queue *qp;
 #ifdef CONFIG_SMP
-       struct hlist_node *n;
 #endif
        unsigned int hash;
 
@@ -219,7 +218,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
         * such entry could be created on other cpu, while we
         * promoted read lock to write lock.
         */
-       hlist_for_each_entry(qp, n, &f->hash[hash], list) {
+       hlist_for_each_entry(qp, &f->hash[hash], list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        write_unlock(&f->lock);
@@ -278,9 +277,8 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
        __releases(&f->lock)
 {
        struct inet_frag_queue *q;
-       struct hlist_node *n;
 
-       hlist_for_each_entry(q, n, &f->hash[hash], list) {
+       hlist_for_each_entry(q, &f->hash[hash], list) {
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        read_unlock(&f->lock);
index 0ce0595..6af375a 100644 (file)
@@ -120,13 +120,12 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
                 * that the listener socket's icsk_bind_hash is the same
                 * as that of the child socket. We have to look up or
                 * create a new bind bucket for the child here. */
-               struct hlist_node *node;
-               inet_bind_bucket_for_each(tb, node, &head->chain) {
+               inet_bind_bucket_for_each(tb, &head->chain) {
                        if (net_eq(ib_net(tb), sock_net(sk)) &&
                            tb->port == port)
                                break;
                }
-               if (!node) {
+               if (!tb) {
                        tb = inet_bind_bucket_create(table->bind_bucket_cachep,
                                                     sock_net(sk), head, port);
                        if (!tb) {
@@ -493,7 +492,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                int i, remaining, low, high, port;
                static u32 hint;
                u32 offset = hint + port_offset;
-               struct hlist_node *node;
                struct inet_timewait_sock *tw = NULL;
 
                inet_get_local_port_range(&low, &high);
@@ -512,7 +510,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                         * because the established check is already
                         * unique enough.
                         */
-                       inet_bind_bucket_for_each(tb, node, &head->chain) {
+                       inet_bind_bucket_for_each(tb, &head->chain) {
                                if (net_eq(ib_net(tb), net) &&
                                    tb->port == port) {
                                        if (tb->fastreuse >= 0 ||
index 2784db3..1f27c9f 100644 (file)
@@ -216,7 +216,6 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
                                    const int slot)
 {
        struct inet_timewait_sock *tw;
-       struct hlist_node *node;
        unsigned int killed;
        int ret;
 
@@ -229,7 +228,7 @@ static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
        killed = 0;
        ret = 0;
 rescan:
-       inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
+       inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
                __inet_twsk_del_dead_node(tw);
                spin_unlock(&twdr->death_lock);
                __inet_twsk_kill(tw, twdr->hashinfo);
@@ -438,10 +437,10 @@ void inet_twdr_twcal_tick(unsigned long data)
 
        for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
-                       struct hlist_node *node, *safe;
+                       struct hlist_node *safe;
                        struct inet_timewait_sock *tw;
 
-                       inet_twsk_for_each_inmate_safe(tw, node, safe,
+                       inet_twsk_for_each_inmate_safe(tw, safe,
                                                       &twdr->twcal_row[slot]) {
                                __inet_twsk_del_dead_node(tw);
                                __inet_twsk_kill(tw, twdr->hashinfo);
index 87abd3e..2bdf802 100644 (file)
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
                                        icmp_send(skb, ICMP_DEST_UNREACH,
                                                  ICMP_PROT_UNREACH, 0);
                                }
-                       } else
+                               kfree_skb(skb);
+                       } else {
                                IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
-                       kfree_skb(skb);
+                               consume_skb(skb);
+                       }
                }
        }
  out:
index f6289bf..310a364 100644 (file)
@@ -423,7 +423,7 @@ int ip_options_compile(struct net *net,
                                        put_unaligned_be32(midtime, timeptr);
                                        opt->is_changed = 1;
                                }
-                       } else {
+                       } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
                                unsigned int overflow = optptr[3]>>4;
                                if (overflow == 15) {
                                        pp_ptr = optptr + 3;
index 75e33a7..5852b24 100644 (file)
@@ -657,7 +657,7 @@ static int clusterip_proc_release(struct inode *inode, struct file *file)
 static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
                                size_t size, loff_t *ofs)
 {
-       struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data;
+       struct clusterip_config *c = PDE(file_inode(file))->data;
 #define PROC_WRITELEN  10
        char buffer[PROC_WRITELEN+1];
        unsigned long nodenum;
index 53ddebc..dd44e0a 100644 (file)
@@ -111,9 +111,7 @@ EXPORT_SYMBOL_GPL(raw_unhash_sk);
 static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
                unsigned short num, __be32 raddr, __be32 laddr, int dif)
 {
-       struct hlist_node *node;
-
-       sk_for_each_from(sk, node) {
+       sk_for_each_from(sk) {
                struct inet_sock *inet = inet_sk(sk);
 
                if (net_eq(sock_net(sk), net) && inet->inet_num == num  &&
@@ -914,9 +912,7 @@ static struct sock *raw_get_first(struct seq_file *seq)
 
        for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
                        ++state->bucket) {
-               struct hlist_node *node;
-
-               sk_for_each(sk, node, &state->h->ht[state->bucket])
+               sk_for_each(sk, &state->h->ht[state->bucket])
                        if (sock_net(sk) == seq_file_net(seq))
                                goto found;
        }
index a759e19..0d9bdac 100644 (file)
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                if (tcp_checksum_complete_user(sk, skb))
                                        goto csum_error;
 
+                               if ((int)skb->truesize > sk->sk_forward_alloc)
+                                       goto step5;
+
                                /* Predicted packet is in window by definition.
                                 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
                                 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                                tcp_rcv_rtt_measure_ts(sk, skb);
 
-                               if ((int)skb->truesize > sk->sk_forward_alloc)
-                                       goto step5;
-
                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
                                /* Bulk data transfer: receiver */
index 145d3bf..4a8ec45 100644 (file)
@@ -954,7 +954,6 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
-       struct hlist_node *pos;
        unsigned int size = sizeof(struct in_addr);
        struct tcp_md5sig_info *md5sig;
 
@@ -968,7 +967,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
        if (family == AF_INET6)
                size = sizeof(struct in6_addr);
 #endif
-       hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
+       hlist_for_each_entry_rcu(key, &md5sig->head, node) {
                if (key->family != family)
                        continue;
                if (!memcmp(&key->addr, addr, size))
@@ -1069,14 +1068,14 @@ static void tcp_clear_md5_list(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
        struct tcp_md5sig_info *md5sig;
 
        md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 
        if (!hlist_empty(&md5sig->head))
                tcp_free_md5sig_pool();
-       hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
+       hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
                hlist_del_rcu(&key->node);
                atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
                kfree_rcu(key, rcu);
index 4dc0d44..f2c7e61 100644 (file)
@@ -1419,11 +1419,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
                  struct net_device *dev, int strict)
 {
        struct inet6_ifaddr *ifp;
-       struct hlist_node *node;
        unsigned int hash = inet6_addr_hash(addr);
 
        rcu_read_lock_bh();
-       hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+       hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -1445,9 +1444,8 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 {
        unsigned int hash = inet6_addr_hash(addr);
        struct inet6_ifaddr *ifp;
-       struct hlist_node *node;
 
-       hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+       hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -1487,10 +1485,9 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
 {
        struct inet6_ifaddr *ifp, *result = NULL;
        unsigned int hash = inet6_addr_hash(addr);
-       struct hlist_node *node;
 
        rcu_read_lock_bh();
-       hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+       hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2907,11 +2904,10 @@ static int addrconf_ifdown(struct net_device *dev, int how)
        /* Step 2: clear hash table */
        for (i = 0; i < IN6_ADDR_HSIZE; i++) {
                struct hlist_head *h = &inet6_addr_lst[i];
-               struct hlist_node *n;
 
                spin_lock_bh(&addrconf_hash_lock);
        restart:
-               hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+               hlist_for_each_entry_rcu(ifa, h, addr_lst) {
                        if (ifa->idev == idev) {
                                hlist_del_init_rcu(&ifa->addr_lst);
                                addrconf_del_timer(ifa);
@@ -3218,8 +3214,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
        }
 
        for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
-               struct hlist_node *n;
-               hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
+               hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
                                         addr_lst) {
                        if (!net_eq(dev_net(ifa->idev->dev), net))
                                continue;
@@ -3244,9 +3239,8 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 {
        struct if6_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);
-       struct hlist_node *n = &ifa->addr_lst;
 
-       hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
+       hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
                if (!net_eq(dev_net(ifa->idev->dev), net))
                        continue;
                state->offset++;
@@ -3255,7 +3249,7 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 
        while (++state->bucket < IN6_ADDR_HSIZE) {
                state->offset = 0;
-               hlist_for_each_entry_rcu_bh(ifa, n,
+               hlist_for_each_entry_rcu_bh(ifa,
                                     &inet6_addr_lst[state->bucket], addr_lst) {
                        if (!net_eq(dev_net(ifa->idev->dev), net))
                                continue;
@@ -3357,11 +3351,10 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
 {
        int ret = 0;
        struct inet6_ifaddr *ifp = NULL;
-       struct hlist_node *n;
        unsigned int hash = inet6_addr_hash(addr);
 
        rcu_read_lock_bh();
-       hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
+       hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3383,7 +3376,6 @@ static void addrconf_verify(unsigned long foo)
 {
        unsigned long now, next, next_sec, next_sched;
        struct inet6_ifaddr *ifp;
-       struct hlist_node *node;
        int i;
 
        rcu_read_lock_bh();
@@ -3395,7 +3387,7 @@ static void addrconf_verify(unsigned long foo)
 
        for (i = 0; i < IN6_ADDR_HSIZE; i++) {
 restart:
-               hlist_for_each_entry_rcu_bh(ifp, node,
+               hlist_for_each_entry_rcu_bh(ifp,
                                         &inet6_addr_lst[i], addr_lst) {
                        unsigned long age;
 
@@ -3866,7 +3858,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
        struct net_device *dev;
        struct inet6_dev *idev;
        struct hlist_head *head;
-       struct hlist_node *node;
 
        s_h = cb->args[0];
        s_idx = idx = cb->args[1];
@@ -3876,7 +3867,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
-               hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+               hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
                        if (h > s_h || idx > s_idx)
@@ -4222,7 +4213,6 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        struct net_device *dev;
        struct inet6_dev *idev;
        struct hlist_head *head;
-       struct hlist_node *node;
 
        s_h = cb->args[0];
        s_idx = cb->args[1];
@@ -4231,7 +4221,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
-               hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+               hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
                        idev = __in6_dev_get(dev);
index ff76eec..aad6435 100644 (file)
@@ -173,9 +173,8 @@ static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
                                                  const struct in6_addr *addr,
                                                  int type, int ifindex)
 {
-       struct hlist_node *pos;
        struct ip6addrlbl_entry *p;
-       hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
+       hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
                if (__ip6addrlbl_match(net, p, addr, type, ifindex))
                        return p;
        }
@@ -261,9 +260,9 @@ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
        if (hlist_empty(&ip6addrlbl_table.head)) {
                hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
        } else {
-               struct hlist_node *pos, *n;
+               struct hlist_node *n;
                struct ip6addrlbl_entry *p = NULL;
-               hlist_for_each_entry_safe(p, pos, n,
+               hlist_for_each_entry_safe(p, n,
                                          &ip6addrlbl_table.head, list) {
                        if (p->prefixlen == newp->prefixlen &&
                            net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
@@ -319,13 +318,13 @@ static int __ip6addrlbl_del(struct net *net,
                            int ifindex)
 {
        struct ip6addrlbl_entry *p = NULL;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
        int ret = -ESRCH;
 
        ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n",
                  __func__, prefix, prefixlen, ifindex);
 
-       hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
+       hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
                if (p->prefixlen == prefixlen &&
                    net_eq(ip6addrlbl_net(p), net) &&
                    p->ifindex == ifindex &&
@@ -380,11 +379,11 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
 static void __net_exit ip6addrlbl_net_exit(struct net *net)
 {
        struct ip6addrlbl_entry *p = NULL;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
 
        /* Remove all labels belonging to the exiting net */
        spin_lock(&ip6addrlbl_table.lock);
-       hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) {
+       hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
                if (net_eq(ip6addrlbl_net(p), net)) {
                        hlist_del_rcu(&p->list);
                        ip6addrlbl_put(p);
@@ -505,12 +504,11 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
        struct ip6addrlbl_entry *p;
-       struct hlist_node *pos;
        int idx = 0, s_idx = cb->args[0];
        int err;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) {
+       hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
                if (idx >= s_idx &&
                    net_eq(ip6addrlbl_net(p), net)) {
                        if ((err = ip6addrlbl_fill(skb, p,
index b386a2c..9bfab19 100644 (file)
@@ -31,7 +31,6 @@ int inet6_csk_bind_conflict(const struct sock *sk,
                            const struct inet_bind_bucket *tb, bool relax)
 {
        const struct sock *sk2;
-       const struct hlist_node *node;
        int reuse = sk->sk_reuse;
        int reuseport = sk->sk_reuseport;
        kuid_t uid = sock_i_uid((struct sock *)sk);
@@ -41,7 +40,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
         * See comment in inet_csk_bind_conflict about sock lookup
         * vs net namespaces issues.
         */
-       sk_for_each_bound(sk2, node, &tb->owners) {
+       sk_for_each_bound(sk2, &tb->owners) {
                if (sk != sk2 &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
@@ -58,7 +57,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
                }
        }
 
-       return node != NULL;
+       return sk2 != NULL;
 }
 
 EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
index 710cafd..192dd1a 100644 (file)
@@ -224,7 +224,6 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
 {
        struct fib6_table *tb;
        struct hlist_head *head;
-       struct hlist_node *node;
        unsigned int h;
 
        if (id == 0)
@@ -232,7 +231,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
        h = id & (FIB6_TABLE_HASHSZ - 1);
        rcu_read_lock();
        head = &net->ipv6.fib_table_hash[h];
-       hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
+       hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
                if (tb->tb6_id == id) {
                        rcu_read_unlock();
                        return tb;
@@ -363,7 +362,6 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        struct rt6_rtnl_dump_arg arg;
        struct fib6_walker_t *w;
        struct fib6_table *tb;
-       struct hlist_node *node;
        struct hlist_head *head;
        int res = 0;
 
@@ -398,7 +396,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
                e = 0;
                head = &net->ipv6.fib_table_hash[h];
-               hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
+               hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
                        if (e < s_e)
                                goto next;
                        res = fib6_dump_table(tb, skb, cb);
@@ -1520,14 +1518,13 @@ void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg
                    int prune, void *arg)
 {
        struct fib6_table *table;
-       struct hlist_node *node;
        struct hlist_head *head;
        unsigned int h;
 
        rcu_read_lock();
        for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
                head = &net->ipv6.fib_table_hash[h];
-               hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+               hlist_for_each_entry_rcu(table, head, tb6_hlist) {
                        read_lock_bh(&table->tb6_lock);
                        fib6_clean_tree(net, &table->tb6_root,
                                        func, prune, arg);
@@ -1540,14 +1537,13 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    int prune, void *arg)
 {
        struct fib6_table *table;
-       struct hlist_node *node;
        struct hlist_head *head;
        unsigned int h;
 
        rcu_read_lock();
        for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
                head = &net->ipv6.fib_table_hash[h];
-               hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
+               hlist_for_each_entry_rcu(table, head, tb6_hlist) {
                        write_lock_bh(&table->tb6_lock);
                        fib6_clean_tree(net, &table->tb6_root,
                                        func, prune, arg);
index 5b10414..b1876e5 100644 (file)
@@ -241,9 +241,11 @@ resubmit:
                                icmpv6_send(skb, ICMPV6_PARAMPROB,
                                            ICMPV6_UNK_NEXTHDR, nhoff);
                        }
-               } else
+                       kfree_skb(skb);
+               } else {
                        IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
-               kfree_skb(skb);
+                       consume_skb(skb);
+               }
        }
        rcu_read_unlock();
        return 0;
index c65907d..330b5e7 100644 (file)
@@ -71,10 +71,9 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
                unsigned short num, const struct in6_addr *loc_addr,
                const struct in6_addr *rmt_addr, int dif)
 {
-       struct hlist_node *node;
        bool is_multicast = ipv6_addr_is_multicast(loc_addr);
 
-       sk_for_each_from(sk, node)
+       sk_for_each_from(sk)
                if (inet_sk(sk)->inet_num == num) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
 
index 9282665..e5fe004 100644 (file)
@@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
 restart:
        read_lock_bh(&table->tb6_lock);
        for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-               if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
+               if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+                   (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
                        dst_hold(&rt->dst);
                        read_unlock_bh(&table->tb6_lock);
                        ip6_del_rt(rt);
index 6cc4801..de2bcfa 100644 (file)
@@ -89,9 +89,8 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const
 {
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        struct xfrm6_tunnel_spi *x6spi;
-       struct hlist_node *pos;
 
-       hlist_for_each_entry_rcu(x6spi, pos,
+       hlist_for_each_entry_rcu(x6spi,
                             &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
                             list_byaddr) {
                if (xfrm6_addr_equal(&x6spi->addr, saddr))
@@ -120,9 +119,8 @@ static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        struct xfrm6_tunnel_spi *x6spi;
        int index = xfrm6_tunnel_spi_hash_byspi(spi);
-       struct hlist_node *pos;
 
-       hlist_for_each_entry(x6spi, pos,
+       hlist_for_each_entry(x6spi,
                             &xfrm6_tn->spi_byspi[index],
                             list_byspi) {
                if (x6spi->spi == spi)
@@ -203,11 +201,11 @@ static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
 {
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        struct xfrm6_tunnel_spi *x6spi;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
 
        spin_lock_bh(&xfrm6_tunnel_spi_lock);
 
-       hlist_for_each_entry_safe(x6spi, pos, n,
+       hlist_for_each_entry_safe(x6spi, n,
                                  &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
                                  list_byaddr)
        {
index dfd6faa..f547a47 100644 (file)
@@ -228,9 +228,8 @@ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc,
                                         __be16 port)
 {
        struct sock *s;
-       struct hlist_node *node;
 
-       sk_for_each(s, node, &intrfc->if_sklist)
+       sk_for_each(s, &intrfc->if_sklist)
                if (ipx_sk(s)->port == port)
                        goto found;
        s = NULL;
@@ -259,12 +258,11 @@ static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc,
                                                __be16 port)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        ipxitf_hold(intrfc);
        spin_lock_bh(&intrfc->if_sklist_lock);
 
-       sk_for_each(s, node, &intrfc->if_sklist) {
+       sk_for_each(s, &intrfc->if_sklist) {
                struct ipx_sock *ipxs = ipx_sk(s);
 
                if (ipxs->port == port &&
@@ -282,14 +280,14 @@ found:
 static void __ipxitf_down(struct ipx_interface *intrfc)
 {
        struct sock *s;
-       struct hlist_node *node, *t;
+       struct hlist_node *t;
 
        /* Delete all routes associated with this interface */
        ipxrtr_del_routes(intrfc);
 
        spin_lock_bh(&intrfc->if_sklist_lock);
        /* error sockets */
-       sk_for_each_safe(s, node, t, &intrfc->if_sklist) {
+       sk_for_each_safe(s, t, &intrfc->if_sklist) {
                struct ipx_sock *ipxs = ipx_sk(s);
 
                s->sk_err = ENOLINK;
@@ -385,12 +383,11 @@ static int ipxitf_demux_socket(struct ipx_interface *intrfc,
        int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node,
                                   IPX_NODE_LEN);
        struct sock *s;
-       struct hlist_node *node;
        int rc;
 
        spin_lock_bh(&intrfc->if_sklist_lock);
 
-       sk_for_each(s, node, &intrfc->if_sklist) {
+       sk_for_each(s, &intrfc->if_sklist) {
                struct ipx_sock *ipxs = ipx_sk(s);
 
                if (ipxs->port == ipx->ipx_dest.sock &&
@@ -446,12 +443,11 @@ static struct sock *ncp_connection_hack(struct ipx_interface *intrfc,
                connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8);
 
        if (connection) {
-               struct hlist_node *node;
                /* Now we have to look for a special NCP connection handling
                 * socket. Only these sockets have ipx_ncp_conn != 0, set by
                 * SIOCIPXNCPCONN. */
                spin_lock_bh(&intrfc->if_sklist_lock);
-               sk_for_each(sk, node, &intrfc->if_sklist)
+               sk_for_each(sk, &intrfc->if_sklist)
                        if (ipx_sk(sk)->ipx_ncp_conn == connection) {
                                sock_hold(sk);
                                goto found;
index 02ff7f2..65e8833 100644 (file)
@@ -103,19 +103,18 @@ out:
 static __inline__ struct sock *ipx_get_socket_idx(loff_t pos)
 {
        struct sock *s = NULL;
-       struct hlist_node *node;
        struct ipx_interface *i;
 
        list_for_each_entry(i, &ipx_interfaces, node) {
                spin_lock_bh(&i->if_sklist_lock);
-               sk_for_each(s, node, &i->if_sklist) {
+               sk_for_each(s, &i->if_sklist) {
                        if (!pos)
                                break;
                        --pos;
                }
                spin_unlock_bh(&i->if_sklist_lock);
                if (!pos) {
-                       if (node)
+                       if (s)
                                goto found;
                        break;
                }
index 9a5fd3c..362ba47 100644 (file)
@@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
        struct tty_port *port = &self->port;
        DECLARE_WAITQUEUE(wait, current);
        int             retval;
-       int             do_clocal = 0, extra_count = 0;
+       int             do_clocal = 0;
        unsigned long   flags;
 
        IRDA_DEBUG(2, "%s()\n", __func__ );
@@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
         * If non-blocking mode is set, or the port is not enabled,
         * then make the check up front and then exit.
         */
-       if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
-               /* nonblock mode is set or port is not enabled */
+       if (test_bit(TTY_IO_ERROR, &tty->flags)) {
+               port->flags |= ASYNC_NORMAL_ACTIVE;
+               return 0;
+       }
+
+       if (filp->f_flags & O_NONBLOCK) {
+               /* nonblock mode is set */
+               if (tty->termios.c_cflag & CBAUD)
+                       tty_port_raise_dtr_rts(port);
                port->flags |= ASYNC_NORMAL_ACTIVE;
                IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );
                return 0;
@@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
              __FILE__, __LINE__, tty->driver->name, port->count);
 
        spin_lock_irqsave(&port->lock, flags);
-       if (!tty_hung_up_p(filp)) {
-               extra_count = 1;
+       if (!tty_hung_up_p(filp))
                port->count--;
-       }
-       spin_unlock_irqrestore(&port->lock, flags);
        port->blocked_open++;
+       spin_unlock_irqrestore(&port->lock, flags);
 
        while (1) {
                if (tty->termios.c_cflag & CBAUD)
                        tty_port_raise_dtr_rts(port);
 
-               current->state = TASK_INTERRUPTIBLE;
+               set_current_state(TASK_INTERRUPTIBLE);
 
                if (tty_hung_up_p(filp) ||
                    !test_bit(ASYNCB_INITIALIZED, &port->flags)) {
@@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&port->open_wait, &wait);
 
-       if (extra_count) {
-               /* ++ is not atomic, so this should be protected - Jean II */
-               spin_lock_irqsave(&port->lock, flags);
+       spin_lock_irqsave(&port->lock, flags);
+       if (!tty_hung_up_p(filp))
                port->count++;
-               spin_unlock_irqrestore(&port->lock, flags);
-       }
        port->blocked_open--;
+       spin_unlock_irqrestore(&port->lock, flags);
 
        IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
              __FILE__, __LINE__, tty->driver->name, port->count);
index e71e85b..29340a9 100644 (file)
@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
 /*             case CS_ISO_8859_9: */
 /*             case CS_UNICODE: */
                default:
-                       IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
-                                  __func__, ias_charset_types[charset]);
+                       IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
+                                  __func__, charset,
+                                  charset < ARRAY_SIZE(ias_charset_types) ?
+                                       ias_charset_types[charset] :
+                                       "(unknown)");
 
                        /* Aborting, close connection! */
                        iriap_disconnect_request(self);
index cd6f7a9..a7d11ff 100644 (file)
@@ -156,14 +156,13 @@ static int afiucv_pm_freeze(struct device *dev)
 {
        struct iucv_sock *iucv;
        struct sock *sk;
-       struct hlist_node *node;
        int err = 0;
 
 #ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_freeze\n");
 #endif
        read_lock(&iucv_sk_list.lock);
-       sk_for_each(sk, node, &iucv_sk_list.head) {
+       sk_for_each(sk, &iucv_sk_list.head) {
                iucv = iucv_sk(sk);
                switch (sk->sk_state) {
                case IUCV_DISCONN:
@@ -194,13 +193,12 @@ static int afiucv_pm_freeze(struct device *dev)
 static int afiucv_pm_restore_thaw(struct device *dev)
 {
        struct sock *sk;
-       struct hlist_node *node;
 
 #ifdef CONFIG_PM_DEBUG
        printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
 #endif
        read_lock(&iucv_sk_list.lock);
-       sk_for_each(sk, node, &iucv_sk_list.head) {
+       sk_for_each(sk, &iucv_sk_list.head) {
                switch (sk->sk_state) {
                case IUCV_CONNECTED:
                        sk->sk_err = EPIPE;
@@ -390,9 +388,8 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 static struct sock *__iucv_get_sock_by_name(char *nm)
 {
        struct sock *sk;
-       struct hlist_node *node;
 
-       sk_for_each(sk, node, &iucv_sk_list.head)
+       sk_for_each(sk, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;
 
@@ -1678,7 +1675,6 @@ static int iucv_callback_connreq(struct iucv_path *path,
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
-       struct hlist_node *node;
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;
@@ -1689,7 +1685,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk = NULL;
-       sk_for_each(sk, node, &iucv_sk_list.head)
+       sk_for_each(sk, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
@@ -2115,7 +2111,6 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev)
 {
-       struct hlist_node *node;
        struct sock *sk;
        struct iucv_sock *iucv;
        struct af_iucv_trans_hdr *trans_hdr;
@@ -2132,7 +2127,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        iucv = NULL;
        sk = NULL;
        read_lock(&iucv_sk_list.lock);
-       sk_for_each(sk, node, &iucv_sk_list.head) {
+       sk_for_each(sk, &iucv_sk_list.head) {
                if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
                        if ((!memcmp(&iucv_sk(sk)->src_name,
                                     trans_hdr->destAppName, 8)) &&
@@ -2225,10 +2220,9 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
        struct sk_buff *list_skb;
        struct sk_buff *nskb;
        unsigned long flags;
-       struct hlist_node *node;
 
        read_lock_irqsave(&iucv_sk_list.lock, flags);
-       sk_for_each(sk, node, &iucv_sk_list.head)
+       sk_for_each(sk, &iucv_sk_list.head)
                if (sk == isk) {
                        iucv = iucv_sk(sk);
                        break;
@@ -2299,14 +2293,13 @@ static int afiucv_netdev_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
 {
        struct net_device *event_dev = (struct net_device *)ptr;
-       struct hlist_node *node;
        struct sock *sk;
        struct iucv_sock *iucv;
 
        switch (event) {
        case NETDEV_REBOOT:
        case NETDEV_GOING_DOWN:
-               sk_for_each(sk, node, &iucv_sk_list.head) {
+               sk_for_each(sk, &iucv_sk_list.head) {
                        iucv = iucv_sk(sk);
                        if ((iucv->hs_dev == event_dev) &&
                            (sk->sk_state == IUCV_CONNECTED)) {
index 9ef7985..556fdaf 100644 (file)
@@ -225,7 +225,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 {
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
        struct sock *sk;
-       struct hlist_node *node;
        struct sk_buff *skb2 = NULL;
        int err = -ESRCH;
 
@@ -236,7 +235,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
                return -ENOMEM;
 
        rcu_read_lock();
-       sk_for_each_rcu(sk, node, &net_pfkey->table) {
+       sk_for_each_rcu(sk, &net_pfkey->table) {
                struct pfkey_sock *pfk = pfkey_sk(sk);
                int err2;
 
index dcfd64e..d36875f 100644 (file)
@@ -221,10 +221,9 @@ static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
        struct hlist_head *session_list =
                l2tp_session_id_hash_2(pn, session_id);
        struct l2tp_session *session;
-       struct hlist_node *walk;
 
        rcu_read_lock_bh();
-       hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+       hlist_for_each_entry_rcu(session, session_list, global_hlist) {
                if (session->session_id == session_id) {
                        rcu_read_unlock_bh();
                        return session;
@@ -253,7 +252,6 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 {
        struct hlist_head *session_list;
        struct l2tp_session *session;
-       struct hlist_node *walk;
 
        /* In L2TPv3, session_ids are unique over all tunnels and we
         * sometimes need to look them up before we know the
@@ -264,7 +262,7 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 
        session_list = l2tp_session_id_hash(tunnel, session_id);
        read_lock_bh(&tunnel->hlist_lock);
-       hlist_for_each_entry(session, walk, session_list, hlist) {
+       hlist_for_each_entry(session, session_list, hlist) {
                if (session->session_id == session_id) {
                        read_unlock_bh(&tunnel->hlist_lock);
                        return session;
@@ -279,13 +277,12 @@ EXPORT_SYMBOL_GPL(l2tp_session_find);
 struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 {
        int hash;
-       struct hlist_node *walk;
        struct l2tp_session *session;
        int count = 0;
 
        read_lock_bh(&tunnel->hlist_lock);
        for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
-               hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
+               hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
                        if (++count > nth) {
                                read_unlock_bh(&tunnel->hlist_lock);
                                return session;
@@ -306,12 +303,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 {
        struct l2tp_net *pn = l2tp_pernet(net);
        int hash;
-       struct hlist_node *walk;
        struct l2tp_session *session;
 
        rcu_read_lock_bh();
        for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
-               hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
+               hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
                        if (!strcmp(session->ifname, ifname)) {
                                rcu_read_unlock_bh();
                                return session;
index f7ac8f4..7f41b70 100644 (file)
@@ -49,10 +49,9 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 
 static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
 {
-       struct hlist_node *node;
        struct sock *sk;
 
-       sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
+       sk_for_each_bound(sk, &l2tp_ip_bind_table) {
                struct inet_sock *inet = inet_sk(sk);
                struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
 
index 8ee4a86..41f2f81 100644 (file)
@@ -60,10 +60,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
                                           struct in6_addr *laddr,
                                           int dif, u32 tunnel_id)
 {
-       struct hlist_node *node;
        struct sock *sk;
 
-       sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
+       sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
                struct in6_addr *addr = inet6_rcv_saddr(sk);
                struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
index 3f4e3af..6a53371 100644 (file)
@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
        l2tp_xmit_skb(session, skb, session->hdr_len);
 
        sock_put(ps->tunnel_sock);
+       sock_put(sk);
 
        return error;
 
index 7c5073b..78be45c 100644 (file)
@@ -393,12 +393,11 @@ static void llc_sap_mcast(struct llc_sap *sap,
 {
        int i = 0, count = 256 / sizeof(struct sock *);
        struct sock *sk, *stack[count];
-       struct hlist_node *node;
        struct llc_sock *llc;
        struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
 
        spin_lock_bh(&sap->sk_lock);
-       hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
+       hlist_for_each_entry(llc, dev_hb, dev_hash_node) {
 
                sk = &llc->sk;
 
index 09d96a8..fb30681 100644 (file)
@@ -3285,6 +3285,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
                                     struct cfg80211_chan_def *chandef)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+       struct ieee80211_local *local = wiphy_priv(wiphy);
        struct ieee80211_chanctx_conf *chanctx_conf;
        int ret = -ENODATA;
 
@@ -3293,6 +3294,16 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
        if (chanctx_conf) {
                *chandef = chanctx_conf->def;
                ret = 0;
+       } else if (local->open_count > 0 &&
+                  local->open_count == local->monitors &&
+                  sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+               if (local->use_chanctx)
+                       *chandef = local->monitor_chandef;
+               else
+                       cfg80211_chandef_create(chandef,
+                                               local->_oper_channel,
+                                               local->_oper_channel_type);
+               ret = 0;
        }
        rcu_read_unlock();
 
index 2c059e5..baaa860 100644 (file)
@@ -107,7 +107,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
 
        lockdep_assert_held(&local->mtx);
 
-       active = !list_empty(&local->chanctx_list);
+       active = !list_empty(&local->chanctx_list) || local->monitors;
 
        if (!local->ops->remain_on_channel) {
                list_for_each_entry(roc, &local->roc_list, list) {
@@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 
                ieee80211_adjust_monitor_flags(sdata, 1);
                ieee80211_configure_filter(local);
+               mutex_lock(&local->mtx);
+               ieee80211_recalc_idle(local);
+               mutex_unlock(&local->mtx);
 
                netif_carrier_on(dev);
                break;
@@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
                ieee80211_adjust_monitor_flags(sdata, -1);
                ieee80211_configure_filter(local);
+               mutex_lock(&local->mtx);
+               ieee80211_recalc_idle(local);
+               mutex_unlock(&local->mtx);
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* relies on synchronize_rcu() below */
index d0dd111..1a8591b 100644 (file)
@@ -647,8 +647,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
        spin_lock_init(&local->ack_status_lock);
        idr_init(&local->ack_status_frames);
-       /* preallocate at least one entry */
-       idr_pre_get(&local->ack_status_frames, GFP_KERNEL);
 
        sta_info_init(local);
 
index 6b3c4e1..dc7c8df 100644 (file)
@@ -72,9 +72,9 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
  * it's used twice. So it is illegal to do
  *     for_each_mesh_entry(rcu_dereference(...), ...)
  */
-#define for_each_mesh_entry(tbl, p, node, i) \
+#define for_each_mesh_entry(tbl, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
-               hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+               hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
 
 
 static struct mesh_table *mesh_table_alloc(int size_order)
@@ -139,7 +139,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
        }
        if (free_leafs) {
                spin_lock_bh(&tbl->gates_lock);
-               hlist_for_each_entry_safe(gate, p, q,
+               hlist_for_each_entry_safe(gate, q,
                                         tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
@@ -333,12 +333,11 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
                                      struct ieee80211_sub_if_data *sdata)
 {
        struct mesh_path *mpath;
-       struct hlist_node *n;
        struct hlist_head *bucket;
        struct mpath_node *node;
 
        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-       hlist_for_each_entry_rcu(node, n, bucket, list) {
+       hlist_for_each_entry_rcu(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst)) {
@@ -389,11 +388,10 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
-       struct hlist_node *p;
        int i;
        int j = 0;
 
-       for_each_mesh_entry(tbl, p, node, i) {
+       for_each_mesh_entry(tbl, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
@@ -417,13 +415,12 @@ int mesh_path_add_gate(struct mesh_path *mpath)
 {
        struct mesh_table *tbl;
        struct mpath_node *gate, *new_gate;
-       struct hlist_node *n;
        int err;
 
        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
 
-       hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+       hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
@@ -460,9 +457,9 @@ err_rcu:
 static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
        struct mpath_node *gate;
-       struct hlist_node *p, *q;
+       struct hlist_node *q;
 
-       hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) {
+       hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
                if (gate->mpath != mpath)
                        continue;
                spin_lock_bh(&tbl->gates_lock);
@@ -504,7 +501,6 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
-       struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;
@@ -550,7 +546,7 @@ int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
        spin_lock(&tbl->hashwlock[hash_idx]);
 
        err = -EEXIST;
-       hlist_for_each_entry(node, n, bucket, list) {
+       hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
@@ -640,7 +636,6 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
-       struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;
@@ -680,7 +675,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
        spin_lock(&tbl->hashwlock[hash_idx]);
 
        err = -EEXIST;
-       hlist_for_each_entry(node, n, bucket, list) {
+       hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
@@ -725,14 +720,13 @@ void mesh_plink_broken(struct sta_info *sta)
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
-       struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
 
        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
-       for_each_mesh_entry(tbl, p, node, i) {
+       for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
@@ -792,13 +786,12 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
-       struct hlist_node *p;
        int i;
 
        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
-       for_each_mesh_entry(tbl, p, node, i) {
+       for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta) {
                        spin_lock(&tbl->hashwlock[i]);
@@ -815,11 +808,10 @@ static void table_flush_by_iface(struct mesh_table *tbl,
 {
        struct mesh_path *mpath;
        struct mpath_node *node;
-       struct hlist_node *p;
        int i;
 
        WARN_ON(!rcu_read_lock_held());
-       for_each_mesh_entry(tbl, p, node, i) {
+       for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
@@ -865,7 +857,6 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
-       struct hlist_node *n;
        int hash_idx;
        int err = 0;
 
@@ -875,7 +866,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
        bucket = &tbl->hash_buckets[hash_idx];
 
        spin_lock(&tbl->hashwlock[hash_idx]);
-       hlist_for_each_entry(node, n, bucket, list) {
+       hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(addr, mpath->dst)) {
@@ -920,7 +911,6 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
 int mesh_path_send_to_gates(struct mesh_path *mpath)
 {
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
-       struct hlist_node *n;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
@@ -935,7 +925,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
        if (!known_gates)
                return -EHOSTUNREACH;
 
-       hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+       hlist_for_each_entry_rcu(gate, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;
 
@@ -951,7 +941,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
                }
        }
 
-       hlist_for_each_entry_rcu(gate, n, known_gates, list)
+       hlist_for_each_entry_rcu(gate, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
@@ -1096,12 +1086,11 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
-       struct hlist_node *p;
        int i;
 
        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
-       for_each_mesh_entry(tbl, p, node, i) {
+       for_each_mesh_entry(tbl, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
index 9f6464f..1415774 100644 (file)
@@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
                our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
                                                                mask) >> shift;
 
+               if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+                       continue;
+
                switch (ap_mcs) {
                default:
                        if (our_mcs <= ap_mcs)
@@ -3502,6 +3505,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+       /*
+        * Stop timers before deleting work items, as timers
+        * could race and re-add the work-items. They will be
+        * re-established on connection.
+        */
+       del_timer_sync(&ifmgd->conn_mon_timer);
+       del_timer_sync(&ifmgd->bcn_mon_timer);
+
        /*
         * we need to use atomic bitops for the running bits
         * only because both timers might fire at the same
@@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
        if (del_timer_sync(&ifmgd->timer))
                set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
 
-       cancel_work_sync(&ifmgd->chswitch_work);
        if (del_timer_sync(&ifmgd->chswitch_timer))
                set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
-
-       /* these will just be re-established on connection */
-       del_timer_sync(&ifmgd->conn_mon_timer);
-       del_timer_sync(&ifmgd->bcn_mon_timer);
+       cancel_work_sync(&ifmgd->chswitch_work);
 }
 
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
@@ -4315,6 +4322,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+       /*
+        * Make sure some work items will not run after this,
+        * they will not do anything but might not have been
+        * cancelled when disconnecting.
+        */
+       cancel_work_sync(&ifmgd->monitor_work);
+       cancel_work_sync(&ifmgd->beacon_connection_loss_work);
+       cancel_work_sync(&ifmgd->request_smps_work);
+       cancel_work_sync(&ifmgd->csa_connection_drop_work);
+       cancel_work_sync(&ifmgd->chswitch_work);
+
        mutex_lock(&ifmgd->mtx);
        if (ifmgd->assoc_data)
                ieee80211_destroy_assoc_data(sdata, false);
index 5b9602b..8914d2d 100644 (file)
@@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                if (local->queue_stop_reasons[q] ||
                    (!txpending && !skb_queue_empty(&local->pending[q]))) {
                        if (unlikely(info->flags &
-                                       IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
-                                    local->queue_stop_reasons[q] &
-                                       ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
+                                    IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
+                               if (local->queue_stop_reasons[q] &
+                                   ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
+                                       /*
+                                        * Drop off-channel frames if queues
+                                        * are stopped for any reason other
+                                        * than off-channel operation. Never
+                                        * queue them.
+                                        */
+                                       spin_unlock_irqrestore(
+                                               &local->queue_stop_reason_lock,
+                                               flags);
+                                       ieee80211_purge_tx_queue(&local->hw,
+                                                                skbs);
+                                       return true;
+                               }
+                       } else {
+
                                /*
-                                * Drop off-channel frames if queues are stopped
-                                * for any reason other than off-channel
-                                * operation. Never queue them.
+                                * Since queue is stopped, queue up frames for
+                                * later transmission from the tx-pending
+                                * tasklet when the queue is woken again.
                                 */
-                               spin_unlock_irqrestore(
-                                       &local->queue_stop_reason_lock, flags);
-                               ieee80211_purge_tx_queue(&local->hw, skbs);
-                               return true;
+                               if (txpending)
+                                       skb_queue_splice_init(skbs,
+                                                             &local->pending[q]);
+                               else
+                                       skb_queue_splice_tail_init(skbs,
+                                                                  &local->pending[q]);
+
+                               spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+                                                      flags);
+                               return false;
                        }
-
-                       /*
-                        * Since queue is stopped, queue up frames for later
-                        * transmission from the tx-pending tasklet when the
-                        * queue is woken again.
-                        */
-                       if (txpending)
-                               skb_queue_splice_init(skbs, &local->pending[q]);
-                       else
-                               skb_queue_splice_tail_init(skbs,
-                                                          &local->pending[q]);
-
-                       spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-                                              flags);
-                       return false;
                }
                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
@@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                }
 
                if (!is_multicast_ether_addr(skb->data)) {
+                       struct sta_info *next_hop;
+                       bool mpp_lookup = true;
+
                        mpath = mesh_path_lookup(sdata, skb->data);
-                       if (!mpath)
+                       if (mpath) {
+                               mpp_lookup = false;
+                               next_hop = rcu_dereference(mpath->next_hop);
+                               if (!next_hop ||
+                                   !(mpath->flags & (MESH_PATH_ACTIVE |
+                                                     MESH_PATH_RESOLVING)))
+                                       mpp_lookup = true;
+                       }
+
+                       if (mpp_lookup)
                                mppath = mpp_path_lookup(sdata, skb->data);
+
+                       if (mppath && mpath)
+                               mesh_path_del(mpath->sdata, mpath->dst);
                }
 
                /*
@@ -2017,24 +2038,14 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                skb = skb_clone(skb, GFP_ATOMIC);
                if (skb) {
                        unsigned long flags;
-                       int id, r;
+                       int id;
 
                        spin_lock_irqsave(&local->ack_status_lock, flags);
-                       r = idr_get_new_above(&local->ack_status_frames,
-                                             orig_skb, 1, &id);
-                       if (r == -EAGAIN) {
-                               idr_pre_get(&local->ack_status_frames,
-                                           GFP_ATOMIC);
-                               r = idr_get_new_above(&local->ack_status_frames,
-                                                     orig_skb, 1, &id);
-                       }
-                       if (WARN_ON(!id) || id > 0xffff) {
-                               idr_remove(&local->ack_status_frames, id);
-                               r = -ERANGE;
-                       }
+                       id = idr_alloc(&local->ack_status_frames, orig_skb,
+                                      1, 0x10000, GFP_ATOMIC);
                        spin_unlock_irqrestore(&local->ack_status_lock, flags);
 
-                       if (!r) {
+                       if (id >= 0) {
                                info_id = id;
                                info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
                        } else if (skb_shared(skb)) {
@@ -2360,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
        if (local->tim_in_locked_section) {
                __ieee80211_beacon_add_tim(sdata, ps, skb);
        } else {
-               spin_lock(&local->tim_lock);
+               spin_lock_bh(&local->tim_lock);
                __ieee80211_beacon_add_tim(sdata, ps, skb);
-               spin_unlock(&local->tim_lock);
+               spin_unlock_bh(&local->tim_lock);
        }
 
        return 0;
@@ -2734,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
                                cpu_to_le16(IEEE80211_FCTL_MOREDATA);
                }
 
-               sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
+               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                       sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
                if (!ieee80211_tx_prepare(sdata, &tx, skb))
                        break;
                dev_kfree_skb_any(skb);
index 9f00db7..704e514 100644 (file)
@@ -259,13 +259,12 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
        unsigned int hash;
        struct ip_vs_conn *cp;
-       struct hlist_node *n;
 
        hash = ip_vs_conn_hashkey_param(p, false);
 
        ct_read_lock(hash);
 
-       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
                if (cp->af == p->af &&
                    p->cport == cp->cport && p->vport == cp->vport &&
                    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
@@ -344,13 +343,12 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
        unsigned int hash;
        struct ip_vs_conn *cp;
-       struct hlist_node *n;
 
        hash = ip_vs_conn_hashkey_param(p, false);
 
        ct_read_lock(hash);
 
-       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
                if (!ip_vs_conn_net_eq(cp, p->net))
                        continue;
                if (p->pe_data && p->pe->ct_match) {
@@ -394,7 +392,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
        unsigned int hash;
        struct ip_vs_conn *cp, *ret=NULL;
-       struct hlist_node *n;
 
        /*
         *      Check for "full" addressed entries
@@ -403,7 +400,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 
        ct_read_lock(hash);
 
-       hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+       hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
                if (cp->af == p->af &&
                    p->vport == cp->cport && p->cport == cp->dport &&
                    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
@@ -953,11 +950,10 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
        int idx;
        struct ip_vs_conn *cp;
        struct ip_vs_iter_state *iter = seq->private;
-       struct hlist_node *n;
 
        for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
                ct_read_lock_bh(idx);
-               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+               hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
                        if (pos-- == 0) {
                                iter->l = &ip_vs_conn_tab[idx];
                                return cp;
@@ -981,7 +977,6 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct ip_vs_conn *cp = v;
        struct ip_vs_iter_state *iter = seq->private;
-       struct hlist_node *e;
        struct hlist_head *l = iter->l;
        int idx;
 
@@ -990,15 +985,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                return ip_vs_conn_array(seq, 0);
 
        /* more on same hash chain? */
-       if ((e = cp->c_list.next))
-               return hlist_entry(e, struct ip_vs_conn, c_list);
+       if (cp->c_list.next)
+               return hlist_entry(cp->c_list.next, struct ip_vs_conn, c_list);
 
        idx = l - ip_vs_conn_tab;
        ct_read_unlock_bh(idx);
 
        while (++idx < ip_vs_conn_tab_size) {
                ct_read_lock_bh(idx);
-               hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
+               hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
                        iter->l = &ip_vs_conn_tab[idx];
                        return cp;
                }
@@ -1200,14 +1195,13 @@ void ip_vs_random_dropentry(struct net *net)
         */
        for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
                unsigned int hash = net_random() & ip_vs_conn_tab_mask;
-               struct hlist_node *n;
 
                /*
                 *  Lock is actually needed in this loop.
                 */
                ct_write_lock_bh(hash);
 
-               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
+               hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
                        if (cp->flags & IP_VS_CONN_F_TEMPLATE)
                                /* connection template */
                                continue;
@@ -1255,14 +1249,12 @@ static void ip_vs_conn_flush(struct net *net)
 
 flush_again:
        for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-               struct hlist_node *n;
-
                /*
                 *  Lock is actually needed in this loop.
                 */
                ct_write_lock_bh(idx);
 
-               hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
+               hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
                        if (!ip_vs_conn_net_eq(cp, net))
                                continue;
                        IP_VS_DBG(4, "del connection\n");
index 3921e5b..8c10e3d 100644 (file)
@@ -90,14 +90,13 @@ __nf_ct_expect_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_expect *i;
-       struct hlist_node *n;
        unsigned int h;
 
        if (!net->ct.expect_count)
                return NULL;
 
        h = nf_ct_expect_dst_hash(tuple);
-       hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
+       hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone)
                        return i;
@@ -130,14 +129,13 @@ nf_ct_find_expectation(struct net *net, u16 zone,
                       const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_expect *i, *exp = NULL;
-       struct hlist_node *n;
        unsigned int h;
 
        if (!net->ct.expect_count)
                return NULL;
 
        h = nf_ct_expect_dst_hash(tuple);
-       hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
+       hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone) {
@@ -172,13 +170,13 @@ void nf_ct_remove_expectations(struct nf_conn *ct)
 {
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
 
        /* Optimization: most connection never expect any others. */
        if (!help)
                return;
 
-       hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+       hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
@@ -348,9 +346,8 @@ static void evict_oldest_expect(struct nf_conn *master,
 {
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
-       struct hlist_node *n;
 
-       hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
+       hlist_for_each_entry(exp, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }
@@ -369,7 +366,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        unsigned int h;
        int ret = 1;
 
@@ -378,7 +375,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
-       hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
+       hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
index bb4188f..94b4b98 100644 (file)
@@ -116,14 +116,13 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_helper *helper;
        struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
-       struct hlist_node *n;
        unsigned int h;
 
        if (!nf_ct_helper_count)
                return NULL;
 
        h = helper_hash(tuple);
-       hlist_for_each_entry_rcu(helper, n, &nf_ct_helper_hash[h], hnode) {
+       hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
                if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
                        return helper;
        }
@@ -134,11 +133,10 @@ struct nf_conntrack_helper *
 __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
 {
        struct nf_conntrack_helper *h;
-       struct hlist_node *n;
        unsigned int i;
 
        for (i = 0; i < nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
+               hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
                        if (!strcmp(h->name, name) &&
                            h->tuple.src.l3num == l3num &&
                            h->tuple.dst.protonum == protonum)
@@ -366,7 +364,6 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
        int ret = 0;
        struct nf_conntrack_helper *cur;
-       struct hlist_node *n;
        unsigned int h = helper_hash(&me->tuple);
 
        BUG_ON(me->expect_policy == NULL);
@@ -374,7 +371,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
        BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
 
        mutex_lock(&nf_ct_helper_mutex);
-       hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) {
+       hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
                if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
                    cur->tuple.src.l3num == me->tuple.src.l3num &&
                    cur->tuple.dst.protonum == me->tuple.dst.protonum) {
@@ -395,13 +392,13 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
 {
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_expect *exp;
-       const struct hlist_node *n, *next;
+       const struct hlist_node *next;
        const struct hlist_nulls_node *nn;
        unsigned int i;
 
        /* Get rid of expectations */
        for (i = 0; i < nf_ct_expect_hsize; i++) {
-               hlist_for_each_entry_safe(exp, n, next,
+               hlist_for_each_entry_safe(exp, next,
                                          &net->ct.expect_hash[i], hnode) {
                        struct nf_conn_help *help = nfct_help(exp->master);
                        if ((rcu_dereference_protected(
index 5d60e04..9904b15 100644 (file)
@@ -2370,14 +2370,13 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        struct net *net = sock_net(skb->sk);
        struct nf_conntrack_expect *exp, *last;
        struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
-       struct hlist_node *n;
        u_int8_t l3proto = nfmsg->nfgen_family;
 
        rcu_read_lock();
        last = (struct nf_conntrack_expect *)cb->args[1];
        for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-               hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
+               hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
                                     hnode) {
                        if (l3proto && exp->tuple.src.l3num != l3proto)
                                continue;
@@ -2510,7 +2509,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
        struct nf_conntrack_expect *exp;
        struct nf_conntrack_tuple tuple;
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        u_int8_t u3 = nfmsg->nfgen_family;
        unsigned int i;
        u16 zone;
@@ -2557,7 +2556,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                /* delete all expectations for this helper */
                spin_lock_bh(&nf_conntrack_lock);
                for (i = 0; i < nf_ct_expect_hsize; i++) {
-                       hlist_for_each_entry_safe(exp, n, next,
+                       hlist_for_each_entry_safe(exp, next,
                                                  &net->ct.expect_hash[i],
                                                  hnode) {
                                m_help = nfct_help(exp->master);
@@ -2575,7 +2574,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                /* This basically means we have to flush everything*/
                spin_lock_bh(&nf_conntrack_lock);
                for (i = 0; i < nf_ct_expect_hsize; i++) {
-                       hlist_for_each_entry_safe(exp, n, next,
+                       hlist_for_each_entry_safe(exp, next,
                                                  &net->ct.expect_hash[i],
                                                  hnode) {
                                if (del_timer(&exp->timeout)) {
index 069229d..0e7d423 100644 (file)
@@ -855,11 +855,11 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
 {
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        int found = 0;
 
        spin_lock_bh(&nf_conntrack_lock);
-       hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+       hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                if (exp->class != SIP_EXPECT_SIGNALLING ||
                    !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
                    exp->tuple.dst.protonum != proto ||
@@ -881,10 +881,10 @@ static void flush_expectations(struct nf_conn *ct, bool media)
 {
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
 
        spin_lock_bh(&nf_conntrack_lock);
-       hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
+       hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
                        continue;
                if (!del_timer(&exp->timeout))
index 5f2f910..8d5769c 100644 (file)
@@ -191,9 +191,8 @@ find_appropriate_src(struct net *net, u16 zone,
        unsigned int h = hash_by_src(net, zone, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;
-       const struct hlist_node *n;
 
-       hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) {
+       hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
                ct = nat->ct;
                if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
                        /* Copy source part from reply tuple. */
index 945950a..a191b6d 100644 (file)
@@ -282,7 +282,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
        const char *helper_name;
        struct nf_conntrack_helper *cur, *helper = NULL;
        struct nf_conntrack_tuple tuple;
-       struct hlist_node *n;
        int ret = 0, i;
 
        if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
@@ -296,7 +295,7 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
 
        rcu_read_lock();
        for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-               hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+               hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
 
                        /* skip non-userspace conntrack helpers. */
                        if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -452,13 +451,12 @@ static int
 nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct nf_conntrack_helper *cur, *last;
-       struct hlist_node *n;
 
        rcu_read_lock();
        last = (struct nf_conntrack_helper *)cb->args[1];
        for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
 restart:
-               hlist_for_each_entry_rcu(cur, n,
+               hlist_for_each_entry_rcu(cur,
                                &nf_ct_helper_hash[cb->args[0]], hnode) {
 
                        /* skip non-userspace conntrack helpers. */
@@ -495,7 +493,6 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
 {
        int ret = -ENOENT, i;
        struct nf_conntrack_helper *cur;
-       struct hlist_node *n;
        struct sk_buff *skb2;
        char *helper_name = NULL;
        struct nf_conntrack_tuple tuple;
@@ -520,7 +517,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
        }
 
        for (i = 0; i < nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+               hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
 
                        /* skip non-userspace conntrack helpers. */
                        if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -568,7 +565,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
 {
        char *helper_name = NULL;
        struct nf_conntrack_helper *cur;
-       struct hlist_node *n, *tmp;
+       struct hlist_node *tmp;
        struct nf_conntrack_tuple tuple;
        bool tuple_set = false, found = false;
        int i, j = 0, ret;
@@ -585,7 +582,7 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
        }
 
        for (i = 0; i < nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+               hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
                                                                hnode) {
                        /* skip non-userspace conntrack helpers. */
                        if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
@@ -654,13 +651,13 @@ err_out:
 static void __exit nfnl_cthelper_exit(void)
 {
        struct nf_conntrack_helper *cur;
-       struct hlist_node *n, *tmp;
+       struct hlist_node *tmp;
        int i;
 
        nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
 
        for (i=0; i<nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+               hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
                                                                        hnode) {
                        /* skip non-userspace conntrack helpers. */
                        if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
index 92fd8ec..f248db5 100644 (file)
@@ -87,11 +87,10 @@ static struct nfulnl_instance *
 __instance_lookup(u_int16_t group_num)
 {
        struct hlist_head *head;
-       struct hlist_node *pos;
        struct nfulnl_instance *inst;
 
        head = &instance_table[instance_hashfn(group_num)];
-       hlist_for_each_entry_rcu(inst, pos, head, hlist) {
+       hlist_for_each_entry_rcu(inst, head, hlist) {
                if (inst->group_num == group_num)
                        return inst;
        }
@@ -717,11 +716,11 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
                /* destroy all instances for this portid */
                spin_lock_bh(&instances_lock);
                for  (i = 0; i < INSTANCE_BUCKETS; i++) {
-                       struct hlist_node *tmp, *t2;
+                       struct hlist_node *t2;
                        struct nfulnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];
 
-                       hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
+                       hlist_for_each_entry_safe(inst, t2, head, hlist) {
                                if ((net_eq(n->net, &init_net)) &&
                                    (n->portid == inst->peer_portid))
                                        __instance_destroy(inst);
index 3158d87..858fd52 100644 (file)
@@ -80,11 +80,10 @@ static struct nfqnl_instance *
 instance_lookup(u_int16_t queue_num)
 {
        struct hlist_head *head;
-       struct hlist_node *pos;
        struct nfqnl_instance *inst;
 
        head = &instance_table[instance_hashfn(queue_num)];
-       hlist_for_each_entry_rcu(inst, pos, head, hlist) {
+       hlist_for_each_entry_rcu(inst, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
@@ -583,11 +582,10 @@ nfqnl_dev_drop(int ifindex)
        rcu_read_lock();
 
        for (i = 0; i < INSTANCE_BUCKETS; i++) {
-               struct hlist_node *tmp;
                struct nfqnl_instance *inst;
                struct hlist_head *head = &instance_table[i];
 
-               hlist_for_each_entry_rcu(inst, tmp, head, hlist)
+               hlist_for_each_entry_rcu(inst, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }
 
@@ -627,11 +625,11 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
                /* destroy all instances for this portid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
-                       struct hlist_node *tmp, *t2;
+                       struct hlist_node *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];
 
-                       hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
+                       hlist_for_each_entry_safe(inst, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->portid == inst->peer_portid))
                                        __instance_destroy(inst);
index f264032..370adf6 100644 (file)
@@ -43,12 +43,11 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
 struct xt_rateest *xt_rateest_lookup(const char *name)
 {
        struct xt_rateest *est;
-       struct hlist_node *n;
        unsigned int h;
 
        h = xt_rateest_hash(name);
        mutex_lock(&xt_rateest_mutex);
-       hlist_for_each_entry(est, n, &rateest_hash[h], list) {
+       hlist_for_each_entry(est, &rateest_hash[h], list) {
                if (strcmp(est->name, name) == 0) {
                        est->refcnt++;
                        mutex_unlock(&xt_rateest_mutex);
index 70b5591..c40b269 100644 (file)
@@ -101,7 +101,7 @@ static int count_them(struct net *net,
 {
        const struct nf_conntrack_tuple_hash *found;
        struct xt_connlimit_conn *conn;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
        struct nf_conn *found_ct;
        struct hlist_head *hash;
        bool addit = true;
@@ -115,7 +115,7 @@ static int count_them(struct net *net,
        rcu_read_lock();
 
        /* check the saved connections */
-       hlist_for_each_entry_safe(conn, pos, n, hash, node) {
+       hlist_for_each_entry_safe(conn, n, hash, node) {
                found    = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
                                                 &conn->tuple);
                found_ct = NULL;
@@ -258,14 +258,14 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
 {
        const struct xt_connlimit_info *info = par->matchinfo;
        struct xt_connlimit_conn *conn;
-       struct hlist_node *pos, *n;
+       struct hlist_node *n;
        struct hlist_head *hash = info->data->iphash;
        unsigned int i;
 
        nf_ct_l3proto_module_put(par->family);
 
        for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
-               hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) {
+               hlist_for_each_entry_safe(conn, n, &hash[i], node) {
                        hlist_del(&conn->node);
                        kfree(conn);
                }
index 98218c8..f330e8b 100644 (file)
@@ -141,11 +141,10 @@ dsthash_find(const struct xt_hashlimit_htable *ht,
             const struct dsthash_dst *dst)
 {
        struct dsthash_ent *ent;
-       struct hlist_node *pos;
        u_int32_t hash = hash_dst(ht, dst);
 
        if (!hlist_empty(&ht->hash[hash])) {
-               hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node)
+               hlist_for_each_entry_rcu(ent, &ht->hash[hash], node)
                        if (dst_cmp(ent, dst)) {
                                spin_lock(&ent->lock);
                                return ent;
@@ -297,8 +296,8 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
        spin_lock_bh(&ht->lock);
        for (i = 0; i < ht->cfg.size; i++) {
                struct dsthash_ent *dh;
-               struct hlist_node *pos, *n;
-               hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
+               struct hlist_node *n;
+               hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
                        if ((*select)(ht, dh))
                                dsthash_free(ht, dh);
                }
@@ -343,9 +342,8 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
 {
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
        struct xt_hashlimit_htable *hinfo;
-       struct hlist_node *pos;
 
-       hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
+       hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
                if (!strcmp(name, hinfo->pde->name) &&
                    hinfo->family == family) {
                        hinfo->use++;
@@ -821,10 +819,9 @@ static int dl_seq_show(struct seq_file *s, void *v)
        struct xt_hashlimit_htable *htable = s->private;
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;
-       struct hlist_node *pos;
 
        if (!hlist_empty(&htable->hash[*bucket])) {
-               hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
+               hlist_for_each_entry(ent, &htable->hash[*bucket], node)
                        if (dl_seq_real_show(ent, htable->family, s))
                                return -1;
        }
@@ -877,7 +874,6 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
 static void __net_exit hashlimit_proc_net_exit(struct net *net)
 {
        struct xt_hashlimit_htable *hinfo;
-       struct hlist_node *pos;
        struct proc_dir_entry *pde;
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 
@@ -890,7 +886,7 @@ static void __net_exit hashlimit_proc_net_exit(struct net *net)
        if (pde == NULL)
                pde = hashlimit_net->ip6t_hashlimit;
 
-       hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node)
+       hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
                remove_proc_entry(hinfo->pde->name, pde);
 
        hashlimit_net->ipt_hashlimit = NULL;
index 31bf233..d9cad31 100644 (file)
@@ -540,7 +540,7 @@ static ssize_t
 recent_mt_proc_write(struct file *file, const char __user *input,
                     size_t size, loff_t *loff)
 {
-       const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+       const struct proc_dir_entry *pde = PDE(file_inode(file));
        struct recent_table *t = pde->data;
        struct recent_entry *e;
        char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
index 3d55e0c..1e3fd5b 100644 (file)
@@ -248,11 +248,10 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
        struct nl_portid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
        struct sock *sk;
-       struct hlist_node *node;
 
        read_lock(&nl_table_lock);
        head = nl_portid_hashfn(hash, portid);
-       sk_for_each(sk, node, head) {
+       sk_for_each(sk, head) {
                if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
                        sock_hold(sk);
                        goto found;
@@ -312,9 +311,9 @@ static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
 
        for (i = 0; i <= omask; i++) {
                struct sock *sk;
-               struct hlist_node *node, *tmp;
+               struct hlist_node *tmp;
 
-               sk_for_each_safe(sk, node, tmp, &otable[i])
+               sk_for_each_safe(sk, tmp, &otable[i])
                        __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
        }
 
@@ -344,7 +343,6 @@ static void
 netlink_update_listeners(struct sock *sk)
 {
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
-       struct hlist_node *node;
        unsigned long mask;
        unsigned int i;
        struct listeners *listeners;
@@ -355,7 +353,7 @@ netlink_update_listeners(struct sock *sk)
 
        for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
                mask = 0;
-               sk_for_each_bound(sk, node, &tbl->mc_list) {
+               sk_for_each_bound(sk, &tbl->mc_list) {
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
@@ -371,18 +369,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
-       struct hlist_node *node;
        int len;
 
        netlink_table_grab();
        head = nl_portid_hashfn(hash, portid);
        len = 0;
-       sk_for_each(osk, node, head) {
+       sk_for_each(osk, head) {
                if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
                        break;
                len++;
        }
-       if (node)
+       if (osk)
                goto err;
 
        err = -EBUSY;
@@ -575,7 +572,6 @@ static int netlink_autobind(struct socket *sock)
        struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        struct sock *osk;
-       struct hlist_node *node;
        s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;
@@ -584,7 +580,7 @@ retry:
        cond_resched();
        netlink_table_grab();
        head = nl_portid_hashfn(hash, portid);
-       sk_for_each(osk, node, head) {
+       sk_for_each(osk, head) {
                if (!net_eq(sock_net(osk), net))
                        continue;
                if (nlk_sk(osk)->portid == portid) {
@@ -809,7 +805,7 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 
 struct sock *netlink_getsockbyfilp(struct file *filp)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct sock *sock;
 
        if (!S_ISSOCK(inode->i_mode))
@@ -1101,7 +1097,6 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
 {
        struct net *net = sock_net(ssk);
        struct netlink_broadcast_data info;
-       struct hlist_node *node;
        struct sock *sk;
 
        skb = netlink_trim(skb, allocation);
@@ -1124,7 +1119,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
 
        netlink_lock_table();
 
-       sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
+       sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
                do_one_broadcast(sk, &info);
 
        consume_skb(skb);
@@ -1200,7 +1195,6 @@ out:
 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
 {
        struct netlink_set_err_data info;
-       struct hlist_node *node;
        struct sock *sk;
        int ret = 0;
 
@@ -1212,7 +1206,7 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
 
        read_lock(&nl_table_lock);
 
-       sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
+       sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
                ret += do_one_set_err(sk, &info);
 
        read_unlock(&nl_table_lock);
@@ -1676,10 +1670,9 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
 {
        struct sock *sk;
-       struct hlist_node *node;
        struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
 
-       sk_for_each_bound(sk, node, &tbl->mc_list)
+       sk_for_each_bound(sk, &tbl->mc_list)
                netlink_update_socket_mc(nlk_sk(sk), group, 0);
 }
 
@@ -1974,14 +1967,13 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
        struct nl_seq_iter *iter = seq->private;
        int i, j;
        struct sock *s;
-       struct hlist_node *node;
        loff_t off = 0;
 
        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_portid_hash *hash = &nl_table[i].hash;
 
                for (j = 0; j <= hash->mask; j++) {
-                       sk_for_each(s, node, &hash->table[j]) {
+                       sk_for_each(s, &hash->table[j]) {
                                if (sock_net(s) != seq_file_net(seq))
                                        continue;
                                if (off == pos) {
index 297b07a..d1fa1d9 100644 (file)
@@ -104,10 +104,9 @@ static void nr_remove_socket(struct sock *sk)
 static void nr_kill_by_device(struct net_device *dev)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&nr_list_lock);
-       sk_for_each(s, node, &nr_list)
+       sk_for_each(s, &nr_list)
                if (nr_sk(s)->device == dev)
                        nr_disconnect(s, ENETUNREACH);
        spin_unlock_bh(&nr_list_lock);
@@ -149,10 +148,9 @@ static void nr_insert_socket(struct sock *sk)
 static struct sock *nr_find_listener(ax25_address *addr)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&nr_list_lock);
-       sk_for_each(s, node, &nr_list)
+       sk_for_each(s, &nr_list)
                if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
                    s->sk_state == TCP_LISTEN) {
                        bh_lock_sock(s);
@@ -170,10 +168,9 @@ found:
 static struct sock *nr_find_socket(unsigned char index, unsigned char id)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&nr_list_lock);
-       sk_for_each(s, node, &nr_list) {
+       sk_for_each(s, &nr_list) {
                struct nr_sock *nr = nr_sk(s);
 
                if (nr->my_index == index && nr->my_id == id) {
@@ -194,10 +191,9 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
        ax25_address *dest)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&nr_list_lock);
-       sk_for_each(s, node, &nr_list) {
+       sk_for_each(s, &nr_list) {
                struct nr_sock *nr = nr_sk(s);
 
                if (nr->your_index == index && nr->your_id == id &&
index 70ffff7..b976d5e 100644 (file)
@@ -49,10 +49,9 @@ static struct nr_node *nr_node_get(ax25_address *callsign)
 {
        struct nr_node *found = NULL;
        struct nr_node *nr_node;
-       struct hlist_node *node;
 
        spin_lock_bh(&nr_node_list_lock);
-       nr_node_for_each(nr_node, node, &nr_node_list)
+       nr_node_for_each(nr_node, &nr_node_list)
                if (ax25cmp(callsign, &nr_node->callsign) == 0) {
                        nr_node_hold(nr_node);
                        found = nr_node;
@@ -67,10 +66,9 @@ static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
 {
        struct nr_neigh *found = NULL;
        struct nr_neigh *nr_neigh;
-       struct hlist_node *node;
 
        spin_lock_bh(&nr_neigh_list_lock);
-       nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
+       nr_neigh_for_each(nr_neigh, &nr_neigh_list)
                if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
                    nr_neigh->dev == dev) {
                        nr_neigh_hold(nr_neigh);
@@ -114,10 +112,9 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
         */
        if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
                struct nr_node *nr_nodet;
-               struct hlist_node *node;
 
                spin_lock_bh(&nr_node_list_lock);
-               nr_node_for_each(nr_nodet, node, &nr_node_list) {
+               nr_node_for_each(nr_nodet, &nr_node_list) {
                        nr_node_lock(nr_nodet);
                        for (i = 0; i < nr_nodet->count; i++)
                                if (nr_nodet->routes[i].neighbour == nr_neigh)
@@ -485,11 +482,11 @@ static int nr_dec_obs(void)
 {
        struct nr_neigh *nr_neigh;
        struct nr_node  *s;
-       struct hlist_node *node, *nodet;
+       struct hlist_node *nodet;
        int i;
 
        spin_lock_bh(&nr_node_list_lock);
-       nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
+       nr_node_for_each_safe(s, nodet, &nr_node_list) {
                nr_node_lock(s);
                for (i = 0; i < s->count; i++) {
                        switch (s->routes[i].obs_count) {
@@ -540,15 +537,15 @@ static int nr_dec_obs(void)
 void nr_rt_device_down(struct net_device *dev)
 {
        struct nr_neigh *s;
-       struct hlist_node *node, *nodet, *node2, *node2t;
+       struct hlist_node *nodet, *node2t;
        struct nr_node  *t;
        int i;
 
        spin_lock_bh(&nr_neigh_list_lock);
-       nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
+       nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
                if (s->dev == dev) {
                        spin_lock_bh(&nr_node_list_lock);
-                       nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
+                       nr_node_for_each_safe(t, node2t, &nr_node_list) {
                                nr_node_lock(t);
                                for (i = 0; i < t->count; i++) {
                                        if (t->routes[i].neighbour == s) {
@@ -737,11 +734,10 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
 void nr_link_failed(ax25_cb *ax25, int reason)
 {
        struct nr_neigh *s, *nr_neigh = NULL;
-       struct hlist_node *node;
        struct nr_node  *nr_node = NULL;
 
        spin_lock_bh(&nr_neigh_list_lock);
-       nr_neigh_for_each(s, node, &nr_neigh_list) {
+       nr_neigh_for_each(s, &nr_neigh_list) {
                if (s->ax25 == ax25) {
                        nr_neigh_hold(s);
                        nr_neigh = s;
@@ -761,7 +757,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
                return;
        }
        spin_lock_bh(&nr_node_list_lock);
-       nr_node_for_each(nr_node, node, &nr_node_list) {
+       nr_node_for_each(nr_node, &nr_node_list) {
                nr_node_lock(nr_node);
                if (nr_node->which < nr_node->count &&
                    nr_node->routes[nr_node->which].neighbour == nr_neigh)
@@ -1013,16 +1009,16 @@ void __exit nr_rt_free(void)
 {
        struct nr_neigh *s = NULL;
        struct nr_node  *t = NULL;
-       struct hlist_node *node, *nodet;
+       struct hlist_node *nodet;
 
        spin_lock_bh(&nr_neigh_list_lock);
        spin_lock_bh(&nr_node_list_lock);
-       nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
+       nr_node_for_each_safe(t, nodet, &nr_node_list) {
                nr_node_lock(t);
                nr_remove_node_locked(t);
                nr_node_unlock(t);
        }
-       nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
+       nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
                while(s->count) {
                        s->count--;
                        nr_neigh_put(s);
index 746f5a2..7f8266d 100644 (file)
@@ -71,14 +71,14 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
 static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 {
        struct sock *sk;
-       struct hlist_node *node, *tmp;
+       struct hlist_node *tmp;
        struct nfc_llcp_sock *llcp_sock;
 
        skb_queue_purge(&local->tx_queue);
 
        write_lock(&local->sockets.lock);
 
-       sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
+       sk_for_each_safe(sk, tmp, &local->sockets.head) {
                llcp_sock = nfc_llcp_sock(sk);
 
                bh_lock_sock(sk);
@@ -171,7 +171,6 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
                                               u8 ssap, u8 dsap)
 {
        struct sock *sk;
-       struct hlist_node *node;
        struct nfc_llcp_sock *llcp_sock, *tmp_sock;
 
        pr_debug("ssap dsap %d %d\n", ssap, dsap);
@@ -183,7 +182,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
 
        llcp_sock = NULL;
 
-       sk_for_each(sk, node, &local->sockets.head) {
+       sk_for_each(sk, &local->sockets.head) {
                tmp_sock = nfc_llcp_sock(sk);
 
                if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
@@ -272,7 +271,6 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
                                            u8 *sn, size_t sn_len)
 {
        struct sock *sk;
-       struct hlist_node *node;
        struct nfc_llcp_sock *llcp_sock, *tmp_sock;
 
        pr_debug("sn %zd %p\n", sn_len, sn);
@@ -284,7 +282,7 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
 
        llcp_sock = NULL;
 
-       sk_for_each(sk, node, &local->sockets.head) {
+       sk_for_each(sk, &local->sockets.head) {
                tmp_sock = nfc_llcp_sock(sk);
 
                pr_debug("llcp sock %p\n", tmp_sock);
@@ -601,14 +599,13 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
 void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
                               struct sk_buff *skb, u8 direction)
 {
-       struct hlist_node *node;
        struct sk_buff *skb_copy = NULL, *nskb;
        struct sock *sk;
        u8 *data;
 
        read_lock(&local->raw_sockets.lock);
 
-       sk_for_each(sk, node, &local->raw_sockets.head) {
+       sk_for_each(sk, &local->raw_sockets.head) {
                if (sk->sk_state != LLCP_BOUND)
                        continue;
 
@@ -697,11 +694,10 @@ static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local
 {
        struct sock *sk;
        struct nfc_llcp_sock *llcp_sock;
-       struct hlist_node *node;
 
        read_lock(&local->connecting_sockets.lock);
 
-       sk_for_each(sk, node, &local->connecting_sockets.head) {
+       sk_for_each(sk, &local->connecting_sockets.head) {
                llcp_sock = nfc_llcp_sock(sk);
 
                if (llcp_sock->ssap == ssap) {
index 9dc537d..e87a265 100644 (file)
@@ -158,11 +158,10 @@ static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
 {
        struct vport *vport;
-       struct hlist_node *n;
        struct hlist_head *head;
 
        head = vport_hash_bucket(dp, port_no);
-       hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
+       hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
@@ -1386,9 +1385,9 @@ static void __dp_destroy(struct datapath *dp)
 
        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
-               struct hlist_node *node, *n;
+               struct hlist_node *n;
 
-               hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
+               hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
                        if (vport->port_no != OVSP_LOCAL)
                                ovs_dp_detach_port(vport);
        }
@@ -1825,10 +1824,9 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
-               struct hlist_node *n;
 
                j = 0;
-               hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
+               hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
                        if (j >= skip &&
                            ovs_vport_cmd_fill_info(vport, skb,
                                                    NETLINK_CB(cb->skb).portid,
index c3294ce..20605ec 100644 (file)
@@ -299,10 +299,10 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
        for (i = 0; i < table->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(table->buckets, i);
-               struct hlist_node *node, *n;
+               struct hlist_node *n;
                int ver = table->node_ver;
 
-               hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+               hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
                        hlist_del_rcu(&flow->hash_node[ver]);
                        ovs_flow_free(flow);
                }
@@ -332,7 +332,6 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
 {
        struct sw_flow *flow;
        struct hlist_head *head;
-       struct hlist_node *n;
        int ver;
        int i;
 
@@ -340,7 +339,7 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
        while (*bucket < table->n_buckets) {
                i = 0;
                head = flex_array_get(table->buckets, *bucket);
-               hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+               hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
@@ -367,11 +366,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;
-               struct hlist_node *n;
 
                head = flex_array_get(old->buckets, i);
 
-               hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+               hlist_for_each_entry(flow, head, hash_node[old_ver])
                        ovs_flow_tbl_insert(new, flow);
        }
        old->keep_flows = true;
@@ -766,14 +764,13 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
                                struct sw_flow_key *key, int key_len)
 {
        struct sw_flow *flow;
-       struct hlist_node *n;
        struct hlist_head *head;
        u32 hash;
 
        hash = ovs_flow_hash(key, key_len);
 
        head = find_bucket(table, hash);
-       hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+       hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
 
                if (flow->hash == hash &&
                    !memcmp(&flow->key, key, key_len)) {
index 70af0be..ba717cc 100644 (file)
@@ -86,9 +86,8 @@ struct vport *ovs_vport_locate(struct net *net, const char *name)
 {
        struct hlist_head *bucket = hash_bucket(net, name);
        struct vport *vport;
-       struct hlist_node *node;
 
-       hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
+       hlist_for_each_entry_rcu(vport, bucket, hash_node)
                if (!strcmp(name, vport->ops->get_name(vport)) &&
                    net_eq(ovs_dp_get_net(vport->dp), net))
                        return vport;
index c7bfeff..1d6793d 100644 (file)
@@ -3263,12 +3263,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
 {
        struct sock *sk;
-       struct hlist_node *node;
        struct net_device *dev = data;
        struct net *net = dev_net(dev);
 
        rcu_read_lock();
-       sk_for_each_rcu(sk, node, &net->packet.sklist) {
+       sk_for_each_rcu(sk, &net->packet.sklist) {
                struct packet_sock *po = pkt_sk(sk);
 
                switch (msg) {
index 8db6e21..d3fcd1e 100644 (file)
@@ -172,13 +172,12 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct packet_diag_req *req;
        struct net *net;
        struct sock *sk;
-       struct hlist_node *node;
 
        net = sock_net(skb->sk);
        req = nlmsg_data(cb->nlh);
 
        mutex_lock(&net->packet.sklist_lock);
-       sk_for_each(sk, node, &net->packet.sklist) {
+       sk_for_each(sk, &net->packet.sklist) {
                if (!net_eq(sock_net(sk), net))
                        continue;
                if (num < s_num)
index 576f22c..e774117 100644 (file)
@@ -640,11 +640,10 @@ static struct sock *pep_find_pipe(const struct hlist_head *hlist,
                                        const struct sockaddr_pn *dst,
                                        u8 pipe_handle)
 {
-       struct hlist_node *node;
        struct sock *sknode;
        u16 dobj = pn_sockaddr_get_object(dst);
 
-       sk_for_each(sknode, node, hlist) {
+       sk_for_each(sknode, hlist) {
                struct pep_sock *pnnode = pep_sk(sknode);
 
                /* Ports match, but addresses might not: */
index b7e9827..1afd138 100644 (file)
@@ -76,7 +76,6 @@ static struct hlist_head *pn_hash_list(u16 obj)
  */
 struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
 {
-       struct hlist_node *node;
        struct sock *sknode;
        struct sock *rval = NULL;
        u16 obj = pn_sockaddr_get_object(spn);
@@ -84,7 +83,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
        struct hlist_head *hlist = pn_hash_list(obj);
 
        rcu_read_lock();
-       sk_for_each_rcu(sknode, node, hlist) {
+       sk_for_each_rcu(sknode, hlist) {
                struct pn_sock *pn = pn_sk(sknode);
                BUG_ON(!pn->sobject); /* unbound socket */
 
@@ -120,10 +119,9 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
 
        rcu_read_lock();
        for (h = 0; h < PN_HASHSIZE; h++) {
-               struct hlist_node *node;
                struct sock *sknode;
 
-               sk_for_each(sknode, node, hlist) {
+               sk_for_each(sknode, hlist) {
                        struct sk_buff *clone;
 
                        if (!net_eq(sock_net(sknode), net))
@@ -543,12 +541,11 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
 {
        struct net *net = seq_file_net(seq);
        struct hlist_head *hlist = pnsocks.hlist;
-       struct hlist_node *node;
        struct sock *sknode;
        unsigned int h;
 
        for (h = 0; h < PN_HASHSIZE; h++) {
-               sk_for_each_rcu(sknode, node, hlist) {
+               sk_for_each_rcu(sknode, hlist) {
                        if (!net_eq(net, sock_net(sknode)))
                                continue;
                        if (!pos)
index 637bde5..b5ad65a 100644 (file)
@@ -52,13 +52,12 @@ static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
                                        struct rds_sock *insert)
 {
        struct rds_sock *rs;
-       struct hlist_node *node;
        struct hlist_head *head = hash_to_bucket(addr, port);
        u64 cmp;
        u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
+       hlist_for_each_entry_rcu(rs, head, rs_bound_node) {
                cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
                      be16_to_cpu(rs->rs_bound_port);
 
index 9e07c75..642ad42 100644 (file)
@@ -69,9 +69,8 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
                                              struct rds_transport *trans)
 {
        struct rds_connection *conn, *ret = NULL;
-       struct hlist_node *pos;
 
-       hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+       hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
                                conn->c_trans == trans) {
                        ret = conn;
@@ -376,7 +375,6 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
                                  int want_send)
 {
        struct hlist_head *head;
-       struct hlist_node *pos;
        struct list_head *list;
        struct rds_connection *conn;
        struct rds_message *rm;
@@ -390,7 +388,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
 
        for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
             i++, head++) {
-               hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+               hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                        if (want_send)
                                list = &conn->c_send_queue;
                        else
@@ -439,7 +437,6 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
 {
        uint64_t buffer[(item_len + 7) / 8];
        struct hlist_head *head;
-       struct hlist_node *pos;
        struct rds_connection *conn;
        size_t i;
 
@@ -450,7 +447,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
 
        for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
             i++, head++) {
-               hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
+               hlist_for_each_entry_rcu(conn, head, c_hash_node) {
 
                        /* XXX no c_lock usage.. */
                        if (!visitor(conn, buffer))
index f0a4658..aba232f 100644 (file)
@@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
 void rds_message_put(struct rds_message *rm)
 {
        rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
-       if (atomic_read(&rm->m_refcount) == 0) {
-printk(KERN_CRIT "danger refcount zero on %p\n", rm);
-WARN_ON(1);
-       }
+       WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
        if (atomic_dec_and_test(&rm->m_refcount)) {
                BUG_ON(!list_empty(&rm->m_sock_item));
                BUG_ON(!list_empty(&rm->m_conn_item));
@@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
 {
        struct rds_message *rm;
 
+       if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
+               return NULL;
+
        rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
        if (!rm)
                goto out;
index b768fe9..cf68e6e 100644 (file)
@@ -165,10 +165,9 @@ static void rose_remove_socket(struct sock *sk)
 void rose_kill_by_neigh(struct rose_neigh *neigh)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&rose_list_lock);
-       sk_for_each(s, node, &rose_list) {
+       sk_for_each(s, &rose_list) {
                struct rose_sock *rose = rose_sk(s);
 
                if (rose->neighbour == neigh) {
@@ -186,10 +185,9 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
 static void rose_kill_by_device(struct net_device *dev)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&rose_list_lock);
-       sk_for_each(s, node, &rose_list) {
+       sk_for_each(s, &rose_list) {
                struct rose_sock *rose = rose_sk(s);
 
                if (rose->device == dev) {
@@ -246,10 +244,9 @@ static void rose_insert_socket(struct sock *sk)
 static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&rose_list_lock);
-       sk_for_each(s, node, &rose_list) {
+       sk_for_each(s, &rose_list) {
                struct rose_sock *rose = rose_sk(s);
 
                if (!rosecmp(&rose->source_addr, addr) &&
@@ -258,7 +255,7 @@ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
                        goto found;
        }
 
-       sk_for_each(s, node, &rose_list) {
+       sk_for_each(s, &rose_list) {
                struct rose_sock *rose = rose_sk(s);
 
                if (!rosecmp(&rose->source_addr, addr) &&
@@ -278,10 +275,9 @@ found:
 struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock_bh(&rose_list_lock);
-       sk_for_each(s, node, &rose_list) {
+       sk_for_each(s, &rose_list) {
                struct rose_sock *rose = rose_sk(s);
 
                if (rose->lci == lci && rose->neighbour == neigh)
index a181b48..c297e2a 100644 (file)
@@ -545,7 +545,7 @@ static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 {
        struct Qdisc_class_common *cl;
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        struct hlist_head *nhash, *ohash;
        unsigned int nsize, nmask, osize;
        unsigned int i, h;
@@ -564,7 +564,7 @@ void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
 
        sch_tree_lock(sch);
        for (i = 0; i < osize; i++) {
-               hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
+               hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
                        h = qdisc_class_hash(cl->classid, nmask);
                        hlist_add_head(&cl->hnode, &nhash[h]);
                }
index 0e19948..13aa47a 100644 (file)
@@ -1041,14 +1041,13 @@ static void cbq_adjust_levels(struct cbq_class *this)
 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 {
        struct cbq_class *cl;
-       struct hlist_node *n;
        unsigned int h;
 
        if (q->quanta[prio] == 0)
                return;
 
        for (h = 0; h < q->clhash.hashsize; h++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        /* BUGGGG... Beware! This expression suffer of
                         * arithmetic overflows!
                         */
@@ -1087,10 +1086,9 @@ static void cbq_sync_defmap(struct cbq_class *cl)
                        continue;
 
                for (h = 0; h < q->clhash.hashsize; h++) {
-                       struct hlist_node *n;
                        struct cbq_class *c;
 
-                       hlist_for_each_entry(c, n, &q->clhash.hash[h],
+                       hlist_for_each_entry(c, &q->clhash.hash[h],
                                             common.hnode) {
                                if (c->split == split && c->level < level &&
                                    c->defmap & (1<<i)) {
@@ -1210,7 +1208,6 @@ cbq_reset(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
-       struct hlist_node *n;
        int prio;
        unsigned int h;
 
@@ -1228,7 +1225,7 @@ cbq_reset(struct Qdisc *sch)
                q->active[prio] = NULL;
 
        for (h = 0; h < q->clhash.hashsize; h++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        qdisc_reset(cl->q);
 
                        cl->next_alive = NULL;
@@ -1697,7 +1694,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 static void cbq_destroy(struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        struct cbq_class *cl;
        unsigned int h;
 
@@ -1710,11 +1707,11 @@ static void cbq_destroy(struct Qdisc *sch)
         * be bound to classes which have been destroyed already. --TGR '04
         */
        for (h = 0; h < q->clhash.hashsize; h++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
+               hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
                        tcf_destroy_chain(&cl->filter_list);
        }
        for (h = 0; h < q->clhash.hashsize; h++) {
-               hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
+               hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
                                          common.hnode)
                        cbq_destroy_class(sch, cl);
        }
@@ -2013,14 +2010,13 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
-       struct hlist_node *n;
        unsigned int h;
 
        if (arg->stop)
                return;
 
        for (h = 0; h < q->clhash.hashsize; h++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
index 71e50c8..759b308 100644 (file)
@@ -293,14 +293,13 @@ static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
-       struct hlist_node *n;
        unsigned int i;
 
        if (arg->stop)
                return;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
@@ -451,11 +450,10 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 {
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
-       struct hlist_node *n;
        unsigned int i;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (cl->qdisc->q.qlen)
                                list_del(&cl->alist);
                        qdisc_reset(cl->qdisc);
@@ -468,13 +466,13 @@ static void drr_destroy_qdisc(struct Qdisc *sch)
 {
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        unsigned int i;
 
        tcf_destroy_chain(&q->filter_list);
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+               hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
                                          common.hnode)
                        drr_destroy_class(sch, cl);
        }
index 6c2ec45..9facea0 100644 (file)
@@ -1389,7 +1389,6 @@ static void
 hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
-       struct hlist_node *n;
        struct hfsc_class *cl;
        unsigned int i;
 
@@ -1397,7 +1396,7 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
                return;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i],
+               hlist_for_each_entry(cl, &q->clhash.hash[i],
                                     cl_common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
@@ -1523,11 +1522,10 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
-       struct hlist_node *n;
        unsigned int i;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+               hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
                        hfsc_reset_class(cl);
        }
        q->eligible = RB_ROOT;
@@ -1540,16 +1538,16 @@ static void
 hfsc_destroy_qdisc(struct Qdisc *sch)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        struct hfsc_class *cl;
        unsigned int i;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+               hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
                        tcf_destroy_chain(&cl->filter_list);
        }
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+               hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
                                          cl_common.hnode)
                        hfsc_destroy_class(sch, cl);
        }
@@ -1564,12 +1562,11 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;
        struct hfsc_class *cl;
-       struct hlist_node *n;
        unsigned int i;
 
        sch->qstats.backlog = 0;
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
+               hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
                        sch->qstats.backlog += cl->qdisc->qstats.backlog;
        }
 
index 03c2692..571f1d2 100644 (file)
@@ -949,11 +949,10 @@ static void htb_reset(struct Qdisc *sch)
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
-       struct hlist_node *n;
        unsigned int i;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (cl->level)
                                memset(&cl->un.inner, 0, sizeof(cl->un.inner));
                        else {
@@ -1218,7 +1217,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 static void htb_destroy(struct Qdisc *sch)
 {
        struct htb_sched *q = qdisc_priv(sch);
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        struct htb_class *cl;
        unsigned int i;
 
@@ -1232,11 +1231,11 @@ static void htb_destroy(struct Qdisc *sch)
        tcf_destroy_chain(&q->filter_list);
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
+               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
                        tcf_destroy_chain(&cl->filter_list);
        }
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+               hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
                                          common.hnode)
                        htb_destroy_class(sch, cl);
        }
@@ -1516,14 +1515,13 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
-       struct hlist_node *n;
        unsigned int i;
 
        if (arg->stop)
                return;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
index 6ed3765..d51852b 100644 (file)
@@ -276,9 +276,8 @@ static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
                                          u32 lmax, u32 weight)
 {
        struct qfq_aggregate *agg;
-       struct hlist_node *n;
 
-       hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next)
+       hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
                if (agg->lmax == lmax && agg->class_weight == weight)
                        return agg;
 
@@ -299,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
            new_num_classes == q->max_agg_classes - 1) /* agg no more full */
                hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
 
+       /* The next assignment may let
+        * agg->initial_budget > agg->budgetmax
+        * hold, we will take it into account in charge_actual_service().
+        */
        agg->budgetmax = new_num_classes * agg->lmax;
        new_agg_weight = agg->class_weight * new_num_classes;
        agg->inv_w = ONE_FP/new_agg_weight;
@@ -670,14 +673,13 @@ static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
-       struct hlist_node *n;
        unsigned int i;
 
        if (arg->stop)
                return;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
@@ -819,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q)
        unsigned long old_vslot = q->oldV >> q->min_slot_shift;
 
        if (vslot != old_vslot) {
-               unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
+               unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;
                qfq_move_groups(q, mask, IR, ER);
                qfq_move_groups(q, mask, IB, EB);
        }
@@ -990,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
 /* Update F according to the actual service received by the aggregate. */
 static inline void charge_actual_service(struct qfq_aggregate *agg)
 {
-       /* compute the service received by the aggregate */
-       u32 service_received = agg->initial_budget - agg->budget;
+       /* Compute the service received by the aggregate, taking into
+        * account that, after decreasing the number of classes in
+        * agg, it may happen that
+        * agg->initial_budget - agg->budget > agg->budgetmax
+        */
+       u32 service_received = min(agg->budgetmax,
+                                  agg->initial_budget - agg->budget);
 
        agg->F = agg->S + (u64)service_received * agg->inv_w;
 }
 
+static inline void qfq_update_agg_ts(struct qfq_sched *q,
+                                    struct qfq_aggregate *agg,
+                                    enum update_reason reason);
+
+static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
+
 static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
@@ -1023,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
                in_serv_agg->initial_budget = in_serv_agg->budget =
                        in_serv_agg->budgetmax;
 
-               if (!list_empty(&in_serv_agg->active))
+               if (!list_empty(&in_serv_agg->active)) {
                        /*
                         * Still active: reschedule for
                         * service. Possible optimization: if no other
@@ -1034,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
                         * handle it, we would need to maintain an
                         * extra num_active_aggs field.
                        */
-                       qfq_activate_agg(q, in_serv_agg, requeue);
-               else if (sch->q.qlen == 0) { /* no aggregate to serve */
+                       qfq_update_agg_ts(q, in_serv_agg, requeue);
+                       qfq_schedule_agg(q, in_serv_agg);
+               } else if (sch->q.qlen == 0) { /* no aggregate to serve */
                        q->in_serv_agg = NULL;
                        return NULL;
                }
@@ -1054,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
        qdisc_bstats_update(sch, skb);
 
        agg_dequeue(in_serv_agg, cl, len);
-       in_serv_agg->budget -= len;
+       /* If lmax is lowered, through qfq_change_class, for a class
+        * owning pending packets with larger size than the new value
+        * of lmax, then the following condition may hold.
+        */
+       if (unlikely(in_serv_agg->budget < len))
+               in_serv_agg->budget = 0;
+       else
+               in_serv_agg->budget -= len;
+
        q->V += (u64)len * IWSUM;
        pr_debug("qfq dequeue: len %u F %lld now %lld\n",
                 len, (unsigned long long) in_serv_agg->F,
@@ -1219,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        cl->deficit = agg->lmax;
        list_add_tail(&cl->alist, &agg->active);
 
-       if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
-               return err; /* aggregate was not empty, nothing else to do */
+       if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
+           q->in_serv_agg == agg)
+               return err; /* non-empty or in service, nothing else to do */
 
-       /* recharge budget */
-       agg->initial_budget = agg->budget = agg->budgetmax;
-
-       qfq_update_agg_ts(q, agg, enqueue);
-       if (q->in_serv_agg == NULL)
-               q->in_serv_agg = agg;
-       else if (agg != q->in_serv_agg)
-               qfq_schedule_agg(q, agg);
+       qfq_activate_agg(q, agg, enqueue);
 
        return err;
 }
@@ -1263,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
                /* group was surely ineligible, remove */
                __clear_bit(grp->index, &q->bitmaps[IR]);
                __clear_bit(grp->index, &q->bitmaps[IB]);
-       } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
+       } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
+                  q->in_serv_agg == NULL)
                q->V = roundedS;
 
        grp->S = roundedS;
@@ -1286,8 +1303,15 @@ skip_update:
 static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
                             enum update_reason reason)
 {
+       agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */
+
        qfq_update_agg_ts(q, agg, reason);
-       qfq_schedule_agg(q, agg);
+       if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
+               q->in_serv_agg = agg; /* start serving this aggregate */
+                /* update V: to be in service, agg must be eligible */
+               q->oldV = q->V = agg->S;
+       } else if (agg != q->in_serv_agg)
+               qfq_schedule_agg(q, agg);
 }
 
 static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
@@ -1359,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
                        __set_bit(grp->index, &q->bitmaps[s]);
                }
        }
-
-       qfq_update_eligible(q);
 }
 
 static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1376,11 +1398,10 @@ static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
                                       struct hlist_head *slot)
 {
        struct qfq_aggregate *agg;
-       struct hlist_node *n;
        struct qfq_class *cl;
        unsigned int len;
 
-       hlist_for_each_entry(agg, n, slot, next) {
+       hlist_for_each_entry(agg, slot, next) {
                list_for_each_entry(cl, &agg->active, alist) {
 
                        if (!cl->qdisc->ops->drop)
@@ -1459,11 +1480,10 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
-       struct hlist_node *n;
        unsigned int i;
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+               hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (cl->qdisc->q.qlen > 0)
                                qfq_deactivate_class(q, cl);
 
@@ -1477,13 +1497,13 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
-       struct hlist_node *n, *next;
+       struct hlist_node *next;
        unsigned int i;
 
        tcf_destroy_chain(&q->filter_list);
 
        for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+               hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
                                          common.hnode) {
                        qfq_destroy_class(sch, cl);
                }
index 2f95f5a..43cd0dd 100644 (file)
@@ -1591,32 +1591,31 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
 /* Set an association id for a given association */
 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
 {
-       int assoc_id;
-       int error = 0;
+       bool preload = gfp & __GFP_WAIT;
+       int ret;
 
        /* If the id is already assigned, keep it. */
        if (asoc->assoc_id)
-               return error;
-retry:
-       if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
-               return -ENOMEM;
+               return 0;
 
+       if (preload)
+               idr_preload(gfp);
        spin_lock_bh(&sctp_assocs_id_lock);
-       error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
-                                   idr_low, &assoc_id);
-       if (!error) {
-               idr_low = assoc_id + 1;
+       /* 0 is not a valid id, idr_low is always >= 1 */
+       ret = idr_alloc(&sctp_assocs_id, asoc, idr_low, 0, GFP_NOWAIT);
+       if (ret >= 0) {
+               idr_low = ret + 1;
                if (idr_low == INT_MAX)
                        idr_low = 1;
        }
        spin_unlock_bh(&sctp_assocs_id_lock);
-       if (error == -EAGAIN)
-               goto retry;
-       else if (error)
-               return error;
+       if (preload)
+               idr_preload_end();
+       if (ret < 0)
+               return ret;
 
-       asoc->assoc_id = (sctp_assoc_t) assoc_id;
-       return error;
+       asoc->assoc_id = (sctp_assoc_t)ret;
+       return 0;
 }
 
 /* Free the ASCONF queue */
index 73aad3d..12ed45d 100644 (file)
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
        /* SCTP-AUTH extensions*/
        INIT_LIST_HEAD(&ep->endpoint_shared_keys);
-       null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
+       null_key = sctp_auth_shkey_create(0, gfp);
        if (!null_key)
                goto nomem;
 
@@ -332,7 +332,6 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
        struct sctp_transport *t = NULL;
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
-       struct hlist_node *node;
        int hash;
        int rport;
 
@@ -350,7 +349,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
                                 rport);
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
-       sctp_for_each_hentry(epb, node, &head->chain) {
+       sctp_for_each_hentry(epb, &head->chain) {
                tmp = sctp_assoc(epb);
                if (tmp->ep != ep || rport != tmp->peer.port)
                        continue;
index 965bbbb..4b2c831 100644 (file)
@@ -784,13 +784,12 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
        struct sctp_endpoint *ep;
-       struct hlist_node *node;
        int hash;
 
        hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
        head = &sctp_ep_hashtable[hash];
        read_lock(&head->lock);
-       sctp_for_each_hentry(epb, node, &head->chain) {
+       sctp_for_each_hentry(epb, &head->chain) {
                ep = sctp_ep(epb);
                if (sctp_endpoint_is_match(ep, net, laddr))
                        goto hit;
@@ -876,7 +875,6 @@ static struct sctp_association *__sctp_lookup_association(
        struct sctp_ep_common *epb;
        struct sctp_association *asoc;
        struct sctp_transport *transport;
-       struct hlist_node *node;
        int hash;
 
        /* Optimize here for direct hit, only listening connections can
@@ -886,7 +884,7 @@ static struct sctp_association *__sctp_lookup_association(
                                 ntohs(peer->v4.sin_port));
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
-       sctp_for_each_hentry(epb, node, &head->chain) {
+       sctp_for_each_hentry(epb, &head->chain) {
                asoc = sctp_assoc(epb);
                transport = sctp_assoc_is_match(asoc, net, local, peer);
                if (transport)
index 8c19e97..ab3bba8 100644 (file)
@@ -213,7 +213,6 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
        struct sctp_ep_common *epb;
        struct sctp_endpoint *ep;
        struct sock *sk;
-       struct hlist_node *node;
        int    hash = *(loff_t *)v;
 
        if (hash >= sctp_ep_hashsize)
@@ -222,7 +221,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
        head = &sctp_ep_hashtable[hash];
        sctp_local_bh_disable();
        read_lock(&head->lock);
-       sctp_for_each_hentry(epb, node, &head->chain) {
+       sctp_for_each_hentry(epb, &head->chain) {
                ep = sctp_ep(epb);
                sk = epb->sk;
                if (!net_eq(sock_net(sk), seq_file_net(seq)))
@@ -321,7 +320,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        struct sctp_ep_common *epb;
        struct sctp_association *assoc;
        struct sock *sk;
-       struct hlist_node *node;
        int    hash = *(loff_t *)v;
 
        if (hash >= sctp_assoc_hashsize)
@@ -330,7 +328,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        head = &sctp_assoc_hashtable[hash];
        sctp_local_bh_disable();
        read_lock(&head->lock);
-       sctp_for_each_hentry(epb, node, &head->chain) {
+       sctp_for_each_hentry(epb, &head->chain) {
                assoc = sctp_assoc(epb);
                sk = epb->sk;
                if (!net_eq(sock_net(sk), seq_file_net(seq)))
@@ -436,7 +434,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
        struct sctp_association *assoc;
-       struct hlist_node *node;
        struct sctp_transport *tsp;
        int    hash = *(loff_t *)v;
 
@@ -447,7 +444,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        sctp_local_bh_disable();
        read_lock(&head->lock);
        rcu_read_lock();
-       sctp_for_each_hentry(epb, node, &head->chain) {
+       sctp_for_each_hentry(epb, &head->chain) {
                if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
                        continue;
                assoc = sctp_assoc(epb);
index cedd9bf..b907073 100644 (file)
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
        if (len < sizeof(sctp_assoc_t))
                return -EINVAL;
 
+       /* Allow the struct to grow and fill in as much as possible */
+       len = min_t(size_t, len, sizeof(sas));
+
        if (copy_from_user(&sas, optval, len))
                return -EFAULT;
 
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
        /* Mark beginning of a new observation period */
        asoc->stats.max_obs_rto = asoc->rto_min;
 
-       /* Allow the struct to grow and fill in as much as possible */
-       len = min_t(size_t, len, sizeof(sas));
-
        if (put_user(len, optlen))
                return -EFAULT;
 
@@ -5882,8 +5882,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
        struct sctp_bind_hashbucket *head; /* hash list */
-       struct sctp_bind_bucket *pp; /* hash list port iterator */
-       struct hlist_node *node;
+       struct sctp_bind_bucket *pp;
        unsigned short snum;
        int ret;
 
@@ -5910,7 +5909,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                        index = sctp_phashfn(sock_net(sk), rover);
                        head = &sctp_port_hashtable[index];
                        sctp_spin_lock(&head->lock);
-                       sctp_for_each_hentry(pp, node, &head->chain)
+                       sctp_for_each_hentry(pp, &head->chain)
                                if ((pp->port == rover) &&
                                    net_eq(sock_net(sk), pp->net))
                                        goto next;
@@ -5938,7 +5937,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                 */
                head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
                sctp_spin_lock(&head->lock);
-               sctp_for_each_hentry(pp, node, &head->chain) {
+               sctp_for_each_hentry(pp, &head->chain) {
                        if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
                                goto pp_found;
                }
@@ -5970,7 +5969,7 @@ pp_found:
                 * that this port/socket (sk) combination are already
                 * in an endpoint.
                 */
-               sk_for_each_bound(sk2, node, &pp->owner) {
+               sk_for_each_bound(sk2, &pp->owner) {
                        struct sctp_endpoint *ep2;
                        ep2 = sctp_sk(sk2)->ep;
 
index 442ad4e..825ea94 100644 (file)
@@ -41,8 +41,6 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-#define MAX_KMALLOC_SIZE       131072
-
 static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
                                            __u16 out);
 
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
        int size;
 
        size = sctp_ssnmap_size(in, out);
-       if (size <= MAX_KMALLOC_SIZE)
+       if (size <= KMALLOC_MAX_SIZE)
                retval = kmalloc(size, gfp);
        else
                retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
        return retval;
 
 fail_map:
-       if (size <= MAX_KMALLOC_SIZE)
+       if (size <= KMALLOC_MAX_SIZE)
                kfree(retval);
        else
                free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
                int size;
 
                size = sctp_ssnmap_size(map->in.len, map->out.len);
-               if (size <= MAX_KMALLOC_SIZE)
+               if (size <= KMALLOC_MAX_SIZE)
                        kfree(map);
                else
                        free_pages((unsigned long)map, get_order(size));
index 5f25e0c..396c451 100644 (file)
@@ -51,7 +51,7 @@
 static void sctp_tsnmap_update(struct sctp_tsnmap *map);
 static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
                                     __u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
 
 /* Initialize a block of memory as a tsnmap.  */
 struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
 
        gap = tsn - map->base_tsn;
 
-       if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+       if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
                return -ENOMEM;
 
        if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
        return ngaps;
 }
 
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
 {
        unsigned long *new;
        unsigned long inc;
        u16  len;
 
-       if (gap >= SCTP_TSN_MAP_SIZE)
+       if (size > SCTP_TSN_MAP_SIZE)
                return 0;
 
-       inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+       inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
        len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
 
        new = kzalloc(len>>3, GFP_ATOMIC);
        if (!new)
                return 0;
 
-       bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+       bitmap_copy(new, map->tsn_map,
+               map->max_tsn_seen - map->cumulative_tsn_ack_point);
        kfree(map->tsn_map);
        map->tsn_map = new;
        map->len = len;
index ada1746..0fd5b3d 100644 (file)
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;
+       int event_eor = 0;
 
        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        /* Send event to the ULP.  'event' is the sctp_ulpevent for
         * very first SKB on the 'temp' list.
         */
-       if (event)
+       if (event) {
+               event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_ulpq_tail_event(ulpq, event);
+       }
 
-       return 0;
+       return event_eor;
 }
 
 /* Add a new event for propagation to the ULP.  */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
                ctsn = cevent->tsn;
 
                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+               case SCTP_DATA_FIRST_FRAG:
+                       if (!first_frag)
+                               return NULL;
+                       goto done;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
-                       } else if (next_tsn == ctsn)
+                       } else if (next_tsn == ctsn) {
                                next_tsn++;
-                       else
+                               last_frag = pos;
+                       } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
                        } else
                                goto done;
                        break;
+
+               case SCTP_DATA_LAST_FRAG:
+                       if (!first_frag)
+                               return NULL;
+                       else
+                               goto done;
+                       break;
+
                default:
                        return NULL;
                }
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                struct sk_buff_head *list, __u16 needed)
 {
        __u16 freed = 0;
-       __u32 tsn;
-       struct sk_buff *skb;
+       __u32 tsn, last_tsn;
+       struct sk_buff *skb, *flist, *last;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;
 
        tsnmap = &ulpq->asoc->peer.tsn_map;
 
-       while ((skb = __skb_dequeue_tail(list)) != NULL) {
-               freed += skb_headlen(skb);
+       while ((skb = skb_peek_tail(list)) != NULL) {
                event = sctp_skb2event(skb);
                tsn = event->tsn;
 
+               /* Don't renege below the Cumulative TSN ACK Point. */
+               if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+                       break;
+
+               /* Events in ordering queue may have multiple fragments
+                * corresponding to additional TSNs.  Sum the total
+                * freed space; find the last TSN.
+                */
+               freed += skb_headlen(skb);
+               flist = skb_shinfo(skb)->frag_list;
+               for (last = flist; flist; flist = flist->next) {
+                       last = flist;
+                       freed += skb_headlen(last);
+               }
+               if (last)
+                       last_tsn = sctp_skb2event(last)->tsn;
+               else
+                       last_tsn = tsn;
+
+               /* Unlink the event, then renege all applicable TSNs. */
+               __skb_unlink(skb, list);
                sctp_ulpevent_free(event);
-               sctp_tsnmap_renege(tsnmap, tsn);
+               while (TSN_lte(tsn, last_tsn)) {
+                       sctp_tsnmap_renege(tsnmap, tsn);
+                       tsn++;
+               }
                if (freed >= needed)
                        return freed;
        }
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;
+       __u32 ctsn;
+       struct sk_buff *skb;
 
        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);
 
        /* If the association is already in Partial Delivery mode
-        * we have noting to do.
+        * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;
 
+       /* Data must be at or below the Cumulative TSN ACK Point to
+        * start partial delivery.
+        */
+       skb = skb_peek(&asoc->ulpq.reasm);
+       if (skb != NULL) {
+               ctsn = sctp_skb2event(skb)->tsn;
+               if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+                       return;
+       }
+
        /* If the user enabled fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
-               __u32 tsn;
-               tsn = ntohl(chunk->subh.data_hdr->tsn);
-               sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-               sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-               sctp_ulpq_partial_delivery(ulpq, gfp);
+               int retval;
+               retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+               /*
+                * Enter partial delivery if chunk has not been
+                * delivered; otherwise, drain the reassembly queue.
+                */
+               if (retval <= 0)
+                       sctp_ulpq_partial_delivery(ulpq, gfp);
+               else if (retval == 1)
+                       sctp_ulpq_reasm_drain(ulpq);
        }
 
        sk_mem_reclaim(asoc->base.sk);
index ee0d029..88f759a 100644 (file)
@@ -369,16 +369,15 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
 
        file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
                  &socket_file_ops);
-       if (unlikely(!file)) {
+       if (unlikely(IS_ERR(file))) {
                /* drop dentry, keep inode */
                ihold(path.dentry->d_inode);
                path_put(&path);
-               return ERR_PTR(-ENFILE);
+               return file;
        }
 
        sock->file = file;
        file->f_flags = O_RDWR | (flags & O_NONBLOCK);
-       file->f_pos = 0;
        file->private_data = sock;
        return file;
 }
index d11418f..a622ad6 100644 (file)
@@ -17,7 +17,8 @@
  */
 
 #include <net/ipv6.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/sunrpc/msg_prot.h>
 #include <linux/slab.h>
 #include <linux/export.h>
 
index 392adc4..f529404 100644 (file)
@@ -407,7 +407,6 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
 {
        LIST_HEAD(free);
        struct rpc_cred_cache *cache = auth->au_credcache;
-       struct hlist_node *pos;
        struct rpc_cred *cred = NULL,
                        *entry, *new;
        unsigned int nr;
@@ -415,7 +414,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
        nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
+       hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) {
                if (!entry->cr_ops->crmatch(acred, entry, flags))
                        continue;
                spin_lock(&cache->lock);
@@ -439,7 +438,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
        }
 
        spin_lock(&cache->lock);
-       hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) {
+       hlist_for_each_entry(entry, &cache->hashtable[nr], cr_hash) {
                if (!entry->cr_ops->crmatch(acred, entry, flags))
                        continue;
                cred = get_rpccred(entry);
index 6ea29f4..5257d29 100644 (file)
@@ -620,7 +620,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
        const void *p, *end;
        void *buf;
        struct gss_upcall_msg *gss_msg;
-       struct rpc_pipe *pipe = RPC_I(filp->f_dentry->d_inode)->pipe;
+       struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
        struct gss_cl_ctx *ctx;
        uid_t id;
        kuid_t uid;
index 107c452..88edec9 100644 (file)
@@ -574,6 +574,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
        buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
        buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
 
+       /* Trim off the checksum blob */
+       xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip);
        return GSS_S_COMPLETE;
 }
 
index ecd1d58..f7d34e7 100644 (file)
@@ -182,12 +182,6 @@ static void rsi_request(struct cache_detail *cd,
        (*bpp)[-1] = '\n';
 }
 
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-       return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
-}
-
-
 static int rsi_parse(struct cache_detail *cd,
                    char *mesg, int mlen)
 {
@@ -275,7 +269,7 @@ static struct cache_detail rsi_cache_template = {
        .hash_size      = RSI_HASHMAX,
        .name           = "auth.rpcsec.init",
        .cache_put      = rsi_put,
-       .cache_upcall   = rsi_upcall,
+       .cache_request  = rsi_request,
        .cache_parse    = rsi_parse,
        .match          = rsi_match,
        .init           = rsi_init,
@@ -825,13 +819,17 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
  *     The server uses base of head iovec as read pointer, while the
  *     client uses separate pointer. */
 static int
-unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 {
        int stat = -EINVAL;
        u32 integ_len, maj_stat;
        struct xdr_netobj mic;
        struct xdr_buf integ_buf;
 
+       /* Did we already verify the signature on the original pass through? */
+       if (rqstp->rq_deferred)
+               return 0;
+
        integ_len = svc_getnl(&buf->head[0]);
        if (integ_len & 3)
                return stat;
@@ -854,6 +852,8 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
                goto out;
        if (svc_getnl(&buf->head[0]) != seq)
                goto out;
+       /* trim off the mic at the end before returning */
+       xdr_buf_trim(buf, mic.len + 4);
        stat = 0;
 out:
        kfree(mic.data);
@@ -1198,7 +1198,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
                        /* placeholders for length and seq. number: */
                        svc_putnl(resv, 0);
                        svc_putnl(resv, 0);
-                       if (unwrap_integ_data(&rqstp->rq_arg,
+                       if (unwrap_integ_data(rqstp, &rqstp->rq_arg,
                                        gc->gc_seq, rsci->mechctx))
                                goto garbage_args;
                        break;
index 9afa439..25d58e7 100644 (file)
@@ -196,9 +196,9 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_update);
 
 static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
 {
-       if (!cd->cache_upcall)
-               return -EINVAL;
-       return cd->cache_upcall(cd, h);
+       if (cd->cache_upcall)
+               return cd->cache_upcall(cd, h);
+       return sunrpc_cache_pipe_upcall(cd, h);
 }
 
 static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
@@ -670,13 +670,13 @@ static void cache_revisit_request(struct cache_head *item)
 {
        struct cache_deferred_req *dreq;
        struct list_head pending;
-       struct hlist_node *lp, *tmp;
+       struct hlist_node *tmp;
        int hash = DFR_HASH(item);
 
        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);
 
-       hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+       hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
                if (dreq->item == item) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
@@ -750,12 +750,24 @@ struct cache_reader {
        int                     offset; /* if non-0, we have a refcnt on next request */
 };
 
+static int cache_request(struct cache_detail *detail,
+                              struct cache_request *crq)
+{
+       char *bp = crq->buf;
+       int len = PAGE_SIZE;
+
+       detail->cache_request(detail, crq->item, &bp, &len);
+       if (len < 0)
+               return -EAGAIN;
+       return PAGE_SIZE - len;
+}
+
 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *ppos, struct cache_detail *cd)
 {
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int err;
 
        if (count == 0)
@@ -784,6 +796,13 @@ static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
                rq->readers++;
        spin_unlock(&queue_lock);
 
+       if (rq->len == 0) {
+               err = cache_request(cd, rq);
+               if (err < 0)
+                       goto out;
+               rq->len = err;
+       }
+
        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                spin_lock(&queue_lock);
@@ -886,7 +905,7 @@ static ssize_t cache_write(struct file *filp, const char __user *buf,
                           struct cache_detail *cd)
 {
        struct address_space *mapping = filp->f_mapping;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        ssize_t ret = -EINVAL;
 
        if (!cd->cache_parse)
@@ -1140,17 +1159,14 @@ static bool cache_listeners_exist(struct cache_detail *detail)
  *
  * Each request is at most one page long.
  */
-int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
-               void (*cache_request)(struct cache_detail *,
-                                     struct cache_head *,
-                                     char **,
-                                     int *))
+int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
 {
 
        char *buf;
        struct cache_request *crq;
-       char *bp;
-       int len;
+
+       if (!detail->cache_request)
+               return -EINVAL;
 
        if (!cache_listeners_exist(detail)) {
                warn_no_listener(detail);
@@ -1167,19 +1183,10 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
                return -EAGAIN;
        }
 
-       bp = buf; len = PAGE_SIZE;
-
-       cache_request(detail, h, &bp, &len);
-
-       if (len < 0) {
-               kfree(buf);
-               kfree(crq);
-               return -EAGAIN;
-       }
        crq->q.reader = 0;
        crq->item = cache_get(h);
        crq->buf = buf;
-       crq->len = PAGE_SIZE - len;
+       crq->len = 0;
        crq->readers = 0;
        spin_lock(&queue_lock);
        list_add_tail(&crq->q.list, &detail->queue);
@@ -1454,7 +1461,7 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
 {
-       struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+       struct cache_detail *cd = PDE(file_inode(filp))->data;
 
        return cache_read(filp, buf, count, ppos, cd);
 }
@@ -1462,14 +1469,14 @@ static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
 {
-       struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+       struct cache_detail *cd = PDE(file_inode(filp))->data;
 
        return cache_write(filp, buf, count, ppos, cd);
 }
 
 static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
 {
-       struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+       struct cache_detail *cd = PDE(file_inode(filp))->data;
 
        return cache_poll(filp, wait, cd);
 }
@@ -1477,7 +1484,7 @@ static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
 static long cache_ioctl_procfs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct cache_detail *cd = PDE(inode)->data;
 
        return cache_ioctl(inode, filp, cmd, arg, cd);
@@ -1546,7 +1553,7 @@ static int release_flush_procfs(struct inode *inode, struct file *filp)
 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
                            size_t count, loff_t *ppos)
 {
-       struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+       struct cache_detail *cd = PDE(file_inode(filp))->data;
 
        return read_flush(filp, buf, count, ppos, cd);
 }
@@ -1555,7 +1562,7 @@ static ssize_t write_flush_procfs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
 {
-       struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+       struct cache_detail *cd = PDE(file_inode(filp))->data;
 
        return write_flush(filp, buf, count, ppos, cd);
 }
@@ -1605,7 +1612,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
        if (p == NULL)
                goto out_nomem;
 
-       if (cd->cache_upcall || cd->cache_parse) {
+       if (cd->cache_request || cd->cache_parse) {
                p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                     cd->u.procfs.proc_ent,
                                     &cache_file_operations_procfs, cd);
@@ -1614,7 +1621,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
                        goto out_nomem;
        }
        if (cd->cache_show) {
-               p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
+               p = proc_create_data("content", S_IFREG|S_IRUSR,
                                cd->u.procfs.proc_ent,
                                &content_file_operations_procfs, cd);
                cd->u.procfs.content_ent = p;
@@ -1686,7 +1693,7 @@ EXPORT_SYMBOL_GPL(cache_destroy_net);
 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
 {
-       struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+       struct cache_detail *cd = RPC_I(file_inode(filp))->private;
 
        return cache_read(filp, buf, count, ppos, cd);
 }
@@ -1694,14 +1701,14 @@ static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
 {
-       struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+       struct cache_detail *cd = RPC_I(file_inode(filp))->private;
 
        return cache_write(filp, buf, count, ppos, cd);
 }
 
 static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
 {
-       struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+       struct cache_detail *cd = RPC_I(file_inode(filp))->private;
 
        return cache_poll(filp, wait, cd);
 }
@@ -1709,7 +1716,7 @@ static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
 static long cache_ioctl_pipefs(struct file *filp,
                              unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct cache_detail *cd = RPC_I(inode)->private;
 
        return cache_ioctl(inode, filp, cmd, arg, cd);
@@ -1778,7 +1785,7 @@ static int release_flush_pipefs(struct inode *inode, struct file *filp)
 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
                            size_t count, loff_t *ppos)
 {
-       struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+       struct cache_detail *cd = RPC_I(file_inode(filp))->private;
 
        return read_flush(filp, buf, count, ppos, cd);
 }
@@ -1787,7 +1794,7 @@ static ssize_t write_flush_pipefs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
 {
-       struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+       struct cache_detail *cd = RPC_I(file_inode(filp))->private;
 
        return write_flush(filp, buf, count, ppos, cd);
 }
index a9f7906..dcc446e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/rcupdate.h>
 
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
 #include <linux/sunrpc/metrics.h>
 #include <linux/sunrpc/bc_xprt.h>
@@ -1195,6 +1196,21 @@ size_t rpc_max_payload(struct rpc_clnt *clnt)
 }
 EXPORT_SYMBOL_GPL(rpc_max_payload);
 
+/**
+ * rpc_get_timeout - Get timeout for transport in units of HZ
+ * @clnt: RPC client to query
+ */
+unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
+{
+       unsigned long ret;
+
+       rcu_read_lock();
+       ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
+       rcu_read_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rpc_get_timeout);
+
 /**
  * rpc_force_rebind - force transport to check that remote port is unchanged
  * @clnt: client to rebind
index fd10981..7b9b402 100644 (file)
@@ -284,7 +284,7 @@ out:
 static ssize_t
 rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct rpc_pipe *pipe;
        struct rpc_pipe_msg *msg;
        int res = 0;
@@ -328,7 +328,7 @@ out_unlock:
 static ssize_t
 rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        int res;
 
        mutex_lock(&inode->i_mutex);
@@ -342,7 +342,7 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of
 static unsigned int
 rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct rpc_inode *rpci = RPC_I(inode);
        unsigned int mask = POLLOUT | POLLWRNORM;
 
@@ -360,7 +360,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
 static long
 rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct rpc_pipe *pipe;
        int len;
 
@@ -830,7 +830,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
  * responses to upcalls.  They will result in calls to @msg->downcall.
  *
  * The @private argument passed here will be available to all these methods
- * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
+ * from the file pointer, via RPC_I(file_inode(file))->private.
  */
 struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
                                 void *private, struct rpc_pipe *pipe)
index 795a0f4..3df764d 100644 (file)
@@ -26,6 +26,7 @@
 #include <net/ipv6.h>
 
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/xprtsock.h>
 
index dbf12ac..89a588b 100644 (file)
@@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
 
 void svc_shutdown_net(struct svc_serv *serv, struct net *net)
 {
-       /*
-        * The set of xprts (contained in the sv_tempsocks and
-        * sv_permsocks lists) is now constant, since it is modified
-        * only by accepting new sockets (done by service threads in
-        * svc_recv) or aging old ones (done by sv_temptimer), or
-        * configuration changes (excluded by whatever locking the
-        * caller is using--nfsd_mutex in the case of nfsd).  So it's
-        * safe to traverse those lists and shut everything down:
-        */
        svc_close_net(serv, net);
 
        if (serv->sv_shutdown)
@@ -1042,6 +1033,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
 /*
  * dprintk the given error with the address of the client that caused it.
  */
+#ifdef RPC_DEBUG
 static __printf(2, 3)
 void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
 {
@@ -1058,6 +1050,9 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
 
        va_end(args);
 }
+#else
+static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
+#endif
 
 /*
  * Common routine for processing the RPC request.
index b8e47fa..80a6640 100644 (file)
@@ -499,7 +499,8 @@ void svc_wake_up(struct svc_serv *serv)
                        rqstp->rq_xprt = NULL;
                         */
                        wake_up(&rqstp->rq_wait);
-               }
+               } else
+                       pool->sp_task_pending = 1;
                spin_unlock_bh(&pool->sp_lock);
        }
 }
@@ -634,7 +635,13 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
                 * long for cache updates.
                 */
                rqstp->rq_chandle.thread_wait = 1*HZ;
+               pool->sp_task_pending = 0;
        } else {
+               if (pool->sp_task_pending) {
+                       pool->sp_task_pending = 0;
+                       spin_unlock_bh(&pool->sp_lock);
+                       return ERR_PTR(-EAGAIN);
+               }
                /* No data pending. Go to sleep */
                svc_thread_enqueue(pool, rqstp);
 
@@ -856,7 +863,6 @@ static void svc_age_temp_xprts(unsigned long closure)
        struct svc_serv *serv = (struct svc_serv *)closure;
        struct svc_xprt *xprt;
        struct list_head *le, *next;
-       LIST_HEAD(to_be_aged);
 
        dprintk("svc_age_temp_xprts\n");
 
@@ -877,25 +883,15 @@ static void svc_age_temp_xprts(unsigned long closure)
                if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
                    test_bit(XPT_BUSY, &xprt->xpt_flags))
                        continue;
-               svc_xprt_get(xprt);
-               list_move(le, &to_be_aged);
+               list_del_init(le);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                set_bit(XPT_DETACHED, &xprt->xpt_flags);
-       }
-       spin_unlock_bh(&serv->sv_lock);
-
-       while (!list_empty(&to_be_aged)) {
-               le = to_be_aged.next;
-               /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
-               list_del_init(le);
-               xprt = list_entry(le, struct svc_xprt, xpt_list);
-
                dprintk("queuing xprt %p for closing\n", xprt);
 
                /* a thread will dequeue and close it soon */
                svc_xprt_enqueue(xprt);
-               svc_xprt_put(xprt);
        }
+       spin_unlock_bh(&serv->sv_lock);
 
        mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
 }
@@ -959,21 +955,24 @@ void svc_close_xprt(struct svc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
 
-static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
 {
        struct svc_xprt *xprt;
+       int ret = 0;
 
        spin_lock(&serv->sv_lock);
        list_for_each_entry(xprt, xprt_list, xpt_list) {
                if (xprt->xpt_net != net)
                        continue;
+               ret++;
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
-               set_bit(XPT_BUSY, &xprt->xpt_flags);
+               svc_xprt_enqueue(xprt);
        }
        spin_unlock(&serv->sv_lock);
+       return ret;
 }
 
-static void svc_clear_pools(struct svc_serv *serv, struct net *net)
+static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
 {
        struct svc_pool *pool;
        struct svc_xprt *xprt;
@@ -988,42 +987,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
                        if (xprt->xpt_net != net)
                                continue;
                        list_del_init(&xprt->xpt_ready);
+                       spin_unlock_bh(&pool->sp_lock);
+                       return xprt;
                }
                spin_unlock_bh(&pool->sp_lock);
        }
+       return NULL;
 }
 
-static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
 {
        struct svc_xprt *xprt;
-       struct svc_xprt *tmp;
-       LIST_HEAD(victims);
 
-       spin_lock(&serv->sv_lock);
-       list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
-               if (xprt->xpt_net != net)
-                       continue;
-               list_move(&xprt->xpt_list, &victims);
-       }
-       spin_unlock(&serv->sv_lock);
-
-       list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
+       while ((xprt = svc_dequeue_net(serv, net))) {
+               set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_delete_xprt(xprt);
+       }
 }
 
+/*
+ * Server threads may still be running (especially in the case where the
+ * service is still running in other network namespaces).
+ *
+ * So we shut down sockets the same way we would on a running server, by
+ * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
+ * the close.  In the case there are no such other threads,
+ * threads running, svc_clean_up_xprts() does a simple version of a
+ * server's main event loop, and in the case where there are other
+ * threads, we may need to wait a little while and then check again to
+ * see if they're done.
+ */
 void svc_close_net(struct svc_serv *serv, struct net *net)
 {
-       svc_close_list(serv, &serv->sv_tempsocks, net);
-       svc_close_list(serv, &serv->sv_permsocks, net);
+       int delay = 0;
 
-       svc_clear_pools(serv, net);
-       /*
-        * At this point the sp_sockets lists will stay empty, since
-        * svc_xprt_enqueue will not add new entries without taking the
-        * sp_lock and checking XPT_BUSY.
-        */
-       svc_clear_list(serv, &serv->sv_tempsocks, net);
-       svc_clear_list(serv, &serv->sv_permsocks, net);
+       while (svc_close_list(serv, &serv->sv_permsocks, net) +
+              svc_close_list(serv, &serv->sv_tempsocks, net)) {
+
+               svc_clean_up_xprts(serv, net);
+               msleep(delay++);
+       }
 }
 
 /*
index 7963569..2af7b0c 100644 (file)
@@ -138,13 +138,12 @@ auth_domain_lookup(char *name, struct auth_domain *new)
 {
        struct auth_domain *hp;
        struct hlist_head *head;
-       struct hlist_node *np;
 
        head = &auth_domain_table[hash_str(name, DN_HASHBITS)];
 
        spin_lock(&auth_domain_lock);
 
-       hlist_for_each_entry(hp, np, head, hash) {
+       hlist_for_each_entry(hp, head, hash) {
                if (strcmp(hp->name, name)==0) {
                        kref_get(&hp->ref);
                        spin_unlock(&auth_domain_lock);
index a1852e1..c3f9e1e 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/svcauth.h>
 #include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/err.h>
 #include <linux/seq_file.h>
 #include <linux/hash.h>
@@ -17,7 +18,6 @@
 #include <linux/user_namespace.h>
 #define RPCDBG_FACILITY        RPCDBG_AUTH
 
-#include <linux/sunrpc/clnt.h>
 
 #include "netns.h"
 
@@ -157,11 +157,6 @@ static void ip_map_request(struct cache_detail *cd,
        (*bpp)[-1] = '\n';
 }
 
-static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-       return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
-}
-
 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
 
@@ -475,11 +470,6 @@ static void unix_gid_request(struct cache_detail *cd,
        (*bpp)[-1] = '\n';
 }
 
-static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-       return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
-}
-
 static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
 
 static int unix_gid_parse(struct cache_detail *cd,
@@ -586,7 +576,7 @@ static struct cache_detail unix_gid_cache_template = {
        .hash_size      = GID_HASHMAX,
        .name           = "auth.unix.gid",
        .cache_put      = unix_gid_put,
-       .cache_upcall   = unix_gid_upcall,
+       .cache_request  = unix_gid_request,
        .cache_parse    = unix_gid_parse,
        .cache_show     = unix_gid_show,
        .match          = unix_gid_match,
@@ -885,7 +875,7 @@ static struct cache_detail ip_map_cache_template = {
        .hash_size      = IP_HASHMAX,
        .name           = "auth.unix.ip",
        .cache_put      = ip_map_put,
-       .cache_upcall   = ip_map_upcall,
+       .cache_request  = ip_map_request,
        .cache_parse    = ip_map_parse,
        .cache_show     = ip_map_show,
        .match          = ip_map_match,
index 5605563..75edcfa 100644 (file)
@@ -879,6 +879,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
 }
 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
 
+/**
+ * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
+ * @buf: buf to be trimmed
+ * @len: number of bytes to reduce "buf" by
+ *
+ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
+ * that it's possible that we'll trim less than that amount if the xdr_buf is
+ * too small, or if (for instance) it's all in the head and the parser has
+ * already read too far into it.
+ */
+void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
+{
+       size_t cur;
+       unsigned int trim = len;
+
+       if (buf->tail[0].iov_len) {
+               cur = min_t(size_t, buf->tail[0].iov_len, trim);
+               buf->tail[0].iov_len -= cur;
+               trim -= cur;
+               if (!trim)
+                       goto fix_len;
+       }
+
+       if (buf->page_len) {
+               cur = min_t(unsigned int, buf->page_len, trim);
+               buf->page_len -= cur;
+               trim -= cur;
+               if (!trim)
+                       goto fix_len;
+       }
+
+       if (buf->head[0].iov_len) {
+               cur = min_t(size_t, buf->head[0].iov_len, trim);
+               buf->head[0].iov_len -= cur;
+               trim -= cur;
+       }
+fix_len:
+       buf->len -= (len - trim);
+}
+EXPORT_SYMBOL_GPL(xdr_buf_trim);
+
 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 {
        unsigned int this_len;
index 846c34f..b7478d5 100644 (file)
@@ -487,13 +487,17 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
  * xprt_wait_for_buffer_space - wait for transport output buffer to clear
  * @task: task to be put to sleep
  * @action: function pointer to be executed after wait
+ *
+ * Note that we only set the timer for the case of RPC_IS_SOFT(), since
+ * we don't in general want to force a socket disconnection due to
+ * an incomplete RPC call transmission.
  */
 void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
 
-       task->tk_timeout = req->rq_timeout;
+       task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
        rpc_sleep_on(&xprt->pending, task, action);
 }
 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
index d007428..794312f 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
+#include <linux/sunrpc/addr.h>
 
 #include "xprt_rdma.h"
 
index 37cbda6..c1d8476 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/udp.h>
 #include <linux/tcp.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/xprtsock.h>
@@ -1867,13 +1868,9 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
  * @xprt: RPC transport to connect
  * @transport: socket transport to connect
  * @create_sock: function to create a socket of the correct type
- *
- * Invoked by a work queue tasklet.
  */
-static void xs_local_setup_socket(struct work_struct *work)
+static int xs_local_setup_socket(struct sock_xprt *transport)
 {
-       struct sock_xprt *transport =
-               container_of(work, struct sock_xprt, connect_worker.work);
        struct rpc_xprt *xprt = &transport->xprt;
        struct socket *sock;
        int status = -EIO;
@@ -1918,6 +1915,30 @@ out:
        xprt_clear_connecting(xprt);
        xprt_wake_pending_tasks(xprt, status);
        current->flags &= ~PF_FSTRANS;
+       return status;
+}
+
+static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+       int ret;
+
+        if (RPC_IS_ASYNC(task)) {
+               /*
+                * We want the AF_LOCAL connect to be resolved in the
+                * filesystem namespace of the process making the rpc
+                * call.  Thus we connect synchronously.
+                *
+                * If we want to support asynchronous AF_LOCAL calls,
+                * we'll need to figure out how to pass a namespace to
+                * connect.
+                */
+               rpc_exit(task, -ENOTCONN);
+               return;
+       }
+       ret = xs_local_setup_socket(transport);
+       if (ret && !RPC_IS_SOFTCONN(task))
+               msleep_interruptible(15000);
 }
 
 #ifdef CONFIG_SUNRPC_SWAP
@@ -2455,7 +2476,7 @@ static struct rpc_xprt_ops xs_local_ops = {
        .alloc_slot             = xprt_alloc_slot,
        .rpcbind                = xs_local_rpcbind,
        .set_port               = xs_local_set_port,
-       .connect                = xs_connect,
+       .connect                = xs_local_connect,
        .buf_alloc              = rpc_malloc,
        .buf_free               = rpc_free,
        .send_request           = xs_local_send_request,
@@ -2628,8 +2649,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
                        goto out_err;
                }
                xprt_set_bound(xprt);
-               INIT_DELAYED_WORK(&transport->connect_worker,
-                                       xs_local_setup_socket);
                xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
                break;
        default:
index 4675477..24b1679 100644 (file)
@@ -473,11 +473,10 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
 static struct name_seq *nametbl_find_seq(u32 type)
 {
        struct hlist_head *seq_head;
-       struct hlist_node *seq_node;
        struct name_seq *ns;
 
        seq_head = &table.types[hash(type)];
-       hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
+       hlist_for_each_entry(ns, seq_head, ns_list) {
                if (ns->type == type)
                        return ns;
        }
@@ -853,7 +852,6 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                         u32 type, u32 lowbound, u32 upbound)
 {
        struct hlist_head *seq_head;
-       struct hlist_node *seq_node;
        struct name_seq *seq;
        int all_types;
        int ret = 0;
@@ -873,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                upbound = ~0;
                for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                        seq_head = &table.types[i];
-                       hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+                       hlist_for_each_entry(seq, seq_head, ns_list) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
                                                   depth, seq->type,
                                                   lowbound, upbound, i);
@@ -889,7 +887,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                ret += nametbl_header(buf + ret, len - ret, depth);
                i = hash(type);
                seq_head = &table.types[i];
-               hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+               hlist_for_each_entry(seq, seq_head, ns_list) {
                        if (seq->type == type) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
                                                   depth, type,
index 48f39dd..6e6c434 100644 (file)
@@ -69,12 +69,11 @@ static unsigned int tipc_hashfn(u32 addr)
 struct tipc_node *tipc_node_find(u32 addr)
 {
        struct tipc_node *node;
-       struct hlist_node *pos;
 
        if (unlikely(!in_own_cluster_exact(addr)))
                return NULL;
 
-       hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
+       hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
                if (node->addr == addr)
                        return node;
        }
index 87d2842..51be64f 100644 (file)
@@ -263,9 +263,8 @@ static struct sock *__unix_find_socket_byname(struct net *net,
                                              int len, int type, unsigned int hash)
 {
        struct sock *s;
-       struct hlist_node *node;
 
-       sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
+       sk_for_each(s, &unix_socket_table[hash ^ type]) {
                struct unix_sock *u = unix_sk(s);
 
                if (!net_eq(sock_net(s), net))
@@ -298,10 +297,9 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
 static struct sock *unix_find_socket_byinode(struct inode *i)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        spin_lock(&unix_table_lock);
-       sk_for_each(s, node,
+       sk_for_each(s,
                    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
                struct dentry *dentry = unix_sk(s)->path.dentry;
 
index 5ac19dc..d591091 100644 (file)
@@ -192,10 +192,9 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
             slot < ARRAY_SIZE(unix_socket_table);
             s_num = 0, slot++) {
                struct sock *sk;
-               struct hlist_node *node;
 
                num = 0;
-               sk_for_each(sk, node, &unix_socket_table[slot]) {
+               sk_for_each(sk, &unix_socket_table[slot]) {
                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (num < s_num)
@@ -226,9 +225,7 @@ static struct sock *unix_lookup_by_ino(int ino)
 
        spin_lock(&unix_table_lock);
        for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
-               struct hlist_node *node;
-
-               sk_for_each(sk, node, &unix_socket_table[i])
+               sk_for_each(sk, &unix_socket_table[i])
                        if (ino == sock_i_ino(sk)) {
                                sock_hold(sk);
                                spin_unlock(&unix_table_lock);
index b6f4b99..d0f6545 100644 (file)
@@ -99,7 +99,7 @@ unsigned int unix_tot_inflight;
 struct sock *unix_get_socket(struct file *filp)
 {
        struct sock *u_sock = NULL;
-       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
        /*
         *      Socket ?
index 5ffff03..ea4155f 100644 (file)
@@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
        rdev->wiphy.rts_threshold = (u32) -1;
        rdev->wiphy.coverage_class = 0;
 
-       rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH |
-                              NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
+       rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;
 
        return &rdev->wiphy;
 }
index 35545cc..d44ab21 100644 (file)
@@ -554,27 +554,8 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
        if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
            nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
                goto nla_put_failure;
-       if (chan->flags & IEEE80211_CHAN_RADAR) {
-               u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
-               if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
-                       goto nla_put_failure;
-               if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
-                               chan->dfs_state))
-                       goto nla_put_failure;
-               if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
-                       goto nla_put_failure;
-       }
-       if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
-               goto nla_put_failure;
-       if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
-               goto nla_put_failure;
-       if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
-               goto nla_put_failure;
-       if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
-           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
+       if ((chan->flags & IEEE80211_CHAN_RADAR) &&
+           nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
                goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -900,9 +881,6 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
                    nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
                                c->max_interfaces))
                        goto nla_put_failure;
-               if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
-                               c->radar_detect_widths))
-                       goto nla_put_failure;
 
                nla_nest_end(msg, nl_combi);
        }
@@ -914,48 +892,6 @@ nla_put_failure:
        return -ENOBUFS;
 }
 
-#ifdef CONFIG_PM
-static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
-                                       struct sk_buff *msg)
-{
-       const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
-       struct nlattr *nl_tcp;
-
-       if (!tcp)
-               return 0;
-
-       nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
-       if (!nl_tcp)
-               return -ENOBUFS;
-
-       if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
-                       tcp->data_payload_max))
-               return -ENOBUFS;
-
-       if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
-                       tcp->data_payload_max))
-               return -ENOBUFS;
-
-       if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
-               return -ENOBUFS;
-
-       if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
-                               sizeof(*tcp->tok), tcp->tok))
-               return -ENOBUFS;
-
-       if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
-                       tcp->data_interval_max))
-               return -ENOBUFS;
-
-       if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
-                       tcp->wake_payload_max))
-               return -ENOBUFS;
-
-       nla_nest_end(msg, nl_tcp);
-       return 0;
-}
-#endif
-
 static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                              struct cfg80211_registered_device *dev)
 {
@@ -1330,9 +1266,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
                                goto nla_put_failure;
                }
 
-               if (nl80211_send_wowlan_tcp_caps(dev, msg))
-                       goto nla_put_failure;
-
                nla_nest_end(msg, nl_wowlan);
        }
 #endif
@@ -1365,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
                        dev->wiphy.max_acl_mac_addrs))
                goto nla_put_failure;
 
-       if (dev->wiphy.extended_capabilities &&
-           (nla_put(msg, NL80211_ATTR_EXT_CAPA,
-                    dev->wiphy.extended_capabilities_len,
-                    dev->wiphy.extended_capabilities) ||
-            nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
-                    dev->wiphy.extended_capabilities_len,
-                    dev->wiphy.extended_capabilities_mask)))
-               goto nla_put_failure;
-
        return genlmsg_end(msg, hdr);
 
  nla_put_failure:
@@ -1383,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
 static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       int idx = 0;
+       int idx = 0, ret;
        int start = cb->args[0];
        struct cfg80211_registered_device *dev;
 
@@ -1393,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
                if (++idx <= start)
                        continue;
-               if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
-                                      cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                      dev) < 0) {
+               ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
+                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                        dev);
+               if (ret < 0) {
+                       /*
+                        * If sending the wiphy data didn't fit (ENOBUFS or
+                        * EMSGSIZE returned), this SKB is still empty (so
+                        * it's not too big because another wiphy dataset is
+                        * already in the skb) and we've not tried to adjust
+                        * the dump allocation yet ... then adjust the alloc
+                        * size to be bigger, and return 1 but with the empty
+                        * skb. This results in an empty message being RX'ed
+                        * in userspace, but that is ignored.
+                        *
+                        * We can then retry with the larger buffer.
+                        */
+                       if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
+                           !skb->len &&
+                           cb->min_dump_alloc < 4096) {
+                               cb->min_dump_alloc = 4096;
+                               mutex_unlock(&cfg80211_mutex);
+                               return 1;
+                       }
                        idx--;
                        break;
                }
@@ -1412,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *msg;
        struct cfg80211_registered_device *dev = info->user_ptr[0];
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       msg = nlmsg_new(4096, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
index a306bc6..37ca969 100644 (file)
@@ -208,11 +208,10 @@ static void x25_remove_socket(struct sock *sk)
 static void x25_kill_by_device(struct net_device *dev)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        write_lock_bh(&x25_list_lock);
 
-       sk_for_each(s, node, &x25_list)
+       sk_for_each(s, &x25_list)
                if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
                        x25_disconnect(s, ENETUNREACH, 0, 0);
 
@@ -280,12 +279,11 @@ static struct sock *x25_find_listener(struct x25_address *addr,
 {
        struct sock *s;
        struct sock *next_best;
-       struct hlist_node *node;
 
        read_lock_bh(&x25_list_lock);
        next_best = NULL;
 
-       sk_for_each(s, node, &x25_list)
+       sk_for_each(s, &x25_list)
                if ((!strcmp(addr->x25_addr,
                        x25_sk(s)->source_addr.x25_addr) ||
                                !strcmp(addr->x25_addr,
@@ -323,9 +321,8 @@ found:
 static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
 {
        struct sock *s;
-       struct hlist_node *node;
 
-       sk_for_each(s, node, &x25_list)
+       sk_for_each(s, &x25_list)
                if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
                        sock_hold(s);
                        goto found;
@@ -1782,11 +1779,10 @@ static struct notifier_block x25_dev_notifier = {
 void x25_kill_by_neigh(struct x25_neigh *nb)
 {
        struct sock *s;
-       struct hlist_node *node;
 
        write_lock_bh(&x25_list_lock);
 
-       sk_for_each(s, node, &x25_list)
+       sk_for_each(s, &x25_list)
                if (x25_sk(s)->neighbour == nb)
                        x25_disconnect(s, ENETUNREACH, 0, 0);
 
index 5b47180..167c67d 100644 (file)
@@ -379,27 +379,27 @@ static void xfrm_dst_hash_transfer(struct hlist_head *list,
                                   struct hlist_head *ndsttable,
                                   unsigned int nhashmask)
 {
-       struct hlist_node *entry, *tmp, *entry0 = NULL;
+       struct hlist_node *tmp, *entry0 = NULL;
        struct xfrm_policy *pol;
        unsigned int h0 = 0;
 
 redo:
-       hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
+       hlist_for_each_entry_safe(pol, tmp, list, bydst) {
                unsigned int h;
 
                h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
                                pol->family, nhashmask);
                if (!entry0) {
-                       hlist_del(entry);
+                       hlist_del(&pol->bydst);
                        hlist_add_head(&pol->bydst, ndsttable+h);
                        h0 = h;
                } else {
                        if (h != h0)
                                continue;
-                       hlist_del(entry);
+                       hlist_del(&pol->bydst);
                        hlist_add_after(entry0, &pol->bydst);
                }
-               entry0 = entry;
+               entry0 = &pol->bydst;
        }
        if (!hlist_empty(list)) {
                entry0 = NULL;
@@ -411,10 +411,10 @@ static void xfrm_idx_hash_transfer(struct hlist_head *list,
                                   struct hlist_head *nidxtable,
                                   unsigned int nhashmask)
 {
-       struct hlist_node *entry, *tmp;
+       struct hlist_node *tmp;
        struct xfrm_policy *pol;
 
-       hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
+       hlist_for_each_entry_safe(pol, tmp, list, byidx) {
                unsigned int h;
 
                h = __idx_hash(pol->index, nhashmask);
@@ -544,7 +544,6 @@ static u32 xfrm_gen_index(struct net *net, int dir)
        static u32 idx_generator;
 
        for (;;) {
-               struct hlist_node *entry;
                struct hlist_head *list;
                struct xfrm_policy *p;
                u32 idx;
@@ -556,7 +555,7 @@ static u32 xfrm_gen_index(struct net *net, int dir)
                        idx = 8;
                list = net->xfrm.policy_byidx + idx_hash(net, idx);
                found = 0;
-               hlist_for_each_entry(p, entry, list, byidx) {
+               hlist_for_each_entry(p, list, byidx) {
                        if (p->index == idx) {
                                found = 1;
                                break;
@@ -628,13 +627,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
        struct xfrm_policy *pol;
        struct xfrm_policy *delpol;
        struct hlist_head *chain;
-       struct hlist_node *entry, *newpos;
+       struct hlist_node *newpos;
 
        write_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
        delpol = NULL;
        newpos = NULL;
-       hlist_for_each_entry(pol, entry, chain, bydst) {
+       hlist_for_each_entry(pol, chain, bydst) {
                if (pol->type == policy->type &&
                    !selector_cmp(&pol->selector, &policy->selector) &&
                    xfrm_policy_mark_match(policy, pol) &&
@@ -691,13 +690,12 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
 {
        struct xfrm_policy *pol, *ret;
        struct hlist_head *chain;
-       struct hlist_node *entry;
 
        *err = 0;
        write_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_bysel(net, sel, sel->family, dir);
        ret = NULL;
-       hlist_for_each_entry(pol, entry, chain, bydst) {
+       hlist_for_each_entry(pol, chain, bydst) {
                if (pol->type == type &&
                    (mark & pol->mark.m) == pol->mark.v &&
                    !selector_cmp(sel, &pol->selector) &&
@@ -729,7 +727,6 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
 {
        struct xfrm_policy *pol, *ret;
        struct hlist_head *chain;
-       struct hlist_node *entry;
 
        *err = -ENOENT;
        if (xfrm_policy_id2dir(id) != dir)
@@ -739,7 +736,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
        write_lock_bh(&xfrm_policy_lock);
        chain = net->xfrm.policy_byidx + idx_hash(net, id);
        ret = NULL;
-       hlist_for_each_entry(pol, entry, chain, byidx) {
+       hlist_for_each_entry(pol, chain, byidx) {
                if (pol->type == type && pol->index == id &&
                    (mark & pol->mark.m) == pol->mark.v) {
                        xfrm_pol_hold(pol);
@@ -772,10 +769,9 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
 
        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
                struct xfrm_policy *pol;
-               struct hlist_node *entry;
                int i;
 
-               hlist_for_each_entry(pol, entry,
+               hlist_for_each_entry(pol,
                                     &net->xfrm.policy_inexact[dir], bydst) {
                        if (pol->type != type)
                                continue;
@@ -789,7 +785,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
                        }
                }
                for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
-                       hlist_for_each_entry(pol, entry,
+                       hlist_for_each_entry(pol,
                                             net->xfrm.policy_bydst[dir].table + i,
                                             bydst) {
                                if (pol->type != type)
@@ -828,11 +824,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
 
        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
                struct xfrm_policy *pol;
-               struct hlist_node *entry;
                int i;
 
        again1:
-               hlist_for_each_entry(pol, entry,
+               hlist_for_each_entry(pol,
                                     &net->xfrm.policy_inexact[dir], bydst) {
                        if (pol->type != type)
                                continue;
@@ -852,7 +847,7 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
 
                for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
        again2:
-                       hlist_for_each_entry(pol, entry,
+                       hlist_for_each_entry(pol,
                                             net->xfrm.policy_bydst[dir].table + i,
                                             bydst) {
                                if (pol->type != type)
@@ -980,7 +975,6 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        int err;
        struct xfrm_policy *pol, *ret;
        const xfrm_address_t *daddr, *saddr;
-       struct hlist_node *entry;
        struct hlist_head *chain;
        u32 priority = ~0U;
 
@@ -992,7 +986,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        read_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_direct(net, daddr, saddr, family, dir);
        ret = NULL;
-       hlist_for_each_entry(pol, entry, chain, bydst) {
+       hlist_for_each_entry(pol, chain, bydst) {
                err = xfrm_policy_match(pol, fl, type, family, dir);
                if (err) {
                        if (err == -ESRCH)
@@ -1008,7 +1002,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
                }
        }
        chain = &net->xfrm.policy_inexact[dir];
-       hlist_for_each_entry(pol, entry, chain, bydst) {
+       hlist_for_each_entry(pol, chain, bydst) {
                err = xfrm_policy_match(pol, fl, type, family, dir);
                if (err) {
                        if (err == -ESRCH)
@@ -3041,13 +3035,12 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
                                                     u8 dir, u8 type)
 {
        struct xfrm_policy *pol, *ret = NULL;
-       struct hlist_node *entry;
        struct hlist_head *chain;
        u32 priority = ~0U;
 
        read_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
-       hlist_for_each_entry(pol, entry, chain, bydst) {
+       hlist_for_each_entry(pol, chain, bydst) {
                if (xfrm_migrate_selector_match(sel, &pol->selector) &&
                    pol->type == type) {
                        ret = pol;
@@ -3056,7 +3049,7 @@ static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector
                }
        }
        chain = &init_net.xfrm.policy_inexact[dir];
-       hlist_for_each_entry(pol, entry, chain, bydst) {
+       hlist_for_each_entry(pol, chain, bydst) {
                if (xfrm_migrate_selector_match(sel, &pol->selector) &&
                    pol->type == type &&
                    pol->priority < priority) {
index ae01bdb..2c341bd 100644 (file)
@@ -72,10 +72,10 @@ static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *nspitable,
                               unsigned int nhashmask)
 {
-       struct hlist_node *entry, *tmp;
+       struct hlist_node *tmp;
        struct xfrm_state *x;
 
-       hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
+       hlist_for_each_entry_safe(x, tmp, list, bydst) {
                unsigned int h;
 
                h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
@@ -368,14 +368,14 @@ static void xfrm_state_gc_task(struct work_struct *work)
 {
        struct net *net = container_of(work, struct net, xfrm.state_gc_work);
        struct xfrm_state *x;
-       struct hlist_node *entry, *tmp;
+       struct hlist_node *tmp;
        struct hlist_head gc_list;
 
        spin_lock_bh(&xfrm_state_gc_lock);
        hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);
 
-       hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
+       hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
                xfrm_state_gc_destroy(x);
 
        wake_up(&net->xfrm.km_waitq);
@@ -577,10 +577,9 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
        int i, err = 0;
 
        for (i = 0; i <= net->xfrm.state_hmask; i++) {
-               struct hlist_node *entry;
                struct xfrm_state *x;
 
-               hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+               hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
                        if (xfrm_id_proto_match(x->id.proto, proto) &&
                           (err = security_xfrm_state_delete(x)) != 0) {
                                xfrm_audit_state_delete(x, 0,
@@ -613,10 +612,9 @@ int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
 
        err = -ESRCH;
        for (i = 0; i <= net->xfrm.state_hmask; i++) {
-               struct hlist_node *entry;
                struct xfrm_state *x;
 restart:
-               hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+               hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            xfrm_id_proto_match(x->id.proto, proto)) {
                                xfrm_state_hold(x);
@@ -685,9 +683,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
 {
        unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
        struct xfrm_state *x;
-       struct hlist_node *entry;
 
-       hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
+       hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
                if (x->props.family != family ||
                    x->id.spi       != spi ||
                    x->id.proto     != proto ||
@@ -710,9 +707,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
 {
        unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
        struct xfrm_state *x;
-       struct hlist_node *entry;
 
-       hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
+       hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
                if (x->props.family != family ||
                    x->id.proto     != proto ||
                    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@ -798,7 +794,6 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
        static xfrm_address_t saddr_wildcard = { };
        struct net *net = xp_net(pol);
        unsigned int h, h_wildcard;
-       struct hlist_node *entry;
        struct xfrm_state *x, *x0, *to_put;
        int acquire_in_progress = 0;
        int error = 0;
@@ -810,7 +805,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 
        spin_lock_bh(&xfrm_state_lock);
        h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
-       hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+       hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
@@ -826,7 +821,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                goto found;
 
        h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
-       hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
+       hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
                if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
@@ -906,11 +901,10 @@ xfrm_stateonly_find(struct net *net, u32 mark,
 {
        unsigned int h;
        struct xfrm_state *rx = NULL, *x = NULL;
-       struct hlist_node *entry;
 
        spin_lock(&xfrm_state_lock);
        h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
-       hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+       hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == reqid &&
                    (mark & x->mark.m) == x->mark.v &&
@@ -972,12 +966,11 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
        unsigned short family = xnew->props.family;
        u32 reqid = xnew->props.reqid;
        struct xfrm_state *x;
-       struct hlist_node *entry;
        unsigned int h;
        u32 mark = xnew->mark.v & xnew->mark.m;
 
        h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
-       hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+       hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                if (x->props.family     == family &&
                    x->props.reqid      == reqid &&
                    (mark & x->mark.m) == x->mark.v &&
@@ -1004,11 +997,10 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
                                          const xfrm_address_t *saddr, int create)
 {
        unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
-       struct hlist_node *entry;
        struct xfrm_state *x;
        u32 mark = m->v & m->m;
 
-       hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+       hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                if (x->props.reqid  != reqid ||
                    x->props.mode   != mode ||
                    x->props.family != family ||
@@ -1215,12 +1207,11 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
 {
        unsigned int h;
        struct xfrm_state *x;
-       struct hlist_node *entry;
 
        if (m->reqid) {
                h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr,
                                  m->reqid, m->old_family);
-               hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
+               hlist_for_each_entry(x, init_net.xfrm.state_bydst+h, bydst) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
@@ -1237,7 +1228,7 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
        } else {
                h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr,
                                  m->old_family);
-               hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
+               hlist_for_each_entry(x, init_net.xfrm.state_bysrc+h, bysrc) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
@@ -1466,10 +1457,9 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 s
        int i;
 
        for (i = 0; i <= net->xfrm.state_hmask; i++) {
-               struct hlist_node *entry;
                struct xfrm_state *x;
 
-               hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
+               hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
                        if (x->km.seq == seq &&
                            (mark & x->mark.m) == x->mark.v &&
                            x->km.state == XFRM_STATE_ACQ) {
index 06ba4a7..25f216a 100644 (file)
@@ -7,15 +7,15 @@
 #
 # ==========================================================================
 
-# called may set destination dir (when installing to asm/)
-_dst := $(or $(destination-y),$(dst),$(obj))
-
 # generated header directory
 gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
 
 kbuild-file := $(srctree)/$(obj)/Kbuild
 include $(kbuild-file)
 
+# called may set destination dir (when installing to asm/)
+_dst := $(or $(destination-y),$(dst),$(obj))
+
 old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild
 ifneq ($(wildcard $(old-kbuild-file)),)
 include $(old-kbuild-file)
index a1cb022..cf82c83 100644 (file)
@@ -66,10 +66,6 @@ modules   := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o)))
 # Stop after building .o files if NOFINAL is set. Makes compile tests quicker
 _modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules))
 
-ifneq ($(KBUILD_BUILDHOST),$(ARCH))
-        cross_build := 1
-endif
-
 # Step 2), invoke modpost
 #  Includes step 3,4
 modpost = scripts/mod/modpost                    \
@@ -80,8 +76,7 @@ modpost = scripts/mod/modpost                    \
  $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
  $(if $(KBUILD_EXTMOD),-o $(modulesymfile))      \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
- $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \
- $(if $(cross_build),-c)
+ $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
 
 quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
       cmd_modpost = $(modpost) -s
index 747bcd7..b28cc38 100755 (executable)
@@ -2930,7 +2930,7 @@ sub process {
                        my $var = $1;
                        if ($var !~ /$Constant/ &&
                            $var =~ /[A-Z]\w*[a-z]|[a-z]\w*[A-Z]/ &&
-                           $var !~ /^Page[A-Z]/ &&
+                           $var !~ /"^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
                            !defined $camelcase{$var}) {
                                $camelcase{$var} = 1;
                                WARN("CAMELCASE",
index 17e3843..544aa56 100755 (executable)
@@ -34,7 +34,7 @@ use strict;
 # $1 (first bracket) matches the dynamic amount of the stack growth
 #
 # use anything else and feel the pain ;)
-my (@stack, $re, $dre, $x, $xs);
+my (@stack, $re, $dre, $x, $xs, $funcre);
 {
        my $arch = shift;
        if ($arch eq "") {
@@ -44,6 +44,7 @@ my (@stack, $re, $dre, $x, $xs);
 
        $x      = "[0-9a-f]";   # hex character
        $xs     = "[0-9a-f ]";  # hex character or space
+       $funcre = qr/^$x* <(.*)>:$/;
        if ($arch eq 'arm') {
                #c0008ffc:      e24dd064        sub     sp, sp, #100    ; 0x64
                $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
@@ -66,6 +67,10 @@ my (@stack, $re, $dre, $x, $xs);
                #    2b6c:       4e56 fb70       linkw %fp,#-1168
                #  1df770:       defc ffe4       addaw #-28,%sp
                $re = qr/.*(?:linkw %fp,|addaw )#-([0-9]{1,4})(?:,%sp)?$/o;
+       } elsif ($arch eq 'metag') {
+               #400026fc:       40 00 00 82     ADD       A0StP,A0StP,#0x8
+               $re = qr/.*ADD.*A0StP,A0StP,\#(0x$x{1,8})/o;
+               $funcre = qr/^$x* <[^\$](.*)>:$/;
        } elsif ($arch eq 'mips64') {
                #8800402c:       67bdfff0        daddiu  sp,sp,-16
                $re = qr/.*daddiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
@@ -109,7 +114,6 @@ my (@stack, $re, $dre, $x, $xs);
 #
 # main()
 #
-my $funcre = qr/^$x* <(.*)>:$/;
 my ($func, $file, $lastslash);
 
 while (my $line = <STDIN>) {
index 1a49d1c..85d3189 100755 (executable)
@@ -2,6 +2,15 @@
 
 SPATCH="`which ${SPATCH:=spatch}`"
 
+# The verbosity may be set by the environmental parameter V=
+# as for example with 'make V=1 coccicheck'
+
+if [ -n "$V" -a "$V" != "0" ]; then
+       VERBOSE=1
+else
+       VERBOSE=0
+fi
+
 if [ "$C" = "1" -o "$C" = "2" ]; then
     ONLINE=1
 
@@ -46,6 +55,14 @@ if [ "$ONLINE" = "0" ] ; then
     echo ''
 fi
 
+run_cmd() {
+       if [ $VERBOSE -ne 0 ] ; then
+               echo "Running: $@"
+       fi
+       eval $@
+}
+
+
 coccinelle () {
     COCCI="$1"
 
@@ -55,7 +72,7 @@ coccinelle () {
 #
 #    $SPATCH -D $MODE $FLAGS -parse_cocci $COCCI $OPT > /dev/null
 
-    if [ "$ONLINE" = "0" ] ; then
+    if [ $VERBOSE -ne 0 ] ; then
 
        FILE=`echo $COCCI | sed "s|$srctree/||"`
 
@@ -91,15 +108,21 @@ coccinelle () {
     fi
 
     if [ "$MODE" = "chain" ] ; then
-       $SPATCH -D patch   $FLAGS -sp_file $COCCI $OPT $OPTIONS               || \
-       $SPATCH -D report  $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || \
-       $SPATCH -D context $FLAGS -sp_file $COCCI $OPT $OPTIONS               || \
-       $SPATCH -D org     $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || exit 1
+       run_cmd $SPATCH -D patch   \
+               $FLAGS -sp_file $COCCI $OPT $OPTIONS               || \
+       run_cmd $SPATCH -D report  \
+               $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || \
+       run_cmd $SPATCH -D context \
+               $FLAGS -sp_file $COCCI $OPT $OPTIONS               || \
+       run_cmd $SPATCH -D org     \
+               $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff || exit 1
     elif [ "$MODE" = "rep+ctxt" ] ; then
-       $SPATCH -D report  $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff && \
-       $SPATCH -D context $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
+       run_cmd $SPATCH -D report  \
+               $FLAGS -sp_file $COCCI $OPT $OPTIONS -no_show_diff && \
+       run_cmd $SPATCH -D context \
+               $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
     else
-       $SPATCH -D $MODE   $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
+       run_cmd $SPATCH -D $MODE   $FLAGS -sp_file $COCCI $OPT $OPTIONS || exit 1
     fi
 
 }
diff --git a/scripts/coccinelle/misc/memcpy-assign.cocci b/scripts/coccinelle/misc/memcpy-assign.cocci
new file mode 100644 (file)
index 0000000..afd058b
--- /dev/null
@@ -0,0 +1,103 @@
+//
+// Replace memcpy with struct assignment.
+//
+// Confidence: High
+// Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: --no-includes --include-headers
+
+virtual patch
+virtual report
+virtual context
+virtual org
+
+@r1 depends on !patch@
+identifier struct_name;
+struct struct_name to;
+struct struct_name from;
+struct struct_name *top;
+struct struct_name *fromp;
+position p;
+@@
+memcpy@p(\(&(to)\|top\), \(&(from)\|fromp\), \(sizeof(to)\|sizeof(from)\|sizeof(struct struct_name)\|sizeof(*top)\|sizeof(*fromp)\))
+
+@script:python depends on report@
+p << r1.p;
+@@
+coccilib.report.print_report(p[0],"Replace memcpy with struct assignment")
+
+@depends on context@
+position r1.p;
+@@
+*memcpy@p(...);
+
+@script:python depends on org@
+p << r1.p;
+@@
+cocci.print_main("Replace memcpy with struct assignment",p)
+
+@depends on patch@
+identifier struct_name;
+struct struct_name to;
+struct struct_name from;
+@@
+(
+-memcpy(&(to), &(from), sizeof(to));
++to = from;
+|
+-memcpy(&(to), &(from), sizeof(from));
++to = from;
+|
+-memcpy(&(to), &(from), sizeof(struct struct_name));
++to = from;
+)
+
+@depends on patch@
+identifier struct_name;
+struct struct_name to;
+struct struct_name *from;
+@@
+(
+-memcpy(&(to), from, sizeof(to));
++to = *from;
+|
+-memcpy(&(to), from, sizeof(*from));
++to = *from;
+|
+-memcpy(&(to), from, sizeof(struct struct_name));
++to = *from;
+)
+
+@depends on patch@
+identifier struct_name;
+struct struct_name *to;
+struct struct_name from;
+@@
+(
+-memcpy(to, &(from), sizeof(*to));
++ *to = from;
+|
+-memcpy(to, &(from), sizeof(from));
++ *to = from;
+|
+-memcpy(to, &(from), sizeof(struct struct_name));
++ *to = from;
+)
+
+@depends on patch@
+identifier struct_name;
+struct struct_name *to;
+struct struct_name *from;
+@@
+(
+-memcpy(to, from, sizeof(*to));
++ *to = *from;
+|
+-memcpy(to, from, sizeof(*from));
++ *to = *from;
+|
+-memcpy(to, from, sizeof(struct struct_name));
++ *to = *from;
+)
+
diff --git a/scripts/coccinelle/misc/orplus.cocci b/scripts/coccinelle/misc/orplus.cocci
new file mode 100644 (file)
index 0000000..4a28cef
--- /dev/null
@@ -0,0 +1,55 @@
+/// Check for constants that are added but are used elsewhere as bitmasks
+/// The results should be checked manually to ensure that the nonzero
+/// bits in the two constants are actually disjoint.
+///
+// Confidence: Moderate
+// Copyright: (C) 2013 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2013 Gilles Muller, INRIA/LIP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+
+@r@
+constant c;
+identifier i;
+expression e;
+@@
+
+(
+e | c@i
+|
+e & c@i
+|
+e |= c@i
+|
+e &= c@i
+)
+
+@s@
+constant r.c,c1;
+identifier i1;
+position p;
+@@
+
+(
+ c1 + c - 1
+|
+*c1@i1 +@p c
+)
+
+@script:python depends on org@
+p << s.p;
+@@
+
+cocci.print_main("sum of probable bitmasks, consider |",p)
+
+@script:python depends on report@
+p << s.p;
+@@
+
+msg = "WARNING: sum of probable bitmasks, consider |"
+coccilib.report.print_report(p[0],msg)
diff --git a/scripts/coccinelle/misc/semicolon.cocci b/scripts/coccinelle/misc/semicolon.cocci
new file mode 100644 (file)
index 0000000..a47eba2
--- /dev/null
@@ -0,0 +1,83 @@
+///
+/// Removes unneeded semicolon.
+///
+// Confidence: Moderate
+// Copyright: (C) 2012 Peter Senna Tschudin, INRIA/LIP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments: Some false positives on empty default cases in switch statements.
+// Options: --no-includes --include-headers
+
+virtual patch
+virtual report
+virtual context
+virtual org
+
+@r_default@
+position p;
+@@
+switch (...)
+{
+default: ...;@p
+}
+
+@r_case@
+position p;
+@@
+(
+switch (...)
+{
+case ...:;@p
+}
+|
+switch (...)
+{
+case ...:...
+case ...:;@p
+}
+|
+switch (...)
+{
+case ...:...
+case ...:
+case ...:;@p
+}
+)
+
+@r1@
+statement S;
+position p1;
+position p != {r_default.p, r_case.p};
+identifier label;
+@@
+(
+label:;
+|
+S@p1;@p
+)
+
+@script:python@
+p << r1.p;
+p1 << r1.p1;
+@@
+if p[0].line != p1[0].line_end:
+       cocci.include_match(False)
+
+@depends on patch@
+position r1.p;
+@@
+-;@p
+
+@script:python depends on report@
+p << r1.p;
+@@
+coccilib.report.print_report(p[0],"Unneeded semicolon")
+
+@depends on context@
+position r1.p;
+@@
+*;@p
+
+@script:python depends on org@
+p << r1.p;
+@@
+cocci.print_main("Unneeded semicolon",p)
index 2ae4817..122599b 100755 (executable)
@@ -2,16 +2,36 @@
 #
 # A depmod wrapper used by the toplevel Makefile
 
-if test $# -ne 2; then
-       echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
+if test $# -ne 3; then
+       echo "Usage: $0 /sbin/depmod <kernelrelease> <symbolprefix>" >&2
        exit 1
 fi
 DEPMOD=$1
 KERNELRELEASE=$2
+SYMBOL_PREFIX=$3
 
 if ! test -r System.map -a -x "$DEPMOD"; then
        exit 0
 fi
+
+# older versions of depmod don't support -P <symbol-prefix>
+# support was added in module-init-tools 3.13
+if test -n "$SYMBOL_PREFIX"; then
+       release=$("$DEPMOD" --version)
+       package=$(echo "$release" | cut -d' ' -f 1)
+       if test "$package" = "module-init-tools"; then
+               version=$(echo "$release" | cut -d' ' -f 2)
+               later=$(printf '%s\n' "$version" "3.13" | sort -V | tail -n 1)
+               if test "$later" != "$version"; then
+                       # module-init-tools < 3.13, drop the symbol prefix
+                       SYMBOL_PREFIX=""
+               fi
+       fi
+       if test -n "$SYMBOL_PREFIX"; then
+               SYMBOL_PREFIX="-P $SYMBOL_PREFIX"
+       fi
+fi
+
 # older versions of depmod require the version string to start with three
 # numbers, so we cheat with a symlink here
 depmod_hack_needed=true
@@ -34,7 +54,7 @@ set -- -ae -F System.map
 if test -n "$INSTALL_MOD_PATH"; then
        set -- "$@" -b "$INSTALL_MOD_PATH"
 fi
-"$DEPMOD" "$@" "$KERNELRELEASE"
+"$DEPMOD" "$@" "$KERNELRELEASE" $SYMBOL_PREFIX
 ret=$?
 
 if $depmod_hack_needed; then
index 8a10649..d25e4a1 100644 (file)
@@ -826,7 +826,8 @@ int main(int argc, char **argv)
                        genksyms_usage();
                        return 1;
                }
-       if ((strcmp(arch, "h8300") == 0) || (strcmp(arch, "blackfin") == 0))
+       if ((strcmp(arch, "h8300") == 0) || (strcmp(arch, "blackfin") == 0) ||
+           (strcmp(arch, "metag") == 0))
                mod_prefix = "_";
        {
                extern int yydebug;
index 18d4ab5..ce4cc83 100755 (executable)
@@ -611,6 +611,10 @@ sub get_maintainers {
                                    $hash{$tvi} = $value_pd;
                                }
                            }
+                       } elsif ($type eq 'K') {
+                           if ($file =~ m/$value/x) {
+                               $hash{$tvi} = 0;
+                           }
                        }
                    }
                }
index 3091794..231b475 100644 (file)
@@ -11,6 +11,9 @@ else
 Kconfig := Kconfig
 endif
 
+# We need this, in case the user has it in its environment
+unexport CONFIG_
+
 xconfig: $(obj)/qconf
        $< $(Kconfig)
 
index 4da3b4a..e39fcd8 100644 (file)
@@ -36,6 +36,7 @@ enum input_mode {
 } input_mode = oldaskconfig;
 
 static int indent = 1;
+static int tty_stdio;
 static int valid_stdin = 1;
 static int sync_kconfig;
 static int conf_cnt;
@@ -108,6 +109,8 @@ static int conf_askvalue(struct symbol *sym, const char *def)
        case oldaskconfig:
                fflush(stdout);
                xfgets(line, 128, stdin);
+               if (!tty_stdio)
+                       printf("\n");
                return 1;
        default:
                break;
@@ -495,6 +498,8 @@ int main(int ac, char **av)
        bindtextdomain(PACKAGE, LOCALEDIR);
        textdomain(PACKAGE);
 
+       tty_stdio = isatty(0) && isatty(1) && isatty(2);
+
        while ((opt = getopt_long(ac, av, "", long_opts, NULL)) != -1) {
                input_mode = (enum input_mode)opt;
                switch (opt) {
@@ -621,7 +626,7 @@ int main(int ac, char **av)
                                return 1;
                        }
                }
-               valid_stdin = isatty(0) && isatty(1) && isatty(2);
+               valid_stdin = tty_stdio;
        }
 
        switch (input_mode) {
index 290ce41..d662652 100644 (file)
@@ -13,7 +13,7 @@
 
 struct expr *expr_alloc_symbol(struct symbol *sym)
 {
-       struct expr *e = calloc(1, sizeof(*e));
+       struct expr *e = xcalloc(1, sizeof(*e));
        e->type = E_SYMBOL;
        e->left.sym = sym;
        return e;
@@ -21,7 +21,7 @@ struct expr *expr_alloc_symbol(struct symbol *sym)
 
 struct expr *expr_alloc_one(enum expr_type type, struct expr *ce)
 {
-       struct expr *e = calloc(1, sizeof(*e));
+       struct expr *e = xcalloc(1, sizeof(*e));
        e->type = type;
        e->left.expr = ce;
        return e;
@@ -29,7 +29,7 @@ struct expr *expr_alloc_one(enum expr_type type, struct expr *ce)
 
 struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e2)
 {
-       struct expr *e = calloc(1, sizeof(*e));
+       struct expr *e = xcalloc(1, sizeof(*e));
        e->type = type;
        e->left.expr = e1;
        e->right.expr = e2;
@@ -38,7 +38,7 @@ struct expr *expr_alloc_two(enum expr_type type, struct expr *e1, struct expr *e
 
 struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2)
 {
-       struct expr *e = calloc(1, sizeof(*e));
+       struct expr *e = xcalloc(1, sizeof(*e));
        e->type = type;
        e->left.sym = s1;
        e->right.sym = s2;
@@ -66,7 +66,7 @@ struct expr *expr_copy(const struct expr *org)
        if (!org)
                return NULL;
 
-       e = malloc(sizeof(*org));
+       e = xmalloc(sizeof(*org));
        memcpy(e, org, sizeof(*org));
        switch (org->type) {
        case E_SYMBOL:
index adc2306..f2bee70 100644 (file)
@@ -10,6 +10,7 @@
 #  include <config.h>
 #endif
 
+#include <stdlib.h>
 #include "lkc.h"
 #include "images.c"
 
@@ -22,7 +23,6 @@
 #include <string.h>
 #include <unistd.h>
 #include <time.h>
-#include <stdlib.h>
 
 //#define DEBUG
 
index c18f2bd..f8aee5f 100644 (file)
@@ -39,6 +39,12 @@ extern "C" {
 #ifndef CONFIG_
 #define CONFIG_ "CONFIG_"
 #endif
+static inline const char *CONFIG_prefix(void)
+{
+       return getenv( "CONFIG_" ) ?: CONFIG_;
+}
+#undef CONFIG_
+#define CONFIG_ CONFIG_prefix()
 
 #define TF_COMMAND     0x0001
 #define TF_PARAM       0x0002
@@ -116,6 +122,8 @@ void menu_set_type(int type);
 /* util.c */
 struct file *file_lookup(const char *name);
 int file_write_dep(const char *name);
+void *xmalloc(size_t size);
+void *xcalloc(size_t nmemb, size_t size);
 
 struct gstr {
        size_t len;
index c8e8a71..8078813 100644 (file)
@@ -21,6 +21,7 @@ ccflags()
 {
        if [ -f /usr/include/ncursesw/curses.h ]; then
                echo '-I/usr/include/ncursesw -DCURSES_LOC="<ncursesw/curses.h>"'
+               echo ' -DNCURSES_WIDECHAR=1'
        elif [ -f /usr/include/ncurses/ncurses.h ]; then
                echo '-I/usr/include/ncurses -DCURSES_LOC="<ncurses.h>"'
        elif [ -f /usr/include/ncurses/curses.h ]; then
index ee17a52..307022a 100644 (file)
@@ -221,7 +221,6 @@ int dialog_menu(const char *title, const char *prompt,
                const void *selected, int *s_scroll);
 int dialog_checklist(const char *title, const char *prompt, int height,
                     int width, int list_height);
-extern char dialog_input_result[];
 int dialog_inputbox(const char *title, const char *prompt, int height,
                    int width, const char *init);
 
index dd8e587..21404a0 100644 (file)
@@ -45,7 +45,8 @@ int dialog_inputbox(const char *title, const char *prompt, int height, int width
                     const char *init)
 {
        int i, x, y, box_y, box_x, box_width;
-       int input_x = 0, scroll = 0, key = 0, button = -1;
+       int input_x = 0, key = 0, button = -1;
+       int show_x, len, pos;
        char *instr = dialog_input_result;
        WINDOW *dialog;
 
@@ -97,14 +98,17 @@ do_resize:
        wmove(dialog, box_y, box_x);
        wattrset(dialog, dlg.inputbox.atr);
 
-       input_x = strlen(instr);
+       len = strlen(instr);
+       pos = len;
 
-       if (input_x >= box_width) {
-               scroll = input_x - box_width + 1;
+       if (len >= box_width) {
+               show_x = len - box_width + 1;
                input_x = box_width - 1;
                for (i = 0; i < box_width - 1; i++)
-                       waddch(dialog, instr[scroll + i]);
+                       waddch(dialog, instr[show_x + i]);
        } else {
+               show_x = 0;
+               input_x = len;
                waddstr(dialog, instr);
        }
 
@@ -121,45 +125,104 @@ do_resize:
                        case KEY_UP:
                        case KEY_DOWN:
                                break;
-                       case KEY_LEFT:
-                               continue;
-                       case KEY_RIGHT:
-                               continue;
                        case KEY_BACKSPACE:
                        case 127:
-                               if (input_x || scroll) {
+                               if (pos) {
                                        wattrset(dialog, dlg.inputbox.atr);
-                                       if (!input_x) {
-                                               scroll = scroll < box_width - 1 ? 0 : scroll - (box_width - 1);
-                                               wmove(dialog, box_y, box_x);
-                                               for (i = 0; i < box_width; i++)
-                                                       waddch(dialog,
-                                                              instr[scroll + input_x + i] ?
-                                                              instr[scroll + input_x + i] : ' ');
-                                               input_x = strlen(instr) - scroll;
+                                       if (input_x == 0) {
+                                               show_x--;
                                        } else
                                                input_x--;
-                                       instr[scroll + input_x] = '\0';
-                                       mvwaddch(dialog, box_y, input_x + box_x, ' ');
+
+                                       if (pos < len) {
+                                               for (i = pos - 1; i < len; i++) {
+                                                       instr[i] = instr[i+1];
+                                               }
+                                       }
+
+                                       pos--;
+                                       len--;
+                                       instr[len] = '\0';
+                                       wmove(dialog, box_y, box_x);
+                                       for (i = 0; i < box_width; i++) {
+                                               if (!instr[show_x + i]) {
+                                                       waddch(dialog, ' ');
+                                                       break;
+                                               }
+                                               waddch(dialog, instr[show_x + i]);
+                                       }
                                        wmove(dialog, box_y, input_x + box_x);
                                        wrefresh(dialog);
                                }
                                continue;
+                       case KEY_LEFT:
+                               if (pos > 0) {
+                                       if (input_x > 0) {
+                                               wmove(dialog, box_y, --input_x + box_x);
+                                       } else if (input_x == 0) {
+                                               show_x--;
+                                               wmove(dialog, box_y, box_x);
+                                               for (i = 0; i < box_width; i++) {
+                                                       if (!instr[show_x + i]) {
+                                                               waddch(dialog, ' ');
+                                                               break;
+                                                       }
+                                                       waddch(dialog, instr[show_x + i]);
+                                               }
+                                               wmove(dialog, box_y, box_x);
+                                       }
+                                       pos--;
+                               }
+                               continue;
+                       case KEY_RIGHT:
+                               if (pos < len) {
+                                       if (input_x < box_width - 1) {
+                                               wmove(dialog, box_y, ++input_x + box_x);
+                                       } else if (input_x == box_width - 1) {
+                                               show_x++;
+                                               wmove(dialog, box_y, box_x);
+                                               for (i = 0; i < box_width; i++) {
+                                                       if (!instr[show_x + i]) {
+                                                               waddch(dialog, ' ');
+                                                               break;
+                                                       }
+                                                       waddch(dialog, instr[show_x + i]);
+                                               }
+                                               wmove(dialog, box_y, input_x + box_x);
+                                       }
+                                       pos++;
+                               }
+                               continue;
                        default:
                                if (key < 0x100 && isprint(key)) {
-                                       if (scroll + input_x < MAX_LEN) {
+                                       if (len < MAX_LEN) {
                                                wattrset(dialog, dlg.inputbox.atr);
-                                               instr[scroll + input_x] = key;
-                                               instr[scroll + input_x + 1] = '\0';
+                                               if (pos < len) {
+                                                       for (i = len; i > pos; i--)
+                                                               instr[i] = instr[i-1];
+                                                       instr[pos] = key;
+                                               } else {
+                                                       instr[len] = key;
+                                               }
+                                               pos++;
+                                               len++;
+                                               instr[len] = '\0';
+
                                                if (input_x == box_width - 1) {
-                                                       scroll++;
-                                                       wmove(dialog, box_y, box_x);
-                                                       for (i = 0; i < box_width - 1; i++)
-                                                               waddch(dialog, instr [scroll + i]);
+                                                       show_x++;
                                                } else {
-                                                       wmove(dialog, box_y, input_x++ + box_x);
-                                                       waddch(dialog, key);
+                                                       input_x++;
+                                               }
+
+                                               wmove(dialog, box_y, box_x);
+                                               for (i = 0; i < box_width; i++) {
+                                                       if (!instr[show_x + i]) {
+                                                               waddch(dialog, ' ');
+                                                               break;
+                                                       }
+                                                       waddch(dialog, instr[show_x + i]);
                                                }
+                                               wmove(dialog, box_y, input_x + box_x);
                                                wrefresh(dialog);
                                        } else
                                                flash();        /* Alarm user about overflow */
index 1d60473..48d382e 100644 (file)
@@ -26,7 +26,7 @@
  *
  *    *)  A bugfix for the Page-Down problem
  *
- *    *)  Formerly when I used Page Down and Page Up, the cursor would be set 
+ *    *)  Formerly when I used Page Down and Page Up, the cursor would be set
  *        to the first position in the menu box.  Now lxdialog is a bit
  *        smarter and works more like other menu systems (just have a look at
  *        it).
@@ -154,12 +154,14 @@ static void print_arrows(WINDOW * win, int item_no, int scroll, int y, int x,
  */
 static void print_buttons(WINDOW * win, int height, int width, int selected)
 {
-       int x = width / 2 - 16;
+       int x = width / 2 - 28;
        int y = height - 2;
 
        print_button(win, gettext("Select"), y, x, selected == 0);
        print_button(win, gettext(" Exit "), y, x + 12, selected == 1);
        print_button(win, gettext(" Help "), y, x + 24, selected == 2);
+       print_button(win, gettext(" Save "), y, x + 36, selected == 3);
+       print_button(win, gettext(" Load "), y, x + 48, selected == 4);
 
        wmove(win, y, x + 1 + 12 * selected);
        wrefresh(win);
@@ -372,7 +374,7 @@ do_resize:
                case TAB:
                case KEY_RIGHT:
                        button = ((key == KEY_LEFT ? --button : ++button) < 0)
-                           ? 2 : (button > 2 ? 0 : button);
+                           ? 4 : (button > 4 ? 0 : button);
 
                        print_buttons(dialog, height, width, button);
                        wrefresh(menu);
@@ -399,17 +401,17 @@ do_resize:
                                return 2;
                        case 's':
                        case 'y':
-                               return 3;
+                               return 5;
                        case 'n':
-                               return 4;
+                               return 6;
                        case 'm':
-                               return 5;
+                               return 7;
                        case ' ':
-                               return 6;
+                               return 8;
                        case '/':
-                               return 7;
+                               return 9;
                        case 'z':
-                               return 8;
+                               return 10;
                        case '\n':
                                return button;
                        }
index 53975cf..566288a 100644 (file)
@@ -280,6 +280,7 @@ static struct menu *current_menu;
 static int child_count;
 static int single_menu_mode;
 static int show_all_options;
+static int save_and_exit;
 
 static void conf(struct menu *menu, struct menu *active_menu);
 static void conf_choice(struct menu *menu);
@@ -348,15 +349,19 @@ static void search_conf(void)
 {
        struct symbol **sym_arr;
        struct gstr res;
+       struct gstr title;
        char *dialog_input;
        int dres, vscroll = 0, hscroll = 0;
        bool again;
 
+       title = str_new();
+       str_printf( &title, _("Enter %s (sub)string to search for "
+                             "(with or without \"%s\")"), CONFIG_, CONFIG_);
+
 again:
        dialog_clear();
        dres = dialog_inputbox(_("Search Configuration Parameter"),
-                             _("Enter " CONFIG_ " (sub)string to search for "
-                               "(with or without \"" CONFIG_ "\")"),
+                             str_get(&title),
                              10, 75, "");
        switch (dres) {
        case 0:
@@ -365,6 +370,7 @@ again:
                show_helptext(_("Search Configuration"), search_help);
                goto again;
        default:
+               str_free(&title);
                return;
        }
 
@@ -398,6 +404,7 @@ again:
                str_free(&res);
        } while (again);
        free(sym_arr);
+       str_free(&title);
 }
 
 static void build_conf(struct menu *menu)
@@ -592,14 +599,6 @@ static void conf(struct menu *menu, struct menu *active_menu)
                build_conf(menu);
                if (!child_count)
                        break;
-               if (menu == &rootmenu) {
-                       item_make("--- ");
-                       item_set_tag(':');
-                       item_make(_("    Load an Alternate Configuration File"));
-                       item_set_tag('L');
-                       item_make(_("    Save an Alternate Configuration File"));
-                       item_set_tag('S');
-               }
                dialog_clear();
                res = dialog_menu(prompt ? _(prompt) : _("Main Menu"),
                                  _(menu_instructions),
@@ -636,12 +635,6 @@ static void conf(struct menu *menu, struct menu *active_menu)
                        case 's':
                                conf_string(submenu);
                                break;
-                       case 'L':
-                               conf_load();
-                               break;
-                       case 'S':
-                               conf_save();
-                               break;
                        }
                        break;
                case 2:
@@ -651,6 +644,12 @@ static void conf(struct menu *menu, struct menu *active_menu)
                                show_helptext(_("README"), _(mconf_readme));
                        break;
                case 3:
+                       conf_save();
+                       break;
+               case 4:
+                       conf_load();
+                       break;
+               case 5:
                        if (item_is_tag('t')) {
                                if (sym_set_tristate_value(sym, yes))
                                        break;
@@ -658,24 +657,24 @@ static void conf(struct menu *menu, struct menu *active_menu)
                                        show_textbox(NULL, setmod_text, 6, 74);
                        }
                        break;
-               case 4:
+               case 6:
                        if (item_is_tag('t'))
                                sym_set_tristate_value(sym, no);
                        break;
-               case 5:
+               case 7:
                        if (item_is_tag('t'))
                                sym_set_tristate_value(sym, mod);
                        break;
-               case 6:
+               case 8:
                        if (item_is_tag('t'))
                                sym_toggle_tristate_value(sym);
                        else if (item_is_tag('m'))
                                conf(submenu, NULL);
                        break;
-               case 7:
+               case 9:
                        search_conf();
                        break;
-               case 8:
+               case 10:
                        show_all_options = !show_all_options;
                        break;
                }
@@ -702,6 +701,17 @@ static void show_helptext(const char *title, const char *text)
        show_textbox(title, text, 0, 0);
 }
 
+static void conf_message_callback(const char *fmt, va_list ap)
+{
+       char buf[PATH_MAX+1];
+
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       if (save_and_exit)
+               printf("%s", buf);
+       else
+               show_textbox(NULL, buf, 6, 60);
+}
+
 static void show_help(struct menu *menu)
 {
        struct gstr help = str_new();
@@ -870,6 +880,7 @@ static int handle_exit(void)
 {
        int res;
 
+       save_and_exit = 1;
        dialog_clear();
        if (conf_get_changed())
                res = dialog_yesno(NULL,
@@ -941,6 +952,7 @@ int main(int ac, char **av)
        }
 
        set_config_filename(conf_get_configname());
+       conf_set_message_callback(conf_message_callback);
        do {
                conf(&rootmenu, NULL);
                res = handle_exit();
index e98a05c..f3bffa3 100644 (file)
@@ -48,7 +48,7 @@ void menu_add_entry(struct symbol *sym)
 {
        struct menu *menu;
 
-       menu = malloc(sizeof(*menu));
+       menu = xmalloc(sizeof(*menu));
        memset(menu, 0, sizeof(*menu));
        menu->sym = sym;
        menu->parent = current_menu;
@@ -531,7 +531,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
                        location = menu;
        }
        if (head && location) {
-               jump = malloc(sizeof(struct jump_key));
+               jump = xmalloc(sizeof(struct jump_key));
 
                if (menu_is_visible(prop->menu)) {
                        /*
index 974d5cb..05274fc 100755 (executable)
@@ -32,11 +32,13 @@ usage() {
        echo "  -m    only merge the fragments, do not execute the make command"
        echo "  -n    use allnoconfig instead of alldefconfig"
        echo "  -r    list redundant entries when merging fragments"
+       echo "  -O    dir to put generated output files"
 }
 
 MAKE=true
 ALLTARGET=alldefconfig
 WARNREDUN=false
+OUTPUT=.
 
 while true; do
        case $1 in
@@ -59,6 +61,16 @@ while true; do
                shift
                continue
                ;;
+       "-O")
+               if [ -d $2 ];then
+                       OUTPUT=$(echo $2 | sed 's/\/*$//')
+               else
+                       echo "output directory $2 does not exist" 1>&2
+                       exit 1
+               fi
+               shift 2
+               continue
+               ;;
        *)
                break
                ;;
@@ -100,9 +112,9 @@ for MERGE_FILE in $MERGE_LIST ; do
 done
 
 if [ "$MAKE" = "false" ]; then
-       cp $TMP_FILE .config
+       cp $TMP_FILE $OUTPUT/.config
        echo "#"
-       echo "# merged configuration written to .config (needs make)"
+       echo "# merged configuration written to $OUTPUT/.config (needs make)"
        echo "#"
        clean_up
        exit
@@ -111,14 +123,14 @@ fi
 # Use the merged file as the starting point for:
 # alldefconfig: Fills in any missing symbols with Kconfig default
 # allnoconfig: Fills in any missing symbols with # CONFIG_* is not set
-make KCONFIG_ALLCONFIG=$TMP_FILE $ALLTARGET
+make KCONFIG_ALLCONFIG=$TMP_FILE O=$OUTPUT $ALLTARGET
 
 
 # Check all specified config values took (might have missed-dependency issues)
 for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do
 
        REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
-       ACTUAL_VAL=$(grep -w -e "$CFG" .config)
+       ACTUAL_VAL=$(grep -w -e "$CFG" $OUTPUT/.config)
        if [ "x$REQUESTED_VAL" != "x$ACTUAL_VAL" ] ; then
                echo "Value requested for $CFG not in final .config"
                echo "Requested value:  $REQUESTED_VAL"
index 87d4b15..dbf31ed 100644 (file)
  */
 #define _GNU_SOURCE
 #include <string.h>
+#include <stdlib.h>
 
 #include "lkc.h"
 #include "nconf.h"
 #include <ctype.h>
 
-static const char nconf_readme[] = N_(
-"Overview\n"
-"--------\n"
-"This interface let you select features and parameters for the build.\n"
-"Features can either be built-in, modularized, or ignored. Parameters\n"
-"must be entered in as decimal or hexadecimal numbers or text.\n"
+static const char nconf_global_help[] = N_(
+"Help windows\n"
+"------------\n"
+"o  Global help:  Unless in a data entry window, pressing <F1> will give \n"
+"   you the global help window, which you are just reading.\n"
 "\n"
-"Menu items beginning with following braces represent features that\n"
-"  [ ] can be built in or removed\n"
-"  < > can be built in, modularized or removed\n"
-"  { } can be built in or modularized (selected by other feature)\n"
-"  - - are selected by other feature,\n"
-"  XXX cannot be selected. Use Symbol Info to find out why,\n"
-"while *, M or whitespace inside braces means to build in, build as\n"
-"a module or to exclude the feature respectively.\n"
+"o  A short version of the global help is available by pressing <F3>.\n"
 "\n"
-"To change any of these features, highlight it with the cursor\n"
-"keys and press <Y> to build it in, <M> to make it a module or\n"
-"<N> to removed it.  You may also press the <Space Bar> to cycle\n"
-"through the available options (ie. Y->N->M->Y).\n"
+"o  Local help:  To get help related to the current menu entry, use any\n"
+"   of <?> <h>, or if in a data entry window then press <F1>.\n"
 "\n"
-"Some additional keyboard hints:\n"
 "\n"
-"Menus\n"
-"----------\n"
-"o  Use the Up/Down arrow keys (cursor keys) to highlight the item\n"
-"   you wish to change use <Enter> or <Space>. Goto submenu by \n"
-"   pressing <Enter> of <right-arrow>. Use <Esc> or <left-arrow> to go back.\n"
-"   Submenus are designated by \"--->\".\n"
-"\n"
-"   Searching: pressing '/' triggers interactive search mode.\n"
-"              nconfig performs a case insensitive search for the string\n"
-"              in the menu prompts (no regex support).\n"
-"              Pressing the up/down keys highlights the previous/next\n"
-"              matching item. Backspace removes one character from the\n"
-"              match string. Pressing either '/' again or ESC exits\n"
-"              search mode. All other keys behave normally.\n"
+"Menu entries\n"
+"------------\n"
+"This interface lets you select features and parameters for the kernel\n"
+"build.  Kernel features can either be built-in, modularized, or removed.\n"
+"Parameters must be entered as text or decimal or hexadecimal numbers.\n"
 "\n"
-"   You may also use the <PAGE UP> and <PAGE DOWN> keys to scroll\n"
-"   unseen options into view.\n"
+"Menu entries beginning with following braces represent features that\n"
+"  [ ]  can be built in or removed\n"
+"  < >  can be built in, modularized or removed\n"
+"  { }  can be built in or modularized, are selected by another feature\n"
+"  - -  are selected by another feature\n"
+"  XXX  cannot be selected.  Symbol Info <F2> tells you why.\n"
+"*, M or whitespace inside braces means to build in, build as a module\n"
+"or to exclude the feature respectively.\n"
 "\n"
-"o  To exit a menu use the just press <ESC> <F5> <F8> or <left-arrow>.\n"
+"To change any of these features, highlight it with the movement keys\n"
+"listed below and press <y> to build it in, <m> to make it a module or\n"
+"<n> to remove it.  You may press the <Space> key to cycle through the\n"
+"available options.\n"
 "\n"
-"o  To get help with an item, press <F1>\n"
-"   Shortcut: Press <h> or <?>.\n"
+"A trailing \"--->\" designates a submenu.\n"
 "\n"
 "\n"
-"Radiolists  (Choice lists)\n"
-"-----------\n"
-"o  Use the cursor keys to select the option you wish to set and press\n"
-"   <S> or the <SPACE BAR>.\n"
+"Menu navigation keys\n"
+"----------------------------------------------------------------------\n"
+"Linewise up                 <Up>\n"
+"Linewise down               <Down>\n"
+"Pagewise up                 <Page Up>\n"
+"Pagewise down               <Page Down>\n"
+"First entry                 <Home>\n"
+"Last entry                  <End>\n"
+"Enter a submenu             <Right>  <Enter>\n"
+"Go back to parent menu      <Left>   <Esc>  <F5>\n"
+"Close a help window         <Enter>  <Esc>  <F5>\n"
+"Close entry window, apply   <Enter>\n"
+"Close entry window, forget  <Esc>  <F5>\n"
+"Start incremental, case-insensitive search for STRING in menu entries,\n"
+"    no regex support, STRING is displayed in upper left corner\n"
+"                            </>STRING\n"
+"    Remove last character   <Backspace>\n"
+"    Jump to next hit        <Down>\n"
+"    Jump to previous hit    <Up>\n"
+"Exit menu search mode       </>  <Esc>\n"
+"Search for configuration variables with or without leading CONFIG_\n"
+"                            <F8>RegExpr<Enter>\n"
+"Verbose search help         <F8><F1>\n"
+"----------------------------------------------------------------------\n"
 "\n"
-"   Shortcut: Press the first letter of the option you wish to set then\n"
-"             press <S> or <SPACE BAR>.\n"
+"Unless in a data entry window, key <1> may be used instead of <F1>,\n"
+"<2> instead of <F2>, etc.\n"
 "\n"
-"o  To see available help for the item, press <F1>\n"
-"   Shortcut: Press <H> or <?>.\n"
 "\n"
+"Radiolist (Choice list)\n"
+"-----------------------\n"
+"Use the movement keys listed above to select the option you wish to set\n"
+"and press <Space>.\n"
 "\n"
-"Data Entry\n"
-"-----------\n"
-"o  Enter the requested information and press <ENTER>\n"
-"   If you are entering hexadecimal values, it is not necessary to\n"
-"   add the '0x' prefix to the entry.\n"
 "\n"
-"o  For help, press <F1>.\n"
+"Data entry\n"
+"----------\n"
+"Enter the requested information and press <Enter>.  Hexadecimal values\n"
+"may be entered without the \"0x\" prefix.\n"
 "\n"
 "\n"
-"Text Box    (Help Window)\n"
-"--------\n"
-"o  Use the cursor keys to scroll up/down/left/right.  The VI editor\n"
-"   keys h,j,k,l function here as do <u>, <d> and <SPACE BAR> for\n"
-"   those who are familiar with less and lynx.\n"
+"Text Box (Help Window)\n"
+"----------------------\n"
+"Use movement keys as listed in table above.\n"
 "\n"
-"o  Press <Enter>, <F1>, <F5>, <F9>, <q> or <Esc> to exit.\n"
+"Press any of <Enter> <Esc> <q> <F5> <F9> to exit.\n"
 "\n"
 "\n"
-"Alternate Configuration Files\n"
+"Alternate configuration files\n"
 "-----------------------------\n"
-"nconfig supports the use of alternate configuration files for\n"
-"those who, for various reasons, find it necessary to switch\n"
-"between different configurations.\n"
+"nconfig supports switching between different configurations.\n"
+"Press <F6> to save your current configuration.  Press <F7> and enter\n"
+"a file name to load a previously saved configuration.\n"
 "\n"
-"At the end of the main menu you will find two options.  One is\n"
-"for saving the current configuration to a file of your choosing.\n"
-"The other option is for loading a previously saved alternate\n"
-"configuration.\n"
 "\n"
-"Even if you don't use alternate configuration files, but you\n"
-"find during a nconfig session that you have completely messed\n"
-"up your settings, you may use the \"Load Alternate...\" option to\n"
-"restore your previously saved settings from \".config\" without\n"
-"restarting nconfig.\n"
+"Terminal configuration\n"
+"----------------------\n"
+"If you use nconfig in a xterm window, make sure your TERM environment\n"
+"variable specifies a terminal configuration which supports at least\n"
+"16 colors.  Otherwise nconfig will look rather bad.\n"
 "\n"
-"Other information\n"
-"-----------------\n"
-"If you use nconfig in an XTERM window make sure you have your\n"
-"$TERM variable set to point to a xterm definition which supports color.\n"
-"Otherwise, nconfig will look rather bad.  nconfig will not\n"
-"display correctly in a RXVT window because rxvt displays only one\n"
-"intensity of color, bright.\n"
+"If the \"stty size\" command reports the current terminal size correctly,\n"
+"nconfig will adapt to sizes larger than the traditional 80x25 \"standard\"\n"
+"and display longer menus properly.\n"
 "\n"
-"nconfig will display larger menus on screens or xterms which are\n"
-"set to display more than the standard 25 row by 80 column geometry.\n"
-"In order for this to work, the \"stty size\" command must be able to\n"
-"display the screen's current row and column geometry.  I STRONGLY\n"
-"RECOMMEND that you make sure you do NOT have the shell variables\n"
-"LINES and COLUMNS exported into your environment.  Some distributions\n"
-"export those variables via /etc/profile.  Some ncurses programs can\n"
-"become confused when those variables (LINES & COLUMNS) don't reflect\n"
-"the true screen size.\n"
 "\n"
-"Optional personality available\n"
-"------------------------------\n"
-"If you prefer to have all of the options listed in a single menu, rather\n"
-"than the default multimenu hierarchy, run the nconfig with NCONFIG_MODE\n"
-"environment variable set to single_menu. Example:\n"
+"Single menu mode\n"
+"----------------\n"
+"If you prefer to have all of the menu entries listed in a single menu,\n"
+"rather than the default multimenu hierarchy, run nconfig with\n"
+"NCONFIG_MODE environment variable set to single_menu.  Example:\n"
 "\n"
 "make NCONFIG_MODE=single_menu nconfig\n"
 "\n"
-"<Enter> will then unroll the appropriate category, or enfold it if it\n"
-"is already unrolled.\n"
+"<Enter> will then unfold the appropriate category, or fold it if it\n"
+"is already unfolded.  Folded menu entries will be designated by a\n"
+"leading \"++>\" and unfolded entries by a leading \"-->\".\n"
 "\n"
-"Note that this mode can eventually be a little more CPU expensive\n"
-"(especially with a larger number of unrolled categories) than the\n"
-"default mode.\n"
+"Note that this mode can eventually be a little more CPU expensive than\n"
+"the default mode, especially with a larger number of unfolded submenus.\n"
 "\n"),
 menu_no_f_instructions[] = N_(
-" You do not have function keys support. Please follow the\n"
-" following instructions:\n"
-" Arrow keys navigate the menu.\n"
-" <Enter> or <right-arrow> selects submenus --->.\n"
-" Capital Letters are hotkeys.\n"
-" Pressing <Y> includes, <N> excludes, <M> modularizes features.\n"
-" Pressing SpaceBar toggles between the above options.\n"
-" Press <Esc> or <left-arrow> to go back one menu,\n"
-" <?> or <h> for Help, </> for Search.\n"
-" <1> is interchangeable with <F1>, <2> with <F2>, etc.\n"
-" Legend: [*] built-in  [ ] excluded  <M> module  < > module capable.\n"
-" <Esc> always leaves the current window.\n"),
+"Legend:  [*] built-in  [ ] excluded  <M> module  < > module capable.\n"
+"Submenus are designated by a trailing \"--->\".\n"
+"\n"
+"Use the following keys to navigate the menus:\n"
+"Move up or down with <Up> and <Down>.\n"
+"Enter a submenu with <Enter> or <Right>.\n"
+"Exit a submenu to its parent menu with <Esc> or <Left>.\n"
+"Pressing <y> includes, <n> excludes, <m> modularizes features.\n"
+"Pressing <Space> cycles through the available options.\n"
+"To search for menu entries press </>.\n"
+"<Esc> always leaves the current window.\n"
+"\n"
+"You do not have function keys support.\n"
+"Press <1> instead of <F1>, <2> instead of <F2>, etc.\n"
+"For verbose global help use key <1>.\n"
+"For help related to the current menu entry press <?> or <h>.\n"),
 menu_instructions[] = N_(
-" Arrow keys navigate the menu.\n"
-" <Enter> or <right-arrow> selects submenus --->.\n"
-" Capital Letters are hotkeys.\n"
-" Pressing <Y> includes, <N> excludes, <M> modularizes features.\n"
-" Pressing SpaceBar toggles between the above options\n"
-" Press <Esc>, <F5> or <left-arrow> to go back one menu,\n"
-" <?>, <F1> or <h> for Help, </> for Search.\n"
-" <1> is interchangeable with <F1>, <2> with <F2>, etc.\n"
-" Legend: [*] built-in  [ ] excluded  <M> module  < > module capable.\n"
-" <Esc> always leaves the current window\n"),
+"Legend:  [*] built-in  [ ] excluded  <M> module  < > module capable.\n"
+"Submenus are designated by a trailing \"--->\".\n"
+"\n"
+"Use the following keys to navigate the menus:\n"
+"Move up or down with <Up> or <Down>.\n"
+"Enter a submenu with <Enter> or <Right>.\n"
+"Exit a submenu to its parent menu with <Esc> or <Left>.\n"
+"Pressing <y> includes, <n> excludes, <m> modularizes features.\n"
+"Pressing <Space> cycles through the available options.\n"
+"To search for menu entries press </>.\n"
+"<Esc> always leaves the current window.\n"
+"\n"
+"Pressing <1> may be used instead of <F1>, <2> instead of <F2>, etc.\n"
+"For verbose global help press <F1>.\n"
+"For help related to the current menu entry press <?> or <h>.\n"),
 radiolist_instructions[] = N_(
-" Use the arrow keys to navigate this window or\n"
-" press the hotkey of the item you wish to select\n"
-" followed by the <SPACE BAR>.\n"
-" Press <?>, <F1> or <h> for additional information about this option.\n"),
+"Press <Up>, <Down>, <Home> or <End> to navigate a radiolist, select\n"
+"with <Space>.\n"
+"For help related to the current entry press <?> or <h>.\n"
+"For global help press <F1>.\n"),
 inputbox_instructions_int[] = N_(
 "Please enter a decimal value.\n"
 "Fractions will not be accepted.\n"
-"Press <RETURN> to accept, <ESC> to cancel."),
+"Press <Enter> to apply, <Esc> to cancel."),
 inputbox_instructions_hex[] = N_(
 "Please enter a hexadecimal value.\n"
-"Press <RETURN> to accept, <ESC> to cancel."),
+"Press <Enter> to apply, <Esc> to cancel."),
 inputbox_instructions_string[] = N_(
 "Please enter a string value.\n"
-"Press <RETURN> to accept, <ESC> to cancel."),
+"Press <Enter> to apply, <Esc> to cancel."),
 setmod_text[] = N_(
-"This feature depends on another which\n"
-"has been configured as a module.\n"
-"As a result, this feature will be built as a module."),
+"This feature depends on another feature which has been configured as a\n"
+"module.  As a result, the current feature will be built as a module too."),
 load_config_text[] = N_(
 "Enter the name of the configuration file you wish to load.\n"
-"Accept the name shown to restore the configuration you\n"
-"last retrieved.  Leave blank to abort."),
+"Accept the name shown to restore the configuration you last\n"
+"retrieved.  Leave empty to abort."),
 load_config_help[] = N_(
-"\n"
 "For various reasons, one may wish to keep several different\n"
 "configurations available on a single machine.\n"
 "\n"
 "If you have saved a previous configuration in a file other than the\n"
-"default one, entering its name here will allow you to modify that\n"
-"configuration.\n"
+"default one, entering its name here will allow you to load and modify\n"
+"that configuration.\n"
 "\n"
-"If you are uncertain, then you have probably never used alternate\n"
-"configuration files.  You should therefor leave this blank to abort.\n"),
+"Leave empty to abort.\n"),
 save_config_text[] = N_(
 "Enter a filename to which this configuration should be saved\n"
-"as an alternate.  Leave blank to abort."),
+"as an alternate.  Leave empty to abort."),
 save_config_help[] = N_(
-"\n"
-"For various reasons, one may wish to keep different configurations\n"
-"available on a single machine.\n"
+"For various reasons, one may wish to keep several different\n"
+"configurations available on a single machine.\n"
 "\n"
 "Entering a file name here will allow you to later retrieve, modify\n"
 "and use the current configuration as an alternate to whatever\n"
 "configuration options you have selected at that time.\n"
 "\n"
-"If you are uncertain what all this means then you should probably\n"
-"leave this blank.\n"),
+"Leave empty to abort.\n"),
 search_help[] = N_(
-"\n"
-"Search for symbols and display their relations. Regular expressions\n"
-"are allowed.\n"
-"Example: search for \"^FOO\"\n"
+"Search for symbols (configuration variable names CONFIG_*) and display\n"
+"their relations.  Regular expressions are supported.\n"
+"Example:  Search for \"^FOO\".\n"
 "Result:\n"
 "-----------------------------------------------------------------\n"
 "Symbol: FOO [ = m]\n"
@@ -229,26 +222,26 @@ search_help[] = N_(
 "Selects: LIBCRC32\n"
 "Selected by: BAR\n"
 "-----------------------------------------------------------------\n"
-"o The line 'Prompt:' shows the text used in the menu structure for\n"
-"  this symbol\n"
-"o The 'Defined at' line tell at what file / line number the symbol\n"
-"  is defined\n"
-"o The 'Depends on:' line tell what symbols needs to be defined for\n"
-"  this symbol to be visible in the menu (selectable)\n"
-"o The 'Location:' lines tell where in the menu structure this symbol\n"
-"  is located\n"
-"    A location followed by a [ = y] indicate that this is a selectable\n"
-"    menu item - and current value is displayed inside brackets.\n"
-"o The 'Selects:' line tell what symbol will be automatically\n"
-"  selected if this symbol is selected (y or m)\n"
-"o The 'Selected by' line tell what symbol has selected this symbol\n"
+"o  The line 'Prompt:' shows the text displayed for this symbol in\n"
+"   the menu hierarchy.\n"
+"o  The 'Defined at' line tells at what file / line number the symbol is\n"
+"   defined.\n"
+"o  The 'Depends on:' line lists symbols that need to be defined for\n"
+"   this symbol to be visible and selectable in the menu.\n"
+"o  The 'Location:' lines tell where in the menu structure this symbol\n"
+"   is located.  A location followed by a [ = y] indicates that this is\n"
+"   a selectable menu item, and the current value is displayed inside\n"
+"   brackets.\n"
+"o  The 'Selects:' line tells what symbol will be automatically selected\n"
+"   if this symbol is selected (y or m).\n"
+"o  The 'Selected by' line tells what symbol has selected this symbol.\n"
 "\n"
 "Only relevant lines are shown.\n"
 "\n\n"
 "Search examples:\n"
-"Examples: USB  => find all symbols containing USB\n"
-"          ^USB => find all symbols starting with USB\n"
-"          USB$ => find all symbols ending with USB\n"
+"USB  => find all symbols containing USB\n"
+"^USB => find all symbols starting with USB\n"
+"USB$ => find all symbols ending with USB\n"
 "\n");
 
 struct mitem {
@@ -319,19 +312,19 @@ struct function_keys function_keys[] = {
        },
        {
                .key_str = "F2",
-               .func = "Sym Info",
+               .func = "SymInfo",
                .key = F_SYMBOL,
                .handler = handle_f2,
        },
        {
                .key_str = "F3",
-               .func = "Insts",
+               .func = "Help 2",
                .key = F_INSTS,
                .handler = handle_f3,
        },
        {
                .key_str = "F4",
-               .func = "Config",
+               .func = "ShowAll",
                .key = F_CONF,
                .handler = handle_f4,
        },
@@ -355,7 +348,7 @@ struct function_keys function_keys[] = {
        },
        {
                .key_str = "F8",
-               .func = "Sym Search",
+               .func = "SymSearch",
                .key = F_SEARCH,
                .handler = handle_f8,
        },
@@ -392,7 +385,7 @@ static void print_function_line(void)
 static void handle_f1(int *key, struct menu *current_item)
 {
        show_scroll_win(main_window,
-                       _("README"), _(nconf_readme));
+                       _("Global help"), _(nconf_global_help));
        return;
 }
 
@@ -407,7 +400,7 @@ static void handle_f2(int *key, struct menu *current_item)
 static void handle_f3(int *key, struct menu *current_item)
 {
        show_scroll_win(main_window,
-                       _("Instructions"),
+                       _("Short help"),
                        _(current_instructions));
        return;
 }
@@ -696,13 +689,18 @@ static void search_conf(void)
 {
        struct symbol **sym_arr;
        struct gstr res;
+       struct gstr title;
        char *dialog_input;
        int dres;
+
+       title = str_new();
+       str_printf( &title, _("Enter %s (sub)string to search for "
+                             "(with or without \"%s\")"), CONFIG_, CONFIG_);
+
 again:
        dres = dialog_inputbox(main_window,
                        _("Search Configuration Parameter"),
-                       _("Enter " CONFIG_ " (sub)string to search for "
-                               "(with or without \"" CONFIG_ "\")"),
+                       str_get(&title),
                        "", &dialog_input_result, &dialog_input_result_len);
        switch (dres) {
        case 0:
@@ -712,6 +710,7 @@ again:
                                _("Search Configuration"), search_help);
                goto again;
        default:
+               str_free(&title);
                return;
        }
 
@@ -726,6 +725,7 @@ again:
        show_scroll_win(main_window,
                        _("Search Results"), str_get(&res));
        str_free(&res);
+       str_free(&title);
 }
 
 
index 379003c..9f8c44e 100644 (file)
@@ -48,7 +48,7 @@ static void set_normal_colors(void)
        init_pair(INPUT_FIELD, -1, -1);
 
        init_pair(FUNCTION_HIGHLIGHT, -1, -1);
-       init_pair(FUNCTION_TEXT, COLOR_BLUE, -1);
+       init_pair(FUNCTION_TEXT, COLOR_YELLOW, -1);
 }
 
 /* available attributes:
index df274fe..1500c38 100644 (file)
@@ -6,6 +6,7 @@
 #include <qglobal.h>
 
 #if QT_VERSION < 0x040000
+#include <stddef.h>
 #include <qmainwindow.h>
 #include <qvbox.h>
 #include <qvaluelist.h>
index 22a3c40..ecc5aa5 100644 (file)
@@ -656,11 +656,11 @@ bool sym_set_string_value(struct symbol *sym, const char *newval)
        size = strlen(newval) + 1;
        if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) {
                size += 2;
-               sym->def[S_DEF_USER].val = val = malloc(size);
+               sym->def[S_DEF_USER].val = val = xmalloc(size);
                *val++ = '0';
                *val++ = 'x';
        } else if (!oldval || strcmp(oldval, newval))
-               sym->def[S_DEF_USER].val = val = malloc(size);
+               sym->def[S_DEF_USER].val = val = xmalloc(size);
        else
                return true;
 
@@ -812,7 +812,7 @@ struct symbol *sym_lookup(const char *name, int flags)
                hash = 0;
        }
 
-       symbol = malloc(sizeof(*symbol));
+       symbol = xmalloc(sizeof(*symbol));
        memset(symbol, 0, sizeof(*symbol));
        symbol->name = new_name;
        symbol->type = S_UNKNOWN;
@@ -863,7 +863,7 @@ const char *sym_expand_string_value(const char *in)
        size_t reslen;
 
        reslen = strlen(in) + 1;
-       res = malloc(reslen);
+       res = xmalloc(reslen);
        res[0] = '\0';
 
        while ((src = strchr(in, '$'))) {
@@ -921,7 +921,7 @@ const char *sym_escape_string_value(const char *in)
                p++;
        }
 
-       res = malloc(reslen);
+       res = xmalloc(reslen);
        res[0] = '\0';
 
        strcat(res, "\"");
@@ -1228,7 +1228,7 @@ struct property *prop_alloc(enum prop_type type, struct symbol *sym)
        struct property *prop;
        struct property **propp;
 
-       prop = malloc(sizeof(*prop));
+       prop = xmalloc(sizeof(*prop));
        memset(prop, 0, sizeof(*prop));
        prop->type = type;
        prop->sym = sym;
index d0b8b23..6e7fbf1 100644 (file)
@@ -23,7 +23,7 @@ struct file *file_lookup(const char *name)
                }
        }
 
-       file = malloc(sizeof(*file));
+       file = xmalloc(sizeof(*file));
        memset(file, 0, sizeof(*file));
        file->name = file_name;
        file->next = file_list;
@@ -81,7 +81,7 @@ int file_write_dep(const char *name)
 struct gstr str_new(void)
 {
        struct gstr gs;
-       gs.s = malloc(sizeof(char) * 64);
+       gs.s = xmalloc(sizeof(char) * 64);
        gs.len = 64;
        gs.max_width = 0;
        strcpy(gs.s, "\0");
@@ -138,3 +138,22 @@ const char *str_get(struct gstr *gs)
        return gs->s;
 }
 
+void *xmalloc(size_t size)
+{
+       void *p = malloc(size);
+       if (p)
+               return p;
+       fprintf(stderr, "Out of memory.\n");
+       exit(1);
+}
+
+void *xcalloc(size_t nmemb, size_t size)
+{
+       void *p = calloc(nmemb, size);
+       if (p)
+               return p;
+       fprintf(stderr, "Out of memory.\n");
+       exit(1);
+}
+
+
index 00f9d3a..6555a47 100644 (file)
@@ -40,7 +40,7 @@ static void zconf_endfile(void);
 
 static void new_string(void)
 {
-       text = malloc(START_STRSIZE);
+       text = xmalloc(START_STRSIZE);
        text_asize = START_STRSIZE;
        text_size = 0;
        *text = 0;
@@ -62,7 +62,7 @@ static void append_string(const char *str, int size)
 
 static void alloc_string(const char *str, int size)
 {
-       text = malloc(size + 1);
+       text = xmalloc(size + 1);
        memcpy(text, str, size);
        text[size] = 0;
 }
@@ -288,7 +288,7 @@ void zconf_initscan(const char *name)
                exit(1);
        }
 
-       current_buf = malloc(sizeof(*current_buf));
+       current_buf = xmalloc(sizeof(*current_buf));
        memset(current_buf, 0, sizeof(*current_buf));
 
        current_file = file_lookup(name);
@@ -299,7 +299,7 @@ void zconf_nextfile(const char *name)
 {
        struct file *iter;
        struct file *file = file_lookup(name);
-       struct buffer *buf = malloc(sizeof(*buf));
+       struct buffer *buf = xmalloc(sizeof(*buf));
        memset(buf, 0, sizeof(*buf));
 
        current_buf->state = YY_CURRENT_BUFFER;
index c32b1a4..a0521aa 100644 (file)
@@ -802,7 +802,7 @@ static void zconf_endfile(void);
 
 static void new_string(void)
 {
-       text = malloc(START_STRSIZE);
+       text = xmalloc(START_STRSIZE);
        text_asize = START_STRSIZE;
        text_size = 0;
        *text = 0;
@@ -824,7 +824,7 @@ static void append_string(const char *str, int size)
 
 static void alloc_string(const char *str, int size)
 {
-       text = malloc(size + 1);
+       text = xmalloc(size + 1);
        memcpy(text, str, size);
        text[size] = 0;
 }
@@ -2343,7 +2343,7 @@ void zconf_initscan(const char *name)
                exit(1);
        }
 
-       current_buf = malloc(sizeof(*current_buf));
+       current_buf = xmalloc(sizeof(*current_buf));
        memset(current_buf, 0, sizeof(*current_buf));
 
        current_file = file_lookup(name);
@@ -2354,7 +2354,7 @@ void zconf_nextfile(const char *name)
 {
        struct file *iter;
        struct file *file = file_lookup(name);
-       struct buffer *buf = malloc(sizeof(*buf));
+       struct buffer *buf = xmalloc(sizeof(*buf));
        memset(buf, 0, sizeof(*buf));
 
        current_buf->state = YY_CURRENT_BUFFER;
index f565536..4305b2f 100755 (executable)
@@ -1750,7 +1750,7 @@ sub dump_struct($$) {
        # strip kmemcheck_bitfield_{begin,end}.*;
        $members =~ s/kmemcheck_bitfield_.*?;//gos;
        # strip attributes
-       $members =~ s/__aligned\s*\(\d+\)//gos;
+       $members =~ s/__aligned\s*\(.+\)//gos;
 
        create_parameterlist($members, ';', $file);
        check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
index b3d907e..3d569d6 100644 (file)
@@ -132,7 +132,14 @@ if [ "$1" = "clean" ]; then
 fi
 
 # We need access to CONFIG_ symbols
-. ./.config
+case "${KCONFIG_CONFIG}" in
+*/*)
+       . "${KCONFIG_CONFIG}"
+       ;;
+*)
+       # Force using a file from the current directory
+       . "./${KCONFIG_CONFIG}"
+esac
 
 #link vmlinux.o
 info LD vmlinux.o
index e9b7abe..33bae0d 100644 (file)
@@ -1,4 +1,5 @@
 elfconfig.h
 mk_elfconfig
 modpost
+devicetable-offsets.h
 
index ff954f8..9415b56 100644 (file)
@@ -3,9 +3,44 @@ always         := $(hostprogs-y) empty.o
 
 modpost-objs   := modpost.o file2alias.o sumversion.o
 
+devicetable-offsets-file := devicetable-offsets.h
+
+define sed-y
+       "/^->/{s:->#\(.*\):/* \1 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+       s:->::; p;}"
+endef
+
+quiet_cmd_offsets = GEN     $@
+define cmd_offsets
+       (set -e; \
+        echo "#ifndef __DEVICETABLE_OFFSETS_H__"; \
+        echo "#define __DEVICETABLE_OFFSETS_H__"; \
+        echo "/*"; \
+        echo " * DO NOT MODIFY."; \
+        echo " *"; \
+        echo " * This file was generated by Kbuild"; \
+        echo " *"; \
+        echo " */"; \
+        echo ""; \
+        sed -ne $(sed-y) $<; \
+        echo ""; \
+        echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+scripts/mod/devicetable-offsets.s: scripts/mod/devicetable-offsets.c FORCE
+       $(Q)mkdir -p $(dir $@)
+       $(call if_changed_dep,cc_s_c)
+
+$(obj)/$(devicetable-offsets-file): scripts/mod/devicetable-offsets.s
+       $(call cmd,offsets)
+
 # dependencies on generated files need to be listed explicitly
 
 $(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
+$(obj)/file2alias.o: $(obj)/$(devicetable-offsets-file)
 
 quiet_cmd_elfconfig = MKELF   $@
       cmd_elfconfig = $(obj)/mk_elfconfig < $< > $@
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
new file mode 100644 (file)
index 0000000..b45260b
--- /dev/null
@@ -0,0 +1,178 @@
+#include <linux/kbuild.h>
+#include <linux/mod_devicetable.h>
+
+#define DEVID(devid) DEFINE(SIZE_##devid, sizeof(struct devid))
+#define DEVID_FIELD(devid, field) \
+       DEFINE(OFF_##devid##_##field, offsetof(struct devid, field))
+
+int main(void)
+{
+       DEVID(usb_device_id);
+       DEVID_FIELD(usb_device_id, match_flags);
+       DEVID_FIELD(usb_device_id, idVendor);
+       DEVID_FIELD(usb_device_id, idProduct);
+       DEVID_FIELD(usb_device_id, bcdDevice_lo);
+       DEVID_FIELD(usb_device_id, bcdDevice_hi);
+       DEVID_FIELD(usb_device_id, bDeviceClass);
+       DEVID_FIELD(usb_device_id, bDeviceSubClass);
+       DEVID_FIELD(usb_device_id, bDeviceProtocol);
+       DEVID_FIELD(usb_device_id, bInterfaceClass);
+       DEVID_FIELD(usb_device_id, bInterfaceSubClass);
+       DEVID_FIELD(usb_device_id, bInterfaceProtocol);
+       DEVID_FIELD(usb_device_id, bInterfaceNumber);
+
+       DEVID(hid_device_id);
+       DEVID_FIELD(hid_device_id, bus);
+       DEVID_FIELD(hid_device_id, group);
+       DEVID_FIELD(hid_device_id, vendor);
+       DEVID_FIELD(hid_device_id, product);
+
+       DEVID(ieee1394_device_id);
+       DEVID_FIELD(ieee1394_device_id, match_flags);
+       DEVID_FIELD(ieee1394_device_id, vendor_id);
+       DEVID_FIELD(ieee1394_device_id, model_id);
+       DEVID_FIELD(ieee1394_device_id, specifier_id);
+       DEVID_FIELD(ieee1394_device_id, version);
+
+       DEVID(pci_device_id);
+       DEVID_FIELD(pci_device_id, vendor);
+       DEVID_FIELD(pci_device_id, device);
+       DEVID_FIELD(pci_device_id, subvendor);
+       DEVID_FIELD(pci_device_id, subdevice);
+       DEVID_FIELD(pci_device_id, class);
+       DEVID_FIELD(pci_device_id, class_mask);
+
+       DEVID(ccw_device_id);
+       DEVID_FIELD(ccw_device_id, match_flags);
+       DEVID_FIELD(ccw_device_id, cu_type);
+       DEVID_FIELD(ccw_device_id, cu_model);
+       DEVID_FIELD(ccw_device_id, dev_type);
+       DEVID_FIELD(ccw_device_id, dev_model);
+
+       DEVID(ap_device_id);
+       DEVID_FIELD(ap_device_id, dev_type);
+
+       DEVID(css_device_id);
+       DEVID_FIELD(css_device_id, type);
+
+       DEVID(serio_device_id);
+       DEVID_FIELD(serio_device_id, type);
+       DEVID_FIELD(serio_device_id, proto);
+       DEVID_FIELD(serio_device_id, id);
+       DEVID_FIELD(serio_device_id, extra);
+
+       DEVID(acpi_device_id);
+       DEVID_FIELD(acpi_device_id, id);
+
+       DEVID(pnp_device_id);
+       DEVID_FIELD(pnp_device_id, id);
+
+       DEVID(pnp_card_device_id);
+       DEVID_FIELD(pnp_card_device_id, devs);
+
+       DEVID(pcmcia_device_id);
+       DEVID_FIELD(pcmcia_device_id, match_flags);
+       DEVID_FIELD(pcmcia_device_id, manf_id);
+       DEVID_FIELD(pcmcia_device_id, card_id);
+       DEVID_FIELD(pcmcia_device_id, func_id);
+       DEVID_FIELD(pcmcia_device_id, function);
+       DEVID_FIELD(pcmcia_device_id, device_no);
+       DEVID_FIELD(pcmcia_device_id, prod_id_hash);
+
+       DEVID(of_device_id);
+       DEVID_FIELD(of_device_id, name);
+       DEVID_FIELD(of_device_id, type);
+       DEVID_FIELD(of_device_id, compatible);
+
+       DEVID(vio_device_id);
+       DEVID_FIELD(vio_device_id, type);
+       DEVID_FIELD(vio_device_id, compat);
+
+       DEVID(input_device_id);
+       DEVID_FIELD(input_device_id, flags);
+       DEVID_FIELD(input_device_id, bustype);
+       DEVID_FIELD(input_device_id, vendor);
+       DEVID_FIELD(input_device_id, product);
+       DEVID_FIELD(input_device_id, version);
+       DEVID_FIELD(input_device_id, evbit);
+       DEVID_FIELD(input_device_id, keybit);
+       DEVID_FIELD(input_device_id, relbit);
+       DEVID_FIELD(input_device_id, absbit);
+       DEVID_FIELD(input_device_id, mscbit);
+       DEVID_FIELD(input_device_id, ledbit);
+       DEVID_FIELD(input_device_id, sndbit);
+       DEVID_FIELD(input_device_id, ffbit);
+       DEVID_FIELD(input_device_id, swbit);
+
+       DEVID(eisa_device_id);
+       DEVID_FIELD(eisa_device_id, sig);
+
+       DEVID(parisc_device_id);
+       DEVID_FIELD(parisc_device_id, hw_type);
+       DEVID_FIELD(parisc_device_id, hversion);
+       DEVID_FIELD(parisc_device_id, hversion_rev);
+       DEVID_FIELD(parisc_device_id, sversion);
+
+       DEVID(sdio_device_id);
+       DEVID_FIELD(sdio_device_id, class);
+       DEVID_FIELD(sdio_device_id, vendor);
+       DEVID_FIELD(sdio_device_id, device);
+
+       DEVID(ssb_device_id);
+       DEVID_FIELD(ssb_device_id, vendor);
+       DEVID_FIELD(ssb_device_id, coreid);
+       DEVID_FIELD(ssb_device_id, revision);
+
+       DEVID(bcma_device_id);
+       DEVID_FIELD(bcma_device_id, manuf);
+       DEVID_FIELD(bcma_device_id, id);
+       DEVID_FIELD(bcma_device_id, rev);
+       DEVID_FIELD(bcma_device_id, class);
+
+       DEVID(virtio_device_id);
+       DEVID_FIELD(virtio_device_id, device);
+       DEVID_FIELD(virtio_device_id, vendor);
+
+       DEVID(hv_vmbus_device_id);
+       DEVID_FIELD(hv_vmbus_device_id, guid);
+
+       DEVID(i2c_device_id);
+       DEVID_FIELD(i2c_device_id, name);
+
+       DEVID(spi_device_id);
+       DEVID_FIELD(spi_device_id, name);
+
+       DEVID(dmi_system_id);
+       DEVID_FIELD(dmi_system_id, matches);
+
+       DEVID(platform_device_id);
+       DEVID_FIELD(platform_device_id, name);
+
+       DEVID(mdio_device_id);
+       DEVID_FIELD(mdio_device_id, phy_id);
+       DEVID_FIELD(mdio_device_id, phy_id_mask);
+
+       DEVID(zorro_device_id);
+       DEVID_FIELD(zorro_device_id, id);
+
+       DEVID(isapnp_device_id);
+       DEVID_FIELD(isapnp_device_id, vendor);
+       DEVID_FIELD(isapnp_device_id, function);
+
+       DEVID(ipack_device_id);
+       DEVID_FIELD(ipack_device_id, format);
+       DEVID_FIELD(ipack_device_id, vendor);
+       DEVID_FIELD(ipack_device_id, device);
+
+       DEVID(amba_id);
+       DEVID_FIELD(amba_id, id);
+       DEVID_FIELD(amba_id, mask);
+
+       DEVID(x86_cpu_id);
+       DEVID_FIELD(x86_cpu_id, feature);
+       DEVID_FIELD(x86_cpu_id, family);
+       DEVID_FIELD(x86_cpu_id, model);
+       DEVID_FIELD(x86_cpu_id, vendor);
+
+       return 0;
+}
index df4fc23..771ac17 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include "modpost.h"
+#include "devicetable-offsets.h"
 
 /* We use the ELF typedefs for kernel_ulong_t but bite the bullet and
  * use either stdint.h or inttypes.h for the rest. */
@@ -84,13 +85,25 @@ extern struct devtable *__start___devtable[], *__stop___devtable[];
 # define __used                        __attribute__((__used__))
 #endif
 
+/* Define a variable f that holds the value of field f of struct devid
+ * based at address m.
+ */
+#define DEF_FIELD(m, devid, f) \
+       typeof(((struct devid *)0)->f) f = TO_NATIVE(*(typeof(f) *)((m) + OFF_##devid##_##f))
+/* Define a variable f that holds the address of field f of struct devid
+ * based at address m.  Due to the way typeof works, for a field of type
+ * T[N] the variable has type T(*)[N], _not_ T*.
+ */
+#define DEF_FIELD_ADDR(m, devid, f) \
+       typeof(((struct devid *)0)->f) *f = ((m) + OFF_##devid##_##f)
+
 /* Add a table entry.  We test function type matches while we're here. */
 #define ADD_TO_DEVTABLE(device_id, type, function) \
        static struct devtable __cat(devtable,__LINE__) = {     \
                device_id + 0*sizeof((function)((const char *)NULL,     \
-                                               (type *)NULL,           \
+                                               (void *)NULL,           \
                                                (char *)NULL)),         \
-               sizeof(type), (function) };                             \
+               SIZE_##type, (function) };                              \
        static struct devtable *SECTION(__devtable) __used \
                __cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__)
 
@@ -116,7 +129,6 @@ static inline void add_wildcard(char *str)
                strcat(str + len, "*");
 }
 
-unsigned int cross_build = 0;
 /**
  * Check that sizeof(device_id type) are consistent with size of section
  * in .o file. If in-consistent then userspace and kernel does not agree
@@ -131,8 +143,6 @@ static void device_id_check(const char *modname, const char *device_id,
        int i;
 
        if (size % id_size || size < id_size) {
-               if (cross_build != 0)
-                       return;
                fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
                      "of the size of section __mod_%s_device_table=%lu.\n"
                      "Fix definition of struct %s_device_id "
@@ -157,17 +167,29 @@ static void device_id_check(const char *modname, const char *device_id,
 
 /* USB is special because the bcdDevice can be matched against a numeric range */
 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
-static void do_usb_entry(struct usb_device_id *id,
+static void do_usb_entry(void *symval,
                         unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
                         unsigned char range_lo, unsigned char range_hi,
                         unsigned char max, struct module *mod)
 {
        char alias[500];
+       DEF_FIELD(symval, usb_device_id, match_flags);
+       DEF_FIELD(symval, usb_device_id, idVendor);
+       DEF_FIELD(symval, usb_device_id, idProduct);
+       DEF_FIELD(symval, usb_device_id, bcdDevice_lo);
+       DEF_FIELD(symval, usb_device_id, bDeviceClass);
+       DEF_FIELD(symval, usb_device_id, bDeviceSubClass);
+       DEF_FIELD(symval, usb_device_id, bDeviceProtocol);
+       DEF_FIELD(symval, usb_device_id, bInterfaceClass);
+       DEF_FIELD(symval, usb_device_id, bInterfaceSubClass);
+       DEF_FIELD(symval, usb_device_id, bInterfaceProtocol);
+       DEF_FIELD(symval, usb_device_id, bInterfaceNumber);
+
        strcpy(alias, "usb:");
-       ADD(alias, "v", id->match_flags&USB_DEVICE_ID_MATCH_VENDOR,
-           id->idVendor);
-       ADD(alias, "p", id->match_flags&USB_DEVICE_ID_MATCH_PRODUCT,
-           id->idProduct);
+       ADD(alias, "v", match_flags&USB_DEVICE_ID_MATCH_VENDOR,
+           idVendor);
+       ADD(alias, "p", match_flags&USB_DEVICE_ID_MATCH_PRODUCT,
+           idProduct);
 
        strcat(alias, "d");
        if (bcdDevice_initial_digits)
@@ -190,29 +212,23 @@ static void do_usb_entry(struct usb_device_id *id,
                                range_lo);
                }
        }
-       if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1))
+       if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1))
                strcat(alias, "*");
 
-       ADD(alias, "dc", id->match_flags&USB_DEVICE_ID_MATCH_DEV_CLASS,
-           id->bDeviceClass);
-       ADD(alias, "dsc",
-           id->match_flags&USB_DEVICE_ID_MATCH_DEV_SUBCLASS,
-           id->bDeviceSubClass);
-       ADD(alias, "dp",
-           id->match_flags&USB_DEVICE_ID_MATCH_DEV_PROTOCOL,
-           id->bDeviceProtocol);
-       ADD(alias, "ic",
-           id->match_flags&USB_DEVICE_ID_MATCH_INT_CLASS,
-           id->bInterfaceClass);
-       ADD(alias, "isc",
-           id->match_flags&USB_DEVICE_ID_MATCH_INT_SUBCLASS,
-           id->bInterfaceSubClass);
-       ADD(alias, "ip",
-           id->match_flags&USB_DEVICE_ID_MATCH_INT_PROTOCOL,
-           id->bInterfaceProtocol);
-       ADD(alias, "in",
-           id->match_flags&USB_DEVICE_ID_MATCH_INT_NUMBER,
-           id->bInterfaceNumber);
+       ADD(alias, "dc", match_flags&USB_DEVICE_ID_MATCH_DEV_CLASS,
+           bDeviceClass);
+       ADD(alias, "dsc", match_flags&USB_DEVICE_ID_MATCH_DEV_SUBCLASS,
+           bDeviceSubClass);
+       ADD(alias, "dp", match_flags&USB_DEVICE_ID_MATCH_DEV_PROTOCOL,
+           bDeviceProtocol);
+       ADD(alias, "ic", match_flags&USB_DEVICE_ID_MATCH_INT_CLASS,
+           bInterfaceClass);
+       ADD(alias, "isc", match_flags&USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+           bInterfaceSubClass);
+       ADD(alias, "ip", match_flags&USB_DEVICE_ID_MATCH_INT_PROTOCOL,
+           bInterfaceProtocol);
+       ADD(alias, "in", match_flags&USB_DEVICE_ID_MATCH_INT_NUMBER,
+           bInterfaceNumber);
 
        add_wildcard(alias);
        buf_printf(&mod->dev_table_buf,
@@ -258,24 +274,28 @@ static unsigned int incbcd(unsigned int *bcd,
        return init;
 }
 
-static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
+static void do_usb_entry_multi(void *symval, struct module *mod)
 {
        unsigned int devlo, devhi;
        unsigned char chi, clo, max;
        int ndigits;
 
-       id->match_flags = TO_NATIVE(id->match_flags);
-       id->idVendor = TO_NATIVE(id->idVendor);
-       id->idProduct = TO_NATIVE(id->idProduct);
+       DEF_FIELD(symval, usb_device_id, match_flags);
+       DEF_FIELD(symval, usb_device_id, idVendor);
+       DEF_FIELD(symval, usb_device_id, idProduct);
+       DEF_FIELD(symval, usb_device_id, bcdDevice_lo);
+       DEF_FIELD(symval, usb_device_id, bcdDevice_hi);
+       DEF_FIELD(symval, usb_device_id, bDeviceClass);
+       DEF_FIELD(symval, usb_device_id, bInterfaceClass);
 
-       devlo = id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO ?
-               TO_NATIVE(id->bcdDevice_lo) : 0x0U;
-       devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ?
-               TO_NATIVE(id->bcdDevice_hi) : ~0x0U;
+       devlo = match_flags & USB_DEVICE_ID_MATCH_DEV_LO ?
+               bcdDevice_lo : 0x0U;
+       devhi = match_flags & USB_DEVICE_ID_MATCH_DEV_HI ?
+               bcdDevice_hi : ~0x0U;
 
        /* Figure out if this entry is in bcd or hex format */
        max = 0x9; /* Default to decimal format */
-       for (ndigits = 0 ; ndigits < sizeof(id->bcdDevice_lo) * 2 ; ndigits++) {
+       for (ndigits = 0 ; ndigits < sizeof(bcdDevice_lo) * 2 ; ndigits++) {
                clo = (devlo >> (ndigits << 2)) & 0xf;
                chi = ((devhi > 0x9999 ? 0x9999 : devhi) >> (ndigits << 2)) & 0xf;
                if (clo > max || chi > max) {
@@ -288,11 +308,11 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
         * Some modules (visor) have empty slots as placeholder for
         * run-time specification that results in catch-all alias
         */
-       if (!(id->idVendor | id->idProduct | id->bDeviceClass | id->bInterfaceClass))
+       if (!(idVendor | idProduct | bDeviceClass | bInterfaceClass))
                return;
 
        /* Convert numeric bcdDevice range into fnmatch-able pattern(s) */
-       for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) {
+       for (ndigits = sizeof(bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) {
                clo = devlo & 0xf;
                chi = devhi & 0xf;
                if (chi > max)  /* If we are in bcd mode, truncate if necessary */
@@ -301,20 +321,20 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
                devhi >>= 4;
 
                if (devlo == devhi || !ndigits) {
-                       do_usb_entry(id, devlo, ndigits, clo, chi, max, mod);
+                       do_usb_entry(symval, devlo, ndigits, clo, chi, max, mod);
                        break;
                }
 
                if (clo > 0x0)
-                       do_usb_entry(id,
+                       do_usb_entry(symval,
                                     incbcd(&devlo, 1, max,
-                                           sizeof(id->bcdDevice_lo) * 2),
+                                           sizeof(bcdDevice_lo) * 2),
                                     ndigits, clo, max, max, mod);
 
                if (chi < max)
-                       do_usb_entry(id,
+                       do_usb_entry(symval,
                                     incbcd(&devhi, -1, max,
-                                           sizeof(id->bcdDevice_lo) * 2),
+                                           sizeof(bcdDevice_lo) * 2),
                                     ndigits, 0x0, chi, max, mod);
        }
 }
@@ -323,7 +343,7 @@ static void do_usb_table(void *symval, unsigned long size,
                         struct module *mod)
 {
        unsigned int i;
-       const unsigned long id_size = sizeof(struct usb_device_id);
+       const unsigned long id_size = SIZE_usb_device_id;
 
        device_id_check(mod->name, "usb", size, id_size, symval);
 
@@ -336,81 +356,81 @@ static void do_usb_table(void *symval, unsigned long size,
 
 /* Looks like: hid:bNvNpN */
 static int do_hid_entry(const char *filename,
-                            struct hid_device_id *id, char *alias)
+                            void *symval, char *alias)
 {
-       id->bus = TO_NATIVE(id->bus);
-       id->group = TO_NATIVE(id->group);
-       id->vendor = TO_NATIVE(id->vendor);
-       id->product = TO_NATIVE(id->product);
+       DEF_FIELD(symval, hid_device_id, bus);
+       DEF_FIELD(symval, hid_device_id, group);
+       DEF_FIELD(symval, hid_device_id, vendor);
+       DEF_FIELD(symval, hid_device_id, product);
 
        sprintf(alias, "hid:");
-       ADD(alias, "b", id->bus != HID_BUS_ANY, id->bus);
-       ADD(alias, "g", id->group != HID_GROUP_ANY, id->group);
-       ADD(alias, "v", id->vendor != HID_ANY_ID, id->vendor);
-       ADD(alias, "p", id->product != HID_ANY_ID, id->product);
+       ADD(alias, "b", bus != HID_BUS_ANY, bus);
+       ADD(alias, "g", group != HID_GROUP_ANY, group);
+       ADD(alias, "v", vendor != HID_ANY_ID, vendor);
+       ADD(alias, "p", product != HID_ANY_ID, product);
 
        return 1;
 }
-ADD_TO_DEVTABLE("hid", struct hid_device_id, do_hid_entry);
+ADD_TO_DEVTABLE("hid", hid_device_id, do_hid_entry);
 
 /* Looks like: ieee1394:venNmoNspNverN */
 static int do_ieee1394_entry(const char *filename,
-                            struct ieee1394_device_id *id, char *alias)
+                            void *symval, char *alias)
 {
-       id->match_flags = TO_NATIVE(id->match_flags);
-       id->vendor_id = TO_NATIVE(id->vendor_id);
-       id->model_id = TO_NATIVE(id->model_id);
-       id->specifier_id = TO_NATIVE(id->specifier_id);
-       id->version = TO_NATIVE(id->version);
+       DEF_FIELD(symval, ieee1394_device_id, match_flags);
+       DEF_FIELD(symval, ieee1394_device_id, vendor_id);
+       DEF_FIELD(symval, ieee1394_device_id, model_id);
+       DEF_FIELD(symval, ieee1394_device_id, specifier_id);
+       DEF_FIELD(symval, ieee1394_device_id, version);
 
        strcpy(alias, "ieee1394:");
-       ADD(alias, "ven", id->match_flags & IEEE1394_MATCH_VENDOR_ID,
-           id->vendor_id);
-       ADD(alias, "mo", id->match_flags & IEEE1394_MATCH_MODEL_ID,
-           id->model_id);
-       ADD(alias, "sp", id->match_flags & IEEE1394_MATCH_SPECIFIER_ID,
-           id->specifier_id);
-       ADD(alias, "ver", id->match_flags & IEEE1394_MATCH_VERSION,
-           id->version);
+       ADD(alias, "ven", match_flags & IEEE1394_MATCH_VENDOR_ID,
+           vendor_id);
+       ADD(alias, "mo", match_flags & IEEE1394_MATCH_MODEL_ID,
+           model_id);
+       ADD(alias, "sp", match_flags & IEEE1394_MATCH_SPECIFIER_ID,
+           specifier_id);
+       ADD(alias, "ver", match_flags & IEEE1394_MATCH_VERSION,
+           version);
 
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("ieee1394", struct ieee1394_device_id, do_ieee1394_entry);
+ADD_TO_DEVTABLE("ieee1394", ieee1394_device_id, do_ieee1394_entry);
 
 /* Looks like: pci:vNdNsvNsdNbcNscNiN. */
 static int do_pci_entry(const char *filename,
-                       struct pci_device_id *id, char *alias)
+                       void *symval, char *alias)
 {
        /* Class field can be divided into these three. */
        unsigned char baseclass, subclass, interface,
                baseclass_mask, subclass_mask, interface_mask;
 
-       id->vendor = TO_NATIVE(id->vendor);
-       id->device = TO_NATIVE(id->device);
-       id->subvendor = TO_NATIVE(id->subvendor);
-       id->subdevice = TO_NATIVE(id->subdevice);
-       id->class = TO_NATIVE(id->class);
-       id->class_mask = TO_NATIVE(id->class_mask);
+       DEF_FIELD(symval, pci_device_id, vendor);
+       DEF_FIELD(symval, pci_device_id, device);
+       DEF_FIELD(symval, pci_device_id, subvendor);
+       DEF_FIELD(symval, pci_device_id, subdevice);
+       DEF_FIELD(symval, pci_device_id, class);
+       DEF_FIELD(symval, pci_device_id, class_mask);
 
        strcpy(alias, "pci:");
-       ADD(alias, "v", id->vendor != PCI_ANY_ID, id->vendor);
-       ADD(alias, "d", id->device != PCI_ANY_ID, id->device);
-       ADD(alias, "sv", id->subvendor != PCI_ANY_ID, id->subvendor);
-       ADD(alias, "sd", id->subdevice != PCI_ANY_ID, id->subdevice);
-
-       baseclass = (id->class) >> 16;
-       baseclass_mask = (id->class_mask) >> 16;
-       subclass = (id->class) >> 8;
-       subclass_mask = (id->class_mask) >> 8;
-       interface = id->class;
-       interface_mask = id->class_mask;
+       ADD(alias, "v", vendor != PCI_ANY_ID, vendor);
+       ADD(alias, "d", device != PCI_ANY_ID, device);
+       ADD(alias, "sv", subvendor != PCI_ANY_ID, subvendor);
+       ADD(alias, "sd", subdevice != PCI_ANY_ID, subdevice);
+
+       baseclass = (class) >> 16;
+       baseclass_mask = (class_mask) >> 16;
+       subclass = (class) >> 8;
+       subclass_mask = (class_mask) >> 8;
+       interface = class;
+       interface_mask = class_mask;
 
        if ((baseclass_mask != 0 && baseclass_mask != 0xFF)
            || (subclass_mask != 0 && subclass_mask != 0xFF)
            || (interface_mask != 0 && interface_mask != 0xFF)) {
                warn("Can't handle masks in %s:%04X\n",
-                    filename, id->class_mask);
+                    filename, class_mask);
                return 0;
        }
 
@@ -420,101 +440,105 @@ static int do_pci_entry(const char *filename,
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("pci", struct pci_device_id, do_pci_entry);
+ADD_TO_DEVTABLE("pci", pci_device_id, do_pci_entry);
 
 /* looks like: "ccw:tNmNdtNdmN" */
 static int do_ccw_entry(const char *filename,
-                       struct ccw_device_id *id, char *alias)
+                       void *symval, char *alias)
 {
-       id->match_flags = TO_NATIVE(id->match_flags);
-       id->cu_type = TO_NATIVE(id->cu_type);
-       id->cu_model = TO_NATIVE(id->cu_model);
-       id->dev_type = TO_NATIVE(id->dev_type);
-       id->dev_model = TO_NATIVE(id->dev_model);
+       DEF_FIELD(symval, ccw_device_id, match_flags);
+       DEF_FIELD(symval, ccw_device_id, cu_type);
+       DEF_FIELD(symval, ccw_device_id, cu_model);
+       DEF_FIELD(symval, ccw_device_id, dev_type);
+       DEF_FIELD(symval, ccw_device_id, dev_model);
 
        strcpy(alias, "ccw:");
-       ADD(alias, "t", id->match_flags&CCW_DEVICE_ID_MATCH_CU_TYPE,
-           id->cu_type);
-       ADD(alias, "m", id->match_flags&CCW_DEVICE_ID_MATCH_CU_MODEL,
-           id->cu_model);
-       ADD(alias, "dt", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE,
-           id->dev_type);
-       ADD(alias, "dm", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL,
-           id->dev_model);
+       ADD(alias, "t", match_flags&CCW_DEVICE_ID_MATCH_CU_TYPE,
+           cu_type);
+       ADD(alias, "m", match_flags&CCW_DEVICE_ID_MATCH_CU_MODEL,
+           cu_model);
+       ADD(alias, "dt", match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE,
+           dev_type);
+       ADD(alias, "dm", match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL,
+           dev_model);
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("ccw", struct ccw_device_id, do_ccw_entry);
+ADD_TO_DEVTABLE("ccw", ccw_device_id, do_ccw_entry);
 
 /* looks like: "ap:tN" */
 static int do_ap_entry(const char *filename,
-                      struct ap_device_id *id, char *alias)
+                      void *symval, char *alias)
 {
-       sprintf(alias, "ap:t%02X*", id->dev_type);
+       DEF_FIELD(symval, ap_device_id, dev_type);
+
+       sprintf(alias, "ap:t%02X*", dev_type);
        return 1;
 }
-ADD_TO_DEVTABLE("ap", struct ap_device_id, do_ap_entry);
+ADD_TO_DEVTABLE("ap", ap_device_id, do_ap_entry);
 
 /* looks like: "css:tN" */
 static int do_css_entry(const char *filename,
-                       struct css_device_id *id, char *alias)
+                       void *symval, char *alias)
 {
-       sprintf(alias, "css:t%01X", id->type);
+       DEF_FIELD(symval, css_device_id, type);
+
+       sprintf(alias, "css:t%01X", type);
        return 1;
 }
-ADD_TO_DEVTABLE("css", struct css_device_id, do_css_entry);
+ADD_TO_DEVTABLE("css", css_device_id, do_css_entry);
 
 /* Looks like: "serio:tyNprNidNexN" */
 static int do_serio_entry(const char *filename,
-                         struct serio_device_id *id, char *alias)
+                         void *symval, char *alias)
 {
-       id->type = TO_NATIVE(id->type);
-       id->proto = TO_NATIVE(id->proto);
-       id->id = TO_NATIVE(id->id);
-       id->extra = TO_NATIVE(id->extra);
+       DEF_FIELD(symval, serio_device_id, type);
+       DEF_FIELD(symval, serio_device_id, proto);
+       DEF_FIELD(symval, serio_device_id, id);
+       DEF_FIELD(symval, serio_device_id, extra);
 
        strcpy(alias, "serio:");
-       ADD(alias, "ty", id->type != SERIO_ANY, id->type);
-       ADD(alias, "pr", id->proto != SERIO_ANY, id->proto);
-       ADD(alias, "id", id->id != SERIO_ANY, id->id);
-       ADD(alias, "ex", id->extra != SERIO_ANY, id->extra);
+       ADD(alias, "ty", type != SERIO_ANY, type);
+       ADD(alias, "pr", proto != SERIO_ANY, proto);
+       ADD(alias, "id", id != SERIO_ANY, id);
+       ADD(alias, "ex", extra != SERIO_ANY, extra);
 
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("serio", struct serio_device_id, do_serio_entry);
+ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
 
 /* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */
 static int do_acpi_entry(const char *filename,
-                       struct acpi_device_id *id, char *alias)
+                       void *symval, char *alias)
 {
-       sprintf(alias, "acpi*:%s:*", id->id);
+       DEF_FIELD_ADDR(symval, acpi_device_id, id);
+       sprintf(alias, "acpi*:%s:*", *id);
        return 1;
 }
-ADD_TO_DEVTABLE("acpi", struct acpi_device_id, do_acpi_entry);
+ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
 
 /* looks like: "pnp:dD" */
 static void do_pnp_device_entry(void *symval, unsigned long size,
                                struct module *mod)
 {
-       const unsigned long id_size = sizeof(struct pnp_device_id);
+       const unsigned long id_size = SIZE_pnp_device_id;
        const unsigned int count = (size / id_size)-1;
-       const struct pnp_device_id *devs = symval;
        unsigned int i;
 
        device_id_check(mod->name, "pnp", size, id_size, symval);
 
        for (i = 0; i < count; i++) {
-               const char *id = (char *)devs[i].id;
-               char acpi_id[sizeof(devs[0].id)];
+               DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id);
+               char acpi_id[sizeof(*id)];
                int j;
 
                buf_printf(&mod->dev_table_buf,
-                          "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
+                          "MODULE_ALIAS(\"pnp:d%s*\");\n", *id);
 
                /* fix broken pnp bus lowercasing */
                for (j = 0; j < sizeof(acpi_id); j++)
-                       acpi_id[j] = toupper(id[j]);
+                       acpi_id[j] = toupper((*id)[j]);
                buf_printf(&mod->dev_table_buf,
                           "MODULE_ALIAS(\"acpi*:%s:*\");\n", acpi_id);
        }
@@ -524,19 +548,18 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
 static void do_pnp_card_entries(void *symval, unsigned long size,
                                struct module *mod)
 {
-       const unsigned long id_size = sizeof(struct pnp_card_device_id);
+       const unsigned long id_size = SIZE_pnp_card_device_id;
        const unsigned int count = (size / id_size)-1;
-       const struct pnp_card_device_id *cards = symval;
        unsigned int i;
 
        device_id_check(mod->name, "pnp", size, id_size, symval);
 
        for (i = 0; i < count; i++) {
                unsigned int j;
-               const struct pnp_card_device_id *card = &cards[i];
+               DEF_FIELD_ADDR(symval + i*id_size, pnp_card_device_id, devs);
 
                for (j = 0; j < PNP_MAX_DEVICES; j++) {
-                       const char *id = (char *)card->devs[j].id;
+                       const char *id = (char *)(*devs)[j].id;
                        int i2, j2;
                        int dup = 0;
 
@@ -545,10 +568,10 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
 
                        /* find duplicate, already added value */
                        for (i2 = 0; i2 < i && !dup; i2++) {
-                               const struct pnp_card_device_id *card2 = &cards[i2];
+                               DEF_FIELD_ADDR(symval + i2*id_size, pnp_card_device_id, devs);
 
                                for (j2 = 0; j2 < PNP_MAX_DEVICES; j2++) {
-                                       const char *id2 = (char *)card2->devs[j2].id;
+                                       const char *id2 = (char *)(*devs)[j2].id;
 
                                        if (!id2[0])
                                                break;
@@ -562,7 +585,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
 
                        /* add an individual alias for every device entry */
                        if (!dup) {
-                               char acpi_id[sizeof(card->devs[0].id)];
+                               char acpi_id[PNP_ID_LEN];
                                int k;
 
                                buf_printf(&mod->dev_table_buf,
@@ -580,54 +603,58 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
 
 /* Looks like: pcmcia:mNcNfNfnNpfnNvaNvbNvcNvdN. */
 static int do_pcmcia_entry(const char *filename,
-                          struct pcmcia_device_id *id, char *alias)
+                          void *symval, char *alias)
 {
        unsigned int i;
-
-       id->match_flags = TO_NATIVE(id->match_flags);
-       id->manf_id = TO_NATIVE(id->manf_id);
-       id->card_id = TO_NATIVE(id->card_id);
-       id->func_id = TO_NATIVE(id->func_id);
-       id->function = TO_NATIVE(id->function);
-       id->device_no = TO_NATIVE(id->device_no);
+       DEF_FIELD(symval, pcmcia_device_id, match_flags);
+       DEF_FIELD(symval, pcmcia_device_id, manf_id);
+       DEF_FIELD(symval, pcmcia_device_id, card_id);
+       DEF_FIELD(symval, pcmcia_device_id, func_id);
+       DEF_FIELD(symval, pcmcia_device_id, function);
+       DEF_FIELD(symval, pcmcia_device_id, device_no);
+       DEF_FIELD_ADDR(symval, pcmcia_device_id, prod_id_hash);
 
        for (i=0; i<4; i++) {
-               id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]);
-       }
-
-       strcpy(alias, "pcmcia:");
-       ADD(alias, "m", id->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID,
-          id->manf_id);
-       ADD(alias, "c", id->match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID,
-          id->card_id);
-       ADD(alias, "f", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID,
-          id->func_id);
-       ADD(alias, "fn", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION,
-          id->function);
-       ADD(alias, "pfn", id->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO,
-          id->device_no);
-       ADD(alias, "pa", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1, id->prod_id_hash[0]);
-       ADD(alias, "pb", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2, id->prod_id_hash[1]);
-       ADD(alias, "pc", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, id->prod_id_hash[2]);
-       ADD(alias, "pd", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, id->prod_id_hash[3]);
+               (*prod_id_hash)[i] = TO_NATIVE((*prod_id_hash)[i]);
+       }
+
+       strcpy(alias, "pcmcia:");
+       ADD(alias, "m", match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID,
+           manf_id);
+       ADD(alias, "c", match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID,
+           card_id);
+       ADD(alias, "f", match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID,
+           func_id);
+       ADD(alias, "fn", match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION,
+           function);
+       ADD(alias, "pfn", match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO,
+           device_no);
+       ADD(alias, "pa", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1, (*prod_id_hash)[0]);
+       ADD(alias, "pb", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2, (*prod_id_hash)[1]);
+       ADD(alias, "pc", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, (*prod_id_hash)[2]);
+       ADD(alias, "pd", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, (*prod_id_hash)[3]);
 
        add_wildcard(alias);
-       return 1;
+       return 1;
 }
-ADD_TO_DEVTABLE("pcmcia", struct pcmcia_device_id, do_pcmcia_entry);
+ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
 
-static int do_of_entry (const char *filename, struct of_device_id *of, char *alias)
+static int do_of_entry (const char *filename, void *symval, char *alias)
 {
     int len;
     char *tmp;
+    DEF_FIELD_ADDR(symval, of_device_id, name);
+    DEF_FIELD_ADDR(symval, of_device_id, type);
+    DEF_FIELD_ADDR(symval, of_device_id, compatible);
+
     len = sprintf (alias, "of:N%sT%s",
-                    of->name[0] ? of->name : "*",
-                    of->type[0] ? of->type : "*");
+                    (*name)[0] ? *name : "*",
+                    (*type)[0] ? *type : "*");
 
-    if (of->compatible[0])
+    if (compatible[0])
         sprintf (&alias[len], "%sC%s",
-                     of->type[0] ? "*" : "",
-                     of->compatible);
+                     (*type)[0] ? "*" : "",
+                     *compatible);
 
     /* Replace all whitespace with underscores */
     for (tmp = alias; tmp && *tmp; tmp++)
@@ -637,15 +664,17 @@ static int do_of_entry (const char *filename, struct of_device_id *of, char *ali
     add_wildcard(alias);
     return 1;
 }
-ADD_TO_DEVTABLE("of", struct of_device_id, do_of_entry);
+ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
 
-static int do_vio_entry(const char *filename, struct vio_device_id *vio,
+static int do_vio_entry(const char *filename, void *symval,
                char *alias)
 {
        char *tmp;
+       DEF_FIELD_ADDR(symval, vio_device_id, type);
+       DEF_FIELD_ADDR(symval, vio_device_id, compat);
 
-       sprintf(alias, "vio:T%sS%s", vio->type[0] ? vio->type : "*",
-                       vio->compat[0] ? vio->compat : "*");
+       sprintf(alias, "vio:T%sS%s", (*type)[0] ? *type : "*",
+                       (*compat)[0] ? *compat : "*");
 
        /* Replace all whitespace with underscores */
        for (tmp = alias; tmp && *tmp; tmp++)
@@ -655,7 +684,7 @@ static int do_vio_entry(const char *filename, struct vio_device_id *vio,
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("vio", struct vio_device_id, do_vio_entry);
+ADD_TO_DEVTABLE("vio", vio_device_id, do_vio_entry);
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
@@ -664,154 +693,172 @@ static void do_input(char *alias,
 {
        unsigned int i;
 
+       for (i = min / BITS_PER_LONG; i < max / BITS_PER_LONG + 1; i++)
+               arr[i] = TO_NATIVE(arr[i]);
        for (i = min; i < max; i++)
                if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG)))
                        sprintf(alias + strlen(alias), "%X,*", i);
 }
 
 /* input:b0v0p0e0-eXkXrXaXmXlXsXfXwX where X is comma-separated %02X. */
-static int do_input_entry(const char *filename, struct input_device_id *id,
+static int do_input_entry(const char *filename, void *symval,
                          char *alias)
 {
+       DEF_FIELD(symval, input_device_id, flags);
+       DEF_FIELD(symval, input_device_id, bustype);
+       DEF_FIELD(symval, input_device_id, vendor);
+       DEF_FIELD(symval, input_device_id, product);
+       DEF_FIELD(symval, input_device_id, version);
+       DEF_FIELD_ADDR(symval, input_device_id, evbit);
+       DEF_FIELD_ADDR(symval, input_device_id, keybit);
+       DEF_FIELD_ADDR(symval, input_device_id, relbit);
+       DEF_FIELD_ADDR(symval, input_device_id, absbit);
+       DEF_FIELD_ADDR(symval, input_device_id, mscbit);
+       DEF_FIELD_ADDR(symval, input_device_id, ledbit);
+       DEF_FIELD_ADDR(symval, input_device_id, sndbit);
+       DEF_FIELD_ADDR(symval, input_device_id, ffbit);
+       DEF_FIELD_ADDR(symval, input_device_id, swbit);
+
        sprintf(alias, "input:");
 
-       ADD(alias, "b", id->flags & INPUT_DEVICE_ID_MATCH_BUS, id->bustype);
-       ADD(alias, "v", id->flags & INPUT_DEVICE_ID_MATCH_VENDOR, id->vendor);
-       ADD(alias, "p", id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT, id->product);
-       ADD(alias, "e", id->flags & INPUT_DEVICE_ID_MATCH_VERSION, id->version);
+       ADD(alias, "b", flags & INPUT_DEVICE_ID_MATCH_BUS, bustype);
+       ADD(alias, "v", flags & INPUT_DEVICE_ID_MATCH_VENDOR, vendor);
+       ADD(alias, "p", flags & INPUT_DEVICE_ID_MATCH_PRODUCT, product);
+       ADD(alias, "e", flags & INPUT_DEVICE_ID_MATCH_VERSION, version);
 
        sprintf(alias + strlen(alias), "-e*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_EVBIT)
-               do_input(alias, id->evbit, 0, INPUT_DEVICE_ID_EV_MAX);
+       if (flags & INPUT_DEVICE_ID_MATCH_EVBIT)
+               do_input(alias, *evbit, 0, INPUT_DEVICE_ID_EV_MAX);
        sprintf(alias + strlen(alias), "k*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_KEYBIT)
-               do_input(alias, id->keybit,
+       if (flags & INPUT_DEVICE_ID_MATCH_KEYBIT)
+               do_input(alias, *keybit,
                         INPUT_DEVICE_ID_KEY_MIN_INTERESTING,
                         INPUT_DEVICE_ID_KEY_MAX);
        sprintf(alias + strlen(alias), "r*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_RELBIT)
-               do_input(alias, id->relbit, 0, INPUT_DEVICE_ID_REL_MAX);
+       if (flags & INPUT_DEVICE_ID_MATCH_RELBIT)
+               do_input(alias, *relbit, 0, INPUT_DEVICE_ID_REL_MAX);
        sprintf(alias + strlen(alias), "a*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_ABSBIT)
-               do_input(alias, id->absbit, 0, INPUT_DEVICE_ID_ABS_MAX);
+       if (flags & INPUT_DEVICE_ID_MATCH_ABSBIT)
+               do_input(alias, *absbit, 0, INPUT_DEVICE_ID_ABS_MAX);
        sprintf(alias + strlen(alias), "m*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_MSCIT)
-               do_input(alias, id->mscbit, 0, INPUT_DEVICE_ID_MSC_MAX);
+       if (flags & INPUT_DEVICE_ID_MATCH_MSCIT)
+               do_input(alias, *mscbit, 0, INPUT_DEVICE_ID_MSC_MAX);
        sprintf(alias + strlen(alias), "l*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_LEDBIT)
-               do_input(alias, id->ledbit, 0, INPUT_DEVICE_ID_LED_MAX);
+       if (flags & INPUT_DEVICE_ID_MATCH_LEDBIT)
+               do_input(alias, *ledbit, 0, INPUT_DEVICE_ID_LED_MAX);
        sprintf(alias + strlen(alias), "s*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_SNDBIT)
-               do_input(alias, id->sndbit, 0, INPUT_DEVICE_ID_SND_MAX);
+       if (flags & INPUT_DEVICE_ID_MATCH_SNDBIT)
+               do_input(alias, *sndbit, 0, INPUT_DEVICE_ID_SND_MAX);
        sprintf(alias + strlen(alias), "f*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_FFBIT)
-               do_input(alias, id->ffbit, 0, INPUT_DEVICE_ID_FF_MAX);
+       if (flags & INPUT_DEVICE_ID_MATCH_FFBIT)
+               do_input(alias, *ffbit, 0, INPUT_DEVICE_ID_FF_MAX);
        sprintf(alias + strlen(alias), "w*");
-       if (id->flags & INPUT_DEVICE_ID_MATCH_SWBIT)
-               do_input(alias, id->swbit, 0, INPUT_DEVICE_ID_SW_MAX);
+       if (flags & INPUT_DEVICE_ID_MATCH_SWBIT)
+               do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
        return 1;
 }
-ADD_TO_DEVTABLE("input", struct input_device_id, do_input_entry);
+ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
 
-static int do_eisa_entry(const char *filename, struct eisa_device_id *eisa,
+static int do_eisa_entry(const char *filename, void *symval,
                char *alias)
 {
-       if (eisa->sig[0])
-               sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", eisa->sig);
+       DEF_FIELD_ADDR(symval, eisa_device_id, sig);
+       if (sig[0])
+               sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", *sig);
        else
                strcat(alias, "*");
        return 1;
 }
-ADD_TO_DEVTABLE("eisa", struct eisa_device_id, do_eisa_entry);
+ADD_TO_DEVTABLE("eisa", eisa_device_id, do_eisa_entry);
 
 /* Looks like: parisc:tNhvNrevNsvN */
-static int do_parisc_entry(const char *filename, struct parisc_device_id *id,
+static int do_parisc_entry(const char *filename, void *symval,
                char *alias)
 {
-       id->hw_type = TO_NATIVE(id->hw_type);
-       id->hversion = TO_NATIVE(id->hversion);
-       id->hversion_rev = TO_NATIVE(id->hversion_rev);
-       id->sversion = TO_NATIVE(id->sversion);
+       DEF_FIELD(symval, parisc_device_id, hw_type);
+       DEF_FIELD(symval, parisc_device_id, hversion);
+       DEF_FIELD(symval, parisc_device_id, hversion_rev);
+       DEF_FIELD(symval, parisc_device_id, sversion);
 
        strcpy(alias, "parisc:");
-       ADD(alias, "t", id->hw_type != PA_HWTYPE_ANY_ID, id->hw_type);
-       ADD(alias, "hv", id->hversion != PA_HVERSION_ANY_ID, id->hversion);
-       ADD(alias, "rev", id->hversion_rev != PA_HVERSION_REV_ANY_ID, id->hversion_rev);
-       ADD(alias, "sv", id->sversion != PA_SVERSION_ANY_ID, id->sversion);
+       ADD(alias, "t", hw_type != PA_HWTYPE_ANY_ID, hw_type);
+       ADD(alias, "hv", hversion != PA_HVERSION_ANY_ID, hversion);
+       ADD(alias, "rev", hversion_rev != PA_HVERSION_REV_ANY_ID, hversion_rev);
+       ADD(alias, "sv", sversion != PA_SVERSION_ANY_ID, sversion);
 
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("parisc", struct parisc_device_id, do_parisc_entry);
+ADD_TO_DEVTABLE("parisc", parisc_device_id, do_parisc_entry);
 
 /* Looks like: sdio:cNvNdN. */
 static int do_sdio_entry(const char *filename,
-                       struct sdio_device_id *id, char *alias)
+                       void *symval, char *alias)
 {
-       id->class = TO_NATIVE(id->class);
-       id->vendor = TO_NATIVE(id->vendor);
-       id->device = TO_NATIVE(id->device);
+       DEF_FIELD(symval, sdio_device_id, class);
+       DEF_FIELD(symval, sdio_device_id, vendor);
+       DEF_FIELD(symval, sdio_device_id, device);
 
        strcpy(alias, "sdio:");
-       ADD(alias, "c", id->class != (__u8)SDIO_ANY_ID, id->class);
-       ADD(alias, "v", id->vendor != (__u16)SDIO_ANY_ID, id->vendor);
-       ADD(alias, "d", id->device != (__u16)SDIO_ANY_ID, id->device);
+       ADD(alias, "c", class != (__u8)SDIO_ANY_ID, class);
+       ADD(alias, "v", vendor != (__u16)SDIO_ANY_ID, vendor);
+       ADD(alias, "d", device != (__u16)SDIO_ANY_ID, device);
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("sdio", struct sdio_device_id, do_sdio_entry);
+ADD_TO_DEVTABLE("sdio", sdio_device_id, do_sdio_entry);
 
 /* Looks like: ssb:vNidNrevN. */
 static int do_ssb_entry(const char *filename,
-                       struct ssb_device_id *id, char *alias)
+                       void *symval, char *alias)
 {
-       id->vendor = TO_NATIVE(id->vendor);
-       id->coreid = TO_NATIVE(id->coreid);
-       id->revision = TO_NATIVE(id->revision);
+       DEF_FIELD(symval, ssb_device_id, vendor);
+       DEF_FIELD(symval, ssb_device_id, coreid);
+       DEF_FIELD(symval, ssb_device_id, revision);
 
        strcpy(alias, "ssb:");
-       ADD(alias, "v", id->vendor != SSB_ANY_VENDOR, id->vendor);
-       ADD(alias, "id", id->coreid != SSB_ANY_ID, id->coreid);
-       ADD(alias, "rev", id->revision != SSB_ANY_REV, id->revision);
+       ADD(alias, "v", vendor != SSB_ANY_VENDOR, vendor);
+       ADD(alias, "id", coreid != SSB_ANY_ID, coreid);
+       ADD(alias, "rev", revision != SSB_ANY_REV, revision);
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("ssb", struct ssb_device_id, do_ssb_entry);
+ADD_TO_DEVTABLE("ssb", ssb_device_id, do_ssb_entry);
 
 /* Looks like: bcma:mNidNrevNclN. */
 static int do_bcma_entry(const char *filename,
-                        struct bcma_device_id *id, char *alias)
+                        void *symval, char *alias)
 {
-       id->manuf = TO_NATIVE(id->manuf);
-       id->id = TO_NATIVE(id->id);
-       id->rev = TO_NATIVE(id->rev);
-       id->class = TO_NATIVE(id->class);
+       DEF_FIELD(symval, bcma_device_id, manuf);
+       DEF_FIELD(symval, bcma_device_id, id);
+       DEF_FIELD(symval, bcma_device_id, rev);
+       DEF_FIELD(symval, bcma_device_id, class);
 
        strcpy(alias, "bcma:");
-       ADD(alias, "m", id->manuf != BCMA_ANY_MANUF, id->manuf);
-       ADD(alias, "id", id->id != BCMA_ANY_ID, id->id);
-       ADD(alias, "rev", id->rev != BCMA_ANY_REV, id->rev);
-       ADD(alias, "cl", id->class != BCMA_ANY_CLASS, id->class);
+       ADD(alias, "m", manuf != BCMA_ANY_MANUF, manuf);
+       ADD(alias, "id", id != BCMA_ANY_ID, id);
+       ADD(alias, "rev", rev != BCMA_ANY_REV, rev);
+       ADD(alias, "cl", class != BCMA_ANY_CLASS, class);
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("bcma", struct bcma_device_id, do_bcma_entry);
+ADD_TO_DEVTABLE("bcma", bcma_device_id, do_bcma_entry);
 
 /* Looks like: virtio:dNvN */
-static int do_virtio_entry(const char *filename, struct virtio_device_id *id,
+static int do_virtio_entry(const char *filename, void *symval,
                           char *alias)
 {
-       id->device = TO_NATIVE(id->device);
-       id->vendor = TO_NATIVE(id->vendor);
+       DEF_FIELD(symval, virtio_device_id, device);
+       DEF_FIELD(symval, virtio_device_id, vendor);
 
        strcpy(alias, "virtio:");
-       ADD(alias, "d", id->device != VIRTIO_DEV_ANY_ID, id->device);
-       ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor);
+       ADD(alias, "d", device != VIRTIO_DEV_ANY_ID, device);
+       ADD(alias, "v", vendor != VIRTIO_DEV_ANY_ID, vendor);
 
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("virtio", struct virtio_device_id, do_virtio_entry);
+ADD_TO_DEVTABLE("virtio", virtio_device_id, do_virtio_entry);
 
 /*
  * Looks like: vmbus:guid
@@ -819,41 +866,44 @@ ADD_TO_DEVTABLE("virtio", struct virtio_device_id, do_virtio_entry);
  * in the name.
  */
 
-static int do_vmbus_entry(const char *filename, struct hv_vmbus_device_id *id,
+static int do_vmbus_entry(const char *filename, void *symval,
                          char *alias)
 {
        int i;
-       char guid_name[((sizeof(id->guid) + 1)) * 2];
+       DEF_FIELD_ADDR(symval, hv_vmbus_device_id, guid);
+       char guid_name[(sizeof(*guid) + 1) * 2];
 
-       for (i = 0; i < (sizeof(id->guid) * 2); i += 2)
-               sprintf(&guid_name[i], "%02x", id->guid[i/2]);
+       for (i = 0; i < (sizeof(*guid) * 2); i += 2)
+               sprintf(&guid_name[i], "%02x", TO_NATIVE((*guid)[i/2]));
 
        strcpy(alias, "vmbus:");
        strcat(alias, guid_name);
 
        return 1;
 }
-ADD_TO_DEVTABLE("vmbus", struct hv_vmbus_device_id, do_vmbus_entry);
+ADD_TO_DEVTABLE("vmbus", hv_vmbus_device_id, do_vmbus_entry);
 
 /* Looks like: i2c:S */
-static int do_i2c_entry(const char *filename, struct i2c_device_id *id,
+static int do_i2c_entry(const char *filename, void *symval,
                        char *alias)
 {
-       sprintf(alias, I2C_MODULE_PREFIX "%s", id->name);
+       DEF_FIELD_ADDR(symval, i2c_device_id, name);
+       sprintf(alias, I2C_MODULE_PREFIX "%s", *name);
 
        return 1;
 }
-ADD_TO_DEVTABLE("i2c", struct i2c_device_id, do_i2c_entry);
+ADD_TO_DEVTABLE("i2c", i2c_device_id, do_i2c_entry);
 
 /* Looks like: spi:S */
-static int do_spi_entry(const char *filename, struct spi_device_id *id,
+static int do_spi_entry(const char *filename, void *symval,
                        char *alias)
 {
-       sprintf(alias, SPI_MODULE_PREFIX "%s", id->name);
+       DEF_FIELD_ADDR(symval, spi_device_id, name);
+       sprintf(alias, SPI_MODULE_PREFIX "%s", *name);
 
        return 1;
 }
-ADD_TO_DEVTABLE("spi", struct spi_device_id, do_spi_entry);
+ADD_TO_DEVTABLE("spi", spi_device_id, do_spi_entry);
 
 static const struct dmifield {
        const char *prefix;
@@ -885,21 +935,21 @@ static void dmi_ascii_filter(char *d, const char *s)
 }
 
 
-static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
+static int do_dmi_entry(const char *filename, void *symval,
                        char *alias)
 {
        int i, j;
-
+       DEF_FIELD_ADDR(symval, dmi_system_id, matches);
        sprintf(alias, "dmi*");
 
        for (i = 0; i < ARRAY_SIZE(dmi_fields); i++) {
                for (j = 0; j < 4; j++) {
-                       if (id->matches[j].slot &&
-                           id->matches[j].slot == dmi_fields[i].field) {
+                       if ((*matches)[j].slot &&
+                           (*matches)[j].slot == dmi_fields[i].field) {
                                sprintf(alias + strlen(alias), ":%s*",
                                        dmi_fields[i].prefix);
                                dmi_ascii_filter(alias + strlen(alias),
-                                                id->matches[j].substr);
+                                                (*matches)[j].substr);
                                strcat(alias, "*");
                        }
                }
@@ -908,27 +958,30 @@ static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
        strcat(alias, ":");
        return 1;
 }
-ADD_TO_DEVTABLE("dmi", struct dmi_system_id, do_dmi_entry);
+ADD_TO_DEVTABLE("dmi", dmi_system_id, do_dmi_entry);
 
 static int do_platform_entry(const char *filename,
-                            struct platform_device_id *id, char *alias)
+                            void *symval, char *alias)
 {
-       sprintf(alias, PLATFORM_MODULE_PREFIX "%s", id->name);
+       DEF_FIELD_ADDR(symval, platform_device_id, name);
+       sprintf(alias, PLATFORM_MODULE_PREFIX "%s", *name);
        return 1;
 }
-ADD_TO_DEVTABLE("platform", struct platform_device_id, do_platform_entry);
+ADD_TO_DEVTABLE("platform", platform_device_id, do_platform_entry);
 
 static int do_mdio_entry(const char *filename,
-                        struct mdio_device_id *id, char *alias)
+                        void *symval, char *alias)
 {
        int i;
+       DEF_FIELD(symval, mdio_device_id, phy_id);
+       DEF_FIELD(symval, mdio_device_id, phy_id_mask);
 
        alias += sprintf(alias, MDIO_MODULE_PREFIX);
 
        for (i = 0; i < 32; i++) {
-               if (!((id->phy_id_mask >> (31-i)) & 1))
+               if (!((phy_id_mask >> (31-i)) & 1))
                        *(alias++) = '?';
-               else if ((id->phy_id >> (31-i)) & 1)
+               else if ((phy_id >> (31-i)) & 1)
                        *(alias++) = '1';
                else
                        *(alias++) = '0';
@@ -939,47 +992,50 @@ static int do_mdio_entry(const char *filename,
 
        return 1;
 }
-ADD_TO_DEVTABLE("mdio", struct mdio_device_id, do_mdio_entry);
+ADD_TO_DEVTABLE("mdio", mdio_device_id, do_mdio_entry);
 
 /* Looks like: zorro:iN. */
-static int do_zorro_entry(const char *filename, struct zorro_device_id *id,
+static int do_zorro_entry(const char *filename, void *symval,
                          char *alias)
 {
-       id->id = TO_NATIVE(id->id);
+       DEF_FIELD(symval, zorro_device_id, id);
        strcpy(alias, "zorro:");
-       ADD(alias, "i", id->id != ZORRO_WILDCARD, id->id);
+       ADD(alias, "i", id != ZORRO_WILDCARD, id);
        return 1;
 }
-ADD_TO_DEVTABLE("zorro", struct zorro_device_id, do_zorro_entry);
+ADD_TO_DEVTABLE("zorro", zorro_device_id, do_zorro_entry);
 
 /* looks like: "pnp:dD" */
 static int do_isapnp_entry(const char *filename,
-                          struct isapnp_device_id *id, char *alias)
+                          void *symval, char *alias)
 {
+       DEF_FIELD(symval, isapnp_device_id, vendor);
+       DEF_FIELD(symval, isapnp_device_id, function);
        sprintf(alias, "pnp:d%c%c%c%x%x%x%x*",
-               'A' + ((id->vendor >> 2) & 0x3f) - 1,
-               'A' + (((id->vendor & 3) << 3) | ((id->vendor >> 13) & 7)) - 1,
-               'A' + ((id->vendor >> 8) & 0x1f) - 1,
-               (id->function >> 4) & 0x0f, id->function & 0x0f,
-               (id->function >> 12) & 0x0f, (id->function >> 8) & 0x0f);
+               'A' + ((vendor >> 2) & 0x3f) - 1,
+               'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
+               'A' + ((vendor >> 8) & 0x1f) - 1,
+               (function >> 4) & 0x0f, function & 0x0f,
+               (function >> 12) & 0x0f, (function >> 8) & 0x0f);
        return 1;
 }
-ADD_TO_DEVTABLE("isapnp", struct isapnp_device_id, do_isapnp_entry);
+ADD_TO_DEVTABLE("isapnp", isapnp_device_id, do_isapnp_entry);
 
 /* Looks like: "ipack:fNvNdN". */
 static int do_ipack_entry(const char *filename,
-                         struct ipack_device_id *id, char *alias)
+                         void *symval, char *alias)
 {
-       id->vendor = TO_NATIVE(id->vendor);
-       id->device = TO_NATIVE(id->device);
+       DEF_FIELD(symval, ipack_device_id, format);
+       DEF_FIELD(symval, ipack_device_id, vendor);
+       DEF_FIELD(symval, ipack_device_id, device);
        strcpy(alias, "ipack:");
-       ADD(alias, "f", id->format != IPACK_ANY_FORMAT, id->format);
-       ADD(alias, "v", id->vendor != IPACK_ANY_ID, id->vendor);
-       ADD(alias, "d", id->device != IPACK_ANY_ID, id->device);
+       ADD(alias, "f", format != IPACK_ANY_FORMAT, format);
+       ADD(alias, "v", vendor != IPACK_ANY_ID, vendor);
+       ADD(alias, "d", device != IPACK_ANY_ID, device);
        add_wildcard(alias);
        return 1;
 }
-ADD_TO_DEVTABLE("ipack", struct ipack_device_id, do_ipack_entry);
+ADD_TO_DEVTABLE("ipack", ipack_device_id, do_ipack_entry);
 
 /*
  * Append a match expression for a single masked hex digit.
@@ -1030,25 +1086,27 @@ static void append_nibble_mask(char **outp,
  *     a ? or [] pattern matching exactly one digit.
  */
 static int do_amba_entry(const char *filename,
-                        struct amba_id *id, char *alias)
+                        void *symval, char *alias)
 {
        unsigned int digit;
        char *p = alias;
+       DEF_FIELD(symval, amba_id, id);
+       DEF_FIELD(symval, amba_id, mask);
 
-       if ((id->id & id->mask) != id->id)
+       if ((id & mask) != id)
                fatal("%s: Masked-off bit(s) of AMBA device ID are non-zero: "
                      "id=0x%08X, mask=0x%08X.  Please fix this driver.\n",
-                     filename, id->id, id->mask);
+                     filename, id, mask);
 
        p += sprintf(alias, "amba:d");
        for (digit = 0; digit < 8; digit++)
                append_nibble_mask(&p,
-                                  (id->id >> (4 * (7 - digit))) & 0xf,
-                                  (id->mask >> (4 * (7 - digit))) & 0xf);
+                                  (id >> (4 * (7 - digit))) & 0xf,
+                                  (mask >> (4 * (7 - digit))) & 0xf);
 
        return 1;
 }
-ADD_TO_DEVTABLE("amba", struct amba_id, do_amba_entry);
+ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
 
 /* LOOKS like x86cpu:vendor:VVVV:family:FFFF:model:MMMM:feature:*,FEAT,*
  * All fields are numbers. It would be nicer to use strings for vendor
@@ -1056,24 +1114,24 @@ ADD_TO_DEVTABLE("amba", struct amba_id, do_amba_entry);
  * complicated.
  */
 
-static int do_x86cpu_entry(const char *filename, struct x86_cpu_id *id,
+static int do_x86cpu_entry(const char *filename, void *symval,
                           char *alias)
 {
-       id->feature = TO_NATIVE(id->feature);
-       id->family = TO_NATIVE(id->family);
-       id->model = TO_NATIVE(id->model);
-       id->vendor = TO_NATIVE(id->vendor);
+       DEF_FIELD(symval, x86_cpu_id, feature);
+       DEF_FIELD(symval, x86_cpu_id, family);
+       DEF_FIELD(symval, x86_cpu_id, model);
+       DEF_FIELD(symval, x86_cpu_id, vendor);
 
        strcpy(alias, "x86cpu:");
-       ADD(alias, "vendor:",  id->vendor != X86_VENDOR_ANY, id->vendor);
-       ADD(alias, ":family:", id->family != X86_FAMILY_ANY, id->family);
-       ADD(alias, ":model:",  id->model  != X86_MODEL_ANY,  id->model);
+       ADD(alias, "vendor:",  vendor != X86_VENDOR_ANY, vendor);
+       ADD(alias, ":family:", family != X86_FAMILY_ANY, family);
+       ADD(alias, ":model:",  model  != X86_MODEL_ANY,  model);
        strcat(alias, ":feature:*");
-       if (id->feature != X86_FEATURE_ANY)
-               sprintf(alias + strlen(alias), "%04X*", id->feature);
+       if (feature != X86_FEATURE_ANY)
+               sprintf(alias + strlen(alias), "%04X*", feature);
        return 1;
 }
-ADD_TO_DEVTABLE("x86cpu", struct x86_cpu_id, do_x86cpu_entry);
+ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
 
 /* Does namelen bytes of name exactly match the symbol? */
 static bool sym_is(const char *name, unsigned namelen, const char *symbol)
index 1c6fbb1..78b30c1 100644 (file)
@@ -2128,7 +2128,7 @@ int main(int argc, char **argv)
        struct ext_sym_list *extsym_iter;
        struct ext_sym_list *extsym_start = NULL;
 
-       while ((opt = getopt(argc, argv, "i:I:e:cmsSo:awM:K:")) != -1) {
+       while ((opt = getopt(argc, argv, "i:I:e:msSo:awM:K:")) != -1) {
                switch (opt) {
                case 'i':
                        kernel_read = optarg;
@@ -2137,9 +2137,6 @@ int main(int argc, char **argv)
                        module_read = optarg;
                        external_module = 1;
                        break;
-               case 'c':
-                       cross_build = 1;
-                       break;
                case 'e':
                        external_module = 1;
                        extsym_iter =
index 4bf17dd..fbbfd08 100755 (executable)
@@ -95,7 +95,7 @@ echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
 echo "%endif"
 echo "%endif"
 
-echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install'
+echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr KBUILD_SRC= headers_install'
 echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
 
 echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE"
index ee52cb8..9c22317 100644 (file)
 #include <string.h>
 #include <unistd.h>
 
+#ifndef EM_METAG
+/* Remove this when these make it to the standard system elf.h. */
+#define EM_METAG      174
+#define R_METAG_ADDR32                   2
+#define R_METAG_NONE                     3
+#endif
+
 static int fd_map;     /* File descriptor for file being modified. */
 static int mmap_failed; /* Boolean flag. */
 static void *ehdr_curr; /* current ElfXX_Ehdr *  for resource cleanup */
@@ -341,6 +348,12 @@ do_file(char const *const fname)
                         altmcount = "__gnu_mcount_nc";
                         break;
        case EM_IA_64:   reltype = R_IA64_IMM64;   gpfx = '_'; break;
+       case EM_METAG:   reltype = R_METAG_ADDR32;
+                        altmcount = "_mcount_wrapper";
+                        rel_type_nop = R_METAG_NONE;
+                        /* We happen to have the same requirement as MIPS */
+                        is_fake_mcount32 = MIPS32_is_fake_mcount;
+                        break;
        case EM_MIPS:    /* reltype: e_class    */ gpfx = '_'; break;
        case EM_PPC:     reltype = R_PPC_ADDR32;   gpfx = '_'; break;
        case EM_PPC64:   reltype = R_PPC64_ADDR64; gpfx = '_'; break;
index bd6dca8..84b88f1 100755 (executable)
@@ -108,7 +108,7 @@ scm_version()
        fi
 
        # Check for svn and a svn repo.
-       if rev=`svn info 2>/dev/null | grep '^Last Changed Rev'`; then
+       if rev=`LANG= LC_ALL= LC_MESSAGES=C svn info 2>/dev/null | grep '^Last Changed Rev'`; then
                rev=`echo $rev | awk '{print $NF}'`
                printf -- '-svn%s' "$rev"
 
index 65f9595..26a87e6 100755 (executable)
@@ -217,34 +217,34 @@ exuberant()
 emacs()
 {
        all_target_sources | xargs $1 -a                        \
-       --regex='/^(ENTRY|_GLOBAL)(\([^)]*\)).*/\2/'            \
+       --regex='/^\(ENTRY\|_GLOBAL\)(\([^)]*\)).*/\2/'         \
        --regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/'   \
        --regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/'          \
        --regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/' \
-       --regex='/PAGEFLAG\(([^,)]*).*/Page\1/'                 \
-       --regex='/PAGEFLAG\(([^,)]*).*/SetPage\1/'              \
-       --regex='/PAGEFLAG\(([^,)]*).*/ClearPage\1/'            \
-       --regex='/TESTSETFLAG\(([^,)]*).*/TestSetPage\1/'       \
-       --regex='/TESTPAGEFLAG\(([^,)]*).*/Page\1/'             \
-       --regex='/SETPAGEFLAG\(([^,)]*).*/SetPage\1/'           \
-       --regex='/__SETPAGEFLAG\(([^,)]*).*/__SetPage\1/'       \
-       --regex='/TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/'   \
-       --regex='/__TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/' \
-       --regex='/CLEARPAGEFLAG\(([^,)]*).*/ClearPage\1/'       \
-       --regex='/__CLEARPAGEFLAG\(([^,)]*).*/__ClearPage\1/'   \
-       --regex='/__PAGEFLAG\(([^,)]*).*/__SetPage\1/'          \
-       --regex='/__PAGEFLAG\(([^,)]*).*/__ClearPage\1/'        \
-       --regex='/PAGEFLAG_FALSE\(([^,)]*).*/Page\1/'           \
-       --regex='/TESTSCFLAG\(([^,)]*).*/TestSetPage\1/'        \
-       --regex='/TESTSCFLAG\(([^,)]*).*/TestClearPage\1/'      \
-       --regex='/SETPAGEFLAG_NOOP\(([^,)]*).*/SetPage\1/'      \
-       --regex='/CLEARPAGEFLAG_NOOP\(([^,)]*).*/ClearPage\1/'  \
-       --regex='/__CLEARPAGEFLAG_NOOP\(([^,)]*).*/__ClearPage\1/' \
-       --regex='/TESTCLEARFLAG_FALSE\(([^,)]*).*/TestClearPage\1/' \
-       --regex='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/' \
-       --regex='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'            \
-       --regex='/PCI_OP_READ\(([a-z]*[a-z]).*[1-4]\)/pci_bus_read_config_\1/' \
-       --regex='/PCI_OP_WRITE\(([a-z]*[a-z]).*[1-4]\)/pci_bus_write_config_\1/'
+       --regex='/PAGEFLAG(\([^,)]*\).*/Page\1/'                        \
+       --regex='/PAGEFLAG(\([^,)]*\).*/SetPage\1/'             \
+       --regex='/PAGEFLAG(\([^,)]*\).*/ClearPage\1/'           \
+       --regex='/TESTSETFLAG(\([^,)]*\).*/TestSetPage\1/'      \
+       --regex='/TESTPAGEFLAG(\([^,)]*\).*/Page\1/'            \
+       --regex='/SETPAGEFLAG(\([^,)]*\).*/SetPage\1/'          \
+       --regex='/__SETPAGEFLAG(\([^,)]*\).*/__SetPage\1/'      \
+       --regex='/TESTCLEARFLAG(\([^,)]*\).*/TestClearPage\1/'  \
+       --regex='/__TESTCLEARFLAG(\([^,)]*\).*/TestClearPage\1/'        \
+       --regex='/CLEARPAGEFLAG(\([^,)]*\).*/ClearPage\1/'      \
+       --regex='/__CLEARPAGEFLAG(\([^,)]*\).*/__ClearPage\1/'  \
+       --regex='/__PAGEFLAG(\([^,)]*\).*/__SetPage\1/'         \
+       --regex='/__PAGEFLAG(\([^,)]*\).*/__ClearPage\1/'       \
+       --regex='/PAGEFLAG_FALSE(\([^,)]*\).*/Page\1/'          \
+       --regex='/TESTSCFLAG(\([^,)]*\).*/TestSetPage\1/'       \
+       --regex='/TESTSCFLAG(\([^,)]*\).*/TestClearPage\1/'     \
+       --regex='/SETPAGEFLAG_NOOP(\([^,)]*\).*/SetPage\1/'     \
+       --regex='/CLEARPAGEFLAG_NOOP(\([^,)]*\).*/ClearPage\1/' \
+       --regex='/__CLEARPAGEFLAG_NOOP(\([^,)]*\).*/__ClearPage\1/' \
+       --regex='/TESTCLEARFLAG_FALSE(\([^,)]*\).*/TestClearPage\1/' \
+       --regex='/__TESTCLEARFLAG_FALSE(\([^,)]*\).*/__TestClearPage\1/' \
+       --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'           \
+       --regex='/PCI_OP_READ(\([a-z]*[a-z]\).*[1-4])/pci_bus_read_config_\1/' \
+       --regex='/PCI_OP_WRITE(\([a-z]*[a-z]\).*[1-4])/pci_bus_write_config_\1/'
 
        all_kconfigs | xargs $1 -a                              \
        --regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'
index 60f0c76..859abda 100644 (file)
@@ -349,8 +349,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
        unsigned int state;
        struct file_perms perms = {};
        struct path_cond cond = {
-               bprm->file->f_path.dentry->d_inode->i_uid,
-               bprm->file->f_path.dentry->d_inode->i_mode
+               file_inode(bprm->file)->i_uid,
+               file_inode(bprm->file)->i_mode
        };
        const char *name = NULL, *target = NULL, *info = NULL;
        int error = cap_bprm_set_creds(bprm);
index cd21ec5..fdaa50c 100644 (file)
@@ -449,8 +449,8 @@ int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
                 u32 request)
 {
        struct path_cond cond = {
-               .uid = file->f_path.dentry->d_inode->i_uid,
-               .mode = file->f_path.dentry->d_inode->i_mode
+               .uid = file_inode(file)->i_uid,
+               .mode = file_inode(file)->i_mode
        };
 
        return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED,
index 8c2a7f6..b21830e 100644 (file)
@@ -379,7 +379,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(file->f_path.dentry->d_inode))
+       if (!mediated_filesystem(file_inode(file)))
                return 0;
 
        /* If in exec, permission is handled by bprm hooks.
@@ -394,7 +394,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
 
        profile = aa_cred_profile(cred);
        if (!unconfined(profile)) {
-               struct inode *inode = file->f_path.dentry->d_inode;
+               struct inode *inode = file_inode(file);
                struct path_cond cond = { inode->i_uid, inode->i_mode };
 
                error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
@@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask)
        BUG_ON(!fprofile);
 
        if (!file->f_path.mnt ||
-           !mediated_filesystem(file->f_path.dentry->d_inode))
+           !mediated_filesystem(file_inode(file)))
                return 0;
 
        profile = __aa_current_profile();
index 7ee08c7..c44b6fe 100644 (file)
@@ -440,7 +440,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_c
        if (!file_caps_enabled)
                return 0;
 
-       if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)
+       if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
                return 0;
 
        dentry = dget(bprm->file->f_dentry);
index d9030b2..1c03e8f 100644 (file)
@@ -140,12 +140,12 @@ int ima_must_measure(struct inode *inode, int mask, int function)
 int ima_collect_measurement(struct integrity_iint_cache *iint,
                            struct file *file)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        const char *filename = file->f_dentry->d_name.name;
        int result = 0;
 
        if (!(iint->flags & IMA_COLLECTED)) {
-               u64 i_version = file->f_dentry->d_inode->i_version;
+               u64 i_version = file_inode(file)->i_version;
 
                iint->ima_xattr.type = IMA_XATTR_DIGEST;
                result = ima_calc_file_hash(file, iint->ima_xattr.digest);
@@ -182,7 +182,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
        const char *op = "add_template_measure";
        const char *audit_cause = "ENOMEM";
        int result = -ENOMEM;
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct ima_template_entry *entry;
        int violation = 0;
 
index b691e0f..a02e079 100644 (file)
@@ -66,7 +66,7 @@ int ima_calc_file_hash(struct file *file, char *digest)
                file->f_mode |= FMODE_READ;
                read = 1;
        }
-       i_size = i_size_read(file->f_dentry->d_inode);
+       i_size = i_size_read(file_inode(file));
        while (offset < i_size) {
                int rbuf_len;
 
index 5b14a09..3b3b7e6 100644 (file)
@@ -126,7 +126,7 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
  */
 void ima_file_free(struct file *file)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct integrity_iint_cache *iint;
 
        if (!iint_initialized || !S_ISREG(inode->i_mode))
@@ -142,7 +142,7 @@ void ima_file_free(struct file *file)
 static int process_measurement(struct file *file, const char *filename,
                               int mask, int function)
 {
-       struct inode *inode = file->f_dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct integrity_iint_cache *iint;
        char *pathbuf = NULL;
        const char *pathname = NULL;
index 55a6271..ff63fe0 100644 (file)
@@ -45,12 +45,11 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
 {
        struct ima_queue_entry *qe, *ret = NULL;
        unsigned int key;
-       struct hlist_node *pos;
        int rc;
 
        key = ima_hash_key(digest_value);
        rcu_read_lock();
-       hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) {
+       hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
                rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
                if (rc == 0) {
                        ret = qe;
index 4d3fab4..dad36a6 100644 (file)
@@ -188,11 +188,9 @@ int avc_get_hash_stats(char *page)
        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
                head = &avc_cache.slots[i];
                if (!hlist_empty(head)) {
-                       struct hlist_node *next;
-
                        slots_used++;
                        chain_len = 0;
-                       hlist_for_each_entry_rcu(node, next, head, list)
+                       hlist_for_each_entry_rcu(node, head, list)
                                chain_len++;
                        if (chain_len > max_chain_len)
                                max_chain_len = chain_len;
@@ -241,7 +239,6 @@ static inline int avc_reclaim_node(void)
        int hvalue, try, ecx;
        unsigned long flags;
        struct hlist_head *head;
-       struct hlist_node *next;
        spinlock_t *lock;
 
        for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
@@ -253,7 +250,7 @@ static inline int avc_reclaim_node(void)
                        continue;
 
                rcu_read_lock();
-               hlist_for_each_entry(node, next, head, list) {
+               hlist_for_each_entry(node, head, list) {
                        avc_node_delete(node);
                        avc_cache_stats_incr(reclaims);
                        ecx++;
@@ -301,11 +298,10 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
        struct avc_node *node, *ret = NULL;
        int hvalue;
        struct hlist_head *head;
-       struct hlist_node *next;
 
        hvalue = avc_hash(ssid, tsid, tclass);
        head = &avc_cache.slots[hvalue];
-       hlist_for_each_entry_rcu(node, next, head, list) {
+       hlist_for_each_entry_rcu(node, head, list) {
                if (ssid == node->ae.ssid &&
                    tclass == node->ae.tclass &&
                    tsid == node->ae.tsid) {
@@ -394,7 +390,6 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
        node = avc_alloc_node();
        if (node) {
                struct hlist_head *head;
-               struct hlist_node *next;
                spinlock_t *lock;
 
                hvalue = avc_hash(ssid, tsid, tclass);
@@ -404,7 +399,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
                lock = &avc_cache.slots_lock[hvalue];
 
                spin_lock_irqsave(lock, flag);
-               hlist_for_each_entry(pos, next, head, list) {
+               hlist_for_each_entry(pos, head, list) {
                        if (pos->ae.ssid == ssid &&
                            pos->ae.tsid == tsid &&
                            pos->ae.tclass == tclass) {
@@ -541,7 +536,6 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
        unsigned long flag;
        struct avc_node *pos, *node, *orig = NULL;
        struct hlist_head *head;
-       struct hlist_node *next;
        spinlock_t *lock;
 
        node = avc_alloc_node();
@@ -558,7 +552,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
 
        spin_lock_irqsave(lock, flag);
 
-       hlist_for_each_entry(pos, next, head, list) {
+       hlist_for_each_entry(pos, head, list) {
                if (ssid == pos->ae.ssid &&
                    tsid == pos->ae.tsid &&
                    tclass == pos->ae.tclass &&
@@ -614,7 +608,6 @@ out:
 static void avc_flush(void)
 {
        struct hlist_head *head;
-       struct hlist_node *next;
        struct avc_node *node;
        spinlock_t *lock;
        unsigned long flag;
@@ -630,7 +623,7 @@ static void avc_flush(void)
                 * prevent RCU grace periods from ending.
                 */
                rcu_read_lock();
-               hlist_for_each_entry(node, next, head, list)
+               hlist_for_each_entry(node, head, list)
                        avc_node_delete(node);
                rcu_read_unlock();
                spin_unlock_irqrestore(lock, flag);
index ef26e96..2fa28c8 100644 (file)
@@ -1528,7 +1528,7 @@ static int file_has_perm(const struct cred *cred,
                         u32 av)
 {
        struct file_security_struct *fsec = file->f_security;
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct common_audit_data ad;
        u32 sid = cred_sid(cred);
        int rc;
@@ -1957,7 +1957,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
        struct task_security_struct *new_tsec;
        struct inode_security_struct *isec;
        struct common_audit_data ad;
-       struct inode *inode = bprm->file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(bprm->file);
        int rc;
 
        rc = cap_bprm_set_creds(bprm);
@@ -2929,7 +2929,7 @@ static void selinux_inode_getsecid(const struct inode *inode, u32 *secid)
 static int selinux_revalidate_file_permission(struct file *file, int mask)
 {
        const struct cred *cred = current_cred();
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
 
        /* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
        if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
@@ -2941,7 +2941,7 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
 
 static int selinux_file_permission(struct file *file, int mask)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct file_security_struct *fsec = file->f_security;
        struct inode_security_struct *isec = inode->i_security;
        u32 sid = current_sid();
@@ -3135,11 +3135,6 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
 
        switch (cmd) {
        case F_SETFL:
-               if (!file->f_path.dentry || !file->f_path.dentry->d_inode) {
-                       err = -EINVAL;
-                       break;
-               }
-
                if ((file->f_flags & O_APPEND) && !(arg & O_APPEND)) {
                        err = file_has_perm(cred, file, FILE__WRITE);
                        break;
@@ -3162,10 +3157,6 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
        case F_SETLK64:
        case F_SETLKW64:
 #endif
-               if (!file->f_path.dentry || !file->f_path.dentry->d_inode) {
-                       err = -EINVAL;
-                       break;
-               }
                err = file_has_perm(cred, file, FILE__LOCK);
                break;
        }
@@ -3218,7 +3209,7 @@ static int selinux_file_open(struct file *file, const struct cred *cred)
        struct inode_security_struct *isec;
 
        fsec = file->f_security;
-       isec = file->f_path.dentry->d_inode->i_security;
+       isec = file_inode(file)->i_security;
        /*
         * Save inode label and policy sequence number
         * at open-time so that selinux_file_permission
index 3a6e873..ff42773 100644 (file)
@@ -202,7 +202,7 @@ static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
 {
        char tmpbuf[TMPBUFLEN];
        ssize_t length;
-       ino_t ino = filp->f_path.dentry->d_inode->i_ino;
+       ino_t ino = file_inode(filp)->i_ino;
        int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
                security_get_reject_unknown() : !security_get_allow_unknown();
 
@@ -671,7 +671,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
 
 static ssize_t selinux_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
 {
-       ino_t ino = file->f_path.dentry->d_inode->i_ino;
+       ino_t ino = file_inode(file)->i_ino;
        char *data;
        ssize_t rv;
 
@@ -1042,8 +1042,7 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
        ssize_t length;
        ssize_t ret;
        int cur_enforcing;
-       struct inode *inode = filep->f_path.dentry->d_inode;
-       unsigned index = inode->i_ino & SEL_INO_MASK;
+       unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
        const char *name = filep->f_path.dentry->d_name.name;
 
        mutex_lock(&sel_mutex);
@@ -1077,8 +1076,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
        char *page = NULL;
        ssize_t length;
        int new_value;
-       struct inode *inode = filep->f_path.dentry->d_inode;
-       unsigned index = inode->i_ino & SEL_INO_MASK;
+       unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
        const char *name = filep->f_path.dentry->d_name.name;
 
        mutex_lock(&sel_mutex);
@@ -1486,13 +1484,11 @@ static int sel_make_avc_files(struct dentry *dir)
 static ssize_t sel_read_initcon(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       struct inode *inode;
        char *con;
        u32 sid, len;
        ssize_t ret;
 
-       inode = file->f_path.dentry->d_inode;
-       sid = inode->i_ino&SEL_INO_MASK;
+       sid = file_inode(file)->i_ino&SEL_INO_MASK;
        ret = security_sid_to_context(sid, &con, &len);
        if (ret)
                return ret;
@@ -1553,7 +1549,7 @@ static inline u32 sel_ino_to_perm(unsigned long ino)
 static ssize_t sel_read_class(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       unsigned long ino = file->f_path.dentry->d_inode->i_ino;
+       unsigned long ino = file_inode(file)->i_ino;
        char res[TMPBUFLEN];
        ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
        return simple_read_from_buffer(buf, count, ppos, res, len);
@@ -1567,7 +1563,7 @@ static const struct file_operations sel_class_ops = {
 static ssize_t sel_read_perm(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       unsigned long ino = file->f_path.dentry->d_inode->i_ino;
+       unsigned long ino = file_inode(file)->i_ino;
        char res[TMPBUFLEN];
        ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
        return simple_read_from_buffer(buf, count, ppos, res, len);
@@ -1584,7 +1580,7 @@ static ssize_t sel_read_policycap(struct file *file, char __user *buf,
        int value;
        char tmpbuf[TMPBUFLEN];
        ssize_t length;
-       unsigned long i_ino = file->f_path.dentry->d_inode->i_ino;
+       unsigned long i_ino = file_inode(file)->i_ino;
 
        value = security_policycap_supported(i_ino & SEL_INO_MASK);
        length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value);
index 38be92c..fa64740 100644 (file)
@@ -456,7 +456,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
  */
 static int smack_bprm_set_creds(struct linux_binprm *bprm)
 {
-       struct inode *inode = bprm->file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(bprm->file);
        struct task_smack *bsp = bprm->cred->security;
        struct inode_smack *isp;
        int rc;
@@ -1187,21 +1187,15 @@ static int smack_mmap_file(struct file *file,
        char *msmack;
        char *osmack;
        struct inode_smack *isp;
-       struct dentry *dp;
        int may;
        int mmay;
        int tmay;
        int rc;
 
-       if (file == NULL || file->f_dentry == NULL)
-               return 0;
-
-       dp = file->f_dentry;
-
-       if (dp->d_inode == NULL)
+       if (file == NULL)
                return 0;
 
-       isp = dp->d_inode->i_security;
+       isp = file_inode(file)->i_security;
        if (isp->smk_mmap == NULL)
                return 0;
        msmack = isp->smk_mmap;
@@ -1359,7 +1353,7 @@ static int smack_file_receive(struct file *file)
  */
 static int smack_file_open(struct file *file, const struct cred *cred)
 {
-       struct inode_smack *isp = file->f_path.dentry->d_inode->i_security;
+       struct inode_smack *isp = file_inode(file)->i_security;
 
        file->f_security = isp->smk_inode;
 
index 8592f2f..fcf3278 100644 (file)
@@ -135,7 +135,7 @@ static const struct file_operations tomoyo_self_operations = {
  */
 static int tomoyo_open(struct inode *inode, struct file *file)
 {
-       const int key = ((u8 *) file->f_path.dentry->d_inode->i_private)
+       const int key = ((u8 *) file_inode(file)->i_private)
                - ((u8 *) NULL);
        return tomoyo_open_control(key, file);
 }
index 6b368d2..5bb97e7 100644 (file)
@@ -496,7 +496,7 @@ static long snd_info_entry_ioctl(struct file *file, unsigned int cmd,
 
 static int snd_info_entry_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        struct snd_info_private_data *data;
        struct snd_info_entry *entry;
 
index 09b4286..71ae86c 100644 (file)
@@ -1586,7 +1586,7 @@ static struct file *snd_pcm_file_fd(int fd, int *fput_needed)
        file = fget_light(fd, fput_needed);
        if (!file)
                return NULL;
-       inode = file->f_path.dentry->d_inode;
+       inode = file_inode(file);
        if (!S_ISCHR(inode->i_mode) ||
            imajor(inode) != snd_major) {
                fput_light(file, *fput_needed);
index 536c4c0..11ff7c5 100644 (file)
@@ -642,7 +642,7 @@ static int mixer_ioctl(unsigned int cmd, unsigned long arg)
 
 static long dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       int minor = iminor(file->f_path.dentry->d_inode);
+       int minor = iminor(file_inode(file));
        int ret;
 
        if (cmd == OSS_GETVERSION) {
@@ -1012,7 +1012,7 @@ static int dsp_write(const char __user *buf, size_t len)
 
 static ssize_t dev_read(struct file *file, char __user *buf, size_t count, loff_t *off)
 {
-       int minor = iminor(file->f_path.dentry->d_inode);
+       int minor = iminor(file_inode(file));
        if (minor == dev.dsp_minor)
                return dsp_read(buf, count);
        else
@@ -1021,7 +1021,7 @@ static ssize_t dev_read(struct file *file, char __user *buf, size_t count, loff_
 
 static ssize_t dev_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
 {
-       int minor = iminor(file->f_path.dentry->d_inode);
+       int minor = iminor(file_inode(file));
        if (minor == dev.dsp_minor)
                return dsp_write(buf, count);
        else
index 7c7793a..e778034 100644 (file)
@@ -143,7 +143,7 @@ static int get_mixer_levels(void __user * arg)
 
 static ssize_t sound_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
-       int dev = iminor(file->f_path.dentry->d_inode);
+       int dev = iminor(file_inode(file));
        int ret = -EINVAL;
 
        /*
@@ -176,7 +176,7 @@ static ssize_t sound_read(struct file *file, char __user *buf, size_t count, lof
 
 static ssize_t sound_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
-       int dev = iminor(file->f_path.dentry->d_inode);
+       int dev = iminor(file_inode(file));
        int ret = -EINVAL;
        
        mutex_lock(&soundcard_mutex);
@@ -333,7 +333,7 @@ static int sound_mixer_ioctl(int mixdev, unsigned int cmd, void __user *arg)
 static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        int len = 0, dtype;
-       int dev = iminor(file->f_dentry->d_inode);
+       int dev = iminor(file_inode(file));
        long ret = -EINVAL;
        void __user *p = (void __user *)arg;
 
@@ -406,7 +406,7 @@ static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 static unsigned int sound_poll(struct file *file, poll_table * wait)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int dev = iminor(inode);
 
        DEB(printk("sound_poll(dev=%d)\n", dev));
@@ -431,7 +431,7 @@ static int sound_mmap(struct file *file, struct vm_area_struct *vma)
        int dev_class;
        unsigned long size;
        struct dma_buffparms *dmap = NULL;
-       int dev = iminor(file->f_path.dentry->d_inode);
+       int dev = iminor(file_inode(file));
 
        dev_class = dev & 0x0f;
        dev >>= 4;
index cdd100d..9febe55 100644 (file)
@@ -836,6 +836,8 @@ static struct {
        {0x7063, 0x2000}, /* pcHDTV HD-2000 TV */
 };
 
+static struct pci_driver driver;
+
 /* return the id of the card, or a negative value if it's blacklisted */
 static int snd_bt87x_detect_card(struct pci_dev *pci)
 {
@@ -962,11 +964,24 @@ static DEFINE_PCI_DEVICE_TABLE(snd_bt87x_default_ids) = {
        { }
 };
 
-static struct pci_driver bt87x_driver = {
+static struct pci_driver driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_bt87x_ids,
        .probe = snd_bt87x_probe,
        .remove = snd_bt87x_remove,
 };
 
-module_pci_driver(bt87x_driver);
+static int __init alsa_card_bt87x_init(void)
+{
+       if (load_all)
+               driver.id_table = snd_bt87x_default_ids;
+       return pci_register_driver(&driver);
+}
+
+static void __exit alsa_card_bt87x_exit(void)
+{
+       pci_unregister_driver(&driver);
+}
+
+module_init(alsa_card_bt87x_init)
+module_exit(alsa_card_bt87x_exit)
index a7c296a..e6b0166 100644 (file)
@@ -862,6 +862,12 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
                           filename, emu->firmware->size);
        }
 
+       err = snd_emu1010_load_firmware(emu);
+       if (err != 0) {
+               snd_printk(KERN_INFO "emu1010: Loading Firmware failed\n");
+               return err;
+       }
+
        /* ID, should read & 0x7f = 0x55 when FPGA programmed. */
        snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
        if ((reg & 0x3f) != 0x15) {
index 748a286..5ae1d04 100644 (file)
@@ -1127,7 +1127,7 @@ static int snd_emu10k1_playback_open(struct snd_pcm_substream *substream)
        struct snd_emu10k1_pcm *epcm;
        struct snd_emu10k1_pcm_mixer *mix;
        struct snd_pcm_runtime *runtime = substream->runtime;
-       int i, err;
+       int i, err, sample_rate;
 
        epcm = kzalloc(sizeof(*epcm), GFP_KERNEL);
        if (epcm == NULL)
@@ -1146,7 +1146,11 @@ static int snd_emu10k1_playback_open(struct snd_pcm_substream *substream)
                kfree(epcm);
                return err;
        }
-       err = snd_pcm_hw_rule_noresample(runtime, 48000);
+       if (emu->card_capabilities->emu_model && emu->emu1010.internal_clock == 0)
+               sample_rate = 44100;
+       else
+               sample_rate = 48000;
+       err = snd_pcm_hw_rule_noresample(runtime, sample_rate);
        if (err < 0) {
                kfree(epcm);
                return err;
index 21425fb..78e1827 100644 (file)
@@ -1640,6 +1640,9 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
 
        if (pcmdev > 0)
                sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
+       if (!is_jack_detectable(codec, per_pin->pin_nid))
+               strncat(hdmi_str, " Phantom",
+                       sizeof(hdmi_str) - strlen(hdmi_str) - 1);
 
        return snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str, 0);
 }
index 61478fd..2d4237b 100644 (file)
@@ -928,6 +928,7 @@ static int alc_codec_rename_from_preset(struct hda_codec *codec)
 static const struct snd_pci_quirk beep_white_list[] = {
        SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
        SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+       SND_PCI_QUIRK(0x1043, 0x8376, "EeePC", 1),
        SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
        SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
        SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
index 7641080..1112ec1 100644 (file)
@@ -35,6 +35,7 @@
 struct revo51_spec {
        struct snd_i2c_device *dev;
        struct snd_pt2258 *pt2258;
+       struct ak4114 *ak4114;
 };
 
 static void revo_i2s_mclk_changed(struct snd_ice1712 *ice)
@@ -359,9 +360,9 @@ static struct snd_ak4xxx_private akm_ap192_priv = {
        .cif = 0,
        .data_mask = VT1724_REVO_CDOUT,
        .clk_mask = VT1724_REVO_CCLK,
-       .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS1,
-       .cs_addr = VT1724_REVO_CS1,
-       .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS1,
+       .cs_mask = VT1724_REVO_CS0 | VT1724_REVO_CS3,
+       .cs_addr = VT1724_REVO_CS3,
+       .cs_none = VT1724_REVO_CS0 | VT1724_REVO_CS3,
        .add_flags = VT1724_REVO_CCLK, /* high at init */
        .mask_flags = 0,
 };
@@ -372,7 +373,7 @@ static struct snd_ak4xxx_private akm_ap192_priv = {
  * CCLK (pin 34) -- GPIO1 pin 51 (shared with AK4358)
  * CSN  (pin 35) -- GPIO7 pin 59
  */
-#define AK4114_ADDR    0x02
+#define AK4114_ADDR    0x00
 
 static void write_data(struct snd_ice1712 *ice, unsigned int gpio,
                       unsigned int data, int idx)
@@ -426,7 +427,7 @@ static unsigned int ap192_4wire_start(struct snd_ice1712 *ice)
        tmp = snd_ice1712_gpio_read(ice);
        tmp |= VT1724_REVO_CCLK; /* high at init */
        tmp |= VT1724_REVO_CS0;
-       tmp &= ~VT1724_REVO_CS1;
+       tmp &= ~VT1724_REVO_CS3;
        snd_ice1712_gpio_write(ice, tmp);
        udelay(1);
        return tmp;
@@ -434,7 +435,7 @@ static unsigned int ap192_4wire_start(struct snd_ice1712 *ice)
 
 static void ap192_4wire_finish(struct snd_ice1712 *ice, unsigned int tmp)
 {
-       tmp |= VT1724_REVO_CS1;
+       tmp |= VT1724_REVO_CS3;
        tmp |= VT1724_REVO_CS0;
        snd_ice1712_gpio_write(ice, tmp);
        udelay(1);
@@ -470,27 +471,32 @@ static unsigned char ap192_ak4114_read(void *private_data, unsigned char addr)
 static int ap192_ak4114_init(struct snd_ice1712 *ice)
 {
        static const unsigned char ak4114_init_vals[] = {
-               AK4114_RST | AK4114_PWN | AK4114_OCKS0 | AK4114_OCKS1,
+               AK4114_RST | AK4114_PWN | AK4114_OCKS0,
                AK4114_DIF_I24I2S,
                AK4114_TX1E,
-               AK4114_EFH_1024 | AK4114_DIT | AK4114_IPS(1),
+               AK4114_EFH_1024 | AK4114_DIT | AK4114_IPS(0),
                0,
                0
        };
        static const unsigned char ak4114_init_txcsb[] = {
                0x41, 0x02, 0x2c, 0x00, 0x00
        };
-       struct ak4114 *ak;
        int err;
 
+       struct revo51_spec *spec;
+       spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+       ice->spec = spec;
+
        err = snd_ak4114_create(ice->card,
                                 ap192_ak4114_read,
                                 ap192_ak4114_write,
                                 ak4114_init_vals, ak4114_init_txcsb,
-                                ice, &ak);
+                                ice, &spec->ak4114);
        /* AK4114 in Revo cannot detect external rate correctly.
         * No reason to stop capture stream due to incorrect checks */
-       ak->check_flags = AK4114_CHECK_NO_RATE;
+       spec->ak4114->check_flags = AK4114_CHECK_NO_RATE;
 
        return 0; /* error ignored; it's no fatal error */
 }
@@ -562,6 +568,9 @@ static int revo_init(struct snd_ice1712 *ice)
                                               ice);
                if (err < 0)
                        return err;
+               err = ap192_ak4114_init(ice);
+               if (err < 0)
+                       return err;
                
                /* unmute all codecs */
                snd_ice1712_gpio_write_bits(ice, VT1724_REVO_MUTE,
@@ -575,7 +584,7 @@ static int revo_init(struct snd_ice1712 *ice)
 
 static int revo_add_controls(struct snd_ice1712 *ice)
 {
-       struct revo51_spec *spec;
+       struct revo51_spec *spec = ice->spec;
        int err;
 
        switch (ice->eeprom.subvendor) {
@@ -597,7 +606,9 @@ static int revo_add_controls(struct snd_ice1712 *ice)
                err = snd_ice1712_akm4xxx_build_controls(ice);
                if (err < 0)
                        return err;
-               err = ap192_ak4114_init(ice);
+               /* only capture SPDIF over AK4114 */
+               err = snd_ak4114_build(spec->ak4114, NULL,
+                  ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
                if (err < 0)
                        return err;
                break;
index 37711a5..e149034 100644 (file)
@@ -19,7 +19,7 @@ static int do_mod_firmware_load(const char *fn, char **fp)
                printk(KERN_INFO "Unable to load '%s'.\n", fn);
                return 0;
        }
-       l = i_size_read(filp->f_path.dentry->d_inode);
+       l = i_size_read(file_inode(filp));
        if (l <= 0 || l > 131072)
        {
                printk(KERN_INFO "Invalid firmware '%s'\n", fn);
index c2206c8..74659ec 100644 (file)
 #define CPUINFO_PROC   "cpu model"
 #endif
 
+#ifdef __arc__
+#define rmb()          asm volatile("" ::: "memory")
+#define cpu_relax()    rmb()
+#define CPUINFO_PROC   "Processor"
+#endif
+
+#ifdef __metag__
+#define rmb()          asm volatile("" ::: "memory")
+#define cpu_relax()    asm volatile("" ::: "memory")
+#define CPUINFO_PROC   "CPU"
+#endif
+
 #include <time.h>
 #include <unistd.h>
 #include <sys/types.h>
index bc4ad79..c8be0fb 100644 (file)
@@ -314,7 +314,6 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
 {
        struct hlist_head *head;
-       struct hlist_node *pos;
        struct perf_sample_id *sid;
        int hash;
 
@@ -324,7 +323,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];
 
-       hlist_for_each_entry(sid, pos, head, node)
+       hlist_for_each_entry(sid, head, node)
                if (sid->id == id)
                        return sid->evsel;
 
index 85baf11..3cc0ad7 100644 (file)
@@ -1,4 +1,10 @@
-TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug
+TARGETS = breakpoints
+TARGETS += kcmp
+TARGETS += mqueue
+TARGETS += vm
+TARGETS += cpu-hotplug
+TARGETS += memory-hotplug
+TARGETS += efivarfs
 
 all:
        for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/README.txt b/tools/testing/selftests/README.txt
new file mode 100644 (file)
index 0000000..5e2faf9
--- /dev/null
@@ -0,0 +1,42 @@
+Linux Kernel Selftests
+
+The kernel contains a set of "self tests" under the tools/testing/selftests/
+directory. These are intended to be small unit tests to exercise individual
+code paths in the kernel.
+
+Running the selftests
+=====================
+
+To build the tests:
+
+  $ make -C tools/testing/selftests
+
+
+To run the tests:
+
+  $ make -C tools/testing/selftests run_tests
+
+- note that some tests will require root privileges.
+
+
+To run only tests targeted for a single subsystem:
+
+  $  make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests
+
+See the top-level tools/testing/selftests/Makefile for the list of all possible
+targets.
+
+
+Contributing new tests
+======================
+
+In general, the rules for selftests are
+
+ * Do as much as you can if you're not root;
+
+ * Don't take too long;
+
+ * Don't break the build on any architecture, and
+
+ * Don't cause the top-level "make run_tests" to fail if your feature is
+   unconfigured.
diff --git a/tools/testing/selftests/efivarfs/Makefile b/tools/testing/selftests/efivarfs/Makefile
new file mode 100644 (file)
index 0000000..29e8c6b
--- /dev/null
@@ -0,0 +1,12 @@
+CC = $(CROSS_COMPILE)gcc
+CFLAGS = -Wall
+
+test_objs = open-unlink create-read
+
+all: $(test_objs)
+
+run_tests: all
+       @/bin/bash ./efivarfs.sh || echo "efivarfs selftests: [FAIL]"
+
+clean:
+       rm -f $(test_objs)
diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c
new file mode 100644 (file)
index 0000000..7feef18
--- /dev/null
@@ -0,0 +1,38 @@
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+       const char *path;
+       char buf[4];
+       int fd, rc;
+
+       if (argc < 2) {
+               fprintf(stderr, "usage: %s <path>\n", argv[0]);
+               return EXIT_FAILURE;
+       }
+
+       path = argv[1];
+
+       /* create a test variable */
+       fd = open(path, O_RDWR | O_CREAT, 0600);
+       if (fd < 0) {
+               perror("open(O_WRONLY)");
+               return EXIT_FAILURE;
+       }
+
+       rc = read(fd, buf, sizeof(buf));
+       if (rc != 0) {
+               fprintf(stderr, "Reading a new var should return EOF\n");
+               return EXIT_FAILURE;
+       }
+
+       return EXIT_SUCCESS;
+}
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
new file mode 100644 (file)
index 0000000..880cdd5
--- /dev/null
@@ -0,0 +1,139 @@
+#!/bin/bash
+
+efivarfs_mount=/sys/firmware/efi/efivars
+test_guid=210be57c-9849-4fc7-a635-e6382d1aec27
+
+check_prereqs()
+{
+       local msg="skip all tests:"
+
+       if [ $UID != 0 ]; then
+               echo $msg must be run as root >&2
+               exit 0
+       fi
+
+       if ! grep -q "^\S\+ $efivarfs_mount efivarfs" /proc/mounts; then
+               echo $msg efivarfs is not mounted on $efivarfs_mount >&2
+               exit 0
+       fi
+}
+
+run_test()
+{
+       local test="$1"
+
+       echo "--------------------"
+       echo "running $test"
+       echo "--------------------"
+
+       if [ "$(type -t $test)" = 'function' ]; then
+               ( $test )
+       else
+               ( ./$test )
+       fi
+
+       if [ $? -ne 0 ]; then
+               echo "  [FAIL]"
+               rc=1
+       else
+               echo "  [PASS]"
+       fi
+}
+
+test_create()
+{
+       local attrs='\x07\x00\x00\x00'
+       local file=$efivarfs_mount/$FUNCNAME-$test_guid
+
+       printf "$attrs\x00" > "$file"
+
+       if [ ! -e "$file" ]; then
+               echo "$file couldn't be created" >&2
+               exit 1
+       fi
+
+       if [ "$(stat -c %s "$file")" -ne 5 ]; then
+               echo "$file has invalid size" >&2
+               exit 1
+       fi
+}
+
+test_create_empty()
+{
+       local file=$efivarfs_mount/$FUNCNAME-$test_guid
+
+       : > "$file"
+
+       if [ ! -e "$file" ]; then
+               echo "$file can not be created without writing" >&2
+               exit 1
+       fi
+}
+
+test_create_read()
+{
+       local file=$efivarfs_mount/$FUNCNAME-$test_guid
+       ./create-read "$file"
+}
+
+test_delete()
+{
+       local attrs='\x07\x00\x00\x00'
+       local file=$efivarfs_mount/$FUNCNAME-$test_guid
+
+       printf "$attrs\x00" > "$file"
+
+       if [ ! -e "$file" ]; then
+               echo "$file couldn't be created" >&2
+               exit 1
+       fi
+
+       rm "$file"
+
+       if [ -e "$file" ]; then
+               echo "$file couldn't be deleted" >&2
+               exit 1
+       fi
+
+}
+
+# test that we can remove a variable by issuing a write with only
+# attributes specified
+test_zero_size_delete()
+{
+       local attrs='\x07\x00\x00\x00'
+       local file=$efivarfs_mount/$FUNCNAME-$test_guid
+
+       printf "$attrs\x00" > "$file"
+
+       if [ ! -e "$file" ]; then
+               echo "$file does not exist" >&2
+               exit 1
+       fi
+
+       printf "$attrs" > "$file"
+
+       if [ -e "$file" ]; then
+               echo "$file should have been deleted" >&2
+               exit 1
+       fi
+}
+
+test_open_unlink()
+{
+       local file=$efivarfs_mount/$FUNCNAME-$test_guid
+       ./open-unlink "$file"
+}
+
+check_prereqs
+
+rc=0
+
+run_test test_create
+run_test test_create_empty
+run_test test_create_read
+run_test test_delete
+run_test test_zero_size_delete
+run_test test_open_unlink
+
+exit $rc
diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
new file mode 100644 (file)
index 0000000..8c07644
--- /dev/null
@@ -0,0 +1,74 @@
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+/*
+ * Test that an efivarfs variable can be unlinked while a file
+ * descriptor to it is still open, and that reads through the stale
+ * descriptor no longer return data.
+ */
+int main(int argc, char **argv)
+{
+       const char *path;
+       uint32_t attrs;
+       char buf[5];
+       int fd, rc;
+
+       if (argc < 2) {
+               fprintf(stderr, "usage: %s <path>\n", argv[0]);
+               return EXIT_FAILURE;
+       }
+
+       path = argv[1];
+
+       /* attributes: EFI_VARIABLE_NON_VOLATILE |
+        *              EFI_VARIABLE_BOOTSERVICE_ACCESS |
+        *              EFI_VARIABLE_RUNTIME_ACCESS
+        */
+       attrs = 7;
+       memcpy(buf, &attrs, sizeof(attrs)); /* no unaligned/aliasing store */
+       buf[4] = 0;
+
+       /* create a test variable; O_CREAT requires an explicit mode */
+       fd = open(path, O_WRONLY | O_CREAT, 0600);
+       if (fd < 0) {
+               perror("open(O_WRONLY)");
+               return EXIT_FAILURE;
+       }
+
+       rc = write(fd, buf, sizeof(buf));
+       if (rc != sizeof(buf)) {
+               perror("write");
+               close(fd);
+               return EXIT_FAILURE;
+       }
+
+       close(fd);
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0) {
+               perror("open");
+               return EXIT_FAILURE;
+       }
+
+       if (unlink(path) < 0) {
+               perror("unlink");
+               close(fd);
+               return EXIT_FAILURE;
+       }
+
+       rc = read(fd, buf, sizeof(buf));
+       close(fd);
+       if (rc > 0) {
+               fprintf(stderr, "reading from an unlinked variable "
+                               "shouldn't be possible\n");
+               return EXIT_FAILURE;
+       }
+
+       return EXIT_SUCCESS;
+}
index b6eea5c..adb17f2 100644 (file)
@@ -268,14 +268,13 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
                         struct kvm_irq_routing_table *irq_rt)
 {
        struct kvm_kernel_irq_routing_entry *e;
-       struct hlist_node *n;
 
        if (irqfd->gsi >= irq_rt->nr_rt_entries) {
                rcu_assign_pointer(irqfd->irq_entry, NULL);
                return;
        }
 
-       hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
+       hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
                /* Only fast-path MSI. */
                if (e->type == KVM_IRQ_ROUTING_MSI)
                        rcu_assign_pointer(irqfd->irq_entry, e);
index ff6d40e..e9073cf 100644 (file)
@@ -173,7 +173,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
        struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
        int ret = -1, i = 0;
        struct kvm_irq_routing_table *irq_rt;
-       struct hlist_node *n;
 
        trace_kvm_set_irq(irq, level, irq_source_id);
 
@@ -184,7 +183,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
        rcu_read_lock();
        irq_rt = rcu_dereference(kvm->irq_routing);
        if (irq < irq_rt->nr_rt_entries)
-               hlist_for_each_entry(e, n, &irq_rt->map[irq], link)
+               hlist_for_each_entry(e, &irq_rt->map[irq], link)
                        irq_set[i++] = *e;
        rcu_read_unlock();
 
@@ -212,7 +211,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
        struct kvm_kernel_irq_routing_entry *e;
        int ret = -EINVAL;
        struct kvm_irq_routing_table *irq_rt;
-       struct hlist_node *n;
 
        trace_kvm_set_irq(irq, level, irq_source_id);
 
@@ -227,7 +225,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
        rcu_read_lock();
        irq_rt = rcu_dereference(kvm->irq_routing);
        if (irq < irq_rt->nr_rt_entries)
-               hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
+               hlist_for_each_entry(e, &irq_rt->map[irq], link) {
                        if (likely(e->type == KVM_IRQ_ROUTING_MSI))
                                ret = kvm_set_msi_inatomic(e, kvm);
                        else
@@ -241,13 +239,12 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
        struct kvm_irq_ack_notifier *kian;
-       struct hlist_node *n;
        int gsi;
 
        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
-               hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+               hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi) {
                                rcu_read_unlock();
@@ -263,7 +260,6 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
        struct kvm_irq_ack_notifier *kian;
-       struct hlist_node *n;
        int gsi;
 
        trace_kvm_ack_irq(irqchip, pin);
@@ -271,7 +267,7 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
-               hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+               hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi)
                                kian->irq_acked(kian);
@@ -369,13 +365,12 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask)
 {
        struct kvm_irq_mask_notifier *kimn;
-       struct hlist_node *n;
        int gsi;
 
        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
-               hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
+               hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
                        if (kimn->irq == gsi)
                                kimn->func(kimn, mask);
        rcu_read_unlock();
@@ -396,13 +391,12 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
        int delta;
        unsigned max_pin;
        struct kvm_kernel_irq_routing_entry *ei;
-       struct hlist_node *n;
 
        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and MSI.
         */
-       hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
+       hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
                if (ei->type == KVM_IRQ_ROUTING_MSI ||
                    ue->type == KVM_IRQ_ROUTING_MSI ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)